#守护着我的光[超话]#✨#每日一善# #阳光信用#
Life is like a box of chocolates, you never know what you are going to get.
生活就像一盒巧克力,你永远不知道下一个是什么。 ——《阿甘正传》
Sometimes you can't see what you're learning until you come out the other side.
有时候,直到拨云见日,才能豁然开朗。
Life is like a box of chocolates, you never know what you are going to get.
生活就像一盒巧克力,你永远不知道下一个是什么. ——《阿甘正传》
Sometimes you can't see what you're learning until you come out the other side.
有时候,直到拨云见日,才能豁然开朗。
Refer to the following Colab Python code, and write a new Colab Python script for this request:
the user imports a three-view (multiview orthographic projection) image file (PNG, JPG, etc.);
load the image to produce a 3D point cloud,
and write the results to an .obj file.
! pip install plotly -q
!git clone https://t.cn/A6KTcqVE
%cd shap-e
!pip install -e .
!git clone https://t.cn/A6NRWmuS
#Enter the directory and install the requirements
%cd shap-e
!pip install -e .
from PIL import Image
import torch
from tqdm.auto import tqdm
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud
#Implementation and Cooking the 3D models, import all the necessary libraries.
#%cd /content/shap-e
import torch
from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
#set the device to cuda if available, otherwise to cpu.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#load the models and weights.
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))
#generate the 3D models.
batch_size = 1 # this is the size of the models, higher values take longer to generate.
guidance_scale = 65.0 # this is the scale of the guidance, higher values make the model look more like the prompt.
latents = sample_latents(
batch_size=batch_size,
model=model,
diffusion=diffusion,
guidance_scale=guidance_scale,
model_kwargs=dict(texts=[prompt] * batch_size),
progress=True,
clip_denoised=True,
use_fp16=True,
use_karras=True,
karras_steps=64,
sigma_min=1E-3,
sigma_max=160,
s_churn=0,
)
render_mode = 'stf' #
size = 128 # this is the size of the renders, higher values take longer to render.
cameras = create_pan_cameras(size, device)
for i, latent in enumerate(latents):
images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
display(gif_widget(images))
#save the 3D models as .ply and .obj files.
# Example of saving the latents as meshes.
from shap_e.util.notebooks import decode_latent_mesh
for i, latent in enumerate(latents):
t = decode_latent_mesh(xm, latent).tri_mesh()
with open(f'example_mesh_{i}.ply', 'wb') as f: # this is three-dimensional geometric data of model.
t.write_ply(f)
with open(f'example_mesh_{i}.obj', 'w') as f: # we will use this file to customize in Blender Studio later.
t.write_obj(f)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating base model...')
base_name = 'base40M'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])
print('creating upsample model...')
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])
print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))
print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))
sampler = PointCloudSampler(
device=device,
models=[base_model, upsampler_model],
diffusions=[base_diffusion, upsampler_diffusion],
num_points=[1024, 4096 - 1024],
aux_channels=['R', 'G', 'B'],
guidance_scale=[3.0, 3.0],
)
from google.colab import files
uploaded = files.upload()
# Load an image to condition on.
img = Image.open('figure_all.jpg')
# Produce a sample from the model.
samples = None
for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
samples = x
img
pc = sampler.output_to_point_clouds(samples)[0]
fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75),(0.75, 0.75, 0.75)))
import plotly.graph_objects as go
fig_plotly = go.Figure(
data=[
go.Scatter3d(
x=pc.coords[:,0], y=pc.coords[:,1], z=pc.coords[:,2],
mode='markers',
marker=dict(
size=2,
color=['rgb({},{},{})'.format(r,g,b) for r,g,b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])],
)
)
],
layout=dict(
scene=dict(
xaxis=dict(visible=False),
yaxis=dict(visible=False),
zaxis=dict(visible=False)
)
),
)
fig_plotly.show(renderer="colab")
from point_e.util.pc_to_mesh import marching_cubes_mesh
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating SDF model...')
name = 'sdf'
model = model_from_config(MODEL_CONFIGS[name], device)
model.eval()
print('loading SDF model...')
model.load_state_dict(load_checkpoint(name, device))
import skimage.measure as measure
# Produce a mesh (with vertex colors)
mesh = marching_cubes_mesh(
pc=pc,
model=model,
batch_size=4096,
grid_size=32, # increase to 128 for resolution used in evals
progress=True,
)
# Write the mesh to a PLY file to import into some other program.
with open('figure_all.obj', 'wb') as f:
mesh.write_ply(f)
Write a new Colab Python script for this request:
the user imports a three-view (multiview orthographic projection) image file (PNG, JPG, etc.);
load the image to produce a 3D point cloud, and write the results to an .obj file.
! pip install plotly -q
!git clone https://t.cn/A6KTcqVE
%cd shap-e
!pip install -e .
!git clone https://t.cn/A6NRWmuS
#Enter the directory and install the requirements
%cd shap-e
!pip install -e .
from PIL import Image
import torch
from tqdm.auto import tqdm
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
from point_e.diffusion.sampler import PointCloudSampler
from point_e.models.download import load_checkpoint
from point_e.models.configs import MODEL_CONFIGS, model_from_config
from point_e.util.plotting import plot_point_cloud
#Implementation and Cooking the 3D models, import all the necessary libraries.
#%cd /content/shap-e
import torch
from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
#set the device to cuda if available, otherwise to cpu.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#load the models and weights.
xm = load_model('transmitter', device=device)
model = load_model('text300M', device=device)
diffusion = diffusion_from_config(load_config('diffusion'))
#generate the 3D models.
batch_size = 1 # this is the size of the models, higher values take longer to generate.
guidance_scale = 65.0 # this is the scale of the guidance, higher values make the model look more like the prompt.
latents = sample_latents(
batch_size=batch_size,
model=model,
diffusion=diffusion,
guidance_scale=guidance_scale,
model_kwargs=dict(texts=[prompt] * batch_size),
progress=True,
clip_denoised=True,
use_fp16=True,
use_karras=True,
karras_steps=64,
sigma_min=1E-3,
sigma_max=160,
s_churn=0,
)
render_mode = 'stf' #
size = 128 # this is the size of the renders, higher values take longer to render.
cameras = create_pan_cameras(size, device)
for i, latent in enumerate(latents):
images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)
display(gif_widget(images))
#save the 3D models as .ply and .obj files.
# Example of saving the latents as meshes.
from shap_e.util.notebooks import decode_latent_mesh
for i, latent in enumerate(latents):
t = decode_latent_mesh(xm, latent).tri_mesh()
with open(f'example_mesh_{i}.ply', 'wb') as f: # this is three-dimensional geometric data of model.
t.write_ply(f)
with open(f'example_mesh_{i}.obj', 'w') as f: # we will use this file to customize in Blender Studio later.
t.write_obj(f)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating base model...')
base_name = 'base40M'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])
print('creating upsample model...')
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])
print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))
print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))
sampler = PointCloudSampler(
device=device,
models=[base_model, upsampler_model],
diffusions=[base_diffusion, upsampler_diffusion],
num_points=[1024, 4096 - 1024],
aux_channels=['R', 'G', 'B'],
guidance_scale=[3.0, 3.0],
)
from google.colab import files
uploaded = files.upload()
# Load an image to condition on.
img = Image.open('figure_all.jpg')
# Produce a sample from the model.
samples = None
for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(images=[img]))):
samples = x
img
pc = sampler.output_to_point_clouds(samples)[0]
fig = plot_point_cloud(pc, grid_size=3, fixed_bounds=((-0.75, -0.75, -0.75),(0.75, 0.75, 0.75)))
import plotly.graph_objects as go
fig_plotly = go.Figure(
data=[
go.Scatter3d(
x=pc.coords[:,0], y=pc.coords[:,1], z=pc.coords[:,2],
mode='markers',
marker=dict(
size=2,
color=['rgb({},{},{})'.format(r,g,b) for r,g,b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])],
)
)
],
layout=dict(
scene=dict(
xaxis=dict(visible=False),
yaxis=dict(visible=False),
zaxis=dict(visible=False)
)
),
)
fig_plotly.show(renderer="colab")
from point_e.util.pc_to_mesh import marching_cubes_mesh
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('creating SDF model...')
name = 'sdf'
model = model_from_config(MODEL_CONFIGS[name], device)
model.eval()
print('loading SDF model...')
model.load_state_dict(load_checkpoint(name, device))
import skimage.measure as measure
# Produce a mesh (with vertex colors)
mesh = marching_cubes_mesh(
pc=pc,
model=model,
batch_size=4096,
grid_size=32, # increase to 128 for resolution used in evals
progress=True,
)
# Write the mesh to a PLY file to import into some other program.
with open('figure_all.obj', 'wb') as f:
mesh.write_ply(f)
#苏新皓[超话]##苏新皓 全能ace##苏新皓 山城曙光##苏新皓 三代唯一大主舞#
Don't let other people's opinions dictate your life choices; only you can decide the direction of your future. Stay confident and keep moving forward! 不要让别人的意见左右你的人生选择,只有你自己能够决定未来的方向。坚定信心,勇往直前!
Don't let other people's opinions dictate your life choices. Only you can decide the direction of the future. Strong confidence, go forward!不要让别人的意见左右你的人生选择,只有你自己能够决定未来的方向。坚定信心,勇往直前!
✋热门推荐