Rename app.py to model.py
Browse files
app.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
import torch
import numpy as np
from diffusers import ShapEPipeline
from diffusers.utils import export_to_ply
import streamlit as st

# Load the Shap-E text-to-3D pipeline on CPU.
# NOTE(review): the original loaded this via DiffusionPipeline with
# trust_remote_code=True and custom_pipeline="openai/shap-e"; Shap-E ships
# as a native diffusers pipeline, so ShapEPipeline is the supported loader
# and neither extra argument is needed.
pipeline = ShapEPipeline.from_pretrained(
    "openai/shap-e",
    torch_dtype=torch.float32,
).to("cpu")


def generate_3d_model(prompt, output_path="/tmp/output.ply"):
    """Generate a 3D mesh from a text prompt and save it as a .ply file.

    Parameters:
        prompt: text description of the object to generate.
        output_path: where to write the .ply mesh (default: /tmp/output.ply).

    Returns:
        The output path on success, or None if export failed.
    """
    # output_type="mesh" makes the pipeline return mesh outputs that
    # export_to_ply can serialize. The original code passed a stray second
    # positional argument (None, which landed in num_images_per_prompt) and
    # called a nonexistent pipeline.save_ply() method.
    result = pipeline(prompt, output_type="mesh")

    try:
        export_to_ply(result.images[0], output_path)
        print(f"Model saved to {output_path}")
        return output_path
    except Exception as e:
        # Best-effort: log the failure server-side and return None so the
        # Streamlit UI can show an error instead of crashing the app.
        print(f"Failed to save model: {e}")
        return None


# Streamlit interface
st.title("3D Model Generator")
prompt = st.text_input("Enter a prompt to generate a 3D model:", "a cat statue")

if st.button("Generate Model"):
    with st.spinner("Generating model..."):
        model_path = generate_3d_model(prompt)
        if model_path:
            st.success("Model generated successfully!")
            # Offer the saved mesh for download.
            with open(model_path, "rb") as file:
                st.download_button(
                    label="Download 3D Model",
                    data=file,
                    file_name="generated_model.ply",
                    mime="application/octet-stream",
                )
        else:
            st.error("Model generation failed.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

# Checkpoint ID for the Shap-E text-to-3D model; run on CPU.
ckpt_id = "openai/shap-e"
pipe = ShapEPipeline.from_pretrained(ckpt_id).to("cpu")

# Generation parameters, tuned down for CPU inference.
guidance_scale = 10.0  # Lowered for efficiency on CPU
num_inference_steps = 32  # Reduced steps for CPU performance
prompt = "a shark"

# Render views of the generated 3D object.
# BUG FIX: ShapEPipeline.__call__ has no `size` parameter — the rendered
# resolution is controlled by `frame_size`; passing size=256 raised a
# TypeError at call time.
images = pipe(
    prompt=prompt,
    guidance_scale=guidance_scale,
    num_inference_steps=num_inference_steps,
    frame_size=256,  # Rendered frame resolution
).images

# Stitch the rendered frames into an animated GIF.
# BUG FIX: for a single prompt, `.images` is a list of frame lists, so the
# frames for this prompt are images[0] (as in the official Shap-E example).
gif_path = export_to_gif(images[0], "shark_3d.gif")
print(f"GIF saved at {gif_path}")
|