mike23415 committed on
Commit
dbbc4eb
·
verified ·
1 Parent(s): 464dd43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -44
app.py CHANGED
@@ -1,63 +1,49 @@
1
- import io
2
- import base64
3
  import torch
4
  from flask import Flask, request, jsonify, send_file
5
- from diffusers import DiffusionPipeline
6
  from PIL import Image
7
- import logging
8
-
9
- logging.basicConfig(level=logging.INFO)
10
- logger = logging.getLogger(__name__)
11
 
12
  app = Flask(__name__)
13
 
14
  # Load the model once at startup (on CPU)
15
- try:
16
- logger.info("Loading Zero123Plus pipeline...")
17
- pipe = DiffusionPipeline.from_pretrained(
18
- "sudo-ai/zero123plus-v1.2",
19
- torch_dtype=torch.float32, # CPU needs float32
20
- )
21
- pipe.to("cpu")
22
- logger.info("=== Application Startup at CPU mode =====")
23
- except Exception as e:
24
- logger.error(f"Error loading model: {e}")
25
- pipe = None
26
-
27
- def pil_to_base64(image):
28
- buffer = io.BytesIO()
29
- image.save(buffer, format="PNG")
30
- return base64.b64encode(buffer.getvalue()).decode("utf-8")
31
 
32
  @app.route("/")
33
  def home():
34
- return "Zero123Plus CPU API is running!"
 
 
 
 
 
 
 
35
 
36
  @app.route("/generate", methods=["POST"])
37
  def generate():
38
- if pipe is None:
39
- return jsonify({"error": "Model not loaded"}), 500
40
-
41
- try:
42
- data = request.get_json()
43
- image_data = data.get("image")
44
-
45
- if not image_data:
46
- return jsonify({"error": "No image provided"}), 400
47
-
48
- if image_data.startswith("data:image"):
49
- image_data = image_data.split(",")[1]
50
-
51
- image = Image.open(io.BytesIO(base64.b64decode(image_data))).convert("RGB")
52
 
53
- result = pipe(image)
54
- output_image = result.images[0]
55
 
56
- return jsonify({"image": f"data:image/png;base64,{pil_to_base64(output_image)}"})
 
57
 
58
- except Exception as e:
59
- logger.error(f"Error generating image: {e}")
60
- return jsonify({"error": str(e)}), 500
 
 
61
 
62
  if __name__ == "__main__":
63
  app.run(host="0.0.0.0", port=7860)
 
1
+ import os
 
2
  import torch
3
  from flask import Flask, request, jsonify, send_file
4
+ from pipeline import Zero123PlusPipeline # from your local pipeline.py
5
  from PIL import Image
6
+ from io import BytesIO
 
 
 
7
 
8
  app = Flask(__name__)
9
 
10
  # Load the model once at startup (on CPU)
11
+ print("Loading Zero123Plus pipeline on CPU...")
12
+ pipe = Zero123PlusPipeline.from_pretrained(
13
+ "sudo-ai/zero123plus-v1.2",
14
+ torch_dtype=torch.float32,
15
+ )
16
+ pipe.to("cpu")
17
+ pipe.enable_model_cpu_offload()
18
+ print("Model loaded.")
 
 
 
 
 
 
 
 
19
 
20
  @app.route("/")
21
  def home():
22
+ return '''
23
+ <h1>Zero123Plus Image to 3D Generator</h1>
24
+ <form action="/generate" method="post" enctype="multipart/form-data">
25
+ <p>Upload a single-view image:</p>
26
+ <input type="file" name="image"><br><br>
27
+ <input type="submit" value="Generate 3D View">
28
+ </form>
29
+ '''
30
 
31
  @app.route("/generate", methods=["POST"])
32
  def generate():
33
+ if "image" not in request.files:
34
+ return jsonify({"error": "No image uploaded"}), 400
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
+ file = request.files["image"]
37
+ image = Image.open(file.stream).convert("RGB")
38
 
39
+ print("Generating 3D view...")
40
+ result = pipe(image, num_inference_steps=50, guidance_scale=3.0)
41
 
42
+ output = result.images[0]
43
+ img_io = BytesIO()
44
+ output.save(img_io, "PNG")
45
+ img_io.seek(0)
46
+ return send_file(img_io, mimetype="image/png")
47
 
48
  if __name__ == "__main__":
49
  app.run(host="0.0.0.0", port=7860)