YiftachEde committed
Commit 6b12b83 · 1 Parent(s): 7ffa97b

Files changed (3):
  1. app.py +2 -2
  2. requirements.txt +3 -3
  3. zero123plus/pipeline.py +1 -1
app.py CHANGED
@@ -381,8 +381,8 @@ def create_demo():
     mesh_output = gr.Model3D(
         label="3D Mesh",
         clear_color=[1.0, 1.0, 1.0, 1.0],
-        width=1280,  # Full width
-        height=600  # Taller for better visualization
+        # width=1280,  # Full width
+        # height=600  # Taller for better visualization
     )

     # Set up event handlers
requirements.txt CHANGED
@@ -12,7 +12,7 @@ contourpy==1.3.0
 cycler==0.12.1
 dataclasses-json==0.6.7
 Deprecated==1.2.14
-diffusers==0.19.3
+diffusers
 einops==0.8.1
 fastapi==0.112.4
 ffmpy==0.4.0
@@ -25,7 +25,7 @@ gradio_client==1.3.0
 h11==0.14.0
 httpcore==1.0.6
 httpx==0.27.2
-huggingface-hub==0.25.2
+huggingface-hub
 imageio==2.35.1
 importlib_metadata==8.6.1
 importlib_resources==6.4.5
@@ -110,5 +110,5 @@ git+https://github.com/NVlabs/nvdiffrast.git
 rembg
 onnxruntime
 kiui
-transformers==4.30.2
+transformers
 PyMCubes
zero123plus/pipeline.py CHANGED
@@ -13,7 +13,7 @@ from PIL import Image
 from torchvision import transforms
 from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import randn_tensor
+randn_tensor = torch.randn
 import diffusers
 from diffusers import (
     AutoencoderKL,
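Note: with diffusers unpinned in requirements.txt, `from diffusers.utils import randn_tensor` can fail on newer releases, which moved the helper to `diffusers.utils.torch_utils`; aliasing `torch.randn` keeps the module importable but drops randn_tensor's extra handling (e.g. lists of per-sample generators). A minimal alternative sketch, assuming a recent diffusers release:

    # Sketch only: prefer the real helper and fall back to the old import path.
    try:
        from diffusers.utils.torch_utils import randn_tensor  # newer diffusers
    except ImportError:
        from diffusers.utils import randn_tensor  # older diffusers (e.g. 0.19.x)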