Mariam-Elz committed on
Commit d17c8ce · verified · 1 Parent(s): 4168fdf

Upload app.py with huggingface_hub
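
For context, this commit message is the default one generated by the huggingface_hub upload helpers. A minimal sketch of how such an upload is typically performed (the Space id, token handling, and local path below are illustrative assumptions, not taken from this commit):

    # Sketch only: upload a local app.py to a Hugging Face Space via huggingface_hub.
    # The repo_id is an assumption; replace it with the actual Space id.
    from huggingface_hub import HfApi

    api = HfApi()  # authenticates via the HF_TOKEN env var or a cached `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="app.py",      # local file to upload
        path_in_repo="app.py",         # destination path inside the repo
        repo_id="Mariam-Elz/CRM",      # assumed Space id
        repo_type="space",             # target a Space rather than a model repo
        commit_message="Upload app.py with huggingface_hub",
    )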

Files changed (1)
  1. app.py +0 -444
app.py CHANGED
@@ -1,447 +1,3 @@
- # import gradio as gr
- # import torch
- # from PIL import Image
- # from model import CRM
- # from inference import generate3d
- # import numpy as np
-
- # # Load model
- # crm_path = "CRM.pth" # Make sure the model is uploaded to the Space
- # model = CRM(torch.load(crm_path, map_location="cpu"))
- # model = model.to("cuda:0" if torch.cuda.is_available() else "cpu")
-
- # def generate_3d(image_path, seed=1234, scale=5.5, step=30):
- #     image = Image.open(image_path).convert("RGB")
- #     np_img = np.array(image)
- #     glb_path = generate3d(model, np_img, np_img, "cuda:0" if torch.cuda.is_available() else "cpu")
- #     return glb_path
-
- # iface = gr.Interface(
- #     fn=generate_3d,
- #     inputs=gr.Image(type="filepath"),
- #     outputs=gr.Model3D(),
- #     title="Convolutional Reconstruction Model (CRM)",
- #     description="Upload an image to generate a 3D model."
- # )
-
- # iface.launch()
-
-
- #############2nd################3
- # import os
- # import torch
- # import gradio as gr
- # from huggingface_hub import hf_hub_download
- # from model import CRM # Make sure this matches your model file structure
-
- # # Define model details
- # REPO_ID = "Mariam-Elz/CRM" # Hugging Face model repo
- # MODEL_FILES = {
- #     "ccm-diffusion": "ccm-diffusion.pth",
- #     "pixel-diffusion": "pixel-diffusion.pth",
- #     "CRM": "CRM.pth"
- # }
- # DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-
- # # Download models from Hugging Face if not already present
- # MODEL_DIR = "./models"
- # os.makedirs(MODEL_DIR, exist_ok=True)
-
- # for name, filename in MODEL_FILES.items():
- #     model_path = os.path.join(MODEL_DIR, filename)
- #     if not os.path.exists(model_path):
- #         print(f"Downloading {filename}...")
- #         hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=MODEL_DIR)
-
- # # Load the model
- # print("Loading CRM Model...")
- # model = CRM()
- # model.load_state_dict(torch.load(os.path.join(MODEL_DIR, MODEL_FILES["CRM"]), map_location=DEVICE))
- # model.to(DEVICE)
- # model.eval()
- # print("✅ Model Loaded Successfully!")
-
- # # Define Gradio Interface
- # def predict(input_image):
- #     with torch.no_grad():
- #         output = model(input_image.to(DEVICE)) # Modify based on model input format
- #     return output.cpu()
-
- # demo = gr.Interface(
- #     fn=predict,
- #     inputs=gr.Image(type="pil"),
- #     outputs=gr.Image(type="pil"),
- #     title="Convolutional Reconstruction Model (CRM)",
- #     description="Upload an image to generate a reconstructed output."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
- ########################3rd-MAIN######################3
-
- # import torch
- # import gradio as gr
- # import requests
- # import os
-
- # # Download model weights from Hugging Face model repo (if not already present)
- # model_repo = "Mariam-Elz/CRM" # Your Hugging Face model repo
-
- # model_files = {
- #     "ccm-diffusion.pth": "ccm-diffusion.pth",
- #     "pixel-diffusion.pth": "pixel-diffusion.pth",
- #     "CRM.pth": "CRM.pth",
- # }
-
- # os.makedirs("models", exist_ok=True)
-
- # for filename, output_path in model_files.items():
- #     file_path = f"models/{output_path}"
- #     if not os.path.exists(file_path):
- #         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
- #         print(f"Downloading {filename}...")
- #         response = requests.get(url)
- #         with open(file_path, "wb") as f:
- #             f.write(response.content)
-
- # # Load model (This part depends on how the model is defined)
- # device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # def load_model():
- #     model_path = "models/CRM.pth"
- #     model = torch.load(model_path, map_location=device)
- #     model.eval()
- #     return model
-
- # model = load_model()
-
- # # Define inference function
- # def infer(image):
- #     """Process input image and return a reconstructed image."""
- #     with torch.no_grad():
- #         # Assuming model expects a tensor input
- #         image_tensor = torch.tensor(image).to(device)
- #         output = model(image_tensor)
- #     return output.cpu().numpy()
-
- # # Create Gradio UI
- # demo = gr.Interface(
- #     fn=infer,
- #     inputs=gr.Image(type="numpy"),
- #     outputs=gr.Image(type="numpy"),
- #     title="Convolutional Reconstruction Model",
- #     description="Upload an image to get the reconstructed output."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
- #################4th##################
-
- # import torch
- # import gradio as gr
- # import requests
- # import os
-
- # # Define model repo
- # model_repo = "Mariam-Elz/CRM"
-
- # # Define model files and download paths
- # model_files = {
- #     "CRM.pth": "models/CRM.pth"
- # }
-
- # os.makedirs("models", exist_ok=True)
-
- # # Download model files only if they don't exist
- # for filename, output_path in model_files.items():
- #     if not os.path.exists(output_path):
- #         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
- #         print(f"Downloading {filename}...")
- #         response = requests.get(url)
- #         with open(output_path, "wb") as f:
- #             f.write(response.content)
-
- # # Load model with low memory usage
- # def load_model():
- #     model_path = "models/CRM.pth"
- #     model = torch.load(model_path, map_location="cpu") # Load on CPU to reduce memory usage
- #     model.eval()
- #     return model
-
- # model = load_model()
-
- # # Define inference function
- # def infer(image):
- #     """Process input image and return a reconstructed image."""
- #     with torch.no_grad():
- #         image_tensor = torch.tensor(image).unsqueeze(0) # Add batch dimension
- #         image_tensor = image_tensor.to("cpu") # Keep on CPU to save memory
- #         output = model(image_tensor)
- #     return output.squeeze(0).numpy()
-
- # # Create Gradio UI
- # demo = gr.Interface(
- #     fn=infer,
- #     inputs=gr.Image(type="numpy"),
- #     outputs=gr.Image(type="numpy"),
- #     title="Convolutional Reconstruction Model",
- #     description="Upload an image to get the reconstructed output."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
- # ##############5TH#################
- # import torch
- # import torch.nn as nn
- # import gradio as gr
- # import requests
- # import os
-
- # # Define model repo
- # model_repo = "Mariam-Elz/CRM"
-
- # # Define model files and download paths
- # model_files = {
- #     "CRM.pth": "models/CRM.pth"
- # }
-
- # os.makedirs("models", exist_ok=True)
-
- # # Download model files only if they don't exist
- # for filename, output_path in model_files.items():
- #     if not os.path.exists(output_path):
- #         url = f"https://huggingface.co/{model_repo}/resolve/main/{filename}"
- #         print(f"Downloading {filename}...")
- #         response = requests.get(url)
- #         with open(output_path, "wb") as f:
- #             f.write(response.content)
-
- # # Define the model architecture (you MUST replace this with your actual model)
- # class CRM_Model(nn.Module):
- #     def __init__(self):
- #         super(CRM_Model, self).__init__()
- #         self.layer1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
- #         self.relu = nn.ReLU()
- #         self.layer2 = nn.Conv2d(64, 3, kernel_size=3, padding=1)
-
- #     def forward(self, x):
- #         x = self.layer1(x)
- #         x = self.relu(x)
- #         x = self.layer2(x)
- #         return x
-
- # # Load model with proper architecture
- # def load_model():
- #     model = CRM_Model() # Instantiate the model architecture
- #     model_path = "models/CRM.pth"
- #     model.load_state_dict(torch.load(model_path, map_location="cpu")) # Load weights
- #     model.eval() # Set to evaluation mode
- #     return model
-
- # model = load_model()
-
- # # Define inference function
- # def infer(image):
- #     """Process input image and return a reconstructed image."""
- #     with torch.no_grad():
- #         image_tensor = torch.tensor(image).unsqueeze(0).permute(0, 3, 1, 2).float() / 255.0 # Convert to tensor
- #         output = model(image_tensor) # Run through model
- #         output = output.squeeze(0).permute(1, 2, 0).numpy() * 255.0 # Convert back to image
- #     return output.astype("uint8")
-
- # # Create Gradio UI
- # demo = gr.Interface(
- #     fn=infer,
- #     inputs=gr.Image(type="numpy"),
- #     outputs=gr.Image(type="numpy"),
- #     title="Convolutional Reconstruction Model",
- #     description="Upload an image to get the reconstructed output."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
- #############6th-worked-proc##################
- # import torch
- # import gradio as gr
- # import requests
- # import os
- # import numpy as np
-
- # # Hugging Face Model Repository
- # model_repo = "Mariam-Elz/CRM"
-
- # # Download Model Weights (Only CRM.pth to Save Memory)
- # model_path = "models/CRM.pth"
- # os.makedirs("models", exist_ok=True)
-
- # if not os.path.exists(model_path):
- #     url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
- #     print(f"Downloading CRM.pth...")
- #     response = requests.get(url)
- #     with open(model_path, "wb") as f:
- #         f.write(response.content)
-
- # # Set Device (Use CPU to Reduce RAM Usage)
- # device = "cpu"
-
- # # Load Model Efficiently
- # def load_model():
- #     model = torch.load(model_path, map_location=device)
- #     if isinstance(model, torch.nn.Module):
- #         model.eval() # Ensure model is in inference mode
- #     return model
-
- # # Load model only when needed (saves memory)
- # model = load_model()
-
- # # Define Inference Function with Memory Optimizations
- # def infer(image):
- #     """Process input image and return a reconstructed image."""
- #     with torch.no_grad():
- #         # Convert image to torch tensor & normalize (float16 to save RAM)
- #         image_tensor = torch.tensor(image, dtype=torch.float16).unsqueeze(0).permute(0, 3, 1, 2) / 255.0
- #         image_tensor = image_tensor.to(device)
-
- #         # Model Inference
- #         output = model(image_tensor)
-
- #         # Convert back to numpy image format
- #         output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy() * 255.0
- #         output_image = np.clip(output_image, 0, 255).astype(np.uint8)
-
- #         # Free Memory
- #         del image_tensor, output
- #         torch.cuda.empty_cache()
-
- #     return output_image
-
- # # Create Gradio UI
- # demo = gr.Interface(
- #     fn=infer,
- #     inputs=gr.Image(type="numpy"),
- #     outputs=gr.Image(type="numpy"),
- #     title="Optimized Convolutional Reconstruction Model",
- #     description="Upload an image to get the reconstructed output with reduced memory usage."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
-
- #############7tth################
- # import torch
- # import torch.nn as nn
- # import gradio as gr
- # import requests
- # import os
- # import torchvision.transforms as transforms
- # import numpy as np
- # from PIL import Image
-
- # # Hugging Face Model Repository
- # model_repo = "Mariam-Elz/CRM"
-
- # # Model File Path
- # model_path = "models/CRM.pth"
- # os.makedirs("models", exist_ok=True)
-
- # # Download model weights if not present
- # if not os.path.exists(model_path):
- #     url = f"https://huggingface.co/{model_repo}/resolve/main/CRM.pth"
- #     print(f"Downloading CRM.pth...")
- #     response = requests.get(url)
- #     with open(model_path, "wb") as f:
- #         f.write(response.content)
-
- # # Set Device
- # device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # # Define Model Architecture (Replace with your actual model)
- # class CRMModel(nn.Module):
- #     def __init__(self):
- #         super(CRMModel, self).__init__()
- #         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
- #         self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
- #         self.relu = nn.ReLU()
-
- #     def forward(self, x):
- #         x = self.relu(self.conv1(x))
- #         x = self.relu(self.conv2(x))
- #         return x
-
- # # Load Model
- # def load_model():
- #     print("Loading model...")
- #     model = CRMModel() # Use the correct architecture here
- #     state_dict = torch.load(model_path, map_location=device)
-
- #     if isinstance(state_dict, dict): # Ensure it's a valid state_dict
- #         model.load_state_dict(state_dict)
- #     else:
- #         raise ValueError("Error: The loaded state_dict is not in the correct format.")
-
- #     model.to(device)
- #     model.eval()
- #     print("Model loaded successfully!")
- #     return model
-
- # # Load the model
- # model = load_model()
-
- # # Define Inference Function
- # def infer(image):
- #     """Process input image and return a reconstructed 3D output."""
- #     try:
- #         print("Preprocessing image...")
-
- #         # Convert image to PyTorch tensor & normalize
- #         transform = transforms.Compose([
- #             transforms.Resize((256, 256)), # Resize to fit model input
- #             transforms.ToTensor(), # Converts to tensor (C, H, W)
- #             transforms.Normalize(mean=[0.5], std=[0.5]), # Normalize
- #         ])
- #         image_tensor = transform(image).unsqueeze(0).to(device) # Add batch dimension
-
- #         print("Running inference...")
- #         with torch.no_grad():
- #             output = model(image_tensor) # Forward pass
-
- #         # Ensure output is a valid tensor
- #         if isinstance(output, torch.Tensor):
- #             output_image = output.squeeze(0).permute(1, 2, 0).cpu().numpy()
- #             output_image = np.clip(output_image * 255.0, 0, 255).astype(np.uint8)
- #             print("Inference complete! Returning output.")
- #             return output_image
- #         else:
- #             print("Error: Model output is not a tensor.")
- #             return None
-
- #     except Exception as e:
- #         print(f"Error during inference: {e}")
- #         return None
-
- # # Create Gradio UI
- # demo = gr.Interface(
- #     fn=infer,
- #     inputs=gr.Image(type="pil"),
- #     outputs=gr.Image(type="numpy"),
- #     title="Convolutional Reconstruction Model",
- #     description="Upload an image to get the reconstructed output."
- # )
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
-
-
  # Not ready to use yet
  import spaces
  import argparse