Commit: input_size

- app.py +2 -0
- llama_diffusion_model.py +2 -0
app.py CHANGED
@@ -172,3 +172,5 @@ demo = gr.Interface(
 
 demo.launch(share=True, allowed_paths=["."], ssr_mode=False)
 
+
+
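For context, a minimal sketch of the kind of Gradio app this launch line belongs to. The `generate` function and the `inputs`/`outputs` choices are assumptions for illustration; only the `demo = gr.Interface(` and `demo.launch(...)` lines come from the diff itself.

```python
# Sketch, not the Space's actual app.py: a minimal Gradio Interface
# ending in the launch() call shown in the diff above.
import gradio as gr

def generate(prompt: str) -> str:
    # Placeholder for the Space's real inference logic (an assumption).
    return prompt[::-1]

demo = gr.Interface(fn=generate, inputs="text", outputs="text")

# share=True creates a public link; allowed_paths=["."] lets Gradio serve
# files from the working directory; ssr_mode=False disables server-side
# rendering (a Gradio 5.x launch option).
demo.launch(share=True, allowed_paths=["."], ssr_mode=False)
```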
llama_diffusion_model.py CHANGED
@@ -213,6 +213,8 @@ class CustomTransformerModel(PreTrainedModel):
         self.llama = get_peft_model(self.llama, lora_config)
         self.llama.print_trainable_parameters()  # Print number of trainable parameters
         self.llama = self.llama.to(torch.float16)
+        self.input_size = 256
+
 
     def forward(self, input_ids, labels=None, **kwargs):
         batch_size, seq_length = input_ids.shape
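For context, a minimal sketch of the pattern the second hunk touches: wrapping a base model with a PEFT LoRA adapter, casting it to float16, and recording a fixed sequence length as `input_size`. The model name, `LoraConfig` values, and surrounding script are assumptions for illustration; only the lines shown in the diff are from the Space itself.

```python
# Sketch, not the Space's actual llama_diffusion_model.py: illustrates the
# PEFT-wrapping sequence that the new self.input_size attribute sits after.
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Base checkpoint is an assumption; the diff does not show which LLaMA is used.
base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")

# Illustrative LoRA hyperparameters; the Space's lora_config is not in the diff.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
)

llama = get_peft_model(base, lora_config)
llama.print_trainable_parameters()   # confirms only the LoRA weights train
llama = llama.to(torch.float16)      # half precision, as in the diff

input_size = 256  # fixed sequence length recorded by this commit
```

Storing `input_size` as an attribute rather than a literal lets later code (e.g. the `forward` pass, which already reads `input_ids.shape`) reference one canonical value for the expected sequence length.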