Change placement of spaces decorator
app.py CHANGED
@@ -73,6 +73,7 @@ def noisify_answer(input_ids, answer_start, threshold=1.0, eot_weight=1.0):
         noised[idx] = val
     return noised
 
+@spaces.GPU
 def generate_diffusion_text(input_ids, answer_start):
     with torch.no_grad():
         input_tensor = torch.tensor([input_ids], dtype=torch.long).to(model.device)
@@ -84,11 +85,13 @@ def generate_diffusion_text(input_ids, answer_start):
 
 # --- Inference Wrapper ---
 
-@spaces.GPU
+
 def diffusion_chat(question, eot_weight, max_it, sharpness):
     placeholder = "What do you know about the city of New York?"
     if question.strip() == "":
         question = placeholder
+
+    print('started generation')
 
     prompt = f"User: {question}\nAssistant:"
     input_ids = tokenizer.encode(prompt, add_special_tokens=False)