Spaces:
Sleeping
Sleeping
Smartness increased to max_new_tokens=50
Browse files
app.py
CHANGED
@@ -173,7 +173,7 @@ async def process_prompt(request: PromptRequest):
 173         with torch.no_grad():
 174             outputs = model.generate(
 175                 **inputs,
-176                 max_new_tokens=[previous value lost in page extraction],
+176                 max_new_tokens=50,
 177                 do_sample=True,
 178                 top_p=0.95,
 179                 temperature=0.8,