Update app.py
app.py CHANGED
@@ -77,7 +77,7 @@ LLAMA_MAX_MAX_NEW_TOKENS = 512
 LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-llama_model_id = "
+llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
 llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
 llama_model = AutoModelForCausalLM.from_pretrained(
     llama_model_id,
@@ -135,7 +135,7 @@ Given the following issue description:
 ---
 {issue_text}
 ---
-Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise
+Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise.
 """
 print(prompt)
 try:
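For context, here is a minimal sketch of how the pinned model and the prompt template from the two hunks might fit together at generation time. Only the constants, the device selection, the model id, and the prompt text come from the diff; the explain_classification helper, the float16 dtype, and the generation parameters are illustrative assumptions, not code from the Space.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Constants and model setup as shown in the first hunk.
LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))

llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Gated repo: requires accepting the model license on the Hugging Face Hub.
llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(
    llama_model_id,
    torch_dtype=torch.float16,  # assumption: the dtype is not visible in the hunk
).to(llama_device)

def explain_classification(issue_text: str, quality_name: str) -> str:
    # Prompt template taken from the second hunk.
    prompt = f"""Given the following issue description:
---
{issue_text}
---
Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise.
"""
    # Truncate the input to the configured token budget.
    inputs = llama_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=LLAMA_MAX_INPUT_TOKEN_LENGTH,
    ).to(llama_device)
    with torch.no_grad():
        output_ids = llama_model.generate(
            **inputs,
            max_new_tokens=LLAMA_DEFAULT_MAX_NEW_TOKENS,
        )
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return llama_tokenizer.decode(new_tokens, skip_special_tokens=True)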