Spaces: Running on Zero
Rename app1.py to app.py
app1.py → app.py · RENAMED · +2 -2
@@ -77,7 +77,7 @@ LLAMA_MAX_MAX_NEW_TOKENS = 512
 LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-llama_model_id = "
+llama_model_id = "HuggingFaceTB/SmolLM2-360M-Instruct"
 llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
 llama_model = AutoModelForCausalLM.from_pretrained(
     llama_model_id,
@@ -128,7 +128,7 @@ def generate_explanation(issue_text, top_quality):
     if not top_quality:
         return "<div style='color: red;'>No explanation available as no quality tags met the threshold.</div>"
 
-    quality_name = top_quality[0]  # Get the name of the top quality
+    quality_name = top_quality[0][0]  # Get the name of the top quality
 
     prompt = f"""
     Given the following issue description:
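The first change points llama_model_id at HuggingFaceTB/SmolLM2-360M-Instruct. As a minimal sketch of how an instruct-tuned checkpoint like this is typically driven through the transformers chat-template API (the message content and generation settings below are illustrative, not taken from app.py):

# Self-contained sketch; assumes only torch and transformers are installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
llama_model_id = "HuggingFaceTB/SmolLM2-360M-Instruct"

llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(llama_model_id).to(llama_device)

# Illustrative message; the real app builds its prompt from the issue text.
messages = [{"role": "user", "content": "Summarize: the login page crashes on submit."}]
input_ids = llama_tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(llama_device)

output = llama_model.generate(input_ids, max_new_tokens=512)
print(llama_tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))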
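The second change implies that top_quality holds (tag, score) pairs rather than bare tag names, so indexing once returned a whole pair where the prompt needs only the name. A toy illustration with hypothetical values:

top_quality = [("reliability", 0.91), ("usability", 0.77)]  # hypothetical pairs

before = top_quality[0]     # ("reliability", 0.91) -- a tuple, not a name
after = top_quality[0][0]   # "reliability" -- the tag name the prompt expects
print(before, after)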