karths committed
Commit 18d6db7 · verified · 1 Parent(s): 1203b35

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -77,7 +77,7 @@ LLAMA_MAX_MAX_NEW_TOKENS = 512
 LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
 LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
 llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-llama_model_id = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
+llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"
 llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
 llama_model = AutoModelForCausalLM.from_pretrained(
     llama_model_id,
@@ -135,7 +135,7 @@ Given the following issue description:
 ---
 {issue_text}
 ---
-Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise and dont include anything else.
+Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise.
 """
 print(prompt)
 try:
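For context, a minimal, self-contained sketch of the generation path this commit touches, assuming the surrounding app.py follows the standard transformers load-and-generate flow. The model id, constants, and variable names come from the diff; the dtype handling, the explain_quality helper, and the generation parameters are assumptions, not part of this commit. Note that the meta-llama checkpoint is gated, so running this requires accepting the model license and authenticating with a Hugging Face token.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

LLAMA_DEFAULT_MAX_NEW_TOKENS = 512
LLAMA_MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))

llama_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
llama_model_id = "meta-llama/Llama-3.2-1B-Instruct"  # new id from this commit
llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_id)
llama_model = AutoModelForCausalLM.from_pretrained(
    llama_model_id,
    # Assumed: half precision on GPU, full precision on CPU.
    torch_dtype=torch.float16 if llama_device.type == "cuda" else torch.float32,
).to(llama_device)


def explain_quality(issue_text: str, quality_name: str) -> str:
    """Hypothetical helper: build the prompt from the diff's second hunk
    and generate an explanation with the swapped-in model."""
    prompt = f"""Given the following issue description:
---
{issue_text}
---
Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise.
"""
    # Truncate the input to the configured token budget.
    inputs = llama_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=LLAMA_MAX_INPUT_TOKEN_LENGTH,
    ).to(llama_device)
    output_ids = llama_model.generate(
        **inputs,
        max_new_tokens=LLAMA_DEFAULT_MAX_NEW_TOKENS,
        do_sample=False,  # assumed: greedy decoding for a deterministic answer
    )
    # Strip the echoed prompt so only the model's answer is returned.
    return llama_tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )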