karths committed on
Commit
0115ad0
·
verified ·
1 Parent(s): 1ca494b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -92,7 +92,7 @@ if llama_tokenizer.pad_token is None:
92
  def llama_generate(
93
  message: str,
94
  max_new_tokens: int = LLAMA_DEFAULT_MAX_NEW_TOKENS,
95
- temperature: float = 0.6,
96
  top_p: float = 0.9,
97
  top_k: int = 50,
98
  repetition_penalty: float = 1.2,
@@ -135,7 +135,7 @@ Given the following issue description:
135
  ---
136
  {issue_text}
137
  ---
138
- Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description.
139
  """
140
  try:
141
  explanation = llama_generate(prompt)
 
92
  def llama_generate(
93
  message: str,
94
  max_new_tokens: int = LLAMA_DEFAULT_MAX_NEW_TOKENS,
95
+ temperature: float = 0.3,
96
  top_p: float = 0.9,
97
  top_k: int = 50,
98
  repetition_penalty: float = 1.2,
 
135
  ---
136
  {issue_text}
137
  ---
138
+ Explain why this issue might be classified as a **{quality_name}** issue. Provide a concise explanation, relating it back to the issue description. Keep the explanation short and concise.
139
  """
140
  try:
141
  explanation = llama_generate(prompt)