omkar56 committed
Commit 0093082 · 1 Parent(s): fac43fd

Update main.py

Files changed (1):
1. main.py +2 -0
main.py CHANGED
@@ -30,6 +30,7 @@ def generater(message, history, temperature, top_p, top_k):
         prompt += model.config["promptTemplate"].format(user_message)
         prompt += assistant_message + "</s>"
     prompt += model.config["promptTemplate"].format(message)
+    print("[prompt]",prompt)
     outputs = []
     for token in model.generate(prompt=prompt, temp=temperature, top_k = top_k, top_p = top_p, max_tokens = max_new_tokens, streaming=True):
         outputs.append(token)
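For context, a minimal sketch of how the surrounding generater function plausibly reads with this change applied, reconstructed from the context lines above. The model name, the empty-prompt initialization, the shape of history (a list of user/assistant pairs), the value of max_new_tokens, and the trailing yield are assumptions, not part of this commit:

```python
from gpt4all import GPT4All

# Assumed setup, not shown in the diff: model name and token budget are illustrative.
model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf")
max_new_tokens = 512

def generater(message, history, temperature, top_p, top_k):
    # Fold prior turns into one prompt using the model's chat template.
    prompt = ""  # assumed initialization
    for user_message, assistant_message in history:  # assumed history shape
        prompt += model.config["promptTemplate"].format(user_message)
        prompt += assistant_message + "</s>"
    prompt += model.config["promptTemplate"].format(message)
    print("[prompt]", prompt)  # the debug line added by this commit
    outputs = []
    # Stream tokens, yielding the accumulated text so far (assumed behavior).
    for token in model.generate(prompt=prompt, temp=temperature, top_k=top_k,
                                top_p=top_p, max_tokens=max_new_tokens,
                                streaming=True):
        outputs.append(token)
        yield "".join(outputs)
```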
@@ -58,6 +59,7 @@ def generate_text(
     temperature = body.get("temperature", 0.5)
     top_p = body.get("top_p", 0.95)
     top_k = body.get("top_k", 40)
+    print("[request details]",message, temperature, top_p, top_k)
     # max_new_tokens = body.get("max_new_tokens",512)
     # repetition_penalty = body.get("repetition_penalty", 1.0)
     history = [] # You might need to handle this based on your actual usage
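Similarly, a hedged sketch of how generate_text plausibly consumes these parameters; only the body.get defaults and the new print line come from the diff. The "prompt" body key, the dict-typed body argument, and the final-value collection are illustrative assumptions:

```python
def generate_text(body: dict) -> str:
    # Request parameters with the defaults shown in the diff.
    message = body.get("prompt", "")  # assumed key, not in the diff
    temperature = body.get("temperature", 0.5)
    top_p = body.get("top_p", 0.95)
    top_k = body.get("top_k", 40)
    print("[request details]", message, temperature, top_p, top_k)  # added by this commit
    history = []  # You might need to handle this based on your actual usage
    # generater yields progressively longer strings; keep the last one.
    final = ""
    for partial in generater(message, history, temperature, top_p, top_k):
        final = partial
    return final
```

Logging the fully rendered prompt and the incoming sampling parameters at these two points lets the server logs confirm that template formatting and client-supplied settings behave as expected.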