SyedHutter committed
Commit 37df822 · verified · 1 Parent(s): 275cd2b

app.py beta 2 (push 4)

Files changed (1): app.py (+11 -3)
app.py CHANGED
@@ -57,7 +57,7 @@ class CombinedResponse(BaseModel):
 
 # Helper Functions
 def extract_keywords(text: str) -> List[str]:
-    doc = nlp(text)
+    doc = nlp(text)
     keywords = [token.text for token in doc if token.pos_ in ["NOUN", "PROPN"]]
     return list(set(keywords))
 
@@ -72,7 +72,7 @@ def detect_intent(text: str) -> str:
     return "unknown"
 
 def search_products_by_keywords(keywords: List[str]) -> List[Dict[str, Any]]:
-    if not keywords:  # Handle empty keywords
+    if not keywords:
         logger.info("No keywords provided, returning empty product list.")
         return []
     query = {"$or": [{"name": {"$regex": keyword, "$options": "i"}} for keyword in keywords]}
@@ -118,8 +118,15 @@ async def process_prompt(request: PromptRequest):
     full_input = f"{history_str} || {product_context} {context_msg} || {input_text}" if history else f"{product_context} {context_msg} || {input_text}"
     logger.info(f"Full input to model: {full_input}")
 
+    logger.info("Tokenizing input...")
     inputs = tokenizer(full_input, return_tensors="pt", truncation=True, max_length=512)
-    outputs = model.generate(**inputs, max_length=150, num_beams=5, no_repeat_ngram_size=2)
+    logger.info("Input tokenized successfully.")
+
+    logger.info("Generating model response...")
+    outputs = model.generate(**inputs, max_length=50, num_beams=1, no_repeat_ngram_size=2)
+    logger.info("Model generation complete.")
+
+    logger.info("Decoding model output...")
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     logger.info(f"Model response: {response}")
 
@@ -130,6 +137,7 @@ async def process_prompt(request: PromptRequest):
         "score": 1.0
     }
 
+    logger.info("Returning response...")
     return {
         "ner": {"extracted_keywords": keywords},
         "qa": qa_response,