ProfessorLeVesseur committed
Commit da1f0f6 · verified · 1 Parent(s): 0d32d1f

Update app.py

Files changed (1): app.py (+22 -5)
app.py CHANGED
@@ -310,7 +310,10 @@ client = InferenceClient(api_key=hf_api_key)
 # ---------------------------------------------------------------------------------------
 # Survey Analysis Class
 # ---------------------------------------------------------------------------------------
-class SurveyAnalysis:
+class AIAnalysis:
+    def __init__(self, client):
+        self.client = client
+
     def prepare_llm_input(self, survey_response, topics):
         topic_descriptions = "\n".join([f"- **{t}**: {d}" for t, d in topics.items()])
         return f"""Extract and summarize PDF notes based on topics:
@@ -330,7 +333,7 @@ Meeting Notes:
 
     def prompt_response_from_hf_llm(self, llm_input):
         system_prompt = """
-        You are an expert assistant tasked with extracting exact quotes from provided meeting notes based on given topics.
+        You are an expert assistant tasked with extracting exact quotes from provided meeting notes based on given topics.
 
         Instructions:
         - Only extract exact quotes relevant to provided topics.
@@ -341,18 +344,23 @@ Meeting Notes:
         - "Exact quote"
         """
 
-        response = client.chat.completions.create(
+        response = self.client.chat.completions.create(
             model="meta-llama/Llama-3.1-70B-Instruct",
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": llm_input}
             ],
+            stream=True,
             temperature=0.5,
             max_tokens=1024,
             top_p=0.7
         )
 
-        response_content = response.choices[0].message.content
+        response_content = ""
+        for message in response:
+            # Correctly handle streaming response
+            response_content += message.choices[0].delta.content
+
         print("Full AI Response:", response_content) # Debugging
         return response_content.strip()
 
@@ -368,6 +376,15 @@ Meeting Notes:
             results.append({'Document_Text': row['Document_Text'], 'Topic_Summary': notes})
         return pd.concat([df.reset_index(drop=True), pd.DataFrame(results)['Topic_Summary']], axis=1)
 
+    def process_dataframe(self, df, topics):
+        results = []
+        for _, row in df.iterrows():
+            llm_input = self.prepare_llm_input(row['Document_Text'], topics)
+            response = self.prompt_response_from_hf_llm(llm_input)
+            notes = self.extract_text(response)
+            results.append({'Document_Text': row['Document_Text'], 'Topic_Summary': notes})
+        return pd.concat([df.reset_index(drop=True), pd.DataFrame(results)['Topic_Summary']], axis=1)
+
 # ---------------------------------------------------------------------------------------
 # Helper Functions
 # ---------------------------------------------------------------------------------------
@@ -457,7 +474,7 @@ if st.session_state['pdf_processed']:
         st.warning("Please enter at least one topic and description.")
         st.stop()
 
-    analyzer = SurveyAnalysis()
+    analyzer = AIAnalysis()
     processed_df = analyzer.process_dataframe(st.session_state['df'], topics)
     extracted_df = extract_excerpts(processed_df)
 
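
For context, a minimal usage sketch of the refactored class (not part of the commit). It assumes AIAnalysis and its extract_text helper are in scope from app.py and that a Hugging Face token is available; HF_API_KEY is an illustrative name. The updated call site in the diff still constructs AIAnalysis() with no argument, while the new __init__ expects a client, so this sketch passes the InferenceClient in explicitly.

import os

import pandas as pd
from huggingface_hub import InferenceClient

# Build the client once and inject it, as the new __init__(self, client) expects.
client = InferenceClient(api_key=os.environ["HF_API_KEY"])  # illustrative env var name
analyzer = AIAnalysis(client)

# Tiny example inputs matching the column name app.py works with.
df = pd.DataFrame({"Document_Text": ["Example meeting notes about budget and staffing."]})
topics = {"Budget": "Funding, costs, and spending decisions"}

processed_df = analyzer.process_dataframe(df, topics)
print(processed_df["Topic_Summary"])

When accumulating the streamed response, chunk deltas can carry None content on some messages; a common guard is response_content += message.choices[0].delta.content or "" so the concatenation does not raise a TypeError.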