zoya23 commited on
Commit
d727731
·
verified ·
1 Parent(s): fad32b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -3,6 +3,10 @@ from datasets import load_dataset
3
  from langchain.llms import HuggingFaceEndpoint
4
  from langchain.prompts import FewShotChatMessagePromptTemplate, ChatPromptTemplate
5
  from langchain.schema.messages import SystemMessage
 
 
 
 
6
 
7
  # Set page config at the very top, before anything else
8
  st.set_page_config(page_title="DialogSum Few-Shot Summarizer", page_icon="🧠")
@@ -44,17 +48,17 @@ if user_input:
44
  formatted_prompt = few_shot_prompt.format_messages(dialogue=user_input)
45
 
46
  # Add formatted examples to the final prompt
47
- formatted_prompt = final_prompt.format_messages(dialogue=user_input)
48
 
49
  # Convert the list of messages into a single string
50
- prompt_string = "\n".join([msg.content for msg in formatted_prompt]) # Access content with .content
51
 
52
  # Get response from model with correct explicit parameters
53
  llm = HuggingFaceEndpoint(
54
  repo_id="google/pegasus-xsum",
55
- task="summarization", # Change to 'summarization'
56
  temperature=0.3, # Explicitly passing temperature here
57
- max_new_tokens=128 # Explicitly passing max_new_tokens here
58
  )
59
 
60
  # Run the LLM with the prompt string
 
3
  from langchain.llms import HuggingFaceEndpoint
4
  from langchain.prompts import FewShotChatMessagePromptTemplate, ChatPromptTemplate
5
  from langchain.schema.messages import SystemMessage
6
+ from huggingface_hub import login
7
+
8
+ # Authenticate using a Hugging Face token read from the environment.
+ # SECURITY: never hardcode an access token in source — the token previously
+ # committed here is exposed and must be revoked on huggingface.co immediately.
9
+ login(os.environ["HF_TOKEN"]) # requires `import os`; set HF_TOKEN in the Space/host environment
10
 
11
  # Set page config at the very top, before anything else
12
  st.set_page_config(page_title="DialogSum Few-Shot Summarizer", page_icon="🧠")
 
48
  formatted_prompt = few_shot_prompt.format_messages(dialogue=user_input)
49
 
50
  # Add formatted examples to the final prompt
51
+ final_formatted_prompt = final_prompt.format_messages(dialogue=user_input)
52
 
53
  # Convert the list of messages into a single string
54
+ prompt_string = "\n".join([msg.content for msg in final_formatted_prompt]) # Access content with .content
55
 
56
  # Get response from model with correct explicit parameters
57
  llm = HuggingFaceEndpoint(
58
  repo_id="google/pegasus-xsum",
59
+ task="summarization", # Correct task for text summarization
60
  temperature=0.3, # Explicitly passing temperature here
61
+ max_length=128 # Explicitly passing max_length here
62
  )
63
 
64
  # Run the LLM with the prompt string