Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,48 +1,47 @@
|
|
1 |
import streamlit as st
|
2 |
-
from langchain.prompts import FewShotChatMessagePromptTemplate
|
3 |
-
from langchain.llms import HuggingFaceHub
|
4 |
from datasets import load_dataset
|
|
|
|
|
5 |
|
6 |
-
# Load
|
7 |
-
|
|
|
|
|
|
|
8 |
|
9 |
-
|
10 |
-
examples = [
|
11 |
-
{
|
12 |
-
"input": dialogue['dialogue'], # Assuming 'dialogue' field contains the conversation text
|
13 |
-
"output": dialogue['summary'] # Assuming 'summary' field contains the summary
|
14 |
-
}
|
15 |
-
for dialogue in dataset
|
16 |
-
]
|
17 |
|
18 |
-
#
|
19 |
-
example_prompt =
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
examples=examples,
|
21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
)
|
23 |
|
24 |
# Streamlit UI
|
25 |
-
st.
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
# Load the model from Hugging Face (replace with your choice of model)
|
38 |
-
model = HuggingFaceHub(
|
39 |
-
repo_id="google/pegasus-xsum", # You can replace with any model available in Hugging Face
|
40 |
-
model_kwargs={"temperature": 0.7}
|
41 |
-
)
|
42 |
-
|
43 |
-
# Generate the summary
|
44 |
-
summary = model(formatted_message)
|
45 |
-
st.success("β
Summary:")
|
46 |
-
st.write(summary)
|
47 |
-
else:
|
48 |
-
st.warning("Please enter some text!")
|
|
|
1 |
import streamlit as st
|
|
|
|
|
2 |
from datasets import load_dataset
|
3 |
+
from langchain.llms import HuggingFaceEndpoint
|
4 |
+
from langchain.prompts import FewShotChatMessagePromptTemplate, ChatPromptTemplate
|
5 |
|
6 |
+
# Load dataset from HuggingFace
@st.cache_data
def load_examples(n=3):
    """Return the first *n* dialogue/summary example pairs from DialogSum.

    Cached by Streamlit so the dataset is only downloaded once per session.
    The split is capped at 20 rows to keep startup fast; *n* is clamped to
    [0, len(dataset)] because ``dataset.select(range(n))`` raises an
    IndexError for an out-of-range request.
    """
    dataset = load_dataset("knkarthick/dialogsum", split="train[:20]")
    n = max(0, min(n, len(dataset)))  # guard: caller may ask for more rows than fetched
    return [{"dialogue": row["dialogue"], "summary": row["summary"]} for row in dataset.select(range(n))]

examples = load_examples()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
14 |
+
# Format examples: chat template mapping one example dialogue to its summary.
# The {dialogue}/{summary} placeholders line up with the keys produced by
# load_examples(), which is what FewShotChatMessagePromptTemplate expects.
_example_messages = [
    ("human", "Summarize the following dialog:\n\n{dialogue}"),
    ("ai", "{summary}"),
]
example_prompt = ChatPromptTemplate.from_messages(_example_messages)
|
19 |
+
|
20 |
+
# Few-shot setup.
# NOTE(review): FewShotChatMessagePromptTemplate only accepts chat-style
# fields (`examples`, `example_prompt`). `prefix`, `suffix`, and
# `input_variables` belong to the string-based FewShotPromptTemplate and
# make this constructor fail pydantic validation. The prefix/suffix are
# expressed here as system/human messages in an enclosing ChatPromptTemplate;
# `few_shot_prompt.format_messages(dialogue=...)` keeps working for callers.
_few_shot_examples = FewShotChatMessagePromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
)
few_shot_prompt = ChatPromptTemplate.from_messages([
    ("system", "The following are examples of dialogues and their summaries."),
    _few_shot_examples,
    ("human", "Summarize the following dialog:\n\n{dialogue}"),
])
|
28 |
+
|
29 |
+
# Hosted Hugging Face summarization model (Pegasus fine-tuned on XSum).
# NOTE(review): assumes this langchain version's HuggingFaceEndpoint accepts
# `repo_id` (newer releases do; older ones require `endpoint_url`) — verify
# against the pinned langchain version.
_generation_kwargs = {"temperature": 0.3, "max_new_tokens": 128}
llm = HuggingFaceEndpoint(
    repo_id="google/pegasus-xsum",
    task="text2text-generation",
    model_kwargs=_generation_kwargs,
)
|
35 |
|
36 |
# Streamlit UI
st.set_page_config(page_title="DialogSum Few-Shot Summarizer", page_icon="🧠")
st.title("🧠 Few-Shot Dialog Summarizer")
st.markdown("Uses real examples from `dialogsum` to guide the summary output.")

user_input = st.text_area("✍️ Paste your dialogue here:", height=200)

if user_input:
    # HuggingFaceEndpoint is a string-in/string-out LLM, so render the
    # few-shot chat prompt to a single string. Passing the raw message list
    # (as the previous code did) fails inside the LLM call, which expects str.
    prompt_text = few_shot_prompt.format(dialogue=user_input)
    response = llm(prompt_text)
    st.subheader("📝 Summary:")
    st.write(response)
else:
    # Empty input: prompt the user instead of silently rendering nothing.
    st.warning("Please enter some text!")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|