joey1101 committed on
Commit
8b81b62
·
verified ·
1 Parent(s): d46516b

Delete sum_model.py

Browse files
Files changed (1) hide show
  1. sum_model.py +0 -153
sum_model.py DELETED
@@ -1,153 +0,0 @@
1
- from ipex_llm.langchain.llms import TransformersLLM
2
- from langchain import LLMChain
3
- from langchain.chains.summarize import load_summarize_chain
4
- from langchain.docstore.document import Document
5
- from langchain.prompts import PromptTemplate
6
- from langchain.chains.combine_documents.stuff import StuffDocumentsChain
7
- from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChain
8
- from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
9
-
10
-
11
- class Sum():
12
- def __init__(self, args):
13
- self.llm_version = args.llm_version
14
- # self.max_tokens = args.qa_max_new_tokens
15
-
16
- def summarize_refine(self, script):
17
- text_splitter = CharacterTextSplitter(chunk_size=1024, separator="\n", chunk_overlap=0)
18
- texts = text_splitter.split_text(script)
19
- docs = [Document(page_content=t) for t in texts]
20
- llm = TransformersLLM.from_model_id_low_bit(f"checkpoint\\{self.llm_version}")
21
-
22
- prompt_template = """Write a concise summary of the following:
23
- {text}
24
- CONCISE SUMMARY:"""
25
- prompt = PromptTemplate.from_template(prompt_template)
26
- refine_template = (
27
- "Your job is to produce a final summary\n"
28
- "We have provided an existing summary up to a certain point: {existing_answer}\n"
29
- "We have the opportunity to refine the existing summary"
30
- "(only if needed) with some more context below.\n"
31
- "------------\n"
32
- "{text}\n"
33
- "------------\n"
34
- "If the context isn't useful, return the original summary."
35
- )
36
- refine_prompt = PromptTemplate.from_template(refine_template)
37
- chain = load_summarize_chain(
38
- llm=llm,
39
- chain_type="refine",
40
- question_prompt=prompt,
41
- refine_prompt=refine_prompt,
42
- return_intermediate_steps=True,
43
- input_key="input_documents",
44
- output_key="output_text",
45
- )
46
- result = chain({"input_documents": docs}, return_only_outputs=True)
47
-
48
- return result
49
-
50
- def summarize_mapreduce(self, script):
51
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
52
- texts = text_splitter.split_text(script)
53
- text = [Document(page_content=t) for t in texts]
54
-
55
- llm = TransformersLLM.from_model_id_low_bit(f"checkpoint\\{self.llm_version}")
56
-
57
- # Map
58
- map_template = """The following is a meeting recording
59
- =========
60
- {texts}
61
- =========
62
- Based on this list of recordings, please summary the main idea briefly
63
- Helpful Answer:"""
64
- map_prompt = PromptTemplate.from_template(map_template)
65
- map_chain = LLMChain(llm=llm, prompt=map_prompt, llm_kwargs={"max_new_tokens": 512})
66
-
67
- # Reduce
68
- reduce_template = """The following is set of summaries:
69
- =========
70
- {texts}
71
- =========
72
- Take these and distill it into a final, consolidated summary of the meeting.
73
- Helpful Answer:"""
74
- reduce_prompt = PromptTemplate.from_template(reduce_template)
75
- reduce_chain = LLMChain(llm=llm, prompt=reduce_prompt, llm_kwargs={"max_new_tokens": 4096})
76
-
77
- # Takes a list of documents, combines them into a single string, and passes this to an LLMChain
78
- combine_documents_chain = StuffDocumentsChain(
79
- llm_chain=reduce_chain, document_variable_name="texts"
80
- )
81
-
82
- # Combines and iteratively reduces the mapped documents
83
- reduce_documents_chain = ReduceDocumentsChain(
84
- combine_documents_chain=combine_documents_chain,
85
- collapse_documents_chain=combine_documents_chain,
86
- token_max=4000,
87
- )
88
-
89
- # Combining documents by mapping a chain over them, then combining results
90
- map_reduce_chain = MapReduceDocumentsChain(
91
- llm_chain=map_chain,
92
- reduce_documents_chain=reduce_documents_chain,
93
- document_variable_name="texts",
94
- return_intermediate_steps=False,
95
- )
96
-
97
- result = map_reduce_chain({"input_documents": text}, return_only_outputs=True)
98
- # print("-." * 40)
99
- # print(result)
100
- result = result['output_text'].split("Helpful Answer:").strip()[-1]
101
- return result
102
-
103
- def summarize(self, script):
104
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=0)
105
- texts = text_splitter.split_text(script)
106
-
107
- prompt_template = """The following is a piece of meeting recording:
108
- <<<{text}>>>
109
- Based on recording, summary the main idea fluently.
110
- JUST SUMMARY!NO OTHER WORDS!
111
- SUMMARY:"""
112
-
113
- reduce_template = """The following is a meeting recording pieces:
114
- <<<{text}>>>
115
- Take these and distill it into a final, consolidated summary of the meeting.
116
- JUST SUMMARY!NO OTHER WORDS!
117
- SUMMARY:"""
118
-
119
- print(len(texts))
120
- for text in texts:
121
- print(text)
122
- print("\n")
123
-
124
- llm = TransformersLLM.from_model_id_low_bit(
125
- f"checkpoint\\{self.llm_version}")
126
- sum_split = []
127
-
128
- for text in texts:
129
- response = llm(prompt=prompt_template.format(text=text), max_new_tokens=1024)
130
- print(response)
131
- response_answer = response.split("SUMMARY:")
132
-
133
- sum_split.append(response_answer[1])
134
-
135
- sum_all = "\n".join(sum_split)
136
-
137
- result = llm(prompt=reduce_template.format(text=sum_all), max_new_tokens=4000)
138
- result_split = result.split("SUMMARY:")
139
- return result_split[1]
140
-
141
- # # for test
142
- # import argparse
143
- #
144
- # parser = argparse.ArgumentParser()
145
- # parser.add_argument("--llm_version", default="Llama-2-7b-chat-hf-INT4", help="LLM model version")
146
- # args = parser.parse_args()
147
- # file_path = "../test.txt"
148
- # with open(file_path, "r", encoding="utf-8") as file:
149
- # content = file.read()
150
- # Sumbot = Sum(args)
151
- # result = Sumbot.summarize_mapreduce(content)
152
- # print("-." * 20)
153
- # print(result)