Create main.py
main.py
ADDED
@@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
import argparse
import os

import gradio as gr

from models.helperbot_bigdl import Chat
from models.sum_model import Sum

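# NOTE (assumption): this app targets the Gradio 3.x API (gr.update,
# gr.UploadButton, demo.queue(concurrency_count=...)); it is not expected
# to run unmodified on Gradio 4+.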
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

parser = argparse.ArgumentParser()
# whisper model arguments
parser.add_argument("--whisper_version", default="small", help="Whisper model version for video ASR")
# llm model arguments
parser.add_argument("--llm_version", default="Llama-2-7b-chat-hf-INT4", help="LLM model version")
parser.add_argument("--embed_version", default="all-MiniLM-L12-v2", help="Embedding model version")
parser.add_argument("--top_k", default=3, type=int, help="Number of most relevant contexts to pass to the LLM")
parser.add_argument("--qa_max_new_tokens", default=128, type=int, help="Maximum number of new tokens the LLM may generate")
# general arguments
parser.add_argument("--port", type=int, default=7860, help="Gradio server port")

args = parser.parse_args()

chat = Chat(args)
sumbot = Sum(args)
chat.init_model()

# Module-level state shared across the Gradio callbacks below.
global_chat_history = []  # list of (user message, bot reply) tuples
global_result = ""        # transcript/script of the current recording
global_summary = ""       # latest summary generated from global_result


def clean_conversation():
    """Reset the chat history and clear every input/output for a fresh conversation."""
    global global_chat_history
    chat.clean_history()
    global_chat_history = []
    return ('', gr.update(value=None, interactive=True), None,
            gr.update(value=None, visible=True), gr.update(value=None, visible=True))


def clean_chat_history():
    """Reset the chat history while keeping the current script and summary."""
    global global_chat_history
    chat.clean_history()
    global_chat_history = []
    return '', None


def submit_message(message, max_new_tokens, top_k):
    # The sliders override the CLI defaults on every request.
    args.qa_max_new_tokens = max_new_tokens
    args.top_k = top_k
    chat_history, generated_question, source_documents = chat.chat2video(args, message, global_result)
    global_chat_history.append((message, chat_history[0][1]))
    return '', global_chat_history


def gen_script(vid_path):
    global global_result
    if vid_path is None:
        # Nothing uploaded yet: surface a hint instead of stale text.
        return gr.update(value="===== Please upload video! =====", visible=True), None
    global_result = chat.video2log(vid_path)
    return gr.update(value=global_result, visible=True), download_script_file()


def download_script_file():
    try:
        with open("script_result.txt", "w") as file:
            file.write(global_result)
        return "script_result.txt"
    except Exception as e:
        return f"Error preparing file for download: {str(e)}"


def download_sum_file():
    try:
        with open("sum_result.txt", "w") as file:
            file.write(global_summary)
        return "sum_result.txt"
    except Exception as e:
        return f"Error preparing file for download: {str(e)}"


def upload_file(files):
    global global_result
    # Only the first uploaded file is read, even when several are selected.
    file_path = files[0].name
    file_content = ""
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            file_content = file.read()
        global_result = file_content
    except FileNotFoundError:
        print("File not found")
    except IOError:
        print("Error occurred while reading the file")
    return file_content, download_script_file()


def summary():
    global global_summary
    global_summary = sumbot.summarize(global_result)
    return gr.update(value=global_summary, visible=True), download_sum_file()


css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#video_inp {min-height: 100px}
#chatbox {min-height: 100px;}
#header {text-align: center;}
#hint {font-size: 1.0em; padding: 0.5em; margin: 0;}
.message { font-size: 1.2em; }
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""## Meeting Helper Bot
                    Upload a meeting recording in mp3/mp4 format (or a txt transcript) to get a summary and chat about its content.
                    (You can adjust the parameters below to fit your needs.)
                    Powered by BigDL, Llama, Whisper, and LangChain""",
                    elem_id="header")

        with gr.Column() as advanced_column:
            max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=1024, step=1, value=128)
            top_k = gr.Slider(label="Top-k", minimum=1, maximum=50, step=1, value=3)

        with gr.Row():
            with gr.Column():
                video_inp = gr.Video(label="1. Upload MP3/MP4 File")
                upload_button = gr.UploadButton("1. Or Click to Upload a txt File", file_types=["doc", "txt"],
                                                file_count="multiple")
                gen_btn = gr.Button("2. Generate Script")
                sum_outp = gr.Textbox(label="Summarization output", lines=15)
                save_sum_dl = gr.File(label="Download Summary")

            with gr.Column():
                script_outp = gr.Textbox(label="Script output", lines=30)
                with gr.Row():
                    script_summarization_btn = gr.Button("3. Script Summarization")
                save_script_dl = gr.File(label="Download Script")

            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True)
                btn_submit = gr.Button("Submit")
                with gr.Row():
                    btn_clean_chat_history = gr.Button("Clean Chat History")
                    btn_clean_conversation = gr.Button("Start New Conversation")

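    # Event wiring: connect the controls above to their callbacks.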
    upload_button.upload(upload_file, upload_button, [script_outp, save_script_dl])

    gen_btn.click(gen_script, [video_inp], [script_outp, save_script_dl])
    script_summarization_btn.click(summary, [], [sum_outp, save_sum_dl])

    btn_submit.click(submit_message, [input_message, max_new_tokens, top_k], [input_message, chatbot])
    input_message.submit(submit_message, [input_message, max_new_tokens, top_k], [input_message, chatbot])

    btn_clean_conversation.click(clean_conversation, [], [input_message, video_inp, chatbot, sum_outp, script_outp])
    btn_clean_chat_history.click(clean_chat_history, [], [input_message, chatbot])

    demo.load(queue=False)

demo.queue(concurrency_count=1)
demo.launch(height='800px', server_port=args.port, debug=True, share=False)
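
A minimal launch sketch, assuming the `models` package (providing `helperbot_bigdl.Chat` and `sum_model.Sum`) and the INT4 Llama 2 checkpoint are available locally; the flag values below are illustrative defaults, not requirements of this PR:

    python main.py --whisper_version small --llm_version Llama-2-7b-chat-hf-INT4 --top_k 3 --port 7860

The app is then served at http://localhost:7860.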