warhawkmonk committed on
Commit
5492d7f
·
verified ·
1 Parent(s): 1842333

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -187
app.py CHANGED
@@ -1,187 +1,43 @@
1
# NOTE(review): this block was reconstructed from a flattened diff rendering;
# the original indentation was lost, so the nesting below is a best-effort
# reconstruction — verify against the repository history before relying on it.
#
# Streamlit page (pre-commit version of app.py): generates tabular synthetic
# data from a user prompt. The prompt is turned into a schema
# (schema_generator), reference text is fetched for it (relevent_value /
# actual_value from data_collector), and rows are accumulated into
# st.session_state['current_append'] while being previewed live.

import streamlit as st
# import wikipedia
from streamlit_lottie import st_lottie
import regex as re
from streamlit_js_eval import streamlit_js_eval
# from common.utils import *
from data_collector import *
# from langchain_community.llms import Ollama

# from langchain_community.llms import Ollama
import pandas as pd
import json

st.set_page_config(layout="wide")
# Browser viewport size, used to scale containers and animations below.
screen_width = streamlit_js_eval(label="screen.width",js_expressions='screen.width')
screen_height = streamlit_js_eval(label="screen.height",js_expressions='screen.height')


# Session-state bootstrap: persist schema, prompt and generation progress
# across Streamlit reruns.
condition_capture = st.session_state
if 'schema' not in condition_capture:
    condition_capture['schema'] = {}
if 'prompt' not in condition_capture:
    condition_capture['prompt'] = ""
if "count" not in condition_capture:
    condition_capture['count'] = 0
if "prev_schema" not in condition_capture:
    condition_capture['prev_schema'] = {}
if "textual_value" not in condition_capture:
    condition_capture['textual_value'] = {}
textual_value=None


schema=condition_capture['schema']


column1,column2 = st.columns(2)
with column2:
    # Fetch reference material once per schema (only when none is cached yet).
    if len(condition_capture['schema'])!=0 and len(condition_capture['textual_value'])==0:
        # condition_capture['prev_schema'] = condition_capture['schema']
        condition_capture['textual_value']=relevent_value(str(condition_capture['schema']).lower(),50)
    if len(condition_capture['schema'])!=0:
        # textual_value[1] appears to hold renderable HTML pages and [0] the raw
        # text — NOTE(review): inferred from the indexing here; confirm in
        # data_collector.relevent_value.
        html_page = condition_capture['textual_value'][1]
        textual_value = condition_capture['textual_value'][0]
        st.write("<br>",unsafe_allow_html=True)

        with st.container(border=True,height=int(screen_height/2.3)):
            st.header("Wikipedia insights")
            updated_schema = st.button("Start processing")
            selector=st.empty()
            write =st.empty()
            start_page= selector.select_slider("Select a range of color wavelength",options=[i for i in html_page],key="start_page")
            write.write(html_page[start_page],unsafe_allow_html=True)
        # )

with column1:
    if str(schema)!=str({}):
        tabs = st.tabs(["Schema","Data Generation"])
        with tabs[0]:
            if str(schema)!=str({}):
                schema_column1,schema_column2 = st.columns(2)
                with schema_column1:
                    # Editable (display-only in effect) view of the schema keys.
                    edited_df = st.data_editor([str(i) for index,i in enumerate(schema)],hide_index=True,use_container_width=True,num_rows='dynamic',height=int(screen_height/3))
                with schema_column2:
                    number = st.number_input("Number of rows",min_value=1,max_value=1000,value=10)
                    # Generation only starts after "Start processing" is pressed.
                    if number!=condition_capture['count'] and updated_schema:
                        condition_capture['count'] = number

                    with open("/home/user/app/animation/edit_file.json") as animate:
                        url_json=json.load(animate)
                    st_lottie(url_json,height = int(screen_height/3))

        with tabs[1]:
            with open("/home/user/app/animation/no data animation.json") as animate:
                url_json=json.load(animate)
            dataframe=st.empty()
            if condition_capture['count']==0:
                # Nothing requested yet: show the "no data" animation.
                st_lottie(url_json,height = int(screen_height/3))
            else:
                smart_append=[]
                # Regenerate rows only when the schema changed since last rerun.
                if condition_capture['prev_schema'] != condition_capture['schema']:
                    condition_capture['prev_schema'] = condition_capture['schema']
                    condition_capture['current_append']={}

                    for text_indexing,store in enumerate(actual_value(textual_value,schema)):
                        dummy_value =dictionary_formatting(store)
                        # Keep at most one value per key (drop older entries).
                        for keys in dummy_value:
                            while len(dummy_value[keys])>=2:
                                dummy_value[keys].pop(0)
                        dummy_value = dictionary_formatting(dummy_value)

                        if dummy_value != None:
                            smart_append.append(dummy_value)
                            print(dummy_value)
                            for keys in dummy_value:
                                if keys not in condition_capture['current_append']:
                                    condition_capture['current_append'][str(keys)]=[]
                                condition_capture['current_append'][str(keys)].append(str([i for i in dummy_value[keys]]))
                            # Live-update the preview table as rows arrive.
                            dataframe.dataframe(condition_capture['current_append'])

                        # Stop once the requested number of rows is reached.
                        if len(condition_capture['current_append'][[i for i in condition_capture['current_append']][-1]])>=condition_capture['count']:
                            break

                # print(dummy_value)
                # if smart_check(dummy_value)!=True:
                #     smart_value=verification(dummy_value)
                #     if statement(condition_capture['schema'],smart_value):
                #         st.dataframe(smart_value)
                condition_capture['current_append']={}
                if len(smart_append)==0:
                    # Rebuild row dicts from the previously accumulated columns.
                    ranger=len(condition_capture['current_append'][[i for i in condition_capture['current_append']][0]])
                    for indexing in range(ranger):
                        working_dict = {}
                        for j in condition_capture['current_append']:
                            working_dict[j]=condition_capture['current_append'][j][indexing][0]
                        smart_append.append(working_dict)
                smart_movement = sorting(smart_append)

                # SECURITY NOTE(review): eval() below executes arbitrary
                # expressions from generated strings — prefer ast.literal_eval.
                for keys in smart_movement:
                    value=eval(keys)
                    for keys in value:
                        if keys not in condition_capture['current_append']:
                            condition_capture['current_append'][str(keys)]=[]
                        condition_capture['current_append'][str(keys)].append([str(i) for i in value[keys]])
                    dataframe.dataframe(condition_capture['current_append'])
                for indexing,j in enumerate(smart_movement):
                    try:
                        # Convert string to dictionary
                        dummy_value = eval(j)

                        # Process dictionary values
                        for key in dummy_value:
                            while len(dummy_value[key]) >= 2:
                                dummy_value[key].pop(0)

                        # Format dictionary
                        formatted = dictionary_formatting(dummy_value)
                        print(formatted)
                        # Verify and store result
                        verification_result = verification(formatted) if formatted else None
                        for j in verification_result:
                            if j in condition_capture['current_append']:
                                condition_capture['current_append'][j][indexing]=[str(i) for i in verification_result[j]]
                        dataframe.dataframe(condition_capture['current_append'])

                    except:
                        pass


# Prompt box: entering a new prompt regenerates the schema and restarts the
# page via st.rerun().
prompt = st.text_input(label="Please use prompt to generate data",value=condition_capture['prompt'])
if prompt != str(condition_capture['prompt']):
    condition_capture['prompt'] = prompt
    schema = schema_generator(prompt)
    condition_capture['schema'] = schema
    condition_capture['current_append']={}
    st.rerun()
 
1
import gradio as gr
import torch
from transformers import pipeline

# Hugging Face Hub id for the Llama 3 8B instruct model.
# BUG FIX: the previous id "meta-llama/Llama-3-8b-Instruct-hf" does not exist
# on the Hub (the "-hf" suffix was a Llama-2-era repo convention), so the
# pipeline load failed. The official gated repo is below; an HF token with
# approved access is required to download it.
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

# Build the text-generation pipeline once at import time.
# device_map="auto" places the weights on GPU when one is visible (requires
# the `accelerate` package). fp16 halves memory on GPU, but half precision is
# poorly supported on CPU, so fall back to fp32 there.
generator = pipeline(
    "text-generation",
    model=MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
15
+
16
def generate_response(prompt, max_length=512, temperature=0.7):
    """Generate a chat reply for *prompt* with the loaded Llama 3 pipeline.

    Args:
        prompt: The user's message (plain text).
        max_length: Budget of NEW tokens to generate. Kept under its original
            name for backward compatibility, but now passed as
            ``max_new_tokens`` — previously it was passed as ``max_length``,
            which also counts the prompt tokens, so a long prompt could
            swallow the whole budget and truncate or empty the reply.
        temperature: Sampling temperature (> 0; higher = more random).

    Returns:
        The assistant's reply as a stripped string.
    """
    # BUG FIX: the old code hand-built a Llama-2 style "<s>[INST] ... [/INST]"
    # prompt, a template Llama 3 was never trained on. Passing a messages list
    # lets the pipeline apply the model's own chat template instead.
    messages = [{"role": "user", "content": prompt}]
    output = generator(
        messages,
        max_new_tokens=max_length,  # new-token budget, independent of prompt size
        temperature=temperature,
        do_sample=True,
        top_p=0.95,
        num_return_sequences=1,
    )
    # For chat-style input the pipeline returns the conversation with the
    # assistant turn appended; the reply is the last message's content. This
    # replaces the fragile split on the literal "[/INST]" marker, which broke
    # whenever the user's own text contained that token.
    reply = output[0]["generated_text"][-1]["content"]
    return reply.strip()
31
+
32
# --- Gradio UI --------------------------------------------------------------
# Two-column chat page: the user types on the left; the model's reply renders
# on the right after pressing Submit.
with gr.Blocks() as demo:
    gr.Markdown("# Chat with Llama 3 (8B Instruct)")
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(lines=3, placeholder="Type your message here...", label="Your Message")
            submit_btn = gr.Button("Submit")
        with gr.Column():
            output = gr.Textbox(lines=10, label="Llama 3 Response")
    # Wire the button: generate_response is called with the textbox value and
    # its return value fills the response box (max_length/temperature keep
    # their declared defaults).
    submit_btn.click(fn=generate_response, inputs=user_input, outputs=output)

# Start the Gradio server only when this file is executed directly.
if __name__ == "__main__":
    demo.launch()