rjavedv committed
Commit · 202ad69
1 Parent(s): b1e5b99
some more updates
app.py
CHANGED
@@ -9,11 +9,10 @@ import requests as re
 import json
 import streamlit as st
 
-#####
+##### Start Func Def part
 ## Call selected LLM, get word def and return
-def call_llm(lmodel: str, lword: str) -> str:
-    ltoken = os.getenv('HF_TOKEN')
-    sysmsg = 'You are an English language expert. you are precise in your output. Users will ask you a word and in your response you will include the word meaning, synonyms, antonyms, and word usage examples. Format your output in different sections to make it easy to read'
+def call_llm(lmodel: str, lword: str, sysmsg: str, ltoken: str) -> str:
+    #ltoken = os.getenv('HF_TOKEN')
     usermsg = lword
     messages = [
        { "role": "system", "content": sysmsg },
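Note: the hunk above elides the middle of call_llm (the rest of the message list and the chat-completion call). Below is a minimal sketch of what that body plausibly looks like, reconstructed from the commented-out client code removed in the second hunk; the InferenceClient usage, the user message, and the sampling values are assumptions, not the commit's actual elided lines.

# Hedged sketch only; the actual elided lines of call_llm may differ.
# Assumes huggingface_hub's InferenceClient and the parameter values seen
# in the commented-out call removed in this commit.
from huggingface_hub import InferenceClient

def call_llm_sketch(lmodel: str, lword: str, sysmsg: str, ltoken: str) -> str:
    usermsg = lword
    messages = [
        { "role": "system", "content": sysmsg },
        { "role": "user", "content": usermsg },   # assumed user turn built from the entered word
    ]
    # Client pointed at the selected model, authenticated with the HF token
    client = InferenceClient(model=lmodel, token=ltoken)
    completion = client.chat.completions.create(
        model=lmodel,
        messages=messages,
        temperature=0.5,
        max_tokens=2048,
        top_p=0.7,
    )
    return completion.choices[0].message.content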
@@ -35,86 +34,67 @@ def call_llm(lmodel: str, lword: str) -> str:
     return(completion.choices[0].message.content)
 
 ## Call Dictionary API and get word def
-def call_dict_api(lword: str) ->
-
-
-
+def call_dict_api(lword: str) -> dict:
+    dict_url = 'https://api.dictionaryapi.dev/api/v2/entries/en/'
+    api_url = dict_url + lword
+    response = re.get(url=api_url)
+    rjson = response.json()
+    if type(rjson) == dict:
+        return rjson
+    elif type(rjson) == list:
+        return rjson[0]
+    else:
+        return dict()
+
+##### End Func Def part
+
+sysmsg = '''You are an English language expert. You are precise in your output.
+User will ask you a word and in your response you will include the word meaning, synonyms, antonyms, and word usage examples.
+Format your output in different sections to make it easy to understand.'''
 
 lmodel = 'meta-llama/Llama-3.2-3B-Instruct'
+ltoken = os.getenv('HF_TOKEN')
+llm_out = ''
+api_out = dict()
+
+st.set_page_config(page_title='Simple Dictionary', page_icon=':blue_book:', layout="wide")
 
 st.title('A simple dictionary')
 lword = st.text_input(label='Enter word')
 lmodel = st.selectbox('Choose Language Model:',
            index=0,
            options=['meta-llama/Llama-3.2-3B-Instruct', 'Qwen/Qwen2.5-1.5B-Instruct', 'microsoft/Phi-3.5-mini-instruct']
-           )
+           )
+sys_prmpt = st.text_area(label='Sytem Prompt:', value=sysmsg)
 chk1 = st.checkbox(label='Use Language model to get word definition', value=True)
 chk2 = st.checkbox(label='Use Dictionary API call to get word definition', value=True)
 
 btn_submit = st.button(label='Submit')
-st.write(os.environ)
-col1, col2 = st.columns(2, border=True)
-## when setting hf token in terminal remember to set witout ''
-ltoken = os.getenv('HF_TOKEN')
-print(ltoken)
-#st.write(ltoken)
-
-#lmodel = 'cardiffnlp/twitter-roberta-base-sentiment-latest' #Text classification
-# lmodel = 'meta-llama/Llama-3.2-3B-Instruct' #Text Generation
-# lmodel = 'microsoft/phi-4' #Not Working for inference API, model too large
-# lmodel = 'PowerInfer/SmallThinker-3B-Preview' #Time out for this shit
-lmodel = 'Qwen/Qwen2.5-1.5B-Instruct' #Working But output is in a strange format
-# lmodel = 'mistralai/Mistral-Small-24B-Instruct-2501' #Not Working
-# lmodel = 'microsoft/Phi-3.5-mini-instruct' #working
 
-
-#         model= lmodel,
-#         token= ltoken
-#         )
-
-# #loutput = client.text_classification("Today is a great day")
-# #print('output=',loutput)
-
-
-# completion = client.chat.completions.create(
-#     model= lmodel,
-#     messages= messages,
-#     temperature=0.5,
-#     max_tokens=2048,
-#     top_p=0.7
-#     #stream=False
-# )
-
-# print('Chat output : ')
-# print(completion.choices[0].message.content)
-sample_out = '''
-Tolerate
-Definition: To accept or allow something that one does not like or approve of, especially in a mild or passive way.
-Synonyms: Accept, endure, put up with, overlook, tolerate
-Antonyms: Disapprove, reject, condemn, despise
-Usage examples:
-1. The new law was difficult to tolerate.
-2. I have to tolerate my boss's constant interruptions.
-3. He was tolerant of her eccentricities.
-4. We had to tolerate the long wait at the airport.
-5. She tolerated his frequent late arrivals.
-6. We have to tolerate the noise from the construction site.
-7. He tolerated the criticism as a necessary part of his job.
-8. She tolerated the small talk at the meeting.
-9. We have to tolerate the inconvenience of the traffic.
-10. He tolerated the long hours at work.
-
-In these examples, "tolerate" is used to describe the acceptance or allowance of something that is not liked or approved of, often in a mild or passive manner.
-'''
-#### Dict API call
-# dict_url = 'https://api.dictionaryapi.dev/api/v2/entries/en/'
-# lword = 'Tolerate'
-# api_url = dict_url + lword
-# print(api_url)
-# response = re.get(url=api_url)
-# print(response.status_code)
-# rjson = response.json()
-# print(rjson)
-# st.write(rjson)
+##st.write(os.environ)
 
+if btn_submit:
+    if chk1:
+        llm_out = call_llm(lmodel, lword, sys_prmpt, ltoken)
+    if chk2:
+        api_out = call_dict_api(lword)
 
+col1, col2 = st.columns(2, border=True)
+with col1:
+    st.subheader(':green[LLM Output]')
+    if chk1:
+        st.write(llm_out)
+    else:
+        st.write('LLM Call option not selected')
+
+with col2:
+    st.subheader(':orange[API Call]')
+    if chk2:
+        st.write(api_out)
+    else:
+        st.write('API Call option not selected')
+
+st.divider()
+st.write('*:blue[Developed By: Rashid Javed]*')
+st.write('Dictionary API Call courtesy of ')
+st.page_link('https://dictionaryapi.dev/', label='https://dictionaryapi.dev/')
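For reference, a minimal sketch of how the entry returned by call_dict_api could be rendered instead of dumping the raw dict with st.write(api_out). This is not part of the commit; the field names ('meanings', 'partOfSpeech', 'definitions') are assumed from the public https://dictionaryapi.dev/ response format.

# Hedged sketch, not in the commit: render the dictionary entry more readably.
# Field names follow the public dictionaryapi.dev response; adjust if the API changes.
import streamlit as st

def render_dict_entry(entry: dict) -> None:
    # The API returns an object with 'title' instead of 'meanings' for unknown words
    if 'meanings' not in entry:
        st.write(entry.get('title', 'No definition found'))
        return
    st.write(f"**{entry.get('word', '')}**")
    for meaning in entry['meanings']:
        st.write(f"*{meaning.get('partOfSpeech', '')}*")
        for d in meaning.get('definitions', [])[:3]:  # show at most three senses
            st.write(f"- {d.get('definition', '')}")

# Possible usage inside the col2 block:
# render_dict_entry(api_out)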