Laurentgina committed on
Commit
df30967
Β·
verified Β·
1 Parent(s): e401713

Upload 4 files

Browse files
Files changed (3) hide show
  1. README.md +4 -4
  2. app.py +167 -0
  3. requirements.txt +10 -0
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Test Cyber
3
- emoji: πŸ“ˆ
4
- colorFrom: blue
5
- colorTo: green
6
  sdk: streamlit
7
  sdk_version: 1.44.1
8
  app_file: app.py
 
1
  ---
2
+ title: Test Lilly
3
+ emoji: πŸ“‰
4
+ colorFrom: yellow
5
+ colorTo: pink
6
  sdk: streamlit
7
  sdk_version: 1.44.1
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_huggingface import HuggingFaceEndpoint
3
+ import streamlit as st
4
+ from langchain_core.prompts import PromptTemplate
5
+ from langchain_core.output_parsers import StrOutputParser
6
+
7
model_id = "segolilylabs/Lily-Cybersecurity-7B-v0.2"

def get_llm_hf_inference(model_id=model_id, max_new_tokens=128, temperature=0.1):
    """Build a HuggingFaceEndpoint client for remote text generation.

    Parameters:
    - model_id (str): HuggingFace Hub repository ID of the model to query.
    - max_new_tokens (int): Upper bound on the number of tokens generated per call.
    - temperature (float): Sampling temperature (low values = near-deterministic).

    Returns:
    - HuggingFaceEndpoint: Configured inference endpoint; authenticates with the
      HF_TOKEN environment variable.
    """
    # No intermediate variable needed — construct and return the endpoint directly.
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        token=os.getenv("HF_TOKEN"),
    )
27
+
28
# Configure the Streamlit app
st.set_page_config(page_title="HuggingFace ChatBot", page_icon="🤗")
st.title("Personal HuggingFace ChatBot")
st.markdown(f"*This is a simple chatbot that uses the HuggingFace transformers library to generate responses to your text input. It uses the {model_id}.*")

# Initialize session state for avatars
if "avatars" not in st.session_state:
    st.session_state.avatars = {'user': None, 'assistant': None}

# Initialize session state for user text input
if 'user_text' not in st.session_state:
    st.session_state.user_text = None

# Initialize session state for model parameters.
# NOTE: these defaults now match the sidebar widgets below; previously the
# init said 256 / "friendly AI conversing..." while the widgets hard-coded
# 128 / "You are a friendly AI...", so the stored defaults were dead values.
if "max_response_length" not in st.session_state:
    st.session_state.max_response_length = 128

if "system_message" not in st.session_state:
    st.session_state.system_message = "You are a friendly AI conversing with a human user."

if "starter_message" not in st.session_state:
    st.session_state.starter_message = "Hello, there! How can I help you today?"


# Sidebar for settings
with st.sidebar:
    st.header("System Settings")

    # AI settings: seed each widget from session state instead of a hard-coded
    # literal so the stored value and the displayed value cannot drift apart.
    st.session_state.system_message = st.text_area(
        "System Message", value=st.session_state.system_message
    )
    st.session_state.starter_message = st.text_area(
        'First AI Message', value=st.session_state.starter_message
    )

    # Model settings
    st.session_state.max_response_length = st.number_input(
        "Max Response Length", value=st.session_state.max_response_length
    )

    # Avatar selection
    st.markdown("*Select Avatars:*")
    col1, col2 = st.columns(2)
    with col1:
        st.session_state.avatars['assistant'] = st.selectbox(
            "AI Avatar", options=["🤗", "💬", "🤖"], index=0
        )
    with col2:
        st.session_state.avatars['user'] = st.selectbox(
            "User Avatar", options=["👤", "👱‍♂️", "👨🏾", "👩", "👧🏾"], index=0
        )
    # Reset Chat History
    reset_history = st.button("Reset Chat History")

# Initialize or reset chat history, opening with the configured starter message
if "chat_history" not in st.session_state or reset_history:
    st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]
87
def get_response(system_message, chat_history, user_text,
                 eos_token_id=['User'], max_new_tokens=256, get_llm_hf_kws=None):
    """
    Generates a response from the chatbot model.

    Args:
        system_message (str): The system message for the conversation.
        chat_history (list): The list of previous chat messages as
            {'role', 'content'} dicts. The input list is NOT mutated; an
            extended copy is returned instead.
        user_text (str): The user's input text.
        eos_token_id (list, optional): Reserved; currently unused by the chain.
        max_new_tokens (int, optional): The maximum number of new tokens to generate.
        get_llm_hf_kws (dict, optional): Additional keyword arguments forwarded
            to get_llm_hf_inference (e.g. a different model_id).

    Returns:
        tuple: A tuple containing the generated response and the updated chat history.
    """
    # Use None as the default to avoid the shared-mutable-default pitfall,
    # and actually forward the kwargs (the original accepted them but
    # silently ignored them).
    if get_llm_hf_kws is None:
        get_llm_hf_kws = {}

    # Set up the model
    hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.1,
                              **get_llm_hf_kws)

    # Create the prompt template (Mistral-style [INST] ... [/INST] wrapper)
    prompt = PromptTemplate.from_template(
        (
            "[INST] {system_message}"
            "\nCurrent Conversation:\n{chat_history}\n\n"
            "\nUser: {user_text}.\n [/INST]"
            "\nAI:"
        )
    )
    # Make the chain: prompt -> endpoint -> plain-string parser
    chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')

    # Generate the response and keep only the text after the final "AI:" marker
    response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
    response = response.split("AI:")[-1]

    # Extend a copy of the history so the caller's list is left untouched;
    # callers rebind the returned history, so this is backward-compatible.
    updated_history = list(chat_history)
    updated_history.append({'role': 'user', 'content': user_text})
    updated_history.append({'role': 'assistant', 'content': response})
    return response, updated_history
127
+
128
# Chat interface
chat_interface = st.container(border=True)
with chat_interface:
    output_container = st.container()
    # chat_input renders pinned at the bottom; returns None until submitted
    st.session_state.user_text = st.chat_input(placeholder="Enter your text here.")

# Display chat messages
with output_container:
    # Replay the full stored history on every Streamlit rerun
    for message in st.session_state.chat_history:
        # Skip the system message
        if message['role'] == 'system':
            continue

        # Display the chat message using the correct avatar
        with st.chat_message(message['role'],
                             avatar=st.session_state['avatars'][message['role']]):
            st.markdown(message['content'])

    # When the user enters new text:
    if st.session_state.user_text:

        # Display the user's new message immediately
        with st.chat_message("user",
                             avatar=st.session_state.avatars['user']):
            st.markdown(st.session_state.user_text)

        # Display a spinner status bar while waiting for the response
        with st.chat_message("assistant",
                             avatar=st.session_state.avatars['assistant']):

            with st.spinner("Thinking..."):
                # Call the Inference API with the system_prompt, user text, and history
                response, st.session_state.chat_history = get_response(
                    system_message=st.session_state.system_message,
                    user_text=st.session_state.user_text,
                    chat_history=st.session_state.chat_history,
                    max_new_tokens=st.session_state.max_response_length,
                )
                st.markdown(response)
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ transformers
2
+ huggingface_hub
3
+ streamlit
4
+ langchain_core
5
+ langchain_community
6
+ langchain_huggingface
7
+ langchain_text_splitters
8
+ accelerate
9
+ watchdog
10
+ tqdm