ai: Ready to drink.
README.md CHANGED

@@ -8,5 +8,20 @@ app_file: jarvis.py
 pinned: true
 short_description: Inspired by Iron Man movies.
 models:
+- deepseek-ai/DeepSeek-V3-0324
+- deepseek-ai/DeepSeek-R1
+- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+- deepseek-ai/DeepSeek-R1-Distill-Llama-70B
+- google/gemma-3-1b-it
+- google/gemma-3-4b-it
+- google/gemma-3-27b-it
+- meta-llama/Llama-3.1-8B-Instruct
+- meta-llama/Llama-3.2-3B-Instruct
+- meta-llama/Llama-3.3-70B-Instruct
+- meta-llama/Llama-4-Maverick-17B-128E-Instruct
+- meta-llama/Llama-4-Scout-17B-16E-Instruct
+- Qwen/Qwen2.5-VL-3B-Instruct
+- Qwen/Qwen2.5-VL-32B-Instruct
+- Qwen/Qwen2.5-VL-72B-Instruct
 - Qwen/QwQ-32B
 ---
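The `models` field in the Space front matter only declares which Hub repositories the app references; jarvis.py still needs its own mapping from a dropdown display name back to a repo id (`get_model_key`) and per-model settings (`MODEL_CONFIG`), neither of which appears in this commit. A minimal sketch of how the expanded list could be mirrored in code — every name, shape, and value below is an assumption, not the Space's actual implementation:

```python
# Hypothetical mirror of the README `models` list; the real MODEL_CHOICES,
# MODEL_CONFIG, DEFAULT_CONFIG and get_model_key in jarvis.py are not shown
# in this commit, so the shapes and values below are assumptions.
MODEL_REPOS = [
    "deepseek-ai/DeepSeek-V3-0324",
    "meta-llama/Llama-3.3-70B-Instruct",
    "Qwen/QwQ-32B",
]

DEFAULT_CONFIG = {"max_tokens": 1024, "temperature": 0.6}  # assumed defaults

# Display name (what the dropdown shows) -> repo id (what the inference call uses).
MODEL_CHOICES = [repo.split("/")[-1] for repo in MODEL_REPOS]
MODEL_KEYS = dict(zip(MODEL_CHOICES, MODEL_REPOS))
MODEL_CONFIG = {repo: dict(DEFAULT_CONFIG) for repo in MODEL_REPOS}

def get_model_key(display_name):
    # Fall back to the first repo when the display name is unknown.
    return MODEL_KEYS.get(display_name, MODEL_REPOS[0])
```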
jarvis.py CHANGED

@@ -59,8 +59,7 @@ def marked_item(item, marked, attempts):
     if attempts[item] >= 3:
         def remove_fail():
             marked.discard(item)
-
-            del attempts[item]
+            attempts.pop(item, None)
         threading.Timer(3600, remove_fail).start()
 
 class SessionWithID(requests.Session):
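`attempts.pop(item, None)` replaces the bare `del attempts[item]`, so the delayed cleanup no longer raises `KeyError` if the counter was already cleared elsewhere before the timer fires. A minimal sketch of the surrounding helper, assuming `marked` is a set of item keys and `attempts` a dict of failure counts — only the lines visible in the hunk are known, the rest is reconstructed:

```python
import threading

def marked_item(item, marked, attempts):
    # Record the failure and, after three strikes, schedule the item to be
    # unmarked an hour later so it can be retried.
    marked.add(item)
    attempts[item] = attempts.get(item, 0) + 1
    if attempts[item] >= 3:
        def remove_fail():
            marked.discard(item)
            # pop() with a default never raises, unlike the old `del attempts[item]`,
            # even if the entry was already removed by another code path.
            attempts.pop(item, None)
        threading.Timer(3600, remove_fail).start()
```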
@@ -137,7 +136,7 @@ async def chat_with_model_async(history, user_input, selected_model_display, ses
     selected_model = get_model_key(selected_model_display)
     model_config = MODEL_CONFIG.get(selected_model, DEFAULT_CONFIG)
     messages = [{"role": "user", "content": user} for user, _ in history] + [{"role": "assistant", "content": assistant} for _, assistant in history if assistant]
-    if INTERNAL_TRAINING_DATA:
+    if INTERNAL_TRAINING_DATA and MODEL_CHOICES and selected_model_display == MODEL_CHOICES[0]:
         messages.insert(0, {"role": "system", "content": INTERNAL_TRAINING_DATA})
     messages.append({"role": "user", "content": user_input})
     global ACTIVE_CANDIDATE
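The stricter condition injects the `INTERNAL_TRAINING_DATA` system prompt only when the default (first) entry of `MODEL_CHOICES` is selected, instead of for every backend. A sketch of the message assembly as it reads after this change; the helper name and signature are invented for illustration, and the history is assumed to be a list of `(user, assistant)` pairs as the context line implies:

```python
def build_messages(history, user_input, selected_model_display,
                   internal_training_data, model_choices):
    # Flatten the stored history exactly as the context line does: all user
    # turns first, then all non-empty assistant turns.
    messages = [{"role": "user", "content": user} for user, _ in history]
    messages += [{"role": "assistant", "content": assistant}
                 for _, assistant in history if assistant]
    # Only the default (first) model gets the internal system prompt.
    if internal_training_data and model_choices and selected_model_display == model_choices[0]:
        messages.insert(0, {"role": "system", "content": internal_training_data})
    messages.append({"role": "user", "content": user_input})
    return messages
```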
@@ -196,5 +195,8 @@ with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], h
     chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, scale=1, elem_id=AI_TYPES["AI_TYPE_2"])
     with gr.Row():
         msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
+    with gr.Accordion(AI_TYPES["AI_TYPE_6"], open=False):
+        model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
+        model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, user_session, selected_model], show_progress="full")
     msg.submit(fn=respond_async, inputs=[msg, user_history, selected_model, user_session], outputs=[chatbot, msg, user_session], api_name=INTERNAL_AI_GET_SERVER)
 jarvis.launch(max_file_size="1mb")
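The new accordion surfaces a model dropdown in the UI, and its `change` event hands the selection to `change_model`, which evidently resets the stored history and session and returns the new model key. A self-contained sketch of the same wiring with placeholder labels and a stub handler, assuming the three outputs are `gr.State` components — everything not visible in the hunk is an assumption:

```python
import gradio as gr

# Placeholder choices; the real list is MODEL_CHOICES in jarvis.py.
MODEL_CHOICES = ["DeepSeek-V3-0324", "Llama-3.3-70B-Instruct", "QwQ-32B"]

def change_model(new_model):
    # Stub: switching models clears the stored history and session and keeps
    # the newly selected model; the real change_model is not part of this diff.
    return [], {}, new_model

with gr.Blocks() as jarvis:
    user_history = gr.State([])
    user_session = gr.State({})
    selected_model = gr.State(MODEL_CHOICES[0])
    chatbot = gr.Chatbot(label="Chat")
    with gr.Accordion("Model", open=False):
        model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
        model_dropdown.change(fn=change_model, inputs=[model_dropdown],
                              outputs=[user_history, user_session, selected_model],
                              show_progress="full")

jarvis.launch()
```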