Spaces: Running
vaibhavard committed · Commit 91a7855
Parent(s): b921791 (origin)
Browse files:
- Dockerfile +61 -0
- gradio_file.py +138 -0
- librechat.yaml +449 -0
- manager.ts +238 -0
- streamlit_app.py +132 -0
- tests.py +391 -0
Dockerfile
ADDED
@@ -0,0 +1,61 @@
# Pull the base image
FROM ghcr.io/danny-avila/librechat-dev:latest

# FROM ghcr.io/danny-avila/librechat-dev:d3d7d11ea8cbeef9fdffa1eb45d6b866e6ee182b

# Set environment variables
ENV HOST=0.0.0.0
ENV PORT=7860
ENV SESSION_EXPIRY=900000
ENV REFRESH_TOKEN_EXPIRY=604800000
# ENV MEILI_NO_ANALYTICS=true
# ENV MEILI_HOST=https://librechat-meilisearch.hf.space

# Create necessary directories
RUN mkdir -p /app/uploads/temp
RUN mkdir -p /app/client/public/images/temp
RUN mkdir -p /app/api/logs/
RUN mkdir -p /app/data
RUN mkdir -p /app/code_interpreter

# Give write permission to the directories
RUN chmod -R 777 /app/uploads/temp
RUN chmod -R 777 /app/client/public/images
RUN chmod -R 777 /app/api/logs/
RUN chmod -R 777 /app/data
RUN chmod -R 777 /app/code_interpreter

# RUN cd /app/packages/mcp/src/ && rm -rf manager.ts

# Copy custom endpoints config
# RUN curl -o /app/librechat.yaml https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/librechat-hf.yaml
COPY librechat.yaml /app/librechat.yaml
COPY tests.py /app/tests.py
COPY streamlit_app.py /app/streamlit_app.py
COPY gradio_file.py /app/gradio_file.py

# Install dependencies
RUN cd /app/api && npm install
USER root
# COPY manager.ts /app/packages/mcp/src/manager.ts

# Install Python and pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3-dev git py3-pip gcc libc-dev libc6-compat build-base mpc1-dev && ln -sf python3 /usr/bin/python
# RUN apk --update --no-cache add python3~3.10 --repository=http://dl-cdn.alpinelinux.org/alpine/edge/main
# RUN apk add --update --no-cache git gcc libc-dev libc6-compat build-base mpc1-dev && ln -sf python3 /usr/bin/python

# RUN python3 -m ensurepip

RUN pip3 install --no-cache --upgrade --break-system-packages pip setuptools mcp mcp-simple-pubmed mcp-simple-arxiv e2b-code-interpreter==1.0.4b litellm gradio XlsxWriter openpyxl google-genai requests-futures
# RUN cd /app/packages && git clone --branch patch-1 https://github.com/vaibhavard/actors-mcp-server
# RUN cd /app/packages/actors-mcp-server/ && npm install && npm run build && npm link
# RUN git clone https://github.com/e2b-dev/mcp-server && cd mcp-server/packages/js/ && npm install && npm run build && npm link

# Build and link the MCP servers used at runtime
RUN git clone https://github.com/AIGENHACKER/mcp-hfspace && cd mcp-hfspace && npm install && npm run build && npm link
RUN git clone https://github.com/exa-labs/exa-mcp-server && cd exa-mcp-server && npm install --save axios dotenv && npm run build && npm link

# Command to run on container start
CMD ["npm", "run", "backend"]
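As a quick local smoke test (a sketch, not part of this commit; the URL assumes the image is run with port 7860 published, matching ENV HOST/PORT above):

# smoke_test.py — minimal check that the LibreChat backend answers
# (hypothetical helper). Assumes the container runs locally with -p 7860:7860.
import requests

def backend_is_up(base_url: str = "http://localhost:7860") -> bool:
    """Return True if the backend responds with a non-error status."""
    try:
        return requests.get(base_url, timeout=5).ok
    except requests.RequestException:
        return False

if __name__ == "__main__":
    print("backend up:", backend_is_up())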
gradio_file.py
ADDED
@@ -0,0 +1,138 @@
import gradio as gr
import os
import shutil

# --- Configuration ---
INITIAL_PATH = os.getcwd()  # Start in the current working directory
ALLOWED_EXTENSIONS_UPLOAD = None  # Allow all file types for upload initially, or set a list like ['.txt', '.pdf']
ALLOWED_EXTENSIONS_DOWNLOAD = None  # Allow all for download, or restrict if needed

# --- Helper Functions ---

def list_files_and_folders(path):
    """Lists files and folders in a given directory."""
    try:
        items = os.listdir(path)
        files = []
        folders = []
        for item in items:
            item_path = os.path.join(path, item)
            if os.path.isfile(item_path):
                files.append(item)
            elif os.path.isdir(item_path):
                folders.append(item)
        return folders, files, path, None  # Return folders, files, current path, and no error
    except Exception as e:
        return [], [], path, str(e)  # Return empty lists and the error message

def change_directory(path, current_path, direction):
    """Changes the current directory (up or into a folder)."""
    new_path = current_path
    if direction == "up":
        new_path = os.path.dirname(current_path)
    elif direction == "enter":
        new_path = os.path.join(current_path, path)  # Path here is the selected folder
    elif direction == "refresh":
        new_path = current_path  # Just refresh the current directory

    if not os.path.exists(new_path) or not os.path.isdir(new_path):
        return [], [], current_path, "Invalid directory path."

    return list_files_and_folders(new_path)

def upload_file(files, current_path):
    """Uploads files to the current directory."""
    if not files:
        return [], [], current_path, "No files uploaded."

    uploaded_filenames = []
    for file in files:
        try:
            filename = os.path.basename(file.name)  # Get the original filename
            destination_path = os.path.join(current_path, filename)

            if ALLOWED_EXTENSIONS_UPLOAD:  # Check allowed extensions if specified
                file_extension = os.path.splitext(filename)[1].lower()
                if file_extension not in ALLOWED_EXTENSIONS_UPLOAD:
                    return [], [], current_path, f"File type '{file_extension}' not allowed for upload."

            shutil.copyfile(file.name, destination_path)  # Copy the uploaded file
            uploaded_filenames.append(filename)
        except Exception as e:
            return [], [], current_path, f"Error uploading file '{filename}': {str(e)}"

    # Refresh the file list after upload
    return list_files_and_folders(current_path)


def download_file(file_path, current_path):
    """Returns the file path for download."""
    full_file_path = os.path.join(current_path, file_path)  # Construct the full path

    if not os.path.isfile(full_file_path):
        return None, "File not found for download."

    if ALLOWED_EXTENSIONS_DOWNLOAD:  # Check allowed extensions if specified
        file_extension = os.path.splitext(full_file_path)[1].lower()
        if file_extension not in ALLOWED_EXTENSIONS_DOWNLOAD:
            return None, f"File type '{file_extension}' not allowed for download."

    return full_file_path, None


# --- Gradio Interface ---

with gr.Blocks() as demo:
    current_directory = gr.State(INITIAL_PATH)

    with gr.Row():
        current_path_display = gr.Textbox(value=INITIAL_PATH, label="Current Path", interactive=False, scale=4)
        refresh_button = gr.Button("Refresh", scale=1)
        up_button = gr.Button("Up", scale=1)

    with gr.Row():
        with gr.Column(scale=1):
            folder_output = gr.List([], headers=["Folders"], label="Folders", elem_id="folder-list", interactive=False)
            file_output = gr.List([], headers=["Files"], label="Files", elem_id="file-list", interactive=False)
        with gr.Column(scale=2):
            upload_component = gr.File(label="Upload Files", file_count="multiple")  # Allows multiple file uploads
            download_button = gr.File(label="Download Selected File")
            error_output = gr.Markdown(label="Status")

    # --- Functionality ---

    def _as_outputs(result):
        # The helpers return (folders, files, path, error); the UI needs the
        # path twice (state and display), so expand to five outputs.
        folders, files, path, error = result
        return folders, files, path, path, error

    def update_file_list(current_path):
        folders, files, updated_path, error = list_files_and_folders(current_path)
        return folders, files, updated_path, updated_path, error

    def on_folder_select(folder_rows, current_path_state, evt: gr.SelectData):
        # The clicked folder name arrives via gr.SelectData, not the component value.
        return _as_outputs(change_directory(evt.value, current_path_state, "enter"))

    def on_up_button_click(current_path_state):
        return _as_outputs(change_directory("", current_path_state, "up"))  # Path is empty for "up"

    def on_refresh_button_click(current_path_state):
        return _as_outputs(change_directory("", current_path_state, "refresh"))  # Path is empty for "refresh"

    def on_file_upload(uploaded_files, current_path_state):
        return _as_outputs(upload_file(uploaded_files, current_path_state))

    def on_file_select_for_download(file_rows, current_path_state, evt: gr.SelectData):
        # The clicked file name arrives via gr.SelectData.
        return download_file(evt.value, current_path_state)

    # --- Event Handlers ---

    demo.load(update_file_list, inputs=current_directory, outputs=[folder_output, file_output, current_directory, current_path_display, error_output])

    folder_output.select(on_folder_select, [folder_output, current_directory], [folder_output, file_output, current_directory, current_path_display, error_output])
    up_button.click(on_up_button_click, inputs=current_directory, outputs=[folder_output, file_output, current_directory, current_path_display, error_output])
    refresh_button.click(on_refresh_button_click, inputs=current_directory, outputs=[folder_output, file_output, current_directory, current_path_display, error_output])
    upload_component.upload(on_file_upload, [upload_component, current_directory], [folder_output, file_output, current_directory, current_path_display, error_output])
    file_output.select(on_file_select_for_download, [file_output, current_directory], [download_button, error_output])


if __name__ == "__main__":
    demo.launch(share=True, server_port=1337)
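For reference, the select-event pattern the handlers above rely on, in isolation (a minimal sketch with made-up component names; Gradio supplies the clicked cell through a gr.SelectData argument):

# Standalone illustration of the gr.SelectData pattern (hypothetical demo,
# not part of this commit).
import gradio as gr

with gr.Blocks() as mini:
    items = gr.List([["docs"], ["src"]], headers=["Folders"], interactive=False)
    picked = gr.Textbox(label="Picked")

    def on_select(rows, evt: gr.SelectData):
        # evt.value is the value of the clicked cell; rows is the whole table.
        return str(evt.value)

    items.select(on_select, inputs=items, outputs=picked)

if __name__ == "__main__":
    mini.launch()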
librechat.yaml
ADDED
@@ -0,0 +1,449 @@
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.2.1

# Cache settings: Set to true to enable caching
cache: true

# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true

  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true
    modalAcceptance: true
    modalTitle: "Terms of Service for LibreChat"
    modalContent: |
      # Terms and Conditions for LibreChat

      Please do not use ChatGPT, since LibreChat is better 😀

      Regards, Vaibhav

  endpointsMenu: true
  modelSelect: true
  parameters: true
  sidePanel: true
  presets: true
  prompts: true
  bookmarks: true
  multiConvo: true
  agents: true

# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
  # allowedDomains:
  #   - "gmail.com"

# speech:
#   tts:
#     openai:
#       url: ''
#       apiKey: '${TTS_API_KEY}'
#       model: ''
#       voices: ['']
#
#   stt:
#     openai:
#       url: ''
#       apiKey: '${STT_API_KEY}'
#       model: ''

# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60  # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60  # Rate limit window for conversation imports per user

# Example Actions Object Structure
actions:
  allowedDomains:
    - "swapi.dev"
    - "librechat.ai"
    - "google.com"
    - "https://api.e2b.dev"
    - "api.e2b.dev"

# Example MCP Servers Object Structure
mcpServers:
  # everything:
  #   type: sse  # type can optionally be omitted
  #   url: https://787d-182-69-182-121.ngrok-free.app/

  # memory:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@modelcontextprotocol/server-memory"
  #   timeout: 60000000

  # apify:
  #   type: stdio
  #   command: npx
  #   args:
  #     - -y
  #     - "@apify/actors-mcp-server"
  #     - --actors
  #     - apify/screenshot-url,apify/website-content-crawler,apify/puppeteer-scraper,apify/rag-web-browser,jancurn/screenshot-taker,apify/cheerio-scraper,apify/playwright-scraper,apify/ai-web-agent,marco.gullo/page-printer,dz_omar/example-website-screenshot-crawler,apify/legacy-phantomjs-crawler,lukaskrivka/article-extractor-smart
  #   timeout: 60000000
  #   env:
  #     APIFY_TOKEN: "apify_api_M3vftXQILokc2NDlhsc3twMBa5e7Be282swR"
  #     PATH: "/usr/local/bin:/usr/bin:/bin"
  #     NODE_PATH: "/usr/local/lib/node_modules"

  hfspace:
    type: stdio
    command: npx
    args:
      - -y
      - "@llmindset/mcp-hfspace"
      - --HF_TOKEN=${HF_TOKEN}
      - --work-dir=/app/uploads/temp/
      - Qwen/Qwen2.5-Max-Demo
      - evalstate/FLUX.1-schnell
    timeout: 60000000
    # env:
    #   E2B_API_KEY: "e2b_6eb042e8d60248f71b0aadcc05f29a7dd353b3e2"
    #   PATH: "/usr/local/bin:/usr/bin:/bin"
    #   NODE_PATH: "/usr/local/lib/node_modules"
  exa:
    type: stdio
    command: npx
    args:
      - -y
      - "/app/exa-mcp-server/build/index.js"
      # - --HF_TOKEN=${HF_TOKEN}
    timeout: 60000000
    env:
      EXA_API_KEY: "e4399980-1016-44ab-8789-1ef7f967a281"
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  arxiv:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_arxiv
    timeout: 60000000
  pubmed:
    type: stdio
    command: python
    args:
      - -m
      - mcp_simple_pubmed
    env:
      PUBMED_EMAIL: "[email protected]"
      PUBMED_API_KEY: "77ea72d89b98d279c1848389cd027a51c408"
      PATH: "/usr/local/bin:/usr/bin:/bin"
      NODE_PATH: "/usr/local/lib/node_modules"
  memory:
    type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-memory"
    timeout: 60000000
  filesystem:
    # type: stdio
    command: npx
    args:
      - -y
      - "@modelcontextprotocol/server-filesystem"
      - /app/
  codesandbox:
    type: stdio
    command: python
    args:
      - tests.py
    timeout: 60000000

# Definition of custom endpoints
endpoints:
  agents:
    recursionLimit: 50
    disableBuilder: false
    capabilities:
      # - "execute_code"
      - "file_search"
      - "actions"
      - "tools"

  custom:
    # together.ai
    # https://api.together.ai/settings/api-keys
    # Model list: https://docs.together.ai/docs/inference-models
    - name: "together.ai"
      apiKey: '${MISTRAL_API_KEY}'
      baseURL: "https://api.together.xyz"
      models:
        default: [
          "Gryphe/MythoMax-L2-13b",
          "Gryphe/MythoMax-L2-13b-Lite",
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
          "Qwen/QwQ-32B-Preview",
          "Qwen/Qwen2-72B-Instruct",
          "Qwen/Qwen2-VL-72B-Instruct",
          "Qwen/Qwen2.5-72B-Instruct-Turbo",
          "Qwen/Qwen2.5-7B-Instruct-Turbo",
          "Qwen/Qwen2.5-Coder-32B-Instruct",
          "databricks/dbrx-instruct",
          "deepseek-ai/DeepSeek-R1",
          "deepseek-ai/DeepSeek-V3",
          "deepseek-ai/deepseek-llm-67b-chat",
          "dev-vfs/Qwen2-VL-72B-Instruct",
          "devuser/test-lora-model-creation-1",
          "devuser/test-lora-model-creation-10",
          "devuser/test-lora-model-creation-2",
          "devuser/test-lora-model-creation-3",
          "devuser/test-lora-model-creation-4",
          "devuser/test-lora-model-creation-5",
          "devuser/test-lora-model-creation-6",
          "devuser/test-lora-model-creation-7",
          "devuser/test-lora-model-creation-8",
          "devuser/test-lora-model-creation-9",
          "google/gemma-2-27b-it",
          "google/gemma-2-9b-it",
          "google/gemma-2b-it",
          "jd/test-lora-model-creation-2",
          "jd/test-min-lora-model-creation-2",
          "justindriemeyer_tai/test-lora-model-creation-3",
          "justindriemeyer_tai/test-lora-model-creation-4",
          "justindriemeyer_tai/test-lora-model-creation-5",
          "justindriemeyer_tai/test-lora-model-creation-6",
          "justindriemeyer_tai/test-lora-model-creation-7",
          "llava-hf/llava-v1.6-mistral-7b-hf",
          "meta-llama/Llama-2-13b-chat-hf",
          "meta-llama/Llama-2-7b-chat-hf",
          "meta-llama/Llama-3-70b-chat-hf",
          "meta-llama/Llama-3-8b-chat-hf",
          "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
          "meta-llama/Llama-3.2-3B-Instruct-Turbo",
          "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo",
          "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
          "meta-llama/Llama-Vision-Free",
          "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
          "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
          "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
          "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K",
          "microsoft/WizardLM-2-8x22B",
          "mistralai/Mistral-7B-Instruct-v0.1",
          "mistralai/Mistral-7B-Instruct-v0.2",
          "mistralai/Mistral-7B-Instruct-v0.3",
          "mistralai/Mixtral-8x22B-Instruct-v0.1",
          "mistralai/Mixtral-8x7B-Instruct-v0.1",
          "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
          "salesforce/xgen-9b-instruct",
          "scb10x/llama-3-typhoon-v1.5-8b-instruct",
          "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
          "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
          "togethercomputer/Llama-3-8b-chat-hf-int4",
          "togethercomputer/Llama-3-8b-chat-hf-int8",
          "upstage/SOLAR-10.7B-Instruct-v1.0",
          "vfs/Qwen2-VL-72B-Instruct"
        ]
        fetch: false
      titleConvo: true
      titleModel: "openchat/openchat-3.5-1210"
      summarize: false
      summaryModel: "openchat/openchat-3.5-1210"
      forcePrompt: false
      modelDisplayLabel: "together.ai"
    - name: "OpenRouter"
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: "${OPENROUTER_KEY}" # NOT OPENROUTER_API_KEY
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["deepseek/deepseek-chat", "deepseek/deepseek-r1:free", "google/gemini-2.0-flash-thinking-exp:free", "google/gemini-2.0-flash-exp:free", "google/gemini-exp-1206:free"]
        fetch: true
      titleConvo: true
      titleModel: "google/gemini-2.0-flash-exp:free"
      # Recommended: Drop the stop parameter from the request, as OpenRouter models use a variety of stop tokens.
      dropParams: ["stop"]
      modelDisplayLabel: "OpenRouter"
    # Groq Example
    - name: 'groq'
      apiKey: '${GROQ_API_KEY}'
      baseURL: 'https://api.groq.com/openai/v1/'
      models:
        default:
          [
            'deepseek-r1-distill-qwen-32b',
            'deepseek-r1-distill-llama-70b',
            'llama-3.3-70b-versatile',
            'mixtral-8x7b-32768',
            'qwen-qwq-32b',
          ]
        fetch: false
      titleConvo: true
      titleModel: 'mixtral-8x7b-32768'
      modelDisplayLabel: 'groq'
    - name: 'Tiny' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '77ea72d89b98d279c1848389cd027a51c408'
      baseURL: 'https://962f-182-69-182-236.ngrok-free.app'

      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: ['gemini-2.0-flash-thinking-exp-01-21', 'deepseek.r1', 'deepseek-reasoner', 'deepseek-chat', 'gemini-2.0-pro-exp-02-05', 'deepseek-r1-distill-llama-70b', 'qwen-qwq-32b', 'QwQ-32B', 'llama-3.3-70b-versatile', 'DeepSeek-V3', 'DeepSeekV3-togetherAI', 'DeepSeek-R1', 'DeepSeekR1-togetherAI', 'gpt-4o', 'DeepSeek-R1-dev', 'DeepSeek-V3-dev']
        # Fetch option: Set to true to fetch models from API.
        # fetch: true # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      modelDisplayLabel: 'AI' # Default is "AI" when not set.
    - name: 'Tiny-DEV' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '77ea72d89b98d279c1848389cd027a51c408'
      baseURL: 'https://akiko19191-backend.hf.space/'

      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: ['gemini-2.0-flash-thinking-exp-01-21', 'deepseek.r1', 'deepseek-reasoner', 'deepseek-chat', 'gemini-2.0-pro-exp-02-05', 'deepseek-r1-distill-llama-70b', 'qwen-qwq-32b', 'QwQ-32B', 'llama-3.3-70b-versatile', 'DeepSeek-V3', 'DeepSeekV3-togetherAI', 'DeepSeek-R1', 'DeepSeekR1-togetherAI', 'gpt-4o', 'DeepSeek-R1-dev', 'DeepSeek-V3-dev']
        # Fetch option: Set to true to fetch models from API.
        # fetch: false # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      modelDisplayLabel: 'Tiny' # Default is "AI" when not set.
    # # Mistral AI Example
    # - name: 'Mistral' # Unique name for the endpoint
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   apiKey: '${MISTRAL_API_KEY}'
    #   baseURL: 'https://api.mistral.ai/v1'

    #   # Models configuration
    #   models:
    #     # List of default models to use. At least one value is required.
    #     default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
    #     # Fetch option: Set to true to fetch models from API.
    #     fetch: true # Defaults to false.

    #   # Optional configurations

    #   # Title Conversation setting
    #   titleConvo: true # Set to true to enable title conversation

    #   # Title Method: Choose between "completion" or "functions".
    #   # titleMethod: "completion" # Defaults to "completion" if omitted.

    #   # Title Model: Specify the model to use for titles.
    #   titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.

    #   # Summarize setting: Set to true to enable summarization.
    #   # summarize: false

    #   # Summary Model: Specify the model to use if summarization is enabled.
    #   # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

    #   # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
    #   # forcePrompt: false

    #   # The label displayed for the AI model in messages.
    #   modelDisplayLabel: 'Mistral' # Default is "AI" when not set.

    #   # Add additional parameters to the request. Default params will be overwritten.
    #   # addParams:
    #   #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/

    #   # Drop default parameters from the request. See default params in the guide linked below.
    #   # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
    #   dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']

    # # OpenRouter Example
    # - name: 'OpenRouter'
    #   # For `apiKey` and `baseURL`, you can use environment variables that you define.
    #   # recommended environment variables:
    #   # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
    #   apiKey: '${OPENROUTER_KEY}'
    #   baseURL: 'https://openrouter.ai/api/v1'
    #   models:
    #     default: ['meta-llama/llama-3-70b-instruct']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'meta-llama/llama-3-70b-instruct'
    #   # Recommended: Drop the stop parameter from the request, as OpenRouter models use a variety of stop tokens.
    #   dropParams: ['stop']
    #   modelDisplayLabel: 'OpenRouter'
    # together.ai
    # https://api.together.ai/settings/api-keys
    # Model list: https://docs.together.ai/docs/inference-models
    # # Portkey AI Example
    # - name: "Portkey"
    #   apiKey: "dummy"
    #   baseURL: 'https://api.portkey.ai/v1'
    #   headers:
    #     x-portkey-api-key: '${PORTKEY_API_KEY}'
    #     x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
    #   models:
    #     default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
    #     fetch: true
    #   titleConvo: true
    #   titleModel: 'current_model'
    #   summarize: false
    #   summaryModel: 'current_model'
    #   forcePrompt: false
    #   modelDisplayLabel: 'Portkey'
    #   iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
fileConfig:
  endpoints:
    agents:
      fileLimit: 5
      fileSizeLimit: 100 # Maximum size for an individual file in MB
      totalSizeLimit: 500 # Maximum total size for all files in a single request in MB
      supportedMimeTypes:
        - "image/.*"
        - "application/pdf"
        - "video/.*"
        - "application/vnd.ms-excel"
        - "audio/mp3"
        - "audio/mpeg"
        - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        - "application/msword"
    # openAI:
    #   disabled: true # Disables file uploading to the OpenAI endpoint
    # default:
    #   totalSizeLimit: 20
    # YourCustomEndpointName:
    #   fileLimit: 2
    #   fileSizeLimit: 5
  # serverFileSizeLimit: 100 # Global server file size limit in MB
  # avatarSizeLimit: 2 # Limit for user avatar image size in MB
# See the Custom Configuration Guide for more information on Assistants Config:
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
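An indentation slip in this file silently disables endpoints, so a quick parse check helps before rebuilding the image (a minimal sketch; it assumes PyYAML, which the Dockerfile does not install):

# check_config.py — sanity-check librechat.yaml (hypothetical helper, not in
# this commit). Assumes PyYAML is available: pip install pyyaml
import yaml

with open("librechat.yaml") as f:
    config = yaml.safe_load(f)

print("version:", config["version"])
print("mcp servers:", ", ".join(config["mcpServers"]))
print("custom endpoints:", [e["name"] for e in config["endpoints"]["custom"]])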
manager.ts
ADDED
@@ -0,0 +1,238 @@
import { CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
import type { JsonSchemaType } from 'librechat-data-provider';
import type { Logger } from 'winston';
import type * as t from './types/mcp';
import { formatToolContent } from './parsers';
import { MCPConnection } from './connection';
import { CONSTANTS } from './enum';

export class MCPManager {
  private static instance: MCPManager | null = null;
  private connections: Map<string, MCPConnection> = new Map();
  private logger: Logger;

  private static getDefaultLogger(): Logger {
    return {
      error: console.error,
      warn: console.warn,
      info: console.info,
      debug: console.debug,
    } as Logger;
  }

  private constructor(logger?: Logger) {
    this.logger = logger || MCPManager.getDefaultLogger();
  }

  public static getInstance(logger?: Logger): MCPManager {
    if (!MCPManager.instance) {
      MCPManager.instance = new MCPManager(logger);
    }
    return MCPManager.instance;
  }

  public async initializeMCP(mcpServers: t.MCPServers): Promise<void> {
    this.logger.info('[MCP] Initializing servers');

    const entries = Object.entries(mcpServers);
    const initializedServers = new Set();
    const connectionResults = await Promise.allSettled(
      entries.map(async ([serverName, config], i) => {
        const connection = new MCPConnection(serverName, config, this.logger);

        connection.on('connectionChange', (state) => {
          this.logger.info(`[MCP][${serverName}] Connection state: ${state}`);
        });

        try {
          const connectionTimeout = new Promise<void>((_, reject) =>
            setTimeout(() => reject(new Error('Connection timeout')), 1800000),
          );

          const connectionAttempt = this.initializeServer(connection, serverName);
          await Promise.race([connectionAttempt, connectionTimeout]);

          if (connection.isConnected()) {
            initializedServers.add(i);
            this.connections.set(serverName, connection);

            const serverCapabilities = connection.client.getServerCapabilities();
            this.logger.info(
              `[MCP][${serverName}] Capabilities: ${JSON.stringify(serverCapabilities)}`,
            );

            if (serverCapabilities?.tools) {
              const tools = await connection.client.listTools();
              if (tools.tools.length) {
                this.logger.info(
                  `[MCP][${serverName}] Available tools: ${tools.tools
                    .map((tool) => tool.name)
                    .join(', ')}`,
                );
              }
            }
          }
        } catch (error) {
          this.logger.error(`[MCP][${serverName}] Initialization failed`, error);
          throw error;
        }
      }),
    );

    const failedConnections = connectionResults.filter(
      (result): result is PromiseRejectedResult => result.status === 'rejected',
    );

    this.logger.info(`[MCP] Initialized ${initializedServers.size}/${entries.length} server(s)`);

    if (failedConnections.length > 0) {
      this.logger.warn(
        `[MCP] ${failedConnections.length}/${entries.length} server(s) failed to initialize`,
      );
    }

    entries.forEach(([serverName], index) => {
      if (initializedServers.has(index)) {
        this.logger.info(`[MCP][${serverName}] ✓ Initialized`);
      } else {
        this.logger.info(`[MCP][${serverName}] ✗ Failed`);
      }
    });

    if (initializedServers.size === entries.length) {
      this.logger.info('[MCP] All servers initialized successfully');
    } else if (initializedServers.size === 0) {
      this.logger.error('[MCP] No servers initialized');
    }
  }

  private async initializeServer(connection: MCPConnection, serverName: string): Promise<void> {
    const maxAttempts = 3;
    let attempts = 0;

    while (attempts < maxAttempts) {
      try {
        await connection.connect();

        if (connection.isConnected()) {
          return;
        }
      } catch (error) {
        attempts++;

        if (attempts === maxAttempts) {
          this.logger.error(`[MCP][${serverName}] Failed after ${maxAttempts} attempts`);
          throw error;
        }

        await new Promise((resolve) => setTimeout(resolve, 2000 * attempts));
      }
    }
  }

  public getConnection(serverName: string): MCPConnection | undefined {
    return this.connections.get(serverName);
  }

  public getAllConnections(): Map<string, MCPConnection> {
    return this.connections;
  }

  public async mapAvailableTools(availableTools: t.LCAvailableTools): Promise<void> {
    for (const [serverName, connection] of this.connections.entries()) {
      try {
        if (connection.isConnected() !== true) {
          this.logger.warn(`Connection ${serverName} is not connected. Skipping tool fetch.`);
          continue;
        }

        const tools = await connection.fetchTools();
        for (const tool of tools) {
          const name = `${tool.name}${CONSTANTS.mcp_delimiter}${serverName}`;
          availableTools[name] = {
            type: 'function',
            ['function']: {
              name,
              description: tool.description,
              parameters: tool.inputSchema as JsonSchemaType,
            },
          };
        }
      } catch (error) {
        this.logger.warn(`[MCP][${serverName}] Error fetching tools`, error);
      }
    }
  }

  public async loadManifestTools(manifestTools: t.LCToolManifest): Promise<void> {
    for (const [serverName, connection] of this.connections.entries()) {
      try {
        if (connection.isConnected() !== true) {
          this.logger.warn(`Connection ${serverName} is not connected. Skipping tool fetch.`);
          continue;
        }

        const tools = await connection.fetchTools();
        for (const tool of tools) {
          const pluginKey = `${tool.name}${CONSTANTS.mcp_delimiter}${serverName}`;
          manifestTools.push({
            name: tool.name,
            pluginKey,
            description: tool.description ?? '',
            icon: connection.iconPath,
          });
        }
      } catch (error) {
        this.logger.error(`[MCP][${serverName}] Error fetching tools`, error);
      }
    }
  }

  async callTool(
    serverName: string,
    toolName: string,
    provider: t.Provider,
    toolArguments?: Record<string, unknown>,
  ): Promise<t.FormattedToolResponse> {
    const connection = this.connections.get(serverName);
    if (!connection) {
      throw new Error(
        `No connection found for server: ${serverName}. Please make sure to use MCP servers available under 'Connected MCP Servers'.`,
      );
    }
    const result = await connection.client.request(
      {
        method: 'tools/call',
        params: {
          name: toolName,
          arguments: toolArguments,
        },
      },
      CallToolResultSchema,
    );
    return formatToolContent(result, provider);
  }

  public async disconnectServer(serverName: string): Promise<void> {
    const connection = this.connections.get(serverName);
    if (connection) {
      await connection.disconnect();
      this.connections.delete(serverName);
    }
  }

  public async disconnectAll(): Promise<void> {
    const disconnectPromises = Array.from(this.connections.values()).map((connection) =>
      connection.disconnect(),
    );
    await Promise.all(disconnectPromises);
    this.connections.clear();
  }

  public static async destroyInstance(): Promise<void> {
    if (MCPManager.instance) {
      await MCPManager.instance.disconnectAll();
      MCPManager.instance = null;
    }
  }
}
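initializeServer retries up to three times with a linear 2000 ms × attempt backoff. The same pattern in Python, for reuse elsewhere (a sketch; connect/is_connected are stand-ins, and unlike the TypeScript above it also counts a connect() that resolves without a live connection as a failed attempt):

# Retry-with-linear-backoff sketch mirroring MCPManager.initializeServer
# (illustrative; connect/is_connected are stand-ins for a real client).
import time

def initialize_server(connect, is_connected, max_attempts: int = 3) -> None:
    for attempt in range(1, max_attempts + 1):
        try:
            connect()
            if is_connected():
                return
        except Exception:
            if attempt == max_attempts:
                raise
        time.sleep(2 * attempt)  # 2 s, 4 s, ... between attempts
    raise RuntimeError(f"failed to connect after {max_attempts} attempts")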
streamlit_app.py
ADDED
@@ -0,0 +1,132 @@
# import streamlit as st
# import os
# import shutil

# def main():
#     st.title("File Explorer")

#     # File browsing
#     st.header("Browse Files")
#     current_dir = st.text_input("Current Directory", value=os.getcwd())

#     try:
#         files = os.listdir(current_dir)
#         selected_file = st.selectbox("Select a file/folder", files)
#         full_path = os.path.join(current_dir, selected_file)

#         if selected_file:
#             if os.path.isfile(full_path):
#                 st.write(f"You selected file: {selected_file}")

#                 # Download
#                 with open(full_path, "rb") as f:
#                     st.download_button("Download", f, file_name=selected_file)

#                 # Read
#                 if selected_file.endswith((".txt", ".py", ".md", ".csv")):
#                     try:
#                         with open(full_path, "r") as f_read:
#                             content = f_read.read()
#                         st.text_area("File content", content, height=300)
#                     except UnicodeDecodeError:
#                         st.write("Cannot display binary file content.")
#             elif os.path.isdir(full_path):
#                 st.write(f"You selected directory: {selected_file}")

#     except FileNotFoundError:
#         st.error("Invalid directory path.")
#     except PermissionError:
#         st.error("Permission denied to access this directory.")
#     except OSError as e:
#         st.error(f"OS error: {e}")

#     # File Uploading
#     st.header("Upload Files")
#     uploaded_files = st.file_uploader("Choose files to upload", accept_multiple_files=True)
#     upload_dir = st.text_input("Upload Directory", value=os.getcwd())
#     if uploaded_files:
#         for uploaded_file in uploaded_files:
#             try:
#                 with open(os.path.join(upload_dir, uploaded_file.name), "wb") as f:
#                     f.write(uploaded_file.getbuffer())
#                 st.success(f"Saved File: {uploaded_file.name} to {upload_dir}")
#             except Exception as e:
#                 st.error(f"Error saving file: {e}")

# if __name__ == "__main__":
#     main()

from flask import Flask, url_for, redirect
from flask import request as req
from flask_cors import CORS

app = Flask(__name__)
CORS(app)
from werkzeug.utils import secure_filename
import os
from PIL import Image
app.config['UPLOAD_FOLDER'] = "static"
from pyngrok import ngrok

# Open an HTTP tunnel to the app's port (1337)
# <NgrokTunnel: "https://<public_sub>.ngrok.io" -> "http://localhost:1337">
http_tunnel = ngrok.connect("1337", "http")
print(http_tunnel)

@app.route('/upload', methods=['GET', 'POST'])
def index():
    # If a POST request, handle the file upload
    if req.method == 'POST':
        if 'file' not in req.files:
            return redirect('/')

        file = req.files['file']

        if file:
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Shrink camera photos and screenshots so they stay small
            if ("camera" in file.filename or "capture" in file.filename or "IMG" in file.filename or "Screenshot" in file.filename):
                img = Image.open(f"static/{filename}")
                img.thumbnail((512, 512), Image.Resampling.LANCZOS)
                img.save(f"static/{filename}")
            return filename

    # Get the files in the directory and create list items to be displayed to the user
    file_list = ''
    for f in os.listdir(app.config['UPLOAD_FOLDER']):
        # Create the link HTML
        link = url_for("static", filename=f)
        file_list = file_list + '<li><a href="%s">%s</a></li>' % (link, f)

    # Format the return HTML - allow file upload and list all available files
    return_html = '''
    <!doctype html>
    <title>Upload File</title>
    <h1>Upload File</h1>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file><br>
      <input type=submit value=Upload>
    </form>
    <hr>
    <h1>Files</h1>
    <ol>%s</ol>
    ''' % file_list

    return return_html

if __name__ == '__main__':
    config = {
        'host': 'localhost',
        'port': 1337,
        'debug': True,
    }

    app.run(**config)
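The /upload route doubles as a small API: a multipart POST with a "file" field stores the file under static/ and returns the saved filename (tests.py below uses the same pattern in its upload_file helper). A minimal client sketch, assuming the app is running locally on port 1337 as configured in __main__:

# Minimal client for the /upload route above (illustrative, not in this commit).
import requests

def upload(path: str, url: str = "http://localhost:1337/upload") -> str:
    with open(path, "rb") as f:
        resp = requests.post(url, files={"file": (path, f)})
    resp.raise_for_status()
    return resp.text  # the server responds with the stored filename

if __name__ == "__main__":
    print(upload("example.txt"))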
tests.py
ADDED
@@ -0,0 +1,391 @@
1 |
+
from mcp.server.fastmcp import FastMCP
|
2 |
+
import random
|
3 |
+
import time
|
4 |
+
from litellm import completion
|
5 |
+
import shlex
|
6 |
+
from subprocess import Popen, PIPE
|
7 |
+
from threading import Timer
|
8 |
+
import os
|
9 |
+
import glob
|
10 |
+
import http.client
|
11 |
+
import json
|
12 |
+
import openpyxl
|
13 |
+
import shutil
|
14 |
+
from google import genai
|
15 |
+
|
16 |
+
client = genai.Client(api_key="AIzaSyDtP05TyoIy9j0uPL7_wLEhgQEE75AZQSc")
|
17 |
+
|
18 |
+
source_dir = "/app/uploads/temp"
|
19 |
+
destination_dir = "/app/code_interpreter"
|
20 |
+
files_list=[]
|
21 |
+
downloaded_files=[]
|
22 |
+
# os.environ.get('GROQ_API_KEY')
|
23 |
+
os.environ["GROQ_API_KEY"] ="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"
|
24 |
+
os.environ["GEMINI_API_KEY"] ="AIzaSyAQgAtQPpY0bQaCqCISGxeyF6tpDePx-Jg"
|
25 |
+
os.environ["OPENROUTER_API_KEY"] = "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"
|
26 |
+
mcp = FastMCP("code_sandbox")
|
27 |
+
data={}
|
28 |
+
result=""
|
29 |
+
stdout=""
|
30 |
+
stderr=""
|
31 |
+
import requests
|
32 |
+
import os
|
33 |
+
from bs4 import BeautifulSoup # For parsing HTML
|
34 |
+
|
35 |
+
|
36 |
+
def download_all_files(base_url, files_endpoint, download_directory):
|
37 |
+
"""Downloads all files listed on the server's /upload page."""
|
38 |
+
global downloaded_files
|
39 |
+
|
40 |
+
# Create the download directory if it doesn't exist
|
41 |
+
if not os.path.exists(download_directory):
|
42 |
+
os.makedirs(download_directory)
|
43 |
+
|
44 |
+
try:
|
45 |
+
# 1. Get the HTML of the /upload page
|
46 |
+
files_url = f"{base_url}{files_endpoint}"
|
47 |
+
response = requests.get(files_url)
|
48 |
+
response.raise_for_status() # Check for HTTP errors
|
49 |
+
|
50 |
+
# 2. Parse the HTML using BeautifulSoup
|
51 |
+
soup = BeautifulSoup(response.content, "html.parser")
|
52 |
+
|
53 |
+
# 3. Find all the <a> (anchor) tags, which represent the links to the files
|
54 |
+
# This assumes the file links are inside <a> tags as shown in the server code
|
55 |
+
file_links = soup.find_all("a")
|
56 |
+
|
57 |
+
# 4. Iterate through the links and download the files
|
58 |
+
for link in file_links:
|
59 |
+
try:
|
60 |
+
file_url = link.get("href") # Extract the href attribute (the URL)
|
61 |
+
if file_url:
|
62 |
+
# Construct the full file URL if the href is relative
|
63 |
+
if not file_url.startswith("http"):
|
64 |
+
file_url = f"{base_url}{file_url}" # Relative URLs
|
65 |
+
|
66 |
+
filename = os.path.basename(file_url) # Extract the filename from the URL
|
67 |
+
file_path = os.path.join(download_directory, filename)
|
68 |
+
if filename in downloaded_files:
|
69 |
+
pass
|
70 |
+
else:
|
71 |
+
downloaded_files.append(filename)
|
72 |
+
print(f"Downloading: {filename} from {file_url}")
|
73 |
+
|
74 |
+
# Download the file
|
75 |
+
file_response = requests.get(file_url, stream=True) # Use stream=True for large files
|
76 |
+
file_response.raise_for_status() # Check for HTTP errors
|
77 |
+
|
78 |
+
with open(file_path, "wb") as file: # Open in binary write mode
|
79 |
+
for chunk in file_response.iter_content(chunk_size=8192): # Iterate and write in chunks (good for large files)
|
80 |
+
if chunk: # filter out keep-alive new chunks
|
81 |
+
file.write(chunk)
|
82 |
+
|
83 |
+
print(f"Downloaded: {filename} to {file_path}")
|
84 |
+
|
85 |
+
except requests.exceptions.RequestException as e:
|
86 |
+
print(f"Error downloading {link.get('href')}: {e}")
|
87 |
+
except OSError as e: #Handles potential issues with file permissions or disk space.
|
88 |
+
print(f"Error saving {filename}: {e}")
|
89 |
+
|
90 |
+
except requests.exceptions.RequestException as e:
|
91 |
+
print(f"Error getting file list from server: {e}")
|
92 |
+
except Exception as e: # Catch all other potential errors
|
93 |
+
print(f"An unexpected error occurred: {e}")
|
94 |
+
|
95 |
+
def transfer_files():
|
96 |
+
for item in os.listdir(source_dir):
|
97 |
+
item_path = os.path.join(source_dir, item)
|
98 |
+
if os.path.isdir(item_path): # Check if it's a directory
|
99 |
+
for filename in os.listdir(item_path):
|
100 |
+
source_file_path = os.path.join(item_path, filename)
|
101 |
+
destination_file_path = os.path.join(destination_dir, filename)
|
102 |
+
shutil.move(source_file_path, destination_file_path)
|
103 |
+
|
104 |
+
def upload_file(file_path, upload_url):
|
105 |
+
"""Uploads a file to the specified server endpoint."""
|
106 |
+
|
107 |
+
try:
|
108 |
+
# Check if the file exists
|
109 |
+
if not os.path.exists(file_path):
|
110 |
+
raise FileNotFoundError(f"File not found: {file_path}")
|
111 |
+
|
112 |
+
# Prepare the file for upload
|
113 |
+
with open(file_path, "rb") as file:
|
114 |
+
files = {"file": (os.path.basename(file_path), file)} # Important: Provide filename
|
115 |
+
|
116 |
+
# Send the POST request
|
117 |
+
response = requests.post(upload_url, files=files)
|
118 |
+
|
119 |
+
# Check the response status code
|
120 |
+
response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx)
|
121 |
+
|
122 |
+
# Parse and print the response
|
123 |
+
if response.status_code == 200:
|
124 |
+
print(f"File uploaded successfully. Filename returned by server: {response.text}")
|
125 |
+
return response.text # Return the filename returned by the server
|
126 |
+
else:
|
127 |
+
print(f"Upload failed. Status code: {response.status_code}, Response: {response.text}")
|
128 |
+
return None
|
129 |
+
|
130 |
+
except FileNotFoundError as e:
|
131 |
+
print(e)
|
132 |
+
return None # or re-raise the exception if you want the program to halt
|
133 |
+
except requests.exceptions.RequestException as e:
|
134 |
+
print(f"Upload failed. Network error: {e}")
|
135 |
+
return None
|
136 |
+
|
137 |
+
|
138 |
+
TOKEN = "5182224145:AAEjkSlPqV-Q3rH8A9X8HfCDYYEQ44v_qy0"
|
139 |
+
chat_id = "5075390513"
|
140 |
+
from requests_futures.sessions import FuturesSession
|
141 |
+
session = FuturesSession()
|
142 |
+
|
143 |
+
def run(cmd, timeout_sec):
|
144 |
+
global stdout
|
145 |
+
global stderr
|
146 |
+
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE,cwd="/app/code_interpreter/")
|
147 |
+
timer = Timer(timeout_sec, proc.kill)
|
148 |
+
try:
|
149 |
+
timer.start()
|
150 |
+
stdout, stderr = proc.communicate()
|
151 |
+
finally:
|
152 |
+
timer.cancel()
|
153 |
+
|
154 |
+
|
155 |
+
@mcp.tool()
|
156 |
+
def analyse_audio(audiopath,query) -> dict:
|
157 |
+
"""Ask another AI model about audios.The AI model can listen to the audio and give answers.Eg-query:Generate detailed minutes of meeting from the audio clip,audiopath='/app/code_interpreter/<audioname>'.Note:The audios are automatically present in the /app/code_interpreter directory."""
|
158 |
+
download_all_files("https://opengpt-4ik5.onrender.com", "/upload", "/app/code_interpreter")
|
159 |
+
myfile = client.files.upload(file=audiopath)
|
160 |
+
|
161 |
+
response = client.models.generate_content(
|
162 |
+
model='gemini-2.0-flash',
|
163 |
+
contents=[query, myfile]
|
164 |
+
)
|
165 |
+
return {"Output":str(response.text)}
|
166 |
+
|
167 |
+
@mcp.tool()
|
168 |
+
def analyse_video(videopath,query) -> dict:
|
169 |
+
"""Ask another AI model about videos.The AI model can see the videos and give answers.Eg-query:Create a very detailed transcript and summary of the video,videopath='/app/code_interpreter/<videoname>'Note:The videos are automatically present in the /app/code_interpreter directory."""
|
170 |
+
download_all_files("https://opengpt-4ik5.onrender.com", "/upload", "/app/code_interpreter")
|
171 |
+
video_file = client.files.upload(file=videopath)
|
172 |
+
|
173 |
+
while video_file.state.name == "PROCESSING":
|
174 |
+
print('.', end='')
|
175 |
+
time.sleep(1)
|
176 |
+
video_file = client.files.get(name=video_file.name)
|
177 |
+
|
178 |
+
if video_file.state.name == "FAILED":
|
179 |
+
raise ValueError(video_file.state.name)
|
180 |
+
|
181 |
+
response = client.models.generate_content(
|
182 |
+
model='gemini-2.0-flash',
|
183 |
+
contents=[query, video_file]
|
184 |
+
)
|
185 |
+
return {"Output":str(response.text)}
|
186 |
+
|
187 |
+
|
188 |
+
@mcp.tool()
|
189 |
+
def analyse_images(imagepath,query) -> dict:
|
190 |
+
"""Ask another AI model about images.The AI model can see the images and give answers.Eg-query:Who is the person in this image?,imagepath='/app/code_interpreter/<imagename>'.Note:The images are automatically present in the /app/code_interpreter directory."""
|
191 |
+
download_all_files("https://opengpt-4ik5.onrender.com", "/upload", "/app/code_interpreter")
|
192 |
+
video_file = client.files.upload(file=imagepath)
|
193 |
+
|
194 |
+
|
195 |
+
response = client.models.generate_content(
|
196 |
+
model='gemini-2.0-flash',
|
197 |
+
contents=[query, video_file]
|
198 |
+
)
|
199 |
+
return {"Output":str(response.text)}
|
200 |
+
|
201 |
+
@mcp.tool()
def create_code_files(filename: str, code: str) -> dict:
    """Create a code file by passing the filename as well as the entire code to write. The file is created by default in the /app/code_interpreter directory. Note: all user-uploaded files that you might need to work on are stored in the /app/code_interpreter directory."""
    global destination_dir
    download_all_files("https://opengpt-4ik5.onrender.com", "/upload", "/app/code_interpreter")
    transfer_files()
    with open(os.path.join(destination_dir, filename), "w") as f:
        f.write(code)
    return {"info": "Task completed. The referenced code files were created successfully."}

@mcp.tool()
def run_code_files(start_cmd: str) -> dict:
    """Run a code file, e.g. start_cmd="python /app/code_interpreter/app.py" or "bash /app/code_interpreter/app.sh". The files must be inside the /app/code_interpreter directory."""
    global files_list
    global stdout
    global stderr
    run(start_cmd, 300)  # blocks until the command exits or the 300 s timer kills it
    time.sleep(1.5)
    # Upload any files the command created so the user gets download links.
    onlyfiles = glob.glob("/app/code_interpreter/*")
    onlyfiles = list(set(onlyfiles) - set(files_list))
    uploaded_filenames = []
    for files in onlyfiles:
        try:
            uploaded_filename = upload_file(files, "https://opengpt-4ik5.onrender.com/upload")
            uploaded_filenames.append(f"https://opengpt-4ik5.onrender.com/static/{uploaded_filename}")
        except Exception:
            pass
    files_list = onlyfiles
    return {"stdout": stdout, "stderr": stderr, "Files_download_link": uploaded_filenames}

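# Example (hypothetical script name): run_code_files("python /app/code_interpreter/app.py")
# returns {"stdout": ..., "stderr": ..., "Files_download_link": [...]}, where the links
# point at any new files the script wrote under /app/code_interpreter.
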
@mcp.tool()
def run_shell_command(cmd: str) -> dict:
    """Run a shell command, e.g. cmd="mkdir test". By default the command runs inside the /app/code_interpreter/ directory. Remember that the code interpreter runs on **Alpine Linux**, so write commands accordingly; e.g. sudo does not work and is not required."""
    global stdout
    global stderr

    run(cmd, 300)
    time.sleep(1.5)
    transfer_files()
    return {"stdout": stdout, "stderr": stderr}

@mcp.tool()
def install_python_packages(python_packages: str) -> dict:
    """Python packages to install, separated by spaces, e.g. python_packages="numpy matplotlib". The following packages are preinstalled: gradio XlsxWriter openpyxl."""
    package_names = python_packages.strip()
    command = "pip install"
    if not package_names:
        return {"info": "No packages specified; nothing was installed."}

    run(
        f"{command} --break-system-packages {package_names}", timeout_sec=300
    )
    time.sleep(2)
    return {"stdout": stdout, "stderr": stderr, "info": "Ran package installation command"}

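# Example: install_python_packages("numpy matplotlib") runs
# "pip install --break-system-packages numpy matplotlib" inside the container.
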
@mcp.tool()
def get_youtube_transcript(videoid: str) -> dict:
    """Get the transcript of a YouTube video by passing the video id, e.g. videoid="ZacjOVVgoLY". First search the web using google / exa for the relevant videos."""
    conn = http.client.HTTPSConnection("youtube-transcript3.p.rapidapi.com")
    headers = {
        'x-rapidapi-key': "2a155d4498mshd52b7d6b7a2ff86p10cdd0jsn6252e0f2f529",
        'x-rapidapi-host': "youtube-transcript3.p.rapidapi.com"
    }
    conn.request("GET", f"/api/transcript?videoId={videoid}", headers=headers)

    res = conn.getresponse()
    data = res.read()
    return json.loads(data)

@mcp.tool()
def read_excel_file(filename) -> dict:
    """Read the contents of an Excel file. Returns a dict mapping cell location to cell content (e.g. "A1": "Name"). Always run this tool first when working with Excel files. The file is automatically present in the /app/code_interpreter directory. Note: always use openpyxl in Python to work with Excel files."""
    global destination_dir
    download_all_files("https://opengpt-4ik5.onrender.com", "/upload", "/app/code_interpreter")

    workbook = openpyxl.load_workbook(os.path.join(destination_dir, filename))

    # Create an empty dictionary to store the data
    excel_data_dict = {}

    # Iterate over all sheets
    for sheet_name in workbook.sheetnames:
        sheet = workbook[sheet_name]
        # Iterate over all rows and columns
        for row in sheet.iter_rows():
            for cell in row:
                # Get cell coordinate (e.g., 'A1') and value
                cell_coordinate = cell.coordinate
                cell_value = cell.value
                if cell_value is not None:
                    excel_data_dict[cell_coordinate] = str(cell_value)
    return excel_data_dict

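# For a sheet where A1 holds "Name" and B1 holds "Revenue", read_excel_file returns
# {"A1": "Name", "B1": "Revenue", ...}; values are stringified and empty cells skipped.
# Note: cells from all sheets share one dict, so a coordinate used on two sheets keeps
# only the last sheet's value.
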
@mcp.tool()
def scrape_websites(url_list: list, query: str) -> dict:
    """Get the entire content of websites by passing in the url list. query is the question you want answered about the content, e.g. query="Give .pptx links in the website". Note: max urls in url_list is 3."""
    conn = http.client.HTTPSConnection("scrapeninja.p.rapidapi.com")

    headers = {
        'x-rapidapi-key': "2a155d4498mshd52b7d6b7a2ff86p10cdd0jsn6252e0f2f529",
        'x-rapidapi-host': "scrapeninja.p.rapidapi.com",
        'Content-Type': "application/json"
    }
    Output = []
    for urls in url_list:
        payload = json.dumps({"url": urls})
        conn.request("POST", "/scrape", payload, headers)
        res = conn.getresponse()
        data = res.read()
        content = str(data.decode("utf-8"))
        # Have an LLM strip the HTML into readable text and answer the query.
        response = completion(
            model="gemini/gemini-2.0-flash-exp",
            messages=[
                {"role": "user", "content": f"Output the following content in a human-readable format. Try to preserve all the links and the text, and to output the entire content. Remove the HTML code so it is human-readable. Also answer this question about the content in a separate paragraph: {query}. Here is the content: {content}"}
            ],
        )
        Output.append(response.choices[0].message.content)

    return {"website_content": Output}

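# Example (hypothetical URL): scrape_websites(["https://example.com"], "List all .pptx links")
# fetches each page via ScrapeNinja, then has Gemini strip the markup and answer the query.
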
@mcp.tool()
def deepthinking1(query: str, info: str) -> dict:
    """Ask another intelligent AI about the query. Pass the question through the query string, and what you already know about it (plus your own knowledge and ideas) through the info string."""
    response = completion(
        model="groq/deepseek-r1-distill-llama-70b",
        messages=[
            {"role": "user", "content": f"{query}. Here is what I know about the query: {info}"}
        ],
        stream=False
    )

    return {"response": str(response.choices[0].message.content)}

@mcp.tool()
def deepthinking2(query: str, info: str) -> dict:
    """Ask another intelligent AI about the query. Pass the question through the query string, and what you already know about it (plus your own knowledge and ideas) through the info string."""
    response = completion(
        model="openrouter/deepseek/deepseek-chat",
        messages=[
            {"role": "user", "content": f"{query}. Here is what I know about the query: {info}"}
        ],
        provider={"order": ["Together"], "allow_fallbacks": False},
    )

    return {"response": str(response.choices[0].message.content)}

@mcp.tool()
def deepthinking3(query: str, info: str) -> dict:
    """Ask another intelligent AI about the query. Pass the question through the query string, and what you already know about it (plus your own knowledge and ideas) through the info string."""
    response = completion(
        model="gemini/gemini-2.0-flash-thinking-exp-01-21",
        messages=[
            {"role": "user", "content": f"{query}. Here is what I know about the query: {info}"}
        ],
    )

    return {"response": str(response.choices[0].message.content)}

if __name__ == "__main__":
    # Initialize and run the server
    mcp.run(transport='stdio')


# @mcp.tool()
# def run_website(start_cmd: str, port=8501) -> dict:
#     """(start_cmd: streamlit run app.py). Always specify the sandbox id. Specify port (int) if different from 8501."""
#     output = sbx.commands.run(start_cmd, sandbox_id)
#     url = sbx.get_host(port)
#     info = {"info": f"Your Application is live [here](https://{url})"}
#
#     return info
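
# A minimal sketch of driving this server from an MCP stdio client, using the
# standard mcp Python SDK. The "python /app/tests.py" launch command is an
# assumption about how this server is started; the sketch is kept commented out
# so it never runs as part of the server module itself.
#
# import asyncio
# from mcp import ClientSession, StdioServerParameters
# from mcp.client.stdio import stdio_client
#
# async def main():
#     params = StdioServerParameters(command="python", args=["/app/tests.py"])
#     async with stdio_client(params) as (read, write):
#         async with ClientSession(read, write) as session:
#             await session.initialize()
#             tools = await session.list_tools()
#             print([t.name for t in tools.tools])
#             result = await session.call_tool("run_shell_command", {"cmd": "ls"})
#             print(result)
#
# asyncio.run(main())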