Commit 21bfd6f
Parent(s): 8c7f1c1
shifted to google gemini
TechdocsAPI/backend/__init__.py
CHANGED
@@ -1,5 +1,4 @@
 import mysql.connector
-from mysql.connector import errorcode
 
 from fastapi import FastAPI, status
 from fastapi.exceptions import HTTPException
@@ -8,14 +7,16 @@ from fastapi.templating import Jinja2Templates
 from backend.utils import DBConnection
 from backend.core.ConfigEnv import config
 
-from langchain.llms import Clarifai
+# from langchain.llms import Clarifai
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
+from langchain_google_genai import GoogleGenerativeAI
 
-app = FastAPI(
-
-
-
+app = FastAPI(
+    title="Techdocs",
+    version="V0.0.1",
+    description="API for automatic code documentation generation!",
+)
 
 from backend import router
 
@@ -24,31 +25,23 @@ try:
     test_conn = DBConnection.get_client().get_server_info()
 
     # send prompt wizardcoderLM-70b-instruct-GGUF model
-    with open("backend/utils/prompt.txt",
+    with open("backend/utils/prompt.txt", "r") as f:
         prompt = f.read()
 
-    prompt = PromptTemplate(template=prompt, input_variables=[
+    prompt = PromptTemplate(template=prompt, input_variables=["instruction"])
 
-    llm =
-
-
-        app_id = config.APP_ID,
-        model_id = config.MODEL_ID,
-        model_version_id=config.MODEL_VERSION_ID,
+    llm = GoogleGenerativeAI(
+        model = "gemini-pro",
+        google_api_key=config.GOOGLE_API_KEY,
     )
 
-    llmchain = LLMChain(
-        prompt=prompt,
-        llm=llm
-    )
+    llmchain = LLMChain(prompt=prompt, llm=llm)
     app.state.llmchain = llmchain
 
-
     app.state.templates = Jinja2Templates(directory="./backend/templates")
 
 
-
 except mysql.connector.Error as err:
-    raise HTTPException(
-
-
+    raise HTTPException(
+        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(err)
+    )
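For context, here is a minimal standalone sketch of the chain this commit builds at startup. It assumes a GOOGLE_API_KEY environment variable; the inline template string and the __main__ demo are illustrative stand-ins for the repo's backend/utils/prompt.txt and FastAPI startup code, not the app's actual wiring.

import os

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_google_genai import GoogleGenerativeAI

# Illustrative template; the app reads its real template from
# backend/utils/prompt.txt and fills the {instruction} placeholder.
prompt = PromptTemplate(
    template="Write a docstring for this code:\n{instruction}",
    input_variables=["instruction"],
)

# Gemini-backed LLM; the app passes config.GOOGLE_API_KEY rather than os.environ.
llm = GoogleGenerativeAI(
    model="gemini-pro",
    google_api_key=os.environ["GOOGLE_API_KEY"],
)

llmchain = LLMChain(prompt=prompt, llm=llm)

if __name__ == "__main__":
    # invoke() returns the chain inputs plus the generated output under "text".
    result = llmchain.invoke({"instruction": "def add(a, b): return a + b"})
    print(result["text"])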
TechdocsAPI/backend/core/ConfigEnv.py
CHANGED
@@ -14,15 +14,9 @@ class Settings(BaseSettings):
     JWT_SECRET_KEY:str
     JWT_REFRESH_SECRET_KEY:str
     JWT_VERIFICATION_SECRET_KEY:str
-
-    APP_ID:str
-    USER_ID:str
-    MODEL_ID:str
-    CLARIFAI_PAT:str
-    MODEL_VERSION_ID:str
+    GOOGLE_API_KEY:str
 
     MAIL_SERVER_URL:str
-
     MAIL_USERNAME:str
     MAIL_PASSWORD:str
     MAIL_FROM:str
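A trimmed sketch of how the new GOOGLE_API_KEY field gets populated. The real Settings class declares many more fields, and the .env handling shown here is an assumption about how pydantic's BaseSettings (v1, matching the pinned pydantic==1.10.12) is typically configured.

from pydantic import BaseSettings  # pydantic 1.x API

class Settings(BaseSettings):
    # Only the field added by this commit is shown.
    GOOGLE_API_KEY: str

    class Config:
        env_file = ".env"  # assumption: values may also come straight from the process environment

config = Settings()  # raises a validation error if GOOGLE_API_KEY is missing
print(bool(config.GOOGLE_API_KEY))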
TechdocsAPI/backend/services/auth/ops.py
CHANGED
@@ -128,9 +128,9 @@ def ops_inference(source_code:str,api_key:str,username:str):
 def generate_docstring(source_code_message: str):
 
 
-    llm_response = app.state.llmchain.
+    llm_response = app.state.llmchain.invoke({"instruction": source_code_message})
 
-    docstring = Inference(docstr=llm_response)
+    docstring = Inference(docstr=llm_response["text"])
 
     return docstring
 
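Switching to invoke() changes the return type: LLMChain.invoke takes a dict keyed by the prompt's input variables and returns that dict plus the generated output under LLMChain's default "text" output key, which is why the new code indexes llm_response["text"]. A small hedged sketch of that call path follows; the helper name is illustrative and not part of the repo.

from langchain.chains import LLMChain

def run_docstring_chain(chain: LLMChain, source_code_message: str) -> str:
    # `chain` stands in for the Gemini-backed LLMChain stored on
    # app.state.llmchain at startup.
    llm_response = chain.invoke({"instruction": source_code_message})
    # e.g. {"instruction": "...", "text": '"""Generated docstring."""'}
    return llm_response["text"]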
TechdocsAPI/requirements.txt
CHANGED
@@ -5,8 +5,8 @@ pydantic==1.10.12
 python-jose[cryptography]
 passlib[bcrypt]
 mysql-connector-python
+langchain-google-genai
 pydantic[email]
 langchain
-clarifai
 Pillow
 jinja2
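The Gemini integration used above ships in the separate langchain-google-genai package rather than in core langchain, so the new entry is needed alongside the existing langchain line, while the clarifai SDK can be dropped once the Clarifai-hosted model is no longer called.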