# app.py — Gradio + LangChain Falcon-7B chat demo
# (source metadata: vishnusureshperumbavoor, commit d6809ae, "Update app.py")
# --- Environment & model setup ------------------------------------------------
import os

import gradio as gr
from dotenv import load_dotenv
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

# Pull variables from a local .env file (expects HUGGINGFACEHUB_API_TOKEN).
load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Falcon-7B-Instruct served via the Hugging Face hosted Inference API.
repo_id = "tiiuae/falcon-7b-instruct"
llm = HuggingFaceHub(
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    repo_id=repo_id,
    model_kwargs={"temperature": 0.7, "max_new_tokens": 700},
)
# Prompt wrapper for the user's question; the chain substitutes {question}.
template = """
You are a helpful AI assistant and provide the answer for the question asked politely.
{question}
Answer: Let's think step by step.
"""

prompt = PromptTemplate(input_variables=["question"], template=template)
llm_chain = LLMChain(llm=llm, prompt=prompt)
# Define the function that will be used in Gradio
def generate_answer(question):
    """Run the LLM chain on *question* and return the model's answer text."""
    return llm_chain.run(question)
# --- Gradio UI ----------------------------------------------------------------
# Fix: gr.inputs.Textbox / gr.outputs.Textbox were deprecated in Gradio 3.x and
# removed in 4.x; component classes now live at the top level (gr.Textbox).
iface = gr.Interface(
    fn=generate_answer,
    inputs=gr.Textbox(),
    outputs=gr.Textbox(),
    title="VSP Bot",
    description="Created by VSP",
)

# Launch the web UI (blocks until the server is stopped).
iface.launch()