import streamlit as st
from langchain.prompts import PromptTemplate
# On langchain >= 0.2, CTransformers lives in the community package:
# from langchain_community.llms import CTransformers
from langchain.llms import CTransformers
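
# PromptTemplate is imported above but never used below; a minimal sketch of
# how it could structure the raw user input (the "question" variable and the
# template wording are illustrative assumptions, not from the original app):
EXAMPLE_PROMPT = PromptTemplate(input_variables=["question"],
                                template="Answer the question concisely:\n{question}")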


def getllamaresponse(prompt_template):
    # CTransformers expects a Hugging Face repo id (or a local path) pointing
    # at a quantized GGML/GGUF model file, not a full URL.
    llm = CTransformers(model='nvidia/Llama3-ChatQA-1.5-8B',
                        model_type='llama',
                        config={"max_new_tokens": 300, "temperature": 0.01})

    # A temperature near zero keeps the completion close to deterministic.
    response = llm(prompt_template)
    return response
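

# Optional sketch (not in the original app): reloading the model on every
# button click is slow, so the loaded LLM can be cached across Streamlit
# reruns with the st.cache_resource decorator.
@st.cache_resource
def load_llm():
    return CTransformers(model='nvidia/Llama3-ChatQA-1.5-8B',
                         model_type='llama',
                         config={"max_new_tokens": 300, "temperature": 0.01})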


st.set_page_config(page_title='Personal AI',
                   page_icon='🖥️',
                   layout='centered',
                   initial_sidebar_state='collapsed')

st.header("Personal AI 🖥️")

prompt_template = st.text_input("Enter the Prompt Template")

submit = st.button("Generate")

# Only call the model when the button is pressed and the input is non-empty.
if submit and prompt_template:
    st.write(getllamaresponse(prompt_template))
elif submit:
    st.warning("Please provide a prompt template.")
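
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py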