import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

st.title("Quantized Code Generator (Fine-Tuned Model)")

# Load the fine-tuned model and tokenizer from the local directory
model_dir = "quantization_model"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir)

# Build a text-generation pipeline around the loaded model
code_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Prompt the user for a code snippet to complete
input_text = st.text_input("Please enter the text", value="def quicksort(arr):")

if st.button("Submit"):
    # Generate a single completion of up to 200 tokens and display it
    generated_code = code_generator(input_text, max_length=200, num_return_sequences=1)
    st.write(generated_code[0]["generated_text"])
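
To run the app locally, a minimal sketch of the launch steps, assuming the script above is saved as app.py and the fine-tuned model files sit in a quantization_model/ directory next to it:

pip install streamlit transformers torch
streamlit run app.py

Streamlit serves the page in the browser; entering a prompt such as "def quicksort(arr):" and pressing Submit displays the generated continuation from the fine-tuned model.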