import streamlit as st
import torch
from unsloth import FastLanguageModel


@st.cache_resource
def load_model_and_tokenizer(model_name, hf_token):
    # Load the 4-bit quantized model and its tokenizer via Unsloth
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_name,
        max_seq_length=2048,
        dtype=None,
        load_in_4bit=True,
        token=hf_token,
    )
    FastLanguageModel.for_inference(model)  # Enable optimized inference
    return model, tokenizer


def generate_solution(problem, model, tokenizer):
    # Prepare the prompt using the same format as training.
    # The doubled braces survive .format() as literal "{}" markers,
    # mirroring the two response slots from the training template.
    prompt_template = """Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.
Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.

### Instruction:
You are a math expert. Please solve the following math problem.

### Problem:
{}

### Solution:
{{}}
{{}}"""
    prompt = prompt_template.format(problem)

    # Tokenize and move the inputs to the GPU
    inputs = tokenizer(
        [prompt],
        return_tensors="pt",
        padding=True,
    ).to("cuda")

    # Generate the solution
    outputs = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=1200,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
        use_cache=True,
    )

    # Decode and keep only the text after the "### Solution:" header
    full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    try:
        solution = full_response.split("### Solution:")[1].strip()
    except IndexError:
        solution = full_response  # Fallback in case formatting fails
    return solution


# Streamlit app
st.title("Math Problem Solver")

hf_token = st.text_input("Enter your Hugging Face token:", type="password")
model_name = "shukdevdatta123/DeepSeek-R1-Math-Solutions"

if hf_token:
    # Load the model and tokenizer (cached across Streamlit reruns)
    model, tokenizer = load_model_and_tokenizer(model_name, hf_token)

    # Input for a custom problem
    custom_problem = st.text_input("Enter a math problem:")

    if st.button("Generate Solution"):
        if custom_problem:
            solution = generate_solution(custom_problem, model, tokenizer)
            st.write("### Generated Solution:")
            st.write(solution)
        else:
            st.error("Please enter a math problem.")
else:
    st.warning("Please enter your Hugging Face token to load the model.")
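
# Usage sketch (assumptions: this file is saved as app.py and a CUDA GPU is
# available, since the tokenized inputs are moved to "cuda" above):
#   pip install streamlit unsloth
#   streamlit run app.py
# Then paste a Hugging Face token with read access to the model repo into the
# password field to trigger the (cached) model download and load.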