Spaces: Build error
import streamlit as st
from utils import (
    load_model,
    convert_to_torchscript,
    convert_to_onnx,
    convert_to_gguf,
    convert_to_tf_saved_model,
    convert_to_pytorch,
    get_hf_token
)
st.title("Model Conversion")

# Load the HF token from utils
hf_token = get_hf_token()

# Load the model
model_path = "fine_tuned_model.pt"
tokenizer, model = load_model("google/gemma-3-1b-it", hf_token, model_path)
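
The helpers above come from a utils module that is not part of this listing. A minimal sketch of what get_hf_token and load_model might look like, assuming the token is provided as an HF_TOKEN environment variable (a Space secret) and fine_tuned_model.pt is a state dict written with torch.save; the bodies below are illustrative, not the Space's actual code:

```python
# Hypothetical sketch of the loading helpers in utils.py (assumed, not the real file).
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def get_hf_token():
    # Assumption: the token is exposed as an environment variable,
    # e.g. an HF_TOKEN secret configured on the Space.
    return os.environ.get("HF_TOKEN")

def load_model(base_model_id, hf_token, state_dict_path):
    # Pull the tokenizer and base model from the Hub (Gemma is gated, so a token is required).
    tokenizer = AutoTokenizer.from_pretrained(base_model_id, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(base_model_id, token=hf_token)
    # Assumption: fine_tuned_model.pt holds a state dict saved with torch.save().
    model.load_state_dict(torch.load(state_dict_path, map_location="cpu"))
    model.eval()
    return tokenizer, model
```

Note that google/gemma-3-1b-it is a gated checkpoint and needs a recent transformers release; a missing token or an older pinned transformers in requirements.txt is a common cause of a Space build or startup failure.
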
# Select conversion format
conversion_option = st.selectbox(
    "Select Conversion Format",
    ["TorchScript", "ONNX", "GGUF", "TensorFlow SavedModel", "PyTorch"]
)
if st.button("Convert Model"):
    if conversion_option == "TorchScript":
        with st.spinner("Converting to TorchScript..."):
            ts_model = convert_to_torchscript(model)
            st.success("Model converted to TorchScript!")
    elif conversion_option == "ONNX":
        with st.spinner("Converting to ONNX..."):
            onnx_path = convert_to_onnx(model)
            st.success(f"Model converted to ONNX! Saved at: {onnx_path}")
    elif conversion_option == "GGUF":
        with st.spinner("Converting to GGUF..."):
            gguf_path = convert_to_gguf(model)
            st.success(f"Model converted to GGUF! Saved at: {gguf_path}")
    elif conversion_option == "TensorFlow SavedModel":
        with st.spinner("Converting to TensorFlow SavedModel..."):
            tf_path = convert_to_tf_saved_model(model)
            st.success(f"Model converted to TensorFlow SavedModel! Saved at: {tf_path}")
    elif conversion_option == "PyTorch":
        with st.spinner("Converting to PyTorch..."):
            pytorch_path = convert_to_pytorch(model)
            st.success(f"Model saved in PyTorch format! Saved at: {pytorch_path}")