"""Streamlit app: summarize user-provided text with a T5 model."""

import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch


# Model/tokenizer loading is expensive; cache the resource so Streamlit
# does not re-download/re-load on every rerun (each button click reruns
# the whole script top-to-bottom).
MODEL_NAME = "t5-small"  # or your model name


@st.cache_resource
def _load_model(name: str):
    """Load and cache the tokenizer/model pair from the Hugging Face Hub."""
    tokenizer = T5Tokenizer.from_pretrained(name)
    model = T5ForConditionalGeneration.from_pretrained(name)
    return tokenizer, model


# Keep the original module-level names so any external references still work.
model_name = MODEL_NAME
tokenizer, model = _load_model(MODEL_NAME)


def generate_summary(text: str) -> str:
    """Return an abstractive summary of *text*.

    The input is prefixed with T5's "summarize:" task tag, truncated to the
    model's 1024-token limit, and decoded with beam search.
    """
    batch = ["summarize: " + text]
    encoded = tokenizer(batch, max_length=1024, truncation=True, return_tensors="pt")
    outputs = model.generate(
        encoded.input_ids.to(model.device),
        max_length=150,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Custom CSS styling
# NOTE(review): the style block is empty in the recovered source — the CSS
# was presumably lost in a paste; restore it or drop this call.
st.markdown("""""", unsafe_allow_html=True)

# Application UI
st.title(" Text Summarizer App")

text = st.text_area("Enter the text you want to summarize...", height=200)

col1, col2 = st.columns(2)

with col1:
    if st.button("Generate Summary"):
        if text:
            with st.spinner("Generating summary..."):
                summary = generate_summary(text)
            # NOTE(review): the surrounding HTML tags of this block appear to
            # have been stripped during extraction; the literal text below is
            # what survived. Restore the original markup if available.
            st.markdown(
                "\nSummary\n" + summary + "\n",
                unsafe_allow_html=True,
            )
        else:
            st.warning("⚠️ Please enter text to summarize.")

with col2:
    if st.button("Refresh"):
        st.rerun()  # change here

# Footer
# NOTE(review): footer markup is also empty in the recovered source.
st.markdown("""""", unsafe_allow_html=True)