import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import streamlit as st

# Model name
model_name = "YasirAbdali/bart-summarization"  # Replace with the path to your fine-tuned model or Hugging Face model ID

# Load tokenizer and model
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    st.write("Model and tokenizer loaded successfully.")
except Exception as e:
    st.error(f"Error loading model or tokenizer: {e}")
    st.stop()

# Streamlit app
st.title("Summary Generator")

# User input
topic = st.text_area("Enter text:")
max_length = st.slider("Maximum length of generated summary:", min_value=100, max_value=500, value=200, step=50)

if topic:
    # Tokenize input, truncating to the model's maximum input length
    try:
        input_ids = tokenizer.encode(topic, return_tensors="pt", truncation=True)
        st.write("Input text tokenized successfully.")
    except Exception as e:
        st.error(f"Error tokenizing input text: {e}")
        st.stop()

    # Generate summary
    try:
        with torch.no_grad():
            output = model.generate(
                input_ids,
                max_length=max_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,  # required for top_k, top_p, and temperature to take effect
                top_k=50,
                top_p=0.95,
                temperature=0.7,
            )
        st.write("Summary generated successfully.")
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        st.stop()

    # Decode and display generated summary
    try:
        generated_summary = tokenizer.decode(output[0], skip_special_tokens=True)
        st.subheader("Generated Summary:")
        st.markdown(generated_summary)
    except Exception as e:
        st.error(f"Error decoding generated summary: {e}")
        st.stop()  # stop here so the download button below never sees an undefined summary

    # Option to download the summary
    st.download_button(
        label="Download Summary",
        data=generated_summary,
        file_name="generated_summary.txt",
        mime="text/plain",
    )
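
# To try the app locally, save this script (e.g. as app.py -- the filename is
# arbitrary) and launch it with Streamlit's CLI:
#
#   streamlit run app.py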