import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM


# Load GPT-2 model and tokenizer once and cache them across Streamlit reruns
@st.cache_resource
def load_model(model_name: str = "gpt2"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    model.eval()
    return tokenizer, model


tokenizer, model = load_model()

# Streamlit app
st.title("Blog Post Generator")

# User input
topic = st.text_input("Enter a blog post topic:")
# GPT-2's context window is 1024 tokens, so the slider is capped there
max_length = st.slider(
    "Maximum length of generated text:",
    min_value=100,
    max_value=1024,
    value=500,
    step=50,
)

if topic:
    # Construct a prompt from the user's topic
    prompt = f"Write a well-formatted blog post about {topic}.\n\n"

    # Tokenize input
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate text; do_sample=True is required for top_k/top_p/temperature
    # to take effect, and pad_token_id is set explicitly because GPT-2
    # does not define one
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            do_sample=True,
            no_repeat_ngram_size=2,
            top_k=50,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode and display generated text
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    st.subheader("Generated Blog Post:")
    st.markdown(generated_text)

    # Option to download the blog post as a Markdown file
    st.download_button(
        label="Download Blog Post",
        data=generated_text,
        file_name="generated_blog.md",
        mime="text/markdown",
    )
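
# A minimal way to launch the app, assuming the script above is saved as
# app.py (the filename is an assumption) and the dependencies are installed:
#
#     pip install streamlit torch transformers
#     streamlit run app.py
#
# Streamlit serves the app locally (by default at http://localhost:8501)
# and reruns this script top to bottom on each user interaction, which is
# why the model loading above is wrapped in @st.cache_resource.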