YasirAbdali committed
Commit a367f33 · 1 Parent(s): f069082

Update app.py

Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -2,8 +2,10 @@ import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import streamlit as st
 
-# Load GPT-2 model and tokenizer
+# Model name
 model_name = "YasirAbdali/bart-summarization"
+
+# Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
@@ -16,9 +18,8 @@ max_length = st.slider("Maximum length of generated text:", min_value=100, max_v
 
 if topic:
     # Construct a detailed prompt
-    prompt = f"""Summarize the following text; {topic}.
-    """
-
+    prompt = f"Summarize the following text: {topic}"
+
     # Tokenize input
     input_ids = tokenizer.encode(prompt, return_tensors="pt")
 
@@ -40,10 +41,11 @@ if topic:
     st.subheader("Generated Summary:")
     st.markdown(generated_text)
 
-    # Option to download the blog post
+    # Option to download the summary
     st.download_button(
        label="Download Summary",
        data=generated_text,
        file_name="generated_summary.md",
        mime="text/markdown"
-    )
+    )
+
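For reference, the updated app.py after this commit might look roughly like the sketch below. Only the hunks above come from the diff; the page title, the text-input widget, the slider bounds, and the generation/decoding step are not visible in the commit and are filled in here as plausible assumptions.

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import streamlit as st

# Model name
model_name = "YasirAbdali/bart-summarization"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# UI widgets (assumed: only the slider label is visible in a hunk header above)
st.title("BART Summarization Demo")                # assumed
topic = st.text_area("Enter text to summarize:")   # assumed
max_length = st.slider("Maximum length of generated text:",
                       min_value=100, max_value=500, value=200)  # upper bound/default assumed

if topic:
    # Construct a detailed prompt
    prompt = f"Summarize the following text: {topic}"

    # Tokenize input
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Generate summary (assumed: this step is not part of the diff)
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_length=max_length,
                                    num_beams=4, early_stopping=True)
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Display the summary
    st.subheader("Generated Summary:")
    st.markdown(generated_text)

    # Option to download the summary
    st.download_button(
        label="Download Summary",
        data=generated_text,
        file_name="generated_summary.md",
        mime="text/markdown"
    )

A file like this can be run locally with "streamlit run app.py" once torch, transformers, and streamlit are installed.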