YasirAbdali committed on
Commit
ce04557
1 Parent(s): a367f33

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -29
app.py CHANGED
# app.py — Streamlit app that generates an abstractive summary of
# user-provided text with a fine-tuned BART seq2seq model.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import streamlit as st

# Model name — replace with the path to your fine-tuned model or a
# Hugging Face model ID.
MODEL_NAME = "YasirAbdali/bart-summarization"


@st.cache_resource
def _load_model(name):
    """Load and cache the tokenizer and model once per server process.

    Without caching, Streamlit re-loads the model on every widget
    interaction, since the whole script reruns on each event.
    """
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSeq2SeqLM.from_pretrained(name)
    model.eval()  # inference only; disables dropout
    return tokenizer, model


# Load tokenizer and model
try:
    tokenizer, model = _load_model(MODEL_NAME)
    st.write("Model and tokenizer loaded successfully.")
except Exception as e:
    st.error(f"Error loading model or tokenizer: {e}")
    st.stop()

# Streamlit app
st.title("Summary Generator")

# User input
topic = st.text_area("Enter text:")
max_length = st.slider(
    "Maximum length of generated text:",
    min_value=100,
    max_value=500,
    value=200,
    step=50,
)

if topic:
    # Tokenize input. truncation=True keeps the input within the model's
    # maximum context window so very long pastes don't crash generation.
    try:
        input_ids = tokenizer.encode(topic, return_tensors="pt", truncation=True)
        st.write("Input text tokenized successfully.")
    except Exception as e:
        st.error(f"Error tokenizing input text: {e}")
        st.stop()

    # Generate summary
    try:
        with torch.no_grad():
            output = model.generate(
                input_ids,
                max_length=max_length,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                # do_sample=True is required: top_k / top_p / temperature
                # are silently ignored under the default greedy decoding.
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.7,
            )
        st.write("Summary generated successfully.")
    except Exception as e:
        st.error(f"Error generating summary: {e}")
        st.stop()

    # Decode and display generated summary. st.stop() on failure so the
    # download button below can never reference an undefined variable.
    try:
        generated_summary = tokenizer.decode(output[0], skip_special_tokens=True)
    except Exception as e:
        st.error(f"Error decoding generated summary: {e}")
        st.stop()

    st.subheader("Generated Summary:")
    st.markdown(generated_summary)

    # Option to download the summary
    st.download_button(
        label="Download Summary",
        data=generated_summary,
        file_name="generated_summary.txt",
        mime="text/plain",
    )