YasirAbdali committed on
Commit
4bffa5f
1 Parent(s): ce04557

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -33
app.py CHANGED
@@ -1,20 +1,16 @@
1
  import torch
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
  import streamlit as st
4
 
5
-
6
-
7
-
8
  # Model name
9
- model_name = "YasirAbdali/bart-summarization" # Replace with the path to your fine-tuned model or Hugging Face model ID
10
 
11
- # Load tokenizer and model
12
  try:
13
- tokenizer = AutoTokenizer.from_pretrained(model_name)
14
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
15
- st.write("Model and tokenizer loaded successfully.")
16
  except Exception as e:
17
- st.error(f"Error loading model or tokenizer: {e}")
18
  st.stop()
19
 
20
  # Streamlit app
@@ -25,38 +21,21 @@ topic = st.text_area("Enter text:")
25
  max_length = st.slider("Maximum length of generated text:", min_value=100, max_value=500, value=200, step=50)
26
 
27
  if topic:
28
- # Tokenize input
29
- try:
30
- input_ids = tokenizer.encode(topic, return_tensors="pt")
31
- st.write("Input text tokenized successfully.")
32
- except Exception as e:
33
- st.error(f"Error tokenizing input text: {e}")
34
- st.stop()
35
-
36
  # Generate summary
37
  try:
38
- with torch.no_grad():
39
- output = model.generate(
40
- input_ids,
41
- max_length=max_length,
42
- num_return_sequences=1,
43
- no_repeat_ngram_size=2,
44
- top_k=50,
45
- top_p=0.95,
46
- temperature=0.7
47
- )
48
  st.write("Summary generated successfully.")
49
  except Exception as e:
50
  st.error(f"Error generating summary: {e}")
51
  st.stop()
52
 
53
- # Decode and display generated summary
54
  try:
55
- generated_summary = tokenizer.decode(output[0], skip_special_tokens=True)
56
  st.subheader("Generated Summary:")
57
  st.markdown(generated_summary)
58
  except Exception as e:
59
- st.error(f"Error decoding generated summary: {e}")
60
 
61
  # Option to download the summary
62
  st.download_button(
@@ -65,5 +44,3 @@ if topic:
65
  file_name="generated_summary.txt",
66
  mime="text/plain"
67
  )
68
-
69
-
 
1
  import torch
2
+ from transformers import pipeline
3
  import streamlit as st
4
 
 
 
 
5
  # Model name
6
+ model_name = "YasirAbdali/bart-summarization" # Replace with the path to your fine-tuned model or Hugging Face model ID
7
 
8
+ # Load summarization pipeline
9
  try:
10
+ summarizer = pipeline("summarization", model=model_name)
11
+ st.write("Summarization pipeline loaded successfully.")
 
12
  except Exception as e:
13
+ st.error(f"Error loading summarization pipeline: {e}")
14
  st.stop()
15
 
16
  # Streamlit app
 
21
  max_length = st.slider("Maximum length of generated text:", min_value=100, max_value=500, value=200, step=50)
22
 
23
  if topic:
 
 
 
 
 
 
 
 
24
  # Generate summary
25
  try:
26
+ summary = summarizer(topic, max_length=max_length, min_length=50, do_sample=False)
27
+ generated_summary = summary[0]['summary_text']
 
 
 
 
 
 
 
 
28
  st.write("Summary generated successfully.")
29
  except Exception as e:
30
  st.error(f"Error generating summary: {e}")
31
  st.stop()
32
 
33
+ # Display generated summary
34
  try:
 
35
  st.subheader("Generated Summary:")
36
  st.markdown(generated_summary)
37
  except Exception as e:
38
+ st.error(f"Error displaying generated summary: {e}")
39
 
40
  # Option to download the summary
41
  st.download_button(
 
44
  file_name="generated_summary.txt",
45
  mime="text/plain"
46
  )