Falln87 committed
Commit 8564ce3
1 Parent(s): c91944b

Update app.py

Files changed (1):
  1. app.py +30 -113
app.py CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-import streamlit_ace
 from streamlit_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
 from diffusers import DiffusionPipeline, StableDiffusionPipeline, DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
@@ -31,11 +30,19 @@ elif model_type == "Text-to-Text":
 
 # Load model and tokenizer
 if model_type in ["Text-to-Image", "Image-to-Image"]:
-    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+    try:
+        pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+    except Exception as e:
+        st.error(f"Error loading model: {e}")
+        st.stop()
 elif model_type == "Text-to-Text":
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForCausalLM.from_pretrained(model_id)
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        model = AutoModelForCausalLM.from_pretrained(model_id)
+    except Exception as e:
+        st.error(f"Error loading model: {e}")
+        st.stop()
 
 # Main app layout
 st.title("Diffusers UI")
@@ -66,121 +73,31 @@ if submitted:
             image = image.to(pipe.device)
 
             with st.spinner("Generating image..."):
-                image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
+                try:
+                    image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
+                except Exception as e:
+                    st.error(f"Error generating image: {e}")
+                    st.stop()
 
             st.image(image, caption="Generated Image", use_column_width=True)
         else:
             with st.spinner("Generating image..."):
-                image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
+                try:
+                    image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
+                except Exception as e:
+                    st.error(f"Error generating image: {e}")
+                    st.stop()
 
             st.image(image, caption="Generated Image", use_column_width=True)
     elif model_type == "Text-to-Text":
         with st.spinner("Generating text..."):
-            input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(model.device)
-            generated_ids = model.generate(input_ids, max_length=max_length)
-            generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+            try:
+                input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(model.device)
+                generated_ids = model.generate(input_ids, max_length=max_length)
+                generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+            except Exception as e:
+                st.error(f"Error generating text: {e}")
+                st.stop()
 
         st.write("Generated Text:")
-        st.write(generated_text)
-
-# Code editor section
-st.header("Code Editor")
-with st.expander("View Code"):
-    code =
-import streamlit as st
-import streamlit_ace
-from streamlit_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode, DataReturnMode
-from diffusers import DiffusionPipeline, StableDiffusionPipeline, DDIMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-from datasets import load_dataset
-import torch
-
-# Set page title and favicon
-st.set_page_config(page_title="Diffusers UI", page_icon=":art:")
-
-# Sidebar for selecting models and tasks
-st.sidebar.title("Diffusers UI")
-model_type = st.sidebar.selectbox("Select Model Type", ["Text-to-Image", "Image-to-Image", "Text-to-Text"])
-task = st.sidebar.selectbox("Select Task", ["Image Generation", "Image Editing", "Text Generation"])
-
-# Load Hugging Face Hub models based on selected model type and task
-if model_type == "Text-to-Image":
-    if task == "Image Generation":
-        model_id = "CompVis/stable-diffusion-v1-4"
-    elif task == "Image Editing":
-        model_id = "runwayml/stable-diffusion-inpainting"
-elif model_type == "Image-to-Image":
-    if task == "Image Generation":
-        model_id = "CompVis/stable-diffusion-v1-4"
-    elif task == "Image Editing":
-        model_id = "CompVis/stable-diffusion-v1-4"
-elif model_type == "Text-to-Text":
-    if task == "Text Generation":
-        model_id = "gpt2"
-
-# Load model and tokenizer
-if model_type in ["Text-to-Image", "Image-to-Image"]:
-    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-elif model_type == "Text-to-Text":
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForCausalLM.from_pretrained(model_id)
-
-# Main app layout
-st.title("Diffusers UI")
-
-# Input section
-with st.form("input_form"):
-    if model_type in ["Text-to-Image", "Image-to-Image"]:
-        prompt = st.text_input("Enter a prompt", "a photo of an astronaut riding a horse")
-        num_inference_steps = st.slider("Number of inference steps", 1, 50, 25)
-        guidance_scale = st.slider("Guidance scale", 1.0, 10.0, 7.5)
-        image = st.file_uploader("Upload an image (optional)", type=["png", "jpg", "jpeg"])
-    elif model_type == "Text-to-Text":
-        input_text = st.text_input("Enter input text", "Hello, my name is")
-        max_length = st.slider("Maximum length of generated text", 1, 100, 20)
-
-    submitted = st.form_submit_button("Generate")
-
-# Output section
-if submitted:
-    if model_type in ["Text-to-Image", "Image-to-Image"]:
-        if image is not None:
-            image = Image.open(image)
-            image = image.resize((768, 768))
-            image = np.array(image).astype(np.float32) / 255.0
-            image = image[None].transpose(0, 3, 1, 2)
-            image = torch.from_numpy(image)
-            image = 2.0 * image - 1.0
-            image = image.to(pipe.device)
-
-            with st.spinner("Generating image..."):
-                image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
-
-            st.image(image, caption="Generated Image", use_column_width=True)
-        else:
-            with st.spinner("Generating image..."):
-                image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
-
-            st.image(image, caption="Generated Image", use_column_width=True)
-    elif model_type == "Text-to-Text":
-        with st.spinner("Generating text..."):
-            input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(model.device)
-            generated_ids = model.generate(input_ids, max_length=max_length)
-            generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
-        st.write("Generated Text:")
-        st.write(generated_text)
-"""
-# Code editor section
-st.header("Code Editor")
-with st.expander("View Code"):
-    code =
-    streamlit_ace.ace(code, type="python", theme="monokai", height=500)
-    streamlit_ace.ace(code, type="python", theme="monokai", height=500)
-"""
-
-# Run the app
-if __name__ == "__main__":
-    main()
-streamlit_ace.ace(code, type="python", theme="monokai", height=500)
+        st.write(generated_text)
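
For reference, below is a minimal standalone sketch of the error-handling pattern this commit applies to model loading. It is an illustration, not part of the commit; it assumes streamlit, torch, and diffusers are installed and reuses the same checkpoint id the app selects for image generation.

# Minimal sketch (assumption: run with `streamlit run sketch.py`)
import streamlit as st
import torch
from diffusers import DiffusionPipeline, DDIMScheduler

model_id = "CompVis/stable-diffusion-v1-4"  # same checkpoint the app uses for image generation

try:
    # Load the pipeline and swap in the DDIM scheduler, as the updated app.py does.
    pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
except Exception as e:
    # Surface the failure in the UI and stop the script instead of crashing the session.
    st.error(f"Error loading model: {e}")
    st.stop()

st.success("Model loaded")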