mrbeliever committed
Commit 396447e
1 Parent(s): e29eb69

Update app.py

Files changed (1)
  1. app.py +48 -98
app.py CHANGED
@@ -67,21 +67,15 @@ def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
     return chatbot


-# Concatenate the hidden pre-filled text with the user input
-prefilled_text = "You are a specialized Prompt Generator focused on improving the original text while maintaining its essence. Keep the prompt length under 50 words never exceed this limit"
-
-def get_prefilled_text(user_input):
-    return f"{prefilled_text} + {user_input}"
-
-# Update the user input function to use the concatenated text
 def user(text_prompt: str, chatbot: CHAT_HISTORY):
     if text_prompt:
-        prefilled_with_user_input = get_prefilled_text(text_prompt)
-        chatbot.append((prefilled_with_user_input, None))
+        # Pre-filled text to go with user input
+        prefilled_text = "You are a specialized Prompt Generator focused on improving the original text while maintaining its essence. Keep the prompt length under 50 words never exceed this limit"
+        full_prompt = f"{prefilled_text} {text_prompt}"
+        chatbot.append((full_prompt, None))
     return "", chatbot


-
 def bot(
     google_key: str,
     files: Optional[List[str]],
@@ -93,7 +87,7 @@ def bot(
     chatbot: CHAT_HISTORY
 ):
     if len(chatbot) == 0:
-        return chatbot
+        return ''

     google_key = google_key if google_key else GOOGLE_API_KEY
     if not google_key:
@@ -127,100 +121,56 @@ def bot(
         stream=True,
         generation_config=generation_config)

-    # streaming effect
-    chatbot[-1][1] = ""
+    generated_text = ''
     for chunk in response:
-        for i in range(0, len(chunk.text), 10):
-            section = chunk.text[i:i + 10]
-            chatbot[-1][1] += section
-            time.sleep(0.01)
-            yield chatbot
+        generated_text += chunk.text

+    return generated_text

-google_key_component = gr.Textbox(
-    label="GOOGLE API KEY",
+output_text_component = gr.Textbox(
+    label="Generated Text",
     value="",
-    type="password",
-    placeholder="...",
-    info="You have to provide your own GOOGLE_API_KEY for this app to function properly",
-    visible=GOOGLE_API_KEY is None
+    placeholder="Generated text will appear here",
+    readonly=True,
+    scale=8,
+    multiline=True
 )
-chatbot_component = gr.Chatbot(
-    label='Gemini',
-    bubble_full_width=False,
-    avatar_images=AVATAR_IMAGES,
-    scale=2,
-    height=400
+
+def copy_text():
+    output_text_component.copy()
+
+output_copy_icon = gr.HTML(
+    "<svg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' "
+    "fill='none' stroke='currentColor' stroke-width='2' stroke-linecap='round' "
+    "stroke-linejoin='round' class='feather feather-copy' onclick='copyText()'>"
+    "<rect x='9' y='9' width='13' height='13' rx='2' ry='2'></rect>"
+    "<path d='M9 15h4'></path><path d='M15 9v6'></path></svg>"
+    "<script>"
+    "function copyText() {"
+    "var copyText = document.getElementById('output-text');"
+    "copyText.select();"
+    "document.execCommand('copy');"
+    "alert('Copied to clipboard!');"
+    "}"
+    "</script>"
 )
+
+output_text_component_copy = gr.OutputComponent([output_text_component, output_copy_icon])
+
 text_prompt_component = gr.Textbox(
-    value="",
-    placeholder="Type or Paste Your Prompt Here",
+    placeholder="Hi there! [press Enter]",
     show_label=False,
     autofocus=True,
     scale=8
 )

-upload_button_component = gr.UploadButton(
-    label="Upload Images", file_count="multiple", file_types=["image"], scale=1
+chatbot_component = gr.Chatbot(
+    label='Gemini',
+    bubble_full_width=False,
+    avatar_images=AVATAR_IMAGES,
+    scale=2,
+    height=400
 )
-run_button_component = gr.Button(value="Run", variant="primary", scale=1)
-temperature_component = gr.Slider(
-    minimum=0,
-    maximum=1.0,
-    value=0.4,
-    step=0.05,
-    label="Temperature",
-    info=(
-        "Temperature controls the degree of randomness in token selection. Lower "
-        "temperatures are good for prompts that expect a true or correct response, "
-        "while higher temperatures can lead to more diverse or unexpected results. "
-    ))
-max_output_tokens_component = gr.Slider(
-    minimum=1,
-    maximum=2048,
-    value=1024,
-    step=1,
-    label="Token limit",
-    info=(
-        "Token limit determines the maximum amount of text output from one prompt. A "
-        "token is approximately four characters. The default value is 2048."
-    ))
-stop_sequences_component = gr.Textbox(
-    label="Add stop sequence",
-    value="",
-    type="text",
-    placeholder="STOP, END",
-    info=(
-        "A stop sequence is a series of characters (including spaces) that stops "
-        "response generation if the model encounters it. The sequence is not included "
-        "as part of the response. You can add up to five stop sequences."
-    ))
-top_k_component = gr.Slider(
-    minimum=1,
-    maximum=40,
-    value=32,
-    step=1,
-    label="Top-K",
-    info=(
-        "Top-k changes how the model selects tokens for output. A top-k of 1 means the "
-        "selected token is the most probable among all tokens in the model’s "
-        "vocabulary (also called greedy decoding), while a top-k of 3 means that the "
-        "next token is selected from among the 3 most probable tokens (using "
-        "temperature)."
-    ))
-top_p_component = gr.Slider(
-    minimum=0,
-    maximum=1,
-    value=1,
-    step=0.01,
-    label="Top-P",
-    info=(
-        "Top-p changes how the model selects tokens for output. Tokens are selected "
-        "from most probable to least until the sum of their probabilities equals the "
-        "top-p value. For example, if tokens A, B, and C have a probability of .3, .2, "
-        "and .1 and the top-p value is .5, then the model will select either A or B as "
-        "the next token (using temperature). "
-    ))

 user_inputs = [
     text_prompt_component,
@@ -243,7 +193,6 @@ with gr.Blocks() as demo:
     gr.HTML(SUBTITLE)
     gr.HTML(DUPLICATE)
     with gr.Column():
-        google_key_component.render()
         chatbot_component.render()
         with gr.Row():
             text_prompt_component.render()
@@ -260,26 +209,27 @@ with gr.Blocks() as demo:
     run_button_component.click(
         fn=user,
         inputs=user_inputs,
-        outputs=[text_prompt_component, chatbot_component],
+        outputs=[output_text_component, chatbot_component],
         queue=False
     ).then(
-        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
+        fn=bot, inputs=bot_inputs, outputs=[output_text_component_copy],
     )

     text_prompt_component.submit(
         fn=user,
         inputs=user_inputs,
-        outputs=[text_prompt_component, chatbot_component],
+        outputs=[ output_text_component, chatbot_component],
         queue=False
     ).then(
-        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
+        fn=bot, inputs=bot_inputs, outputs=[output_text_component_copy],
     )

     upload_button_component.upload(
         fn=upload,
         inputs=[upload_button_component, chatbot_component],
-        outputs=[chatbot_component],
+        outputs=[output_text_component, chatbot_component],
         queue=False
     )

 demo.queue(max_size=99).launch(debug=False, show_error=True)
+
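
The commit replaces the streaming chatbot update with a bot() that returns the complete generated text and shows it in a separate "Generated Text" box wired to a custom SVG/JS copy icon. For context, the sketch below shows the same display-and-copy pattern using only built-in Gradio features. It is a minimal sketch, not the committed code: it assumes Gradio >= 3.40 (for show_copy_button), and the component names and the stubbed bot() are illustrative only.

# Sketch only: assumes Gradio >= 3.40; names and the stubbed bot() are illustrative.
import gradio as gr

def bot(text_prompt: str) -> str:
    # Stand-in for the real Gemini call: return the whole response at once
    # instead of streaming chunk.text pieces into the chat history.
    return f"Improved prompt: {text_prompt}"

with gr.Blocks() as demo:
    text_prompt_component = gr.Textbox(show_label=False,
                                       placeholder="Type or paste your prompt here")
    run_button_component = gr.Button("Run", variant="primary")
    # interactive=False makes the box read-only, and show_copy_button adds a
    # built-in copy-to-clipboard control, so no custom SVG/JS is required.
    output_text_component = gr.Textbox(
        label="Generated Text",
        lines=6,
        interactive=False,
        show_copy_button=True,
    )
    run_button_component.click(fn=bot,
                               inputs=[text_prompt_component],
                               outputs=[output_text_component])

demo.queue().launch()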