nicholasKluge committed on
Commit
6d5dc5a
1 Parent(s): b6b0c34

Update app.py

Files changed (1)
  1. app.py +19 -12
app.py CHANGED
@@ -1,4 +1,4 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
 tokenizer = AutoTokenizer.from_pretrained('nicholasKluge/Aira-Instruct-PT-560M',
@@ -8,12 +8,19 @@ model = AutoModelForCausalLM.from_pretrained('nicholasKluge/Aira-Instruct-PT-560
 
 import gradio as gr
 
-with gr.Blocks() as demo:
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox()
-    clear = gr.Button("Clear Conversation")
-
-    def respond(message, chat_history):
+
+with gr.Blocks() as demo:
+    gr.Markdown("""<h1><center>🔥Aira-PT Demo🤓🚀</h1></center>""")
+    chatbot = gr.Chatbot(label="Aira")
+    msg = gr.Textbox(label="Write a question or comment to Aira", placeholder="Hi Aira, how are you?")
+    with gr.Accordion("Parameters ⚙️", open=True):
+        top_k = gr.Slider(minimum=10, maximum=100, value=50, step=5, interactive=True, label="Top-k")
+        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.70, step=0.05, interactive=True, label="Top-p")
+        temperature = gr.Slider(minimum=0.001, maximum=2.0, value=0.5, step=0.1, interactive=True, label="Temperature")
+        max_length = gr.Slider(minimum=10, maximum=500, value=100, step=10, interactive=True, label="Max Length")
+    clear = gr.Button("Clear Conversation 🧹")
+
+    def generate_response(message, chat_history, top_k, top_p, temperature, max_length):
         inputs = tokenizer(tokenizer.bos_token + message + tokenizer.eos_token, return_tensors="pt")
 
         response = model.generate(**inputs,
@@ -22,17 +29,17 @@ with gr.Blocks() as demo:
                                   eos_token_id=tokenizer.eos_token_id,
                                   do_sample=True,
                                   early_stopping=True,
-                                  top_k=50,
-                                  max_length=200,
-                                  top_p=0.95,
-                                  temperature=0.1,
+                                  top_k=top_k,
+                                  max_length=max_length,
+                                  top_p=top_p,
+                                  temperature=temperature,
                                   num_return_sequences=1)
 
         chat_history.append((f"👤 {message}", f"""🤖 {tokenizer.decode(response[0], skip_special_tokens=True).replace(message, "")}"""))
 
         return "", chat_history
-
-    msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+    msg.submit(generate_response, [msg, chatbot, top_k, top_p, temperature, max_length], [msg, chatbot])
     clear.click(lambda: None, None, chatbot, queue=False)
 
 demo.launch()
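
For quick reference, a minimal standalone sketch (not part of the commit) of querying the same checkpoint outside the Gradio UI, with the generation arguments shown in the diff set to the new sliders' default values; the prompt string is purely illustrative.

# Standalone sketch: checkpoint name and generate() arguments come from the diff;
# slider defaults are taken from the updated app.py. The prompt is an assumption.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('nicholasKluge/Aira-Instruct-PT-560M')
model = AutoModelForCausalLM.from_pretrained('nicholasKluge/Aira-Instruct-PT-560M')

message = "Olá, Aira!"  # illustrative prompt, not from the commit
inputs = tokenizer(tokenizer.bos_token + message + tokenizer.eos_token, return_tensors="pt")

response = model.generate(**inputs,
                          eos_token_id=tokenizer.eos_token_id,
                          do_sample=True,
                          early_stopping=True,
                          top_k=50,         # "Top-k" slider default
                          top_p=0.70,       # "Top-p" slider default
                          temperature=0.5,  # "Temperature" slider default
                          max_length=100,   # "Max Length" slider default
                          num_return_sequences=1)

print(tokenizer.decode(response[0], skip_special_tokens=True).replace(message, ""))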