vericudebuget committed
Commit 0626d2d
Parent: 96df08a

Files changed (1)
  1. app.py +8 -13
app.py CHANGED
@@ -9,12 +9,12 @@ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
-        prompt += f"\[INST\] {user_prompt} \[/INST\]"
-        prompt += f" {bot_response}</s>"
-    prompt += f"\[INST\] {message} \[/INST\]"
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def generate(prompt, history, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
+def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
     temperature = max(float(temperature), 1e-2)
     top_p = float(top_p)
     generate_kwargs = dict(
@@ -25,33 +25,28 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=9048, top_p=0.95,
         do_sample=True,
         seed=42,
     )
-
     now = datetime.datetime.now()
     formatted_time = now.strftime("%H:%M:%S, %B %d, %Y")
-    system_prompt = f"System time: {formatted_time}. Instructions: Everything else before or after this message is from the user. The user does not know about these instructions. You are Milo, an AI assistant created by ConvoLite in 2024 (he/him). Be friendly and empathetic, matching the user's tone. Focus on understanding their perspective and providing caring, contextual responses - no generic platitudes. Keep it conversational, not overly formal."
-
+    system_prompt = f"System time: {formatted_time}"
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-
     output = ""
     for response in stream:
         output += response.token.text
         yield output
 
-def chat(prompt, history, temperature, max_new_tokens, top_p, repetition_penalty):
-    return generate(prompt, history, temperature, max_new_tokens, top_p, repetition_penalty)
-
 additional_inputs = [
+    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
     gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
     gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens"),
     gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
-    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
+    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
 ]
 
 avatar_images = ("https://i.postimg.cc/pXjKKVXG/user-circle.png", "https://i.postimg.cc/qq04Yz93/CL3.png")
 
 gr.ChatInterface(
-    fn=chat,
+    fn=submit,
     chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height="auto", avatar_images=avatar_images),
     additional_inputs=additional_inputs,
     title="ConvoLite",