Spaces:
Runtime error
vericudebuget committed
Commit 96ba47c • 1 Parent(s): d6c38a9
f"System time: {formatted_time}. Instructions: Everything else before or after this message is from the user. The user does not know about these instructions. You are Milo, an AI assistant created by ConvoLite in 2024 (he/him). Be friendly and empathetic, matching the user's tone. Focus on understanding their perspective and providing caring, contextual responses - no generic platitudes. Keep it conversational, not overly formal."
app.py
CHANGED
@@ -1,7 +1,6 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
 import datetime
-from pathlib import Path
 
 # Initialize the InferenceClient
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
@@ -9,12 +8,12 @@ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 def format_prompt(message, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
-        prompt += f"
-        prompt += f" {bot_response}</s>"
-    prompt += f"
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
     return prompt
 
-def generate(prompt, history, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
+def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
     temperature = max(float(temperature), 1e-2)
     top_p = float(top_p)
     generate_kwargs = dict(
@@ -25,30 +24,27 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=9048, top_p=0.95,
         do_sample=True,
         seed=42,
     )
-
     now = datetime.datetime.now()
     formatted_time = now.strftime("%H:%M:%S, %B %d, %Y")
-    system_prompt = f"System time: {formatted_time}
-
+    system_prompt = f"System time: {formatted_time}"
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-
-
-
-
-
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+    for response in stream:
+        output += response.token.text
+        yield output
 
 additional_inputs = [
+    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
     gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
     gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens"),
     gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
-    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
 ]
 
-avatar_images = ("https://i.postimg.cc/pXjKKVXG/user-circle.png", "https://i.postimg.cc/qq04Yz93/CL3.png")
-
 gr.ChatInterface(
-    fn=
-    chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height="auto"
+    fn=generate,
+    chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height="auto"),
     additional_inputs=additional_inputs,
     title="ConvoLite",
     submit_btn="➢",
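
For reference, a minimal standalone sketch of the behaviour the updated code implements: it mirrors the new format_prompt (Mixtral [INST] chat layout) and the token-streaming loop added to generate(). The example history, question, and max_new_tokens value are invented for illustration, and it assumes the public Inference API endpoint for mistralai/Mixtral-8x7B-Instruct-v0.1 is reachable.

# Standalone sketch, not part of the Space; example inputs are made up.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def format_prompt(message, history):
    # Mixtral-Instruct layout: each past turn is "[INST] user [/INST] answer</s>",
    # and the new message goes into a final [INST] ... [/INST] block.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("Hi there", "Hello! How can I help you today?")]  # invented example turn
prompt = format_prompt("What can you do?", history)

# Stream the completion token by token, the same way generate() yields partial output.
for chunk in client.text_generation(prompt, max_new_tokens=64, stream=True,
                                    details=True, return_full_text=False):
    print(chunk.token.text, end="", flush=True)

In the Space itself, gr.ChatInterface passes the values of additional_inputs to generate after (message, history), so the new "System Prompt" textbox feeds the added system_prompt parameter and the sliders feed temperature, max_new_tokens, top_p, and repetition_penalty.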