Commit 0acd996 by xiaoheiqaq (parent: 9589bca)
initial commit

Files changed:
- .gitignore: +2 -0
- app.py: +82 -59
.gitignore ADDED
@@ -0,0 +1,2 @@
+.venv
+.env
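The ignored .env file is where the credentials read by the new app.py (below) live, loaded via dotenv.load_dotenv(). A hypothetical example: the variable names come from app.py, but the values here are invented placeholders:

    # .env -- example values only; the real file stays out of git via the entry above
    BACKEND_URL=https://example.com/api/chat
    USERNAME=admin
    PASSWORD=change-me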
app.py CHANGED
@@ -1,63 +1,86 @@
import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+import os
+import requests
+import dotenv
+dotenv.load_dotenv()
+
+url = os.getenv('BACKEND_URL')
+username = os.getenv('USERNAME')
+password = os.getenv('PASSWORD')
+system_prompt_text = ""
+
+def predict(message, history, max_new_tokens, top_k, top_p, temperature):
+    global system_prompt_text, url
+    payload = {
+        "message": message,
+        "system_message": system_prompt_text,
+        "history": history,
+        "max_new_tokens": max_new_tokens,
+        "top_k": top_k,
+        "top_p": top_p,
+        "temperature": temperature,
+    }
+    headers = {
+        "Content-Type": "application/json"
+    }
+
+    response = requests.post(url, json=payload, headers=headers)
+
+    if response.status_code == 200:
+        return response.json()
+    else:
+        response.raise_for_status()
+
+
+def update_system_prompt(new_content):
+    global system_prompt_text
+    system_prompt_text = new_content
+
+with gr.Blocks(fill_height=True) as demo:
+    max_new_tokens_slider = gr.Slider(
+        minimum=1, maximum=500, value=50, step=1,
+        label="Max New Tokens (The maximum number of tokens to generate in the response. This limits the length of the generated text.)",
+        render=False
+    )
+
+    top_k_slider = gr.Slider(
+        minimum=0, maximum=100, value=50, step=1,
+        label="Top K (The number of highest probability vocabulary tokens to keep for top-k filtering. This controls the diversity of the generated text by limiting the number of token options at each step.)",
+        render=False
+    )
+
+    top_p_slider = gr.Slider(
+        minimum=0.0, maximum=1.0, value=1.0, step=0.01,
+        label="Top P (The cumulative probability threshold for nucleus sampling. This controls the diversity of the generated text by sampling tokens from the smallest possible set whose cumulative probability is above the threshold.)",
+        render=False
+    )
+
+    temperature_slider = gr.Slider(
+        minimum=0.0, maximum=2.0, value=0.9, step=0.01,
+        label="Temperature (The sampling temperature to use. This controls the randomness of predictions by scaling the logits before applying softmax. Lower values make the model more deterministic, while higher values increase diversity.)",
+        render=False
+    )
+
+    gr.ChatInterface(
+        predict,
+        cache_examples=False,
+        additional_inputs=[max_new_tokens_slider, top_k_slider, top_p_slider, temperature_slider],
+        examples=[
+            ["I'm in a bad mood.", None, None, None, None],
+            ["Do you have any hobbies or interests outside of work?", None, None, None, None],
+            ["Who created you?", None, None, None, None],
+            ["Please introduce yourself.", None, None, None, None],
+            ["Do you have any plans for the future?", None, None, None, None],
+            ["Does Emi play the piano?", None, None, None, None],
+            ["Can you feel pain?", None, None, None, None],
+            ["Do you feel like AI?", None, None, None, None],
+            ["Can you work 24/7?", None, None, None, None],
+            ["Do you ever update?", None, None, None, None],
+        ]
+    )
+
+    # system_prompt = gr.Textbox(value=system_prompt_text, info="System Message:", placeholder="你是Emi",
+    #                            interactive=True, lines=5)
+    # system_prompt.change(
+    #     fn=update_system_prompt, inputs=system_prompt)
+

if __name__ == "__main__":
-    demo.launch()
+    demo.launch(auth=(username, password))
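After this commit the Space no longer calls the Hugging Face Inference API; predict() simply POSTs the chat payload to BACKEND_URL and hands the JSON response straight back to gr.ChatInterface. The backend itself is not part of this repo, so the following is only a minimal sketch of a server that would satisfy this client, assuming FastAPI; the route path, field defaults, and echo reply are all invented for illustration:

    # Hypothetical backend sketch -- not the real service behind BACKEND_URL.
    from fastapi import FastAPI
    from pydantic import BaseModel

    app = FastAPI()

    class ChatRequest(BaseModel):
        # Field names mirror the payload built in predict() in app.py.
        message: str
        system_message: str = ""
        history: list = []
        max_new_tokens: int = 50
        top_k: int = 50
        top_p: float = 1.0
        temperature: float = 0.9

    @app.post("/")  # app.py posts to BACKEND_URL itself, so the root path is one plausible choice
    def chat(req: ChatRequest) -> str:
        # A real implementation would run generation with the sampling
        # parameters above; this stub just echoes the user message.
        # FastAPI serializes the returned str as a JSON string, which
        # response.json() in app.py decodes back into plain text.
        return f"(echo) {req.message}"

Note that predict() returns whatever response.json() yields, so the backend must reply with a JSON string (not an object) for the chat window to render it as text.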