Spaces:
Upload app.py (commit message: Substitute the template)

app.py CHANGED
@@ -1,63 +1,56 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+import gradio as gr
+import time
+from utils import format_as_chat
+import requests
+
+def chatbot_demo(message, history):
+    if history:
+        input_message = format_as_chat(message, history)
+    else:
+        input_message = format_as_chat(message, [])
+
+    # Add another assistant delimiter at the beginning so the output text doesn't contain 'assistant\n\n'
+    json_obj = {
+        "inputs": input_message + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n',
+        "parameters": {
+            "best_of": 1,
+            "decoder_input_details": False,
+            "details": True,
+            "do_sample": True,
+            "frequency_penalty": 0.1,
+            "grammar": None,
+            "max_new_tokens": 500,
+            "repetition_penalty": 1.03,
+            "return_full_text": False,
+            "seed": None,
+            "stop": [
+                "photographer"
+            ],
+            "temperature": 0.5,
+            "top_k": 1,
+            "top_n_tokens": 5,
+            "top_p": 0.95,
+            "truncate": None,
+            "typical_p": 0.95,
+            "watermark": True
+        }
+    }
+
+    response = requests.post('https://uf9t072wj5ki2ho4.eu-west-1.aws.endpoints.huggingface.cloud/generate', json=json_obj)
+    data = response.json()
+    llama_out = data['generated_text']
+    for i in range(len(llama_out)):
+        time.sleep(0.05)
+        yield llama_out[: i + 1]
+
+demo = gr.ChatInterface(
+    fn=chatbot_demo,
+    chatbot=gr.Chatbot(height=500),
+    textbox=gr.Textbox(placeholder="Just ask Llama3 anything you want!", container=False, scale=15),
+    examples=['Hey my maaaaaaaaaaaan! Whazzzzzzup!', 'Write me some crazy conversations between Rick and Morty', '帮我想几个关于去意大利旅游的小红书帖子标题。用中文回答后请用英语再翻译一遍'],
+    cache_examples=False,
+    title="Llama 3 8B Instruct",
+)
+
+if __name__ == "__main__":
+    demo.launch()
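Note that the new app.py imports format_as_chat from a utils module that is not part of this upload, so its behavior has to be inferred. A minimal sketch of what such a helper might look like, assuming it renders the message and (user, assistant) history pairs into the Llama 3 Instruct prompt format implied by the '<|eot_id|><|start_header_id|>assistant<|end_header_id|>' suffix that app.py appends itself (the real utils.py may differ):

# Hypothetical sketch of utils.format_as_chat; the actual utils.py is not in this commit.
# Assumes the Llama 3 Instruct special-token format suggested by the suffix in app.py.
def format_as_chat(message: str, history: list) -> str:
    """Render the current message and (user, assistant) history as a Llama 3 prompt."""
    prompt = "<|begin_of_text|>"
    for user_turn, assistant_turn in history:
        prompt += "<|start_header_id|>user<|end_header_id|>\n\n" + user_turn + "<|eot_id|>"
        prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n" + assistant_turn + "<|eot_id|>"
    # End with the new user turn; the caller in app.py appends
    # '<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' itself.
    prompt += "<|start_header_id|>user<|end_header_id|>\n\n" + message
    return prompt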
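Also worth noting: chatbot_demo waits for the full completion from /generate and then replays it character by character with time.sleep to simulate streaming. Text Generation Inference endpoints also expose a /generate_stream route that returns server-sent events, so true token streaming is possible. A sketch under that assumption, reusing the same endpoint and payload as app.py:

# Sketch: real token streaming via TGI's /generate_stream SSE route, as an
# alternative to replaying the finished text. Same endpoint and json_obj as app.py.
import json
import requests

def stream_generate(json_obj: dict, base_url: str):
    url = base_url.rstrip('/') + '/generate_stream'
    with requests.post(url, json=json_obj, stream=True) as response:
        partial = ""
        for line in response.iter_lines():
            # SSE lines look like: data:{"token": {"text": "...", "special": false}, ...}
            if not line or not line.startswith(b"data:"):
                continue
            event = json.loads(line[len(b"data:"):])
            if event["token"]["special"]:
                continue  # skip special tokens such as <|eot_id|>
            partial += event["token"]["text"]
            yield partial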