hfl-rc committed on
Commit c49a56c
1 Parent(s): fe96d83

Upload 2 files

Files changed (2)
  1. app.py +87 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,87 @@
+ from threading import Thread
+
+ import gradio as gr
+ import spaces
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+ BANNER_HTML = """
+ <p align="center">
+     <a href="https://github.com/ymcui/Chinese-LLaMA-Alpaca-3">
+         <img src="https://ymcui.com/images/chinese-llama-alpaca-3-banner.png" width="600"/>
+     </a>
+ </p>
+ <h3>
+     <center>Check our
+     <a href='https://github.com/ymcui/Chinese-LLaMA-Alpaca-3' target='_blank'>Chinese-LLaMA-Alpaca-3 GitHub Project</a>
+     for more information.
+     </center>
+ </h3>
+ <p>
+     <center><em>The demo is mainly for academic purposes and users are not expected to use this demo for illegal activities.</em></center>
+ </p>
+ """
+
+ DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant. 你是一个乐于助人的助手。"
+
+ # Load the instruct model matching the selected version.
+ def load_model(version):
+     global tokenizer, model
+     model_names = {
+         "v1": "hfl/llama-3-chinese-8b-instruct",
+         "v2": "hfl/llama-3-chinese-8b-instruct-v2",
+         "v3": "hfl/llama-3-chinese-8b-instruct-v3",
+     }
+     model_name = model_names[version]  # fail loudly (KeyError) on an unknown version
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
+     return f"Model {model_name} loaded."
+
+ @spaces.GPU(duration=50)
+ def stream_chat(message: str, history: list, system_prompt: str, model_version: str, temperature: float, max_new_tokens: int):
+     # Rebuild the full conversation from the chat history.
+     conversation = [{"role": "system", "content": system_prompt or DEFAULT_SYSTEM_PROMPT}]
+     for prompt, answer in history:
+         conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+
+     generate_kwargs = {
+         "input_ids": input_ids,
+         "streamer": streamer,
+         "max_new_tokens": max_new_tokens,
+         "temperature": temperature,
+         "do_sample": temperature != 0,  # use greedy decoding when temperature is 0
+     }
+
+     # Generate in a background thread; decoded tokens arrive through the streamer.
+     generation_thread = Thread(target=model.generate, kwargs=generate_kwargs)
+     generation_thread.start()
+
+     output = ""
+     for new_token in streamer:
+         output += new_token
+         yield output
+
+ chatbot = gr.Chatbot(height=500)
+
+ with gr.Blocks() as demo:
+     gr.HTML(BANNER_HTML)
+     gr.ChatInterface(
+         fn=stream_chat,
+         chatbot=chatbot,
+         fill_height=True,
+         additional_inputs_accordion=gr.Accordion(label="Parameters / 参数设置", open=False, render=False),
+         additional_inputs=[
+             gr.Text(value=DEFAULT_SYSTEM_PROMPT, label="System Prompt / 系统提示词", render=False),
+             gr.Radio(choices=["v1", "v2", "v3"], label="Model Version / 模型版本", value="v3", interactive=False, render=False),
+             gr.Slider(minimum=0, maximum=1, step=0.1, value=0.5, label="Temperature / 温度系数", render=False),
+             gr.Slider(minimum=128, maximum=2048, step=1, value=256, label="Max new tokens / 最大生成长度", render=False),
+         ],
+         cache_examples=False,
+     )
+
+ if __name__ == "__main__":
+     load_model("v3")  # load the default model at startup
+     demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ huggingface_hub==0.22.2
+ transformers
+ accelerate
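
For readers who want to try the token-streaming pattern from `stream_chat` outside the Space, a minimal standalone sketch is shown below. It assumes a local `transformers` install with enough GPU memory for the model; the prompt and `max_new_tokens` value are illustrative, not part of this commit.

```python
# Minimal sketch of the threaded streaming pattern used in app.py.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "hfl/llama-3-chinese-8b-instruct-v3"  # same model the Space loads by default
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},  # illustrative prompt
]
input_ids = tokenizer.apply_chat_template(
    conversation, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# skip_prompt drops the echoed input; the streamer yields decoded text chunks.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# model.generate blocks, so it runs in a background thread while the main
# thread consumes chunks as they are produced.
Thread(target=model.generate, kwargs={
    "input_ids": input_ids,
    "streamer": streamer,
    "max_new_tokens": 64,
}).start()

for chunk in streamer:
    print(chunk, end="", flush=True)
```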