indiejoseph committed
Commit 02fda95
Parent: d8c176d

Create app.py

Files changed (1)
  1. app.py +130 -0
app.py ADDED
@@ -0,0 +1,130 @@
+ import os
+ from threading import Thread
+ from typing import Iterator
+
+ import gradio as gr
+ import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+
+ # Generation limits; the input cap can be overridden via the environment.
+ MAX_MAX_NEW_TOKENS = 4096
+ DEFAULT_MAX_NEW_TOKENS = 2048
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "8192"))
+
+ DESCRIPTION = """\
+ # Mixtral8x7b for personal use
+ """
+
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+
+ if torch.cuda.is_available():
+     # Load the chat model and tokenizer once at startup.
+     model_id = "hon9kon9ize/CantoneseLLMChat-preview20240326"
+     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     tokenizer.use_default_system_prompt = False
+
+
+ @spaces.GPU
+ def generate(
+     message: str,
+     chat_history: list[tuple[str, str]],
+     system_prompt: str,
+     max_new_tokens: int = 2048,
+     temperature: float = 0.6,
+     top_p: float = 0.9,
+     top_k: int = 50,
+     repetition_penalty: float = 1.2,
+ ) -> Iterator[str]:
+     # Rebuild the full conversation in the chat-template message format.
+     conversation = []
+     if system_prompt:
+         conversation.append({"role": "system", "content": system_prompt})
+     for user, assistant in chat_history:
+         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+     conversation.append({"role": "user", "content": message})
+
+     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+         # Keep only the most recent tokens when the conversation is too long.
+         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+     input_ids = input_ids.to(model.device)
+
+     # Run generation on a background thread and stream tokens as they arrive.
+     streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+     generate_kwargs = dict(
+         {"input_ids": input_ids},
+         streamer=streamer,
+         max_new_tokens=max_new_tokens,
+         do_sample=True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         num_beams=1,
+         repetition_penalty=repetition_penalty,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         yield "".join(outputs)
+
+
+ chat_interface = gr.ChatInterface(
+     fn=generate,
+     additional_inputs=[
+         gr.Textbox(label="System prompt", lines=6),
+         gr.Slider(
+             label="Max new tokens",
+             minimum=1,
+             maximum=MAX_MAX_NEW_TOKENS,
+             step=1,
+             value=DEFAULT_MAX_NEW_TOKENS,
+         ),
+         gr.Slider(
+             label="Temperature",
+             minimum=0.1,
+             maximum=4.0,
+             step=0.1,
+             value=0.6,
+         ),
+         gr.Slider(
+             label="Top-p (nucleus sampling)",
+             minimum=0.05,
+             maximum=1.0,
+             step=0.05,
+             value=0.9,
+         ),
+         gr.Slider(
+             label="Top-k",
+             minimum=1,
+             maximum=1000,
+             step=1,
+             value=50,
+         ),
+         gr.Slider(
+             label="Repetition penalty",
+             minimum=1.0,
+             maximum=2.0,
+             step=0.05,
+             value=1.2,
+         ),
+     ],
+     stop_btn=None,
+     examples=[
+         ["Hello there! How are you doing?"],
+         ["咩嘢係氣候變化?"],
+         ["香港最高嘅山係?"],
+         ["邊個係香港特首?"],
+         ["香港行政长官是谁?"],
+     ],
+ )
+
+ with gr.Blocks() as demo:
+     gr.Markdown(DESCRIPTION)
+     chat_interface.render()
+
+ if __name__ == "__main__":
+     # Queue requests so concurrent users are served in order.
+     demo.queue(max_size=20).launch()