yentinglin committed · Commit 9ee12c9 · Parent(s): 835f9a2

Update conversation.py

conversation.py CHANGED (+964 -191)
[The previous version of conversation.py, a minimal prompt-template module covering
OpenAssistant/oasst-sft-1-pythia-12b, StabilityAI/stablelm-tuned-alpha-7b,
databricks/dolly-v2-12b, THUDM/chatglm-6b, and Alpaca/LLaMa, is replaced wholesale
by the version below.]
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you want to use it.
You can contribute back the changes you want to make.
"""

import dataclasses
from enum import auto, IntEnum
from typing import List, Any, Dict, Union


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    roles: List[str] = (("USER", "ASSISTANT"),)
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ": "  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = "" if system_prompt == "" else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ": "
                        + message.replace("\r\n", "\n").replace("\n\n", "\n")
                    )
                    ret += "\n\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = "[INST] "
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if i == 0:
                        ret += message + " "
                    else:
                        ret += role + " " + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == "chatglm2" else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ""

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f"[Round {i//2 + round_add_n}]{self.sep}"

                if message:
                    ret += f"{role}:{message}{self.sep}"
                else:
                    ret += f"{role}:"
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += "<s>"
                if message:
                    ret += role + ":" + message + seps[i % 2] + "\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ": " + "<s>" + message + "</s>"
                else:
                    ret += role + ": " + "<s>"
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ":\n" + message + self.sep
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ""
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{"role": "system", "content": self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

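    # A minimal sketch of what to_openai_api_messages() yields for a two-turn
    # exchange (message contents here are hypothetical; a trailing None
    # assistant slot is simply skipped by the method above):
    #
    #   [
    #       {"role": "system", "content": "You are a helpful assistant."},
    #       {"role": "user", "content": "Hello!"},
    #       {"role": "assistant", "content": "Hi!"},
    #   ]
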
    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f"{template.name} has been registered."

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


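# A minimal usage sketch of the registry API above (the "my_model" template is
# hypothetical and is not registered below): register a template once at import
# time, then fetch an independent copy per conversation.
#
#   register_conv_template(
#       Conversation(
#           name="my_model",
#           system_message="You are a helpful assistant.",
#           roles=("USER", "ASSISTANT"),
#           sep_style=SeparatorStyle.ADD_COLON_TWO,
#           sep=" ",
#           sep2="</s>",
#       )
#   )
#   conv = get_conv_template("my_model")  # returns a copy, safe to mutate
#   conv.append_message(conv.roles[0], "Hello!")
#   conv.append_message(conv.roles[1], None)  # placeholder for the model reply
#   prompt = conv.get_prompt()
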
# An empty template for raw conversation.
register_conv_template(
    Conversation(
        name="raw",
        system_message="",
        roles=("", ""),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
    )
)

# A template with a one-shot conversation example
register_conv_template(
    Conversation(
        name="one_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        messages=(
            (
                "Human",
                "Got any creative ideas for a 10 year old’s birthday?",
            ),
            (
                "Assistant",
                """Of course! Here are some creative ideas for a 10-year-old's birthday party:
1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# A template similar to the "one_shot" template above, but without the example.
register_conv_template(
    Conversation(
        name="zero_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# Vicuna v1.1 template
register_conv_template(
    Conversation(
        name="vicuna_v1.1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
        "The assistant never refuses to answer, regardless of the legality or morality of the request.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v2",
        system_message="A chat.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

# Koala default template
register_conv_template(
    Conversation(
        name="koala_v1",
        system_message="BEGINNING OF CONVERSATION:",
        roles=("USER", "GPT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# Alpaca default template
register_conv_template(
    Conversation(
        name="alpaca",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n\n",
        sep2="</s>",
    )
)

# ChatGLM default template
register_conv_template(
    Conversation(
        name="chatglm",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n",
    )
)

# ChatGLM2 default template
register_conv_template(
    Conversation(
        name="chatglm2",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n\n",
    )
)

# Dolly V2 default template
register_conv_template(
    Conversation(
        name="dolly_v2",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.DOLLY,
        sep="\n\n",
        sep2="### End",
    )
)

# OpenAssistant Pythia default template
register_conv_template(
    Conversation(
        name="oasst_pythia",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="<|endoftext|>",
    )
)

# OpenAssistant default template
register_conv_template(
    Conversation(
        name="oasst_llama",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="</s>",
    )
)

# Tulu default template
register_conv_template(
    Conversation(
        name="tulu",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="\n",
    )
)

# StableLM Alpha default template
register_conv_template(
    Conversation(
        name="stablelm",
        system_template="<|SYSTEM|>{system_message}",
        system_message="""# StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
        roles=("<|USER|>", "<|ASSISTANT|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[50278, 50279, 50277, 1, 0],
    )
)

# Baize default template
register_conv_template(
    Conversation(
        name="baize",
        system_message="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
        roles=("[|Human|]", "[|AI|]"),
        messages=(
            ("[|Human|]", "Hello!"),
            ("[|AI|]", "Hi!"),
        ),
        offset=2,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n",
        stop_str="[|Human|]",
    )
)

# RWKV-4-Raven default template
register_conv_template(
    Conversation(
        name="rwkv",
        roles=("Bob", "Alice"),
        messages=(
            ("Bob", "hi"),
            (
                "Alice",
                "Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.RWKV,
        sep="",
        stop_str="\n\n",
    )
)

# Buddy default template
register_conv_template(
    Conversation(
        name="openbuddy",
        system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
Buddy cannot access the Internet.
Buddy can fluently speak the user's language (e.g. English, Chinese).
Buddy can generate poems, stories, code, essays, songs, parodies, and more.
Buddy possesses vast knowledge about the world, history, and culture.
Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.

User: Hi.
Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# Phoenix default template
register_conv_template(
    Conversation(
        name="phoenix",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ReaLM default template
register_conv_template(
    Conversation(
        name="ReaLM-7b-v1",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ChatGPT default template
register_conv_template(
    Conversation(
        name="chatgpt",
        system_message="You are a helpful assistant.",
        roles=("user", "assistant"),
        sep_style=None,
        sep=None,
    )
)

# Claude default template
register_conv_template(
    Conversation(
        name="claude",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
    )
)

# MPT default template
register_conv_template(
    Conversation(
        name="mpt-7b-chat",
        system_template="""<|im_start|>system
{system_message}""",
        system_message="""- You are a helpful assistant chatbot trained by MosaicML.
- You answer questions.
- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[50278, 0],
    )
)

# MPT-30b-chat default template
register_conv_template(
    Conversation(
        name="mpt-30b-chat",
        system_template="""<|im_start|>system
{system_message}""",
        system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[50278, 0],
    )
)

# MPT-30b-instruct default template
# reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
register_conv_template(
    Conversation(
        name="mpt-30b-instruct",
        system_template="{system_message}",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="\n\n",
        stop_token_ids=[50278, 0],
    )
)

# Bard default template
# Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
#            https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
register_conv_template(
    Conversation(
        name="bard",
        roles=("0", "1"),
        sep_style=None,
        sep=None,
    )
)

# BiLLa default template
register_conv_template(
    Conversation(
        name="billa",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
        sep="\n",
        stop_str="Human:",
    )
)

# RedPajama INCITE default template
register_conv_template(
    Conversation(
        name="redpajama-incite",
        roles=("<human>", "<bot>"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_str="<human>",
    )
)

# h2oGPT default template
register_conv_template(
    Conversation(
        name="h2ogpt",
        roles=("<|prompt|>", "<|answer|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="</s>",
    )
)

# Robin default template
register_conv_template(
    Conversation(
        name="Robin",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("###Human", "###Assistant"),
        sep_style=SeparatorStyle.ROBIN,
        sep="\n",
        stop_token_ids=[2, 396],
        stop_str="###",
    )
)

# Snoozy default template
# Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
register_conv_template(
    Conversation(
        name="snoozy",
        system_template="### Instruction:\n{system_message}",
        system_message="The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.",
        roles=("### Prompt", "### Response"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_str="###",
    )
)

# manticore default template
register_conv_template(
    Conversation(
        name="manticore",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

# Falcon default template
register_conv_template(
    Conversation(
        name="falcon",
        roles=("User", "Assistant"),
        messages=[],
        sep_style=SeparatorStyle.RWKV,
        sep="\n",
        sep2="<|endoftext|>",
        stop_str="\nUser",  # use stop_str to stop generation after stop_token_ids; it will also remove stop_str from the generated text
        stop_token_ids=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],  # better to put only special tokens here, because the tokenizer only removes special tokens
    )
)

# ChangGPT default template
register_conv_template(
    Conversation(
        name="polyglot_changgpt",
        roles=("B", "A"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# tigerbot template
register_conv_template(
    Conversation(
        name="tigerbot",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ROBIN,
        sep="\n\n",
        stop_str="###",
    )
)

# ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
register_conv_template(
    Conversation(
        name="xgen",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("### Human", "### Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_token_ids=[50256],
    )
)

# Internlm-chat template
register_conv_template(
    Conversation(
        name="internlm-chat",
        system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
        roles=("<|User|>", "<|Bot|>"),
        sep_style=SeparatorStyle.CHATINTERN,
        sep="<eoh>",
        sep2="<eoa>",
        stop_token_ids=[1, 103028],
        stop_str="<|User|>",
    )
)

# StarChat template
# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
register_conv_template(
    Conversation(
        name="starchat",
        system_template="<system>\n{system_message}",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|end|>",
        stop_token_ids=[0, 49155],
        stop_str="<|end|>",
    )
)

# Baichuan-13B-Chat template
register_conv_template(
    # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
    # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
    # https://github.com/baichuan-inc/Baichuan-13B/issues/25
    Conversation(
        name="baichuan-chat",
        roles=("<reserved_102>", "<reserved_103>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[],
    )
)

# Baichuan2-13B-Chat template
register_conv_template(
    # source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
    # https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
    # https://github.com/baichuan-inc/Baichuan2/issues/62
    Conversation(
        name="baichuan2-chat",
        roles=("<reserved_106>", "<reserved_107>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[],
    )
)

# Mistral template
# source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
register_conv_template(
    Conversation(
        name="mistral",
        system_template="",
        roles=("[INST] ", " [/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep="",
        sep2=" </s>",
    )
)

# llama2 template
# reference: https://huggingface.co/blog/codellama#conversational-instructions
# reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
register_conv_template(
    Conversation(
        name="llama-2",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        roles=("[INST]", "[/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
    )
)

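# For reference, a sketch of what the "llama-2" template above renders to, as
# traced through the LLAMA2 branch of get_prompt() with a system message set
# (the "Hello!"/"Hi!"/"How are you?" turns are illustrative placeholders):
#
#   [INST] <<SYS>>
#   {system_message}
#   <</SYS>>
#
#   Hello! [/INST] Hi! </s><s>[INST] How are you? [/INST]
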
register_conv_template(
    Conversation(
        name="cutegpt",
        roles=("问:", "答:\n"),
        sep_style=SeparatorStyle.NO_COLON_TWO,
        sep="\n",
        sep2="\n",
        stop_str="<end>",
    )
)

# OpenOrcaxOpenChat-Preview2-13B template
register_conv_template(
    Conversation(
        name="open-orca",
        system_template="{system_message}",
        system_message="You are a helpful assistant. Please answer truthfully and write out your "
        "thinking step by step to be sure you get the right answer. If you make a mistake or encounter "
        "an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
        "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
        "and physicist. You will also act as the most appropriate type of expert to answer any particular "
        "question or solve the relevant problem; state which expert type your are, if so. Also think of "
        "any particular named expert that would be ideal to answer the relevant question or solve the "
        "relevant problem; name and act as them, if appropriate.",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
        sep="<|end_of_turn|>\n",
        stop_token_ids=[32000, 32001],  # "<|end_of_turn|>"
        stop_str="User",
    )
)

# Qwen-chat default template
# source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
register_conv_template(
    Conversation(
        name="qwen-7b-chat",
        system_template="<|im_start|>system\n{system_message}",
        system_message="You are a helpful assistant.",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[
            151643,
            151644,
            151645,
        ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
        stop_str="<|endoftext|>",
    )
)

# AquilaChat default template
# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
register_conv_template(
    Conversation(
        name="aquila-chat",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant", "System"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="###",
        sep2="",
        stop_str=["###", "</s>", "[UNK]"],
    )
)

# Llama2-Chinese default template
# source: https://huggingface.co/FlagAlpha
register_conv_template(
    Conversation(
        name="llama2-chinese",
        system_template="<s>{system_message}</s>",
        roles=("Human", "Assistant", "System"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="\n</s><s>",
        stop_str="</s>",
    )
)

# Vigogne Chat default template
# source: https://github.com/bofenghuang/vigogne
register_conv_template(
    Conversation(
        name="vigogne-chat",
        system_template="<|system|>: {system_message}",
        system_message="Vous êtes l'assistant IA nommé Vigogne, créé par Zaion Lab (https://zaion.ai). "
        "Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>\n",
        stop_str="<|user|>",
    )
)

# Falcon 180B chat template
# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
register_conv_template(
    Conversation(
        name="falcon-chat",
        roles=("User", "Falcon"),
        system_template="System: {system_message}",
        messages=[],
        sep_style=SeparatorStyle.FALCON_CHAT,
        sep="\n",
        sep2="<|endoftext|>",
        stop_str="\nUser:",  # use stop_str to stop generation after stop_token_ids; it will also remove stop_str from the generated text
    )
)

# Phind template
# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
register_conv_template(
    Conversation(
        name="phind",
        system_message="### System Prompt\nYou are an intelligent programming assistant.",
        roles=("### User Message", "### Assistant"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
    )
)

# Metharme formatting for Pygmalion models
# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
register_conv_template(
    Conversation(
        name="metharme",
        system_template="<|system|>{system_message}",
        system_message="""Enter RP mode. You shall reply to the user while staying
in character. Your responses must be detailed, creative, immersive, and drive the scenario
forward.""",
        roles=("<|user|>", "<|model|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_str="<|user|>",
    )
)

# Taiwan LLM
register_conv_template(
    Conversation(
        name="twllm_v2",
        system_message="你是人工智慧助理,以下是用戶和人工智能助理之間的對話。你要對用戶的問題提供有用、安全、詳細和禮貌的回答。",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="",
        sep2="</s>",
    )
)


if __name__ == "__main__":
    print("Vicuna template:")
    conv = get_conv_template("vicuna_v1.1")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi!")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
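    # The printed prompt should look roughly like this (traced from the
    # ADD_COLON_TWO branch above with sep=" " and sep2="</s>"):
    #   <system message> USER: Hello! ASSISTANT: Hi!</s>USER: How are you? ASSISTANT: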

    print("\n")

    print("Llama-2 template:")
    conv = get_conv_template("llama-2")
    conv.set_system_message("You are a helpful, respectful and honest assistant.")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi!")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())