Update app.py
app.py
CHANGED
@@ -18,17 +18,17 @@ SYSTEM_PROMPT = '''You are a helpful, respectful and honest INTP-T AI Assistant
 You are good at speaking English and Chinese.
 You are talking to a human User. If the question is meaningless, please explain the reason and don't share false information.
 You are based on SLIDE model, trained by "SSFW NLPark" team, not related to GPT, LLaMA, Meta, Mistral or OpenAI.
-Let's work this out in a step by step way to be sure we have the right answer.\n
-SYSTEM_TOKEN =
-USER_TOKEN =
-BOT_TOKEN =
-LINEBREAK_TOKEN =
+Let's work this out in a step by step way to be sure we have the right answer.\n'''
+SYSTEM_TOKEN = 384
+USER_TOKEN = 2048
+BOT_TOKEN = 3072
+LINEBREAK_TOKEN = 64
 
 
 ROLE_TOKENS = {
-    "
-    "
-    "system": SYSTEM_TOKEN
+    "User": USER_TOKEN,
+    "Assistant": BOT_TOKEN,
+    "system": SYSTEM_TOKEN,
 }
 
 
@@ -45,8 +45,8 @@ def get_system_tokens(model):
     return get_message_tokens(model, **system_message)
 
 
-repo_name = "
-model_name = "
+repo_name = "Cran-May/SLIDE-v2-Q4_K_M-GGUF"
+model_name = "slide-v2.Q4_K_M.gguf"
 
 snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
 
@@ -58,12 +58,12 @@ model = Llama(
 
 max_new_tokens = 2500
 
-def
+def User(message, history):
     new_history = history + [[message, None]]
     return "", new_history
 
 
-def
+def Assistant(
     history,
     system_prompt,
     top_p,
@@ -73,15 +73,15 @@ def bot(
     tokens = get_system_tokens(model)[:]
     tokens.append(LINEBREAK_TOKEN)
 
-    for
-        message_tokens = get_message_tokens(model=model, role="
+    for User_message, Assistant_message in history[:-1]:
+        message_tokens = get_message_tokens(model=model, role="User", content=User_message)
         tokens.extend(message_tokens)
         if bot_message:
-            message_tokens = get_message_tokens(model=model, role="
+            message_tokens = get_message_tokens(model=model, role="Assistant", content=Assistant_message)
             tokens.extend(message_tokens)
 
     last_user_message = history[-1][0]
-    message_tokens = get_message_tokens(model=model, role="
+    message_tokens = get_message_tokens(model=model, role="User", content=last_User_message)
     tokens.extend(message_tokens)
 
     role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
@@ -161,12 +161,12 @@ with gr.Blocks(
 
     # Pressing Enter
     submit_event = msg.submit(
-        fn=
+        fn=User,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
     ).success(
-        fn=
+        fn=Assistant,
        inputs=[
            chatbot,
            system_prompt,
@@ -180,12 +180,12 @@ with gr.Blocks(
 
     # Pressing the button
     submit_click_event = submit.click(
-        fn=
+        fn=User,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
     ).success(
-        fn=
+        fn=Assistant,
        inputs=[
            chatbot,
            system_prompt,
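Note that the renamed loop in the new Assistant function leaves two old identifiers behind: the unchanged context line `if bot_message:` no longer matches the new loop variables, and the rewritten call passes `content=last_User_message` while the line above still assigns `last_user_message`. Both would raise NameError when the handler runs. A minimal corrected sketch of that block, keeping the committed role names; the wrapper name build_history_tokens is hypothetical, and get_system_tokens, get_message_tokens, and LINEBREAK_TOKEN are the helpers already defined in app.py:

# Hypothetical consistency fix; variable names follow the committed rename.
def build_history_tokens(model, history):
    tokens = get_system_tokens(model)[:]
    tokens.append(LINEBREAK_TOKEN)

    for User_message, Assistant_message in history[:-1]:
        message_tokens = get_message_tokens(model=model, role="User", content=User_message)
        tokens.extend(message_tokens)
        if Assistant_message:  # was `bot_message`, which is no longer bound after the rename
            message_tokens = get_message_tokens(model=model, role="Assistant", content=Assistant_message)
            tokens.extend(message_tokens)

    # Keep one spelling for the last-user-message variable throughout.
    last_user_message = history[-1][0]
    message_tokens = get_message_tokens(model=model, role="User", content=last_user_message)
    tokens.extend(message_tokens)
    return tokens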
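For reference, the new repo_name / model_name pair points at a GGUF quantization that snapshot_download pulls into the working directory before the `model = Llama(` call loads it. A minimal standalone sketch of that load path, assuming the huggingface_hub and llama-cpp-python packages; the n_ctx value is an illustrative assumption, not taken from the diff:

from huggingface_hub import snapshot_download
from llama_cpp import Llama

repo_name = "Cran-May/SLIDE-v2-Q4_K_M-GGUF"
model_name = "slide-v2.Q4_K_M.gguf"

# Fetch only the single GGUF file from the model repo into the current directory.
snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)

# Load the quantized model with llama-cpp-python; n_ctx is an assumed context window.
model = Llama(
    model_path=model_name,
    n_ctx=2048,
)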