XingHe0127 committed on
Commit
16b7b35
1 parent: ed5b2a0

Delete utils.py

Files changed (1)
  1. utils.py +0 -424
utils.py DELETED
@@ -1,424 +0,0 @@
- # -*- coding:utf-8 -*-
- from __future__ import annotations
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
- import logging
- import json
- import gradio as gr
- # import openai
- import os
- import traceback
- import requests
- # import markdown
- import csv
- import mdtex2html
- from pypinyin import lazy_pinyin
- from presets import *
- import tiktoken
- from tqdm import tqdm
- import colorama
- from duckduckgo_search import ddg
- import datetime
-
- # logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
-
- if TYPE_CHECKING:
-     from typing import TypedDict
-
-     class DataframeData(TypedDict):
-         headers: List[str]
-         data: List[List[str | int | bool]]
-
- initial_prompt = "You are ChatGPT, a large language model trained by OpenAI. Your knowledge cutoff is September 2021."
- API_URL = "https://api.openai.com/v1/chat/completions"
- HISTORY_DIR = "history"
- TEMPLATES_DIR = "templates"
-
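- # postprocess appears intended to replace gr.Chatbot.postprocess (note it takes
- # self): it leaves the user message untouched and renders the assistant reply to
- # HTML with mdtex2html, which also converts LaTeX math (hence its use over the
- # commented-out markdown.markdown calls).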
- def postprocess(
-     self, y: List[Tuple[str | None, str | None]]
- ) -> List[Tuple[str | None, str | None]]:
-     """
-     Parameters:
-         y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
-     Returns:
-         List of tuples representing the message and response. Each message and response will be a string of HTML.
-     """
-     if y is None:
-         return []
-     for i, (message, response) in enumerate(y):
-         y[i] = (
-             # None if message is None else markdown.markdown(message),
-             # None if response is None else markdown.markdown(response),
-             None if message is None else message,
-             None if response is None else mdtex2html.convert(response),
-         )
-     return y
-
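- # count_token measures prompt length with tiktoken's "cl100k_base" encoding,
- # the tokenizer used by the gpt-3.5-turbo chat models.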
- def count_token(input_str):
-     encoding = tiktoken.get_encoding("cl100k_base")
-     length = len(encoding.encode(input_str))
-     return length
-
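- # parse_text converts raw model output to HTML: a running count of ``` fences
- # toggles <pre><code> blocks on and off, and while a fence is open the
- # Markdown/HTML special characters are escaped to entities so code displays verbatim.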
- def parse_text(text):
-     lines = text.split("\n")
-     lines = [line for line in lines if line != ""]
-     count = 0
-     for i, line in enumerate(lines):
-         if "```" in line:
-             count += 1
-             items = line.split('`')
-             if count % 2 == 1:
-                 lines[i] = f'<pre><code class="language-{items[-1]}">'
-             else:
-                 lines[i] = '<br></code></pre>'
-         else:
-             if i > 0:
-                 if count % 2 == 1:
-                     line = line.replace("`", "\\`")
-                     line = line.replace("<", "&lt;")
-                     line = line.replace(">", "&gt;")
-                     line = line.replace(" ", "&nbsp;")
-                     line = line.replace("*", "&ast;")
-                     line = line.replace("_", "&lowbar;")
-                     line = line.replace("-", "&#45;")
-                     line = line.replace(".", "&#46;")
-                     line = line.replace("!", "&#33;")
-                     line = line.replace("(", "&#40;")
-                     line = line.replace(")", "&#41;")
-                     line = line.replace("$", "&#36;")
-                 lines[i] = "<br>" + line
-     text = "".join(lines)
-     return text
-
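- # The construct_* helpers wrap plain strings into OpenAI chat-format message
- # dicts of the shape {"role": ..., "content": ...}.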
- def construct_text(role, text):
-     return {"role": role, "content": text}
-
- def construct_user(text):
-     return construct_text("user", text)
-
- def construct_system(text):
-     return construct_text("system", text)
-
- def construct_assistant(text):
-     return construct_text("assistant", text)
-
- def construct_token_message(token, stream=False):
-     return f"Token count: {token}"
-
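- # get_response prepends the system prompt to the history and POSTs the payload to
- # the chat completions endpoint; timeout_streaming / timeout_all come from presets.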
- def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model):
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {openai_api_key}"
-     }
-
-     history = [construct_system(system_prompt), *history]
-
-     payload = {
-         "model": selected_model,
-         "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
-         "temperature": temperature,  # 1.0,
-         "top_p": top_p,  # 1.0,
-         "n": 1,
-         "stream": stream,
-         "presence_penalty": 0,
-         "frequency_penalty": 0,
-     }
-     if stream:
-         timeout = timeout_streaming
-     else:
-         timeout = timeout_all
-     response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
-     return response
-
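- # stream_predict is a generator: it yields (chatbot, history, status_text,
- # all_token_counts) after every delta so Gradio can refresh the UI live.
- # The API streams server-sent events, one JSON object per line prefixed with
- # "data: " (roughly: data: {"choices":[{"delta":{"content":"..."},"finish_reason":null}]}),
- # which is why the code parses json.loads(chunk[6:]).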
- def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
-     def get_return_value():
-         return chatbot, history, status_text, all_token_counts
-
-     logging.info("Streaming answer mode")
-     partial_words = ""
-     counter = 0
-     status_text = "Starting to stream the answer..."
-     history.append(construct_user(inputs))
-     history.append(construct_assistant(""))
-     chatbot.append((parse_text(inputs), ""))
-     user_token_count = 0
-     if len(all_token_counts) == 0:
-         system_prompt_token_count = count_token(system_prompt)
-         user_token_count = count_token(inputs) + system_prompt_token_count
-     else:
-         user_token_count = count_token(inputs)
-     all_token_counts.append(user_token_count)
-     logging.info(f"Input token count: {user_token_count}")
-     yield get_return_value()
-     try:
-         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True, selected_model)
-     except requests.exceptions.ConnectTimeout:
-         status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-         yield get_return_value()
-         return
-     except requests.exceptions.ReadTimeout:
-         status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
-         yield get_return_value()
-         return
-
-     yield get_return_value()
-     error_json_str = ""
-
-     for chunk in tqdm(response.iter_lines()):
-         if counter == 0:
-             counter += 1
-             continue
-         counter += 1
-         # check whether each line is non-empty
-         if chunk:
-             # decode each line, as response data arrives in bytes
-             chunk = chunk.decode()
-             chunklength = len(chunk)
-             try:
-                 chunk = json.loads(chunk[6:])
-             except json.JSONDecodeError:
-                 logging.info(chunk)
-                 error_json_str += chunk
-                 status_text = f"JSON decode error. Please reset the conversation. Received: {error_json_str}"
-                 yield get_return_value()
-                 continue
-             if chunklength > 6 and "delta" in chunk['choices'][0]:
-                 finish_reason = chunk['choices'][0]['finish_reason']
-                 status_text = construct_token_message(sum(all_token_counts), stream=True)
-                 if finish_reason == "stop":
-                     yield get_return_value()
-                     break
-                 try:
-                     partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
-                 except KeyError:
-                     status_text = standard_error_msg + "No content found in the API response. The token count has most likely reached the limit. Please reset the conversation. Current token count: " + str(sum(all_token_counts))
-                     yield get_return_value()
-                     break
-                 history[-1] = construct_assistant(partial_words)
-                 chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
-                 all_token_counts[-1] += 1
-                 yield get_return_value()
-
-
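- # predict_all is the non-streaming path: one blocking request, with the reply's
- # token usage taken from the API's "usage" field rather than counted locally.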
- def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
-     logging.info("One-shot answer mode")
-     history.append(construct_user(inputs))
-     history.append(construct_assistant(""))
-     chatbot.append((parse_text(inputs), ""))
-     all_token_counts.append(count_token(inputs))
-     try:
-         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
-     except requests.exceptions.ConnectTimeout:
-         status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
-         return chatbot, history, status_text, all_token_counts
-     except requests.exceptions.ProxyError:
-         status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
-         return chatbot, history, status_text, all_token_counts
-     except requests.exceptions.SSLError:
-         status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
-         return chatbot, history, status_text, all_token_counts
-     response = json.loads(response.text)
-     content = response["choices"][0]["message"]["content"]
-     history[-1] = construct_assistant(content)
-     chatbot[-1] = (parse_text(inputs), parse_text(content))
-     total_token_count = response["usage"]["total_tokens"]
-     all_token_counts[-1] = total_token_count - sum(all_token_counts)
-     status_text = construct_token_message(total_token_count)
-     return chatbot, history, status_text, all_token_counts
-
-
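- # predict dispatches to the streaming or one-shot path. If web search is enabled,
- # it first fetches three DuckDuckGo results and splices them into the prompt via
- # the websearch_prompt template; afterwards it triggers reduce_token_size once the
- # running token total exceeds the limit. The 51-character check is a crude sanity
- # test for an OpenAI API key ("sk-..." keys were 51 characters at the time).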
- def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=MODELS[0], use_websearch_checkbox=False, should_check_token_count=True):  # repetition_penalty, top_k
-     logging.info("Input: " + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
-     if use_websearch_checkbox:
-         results = ddg(inputs, max_results=3)
-         web_results = []
-         for idx, result in enumerate(results):
-             logging.info(f"Search result {idx + 1}: {result}")
-             web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
-         web_results = "\n\n".join(web_results)
-         today = datetime.datetime.today().strftime("%Y-%m-%d")
-         inputs = websearch_prompt.replace("{current_date}", today).replace("{query}", inputs).replace("{web_results}", web_results)
-     if len(openai_api_key) != 51:
-         status_text = standard_error_msg + no_apikey_msg
-         logging.info(status_text)
-         chatbot.append((parse_text(inputs), ""))
-         if len(history) == 0:
-             history.append(construct_user(inputs))
-             history.append("")
-             all_token_counts.append(0)
-         else:
-             history[-2] = construct_user(inputs)
-         yield chatbot, history, status_text, all_token_counts
-         return
-     if stream:
-         yield chatbot, history, "Generating the answer...", all_token_counts
-     if stream:
-         logging.info("Using streaming")
-         iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
-         for chatbot, history, status_text, all_token_counts in iter:
-             yield chatbot, history, status_text, all_token_counts
-     else:
-         logging.info("Not using streaming")
-         chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
-         yield chatbot, history, status_text, all_token_counts
-     logging.info(f"Transfer complete. Current token counts: {all_token_counts}")
-     if len(history) > 1 and history[-1]['content'] != inputs:
-         logging.info("Answer: " + colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
-     if stream:
-         max_token = max_token_streaming
-     else:
-         max_token = max_token_all
-     if sum(all_token_counts) > max_token and should_check_token_count:
-         status_text = f"Reducing tokens: {all_token_counts}/{max_token}"
-         logging.info(status_text)
-         yield chatbot, history, status_text, all_token_counts
-         iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=selected_model, hidden=True)
-         for chatbot, history, status_text, all_token_counts in iter:
-             status_text = f"Token limit reached; token count automatically reduced to {status_text}"
-             yield chatbot, history, status_text, all_token_counts
-
-
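- # retry pops the last user/assistant pair off the history and replays the user
- # message through predict.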
- def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0]):
-     logging.info("Retrying...")
-     if len(history) == 0:
-         yield chatbot, history, f"{standard_error_msg}The context is empty", token_count
-         return
-     history.pop()
-     inputs = history.pop()["content"]
-     token_count.pop()
-     iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model)
-     logging.info("Retry finished")
-     for x in iter:
-         yield x
-
-
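- # reduce_token_size sends summarize_prompt as a pseudo user message so the model
- # compresses the conversation, then keeps only the final summary exchange.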
- def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0], hidden=False):
-     logging.info("Starting to reduce the token count...")
-     iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model, should_check_token_count=False)
-     logging.info(f"chatbot: {chatbot}")
-     for chatbot, history, status_text, previous_token_count in iter:
-         history = history[-2:]
-         token_count = previous_token_count[-1:]
-         if hidden:
-             chatbot.pop()
-         yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
-     logging.info("Token count reduction finished")
-
-
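- # delete_last_conversation removes the most recent exchange; if the last reply
- # was an error message it only existed in the chatbot pane, so only that is popped.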
- def delete_last_conversation(chatbot, history, previous_token_count):
-     if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
-         logging.info("The last reply contains an error message; deleting only the chatbot record")
-         chatbot.pop()
-         return chatbot, history
-     if len(history) > 0:
-         logging.info("Deleted one round of conversation history")
-         history.pop()
-         history.pop()
-     if len(chatbot) > 0:
-         logging.info("Deleted one round of chatbot conversation")
-         chatbot.pop()
-     if len(previous_token_count) > 0:
-         logging.info("Deleted one round's token count record")
-         previous_token_count.pop()
-     return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
-
-
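- # Chat histories are persisted as JSON objects of the shape
- # {"system": ..., "history": ..., "chatbot": ...} under HISTORY_DIR.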
- def save_chat_history(filename, system, history, chatbot):
-     logging.info("Saving conversation history...")
-     if filename == "":
-         return
-     if not filename.endswith(".json"):
-         filename += ".json"
-     os.makedirs(HISTORY_DIR, exist_ok=True)
-     json_s = {"system": system, "history": history, "chatbot": chatbot}
-     logging.info(json_s)
-     with open(os.path.join(HISTORY_DIR, filename), "w") as f:
-         json.dump(json_s, f, ensure_ascii=False, indent=4)
-     logging.info("Conversation history saved")
-
-
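- # load_chat_history also migrates the legacy format, where history was a flat
- # list of strings alternating user/assistant, into role-tagged message dicts.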
- def load_chat_history(filename, system, history, chatbot):
-     logging.info("Loading conversation history...")
-     try:
-         with open(os.path.join(HISTORY_DIR, filename), "r") as f:
-             json_s = json.load(f)
-         try:
-             if isinstance(json_s["history"][0], str):
-                 logging.info("History is in the legacy format; converting...")
-                 new_history = []
-                 for index, item in enumerate(json_s["history"]):
-                     if index % 2 == 0:
-                         new_history.append(construct_user(item))
-                     else:
-                         new_history.append(construct_assistant(item))
-                 json_s["history"] = new_history
-                 logging.info(new_history)
-         except:
-             # no conversation history to convert
-             pass
-         logging.info("Conversation history loaded")
-         return filename, json_s["system"], json_s["history"], json_s["chatbot"]
-     except FileNotFoundError:
-         logging.info("Conversation history file not found; doing nothing")
-         return filename, system, history, chatbot
-
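- # File and template names are sorted by the pinyin of their first character so
- # Chinese filenames come out in a predictable order.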
- def sorted_by_pinyin(items):
-     return sorted(items, key=lambda char: lazy_pinyin(char)[0][0])
-
- def get_file_names(dir, plain=False, filetypes=[".json"]):
-     logging.info(f"Getting file name list; directory: {dir}, file types: {filetypes}, return plain list: {plain}")
-     files = []
-     try:
-         for filetype in filetypes:
-             files += [f for f in os.listdir(dir) if f.endswith(filetype)]
-     except FileNotFoundError:
-         files = []
-     files = sorted_by_pinyin(files)
-     if files == []:
-         files = [""]
-     if plain:
-         return files
-     else:
-         return gr.Dropdown.update(choices=files)
-
- def get_history_names(plain=False):
-     logging.info("Getting the list of history file names")
-     return get_file_names(HISTORY_DIR, plain)
-
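- # load_template reads prompt templates either from JSON ([{"act": ..., "prompt": ...}])
- # or from CSV with a header row; mode selects dict, dropdown choices, or both.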
- def load_template(filename, mode=0):
-     logging.info(f"Loading template file {filename}, mode {mode} (0 returns dict and dropdown, 1 returns dropdown, 2 returns dict)")
-     lines = []
-     logging.info("Loading template...")
-     if filename.endswith(".json"):
-         with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
-             lines = json.load(f)
-             lines = [[i["act"], i["prompt"]] for i in lines]
-     else:
-         with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
-             reader = csv.reader(csvfile)
-             lines = list(reader)
-             lines = lines[1:]
-     if mode == 1:
-         return sorted_by_pinyin([row[0] for row in lines])
-     elif mode == 2:
-         return {row[0]: row[1] for row in lines}
-     else:
-         choices = sorted_by_pinyin([row[0] for row in lines])
-         return {row[0]: row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
-
- def get_template_names(plain=False):
-     logging.info("Getting the list of template file names")
-     return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", ".json"])
-
- def get_template_content(templates, selection, original_system_prompt):
-     logging.info(f"Applying template; selection: {selection}, original system prompt: {original_system_prompt}")
-     try:
-         return templates[selection]
-     except:
-         return original_system_prompt
-
- def reset_state():
-     logging.info("Resetting state")
-     return [], [], [], construct_token_message(0)
-
- def reset_textbox():
-     return gr.update(value='')