XingHe0127 committed on
Commit 5851510
1 Parent(s): 04b3136

Upload 6 files

Files changed (5):
  1. Chatbot.py +72 -65
  2. README.md +360 -10
  3. presets.py +58 -11
  4. requirements.txt +5 -0
  5. utils.py +167 -75
Chatbot.py CHANGED
@@ -1,11 +1,13 @@
import gradio as gr
- # import openai
import os
import sys
import argparse
from utils import *
from presets import *


my_api_key = os.getenv("OpenAI-API")    # enter your API key here

@@ -20,7 +22,7 @@ authflag = False
if dockerflag:
    my_api_key = os.environ.get('my_api_key')
    if my_api_key == "empty":
-         print("Please give a api key!")
        sys.exit(1)
    # auth
    username = os.environ.get('USERNAME')
@@ -42,12 +44,6 @@ else:
gr.Chatbot.postprocess = postprocess

with gr.Blocks(css=customCSS) as demo:
-     gr.HTML(title)
-     with gr.Row():
-         keyTxt = gr.Textbox(show_label=False, placeholder=f"在这里输入你的OpenAI API-key...",
-                             value=my_api_key, type="password", visible=not HIDE_MY_KEY).style(container=True)
-     use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
-     chatbot = gr.Chatbot()  # .style(color_map=("#1D51EE", "#585A5B"))
    history = gr.State([])
    token_count = gr.State([])
    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
@@ -56,71 +52,82 @@ with gr.Blocks(css=customCSS) as demo:
    topic = gr.State("未命名对话历史记录")

    with gr.Row():
-         with gr.Column(scale=12):
-             user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
-                 container=False)
-         with gr.Column(min_width=50, scale=1):
-             submitBtn = gr.Button("🚀", variant="primary")
-     with gr.Row():
-         emptyBtn = gr.Button("🧹 新的对话")
-         retryBtn = gr.Button("🔄 重新生成")
-         delLastBtn = gr.Button("🗑️ 删除最近一条对话")
-         reduceTokenBtn = gr.Button("♻️ 总结对话")
-     status_display = gr.Markdown("status: ready")
-     systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
-                                  label="System prompt", value=initial_prompt).style(container=True)
-     with gr.Accordion(label="加载Prompt模板", open=False):
-         with gr.Column():
-             with gr.Row():
-                 with gr.Column(scale=6):
-                     templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0])
-                 with gr.Column(scale=1):
-                     templateRefreshBtn = gr.Button("🔄 刷新")
-                     templaeFileReadBtn = gr.Button("📂 读入模板")
-             with gr.Row():
-                 with gr.Column(scale=6):
-                     templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0])
-                 with gr.Column(scale=1):
-                     templateApplyBtn = gr.Button("⬇️ 应用")
-     with gr.Accordion(label="保存/加载对话历史记录", open=False):
        with gr.Column():
-             with gr.Row():
-                 with gr.Column(scale=6):
-                     saveFileName = gr.Textbox(
-                         show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
-                 with gr.Column(scale=1):
-                     saveHistoryBtn = gr.Button("💾 保存对话")
-             with gr.Row():
-                 with gr.Column(scale=6):
-                     historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
-                 with gr.Column(scale=1):
-                     historyRefreshBtn = gr.Button("🔄 刷新")
-                     historyReadBtn = gr.Button("📂 读入对话")
-     # inputs, top_p, temperature, top_k, repetition_penalty
-     with gr.Accordion("参数", open=False):
-         top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
-                           interactive=True, label="Top-p (nucleus sampling)",)
-         temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0,
-                                 step=0.1, interactive=True, label="Temperature",)
-         # top_k = gr.Slider(minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
-         # repetition_penalty = gr.Slider(minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty",)
-     gr.Markdown(description)


-     user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
    user_input.submit(reset_textbox, [], [user_input])

-     submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
    submitBtn.click(reset_textbox, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)

-     retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)

-     delLastBtn.click(delete_last_conversation, [chatbot, history, token_count, use_streaming_checkbox], [
        chatbot, history, token_count, status_display], show_progress=True)

-     reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox], [chatbot, history, status_display, token_count], show_progress=True)

    saveHistoryBtn.click(save_chat_history, [
        saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
@@ -129,15 +136,15 @@ with gr.Blocks(css=customCSS) as demo:

    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])

-     historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)

    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])

-     templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)

-     templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)

- print("Chatbot启动成功!")
# by default: start a local server, allow direct access via IP, and do not create a public share link
demo.title = "Chatbot"

+ # -*- coding:utf-8 -*-
import gradio as gr
import os
+ import logging
import sys
import argparse
from utils import *
from presets import *

+ logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")

my_api_key = os.getenv("OpenAI-API")    # enter your API key here

if dockerflag:
    my_api_key = os.environ.get('my_api_key')
    if my_api_key == "empty":
+         logging.error("Please give a api key!")
        sys.exit(1)
    # auth
    username = os.environ.get('USERNAME')

gr.Chatbot.postprocess = postprocess

with gr.Blocks(css=customCSS) as demo:
    history = gr.State([])
    token_count = gr.State([])
    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
    topic = gr.State("未命名对话历史记录")

    with gr.Row():
+         gr.HTML(title)
+         status_display = gr.Markdown("status: ready", elem_id="status_display")
+
+     with gr.Row(scale=1).style(equal_height=True):
+         with gr.Column(scale=5):
+             with gr.Row(scale=1):
+                 chatbot = gr.Chatbot().style(height=600)  # .style(color_map=("#1D51EE", "#585A5B"))
+             with gr.Row(scale=1):
+                 with gr.Column(scale=12):
+                     user_input = gr.Textbox(show_label=False, placeholder="在这里输入").style(
+                         container=False)
+                 with gr.Column(min_width=50, scale=1):
+                     submitBtn = gr.Button("🚀", variant="primary")
+             with gr.Row(scale=1):
+                 emptyBtn = gr.Button("🧹 新的对话",)
+                 retryBtn = gr.Button("🔄 重新生成")
+                 delLastBtn = gr.Button("🗑️ 删除一条对话")
+                 reduceTokenBtn = gr.Button("♻️ 总结对话")
+
        with gr.Column():
+             with gr.Column(min_width=50, scale=1):
+                 with gr.Tab(label="ChatGPT"):
+                     keyTxt = gr.Textbox(show_label=True, placeholder=f"OpenAI API-key...", value=my_api_key, type="password", visible=not HIDE_MY_KEY, label="API-Key")
+                     model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0])
+                     with gr.Accordion("参数", open=False):
+                         top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
+                                           interactive=True, label="Top-p (nucleus sampling)",)
+                         temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0,
+                                                 step=0.1, interactive=True, label="Temperature",)
+                     use_streaming_checkbox = gr.Checkbox(label="实时传输回答", value=True, visible=enable_streaming_option)
+                     use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
+
+                 with gr.Tab(label="Prompt"):
+                     systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...", label="System prompt", value=initial_prompt).style(container=True)
+                     with gr.Accordion(label="加载Prompt模板", open=True):
+                         with gr.Column():
+                             with gr.Row():
+                                 with gr.Column(scale=6):
+                                     templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0])
+                                 with gr.Column(scale=1):
+                                     templateRefreshBtn = gr.Button("🔄 刷新")
+                             with gr.Row():
+                                 with gr.Column():
+                                     templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0])
+
+                 with gr.Tab(label="保存/加载"):
+                     with gr.Accordion(label="保存/加载对话历史记录", open=True):
+                         with gr.Column():
+                             with gr.Row():
+                                 with gr.Column(scale=6):
+                                     saveFileName = gr.Textbox(
+                                         show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
+                                 with gr.Column(scale=1):
+                                     saveHistoryBtn = gr.Button("💾 保存对话")
+                             with gr.Row():
+                                 with gr.Column(scale=6):
+                                     historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
+                                 with gr.Column(scale=1):
+                                     historyRefreshBtn = gr.Button("🔄 刷新")

+     gr.Markdown(description)

+     user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
    user_input.submit(reset_textbox, [], [user_input])

+     submitBtn.click(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
    submitBtn.click(reset_textbox, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history, token_count, status_display], show_progress=True)

+     retryBtn.click(retry, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)

+     delLastBtn.click(delete_last_conversation, [chatbot, history, token_count], [
        chatbot, history, token_count, status_display], show_progress=True)

+     reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)

    saveHistoryBtn.click(save_chat_history, [
        saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)

    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])

+     historyFileSelectDropdown.change(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)

    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])

+     templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)

+     templateSelectDropdown.change(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)

+ logging.info(colorama.Back.GREEN + "\n温馨提示:访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
# by default: start a local server, allow direct access via IP, and do not create a public share link
demo.title = "Chatbot"
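One wiring change worth noting above: prompt-template files and saved histories are now loaded when the corresponding dropdown changes (`templateFileSelectDropdown.change`, `historyFileSelectDropdown.change`), replacing the old `templaeFileReadBtn`/`historyReadBtn` read buttons. A minimal, self-contained sketch of that Gradio 3.x pattern, with simplified stand-in names (not the project's real loader):

```python
import gradio as gr

def load_template(name):
    # stand-in for utils.load_template: repopulate the prompt dropdown
    return gr.Dropdown.update(choices=[f"{name}: prompt A", f"{name}: prompt B"])

with gr.Blocks() as demo:
    template_file = gr.Dropdown(choices=["templates.csv"], value="templates.csv", label="Template file")
    prompt = gr.Dropdown(choices=[], label="Prompt")
    # fires on every selection change -- no separate "load" button needed
    template_file.change(load_template, [template_file], [prompt])

demo.launch()
```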
 
README.md CHANGED
@@ -1,12 +1,362 @@
---
- title: Chatbot
- emoji: 💻
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 3.20.1
- app_file: Chatbot.py
- pinned: false
- ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

+ <img height="128" align="left" src="https://user-images.githubusercontent.com/51039745/222689546-7612df0e-e28b-4693-9f5f-4ef2be3daf48.png" alt="Logo">
+
+ # 川虎 ChatGPT 🐯 Chuanhu ChatGPT
+
+ [![LICENSE](https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE)
+ [![Base](https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat)](https://gradio.app/)
+ [![Bilibili](https://img.shields.io/badge/Bilibili-%E8%A7%86%E9%A2%91%E6%95%99%E7%A8%8B-ff69b4?style=flat&logo=bilibili)](https://www.bilibili.com/video/BV1mo4y1r7eE)
+
---

+ A web UI for the ChatGPT API. [Watch the video tutorial](https://www.bilibili.com/video/BV1mo4y1r7eE/) on Bilibili, or [try it online](https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT) on Hugging Face.
+
+ ![Animation Demo](https://user-images.githubusercontent.com/51039745/223148794-f4fd2fcb-3e48-4cdf-a759-7aa463d3f14c.gif)
+
+
+ ## Major updates 🎉🎉🎉
+
+ - Real-time responses, like the official ChatGPT
+ - Unlimited-length conversations
+ - Search results from the internet
+ - Improved save/load
+ - Preset selection from prompt templates
+ - Long code passages shown in code blocks
+ - LaTeX formulas rendered in the output
+
+ ## Table of contents
+ |[Features](#features)|[Tips](#tips)|[Installation](#installation)|[Troubleshooting](#troubleshooting)|
+ | ---- | ---- | ---- | ---- |
+
+
+ ## Features
+ - [x] Real-time streaming of answers, just like the official client!
+ - [x] Retry a conversation to have ChatGPT answer again.
+ - [x] Token optimization: lower token usage to support longer conversations.
+ - [x] Set a System Prompt to establish preconditions effectively
+ - [x] Save/load chat history
+ - [x] Enter the API key in the UI
+ - [x] System Prompt templates: pick a System Prompt from a preset prompt library
+ - [x] Live display of token usage
+
+ ## Tips
+
+ - A System Prompt is a very effective way to set preconditions
+ - For long conversations, use the token-optimization button to lower token usage.
+ - If deploying to a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=99999)`, replacing `99999` with any available port from 1000 to 65535 (see the sketch after this list)
+ - To get a public link, change the last line to `demo.launch(share=True)`. Note that the program must be running for the public link to be reachable
+ - When using prompt templates, first select the template file (`.csv`), then click the load button; you can then pick the prompt you want from the dropdown and click apply to fill it into the System Prompt
+ - The input box supports line breaks: press `shift enter`
+ - When using it on Hugging Face, it is recommended to **duplicate the Space** (top right) first; this greatly reduces queuing and makes the app much more responsive.
+ <img width="300" alt="image" src="https://user-images.githubusercontent.com/51039745/223447310-e098a1f2-0dcf-48d6-bcc5-49472dd7ca0d.png">
+
+ ## Installation
+
+ ### Local deployment
+
+ 1. **Download this project**
+
+ ```shell
+ git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
+ cd ChuanhuChatGPT
+ ```
+ Alternatively, click `Download ZIP` at the top right of the page; after downloading and unzipping, enter the folder and open a `terminal` or `command prompt`.
+
+ <img width="200" alt="downloadZIP" src="https://user-images.githubusercontent.com/23137268/223696317-b89d2c71-c74d-4c6d-8060-a21406cfb8c8.png">
+
+ 2. **Provide your API key**
+
+ Choose any one of the following three methods:
+
+ <details><summary>1. Enter your API key in the UI</summary>
+
+ A key set this way is cleared when the page is refreshed.
+
+ <img width="760" alt="image" src="https://user-images.githubusercontent.com/51039745/222873756-3858bb82-30b9-49bc-9019-36e378ee624d.png"></details>
+ <details><summary>2. Put your OpenAI API key directly in the code</summary>
+
+ A key set this way becomes the default key. Here you can also choose whether to hide the key input box in the UI.
+
+ <img width="525" alt="image" src="https://user-images.githubusercontent.com/51039745/223440375-d472de4b-aa7f-4eae-9170-6dc2ed9f5480.png"></details>
+
+ <details><summary>3. Set a default key, username, and password in files</summary>
+
+ A key set this way survives pulling project updates.
+
+ Create these two files in the project folder: `api_key.txt` and `auth.json`.
+
+ Put your API key in `api_key.txt`, and nothing else.
+
+ Put your username and password in `auth.json`:
+
+ ```
+ {
+     "username": "your username",
+     "password": "your password"
+ }
+ ```
+
+ </details>
+
+ 3. **Install dependencies**
+
+ ```shell
+ pip install -r requirements.txt
+ ```
+
+ If that fails, try
+
+ ```shell
+ pip3 install -r requirements.txt
+ ```
+
+ If it still fails, [install Python](https://www.runoob.com/python/python-install.html) first.
+
+ If downloads are slow, consider [configuring the Tsinghua PyPI mirror](https://mirrors.tuna.tsinghua.edu.cn/help/pypi/) or using a proxy.
+
+ 4. **Launch**
+
+ ```shell
+ python ChuanhuChatbot.py
+ ```
+
+ If that fails, try
+
+ ```shell
+ python3 ChuanhuChatbot.py
+ ```
+
+ If it still fails, [install Python](https://www.runoob.com/python/python-install.html) first.
+ <br />
+
+ If all goes well, you should now be able to open [`http://localhost:7860`](http://localhost:7860) in your browser and use ChuanhuChatGPT.
+
+ **If you run into problems during installation, please check the [Troubleshooting](#troubleshooting) section first.**
+
+ <details><summary><h3>Or, run with Docker</h3></summary>
+
+ #### Pull the image
+
+ ```shell
+ docker pull tuchuanhuhuhu/chuanhuchatgpt:latest
+ ```
+
+ #### Run
+
+ ```shell
+ docker run -d --name chatgpt \
+     -e my_api_key="replace with your API key" \
+     -e USERNAME="replace with a username" \
+     -e PASSWORD="replace with a password" \
+     -v ~/chatGPThistory:/app/history \
+     -p 7860:7860 \
+     tuchuanhuhuhu/chuanhuchatgpt:latest
+ ```
+
+ Note: the `USERNAME` and `PASSWORD` lines can be omitted; if omitted, authentication is not enabled (a minimal example without them is sketched below).
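For reference, the same command without authentication (just dropping the two optional lines) would look like:

```shell
docker run -d --name chatgpt \
    -e my_api_key="replace with your API key" \
    -v ~/chatGPThistory:/app/history \
    -p 7860:7860 \
    tuchuanhuhuhu/chuanhuchatgpt:latest
```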
+
+ #### Check the running status
+ ```shell
+ docker logs chatgpt
+ ```
+
+ #### You can also modify the script and build the image yourself
+
+ ```shell
+ docker build -t chuanhuchatgpt:latest .
+ ```
+ </details>
+
+
+ ### Remote deployment
+
+ <details><summary>Read this section if you need to deploy the project on a public server</summary>
+
+ ### Deploying to a public server
+
+ Change the last line to
+
+ ```
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)  # the port can be customized
+ ```
+ ### Protecting the page with a username and password
+
+ Change the last line to
+
+ ```
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860, auth=("enter a username here", "enter a password here"))  # a username and password can be set
+ ```
+
+ ### Configuring an Nginx reverse proxy
+
+ Note: a reverse proxy is not required. It is only needed if you want to use a domain name.
+
+ Also: at present, once authentication is configured, Nginx must also be configured with SSL, or a [cookie mismatch problem](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/89) occurs.
+
+ Add a standalone configuration file:
+ ```nginx
+ server {
+     listen 80;
+     server_name /your-domain/;  # fill in the domain you set up
+     access_log off;
+     error_log off;
+     location / {
+         proxy_pass http://127.0.0.1:7860;  # mind the port number
+         proxy_redirect off;
+         proxy_set_header Host $host;
+         proxy_set_header X-Real-IP $remote_addr;
+         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+         proxy_set_header Upgrade $http_upgrade;  # WebSocket configuration
+         proxy_set_header Connection $connection_upgrade;  # WebSocket configuration
+         proxy_max_temp_file_size 0;
+         client_max_body_size 10m;
+         client_body_buffer_size 128k;
+         proxy_connect_timeout 90;
+         proxy_send_timeout 90;
+         proxy_read_timeout 90;
+         proxy_buffer_size 4k;
+         proxy_buffers 4 32k;
+         proxy_busy_buffers_size 64k;
+         proxy_temp_file_write_size 64k;
+     }
+ }
+ ```
+
+ Modify the `nginx.conf` configuration file (usually at `/etc/nginx/nginx.conf`) and add the following to the http block
+ (this step configures the WebSocket connection; skip it if it is already configured):
+ ```nginx
+ map $http_upgrade $connection_upgrade {
+     default upgrade;
+     '' close;
+ }
+ ```
+
+ To have both domain access and authentication, an SSL certificate must be configured; see [this blog post](https://www.gzblog.tech/2020/12/25/how-to-config-hexo/#%E9%85%8D%E7%BD%AEHTTPS) for a one-step setup.
+
+ </details>
+
+ ## Troubleshooting
+
+ First, try pulling the latest changes to this project and retrying with the newest code.
+
+ Click `Download ZIP` on the page to download the latest code, or
+ ```shell
+ git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
+ ```
+
+ If the problem persists, try reinstalling gradio:
+
+ ```
+ pip install gradio --upgrade --force-reinstall
+ ```
+
+ This often solves the problem.
+
+ <details><summary><h3><code>No module named '_bz2'</code></h3></summary>
+
+ > Deployed on CentOS 7.6 with Python 3.11.0; it fails at the end with ModuleNotFoundError: No module named '_bz2'
+
+ Install the `bzip2` development package before building Python:
+
+ ```
+ sudo yum install bzip2-devel
+ ```
+ </details>
+
+ <details><summary><h3><code>openai.error.APIConnectionError</code></h3></summary>
+
+ > If you also see an error mentioning `openai.error.APIConnectionError`, it is probably caused by your `urllib3` version: the problem appears when `urllib3` is newer than `1.25.11`.
+ >
+ > The fix is to uninstall `urllib3`, reinstall version `1.25.11`, and run again.
+
+ See: [#5](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/5)
+
+ In a terminal or command prompt, uninstall `urllib3`:
+
+ ```
+ pip uninstall urllib3
+ ```
+
+ Then install the required version with a pinned `pip install`:
+
+ ```
+ pip install urllib3==1.25.11
+ ```
+
+ Adapted from:
+ [解决OpenAI API 挂了代理还是连接不上的问题](https://zhuanlan.zhihu.com/p/611080662)
+ </details>
+
+ <details><summary><h3>Validation fails after setting the API key in the Python file</h3></summary>
+
+ > After setting the API key in ChuanhuChatbot.py, validation fails with "发生了未知错误Orz" (an unknown error occurred)
+
+ See: [#26](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/26)
+ </details>
+
+ <details><summary><h3>Endless waiting / SSL Error</h3></summary>
+
+ > SSLError after updating the script [#49](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/49)
+ >
+ > After it starts, typing a question seems to get no reaction and no error [#25](https://github.com/GaiZhenbiao/ChuanhuChatGPT/issues/25)
+ >
+ > ```
+ > requests.exceptions.SSLError: HTTPSConnectionPool(host='api.openai.com', port=443): Max retries exceeded with url: /v1/chat/completions (Caused by SSLError(SSLEOFError(8, 'EOF occurred in violation of protocol (_ssl.c:1129)')))
+ > ```
+
+ Add `openai.com` to the proxy rules of the proxy app you use. Take care not to proxy `127.0.0.1`, or you will hit the next error.
+
+ For example, add this to a Clash configuration file:
+
+ ```
+ rule-providers:
+   private:
+     type: http
+     behavior: domain
+     url: "https://cdn.jsdelivr.net/gh/Loyalsoldier/clash-rules@release/lancidr.txt"
+     path: ./ruleset/ads.yaml
+     interval: 86400
+
+ rules:
+   - RULE-SET,private,DIRECT
+   - DOMAIN-SUFFIX,openai.com,your-proxy-rule
+ ```
+
+ Surge:
+
+ ```
+ [Rule]
+ DOMAIN-SET,https://cdn.jsdelivr.net/gh/Loyalsoldier/surge-rules@release/private.txt,DIRECT
+ DOMAIN-SUFFIX,openai.com,your-proxy-rule
+ ```
+ </details>
+
+ <details><summary><h3>The page reports the error "Something went wrong"</h3></summary>
+
+ > ```
+ > Something went wrong
+ > Expecting value: line 1 column 1 (char 0)
+ > ```
+
+ This error happens because `127.0.0.1` is being proxied, so the page cannot communicate with the backend. Configure your proxy software to connect to `127.0.0.1` directly.
+ </details>
+
+ <details><summary><h3><code>No matching distribution found for openai>=0.27.0</code></h3></summary>
+
+ The `openai` dependency has been removed. Please download the latest version of the script.
+ </details>
+
+ ## Starchart
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
+
+ ## Contributors
+
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
+   <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
+ </a>
+
+ ## Donate
+
+ 🐯 Buy the author a cola~
+
+ <img width="350" alt="image" src="https://user-images.githubusercontent.com/51039745/223626874-f471e5f5-8a06-43d5-aa31-9d2575b6f631.JPG">
presets.py CHANGED
@@ -1,13 +1,39 @@
- title = """<h1 align="center"></h1>"""
- description = """<div align=center>

- 此网页基于 `gpt-3.5-turbo` 大型自然语言处理模型
-
- Chatbot Mar 12 Version.

</div>
"""
customCSS = """

code {
    display: inline;
    white-space: break-spaces;
@@ -27,14 +53,35 @@ pre code {
    color: #FFF;
    box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
}
"""

standard_error_msg = "☹️发生了错误:"    # standard prefix for error messages
- error_retrieve_prompt = "连接超时,无法获取对话。请检查网络连接,或者API-Key是否有效。"    # error while fetching the reply
- summarize_prompt = "请总结以上对话,不超过100字。"    # prompt used to summarize the conversation
- max_token_streaming = 4096    # max token count in streaming mode
- timeout_streaming = 5    # timeout in streaming mode
- max_token_all = 4096    # max token count in non-streaming mode
timeout_all = 200    # timeout in non-streaming mode
- enable_streaming_option = False    # whether to show the checkbox for streaming answers in real time
HIDE_MY_KEY = True    # set this to True if you want to hide your API key in the UI
 
+ # -*- coding:utf-8 -*-
+ title = """<h1 align="left" style="min-width:200px; margin-top:0;"></h1>"""
+ description = """<div align="center" style="margin:16px 0">

+ Chatbot Mar 17 Version.

</div>
"""
customCSS = """
+ #status_display {
+     display: flex;
+     min-height: 2.5em;
+     align-items: flex-end;
+     justify-content: flex-end;
+ }
+ #status_display p {
+     font-size: .85em;
+     font-family: monospace;
+     color: var(--text-color-subdued) !important;
+ }
+ [class *= "message"] {
+     border-radius: var(--radius-xl) !important;
+     border: none;
+     padding: var(--spacing-xl) !important;
+     font-size: var(--text-md) !important;
+     line-height: var(--line-md) !important;
+ }
+ [data-testid = "bot"] {
+     max-width: 85%;
+     border-bottom-left-radius: 0 !important;
+ }
+ [data-testid = "user"] {
+     max-width: 85%;
+     width: auto !important;
+     border-bottom-right-radius: 0 !important;
+ }
code {
    display: inline;
    white-space: break-spaces;

    color: #FFF;
    box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
}
+
+ * {
+     transition: all 0.6s;
+ }
"""

+ summarize_prompt = "你是谁?我们刚才聊了什么?"    # prompt used to summarize the conversation
+ MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]    # available models
+ websearch_prompt = """Web search results:
+
+ {web_results}
+ Current date: {current_date}
+
+ Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
+ Query: {query}
+ Reply in 中文"""
+
+ # error messages
standard_error_msg = "☹️发生了错误:"    # standard prefix for error messages
+ error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。"    # error while fetching the reply
+ connection_timeout_prompt = "连接超时,无法获取对话。"    # connection timed out
+ read_timeout_prompt = "读取超时,无法获取对话。"    # read timed out
+ proxy_error_prompt = "代理错误,无法获取对话。"    # proxy error
+ ssl_error_prompt = "SSL错误,无法获取对话。"    # SSL error
+ no_apikey_msg = "API key长度不是51位,请检查是否输入正确。"    # the API key is not 51 characters long
+
+ max_token_streaming = 3500    # max token count in streaming mode
+ timeout_streaming = 15    # timeout in streaming mode
+ max_token_all = 3500    # max token count in non-streaming mode
timeout_all = 200    # timeout in non-streaming mode
+ enable_streaming_option = True    # whether to show the checkbox for streaming answers in real time
HIDE_MY_KEY = True    # set this to True if you want to hide your API key in the UI
requirements.txt CHANGED
@@ -1,3 +1,8 @@
gradio
mdtex2html
pypinyin
+ tiktoken
+ socksio
+ tqdm
+ colorama
+ duckduckgo_search
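Each new dependency maps to a feature elsewhere in this commit: `tiktoken` for local token counting (`count_token` in utils.py), `tqdm` for the streaming loop, `colorama` for colored log output, and `duckduckgo_search` for the new web-search option; `socksio` is presumably pulled in for SOCKS-proxy support. A quick sketch of the token-counting path:

```python
import tiktoken

# cl100k_base is the encoding used by count_token() in utils.py
encoding = tiktoken.get_encoding("cl100k_base")
print(len(encoding.encode("你好,ChatGPT!")))  # prints the token count of the string
```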
utils.py CHANGED
@@ -1,9 +1,7 @@
- """Contains all of the components that can be used with Gradio Interface / Blocks.
- Along with the docs for each component, you can find the names of example demos that use
- each component. These demos are located in the `demo` directory."""
-
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
import json
import gradio as gr
# import openai
@@ -15,6 +13,13 @@ import csv
import mdtex2html
from pypinyin import lazy_pinyin
from presets import *

if TYPE_CHECKING:
    from typing import TypedDict
@@ -23,7 +28,7 @@ if TYPE_CHECKING:
        headers: List[str]
        data: List[List[str | int | bool]]

- initial_prompt = "你是 ChatGPT,由 OpenAI 训练的大型语言模型。你的数据库截止日期为:2021年9。"
API_URL = "https://api.openai.com/v1/chat/completions"
HISTORY_DIR = "history"
TEMPLATES_DIR = "templates"
@@ -48,6 +53,11 @@ def postprocess(
    )
    return y

def parse_text(text):
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
@@ -92,10 +102,9 @@ def construct_assistant(text):
    return construct_text("assistant", text)

def construct_token_message(token, stream=False):
-     extra = "【仅包含回答的计数】 " if stream else ""
-     return f"{extra}Token 计数: {token}"

- def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}"
@@ -104,7 +113,7 @@ def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream):
    history = [construct_system(system_prompt), *history]

    payload = {
-         "model": "gpt-3.5-turbo",
        "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
        "temperature": temperature,  # 1.0,
        "top_p": top_p,  # 1.0,
@@ -120,25 +129,41 @@
    response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
    return response

- def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
    def get_return_value():
-         return chatbot, history, status_text, [*previous_token_count, token_counter]
-     token_counter = 0
    partial_words = ""
    counter = 0
-     status_text = "OK"
    history.append(construct_user(inputs))
    try:
-         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True)
    except requests.exceptions.ConnectTimeout:
-         status_text = standard_error_msg + error_retrieve_prompt
        yield get_return_value()
        return

-     chatbot.append((parse_text(inputs), ""))
    yield get_return_value()

-     for chunk in response.iter_lines():
        if counter == 0:
            counter += 1
            continue
@@ -147,130 +172,200 @@
        if chunk:
            chunk = chunk.decode()
            chunklength = len(chunk)
-             chunk = json.loads(chunk[6:])
            # decode each line as response data is in bytes
            if chunklength > 6 and "delta" in chunk['choices'][0]:
                finish_reason = chunk['choices'][0]['finish_reason']
-                 status_text = construct_token_message(sum(previous_token_count) + token_counter, stream=True)
                if finish_reason == "stop":
                    yield get_return_value()
                    break
-                 partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
-                 if token_counter == 0:
-                     history.append(construct_assistant(" " + partial_words))
-                 else:
-                     history[-1] = construct_assistant(partial_words)
                chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
-                 token_counter += 1
                yield get_return_value()


- def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, previous_token_count, top_p, temperature):
    history.append(construct_user(inputs))
    try:
-         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False)
    except requests.exceptions.ConnectTimeout:
-         status_text = standard_error_msg + error_retrieve_prompt
-         return chatbot, history, status_text, previous_token_count
    response = json.loads(response.text)
    content = response["choices"][0]["message"]["content"]
-     history.append(construct_assistant(content))
-     chatbot.append((parse_text(inputs), parse_text(content)))
    total_token_count = response["usage"]["total_tokens"]
-     previous_token_count.append(total_token_count - sum(previous_token_count))
    status_text = construct_token_message(total_token_count)
-     return chatbot, history, status_text, previous_token_count
-
-
- def predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=False, should_check_token_count=True):  # repetition_penalty, top_k
    if stream:
-         iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
-         for chatbot, history, status_text, token_count in iter:
-             yield chatbot, history, status_text, token_count
    else:
-         chatbot, history, status_text, token_count = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature)
-         yield chatbot, history, status_text, token_count
    if stream:
        max_token = max_token_streaming
    else:
        max_token = max_token_all
-     if sum(token_count) > max_token and should_check_token_count:
-         iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=True)
-         for chatbot, history, status_text, token_count in iter:
-             status_text = f"Token已达到上限,自动降低Token计数至 {status_text}"
-             yield chatbot, history, status_text, token_count
-
-
- def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False):
    if len(history) == 0:
        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
        return
    history.pop()
    inputs = history.pop()["content"]
    token_count.pop()
-     iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream)
    for x in iter:
        yield x


- def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, hidden=False):
-     iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, should_check_token_count=False)
    for chatbot, history, status_text, previous_token_count in iter:
        history = history[-2:]
        token_count = previous_token_count[-1:]
        if hidden:
            chatbot.pop()
        yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count


- def delete_last_conversation(chatbot, history, previous_token_count, streaming):
    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
        chatbot.pop()
        return chatbot, history
    if len(history) > 0:
        history.pop()
        history.pop()
    if len(chatbot) > 0:
        chatbot.pop()
    if len(previous_token_count) > 0:
        previous_token_count.pop()
-     return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count), streaming)


def save_chat_history(filename, system, history, chatbot):
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename += ".json"
    os.makedirs(HISTORY_DIR, exist_ok=True)
    json_s = {"system": system, "history": history, "chatbot": chatbot}
-     print(json_s)
    with open(os.path.join(HISTORY_DIR, filename), "w") as f:
-         json.dump(json_s, f)


def load_chat_history(filename, system, history, chatbot):
    try:
        with open(os.path.join(HISTORY_DIR, filename), "r") as f:
            json_s = json.load(f)
-         if type(json_s["history"]) == list:
-             new_history = []
-             for index, item in enumerate(json_s["history"]):
-                 if index % 2 == 0:
-                     new_history.append(construct_user(item))
-                 else:
-                     new_history.append(construct_assistant(item))
-             json_s["history"] = new_history
        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
    except FileNotFoundError:
-         print("File not found.")
        return filename, system, history, chatbot

def sorted_by_pinyin(list):
    return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])

def get_file_names(dir, plain=False, filetypes=[".json"]):
-     # find all json files in the current directory and return their names
    files = []
    try:
        for type in filetypes:
@@ -286,11 +381,13 @@ def get_file_names(dir, plain=False, filetypes=[".json"]):
    return gr.Dropdown.update(choices=files)

def get_history_names(plain=False):
    return get_file_names(HISTORY_DIR, plain)

def load_template(filename, mode=0):
    lines = []
-     print("Loading template...")
    if filename.endswith(".json"):
        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
            lines = json.load(f)
@@ -309,24 +406,19 @@ def load_template(filename, mode=0):
    return {row[0]: row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])

def get_template_names(plain=False):
    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])

def get_template_content(templates, selection, original_system_prompt):
    try:
        return templates[selection]
    except:
        return original_system_prompt

def reset_state():
    return [], [], [], construct_token_message(0)

- def compose_system(system_prompt):
-     return {"role": "system", "content": system_prompt}
-
-
- def compose_user(user_input):
-     return {"role": "user", "content": user_input}
-
-
def reset_textbox():
    return gr.update(value='')
 
+ # -*- coding:utf-8 -*-
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
+ import logging
import json
import gradio as gr
# import openai

import mdtex2html
from pypinyin import lazy_pinyin
from presets import *
+ import tiktoken
+ from tqdm import tqdm
+ import colorama
+ from duckduckgo_search import ddg
+ import datetime
+
+ # logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")

if TYPE_CHECKING:
    from typing import TypedDict

        headers: List[str]
        data: List[List[str | int | bool]]

+ initial_prompt = "You are a helpful assistant."
API_URL = "https://api.openai.com/v1/chat/completions"
HISTORY_DIR = "history"
TEMPLATES_DIR = "templates"

    )
    return y

+ def count_token(input_str):
+     encoding = tiktoken.get_encoding("cl100k_base")
+     length = len(encoding.encode(input_str))
+     return length
+
def parse_text(text):
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]

    return construct_text("assistant", text)

def construct_token_message(token, stream=False):
+     return f"Token 计数: {token}"

+ def get_response(openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model):
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}"

    history = [construct_system(system_prompt), *history]

    payload = {
+         "model": selected_model,
        "messages": history,  # [{"role": "user", "content": f"{inputs}"}],
        "temperature": temperature,  # 1.0,
        "top_p": top_p,  # 1.0,

    response = requests.post(API_URL, headers=headers, json=payload, stream=True, timeout=timeout)
    return response

+ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
    def get_return_value():
+         return chatbot, history, status_text, all_token_counts
+
+     logging.info("实时回答模式")
    partial_words = ""
    counter = 0
+     status_text = "开始实时传输回答……"
    history.append(construct_user(inputs))
+     history.append(construct_assistant(""))
+     chatbot.append((parse_text(inputs), ""))
+     user_token_count = 0
+     if len(all_token_counts) == 0:
+         system_prompt_token_count = count_token(system_prompt)
+         user_token_count = count_token(inputs) + system_prompt_token_count
+     else:
+         user_token_count = count_token(inputs)
+     all_token_counts.append(user_token_count)
+     logging.info(f"输入token计数: {user_token_count}")
+     yield get_return_value()
    try:
+         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, True, selected_model)
    except requests.exceptions.ConnectTimeout:
+         status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
+         yield get_return_value()
+         return
+     except requests.exceptions.ReadTimeout:
+         status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
        yield get_return_value()
        return

    yield get_return_value()
+     error_json_str = ""

+     for chunk in tqdm(response.iter_lines()):
        if counter == 0:
            counter += 1
            continue

        if chunk:
            chunk = chunk.decode()
            chunklength = len(chunk)
+             try:
+                 chunk = json.loads(chunk[6:])
+             except json.JSONDecodeError:
+                 logging.info(chunk)
+                 error_json_str += chunk
+                 status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
+                 yield get_return_value()
+                 continue
            # decode each line as response data is in bytes
            if chunklength > 6 and "delta" in chunk['choices'][0]:
                finish_reason = chunk['choices'][0]['finish_reason']
+                 status_text = construct_token_message(sum(all_token_counts), stream=True)
                if finish_reason == "stop":
                    yield get_return_value()
                    break
+                 try:
+                     partial_words = partial_words + chunk['choices'][0]["delta"]["content"]
+                 except KeyError:
+                     status_text = standard_error_msg + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " + str(sum(all_token_counts))
+                     yield get_return_value()
+                     break
+                 history[-1] = construct_assistant(partial_words)
                chatbot[-1] = (parse_text(inputs), parse_text(partial_words))
+                 all_token_counts[-1] += 1
                yield get_return_value()


+ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model):
+     logging.info("一次性回答模式")
    history.append(construct_user(inputs))
+     history.append(construct_assistant(""))
+     chatbot.append((parse_text(inputs), ""))
+     all_token_counts.append(count_token(inputs))
    try:
+         response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
    except requests.exceptions.ConnectTimeout:
+         status_text = standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
+         return chatbot, history, status_text, all_token_counts
+     except requests.exceptions.ProxyError:
+         status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
+         return chatbot, history, status_text, all_token_counts
+     except requests.exceptions.SSLError:
+         status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
+         return chatbot, history, status_text, all_token_counts
    response = json.loads(response.text)
    content = response["choices"][0]["message"]["content"]
+     history[-1] = construct_assistant(content)
+     chatbot[-1] = (parse_text(inputs), parse_text(content))
    total_token_count = response["usage"]["total_tokens"]
+     all_token_counts[-1] = total_token_count - sum(all_token_counts)
    status_text = construct_token_message(total_token_count)
+     return chatbot, history, status_text, all_token_counts


+ def predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=MODELS[0], use_websearch_checkbox=False, should_check_token_count=True):  # repetition_penalty, top_k
+     logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
+     if use_websearch_checkbox:
+         results = ddg(inputs, max_results=3)
+         web_results = []
+         for idx, result in enumerate(results):
+             logging.info(f"搜索结果{idx + 1}:{result}")
+             web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
+         web_results = "\n\n".join(web_results)
+         today = datetime.datetime.today().strftime("%Y-%m-%d")
+         inputs = websearch_prompt.replace("{current_date}", today).replace("{query}", inputs).replace("{web_results}", web_results)
+     if len(openai_api_key) != 51:
+         status_text = standard_error_msg + no_apikey_msg
+         logging.info(status_text)
+         chatbot.append((parse_text(inputs), ""))
+         if len(history) == 0:
+             history.append(construct_user(inputs))
+             history.append("")
+             all_token_counts.append(0)
+         else:
+             history[-2] = construct_user(inputs)
+         yield chatbot, history, status_text, all_token_counts
+         return
+     if stream:
+         yield chatbot, history, "开始生成回答……", all_token_counts
    if stream:
+         logging.info("使用流式传输")
+         iter = stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
+         for chatbot, history, status_text, all_token_counts in iter:
+             yield chatbot, history, status_text, all_token_counts
    else:
+         logging.info("不使用流式传输")
+         chatbot, history, status_text, all_token_counts = predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_token_counts, top_p, temperature, selected_model)
+         yield chatbot, history, status_text, all_token_counts
+     logging.info(f"传输完毕。当前token计数为{all_token_counts}")
+     if len(history) > 1 and history[-1]['content'] != inputs:
+         logging.info("回答为:" + colorama.Fore.BLUE + f"{history[-1]['content']}" + colorama.Style.RESET_ALL)
    if stream:
        max_token = max_token_streaming
    else:
        max_token = max_token_all
+     if sum(all_token_counts) > max_token and should_check_token_count:
+         status_text = f"精简token中{all_token_counts}/{max_token}"
+         logging.info(status_text)
+         yield chatbot, history, status_text, all_token_counts
+         iter = reduce_token_size(openai_api_key, system_prompt, history, chatbot, all_token_counts, top_p, temperature, stream=False, selected_model=selected_model, hidden=True)
+         for chatbot, history, status_text, all_token_counts in iter:
+             status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
+             yield chatbot, history, status_text, all_token_counts


+ def retry(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0]):
+     logging.info("重试中……")
    if len(history) == 0:
        yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
        return
    history.pop()
    inputs = history.pop()["content"]
    token_count.pop()
+     iter = predict(openai_api_key, system_prompt, history, inputs, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model)
+     logging.info("重试完毕")
    for x in iter:
        yield x


+ def reduce_token_size(openai_api_key, system_prompt, history, chatbot, token_count, top_p, temperature, stream=False, selected_model=MODELS[0], hidden=False):
+     logging.info("开始减少token数量……")
+     iter = predict(openai_api_key, system_prompt, history, summarize_prompt, chatbot, token_count, top_p, temperature, stream=stream, selected_model=selected_model, should_check_token_count=False)
+     logging.info(f"chatbot: {chatbot}")
    for chatbot, history, status_text, previous_token_count in iter:
        history = history[-2:]
        token_count = previous_token_count[-1:]
        if hidden:
            chatbot.pop()
        yield chatbot, history, construct_token_message(sum(token_count), stream=stream), token_count
+     logging.info("减少token数量完毕")


+ def delete_last_conversation(chatbot, history, previous_token_count):
    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
+         logging.info("由于包含报错信息,只删除chatbot记录")
        chatbot.pop()
        return chatbot, history
    if len(history) > 0:
+         logging.info("删除了一组对话历史")
        history.pop()
        history.pop()
    if len(chatbot) > 0:
+         logging.info("删除了一组chatbot对话")
        chatbot.pop()
    if len(previous_token_count) > 0:
+         logging.info("删除了一组对话的token计数记录")
        previous_token_count.pop()
+     return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))


def save_chat_history(filename, system, history, chatbot):
+     logging.info("保存对话历史中……")
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename += ".json"
    os.makedirs(HISTORY_DIR, exist_ok=True)
    json_s = {"system": system, "history": history, "chatbot": chatbot}
+     logging.info(json_s)
    with open(os.path.join(HISTORY_DIR, filename), "w") as f:
+         json.dump(json_s, f, ensure_ascii=False, indent=4)
+     logging.info("保存对话历史完毕")


def load_chat_history(filename, system, history, chatbot):
+     logging.info("加载对话历史中……")
    try:
        with open(os.path.join(HISTORY_DIR, filename), "r") as f:
            json_s = json.load(f)
+         try:
+             if type(json_s["history"][0]) == str:
+                 logging.info("历史记录格式为旧版,正在转换……")
+                 new_history = []
+                 for index, item in enumerate(json_s["history"]):
+                     if index % 2 == 0:
+                         new_history.append(construct_user(item))
+                     else:
+                         new_history.append(construct_assistant(item))
+                 json_s["history"] = new_history
+                 logging.info(new_history)
+         except:
+             # no chat history
+             pass
+         logging.info("加载对话历史完毕")
        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
    except FileNotFoundError:
+         logging.info("没有找到对话历史文件,不执行任何操作")
        return filename, system, history, chatbot

def sorted_by_pinyin(list):
    return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])

def get_file_names(dir, plain=False, filetypes=[".json"]):
+     logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
    files = []
    try:
        for type in filetypes:

    return gr.Dropdown.update(choices=files)

def get_history_names(plain=False):
+     logging.info("获取历史记录文件名列表")
    return get_file_names(HISTORY_DIR, plain)

def load_template(filename, mode=0):
+     logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
    lines = []
+     logging.info("Loading template...")
    if filename.endswith(".json"):
        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
            lines = json.load(f)

    return {row[0]: row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])

def get_template_names(plain=False):
+     logging.info("获取模板文件名列表")
    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])

def get_template_content(templates, selection, original_system_prompt):
+     logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
    try:
        return templates[selection]
    except:
        return original_system_prompt

def reset_state():
+     logging.info("重置状态")
    return [], [], [], construct_token_message(0)

def reset_textbox():
    return gr.update(value='')