vansin committed on
Commit
634fff6
1 Parent(s): edb939d

feat: update

Browse files
Files changed (2) hide show
  1. mindsearch_gradio.py +146 -0
  2. requirements.txt +11 -11
mindsearch_gradio.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import gradio as gr
4
+ import requests
5
+ from lagent.schema import AgentStatusCode
6
+
7
+
8
+ os.system("python -m mindsearch.app --lang en --model_format internlm_server &")
9
+
10
+
11
+ PLANNER_HISTORY = []
12
+ SEARCHER_HISTORY = []
13
+
14
+
15
+ def rst_mem(history_planner: list, history_searcher: list):
16
+ '''
17
+ Reset the chatbot memory.
18
+ '''
19
+ history_planner = []
20
+ history_searcher = []
21
+ if PLANNER_HISTORY:
22
+ PLANNER_HISTORY.clear()
23
+ return history_planner, history_searcher
24
+
25
+
26
+ def format_response(gr_history, agent_return):
27
+ if agent_return['state'] in [
28
+ AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
29
+ ]:
30
+ gr_history[-1][1] = agent_return['response']
31
+ elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
32
+ thought = gr_history[-1][1].split('```')[0]
33
+ if agent_return['response'].startswith('```'):
34
+ gr_history[-1][1] = thought + '\n' + agent_return['response']
35
+ elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
36
+ thought = gr_history[-1][1].split('```')[0]
37
+ if isinstance(agent_return['response'], dict):
38
+ gr_history[-1][
39
+ 1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```' # noqa: E501
40
+ elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
41
+ assert agent_return['inner_steps'][-1]['role'] == 'environment'
42
+ item = agent_return['inner_steps'][-1]
43
+ gr_history.append([
44
+ None,
45
+ f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
46
+ ])
47
+ gr_history.append([None, ''])
48
+ return
49
+
50
+
51
+ def predict(history_planner, history_searcher):
52
+
53
+ def streaming(raw_response):
54
+ for chunk in raw_response.iter_lines(chunk_size=8192,
55
+ decode_unicode=False,
56
+ delimiter=b'\n'):
57
+ if chunk:
58
+ decoded = chunk.decode('utf-8')
59
+ if decoded == '\r':
60
+ continue
61
+ if decoded[:6] == 'data: ':
62
+ decoded = decoded[6:]
63
+ elif decoded.startswith(': ping - '):
64
+ continue
65
+ response = json.loads(decoded)
66
+ yield (response['response'], response['current_node'])
67
+
68
+ global PLANNER_HISTORY
69
+ PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
70
+ new_search_turn = True
71
+
72
+ url = 'http://localhost:8002/solve'
73
+ headers = {'Content-Type': 'application/json'}
74
+ data = {'inputs': PLANNER_HISTORY}
75
+ raw_response = requests.post(url,
76
+ headers=headers,
77
+ data=json.dumps(data),
78
+ timeout=20,
79
+ stream=True)
80
+
81
+ for resp in streaming(raw_response):
82
+ agent_return, node_name = resp
83
+ if node_name:
84
+ if node_name in ['root', 'response']:
85
+ continue
86
+ agent_return = agent_return['nodes'][node_name]['detail']
87
+ if new_search_turn:
88
+ history_searcher.append([agent_return['content'], ''])
89
+ new_search_turn = False
90
+ format_response(history_searcher, agent_return)
91
+ if agent_return['state'] == AgentStatusCode.END:
92
+ new_search_turn = True
93
+ yield history_planner, history_searcher
94
+ else:
95
+ new_search_turn = True
96
+ format_response(history_planner, agent_return)
97
+ if agent_return['state'] == AgentStatusCode.END:
98
+ PLANNER_HISTORY = agent_return['inner_steps']
99
+ yield history_planner, history_searcher
100
+ return history_planner, history_searcher
101
+
102
+
103
+ with gr.Blocks() as demo:
104
+ gr.HTML("""<h1 align="center">WebAgent Gradio Simple Demo</h1>""")
105
+ with gr.Row():
106
+ with gr.Column(scale=10):
107
+ with gr.Row():
108
+ with gr.Column():
109
+ planner = gr.Chatbot(label='planner',
110
+ height=700,
111
+ show_label=True,
112
+ show_copy_button=True,
113
+ bubble_full_width=False,
114
+ render_markdown=True)
115
+ with gr.Column():
116
+ searcher = gr.Chatbot(label='searcher',
117
+ height=700,
118
+ show_label=True,
119
+ show_copy_button=True,
120
+ bubble_full_width=False,
121
+ render_markdown=True)
122
+ with gr.Row():
123
+ user_input = gr.Textbox(show_label=False,
124
+ placeholder='inputs...',
125
+ lines=5,
126
+ container=False)
127
+ with gr.Row():
128
+ with gr.Column(scale=2):
129
+ submitBtn = gr.Button('Submit')
130
+ with gr.Column(scale=1, min_width=20):
131
+ emptyBtn = gr.Button('Clear History')
132
+
133
+ def user(query, history):
134
+ return '', history + [[query, '']]
135
+
136
+ submitBtn.click(user, [user_input, planner], [user_input, planner],
137
+ queue=False).then(predict, [planner, searcher],
138
+ [planner, searcher])
139
+ emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
140
+ queue=False)
141
+
142
+ demo.queue()
143
+ demo.launch(server_name='0.0.0.0',
144
+ server_port=7860,
145
+ inbrowser=True,
146
+ share=True)
requirements.txt CHANGED
@@ -1,12 +1,12 @@
1
  flask
2
- # duckduckgo_search==5.3.1b1
3
- # einops
4
- # fastapi
5
- # git+https://github.com/InternLM/lagent.git
6
- # gradio
7
- # janus
8
- # lmdeploy==0.2.3
9
- # pyvis
10
- # sse-starlette
11
- # termcolor
12
- # uvicorn
 
1
  flask
2
+ duckduckgo_search==5.3.1b1
3
+ einops
4
+ fastapi
5
+ git+https://github.com/InternLM/lagent.git
6
+ gradio
7
+ janus
8
+ lmdeploy
9
+ pyvis
10
+ sse-starlette
11
+ termcolor
12
+ uvicorn