Added chat history and improved the UI

#6
Files changed (2) hide show
  1. app.py +64 -124
  2. requirements.txt +1 -1
app.py CHANGED
@@ -11,128 +11,113 @@ MODELS = [
11
  "Meta-Llama-3.1-8B-Instruct"
12
  ]
13
 
 
 
 
14
  def create_client(api_key=None):
 
15
  if api_key:
16
  openai.api_key = api_key
17
- openai.api_base = "https://api.sambanova.ai/v1" # Fixed Base URL
18
  else:
19
  openai.api_key = os.getenv("API_KEY")
20
- openai.api_base = os.getenv("URL")
21
-
22
- def chat_with_ai(message, chat_history, system_prompt):
23
- messages = [
24
- {"role": "system", "content": system_prompt},
25
- ]
26
 
27
- for human, ai in chat_history:
28
- messages.append({"role": "user", "content": human})
29
- messages.append({"role": "assistant", "content": ai})
30
 
 
 
 
 
 
 
 
 
 
 
31
  messages.append({"role": "user", "content": message})
32
-
33
  return messages
34
 
35
  def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
36
- print("Starting respond function...")
37
- create_client(api_key) # Sets api_key and api_base globally
38
  messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
39
  start_time = time.time()
40
 
41
  try:
42
- print("Calling OpenAI API...")
43
- completion = openai.ChatCompletion.create(
44
- model=model,
45
- messages=messages,
46
- stream=False # Set to False for synchronous response
47
- )
48
- response = completion.choices[0].message['content']
49
  thinking_time = time.time() - start_time
50
- print("Response received from OpenAI API.")
51
  return response, thinking_time
52
  except Exception as e:
53
  error_message = f"Error: {str(e)}"
54
- print(error_message)
55
  return error_message, time.time() - start_time
56
 
57
  def parse_response(response):
 
58
  answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
59
  reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
60
 
61
  answer = answer_match.group(1).strip() if answer_match else ""
62
  reflection = reflection_match.group(1).strip() if reflection_match else ""
63
-
64
  steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
65
- if answer is not "":
66
- return answer, reflection, steps
67
- else:
68
- return response, "", ""
69
 
70
- def process_chat(message, history, model, system_prompt, thinking_budget, api_key):
71
- print(f"Received message: {message}")
72
- if not api_key:
73
- print("API key missing")
74
- return history + [("System", "Please provide your API Key before starting the chat.")]
75
 
76
- try:
77
- formatted_system_prompt = system_prompt.format(budget=thinking_budget)
78
- except KeyError as e:
79
- error_msg = f"System prompt missing placeholder: {str(e)}"
80
- print(error_msg)
81
- return history + [("System", error_msg)]
82
-
83
- response, thinking_time = respond(message, history, model, formatted_system_prompt, thinking_budget, api_key)
84
 
85
  if response.startswith("Error:"):
86
- return history + [("System", response)]
87
 
88
  answer, reflection, steps = parse_response(response)
89
 
90
- formatted_response = f"**Answer:** {answer}\n\n**Reflection:** {reflection}\n\n**Thinking Steps:**\n"
91
- for i, step in enumerate(steps, 1):
92
- formatted_response += f"**Step {i}:** {step}\n"
 
 
93
 
94
- formatted_response += f"\n**Thinking time:** {thinking_time:.2f} s"
 
95
 
96
- print(f"Appended response: {formatted_response}")
97
- return history + [(message, formatted_response)]
98
 
99
  # Define the default system prompt
100
- default_system_prompt = """
101
  You are a helpful assistant in normal conversation.
102
- When given a problem to solve, you are an expert problem-solving assistant. Your task is to provide a detailed, step-by-step solution to a given question. Follow these instructions carefully:
103
-
 
104
  1. Read the given question carefully and reset counter between <count> and </count> to {budget}
105
  2. Generate a detailed, logical step-by-step solution.
106
  3. Enclose each step of your solution within <step> and </step> tags.
107
- 4. You are allowed to use at most {budget} steps (starting budget), keep track of it by counting down within tags <count> </count>, STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
108
- 5. Do a self-reflection when you are unsure about how to proceed, based on the self-reflection and reward, decides whether you need to return to the previous steps.
109
- 6. After completing the solution steps, reorganize and synthesize the steps into the final answer within <answer> and </answer> tags.
110
- 7. Provide a critical, honest and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
111
- 8. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
112
-
 
 
 
 
 
 
113
  Example format:
114
  <count> [starting budget] </count>
115
-
116
  <step> [Content of step 1] </step>
117
  <count> [remaining budget] </count>
118
-
119
  <step> [Content of step 2] </step>
120
  <reflection> [Evaluation of the steps so far] </reflection>
121
  <reward> [Float between 0.0 and 1.0] </reward>
122
  <count> [remaining budget] </count>
123
-
124
  <step> [Content of step 3 or Content of some previous step] </step>
125
  <count> [remaining budget] </count>
126
-
127
  ...
128
-
129
  <step> [Content of final step] </step>
130
  <count> [remaining budget] </count>
131
-
132
- <answer> [Final Answer] </answer>
133
-
134
  <reflection> [Evaluation of the solution] </reflection>
135
-
136
  <reward> [Float between 0.0 and 1.0] </reward>
137
  """
138
 
@@ -141,65 +126,20 @@ with gr.Blocks() as demo:
141
  gr.Markdown("[Powered by Llama3.1 models through SN Cloud](https://sambanova.ai/fast-api?api_ref=907266)")
142
 
143
  with gr.Row():
144
- api_key = gr.Textbox(
145
- label="API Key",
146
- type="password",
147
- placeholder="Enter your API key here"
148
- )
149
 
150
  with gr.Row():
151
- model = gr.Dropdown(
152
- choices=MODELS,
153
- label="Select Model",
154
- value=MODELS[0]
155
- )
156
- thinking_budget = gr.Slider(
157
- minimum=1,
158
- maximum=100,
159
- value=10,
160
- step=1,
161
- label="Thinking Budget"
162
- )
163
-
164
- system_prompt = gr.Textbox(
165
- label="System Prompt",
166
- value=default_system_prompt,
167
- lines=15,
168
- interactive=True
169
- )
170
 
171
- with gr.Row():
172
- msg = gr.Textbox(
173
- label="Type your message here...",
174
- placeholder="Enter your message..."
175
- )
176
- submit = gr.Button("Submit")
177
- clear = gr.Button("Clear Chat")
178
-
179
- chatbot = gr.Chatbot(
180
- label="Chat History"
181
- )
182
-
183
- # Initialize chat history as a Gradio state
184
- chat_history = gr.State([])
185
-
186
- def handle_submit(message, history, model, system_prompt, thinking_budget, api_key):
187
- updated_history = process_chat(message, history, model, system_prompt, thinking_budget, api_key)
188
- return updated_history, ""
189
-
190
- def handle_clear():
191
- return [], ""
192
-
193
- submit.click(
194
- handle_submit,
195
- inputs=[msg, chat_history, model, system_prompt, thinking_budget, api_key],
196
- outputs=[chatbot, msg]
197
- )
198
-
199
- clear.click(
200
- handle_clear,
201
- inputs=None,
202
- outputs=[chatbot, msg]
203
- )
204
-
205
- demo.launch()
 
11
  "Meta-Llama-3.1-8B-Instruct"
12
  ]
13
 
14
# Sambanova exposes an OpenAI-compatible endpoint; all requests go here.
API_BASE = "https://api.sambanova.ai/v1"

def create_client(api_key=None):
    """Create an OpenAI client bound to the Sambanova endpoint.

    Args:
        api_key: Explicit key from the UI; when empty/None, falls back to
            the API_KEY environment variable.

    Returns:
        An openai.OpenAI client configured with the resolved key.
    """
    # Prefer the user-supplied key; otherwise read it from the environment.
    key = api_key or os.getenv("API_KEY")
    # Keep the module-level attribute in sync (matches the original side
    # effect, in case anything else still reads openai.api_key).
    openai.api_key = key
    return openai.OpenAI(api_key=key, base_url=API_BASE)
 
 
25
 
26
def chat_with_ai(message, chat_history, system_prompt):
    """Convert the Gradio "messages" history into an OpenAI message list.

    Args:
        message: The new user message, appended last.
        chat_history: List of {"role": ..., "content": ...} dicts as produced
            by gr.Chatbot(type="messages"); extra keys such as "metadata"
            are ignored.
        system_prompt: Text for the leading system message.

    Returns:
        A list of {"role", "content"} dicts for chat.completions.create.
    """
    messages = [{"role": "system", "content": system_prompt}]
    for entry in chat_history:
        # Bug fix: the previous code grabbed the first/last *keys* of each
        # history dict ("role"/"content") and sent the role string as the
        # user's content. Copy the actual role and content instead. Debug
        # print() calls removed.
        messages.append({"role": entry["role"], "content": entry["content"]})
    messages.append({"role": "user", "content": message})
    return messages
38
 
39
def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
    """Send the conversation to the model.

    Returns:
        (response_text, elapsed_seconds); on failure the text is an
        "Error: ..." string rather than a raised exception, so the UI
        can display it inline.
    """
    client = create_client(api_key)
    # Fill the {budget} placeholder before building the message list.
    prompt = system_prompt.format(budget=thinking_budget)
    messages = chat_with_ai(message, chat_history, prompt)
    started = time.time()
    try:
        completion = client.chat.completions.create(model=model, messages=messages)
        return completion.choices[0].message.content, time.time() - started
    except Exception as exc:
        # Surface the error as text; callers check the "Error:" prefix.
        return f"Error: {exc}", time.time() - started
53
 
54
def parse_response(response):
    """Extract the answer, reflection, and step sections from a model reply.

    Returns:
        (answer, reflection, steps) where answer/reflection are stripped
        strings ("" when the tag is absent) and steps is a list of the raw
        <step> bodies, in order.
    """
    answer = ""
    found = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
    if found:
        answer = found.group(1).strip()

    reflection = ""
    found = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
    if found:
        reflection = found.group(1).strip()

    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
    return answer, reflection, steps
 
 
 
 
64
 
65
def generate(message, history, model, system_prompt, thinking_budget, api_key):
    """Handle one chat turn for the Gradio UI.

    Args:
        message: User input from the textbox.
        history: Current gr.Chatbot "messages" history (list of dicts).
        model, system_prompt, thinking_budget, api_key: UI settings, passed
            straight through to respond().

    Returns:
        (new_history, "") — the empty string clears the input textbox.
    """
    response, thinking_time = respond(message, history, model, system_prompt, thinking_budget, api_key)

    if response.startswith("Error:"):
        # Bug fix: the error entry was a 1-tuple wrapping a dict with role
        # "system", which gr.Chatbot(type="messages") cannot render. Emit
        # plain role/content dicts instead, keeping the user's message.
        return history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        ], ""

    answer, reflection, steps = parse_response(response)

    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"

    messages = [
        {"role": "user", "content": message},
        # The "thinking" bubble: metadata["title"] becomes its collapsible
        # header in the Chatbot component.
        {"role": "assistant", "content": all_steps,
         "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"}},
        {"role": "assistant", "content": answer},
    ]
    return history + messages, ""
 
84
 
85
# Default system prompt: instructs the model to emit budget-tracked
# <step>/<count> reasoning followed by <answer>, <reflection>, and <reward>
# tags, which parse_response() extracts. The {budget} placeholder is filled
# via str.format in respond(), so no other literal braces may appear here.
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
Follow these instructions carefully:
1. Read the given question carefully and reset counter between <count> and </count> to {budget}
2. Generate a detailed, logical step-by-step solution.
3. Enclose each step of your solution within <step> and </step> tags.
4. You are allowed to use at most {budget} steps (starting budget),
keep track of it by counting down within tags <count> </count>,
STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
5. Do a self-reflection when you are unsure about how to proceed,
based on the self-reflection and reward, decides whether you need to return
to the previous steps.
6. After completing the solution steps, reorganize and synthesize the steps
into the final answer within <answer> and </answer> tags.
7. Provide a critical, honest and subjective self-evaluation of your reasoning
process within <reflection> and </reflection> tags.
8. Assign a quality score to your solution as a float between 0.0 (lowest
quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
Example format:
<count> [starting budget] </count>

<step> [Content of step 1] </step>
<count> [remaining budget] </count>

<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<reward> [Float between 0.0 and 1.0] </reward>
<count> [remaining budget] </count>

<step> [Content of step 3 or Content of some previous step] </step>
<count> [remaining budget] </count>

...

<step> [Content of final step] </step>
<count> [remaining budget] </count>
<answer> [Final Answer] </answer> (must give final answer in this format)

<reflection> [Evaluation of the solution] </reflection>

<reward> [Float between 0.0 and 1.0] </reward>
"""
123
 
 
126
    # App header / attribution link.
    gr.Markdown("[Powered by Llama3.1 models through SN Cloud](https://sambanova.ai/fast-api?api_ref=907266)")

    with gr.Row():
        # User-supplied Sambanova key; forwarded to create_client() on submit.
        api_key = gr.Textbox(label="API Key", type="password", placeholder="Enter your API key here")

    with gr.Row():
        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
        # This value replaces {budget} in the system prompt via respond().
        thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget", info="maximum times a model can think")

    # type="messages": history is a list of {"role", "content"} dicts,
    # which is the shape chat_with_ai() and generate() operate on.
    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", type="messages")

    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")

    # Reset both the chat history and the input box.
    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None, outputs=[chatbot, msg])

    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=15, interactive=True)

    # Pressing Enter in the textbox drives the whole round-trip:
    # generate() returns (new_history, "") to update the chat and clear msg.
    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget, api_key], outputs=[chatbot, msg])

# NOTE(review): share=True exposes the app via a public Gradio tunnel URL —
# confirm that is intended for this deployment.
demo.launch(share=True, show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1 +1 @@
1
- openai==0.28.0
 
1
+ openai>=1.0