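# Llama3.1-Instruct-O1: a Gradio demo that prompts Llama 3.1 Instruct models
# (served through SambaNova's OpenAI-compatible API) to reason in budgeted,
# tagged <step> blocks before giving a final <answer>.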
import gradio as gr
import openai
import time
import re
import os

# Available models
MODELS = [
    "Meta-Llama-3.1-405B-Instruct",
    "Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama-3.1-8B-Instruct"
]

def create_client(api_key):
    """Return an OpenAI-compatible client pointed at the SambaNova endpoint."""
    return openai.OpenAI(
        api_key=api_key,
        base_url="https://api.sambanova.ai/v1",
    )

def chat_with_ai(message, chat_history, system_prompt):
    messages = [
        {"role": "system", "content": system_prompt},
    ]

    for human, ai in chat_history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": ai})

    messages.append({"role": "user", "content": message})

    return messages
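
# Illustrative example (not executed): with history [("Hi", "Hello!")] and
# message "2+2?", chat_with_ai builds a standard OpenAI-style message list:
#   [{"role": "system", "content": <system_prompt>},
#    {"role": "user", "content": "Hi"},
#    {"role": "assistant", "content": "Hello!"},
#    {"role": "user", "content": "2+2?"}]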

def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
    print("Starting respond function...")
    client = create_client(api_key)
    # The caller passes an already-formatted system prompt, so {budget} has
    # been substituted; don't format again here (thinking_budget is kept in
    # the signature for compatibility with process_chat).
    messages = chat_with_ai(message, chat_history, system_prompt)
    start_time = time.time()

    try:
        print("Calling the chat completions API...")
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            stream=False  # Synchronous (non-streaming) response
        )
        response = completion.choices[0].message.content
        thinking_time = time.time() - start_time
        print("Response received from the API.")
        yield response, thinking_time
    except Exception as e:
        error_message = f"Error: {str(e)}"
        print(error_message)
        yield error_message, time.time() - start_time

def parse_response(response):
    answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
    reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)

    answer = answer_match.group(1).strip() if answer_match else ""
    reflection = reflection_match.group(1).strip() if reflection_match else ""

    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)

    return answer, reflection, steps
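
# Illustrative example (not executed): for a response such as
#   "<step>Add 2 and 2</step><answer>4</answer><reflection>Trivial.</reflection>"
# parse_response returns ("4", "Trivial.", ["Add 2 and 2"]).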

def process_chat(message, history, model, system_prompt, thinking_budget, api_key):
    print(f"Received message: {message}")
    if not api_key:
        print("API key missing")
        return "Please provide your API Key before starting the chat."

    try:
        formatted_system_prompt = system_prompt.format(budget=thinking_budget)
    except KeyError as e:
        error_msg = f"System prompt missing placeholder: {str(e)}"
        print(error_msg)
        return error_msg

    full_response = ""
    thinking_time = 0

    for response, elapsed_time in respond(message, history, model, formatted_system_prompt, thinking_budget, api_key):
        print(f"Received response: {response}")
        full_response = response
        thinking_time = elapsed_time

    if full_response.startswith("Error:"):
        return full_response

    answer, reflection, steps = parse_response(full_response)

    formatted_response = f"**Answer:** {answer}\n\n**Reflection:** {reflection}\n\n**Thinking Steps:**\n"
    for i, step in enumerate(steps, 1):
        formatted_response += f"**Step {i}:** {step}\n"

    formatted_response += f"\n**Thinking time:** {thinking_time:.2f} s"

    print(f"Appended response: {formatted_response}")
    history.append((message, formatted_response))
    return formatted_response

# Define the default system prompt
default_system_prompt = """
You are a helpful assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant. Your task is to provide a detailed, step-by-step solution to a given question. Follow these instructions carefully:

1. Read the given question carefully and reset the counter between <count> and </count> tags to {budget}.
2. Generate a detailed, logical step-by-step solution.
3. Enclose each step of your solution within <step> and </step> tags.
4. You may use at most {budget} steps (the starting budget). Keep track by counting down inside <count> and </count> tags, and STOP GENERATING MORE STEPS when the count reaches 0; you do not have to use all of the steps.
5. Do a self-reflection when you are unsure how to proceed; based on the self-reflection and reward, decide whether you need to return to previous steps.
6. After completing the solution steps, reorganize and synthesize them into the final answer within <answer> and </answer> tags.
7. Provide a critical, honest, and subjective self-evaluation of your reasoning process within <reflection> and </reflection> tags.
8. Assign a quality score to your solution as a float between 0.0 (lowest quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.

Example format:            
<count> [starting budget] </count>

<step> [Content of step 1] </step>
<count> [remaining budget] </count>

<step> [Content of step 2] </step>
<reflection> [Evaluation of the steps so far] </reflection>
<reward> [Float between 0.0 and 1.0] </reward>
<count> [remaining budget] </count>

<step> [Content of step 3 or Content of some previous step] </step>
<count> [remaining budget] </count>

...

<step>  [Content of final step] </step>
<count> [remaining budget] </count>

<answer> [Final Answer] </answer>

<reflection> [Evaluation of the solution] </reflection>

<reward> [Float between 0.0 and 1.0] </reward>
"""

with gr.Blocks() as demo:
    gr.Markdown("# Llama3.1-Instruct-O1")
    gr.Markdown("[Powered by Llama3.1 models through SN Cloud](https://sambanova.ai/fast-api?api_ref=907266)")

    with gr.Row():
        api_key = gr.Textbox(
            label="API Key",
            type="password",
            placeholder="Enter your API key here"
        )

    with gr.Row():
        model = gr.Dropdown(
            choices=MODELS,
            label="Select Model",
            value=MODELS[0]
        )
        thinking_budget = gr.Slider(
            minimum=1,
            maximum=100,
            value=10,
            step=1,
            label="Thinking Budget"
        )

    system_prompt = gr.Textbox(
        label="System Prompt",
        value=default_system_prompt,
        lines=10
    )

    msg = gr.Textbox(
        label="Message",
        placeholder="Type your message here..."
    )
    submit = gr.Button("Submit")
    clear = gr.Button("Clear Chat")

    output = gr.Textbox(
        label="Response",
        lines=20,
        interactive=False
    )

    # Per-session chat history; stored in gr.State so each browser session
    # gets its own copy.
    chat_history = []

    def handle_submit(message, history, model, system_prompt, thinking_budget, api_key):
        return process_chat(message, history, model, system_prompt, thinking_budget, api_key)

    def handle_clear():
        # Clears the visible response only; the stored history is untouched.
        return ""

    submit.click(
        handle_submit,
        inputs=[msg, gr.State(chat_history), model, system_prompt, thinking_budget, api_key],
        outputs=output
    )

    clear.click(
        handle_clear,
        inputs=None,
        outputs=output
    )

demo.launch()
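
# To run locally (assumptions: this file is saved as app.py and the
# dependencies are installed, e.g. `pip install gradio openai`):
#   python app.py
# then open the local URL Gradio prints (http://127.0.0.1:7860 by default).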