Spaces:
Running
Running
Rewrite request
Browse files
app.py
CHANGED
@@ -1,22 +1,44 @@
|
|
1 |
import gradio as gr
|
2 |
import openai
|
|
|
|
|
3 |
|
4 |
-
openai.api_key = "sk-
|
|
|
5 |
|
6 |
default_system_message = {"role": "system", "content": "You are a brilliant, helpful assistant, always providing answers to the best of your knowledge. If you are unsure of the answer, you indicate it to the user."}
|
7 |
|
8 |
def get_completion(model, user_message, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty):
|
9 |
new_history_row = {"role": "user", "content": user_message}
|
10 |
updated_message_history = message_history + [new_history_row]
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
assistant_message = completion["choices"][0]["message"]["content"]
|
21 |
new_history_row = {"role": "assistant", "content": assistant_message}
|
22 |
updated_message_history = updated_message_history + [new_history_row]
|
@@ -31,15 +53,34 @@ def retry_completion(model, message_history, chatlog_history, temperature, maxim
|
|
31 |
updated_chatlog_history = chatlog_history[:-1]
|
32 |
# delete latest assistant message from message_history
|
33 |
updated_message_history = message_history[:-1]
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
assistant_message = completion["choices"][0]["message"]["content"]
|
44 |
new_history_row = {"role": "assistant", "content": assistant_message}
|
45 |
updated_message_history = updated_message_history + [new_history_row]
|
@@ -76,5 +117,5 @@ with gr.Blocks(theme=theme) as app:
|
|
76 |
retry_button.click(retry_completion, inputs=[model, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty], outputs=[user_message, message_history, chatlog_history, chatbot, token_count])
|
77 |
reset_button.click(reset_chat, inputs=[], outputs=[user_message, message_history, chatlog_history, chatbot, token_count])
|
78 |
|
79 |
-
|
80 |
-
app.launch(auth=("admin", "REDACTED"))  # SECURITY: plaintext admin password was committed here — rotate the credential and source it from an environment variable/secret store
|
|
|
1 |
import gradio as gr
|
2 |
import openai
|
3 |
+
import os
|
4 |
+
import requests
|
5 |
|
6 |
+
openai.api_key = "sk-REDACTED"  # SECURITY: a live OpenAI secret key was committed here in plaintext — it must be revoked immediately; load it via os.getenv("OPENAI_API_KEY") instead
|
7 |
+
# openai.api_key = os.getenv("OPENAI_API_KEY")
|
8 |
|
9 |
default_system_message = {"role": "system", "content": "You are a brilliant, helpful assistant, always providing answers to the best of your knowledge. If you are unsure of the answer, you indicate it to the user."}
|
10 |
|
11 |
def get_completion(model, user_message, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty):
|
12 |
new_history_row = {"role": "user", "content": user_message}
|
13 |
updated_message_history = message_history + [new_history_row]
|
14 |
+
headers = {
|
15 |
+
"Content-Type": "application/json",
|
16 |
+
"Authorization": f"Bearer {openai.api_key}",
|
17 |
+
}
|
18 |
+
payload = {
|
19 |
+
"model":model,
|
20 |
+
"messages":updated_message_history,
|
21 |
+
"temperature":temperature,
|
22 |
+
"max_tokens":maximum_length,
|
23 |
+
"top_p":top_p,
|
24 |
+
"frequency_penalty":frequency_penalty,
|
25 |
+
"presence_penalty":presence_penalty,
|
26 |
+
}
|
27 |
+
completion = requests.post(
|
28 |
+
"https://api.openai.com/v1/chat/completions",
|
29 |
+
headers=headers,
|
30 |
+
json=payload,
|
31 |
)
|
32 |
+
completion = completion.json()
|
33 |
+
# completion = openai.ChatCompletion.create(
|
34 |
+
# model=model,
|
35 |
+
# messages=updated_message_history,
|
36 |
+
# temperature=temperature,
|
37 |
+
# max_tokens=maximum_length,
|
38 |
+
# top_p=top_p,
|
39 |
+
# frequency_penalty=frequency_penalty,
|
40 |
+
# presence_penalty=presence_penalty,
|
41 |
+
# )
|
42 |
assistant_message = completion["choices"][0]["message"]["content"]
|
43 |
new_history_row = {"role": "assistant", "content": assistant_message}
|
44 |
updated_message_history = updated_message_history + [new_history_row]
|
|
|
53 |
updated_chatlog_history = chatlog_history[:-1]
|
54 |
# delete latest assistant message from message_history
|
55 |
updated_message_history = message_history[:-1]
|
56 |
+
headers = {
|
57 |
+
"Content-Type": "application/json",
|
58 |
+
"Authorization": f"Bearer {openai.api_key}",
|
59 |
+
}
|
60 |
+
payload = {
|
61 |
+
"model":model,
|
62 |
+
"messages":updated_message_history,
|
63 |
+
"temperature":temperature,
|
64 |
+
"max_tokens":maximum_length,
|
65 |
+
"top_p":top_p,
|
66 |
+
"frequency_penalty":frequency_penalty,
|
67 |
+
"presence_penalty":presence_penalty,
|
68 |
+
}
|
69 |
+
completion = requests.post(
|
70 |
+
"https://api.openai.com/v1/chat/completions",
|
71 |
+
headers=headers,
|
72 |
+
json=payload,
|
73 |
)
|
74 |
+
completion = completion.json()
|
75 |
+
# completion = openai.ChatCompletion.create(
|
76 |
+
# model=model,
|
77 |
+
# messages=updated_message_history,
|
78 |
+
# temperature=temperature,
|
79 |
+
# max_tokens=maximum_length,
|
80 |
+
# top_p=top_p,
|
81 |
+
# frequency_penalty=frequency_penalty,
|
82 |
+
# presence_penalty=presence_penalty,
|
83 |
+
# )
|
84 |
assistant_message = completion["choices"][0]["message"]["content"]
|
85 |
new_history_row = {"role": "assistant", "content": assistant_message}
|
86 |
updated_message_history = updated_message_history + [new_history_row]
|
|
|
117 |
retry_button.click(retry_completion, inputs=[model, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty], outputs=[user_message, message_history, chatlog_history, chatbot, token_count])
|
118 |
reset_button.click(reset_chat, inputs=[], outputs=[user_message, message_history, chatlog_history, chatbot, token_count])
|
119 |
|
120 |
+
app.launch()
|
121 |
+
# app.launch(auth=("admin", "REDACTED"))  # SECURITY: plaintext password removed from this commented-out line — the credential was exposed in history and must be rotated
|