Upload 5 files
Several improvements:
* SystemMessage now AI generated
* Conversation starter now AI generated
* Messages containing "as an AI" are now filtered and retried
- app.py +25 -44
- utils/__init__.py +0 -0
- utils/__pycache__/__init__.cpython-310.pyc +0 -0
- utils/__pycache__/helper.cpython-310.pyc +0 -0
- utils/helper.py +42 -0
app.py
CHANGED
@@ -1,8 +1,8 @@
 import gradio as gr
 import os
-from langchain.llms import OpenAI
 from langchain.chat_models import ChatOpenAI
 from langchain.schema import HumanMessage, SystemMessage, AIMessage
+import utils.helper as hp
 # from dotenv import load_dotenv

 if "OPENAI_API_KEY" in os.environ and os.environ["OPENAI_API_KEY"] != "":
@@ -27,6 +27,17 @@ with gr.Blocks() as demo:
     iterations = gr.Number( label="Iterations", name="iterations")
     btn_run = gr.Button("Run")
     output = gr.outputs.HTML( )
+    kofi_html = gr.HTML("""
+    <div>
+      <p style="text-align: center;">
+        </script>
+        <a href="https://ko-fi.com/S6S1LV2XL" target="_blank"><img height="36" style="border: 0px; height: 36px; display: block; margin: 0 auto;" src="https://storage.ko-fi.com/cdn/kofi5.png?v=3" border="0" alt="Buy Me a Coffee at ko-fi.com" /></a>
+      </p>
+    </div>
+
+    """)
+    #
+    components = [None, None, kofi_html]
     def format_chat(chat_history):
         chat_html = "<div style='padding: 10px; border: 1px solid #ccc;'>"
         for message in chat_history:
@@ -61,55 +72,28 @@ with gr.Blocks() as demo:
     def initialize(name1, name2, iterations, openai_key):
         chat1 = ChatOpenAI(temperature=0.9, openai_api_key=openai_key)
         chat2 = ChatOpenAI(temperature=0.9, openai_api_key=openai_key)
-
-        MessageStack1 = [SystemMessage(content="""
-        You are CHAR1 and nobody else than CHAR1.
-        You will talk like you are CHAR1, and you will adjust your knowledge to that of CHAR1.
-        You will adjust the tone of the conversation as if you're CHAR1
-        You will roleplay CHAR1 and behave like CHAR1.
-        You will never admit that you are just an AI and always stay in Character.
-        You must not repeat yourself.
-        If you understood, reply ACK.""".replace( "CHAR1", name1) ),
+        welcome_msg = hp.generate_welcome_message(name1, name2, openai_key)
+        MessageStack1 = [SystemMessage(content=hp.generate_system_message(name1, openai_key)),
         AIMessage(content = "ACK"),
-        HumanMessage(content=
-
-
-        MessageStack2 = [SystemMessage(content="""
-        I want you to act like CHAR2.
-        I want you to respond and answer like him using the tone, manner, opinions, philosophy, and vocabulary that CHAR2 would use.
-        Do not write any explanations.
-        You must know all of the knowledge of CHAR2.
-        You must not know anything else.
-        You must never switch roles.
-        You must not repeat yourself.
-        If you understood, reply ACK.""".replace( "CHAR2", name2) ),
-        AIMessage(content = "ACK")]
+        HumanMessage(content=welcome_msg)]

-        MsgStack = [ name2+": Greetings, Traveler"]

-
+        MessageStack2 = [SystemMessage(content=hp.generate_system_message(name2, openai_key)),
+        AIMessage(content = "ACK")]

-
-        # Message1 = chat1(MessageStack1).content
-        # print( "Newton: " + Message1 )
-        # MessageStack1.append( AIMessage( Message1 ))
-        # MessageStack2.append( HumanMessage( Message1 ))
-        # Message2 = chat2(MessageStack2).content
-        # print("Einstein: " + Message2)
+        MsgStack = [ name2+": " + welcome_msg]

-
-        # MessageStack2.append( AIMessage( Message2 ))
-        chat_history = []
+        chat_history = [{"sender": name2, "content": welcome_msg, "color" : color_chg(name2)}]
         print( iterations)

         for i in range(int(iterations)):
-            response1 = chat1
+            response1 = hp.filtered_response(chat1,MessageStack1)
             print(name1+": " + response1.content)
             MsgStack.append( name1+": "+response1.content)
-            MessageStack1.append(AIMessage(content =
-            MessageStack2.append(HumanMessage(content =
+            MessageStack1.append(AIMessage(content =response1.content))
+            MessageStack2.append(HumanMessage(content =response1.content))

-            response2 = chat2
+            response2 = hp.filtered_response(chat2,MessageStack2)
             print(name2+": "+response2.content)
             MsgStack.append( name2+": "+response2.content)
             chat_history.append({"sender": name1, "content": response1.content, "color" : color_chg(name1) } )
@@ -122,11 +106,8 @@ with gr.Blocks() as demo:
             # conversation += i + "\n"
             # print(conversation)
         return format_chat(chat_history)
-    # demo = gr.Interface( fn=initialize, inputs=["text", "text", gr.Number(minimum=1, maximum=50, step=1)], outputs =gr.outputs.HTML(label="Chat"))
-
-    # demo.launch(server_port= 1113)
-

     btn_run.click(fn=initialize, inputs=[name1, name2, iterations, openai_key], outputs = output)

-demo.
+# demo.footer_html = "<a href='https://ko-fi.com/your_kofi_link'>Support me on Ko-fi</a>"
+demo.launch(server_port=1113 )
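Taken together, the app.py changes reduce initialize to the loop sketched below: each character gets an AI-generated system prompt, the second character opens with an AI-generated greeting, and every turn is routed through the "as an AI" filter. This is a minimal illustrative sketch of that flow outside Gradio, assuming the utils.helper API added in this commit (and the corrected membership check discussed after utils/helper.py below); run_dialog and its local variable names are hypothetical and are not part of the commit.

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
import utils.helper as hp

def run_dialog(name1, name2, iterations, openai_key):
    # One chat model per character, as in app.py.
    chat1 = ChatOpenAI(temperature=0.9, openai_api_key=openai_key)
    chat2 = ChatOpenAI(temperature=0.9, openai_api_key=openai_key)

    # AI-generated role prompts and an AI-generated opener spoken by name2.
    welcome_msg = hp.generate_welcome_message(name1, name2, openai_key)
    stack1 = [SystemMessage(content=hp.generate_system_message(name1, openai_key)),
              AIMessage(content="ACK"),
              HumanMessage(content=welcome_msg)]
    stack2 = [SystemMessage(content=hp.generate_system_message(name2, openai_key)),
              AIMessage(content="ACK")]

    transcript = [f"{name2}: {welcome_msg}"]
    for _ in range(int(iterations)):
        # Each reply is regenerated until it passes the "as an AI" filter.
        reply1 = hp.filtered_response(chat1, stack1)
        stack1.append(AIMessage(content=reply1.content))
        stack2.append(HumanMessage(content=reply1.content))
        transcript.append(f"{name1}: {reply1.content}")

        reply2 = hp.filtered_response(chat2, stack2)
        stack2.append(AIMessage(content=reply2.content))
        stack1.append(HumanMessage(content=reply2.content))
        transcript.append(f"{name2}: {reply2.content}")
    return transcript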
utils/__init__.py
ADDED
File without changes
utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (147 Bytes).
utils/__pycache__/helper.cpython-310.pyc
ADDED
Binary file (1.72 kB).
utils/helper.py
ADDED
@@ -0,0 +1,42 @@
+from langchain.llms import OpenAI
+
+def extract_prompt(text):
+    start_tag = "<prompt>"
+    end_tag = "</prompt>"
+    start_index = text.find(start_tag)
+    end_index = text.find(end_tag)
+
+    if start_index != -1 and end_index != -1 and start_index < end_index:
+        start_index += len(start_tag)
+        return text[start_index:end_index].strip()
+
+    return ""
+
+def generate_system_message(name, openai_key):
+    llm = OpenAI(openai_api_key=openai_key)
+    text = """Generate a prompt for a large language model that tells it to behave like {0}.
+    Make sure the instructions guarantee that the LLM never breaks character and never switches roles and at all times behaves like if it would be {0}.
+    Make sure the LLM doesn't repeat its assumed role all the time.
+    Start the prompt with <prompt> and end it with </prompt>""".format(name)
+    prompt = llm(text)
+
+    print(prompt)
+    response = extract_prompt(prompt)
+    print(response)
+    return response
+
+def generate_welcome_message(name1, name2, openai_key):
+    llm = OpenAI(openai_api_key=openai_key)
+    response = llm("Imagine {0} and {1} are meeting by chance on the street. To start a conversation, {0} says: ".format(name2, name1)).replace('"','')
+    print(response)
+    return response
+
+def filtered_response(chat, stack):
+    invalid_response = True
+    while(invalid_response):
+        invalid_response = False
+        response = chat(stack)
+        if ( "an AI language model" in response ) or ( "an AI assistant") in response:
+            print( "FILTERED RESPONSE:" + response)
+            invalid_response = True
+    return response