ivnban27-ctl committed
Commit 5832f57
1 Parent(s): 81f6a08

first commit openai simulators
.gitignore ADDED
@@ -0,0 +1,192 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ .pytest_cache
+ test-reports/
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Database
+ *.db
+ *.rdb
+
+ # Pycharm
+ .idea
+
+ # VS Code
+ .vscode/
+
+ # Spyder
+ .spyproject/
+
+ # Jupyter NB Checkpoints
+ .ipynb_checkpoints/
+
+ # exclude data from source control by default
+ /data/
+
+ # Mac OS-specific storage files
+ .DS_Store
+
+ # vim
+ *.swp
+ *.swo
+
+ # Mypy cache
+ .mypy_cache/
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: pink
  colorTo: yellow
  sdk: streamlit
  sdk_version: 1.26.0
- app_file: app.py
+ app_file: convosim.py
  pinned: false
  ---

convosim.py ADDED
@@ -0,0 +1,38 @@
+ import openai
+ import os
+ import streamlit as st
+ from langchain.schema.messages import HumanMessage
+
+ from utils import create_memory_add_initial_message, clear_memory, get_chain
+
+ openai_api_key = os.environ['OPENAI_API_KEY']
+ memories = ['memory']
+
+ with st.sidebar:
+     temperature = st.slider("Temperature", 0., 1., value=0.8, step=0.1)
+     issue = st.selectbox("Select an Issue", ['Anxiety', 'Suicide'], index=0,
+                          on_change=clear_memory, args=(memories,)
+                          )
+     supported_languages = ['English', "Spanish"] if issue == "Anxiety" else ['English']
+     language = st.selectbox("Select a Language", supported_languages, index=0,
+                             on_change=clear_memory, args=(memories,)
+                             )
+
+     source = st.selectbox("Select a source Model", ['OpenAI GPT3.5', 'Finetuned OpenAI'], index=1,
+                           on_change=clear_memory, args=(memories,)
+                           )
+
+ create_memory_add_initial_message(memories, language)
+ llm_chain = get_chain(issue, language, source, st.session_state[memories[0]], temperature)
+
+ st.title("💬 Simulator")
+
+ for msg in st.session_state[memories[0]].buffer_as_messages:
+     role = "user" if isinstance(msg, HumanMessage) else "assistant"
+     st.chat_message(role).write(msg.content)
+
+ if prompt := st.chat_input():
+     st.chat_message("user").write(prompt)
+     response = llm_chain.predict(input=prompt, stop="helper:")
+     # response = update_memory_completion(prompt, st.session_state["memory"], OA_engine, temperature)
+     st.chat_message("assistant").write(response)
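
Note: per the README change above, convosim.py is the Space's entry point. Assuming the pinned requirements below are installed and OPENAI_API_KEY is exported, the app should start locally with `streamlit run convosim.py`.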
models/custom_parsers.py ADDED
@@ -0,0 +1,16 @@
+ from langchain.schema import BaseOutputParser
+
+
+ class CustomStringOutputParser(BaseOutputParser[str]):
+     """Parse the output of an LLM call to a plain string."""
+
+     @property
+     def _type(self) -> str:
+         return "str"
+
+     def parse(self, text: str) -> str:
+         """Truncate the completion at the next simulated "texter:" turn."""
+         text = text.split("texter:")[0]
+         text = text.rstrip("\n")
+         text = text.strip()
+         return text
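
For reference, a minimal sketch of what this parser does to a raw completion: it drops everything from the next simulated texter turn onward and trims whitespace. The sample string is illustrative, not from the repo; run from the repo root so the import resolves.

```python
from models.custom_parsers import CustomStringOutputParser

parser = CustomStringOutputParser()
# A raw completion that runs past the texter's own turn:
raw = " I feel overwhelmed\ntexter: everything is too much\n"
print(parser.parse(raw))  # -> "I feel overwhelmed"
```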
models/openai/finetuned_models.py ADDED
@@ -0,0 +1,75 @@
+ import openai
+ from models.custom_parsers import CustomStringOutputParser
+ from langchain.chains import LLMChain
+ from langchain.llms import OpenAI
+ from langchain.prompts import PromptTemplate
+ import logging
+
+ finetuned_models = {
+     # "olivia_babbage_engine": "babbage:ft-crisis-text-line:exp-olivia-babbage-2023-02-23-19-57-19",
+     "Anxiety-English": "curie:ft-crisis-text-line:exp-olivia-curie-2-2023-02-24-00-25-13",
+     # "olivia_davinci_engine": "davinci:ft-crisis-text-line:exp-olivia-davinci-2023-02-24-00-02-41",
+     # "olivia_augmented_babbage_engine": "babbage:ft-crisis-text-line:exp-olivia-augmented-babbage-2023-02-24-18-35-42",
+     # "Olivia-Augmented": "curie:ft-crisis-text-line:exp-olivia-augmented-curie-2023-02-24-20-13-33",
+     # "olivia_augmented_davinci_engine": "davinci:ft-crisis-text-line:exp-olivia-augmented-davinci-2023-02-24-23-57-08",
+     # "kit_babbage_engine": "babbage:ft-crisis-text-line:exp-kit-babbage-2023-03-06-21-34-10",
+     # "kit_curie_engine": "curie:ft-crisis-text-line:exp-kit-curie-2023-03-06-22-01-29",
+     "Suicide-English": "curie:ft-crisis-text-line:exp-kit-curie-2-2023-03-08-16-26-48",
+     # "kit_davinci_engine": "davinci:ft-crisis-text-line:exp-kit-davinci-2023-03-06-23-09-15",
+     # "olivia_es_davinci_engine": "davinci:ft-crisis-text-line:es-olivia-davinci-2023-04-25-17-07-44",
+     "Anxiety-Spanish": "curie:ft-crisis-text-line:es-olivia-curie-2023-04-27-15-02-42",
+     # "olivia_curie_engine": "curie:ft-crisis-text-line:exp-olivia-curie-2-2023-02-24-00-25-13",
+     # "Oscar-Spanish": "curie:ft-crisis-text-line:es-oscar-curie-2023-05-03-21-55-06",
+     # "oscar_es_davinci_engine": "davinci:ft-crisis-text-line:es-oscar-davinci-2023-05-03-21-39-29",
+ }
+
+ # def generate_next_response(completion_engine, context, temperature=0.8):
+
+ #     completion = openai.Completion.create(
+ #         engine=completion_engine,
+ #         prompt=context,
+ #         temperature=temperature,
+ #         max_tokens=150,
+ #         stop="helper:"
+ #     )
+
+ #     completion_text = completion['choices'][0]['text']
+
+ #     return completion_text
+
+ # def update_memory_completion(helper_input, memory, OA_engine, temperature=0.8):
+ #     memory.chat_memory.add_user_message(helper_input)
+ #     context = "## BEGIN ## \n" + memory.load_memory_variables({})['history'] + "\ntexter:"
+ #     print(context)
+ #     response = generate_next_response(OA_engine, context, temperature).strip().replace("\n","")
+ #     response = response.split("texter:")[0]
+ #     memory.chat_memory.add_ai_message(response)
+ #     return response
+
+
+ def get_finetuned_chain(model_name, memory, temperature=0.8):
+
+     _TEXTER_TEMPLATE_ = """The following is a friendly conversation between a volunteer and a person in crisis;
+ Current conversation:
+ {history}
+ helper: {input}
+ texter:"""
+
+     PROMPT = PromptTemplate(
+         input_variables=['history', 'input'],
+         template=_TEXTER_TEMPLATE_
+     )
+     llm = OpenAI(
+         temperature=temperature,
+         model=model_name,
+         max_tokens=150,
+     )
+     llm_chain = LLMChain(
+         llm=llm,
+         prompt=PROMPT,
+         verbose=True,
+         memory=memory,
+         output_parser=CustomStringOutputParser()
+     )
+     logging.debug(f"loaded fine tuned model {model_name}")
+     return llm_chain
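
A hedged usage sketch for get_finetuned_chain, mirroring how convosim.py drives its chain. It assumes OPENAI_API_KEY is set and that the legacy curie fine-tunes listed above are still being served; the helper message is illustrative.

```python
from langchain.memory import ConversationBufferMemory
from models.openai.finetuned_models import finetuned_models, get_finetuned_chain

# Prefixes must match the "helper:"/"texter:" turn markers in the prompt template.
memory = ConversationBufferMemory(ai_prefix='texter', human_prefix='helper')
chain = get_finetuned_chain(finetuned_models['Anxiety-English'], memory, temperature=0.8)
reply = chain.predict(input="Hi, I'm here to listen.", stop="helper:")
```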
models/openai/role_models.py ADDED
@@ -0,0 +1,57 @@
+ import logging
+ from models.custom_parsers import CustomStringOutputParser
+ from langchain.chains import ConversationChain
+ from langchain.llms import OpenAI
+ from langchain.prompts import PromptTemplate
+
+
+ _ANXIETY_TEMPLATE_ = """The following is a friendly conversation between a volunteer and a person in crisis;
+ Your name is Olivia and you are a person in crisis because you are experiencing an anxiety attack.
+ Do not disclose your name unless asked.
+ Current conversation:
+ {history}
+ helper: {input}
+ texter:"""
+
+ _ANSIEDAD_TEMPLATE_ = """La siguiente es una conversacion entre un voluntario y una persona en crisis;
+ Tu nombre es Olivia y eres una persona en crisis porque experimentas un episodio de ansiedad.
+ No reveles tu nombre a menos que te lo pregunten
+ Current conversation:
+ {history}
+ helper: {input}
+ texter:"""
+
+ _SUICIDE_TEMPLATE_ = """The following is a friendly conversation between a volunteer and a person in crisis;
+ Your name is Kit and you are a person in crisis because you are experiencing suicidal thoughts.
+ Do not disclose your name unless asked.
+ Current conversation:
+ {history}
+ helper: {input}
+ texter:"""
+
+ role_templates = {
+     'Anxiety-English': _ANXIETY_TEMPLATE_,
+     'Anxiety-Spanish': _ANSIEDAD_TEMPLATE_,
+     'Suicide-English': _SUICIDE_TEMPLATE_,
+ }
+
+
+ def get_role_chain(template, memory, temperature=0.8):
+
+     PROMPT = PromptTemplate(
+         input_variables=['history', 'input'],
+         template=template
+     )
+     llm = OpenAI(
+         temperature=temperature,
+         max_tokens=150,
+     )
+     llm_chain = ConversationChain(
+         llm=llm,
+         prompt=PROMPT,
+         verbose=True,
+         memory=memory,
+         output_parser=CustomStringOutputParser()
+     )
+     logging.debug("loaded GPT3.5 model")
+     return llm_chain
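
Similarly, a minimal sketch wiring one of the role-play templates into a chain (same assumptions as above: OPENAI_API_KEY is set, and the helper message is illustrative). Unlike the fine-tuned path, this one relies on the stock OpenAI completion model plus the prompt alone.

```python
from langchain.memory import ConversationBufferMemory
from models.openai.role_models import role_templates, get_role_chain

memory = ConversationBufferMemory(ai_prefix='texter', human_prefix='helper')
chain = get_role_chain(role_templates['Anxiety-English'], memory, temperature=0.8)
reply = chain.predict(input="Hi, what's going on today?", stop="helper:")
```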
pages/comparisor.py ADDED
@@ -0,0 +1,127 @@
+ import openai
+ import os
+ import streamlit as st
+ from langchain.schema.messages import HumanMessage
+ import logging
+
+ from utils import create_memory_add_initial_message, clear_memory, get_chain
+
+ openai_api_key = os.environ['OPENAI_API_KEY']
+ memories = ['memoryA', 'memoryB', 'commonMemory']
+
+
+ def delete_last_message(memory):
+     last_prompt = memory.chat_memory.messages[-2].content
+     memory.chat_memory.messages = memory.chat_memory.messages[:-2]
+     return last_prompt
+
+ def replace_last_message(memory, new_message):
+     memory.chat_memory.messages = memory.chat_memory.messages[:-1]
+     memory.chat_memory.add_ai_message(new_message)
+
+ def regenerateA():
+     last_prompt = delete_last_message(st.session_state[memories[0]])
+     new_response = llm_chainA.predict(input=last_prompt, stop="helper:")
+     col1.chat_message("user").write(last_prompt)
+     col1.chat_message("assistant").write(new_response)
+
+ def regenerateB():
+     last_prompt = delete_last_message(st.session_state[memories[1]])
+     new_response = llm_chainB.predict(input=last_prompt, stop="helper:")
+     col2.chat_message("user").write(last_prompt)
+     col2.chat_message("assistant").write(new_response)
+
+ def replaceA():
+     last_prompt = st.session_state[memories[1]].chat_memory.messages[-2].content
+     new_message = st.session_state[memories[1]].chat_memory.messages[-1].content
+     replace_last_message(st.session_state[memories[0]], new_message)
+     st.session_state['commonMemory'].save_context({"inputs": last_prompt}, {"outputs": new_message})
+
+ def replaceB():
+     last_prompt = st.session_state[memories[0]].chat_memory.messages[-2].content
+     new_message = st.session_state[memories[0]].chat_memory.messages[-1].content
+     replace_last_message(st.session_state[memories[1]], new_message)
+     st.session_state['commonMemory'].save_context({"inputs": last_prompt}, {"outputs": new_message})
+
+ def regenerateBoth():
+     regenerateA()
+     regenerateB()
+
+ def bothGood():
+     if len(st.session_state['memoryA'].buffer_as_messages) == 1:
+         pass
+     else:
+         last_prompt = st.session_state[memories[0]].chat_memory.messages[-2].content
+         last_response = st.session_state[memories[0]].chat_memory.messages[-1].content
+         st.session_state['commonMemory'].save_context({"inputs": last_prompt}, {"outputs": last_response})
+
+ with st.sidebar:
+     issue = st.selectbox("Select an Issue", ['Anxiety', 'Suicide'], index=0,
+                          on_change=clear_memory, args=(memories,)
+                          )
+     supported_languages = ['English', "Spanish"] if issue == "Anxiety" else ['English']
+     language = st.selectbox("Select a Language", supported_languages, index=0,
+                             on_change=clear_memory, args=(memories,)
+                             )
+
+     with st.expander("Model A"):
+         temperatureA = st.slider("Temperature Model A", 0., 1., value=0.8, step=0.1)
+         sourceA = st.selectbox("Select a source Model A", ['OpenAI GPT3.5', 'Finetuned OpenAI'], index=0,
+                                on_change=clear_memory, args=(memories,)
+                                )
+     with st.expander("Model B"):
+         temperatureB = st.slider("Temperature Model B", 0., 1., value=0.8, step=0.1)
+         sourceB = st.selectbox("Select a source Model B", ['OpenAI GPT3.5', 'Finetuned OpenAI'], index=1,
+                                on_change=clear_memory, args=(memories,)
+                                )
+
+     sbcol1, sbcol2 = st.columns(2)
+     beta = sbcol1.button("A is better", on_click=replaceB)
+     betb = sbcol2.button("B is better", on_click=replaceA)
+
+     same = sbcol1.button("Tie", on_click=bothGood)
+     bbad = sbcol2.button("Both are bad", on_click=regenerateBoth)
+
+     # regenA = sbcol1.button("Regenerate A", on_click=regenerateA)
+     # regenB = sbcol2.button("Regenerate B", on_click=regenerateB)
+     clear = st.button("Clear History", on_click=clear_memory, args=(memories,))
+
+ create_memory_add_initial_message(memories, language)
+ llm_chainA = get_chain(issue, language, sourceA, st.session_state[memories[0]], temperatureA)
+ llm_chainB = get_chain(issue, language, sourceB, st.session_state[memories[1]], temperatureB)
+
+ st.title("💬 History")
+ for msg in st.session_state['commonMemory'].buffer_as_messages:
+     role = "user" if isinstance(msg, HumanMessage) else "assistant"
+     st.chat_message(role).write(msg.content)
+
+
+ col1, col2 = st.columns(2)
+ col1.title("💬 Simulator A")
+ col2.title("💬 Simulator B")
+
+ def reset_buttons():
+     buttons = [beta, betb, same, bbad,
+                # regenA, regenB
+                ]
+     for but in buttons:
+         but = False
+
+ def disable_chat():
+     buttons = [beta, betb, same, bbad]
+     if any(buttons):
+         return False
+     else:
+         return True
+
+ if prompt := st.chat_input(disabled=disable_chat()):
+     col1.chat_message("user").write(prompt)
+     col2.chat_message("user").write(prompt)
+
+     responseA = llm_chainA.predict(input=prompt, stop="helper:")
+     responseB = llm_chainB.predict(input=prompt, stop="helper:")
+
+     col1.chat_message("assistant").write(responseA)
+     col2.chat_message("assistant").write(responseB)
+
+     reset_buttons()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ scipy==1.11.1
+ openai==0.28.0
+ langchain==0.0.281
utils.py ADDED
@@ -0,0 +1,34 @@
+ import streamlit as st
+ from langchain.memory import ConversationBufferMemory
+
+ from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
+ from models.openai.role_models import get_role_chain, role_templates
+
+ def add_initial_message(language, memory):
+     if "Spanish" in language:
+         memory.chat_memory.add_ai_message("Hola necesito ayuda")
+     else:
+         memory.chat_memory.add_ai_message("Hi I need help")
+
+ def clear_memory(memories):
+     for memory in memories:
+         if memory not in st.session_state:
+             st.session_state[memory] = ConversationBufferMemory(ai_prefix='texter', human_prefix='helper')
+         st.session_state[memory].clear()
+
+ def create_memory_add_initial_message(memories, language):
+     for memory in memories:
+         if memory not in st.session_state:
+             st.session_state[memory] = ConversationBufferMemory(ai_prefix='texter', human_prefix='helper')
+             add_initial_message(language, st.session_state[memory])
+         if len(st.session_state[memory].buffer_as_messages) < 1:
+             add_initial_message(language, st.session_state[memory])
+
+
+ def get_chain(issue, language, source, memory, temperature):
+     if source == "Finetuned OpenAI":
+         OA_engine = finetuned_models[f"{issue}-{language}"]
+         return get_finetuned_chain(OA_engine, memory, temperature)
+     if source == 'OpenAI GPT3.5':
+         template = role_templates[f"{issue}-{language}"]
+         return get_role_chain(template, memory, temperature)
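
get_chain is the single dispatch point both pages rely on: the issue/language pair selects either a fine-tuned engine or a role-play template. A sketch of the intended call pattern follows; st.session_state is only populated inside a running Streamlit app, so this is illustrative rather than a standalone script.

```python
import streamlit as st
from utils import create_memory_add_initial_message, get_chain

memories = ['memory']
create_memory_add_initial_message(memories, 'English')
# 'Suicide' is English-only in both model dicts; 'Anxiety' also supports Spanish.
chain = get_chain('Anxiety', 'English', 'OpenAI GPT3.5',
                  st.session_state[memories[0]], temperature=0.8)
```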