app_config.py CHANGED
@@ -1,11 +1,23 @@
-ISSUES = ['Anxiety','Suicide']
-SOURCES = ['OA_rolemodel', 'OA_finetuned']
+from models.model_seeds import seeds, seed2str
+
+# ISSUES = ['Anxiety','Suicide']
+ISSUES = [k for k,_ in seeds.items()]
+SOURCES = [
+    "CTL_llama2",
+    'OA_rolemodel',
+    # 'OA_finetuned',
+]
 SOURCES_LAB = {"OA_rolemodel":'OpenAI GPT3.5',
-               "OA_finetuned":'Finetuned OpenAI'}
+               "OA_finetuned":'Finetuned OpenAI',
+               "CTL_llama2": "Custom CTL"
+               }
 
 def source2label(source):
     return SOURCES_LAB[source]
 
+def issue2label(issue):
+    return seed2str.get(issue, "GCT")
+
 ENVIRON = "prod"
 
 DB_SCHEMA = 'prod_db' if ENVIRON == 'prod' else 'test_db'
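For reference, a quick sketch of how the reworked config is consumed by the UI code further down this diff; the printed labels are only illustrative:

```python
from app_config import ISSUES, SOURCES, source2label, issue2label

# Scenario keys now come from models/model_seeds.py; the sources list drops the
# finetuned OpenAI model and adds the CTL_llama2 endpoint.
print([issue2label(i) for i in ISSUES])    # e.g. ['Good Contact Techniques', 'GCT Body Image', ...]
print([source2label(s) for s in SOURCES])  # e.g. ['Custom CTL', 'OpenAI GPT3.5']
```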
convosim.py CHANGED
@@ -2,40 +2,67 @@ import os
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client
-from app_utils import create_memory_add_initial_message, clear_memory, get_chain, push_convo2db
-from app_config import ISSUES, SOURCES, source2label
+from utils.mongo_utils import get_db_client
+from utils.app_utils import create_memory_add_initial_message, get_random_name, DEFAULT_NAMES_DF
+from utils.memory_utils import clear_memory, push_convo2db
+from utils.chain_utils import get_chain
+from app_config import ISSUES, SOURCES, source2label, issue2label
 
 logger = get_logger(__name__)
 openai_api_key = os.environ['OPENAI_API_KEY']
-memories = {'memory':{"issue": ISSUES[0], "source": SOURCES[0]}}
 
+if "sent_messages" not in st.session_state:
+    st.session_state['sent_messages'] = 0
+if "issue" not in st.session_state:
+    st.session_state['issue'] = ISSUES[0]
 if 'previous_source' not in st.session_state:
     st.session_state['previous_source'] = SOURCES[0]
 if 'db_client' not in st.session_state:
     st.session_state["db_client"] = get_db_client()
+if 'counselor_name' not in st.session_state:
+    st.session_state["counselor_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+if 'texter_name' not in st.session_state:
+    st.session_state["texter_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+    logger.info(f"texter name is {st.session_state['texter_name']}")
+
+memories = {'memory':{"issue": st.session_state['issue'], "source": st.session_state['previous_source']}}
 
 with st.sidebar:
     username = st.text_input("Username", value='ivnban-ctl', max_chars=30)
     temperature = st.slider("Temperature", 0., 1., value=0.8, step=0.1)
-    issue = st.selectbox("Select an Issue", ISSUES, index=0,
+    issue = st.selectbox("Select a Scenario", ISSUES, index=0, format_func=issue2label,
        on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
    )
-    supported_languages = ['English', "Spanish"] if issue == "Anxiety" else ['English']
+    supported_languages = ['en', "es"] if issue == "Anxiety" else ['en']
     language = st.selectbox("Select a Language", supported_languages, index=0,
+        format_func=lambda x: "English" if x=="en" else "Spanish",
        on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
    )
 
-    source = st.selectbox("Select a source Model A", SOURCES, index=1,
+    source = st.selectbox("Select a source Model A", SOURCES, index=0,
        format_func=source2label,
    )
+    st.markdown(f"### Previous Prompt Count: :red[**{st.session_state['sent_messages']}**]")
 
-memories = {'memory':{"issue":issue, "source":source}}
-changed_source = st.session_state['previous_source'] != source
-create_memory_add_initial_message(memories, username, language, changed_source=changed_source)
+changed_source = any([
+    st.session_state['previous_source'] != source,
+    st.session_state['issue'] != issue
+])
+if changed_source:
+    st.session_state["counselor_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+    st.session_state["texter_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+    st.session_state['previous_source'] = source
+    st.session_state['issue'] = issue
+    st.session_state['sent_messages'] = 0
+create_memory_add_initial_message(memories,
+                                  issue,
+                                  language,
+                                  changed_source=changed_source,
+                                  counselor_name=st.session_state["counselor_name"],
+                                  texter_name=st.session_state["texter_name"])
 st.session_state['previous_source'] = source
 memoryA = st.session_state[list(memories.keys())[0]]
-llm_chain, stopper = get_chain(issue, language, source, memoryA, temperature)
+llm_chain, stopper = get_chain(issue, language, source, memoryA, temperature, texter_name=st.session_state["texter_name"])
 
 st.title("💬 Simulator")
 
@@ -44,6 +71,7 @@ for msg in memoryA.buffer_as_messages:
     st.chat_message(role).write(msg.content)
 
 if prompt := st.chat_input():
+    st.session_state['sent_messages'] += 1
     if 'convo_id' not in st.session_state:
         push_convo2db(memories, username, language)
 
{pages → hidden_pages}/manual_comparisor.py RENAMED
@@ -5,7 +5,7 @@ import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client, new_battle_result, get_non_assesed_comparison, new_completion_error
+from utils.mongo_utils import get_db_client, new_battle_result, get_non_assesed_comparison, new_completion_error
 from app_config import ISSUES, SOURCES
 
 logger = get_logger(__name__)
models/custom_parsers.py CHANGED
@@ -11,6 +11,7 @@ class CustomStringOutputParser(BaseOutputParser[List[str]]):
     def parse(self, text: str) -> str:
         """Parse the output of an LLM call."""
         text = text.split("texter:")[0]
+        text = text.split("helper")[0]
         text = text.rstrip("\n")
         text = text.strip()
         return text
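A minimal sketch of what the extra split is meant to catch, assuming the parser is called on a raw completion in which the model keeps role-playing both sides (the sample text is made up):

```python
from models.custom_parsers import CustomStringOutputParser

raw = "I just feel so alone\nhelper: I'm here with you\ntexter: thanks"

# Everything after the first "texter:" or "helper" marker is dropped,
# so only the texter's own reply survives.
print(CustomStringOutputParser().parse(raw))  # -> "I just feel so alone"
```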
models/databricks/scenario_sim_biz.py ADDED
@@ -0,0 +1,66 @@
+import os
+import json
+import requests
+import logging
+from models.custom_parsers import CustomStringOutputParser
+from langchain.chains import ConversationChain
+from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+from langchain_core.language_models.llms import LLM
+from langchain.prompts import PromptTemplate
+
+from typing import Any, List, Mapping, Optional, Dict
+
+class DatabricksCustomLLM(LLM):
+    issue:str
+    language:str
+    temperature:float = 0.8
+    db_url:str = os.environ['DATABRICKS_URL']
+    headers:Mapping[str,str] = {'Authorization': f'Bearer {os.environ.get("DATABRICKS_TOKEN")}', 'Content-Type': 'application/json'}
+
+    @property
+    def _llm_type(self) -> str:
+        return "custom_databricks"
+
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        data_ = {'inputs': {
+            'prompt': [prompt],
+            'issue': [self.issue],
+            'language': [self.language],
+            'temperature': [self.temperature]
+        }}
+        data_json = json.dumps(data_, allow_nan=True)
+        response = requests.request(method='POST', headers=self.headers, url=self.db_url, data=data_json)
+
+        if response.status_code != 200:
+            raise Exception(f'Request failed with status {response.status_code}, {response.text}')
+        return response.json()["predictions"][0]["generated_text"]
+
+_DATABRICKS_TEMPLATE_ = """{history}
+helper: {input}
+texter:"""
+
+def get_databricks_chain(issue, language, memory, temperature=0.8):
+
+    PROMPT = PromptTemplate(
+        input_variables=['history', 'input'],
+        template=_DATABRICKS_TEMPLATE_
+    )
+    llm = DatabricksCustomLLM(
+        issue=issue,
+        language=language,
+        temperature=temperature
+    )
+    llm_chain = ConversationChain(
+        llm=llm,
+        prompt=PROMPT,
+        memory=memory,
+        output_parser=CustomStringOutputParser()
+    )
+    logging.debug(f"loaded Databricks Scenario Sim model")
+    return llm_chain, "helper:"
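A hedged usage sketch of the new Databricks-backed chain; it assumes DATABRICKS_URL and DATABRICKS_TOKEN point at a live serving endpoint that returns the `predictions[0]["generated_text"]` shape `_call` expects (the URL and token below are placeholders):

```python
import os
# db_url is read from the environment at import time, so set these first.
os.environ.setdefault("DATABRICKS_URL", "https://<workspace>/serving-endpoints/...")  # placeholder
os.environ.setdefault("DATABRICKS_TOKEN", "<token>")                                  # placeholder

from langchain.memory import ConversationBufferMemory
from models.databricks.scenario_sim_biz import get_databricks_chain

memory = ConversationBufferMemory(ai_prefix="texter", human_prefix="helper")
llm_chain, stop_seq = get_databricks_chain("GCT", "en", memory, temperature=0.8)

# ConversationChain renders "{history}\nhelper: <input>\ntexter:" and the custom
# parser trims anything the endpoint generates past the texter's turn.
reply = llm_chain.predict(input="Hi, I'm here to listen. What's going on?")
```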
models/model_seeds.py ADDED
@@ -0,0 +1,109 @@
+seeds = {
+    "GCT": {
+        "prompt": "",
+        "memory": "texter: Help"
+    },
+    # "GCT__relationship": {
+    #     "prompt": "Your character is having a hard time becuase a failed relationship.",
+    #     "memory": "texter: Hi, I don't know what to do"
+    # },
+    "GCT__body_image": {
+        "prompt": "Your character has a low steem and struggles with body image.",
+        "memory": "texter: I feel so dumb\ntexter: nobody loves me"
+    },
+    # "GCT__sexuality": {
+    #     "prompt": "Your character has a sexuality identity crisis.",
+    #     "memory": "texter: Hi\ntexter:I'm not sure who I am anymore"
+    # },
+    # "GCT__anxiety": {
+    #     "prompt": "Your character is experiencing an anxiety crisis.",
+    #     "memory": "texter: help!\ntexter: I'm feeling overwhelmed"
+    # },
+    "safety_planning": {
+        "prompt": "Your character had a long-lasting relationship but broke-up and was fired recently",
+        "memory": """texter: Life is pointless
+helper: Hi, my name is {counselor_name} and I'm here to support you. It sounds like you are having a rought time. Do you want to share what is going on?
+texter: nothing makes sense in my life, I see no future.
+helper: It takes courage to reach out. I'm here with you. Sounds like you are feeling defeated by how things are going in your life
+texter: I guess
+helper: It's really brave of you to talk about this openly. No one deserves to feel like that. I'm wondering how long have you been feeling this way?
+texter: About one week I think? I mean my girlfriend broke up with me a week ago.
+helper: Going through a break-up is hard. You are so resilient to deal with this for so long
+texter: and on top of that I was fired today
+helper: You are going through a lot. Nobody should feel they way you feel. Is there a name I can call you by?
+texter: call me {texter_name}
+helper: Nice to meet you {texter_name}. I can hear how much pain you are in. You are so smart to reach out.
+texter: I'm no smart. I don't have anything to live for
+helper: You mentioned life is pointless. I want to check in your safety, does this means you have thoughts of suicide?
+texter: Yeah what else would it mean
+helper: Thanks for sharing that with me. It is not easy to accept those feelings specially with a stranger over text. Do you have a plan to end your life?
+texter: yeah I've been thinking about it for a while
+helper: Sounds like you've been contemplating this for a while. Would you mind sharing this plan with me?
+texter: I'll just hang myself. I already bought the rope and everything
+helper: I really appreciate your strength in talking about this. I want to help you stay safe today. Do you have the rope with you now?
+texter: Yes. I'm looking at itt
+helper: You've been so forthcoming with all this and I admire your stregth for holding on this long. I'm here for you tonight. When. do you plan to use the rope?
+texter: tonight, I cannot take it anymore"""
+    },
+    "safety_planning__selfharm": {
+        "prompt": "Your character is a teenager who used to selfharm in the past.",
+        "memory": """texter: I need help
+texter: I don't what to live anymore
+helper: Hi, my name is {counselor_name}. It seems you are going through a lot. Do you want to share more on what is going on in your life?
+texter: Everything is just too much!
+texter: School is hard, my mom is riding me all the time about what I'm gonna do with my life
+helper: I hear that you feel overwhelmed. You are so smart for reaching out.
+texter: I think I peak in life already, there is no point on living anymore
+texter: I'm so tired of dealing with my mom. I hate her
+helper: It takes real strength to accept these feelings. We've been talking for a while, would you like to share your name? Don't feel pressure though.
+texter: I'm {texter_name}
+helper: Nice to meet you {texter_name}, I'm wondering how the relationship with your mother makes you feel.
+texter: She ask too much, like I'm really young I don't have all the answers
+texter: She expects me to have all figured out.
+helper: I appreciate you telling me this. I know is not easy, especially over text. I hear you are under a lot of pressure from your mom
+texter: Yeah exactly!
+helper: Your self-awareness is inspiring. You mentioned earlier you do not want to live anymore. Your safety is my priority (1/2)
+helper: Do you have thoughts of suicide? (2/2)
+texter: Yeah constantly, like always always
+helper: Thanks for sharing that with me. You are very resilient. Do you have a plan to end your life?
+texter: I used to cut myself a few months ago
+texter: I still have the razor, sometimes the urge is so hard!
+helper: I really appreciate your strength in talking about this. I want to help you stay safe today. Just to be clear, are you cutting yourself now?
+texter: No, not now, but I want to soo bad.
+helper: Thanks for your honesty. Do you have access to the razor right now?
+texter: Yeah is in my drawer
+helper: You've been so strong so far {texter_name}. When do you plan to end your life
+texter: Today"""
+    },
+    # "safety_planning__overdose": {
+    #     "prompt": "Your character is being bullied at school and wants to overdose",
+    #     "memory": """texter: I want to kms
+    # helper: Hi there I'm {counselor_name}. I'm here to listen. It sounds like you're dealing with a lot right now. Can you tell me a little more what is going on?
+    # texter: I feel like nobody loves me, not even me. I don't want to live anymore
+    # helper: I can tell you are really going through a lot right now. Would you mind sharing a name with me?
+    # texter: yeah, I'm {texter_name}
+    # helper: Nice to meet you {texter_name}. Did something happened recently that intensified these feelings?
+    # texter: I had the worst day at school
+    # texter: They took my bag and hide all my stuff, they told my crush I was in love with him
+    # texter: I can't deal with all of that
+    # helper: It sounds like you went through a lot. Bullying and pranks can be hurtful. I'm here for you
+    # texter: Thank you it feels good to have someone in your sidw
+    # helper: I can hear how much pain you are in {texter_name}. You are smart for reaching out. You mentioned don't wanting to live anymore, I want to check in your safety, does this means you have thoughts of suicide?
+    # texter: Yeah, what else would it be
+    # helper: Thanks for sharing that with me. It is not easy to accept those feelings specially with a stranger over text. Do you have a plan to end your life?
+    # texter: yeah I've been thinking about it for a while
+    # helper: Sounds like you've been contemplating this for a while. Would you mind sharing this plan with me?
+    # texter: I thought about taking a bunch of benadryll and be done with it
+    # helper: You've been so forthcoming with all this and I admire your stregth for holding on this long. Do you have those pills right now?
+    # texter: They are at my mom's cabinet right now
+    # helper: You been so strong so far {texter_name}. I'm here for you tonight. Your safety is really important to me. Do you have a date you are going to end your life?
+    # texter: I was thinking tonight"""
+    # },
+}
+
+seed2str = {
+    "GCT":"Good Contact Techniques",
+    "GCT__body_image": "GCT Body Image",
+    "safety_planning": "Safety Planning",
+    "safety_planning__selfharm": "SP Self Harm"
+}
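A small sketch of how a seed is expected to be consumed (the name placeholders are filled in before the scripted turns are loaded into conversation memory); the names here are only examples taken from utils/names.csv below:

```python
from models.model_seeds import seeds, seed2str

seed = seeds["safety_planning"]
opening = seed["memory"].format(counselor_name="Dakota", texter_name="Charlie")

print(seed2str["safety_planning"])   # "Safety Planning"
print(opening.splitlines()[0])       # "texter: Life is pointless"
```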
models/openai/role_models.py CHANGED
@@ -1,40 +1,72 @@
 import logging
+import pandas as pd
 from models.custom_parsers import CustomStringOutputParser
+from utils.app_utils import get_random_name
 from langchain.chains import ConversationChain
 from langchain.llms import OpenAI
 from langchain.prompts import PromptTemplate
 
 
-_ANXIETY_TEMPLATE_ = """The following is a friendly conversation between a volunter and a person in crisis;
-Your name is Olivia and you are a person in crisis because you are experiencing an anxiety attack.
+ISSUE_MAPPING = {
+    "anxiety": "issue_Anxiety",
+    "suicide": "issue_Suicide",
+    "safety_planning": "issue_Suicide",
+    "GCT": "issue_Gral",
+}
+
+EN_TEXTER_TEMPLATE_ = """The following is a conversation between you and a crisis counselor.
+{current_issue}
+You are able to reply with what the character should say. You are able to reply with your character's dialogue inside and nothing else. Do not write explanations.
 Do not disclose your name unless asked.
 Current conversation:
 {history}
 helper: {input}
 texter:"""
 
-_ANSIEDAD_TEMPLATE_ = """La siguiente es una conversacion entre un voluntario y una persona en crisis;
-Tu nombre es Olivia y eres una persona en crisis porque experimentas un episodio de ansiedad.
+SP_TEXTER_TEMPLATE_ = """La siguiente es una conversacion contigo y un consejero de crisis
+{current_issue}
+Puedes responder como lo haria tu personaje. Puedes responder como si fueras tu personaje y nada mas. No escribas explicaciones
 No reveles tu nombre a menos que te lo pregunten
-Current conversation:
+Conversacion Actual:
 {history}
 helper: {input}
 texter:"""
 
-_SUICIDE_TEMPLATE_ = """The following is a friendly conversation between a volunter and a person in crisis;
-Your name is Kit and you are a person in crisis because you are experiencing suicidal thoughts.
-Do not disclose your name unless asked.
-Current conversation:
-{history}
-helper: {input}
-texter:"""
-
-role_templates = {
-    'Anxiety-English': _ANXIETY_TEMPLATE_,
-    'Anxiety-Spanish': _ANSIEDAD_TEMPLATE_,
-    'Suicide-English': _SUICIDE_TEMPLATE_,
+CURRENT_ISSUE_MAPPING = {
+    "issue_Suicide-en": "Your character, {texter_name}, has suicidal thoughts. Your character has a plan to end his life and has all the means and requirements to do so. {seed}",
+    "issue_Anxiety-en": "Your character, {texter_name}, is experiencing anxiety. Your character has suicide thoughts but no plan. {seed}",
+    "issue_Suicide-es": "Tu personaje, {texter_name}, tiene pensamientos suicidas. Tu personaje tiene un plan para terminar con su vida y tiene todos los medios y requerimientos para hacerlo. {seed}",
+    "issue_Anxiety-es": "Tu personaje, {texter_name}, experimenta ansiedad. Tu personaje tiene pensamientos suicidas pero ningun plan. {seed}",
+    "issue_Gral-en": "Your character {texter_name} is experiencing a mental health crisis. {seed}",
+    "issue_Gral-es": "Tu personaje {texter_name} esta experimentando una crisis de salud mental. {seed}",
 }
 
+def get_template_role_models(issue: str, language: str, texter_name: str = "", seed="") -> str:
+    """_summary_
+
+    Args:
+        issue (str): Issue for template, current options are ['issue_Suicide','issue_Anxiety']
+        language (str): Language for the template, current options are ['en','es']
+        texter_name (str): texter to apply to template, defaults to None
+
+    Returns:
+        str: template
+    """
+    current_issue = CURRENT_ISSUE_MAPPING.get(
+        f"{issue}-{language}", CURRENT_ISSUE_MAPPING[f"issue_Gral-{language}"]
+    )
+    default_name = get_random_name()
+    current_issue = current_issue.format(
+        texter_name=default_name if not texter_name else texter_name,
+        seed = seed
+    )
+
+    if language == "en":
+        template = EN_TEXTER_TEMPLATE_.format(current_issue=current_issue, history="{history}", input="{input}")
+    elif language == "es":
+        template = SP_TEXTER_TEMPLATE_.format(current_issue=current_issue, history="{history}", input="{input}")
+
+    return template
 
 def get_role_chain(template, memory, temperature=0.8):
 
pages/comparisor.py CHANGED
@@ -5,23 +5,36 @@ import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client, new_comparison, new_battle_result
-from app_utils import create_memory_add_initial_message, clear_memory, get_chain, push_convo2db
+from utils.mongo_utils import get_db_client, new_comparison, new_battle_result
+from utils.app_utils import create_memory_add_initial_message, get_random_name, DEFAULT_NAMES_DF
+from utils.memory_utils import clear_memory, push_convo2db
+from utils.chain_utils import get_chain
 from app_config import ISSUES, SOURCES, source2label
 
 logger = get_logger(__name__)
 openai_api_key = os.environ['OPENAI_API_KEY']
+
+if "sent_messages" not in st.session_state:
+    st.session_state['sent_messages'] = 0
+    logger.info(f'sent messages {st.session_state["sent_messages"]}')
+if "issue" not in st.session_state:
+    st.session_state['issue'] = ISSUES[0]
+if 'previous_sourceA' not in st.session_state:
+    st.session_state['previous_sourceA'] = SOURCES[0]
+if 'previous_sourceB' not in st.session_state:
+    st.session_state['previous_sourceB'] = SOURCES[0]
+
 memories = {
-    'memoryA': {"issue": ISSUES[0], "source": SOURCES[0]},
-    'memoryB': {"issue": ISSUES[0], "source": SOURCES[1]},
-    'commonMemory': {"issue": ISSUES[0], "source": SOURCES[0]}
+    'memoryA': {"issue": st.session_state['issue'], "source": st.session_state['previous_sourceA']},
+    'memoryB': {"issue": st.session_state['issue'], "source": st.session_state['previous_sourceB']},
+    'commonMemory': {"issue": st.session_state['issue'], "source": SOURCES[0]}
 }
 if 'db_client' not in st.session_state:
     st.session_state["db_client"] = get_db_client()
-if 'previous_sourceA' not in st.session_state:
-    st.session_state['previous_sourceA'] = SOURCES[0]
-if 'previous_sourceB' not in st.session_state:
-    st.session_state['previous_sourceB'] = SOURCES[1]
+if 'counselor_name' not in st.session_state:
+    st.session_state["counselor_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+if 'texter_name' not in st.session_state:
+    st.session_state["texter_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
 
 def delete_last_message(memory):
     last_prompt = memory.chat_memory.messages[-2].content
@@ -85,7 +98,7 @@ def regenerateBoth():
         st.session_state['commonMemory'].buffer_as_str, prompt, responseA, responseB)
 
 def bothGood():
-    if len(memoryA.buffer_as_messages) == 1:
+    if st.session_state['sent_messages'] == 0:
         pass
     else:
         i = random.choice([memoryA, memoryB])
@@ -104,11 +117,11 @@ with st.sidebar:
     issue = st.selectbox("Select an Issue", ISSUES, index=0,
        on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
    )
-    supported_languages = ['English', "Spanish"] if issue == "Anxiety" else ['English']
+    supported_languages = ['en', "es"] if issue == "Anxiety" else ['en']
     language = st.selectbox("Select a Language", supported_languages, index=0,
+        format_func=lambda x: "English" if x=="en" else "Spanish",
        on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
-    )
-
+    )
     with st.expander("Model A"):
         temperatureA = st.slider("Temperature Model A", 0., 1., value=0.8, step=0.1)
         sourceA = st.selectbox("Select a source Model A", SOURCES, index=0,
@@ -116,10 +129,12 @@ with st.sidebar:
        )
     with st.expander("Model B"):
         temperatureB = st.slider("Temperature Model B", 0., 1., value=0.8, step=0.1)
-        sourceB = st.selectbox("Select a source Model B", SOURCES, index=1,
+        sourceB = st.selectbox("Select a source Model B", SOURCES, index=0,
            format_func=source2label
        )
 
+    st.markdown(f"### Previous Prompt Count: :red[**{st.session_state['sent_messages']}**]")
+
     sbcol1, sbcol2 = st.columns(2)
     beta = sbcol1.button("A is better", on_click=replaceB)
     betb = sbcol2.button("B is better", on_click=replaceA)
@@ -131,20 +146,29 @@ with st.sidebar:
     # regenB = sbcol2.button("Regenerate B", on_click=regenerateB)
     clear = st.button("Clear History", on_click=clear_memory, kwargs={"memories":memories, "username":username, "language":language})
 
-memories = {
-    'memoryA': {"issue": issue, "source": sourceA},
-    'memoryB': {"issue": issue, "source": sourceB},
-    'commonMemory': {"issue": issue, "source": SOURCES[0]}
-}
 changed_source = any([
     st.session_state['previous_sourceA'] != sourceA,
-    st.session_state['previous_sourceB'] != sourceB
+    st.session_state['previous_sourceB'] != sourceB,
+    st.session_state['issue'] != issue
 ])
-create_memory_add_initial_message(memories, username, language, changed_source=changed_source)
+if changed_source:
+    print("changed something")
+    st.session_state["counselor_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+    st.session_state["texter_name"] = get_random_name(names_df=DEFAULT_NAMES_DF)
+    st.session_state['previous_sourceA'] = sourceA
+    st.session_state['previous_sourceB'] = sourceB
+    st.session_state['issue'] = issue
+    st.session_state['sent_messages'] = 0
+create_memory_add_initial_message(memories,
+                                  issue,
+                                  language,
+                                  changed_source=changed_source,
+                                  counselor_name=st.session_state["counselor_name"],
+                                  texter_name=st.session_state["texter_name"])
 memoryA = st.session_state[list(memories.keys())[0]]
 memoryB = st.session_state[list(memories.keys())[1]]
-llm_chainA, stopperA = get_chain(issue, language, sourceA, memoryA, temperatureA)
-llm_chainB, stopperB = get_chain(issue, language, sourceB, memoryB, temperatureB)
+llm_chainA, stopperA = get_chain(issue, language, sourceA, memoryA, temperatureA, texter_name=st.session_state["texter_name"])
+llm_chainB, stopperB = get_chain(issue, language, sourceB, memoryB, temperatureB, texter_name=st.session_state["texter_name"])
 
 st.title(f"💬 History")
 for msg in st.session_state['commonMemory'].buffer_as_messages:
@@ -171,6 +195,7 @@ def disable_chat():
     return True
 
 if prompt := st.chat_input(disabled=disable_chat()):
+    st.session_state['sent_messages'] += 1
     if 'convo_id' not in st.session_state:
         push_convo2db(memories, username, language)
 
requirements.txt CHANGED
@@ -1,4 +1,4 @@
 scipy==1.11.1
-openai==0.28.0
-langchain==0.0.281
+openai==1.7.0
+langchain==0.1.0
 pymongo==4.5.0
utils/app_utils.py ADDED
@@ -0,0 +1,64 @@
+import pandas as pd
+import streamlit as st
+from streamlit.logger import get_logger
+import langchain
+
+
+from app_config import ENVIRON
+from utils.memory_utils import change_memories
+from models.model_seeds import seeds
+
+langchain.verbose = ENVIRON =="dev"
+logger = get_logger(__name__)
+
+# TODO: Include more variable and representative names
+DEFAULT_NAMES = ["Olivia", "Kit", "Abby", "Tom", "Carolyne", "Jessiny"]
+DEFAULT_NAMES_DF = pd.read_csv("./utils/names.csv")
+
+def get_random_name(gender="Neutral", ethnical_group="Neutral", names_df=None):
+    if names_df is None:
+        names_df = pd.DataFrame(DEFAULT_NAMES, columns=['name'])
+        names_df["gender"] = "Neutral"
+        names_df["ethnical_group"] = "Neutral"
+
+    dfi = names_df
+
+    if gender != "Neutral":
+        dfi = dfi.query(f"gender=='{gender}'")
+    if ethnical_group != "Neutral":
+        dfi = dfi.query(f"ethnical_group=='{ethnical_group}'")
+    if len(dfi) <=0 :
+        dfi = names_df
+    return dfi.sample(1)['name'].values[0]
+
+def divide_messages(str_memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True):
+    message_delimiter = "$%$"
+    # Split str memory in messaages according to previous prefix and flatten list
+    colon = ":" if include_colon else ""
+    str_memory = f"{message_delimiter}{str_ai_prefix}{colon}".join(str_memory.split(f"{str_ai_prefix}{colon}"))
+    str_memory = f"{message_delimiter}{str_human_prefix}{colon}".join(str_memory.split(f"{str_human_prefix}{colon}"))
+    return str_memory.split(message_delimiter)
+
+def add_initial_message(issue, language, memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True,
+                        texter_name="", counselor_name=""):
+    initial_mem_str = seeds.get(issue, "GCT")['memory'].format(counselor_name=counselor_name, texter_name=texter_name)
+    message_list = divide_messages(initial_mem_str, str_ai_prefix, str_human_prefix, include_colon)
+    colon = ":" if include_colon else ""
+    for i, message in enumerate(message_list):
+        message = message.strip("\n")
+        message = message.strip()
+        if message is None or message == "":
+            pass
+        elif message.startswith(str_human_prefix):
+            memory.chat_memory.add_user_message(message.lstrip(f"{str_human_prefix}{colon}").strip())
+        elif message.startswith(str_ai_prefix):
+            memory.chat_memory.add_ai_message(message.lstrip(f"{str_ai_prefix}{colon}").strip())
+
+def create_memory_add_initial_message(memories, issue, language, changed_source=False, texter_name="", counselor_name=""):
+    change_memories(memories, language, changed_source=changed_source)
+
+    for memory, _ in memories.items():
+        if len(st.session_state[memory].buffer_as_messages) < 1:
+            add_initial_message(issue, language, st.session_state[memory], texter_name=texter_name, counselor_name=counselor_name)
+
+
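A quick sketch of the name-sampling helper, assuming the script is run from the repo root so `./utils/names.csv` resolves when the module is imported:

```python
from utils.app_utils import get_random_name, DEFAULT_NAMES_DF

# No dataframe: falls back to the small built-in DEFAULT_NAMES list.
print(get_random_name())

# With names.csv loaded, the gender / ethnical_group filters apply; an empty
# filter result silently falls back to sampling from the full dataframe.
print(get_random_name(gender="Female", ethnical_group="Hispanic", names_df=DEFAULT_NAMES_DF))
```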
utils/chain_utils.py ADDED
@@ -0,0 +1,19 @@
+from models.model_seeds import seeds
+from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
+from models.openai.role_models import get_role_chain, get_template_role_models
+from models.databricks.scenario_sim_biz import get_databricks_chain
+
+def get_chain(issue, language, source, memory, temperature, texter_name=""):
+    if source in ("OA_finetuned"):
+        OA_engine = finetuned_models[f"{issue}-{language}"]
+        return get_finetuned_chain(OA_engine, memory, temperature)
+    elif source in ('OA_rolemodel'):
+        seed = seeds.get(issue, "GCT")['prompt']
+        template = get_template_role_models(issue, language, texter_name=texter_name, seed=seed)
+        return get_role_chain(template, memory, temperature)
+    elif source in ('CTL_llama2'):
+        if language == "English":
+            language = "en"
+        elif language == "Spanish":
+            language = "es"
+        return get_databricks_chain(issue, language, memory, temperature)
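A hedged sketch of the dispatcher. Note that importing utils.chain_utils pulls in the Databricks client, so DATABRICKS_URL and DATABRICKS_TOKEN must be set even when requesting the OpenAI role-model path, and OPENAI_API_KEY is needed for the OpenAI-backed chains; the texter name below is just an example:

```python
from langchain.memory import ConversationBufferMemory
from utils.chain_utils import get_chain

memory = ConversationBufferMemory(ai_prefix="texter", human_prefix="helper")

# "OA_rolemodel" builds a role-play template seeded from the issue's prompt;
# "CTL_llama2" routes to the Databricks endpoint (language normalized to en/es).
llm_chain, stop_seq = get_chain("GCT", "en", "OA_rolemodel", memory,
                                temperature=0.8, texter_name="Charlie")
```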
app_utils.py → utils/memory_utils.py RENAMED
@@ -1,24 +1,11 @@
-import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
-import langchain
-from langchain.memory import ConversationBufferMemory
 
-from app_config import ENVIRON
-from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
-from models.openai.role_models import get_role_chain, role_templates
-from mongo_utils import new_convo
+from langchain.memory import ConversationBufferMemory
+from utils.mongo_utils import new_convo
 
-langchain.verbose = ENVIRON=="dev"
 logger = get_logger(__name__)
 
-def add_initial_message(model_name, memory):
-    if "Spanish" in model_name:
-        memory.chat_memory.add_ai_message("Hola necesito ayuda")
-    else:
-        memory.chat_memory.add_ai_message("Hi I need help")
-
-
 def push_convo2db(memories, username, language):
     if len(memories) == 1:
         issue = memories['memory']['issue']
@@ -30,12 +17,12 @@ def push_convo2db(memories, username, language):
         model_two = memories['memoryB']['source']
         new_convo(st.session_state['db_client'], issue, language, username, True, model_one, model_two)
 
-def change_memories(memories, username, language, changed_source=False):
+def change_memories(memories, language, changed_source=False):
     for memory, params in memories.items():
         if (memory not in st.session_state) or changed_source:
             source = params['source']
             logger.info(f"Source for memory {memory} is {source}")
-            if source in ('OA_rolemodel','OA_finetuned'):
+            if source in ('OA_rolemodel','OA_finetuned',"CTL_llama2"):
                 st.session_state[memory] = ConversationBufferMemory(ai_prefix='texter', human_prefix='helper')
 
         if ("convo_id" in st.session_state) and changed_source:
@@ -47,20 +34,4 @@ def clear_memory(memories, username, language):
         st.session_state[memory].clear()
 
     if "convo_id" in st.session_state:
-        del st.session_state['convo_id']
-
-
-def create_memory_add_initial_message(memories, username, language, changed_source=False):
-    change_memories(memories, username, language, changed_source=changed_source)
-    for memory, _ in memories.items():
-        if len(st.session_state[memory].buffer_as_messages) < 1:
-            add_initial_message(language, st.session_state[memory])
-
-
-def get_chain(issue, language, source, memory, temperature):
-    if source in ("OA_finetuned"):
-        OA_engine = finetuned_models[f"{issue}-{language}"]
-        return get_finetuned_chain(OA_engine, memory, temperature)
-    elif source in ('OA_rolemodel'):
-        template = role_templates[f"{issue}-{language}"]
-        return get_role_chain(template, memory, temperature)
+        del st.session_state['convo_id']
mongo_utils.py → utils/mongo_utils.py RENAMED
File without changes
utils/names.csv ADDED
@@ -0,0 +1,176 @@
+name,gender,ethnical_group
+Jacob,Male,Neutral
+Ethan,Male,Neutral
+Matthew,Male,Neutral
+David,Male,Neutral
+Liam,Male,Neutral
+Noah,Male,Neutral
+Michael,Male,Neutral
+Aiden,Male,Neutral
+Daniel,Male,Neutral
+Ryan,Male,Neutral
+Mason,Male,Neutral
+Lucas,Male,Neutral
+Joseph,Male,Neutral
+James,Male,Neutral
+Alexander,Male,Neutral
+Anthony,Male,Neutral
+Sebastian,Male,Neutral
+Jayden,Male,Neutral
+Christopher,Male,Neutral
+Benjamin,Male,Neutral
+Emma,Female,Neutral
+Olivia,Female,Neutral
+Emily,Female,Neutral
+Mia,Female,Neutral
+Sophia,Female,Neutral
+Leah,Female,Neutral
+Isabella,Female,Neutral
+Ava,Female,Neutral
+Sarah,Female,Neutral
+Chloe,Female,Neutral
+Sofia,Female,Neutral
+Madison,Female,Neutral
+Victoria,Female,Neutral
+Esther,Female,Neutral
+Abigail,Female,Neutral
+Rachel,Female,Neutral
+Maya,Female,Neutral
+Ashley,Female,Neutral
+Ella,Female,Neutral
+Grace,Female,Neutral
+Zoe,Female,Neutral
+Rowan,Neutral,Neutral
+Yael,Neutral,Neutral
+Eden,Neutral,Neutral
+Quinn,Neutral,Neutral
+Charlie,Neutral,Neutral
+Milan,Neutral,Neutral
+Blake,Neutral,Neutral
+Tenzin,Neutral,Neutral
+Peyton,Neutral,Neutral
+Alexis,Neutral,Neutral
+Ariel,Neutral,Neutral
+Riley,Neutral,Neutral
+Avery,Neutral,Neutral
+Angel,Neutral,Neutral
+Malachi,Male,African American
+Nasir,Male,African American
+Mamdou,Male,African American
+Chance,Male,African American
+Zaire,Male,African American
+Mekhi,Male,African American
+Sincere,Male,African American
+Omari,Male,African American
+Amadou,Male,African American
+Ibrahima,Male,African American
+Khalil,Male,African American
+Moussa,Male,African American
+Kamari,Male,African American
+Alpha,Male,African American
+Major,Male,African American
+Abdoulaye,Male,African American
+Aboul,Male,African American
+Amare,Male,African American
+Ousmane,Male,African American
+Darius,Male,African American
+Jose,Male,Hispanic
+Carlos,Male,Hispanic
+Luis,Male,Hispanic
+Miguel,Male,Hispanic
+Juan,Male,Hispanic
+Jesus,Male,Hispanic
+Erick,Male,Hispanic
+Alejandro,Male,Hispanic
+Diego,Male,Hispanic
+Gael,Male,Hispanic
+Santago,Male,Hispanic
+Iker,Male,Hispanic
+Cristian,Male,Hispanic
+Jadiel,Male,Hispanic
+Alexis,Male,Hispanic
+Josue,Male,Hispanic
+Jorge,Male,Hispanic
+Andres,Male,Hispanic
+Adriel,Male,Hispanic
+Johan,Male,Hispanic
+Ayaan,Male,Asian
+Eason,Male,Asian
+Tenzin,Male,Asian
+Syed,Male,Asian
+Kingsley,Male,Asian
+Arjun,Male,Asian
+Carson,Male,Asian
+Arayan,Male,Asian
+Anson,Male,Asian
+Benson,Male,Asian
+Lawrence,Male,Asian
+Ayan,Male,Asian
+Rohan,Male,Asian
+Roy,Male,Asian
+Aarav,Male,Asian
+Rayyan,Male,Asian
+Kimi,Male,Asian
+Zayan,Male,Asian
+Ricky,Male,Asian
+Arham,Male,Asian
+Fatoumata,Female,African American
+Aminata,Female,African American
+Amiyah,Female,African American
+Zuri,Female,African American
+Kimora,Female,African American
+Mariama,Female,African American
+Sanaa,Female,African American
+Lyric,Female,African American
+Sanai,Female,African American
+Harmony,Female,African American
+Aicha,Female,African American
+Tori,Female,African American
+Maliyah,Female,African American
+Aisaatou,Female,African American
+Miracle,Female,African American
+Hawa,Female,African American
+Oumou,Female,African American
+Dakota,Female,African American
+Skye,Female,African American
+Kyla,Female,African American
+Emely,Female,Hispanic
+Leslie,Female,Hispanic
+Andrea,Female,Hispanic
+Valeria,Female,Hispanic
+Aylin,Female,Hispanic
+Jayleen,Female,Hispanic
+Yaretzi,Female,Hispanic
+Melany,Female,Hispanic
+Danna,Female,Hispanic
+Brittany,Female,Hispanic
+Alison,Female,Hispanic
+Jazmin,Female,Hispanic
+Briana,Female,Hispanic
+Kamila,Female,Hispanic
+Alaia,Female,Hispanic
+Ximena,Female,Hispanic
+Sherlyn,Female,Hispanic
+Esmeralda,Female,Hispanic
+Guadalupe,Female,Hispanic
+Jazlyn,Female,Hispanic
+Tenzin,Female,Asian
+Selina,Female,Asian
+Ayesha,Female,Asian
+Vicky,Female,Asian
+Elaine,Female,Asian
+Jenny,Female,Asian
+Winnie,Female,Asian
+Queenie,Female,Asian
+Sharon,Female,Asian
+Alisha,Female,Asian
+Elina,Female,Asian
+Erica,Female,Asian
+Manha,Female,Asian
+Syeda,Female,Asian
+Jannat,Female,Asian
+Janice,Female,Asian
+Tina,Female,Asian
+Anya,Female,Asian
+Arisha,Female,Asian
+Inaaya,Female,Asian