Spaces:
Running
Running
File size: 1,511 Bytes
5832f57 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import streamlit as st
from langchain.memory import ConversationBufferMemory
from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
from models.openai.role_models import get_role_chain, role_templates
def add_initial_message(model_name, memory):
    """Seed *memory* with the texter's opening line.

    The greeting language is chosen from the model name: any name
    containing "Spanish" gets the Spanish opener, everything else
    gets the English one. The message is recorded as an AI turn.
    """
    greeting = "Hola necesito ayuda" if "Spanish" in model_name else "Hi I need help"
    memory.chat_memory.add_ai_message(greeting)
def clear_memory(memories):
    """Wipe the message history of every named memory in session state.

    Any key in *memories* that is not yet present is first created as a
    fresh ConversationBufferMemory, so clearing never raises on a brand
    new session; then its buffer is emptied.
    """
    for key in memories:
        if key not in st.session_state:
            # Lazily create the memory so .clear() below always has a target.
            st.session_state[key] = ConversationBufferMemory(
                ai_prefix='texter', human_prefix='helper'
            )
        st.session_state[key].clear()
def create_memory_add_initial_message(memories, language):
    """Ensure each named memory exists in session state and is seeded.

    For every key in *memories*: create a ConversationBufferMemory if the
    key is missing, then — if the buffer holds no messages yet — add the
    texter's opening line (language chosen via *language*).

    Fix: the original also called add_initial_message inside the creation
    branch, which was redundant — a freshly created memory is empty, so the
    emptiness check below seeds it anyway. The duplicate call is removed;
    observable behavior is unchanged.
    """
    for memory in memories:
        if memory not in st.session_state:
            st.session_state[memory] = ConversationBufferMemory(
                ai_prefix='texter', human_prefix='helper'
            )
        # Seed exactly once: only when the conversation has no messages yet.
        if len(st.session_state[memory].buffer_as_messages) < 1:
            add_initial_message(language, st.session_state[memory])
def get_chain(issue, language, source, memory, temperature):
    """Build the conversation chain for the selected model source.

    Parameters
    ----------
    issue, language : str
        Combined as "{issue}-{language}" to key the model/template tables.
    source : str
        Either "Finetuned OpenAI" or "OpenAI GPT3.5".
    memory : ConversationBufferMemory
        Conversation history passed through to the chain factory.
    temperature : float
        Sampling temperature passed through to the chain factory.

    Raises
    ------
    ValueError
        If *source* is not one of the supported values.

    Fix: the original used ``source in ("Finetuned OpenAI")`` — the
    parentheses do NOT make a tuple, so this was a *substring* test against
    a string (e.g. source "OpenAI" matched the finetuned branch). Both
    checks are now exact equality, and an unknown source raises instead of
    silently returning None.
    """
    if source == "Finetuned OpenAI":
        OA_engine = finetuned_models[f"{issue}-{language}"]
        return get_finetuned_chain(OA_engine, memory, temperature)
    if source == 'OpenAI GPT3.5':
        template = role_templates[f"{issue}-{language}"]
        return get_role_chain(template, memory, temperature)
    raise ValueError(f"Unknown model source: {source!r}")