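# Streamlit chat UI for the "Turkish-7b-mix" merged model, served from a
# Hugging Face Inference Endpoint through the OpenAI-compatible client.
# Assumed dependencies (from the imports below): streamlit, openai, python-dotenv.
# Run with: streamlit run app.py  (filename assumed).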
import os

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables from a .env file, if present
load_dotenv()
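# Expected .env contents (hf_xxx is a placeholder, not a real key):
# HUGGINGFACEHUB_API_TOKEN=hf_xxx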
# Initialize the OpenAI-compatible client against the Hugging Face Inference Endpoint
client = OpenAI(
    base_url="https://wzmh05cfg7kqctcc.us-east-1.aws.endpoints.huggingface.cloud/v1/",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),  # or replace with your "hf_xxx" token
)
# Map display names to model repo IDs
model_links = {
    "Turkish-7b-mix": "burak/Trendyol-Turkcell-stock",
}
# Info about each model, displayed in the sidebar
model_info = {
    "Turkish-7b-mix": {
        'description': """Turkish-7b-Mix is a merge of pre-trained language models created using **mergekit**.\n \
### Merge Method\n \
This model was merged using the [Model Stock](https://arxiv.org/abs/2403.19522) merge method using [Trendyol/Trendyol-LLM-7b-chat-dpo-v1.0](https://huggingface.co/Trendyol/Trendyol-LLM-7b-chat-dpo-v1.0) as a base.\n \
### Models Merged\n \
The following models were included in the merge:\n \
* [TURKCELL/Turkcell-LLM-7b-v1](https://huggingface.co/TURKCELL/Turkcell-LLM-7b-v1)\n \
* [Trendyol/Trendyol-LLM-7b-chat-v1.0](https://huggingface.co/Trendyol/Trendyol-LLM-7b-chat-v1.0)\n""",
        'logo': 'https://huggingface.co/spaces/burak/TurkishChatbot/resolve/main/icon.jpg',
    },
}
def reset_conversation():
    '''
    Resets the conversation and message history.
    '''
    st.session_state.conversation = []
    st.session_state.messages = []
    return None
st.sidebar.image(model_info["Turkish-7b-mix"]['logo'])
# Define the available models
models = list(model_links.keys())

# Sidebar dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)

# Temperature slider (passed through to the completion request)
temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, 0.5)

# Reset button to clear the conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Model description
st.sidebar.write(f"You're now chatting with **{selected_model}**")
st.sidebar.markdown(model_info[selected_model]['description'])
st.sidebar.markdown("*Generated content may be inaccurate or false.*")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
# st.write(f"Changed to {selected_model}")
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]

st.subheader(f'AI - {selected_model}')

# Track the selected model's repo ID in session state
if selected_model not in st.session_state:
    st.session_state[selected_model] = model_links[selected_model]
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stream the assistant response in a chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=model_links[selected_model],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            temperature=temp_values,
            stream=True,
            max_tokens=500,
        )
        response = st.write_stream(stream)

    # Add the full assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})