import streamlit as st
from llama_cpp import Llama

st.set_page_config(page_title="Chat with AI", page_icon="🤖")
# Custom CSS for better styling.
# Note: st.chat_message renders its own markup, so the .chat-message rules
# below only take effect if you emit matching HTML yourself.
st.markdown("""
<style>
.stTextInput > div > div > input {
    background-color: #f0f2f6;
}
.chat-message {
    padding: 1.5rem;
    border-radius: 0.5rem;
    margin-bottom: 1rem;
    display: flex;
}
.chat-message.user {
    background-color: #2b313e;
}
.chat-message.bot {
    background-color: #475063;
}
.chat-message .avatar {
    width: 20%;
}
.chat-message .avatar img {
    max-width: 78px;
    max-height: 78px;
    border-radius: 50%;
    object-fit: cover;
}
.chat-message .message {
    width: 80%;
    padding: 0 1.5rem;
    color: #fff;
}
</style>
""", unsafe_allow_html=True)
# Cache the model so it is loaded once, not on every Streamlit rerun.
@st.cache_resource
def load_model():
    return Llama.from_pretrained(
        repo_id="Mykes/med_phi3-mini-4k-GGUF",
        filename="*Q4_K_M.gguf",
        verbose=False,
        n_ctx=256,
        n_batch=256,
        n_threads=4,
    )
llm = load_model()

basic_prompt = "Q: {question}\nA:"
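
# Sketch of an alternative (not used below): llama-cpp-python also exposes a
# chat API that applies the model's own chat template instead of this raw
# Q:/A: prompt, e.g.
#
#   llm.create_chat_completion(
#       messages=[{"role": "user", "content": question}],
#       stream=True,
#   )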
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# React to user input
if prompt := st.chat_input("What is your question?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    model_input = basic_prompt.format(question=prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream tokens as they are generated; echo=False keeps the prompt
        # out of the displayed answer.
        for token in llm(
            model_input,
            max_tokens=None,
            stop=["<end_of_turn>"],  # NB: Phi-3 chat format usually stops at "<|end|>"
            echo=False,
            stream=True
        ):
            full_response += token['choices'][0]['text']
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
st.sidebar.title("Chat with AI")
st.sidebar.markdown("This is a simple chat interface using Streamlit and an AI model.")
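
# To run this locally (a sketch; file and package names are the usual ones,
# not pinned by this Space): `pip install streamlit llama-cpp-python
# huggingface_hub`, then `streamlit run app.py`. The first run downloads the
# GGUF weights from the Hugging Face Hub.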