import streamlit as st
from llama_cpp import Llama

# Download the quantized GGUF weights from the Hugging Face Hub and load them
# with llama.cpp (Llama.from_pretrained needs the huggingface_hub package
# installed to perform the download; "*Q4_K_M.gguf" is a glob pattern).
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="*Q4_K_M.gguf",
    verbose=False
)

# Prompt template with a hard-coded two-turn history. The Russian turns read:
# user: "Answer a question about my health." /
# assistant: "Of course! What is your question?"
basic_prompt = (
    "Below is the context which is your conversation history and the last user "
    "question. Write a response according to the context and question. "
    "### Context: user: Ответь мне на вопрос о моем здоровье. "
    "assistant: Конечно! Какой у Вас вопрос? "
    "### Question: {question} "
    "### Response:"
)

input_text = st.text_input('text')
model_input = basic_prompt.format(question=input_text)

if input_text:
    # Create an empty placeholder for the output
    output_placeholder = st.empty()

    # Initialize an empty string to accumulate the generated text
    generated_text = ""

    # Stream the completion chunk by chunk
    for token in llm(
        model_input,
        max_tokens=32,
        stop=["</s>"],  # assumption: the original stop string was lost in extraction; substitute your model's EOS token
        echo=False,     # the original used echo=True, which would re-print the whole prompt in the UI
        stream=True     # enable streaming
    ):
        # Append the new token to the generated text
        generated_text += token['choices'][0]['text']
        # Update the placeholder with the current generated text
        output_placeholder.write(generated_text)

    # After the generation is complete, do any final processing if needed
    st.write("Generation complete!")
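
# ---------------------------------------------------------------------------
# Sketch: carrying a real multi-turn history instead of the hard-coded one.
# The template above fixes the "### Context:" block at two turns; to let the
# conversation grow, the history can live in st.session_state and be rendered
# into the prompt on every rerun. Everything below is an illustrative
# assumption, not part of the app above: the session_state key "history", the
# format_history() helper, and the {context} template slot are hypothetical
# names. Uncomment and merge into the flow above to use it.
#
# def format_history(turns):
#     # Flatten stored (role, text) turns into the "### Context:" block
#     return " ".join(f"{role}: {text}" for role, text in turns)
#
# if "history" not in st.session_state:
#     st.session_state.history = [
#         ("user", "Ответь мне на вопрос о моем здоровье."),
#         ("assistant", "Конечно! Какой у Вас вопрос?"),
#     ]
#
# context = format_history(st.session_state.history)
# # basic_prompt would need a {context} placeholder instead of the fixed turns:
# # model_input = basic_prompt.format(context=context, question=input_text)
#
# # ...after generation completes, remember the exchange for the next rerun:
# # st.session_state.history.append(("user", input_text))
# # st.session_state.history.append(("assistant", generated_text))
#
# Assuming this file is saved as app.py, launch the app with:
#     streamlit run app.py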