# Commit bb48dcd (zhangtao): added a stop sequence to avoid pointless repeated output.
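# Gradio chat demo that serves Qwen-1.8B GGUF models through llama-cpp-python.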
import gradio as gr
from llama_cpp import Llama
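# Load the same Qwen-1.8B model in two GGUF builds (f16 and the smaller Q5_K_M
# quantisation), both with a 4096-token context and the ChatML prompt format.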
llm_f16 = Llama(model_path="./qwen-1.8b-f16.gguf",
                n_ctx=4096,
                n_threads=2,
                chat_format="chatml")
llm_q5_k_m = Llama(model_path="./qwen-1.8b-q5_k_m.gguf",
                   n_ctx=4096,
                   n_threads=2,
                   chat_format="chatml")
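# Streaming handler for gr.ChatInterface: `message` is the new user turn, `history`
# holds the previous (user, assistant) pairs, and the remaining arguments come from
# the additional_inputs declared below (system-prompt textbox and quantisation checkbox).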
def chat_stream_completion(message, history, system_prompt, q5_check):
    # Start the ChatML message list with the system prompt.
    messages_prompts = [{"role": "system", "content": system_prompt}]
    # Use the Q5_K_M model when the checkbox is ticked, otherwise the f16 model.
    llm = llm_q5_k_m if q5_check else llm_f16
    # Replay the conversation history as alternating user/assistant turns.
    for human, assistant in history:
        messages_prompts.append({"role": "user", "content": human})
        messages_prompts.append({"role": "assistant", "content": assistant})
    messages_prompts.append({"role": "user", "content": message})
    # Stream the completion; the stop sequence cuts off runaway repeated output.
    response = llm.create_chat_completion(
        messages=messages_prompts,
        stream=True,
        stop="\n\n\n"
    )
    message_repl = ""
    for chunk in response:
        delta = chunk['choices'][0]["delta"]
        # Deltas may be empty or carry only a role; append content tokens and
        # yield the accumulated text so Gradio updates the chat incrementally.
        if len(delta) != 0 and "content" in delta:
            message_repl += delta["content"]
            yield message_repl
gr.ChatInterface(
    chat_stream_completion,
    additional_inputs=[
        gr.Textbox("You are helpful AI.", label="System Prompt"),
        gr.Checkbox(label="Use Q5-K-M?", value=True),
    ],
).queue().launch(server_name="0.0.0.0")
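# server_name="0.0.0.0" binds to all network interfaces so the app is reachable from outside the container.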