import gradio as gr
import copy
import random
import os
import requests
import time
import sys
from huggingface_hub import snapshot_download

# Build llama-cpp-python from source with AVX-512 kernels enabled; done at
# runtime because CMake build flags cannot be passed via requirements.txt.
os.system("pip install --upgrade pip")
os.system(
    'CMAKE_ARGS="-DLLAMA_AVX512=ON -DLLAMA_AVX512_VBMI=ON '
    '-DLLAMA_AVX512_VNNI=ON -DLLAMA_FP16_VA=ON" pip install llama-cpp-python'
)

from llama_cpp import Llama

SYSTEM_PROMPT = (
    'You are a helpful, respectful and honest INTP-T AI Assistant named "Shi-Ci" '
    'in English or "兮辞" in Chinese. You are good at speaking English and Chinese. '
    "You are talking to a human User. If the question is meaningless, please explain "
    "the reason and don't share false information. You are based on the SEA-CausalLM "
    "model, not related to GPT, LLaMA, Meta, Mistral or OpenAI. "
    "Let's work this out in a step by step way to be sure we have the right answer.\n\n"
)

# Hard-coded token IDs for the chat roles and the newline separator.
SYSTEM_TOKEN = 1587
USER_TOKEN = 2188
BOT_TOKEN = 12435
LINEBREAK_TOKEN = 13

ROLE_TOKENS = {
    "user": USER_TOKEN,
    "bot": BOT_TOKEN,
    "system": SYSTEM_TOKEN,
}


def get_message_tokens(model, role, content):
    # tokenize() prepends BOS; insert the role token and a linebreak right
    # after it, then terminate the message with EOS.
    message_tokens = model.tokenize(content.encode("utf-8"))
    message_tokens.insert(1, ROLE_TOKENS[role])
    message_tokens.insert(2, LINEBREAK_TOKEN)
    message_tokens.append(model.token_eos())
    return message_tokens


def get_system_tokens(model):
    system_message = {"role": "system", "content": SYSTEM_PROMPT}
    return get_message_tokens(model, **system_message)


repo_name = "TheBloke/CausalLM-14B-GGUF"
model_name = "causallm_14b.Q4_1.gguf"

snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)

model = Llama(
    model_path=model_name,
    n_ctx=2000,
    n_parts=1,  # legacy llama.cpp option; ignored by recent llama-cpp-python builds
)

max_new_tokens = 1500


def user(message, history):
    new_history = history + [[message, None]]
    return "", new_history


def bot(history, system_prompt, top_p, top_k, temp):
    # Rebuild the full prompt: system message first, then the conversation so far.
    tokens = get_system_tokens(model)[:]
    tokens.append(LINEBREAK_TOKEN)

    for user_message, bot_message in history[:-1]:
        message_tokens = get_message_tokens(model=model, role="user", content=user_message)
        tokens.extend(message_tokens)
        if bot_message:
            message_tokens = get_message_tokens(model=model, role="bot", content=bot_message)
            tokens.extend(message_tokens)

    last_user_message = history[-1][0]
    message_tokens = get_message_tokens(model=model, role="user", content=last_user_message)
    tokens.extend(message_tokens)

    # Open the bot turn so the model continues as the assistant.
    role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
    tokens.extend(role_tokens)

    generator = model.generate(
        tokens,
        top_k=top_k,
        top_p=top_p,
        temp=temp,
    )

    # Stream into the last history entry until EOS or the token budget is hit.
    # Buffer raw bytes and re-decode the whole buffer each step so multi-byte
    # (e.g. Chinese) characters split across tokens are not silently dropped.
    partial_bytes = b""
    for i, token in enumerate(generator):
        if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
            break
        partial_bytes += model.detokenize([token])
        history[-1][1] = partial_bytes.decode("utf-8", "ignore")
        yield history


with gr.Blocks(
    theme=gr.themes.Soft()
) as demo:
    gr.Markdown(
        f"""