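"""Gradio chat Space for Qwen/Qwen2.5-Coder-7B-Instruct.

Loads the model with 4-bit quantization (bitsandbytes), wraps the transformers
pipeline for LangChain, and serves a chat UI with tunable sampling parameters.
"""
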
import logging
from logging.handlers import RotatingFileHandler

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline
from langchain_huggingface import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Logging setup: debug output goes to a rotating file under /tmp
log_file = '/tmp/app_debug.log'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = RotatingFileHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=5)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(file_handler)
logger.debug("Application started")

MODEL_ID = "Qwen/Qwen2.5-Coder-7B-Instruct"
MODEL_NAME = MODEL_ID.split("/")[-1]

# ChatML prompt template used by the Qwen2.5 instruct models; {history} is
# pre-rendered by format_history() below.
template = (
    "<|im_start|>system\n{system_prompt}\n<|im_end|>\n"
    "{history}"
    "<|im_start|>user\n{human_input}\n<|im_end|>\n"
    "<|im_start|>assistant\n"
)
prompt = PromptTemplate(template=template, input_variables=["system_prompt", "history", "human_input"])
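
# Example: with system_prompt="You are a code assistant.", empty history, and
# human_input="hi", the rendered prompt is:
#   <|im_start|>system
#   You are a code assistant.
#   <|im_end|>
#   <|im_start|>user
#   hi
#   <|im_end|>
#   <|im_start|>assistant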

def format_history(history):
    """Render previous (user, assistant) turns as ChatML."""
    return "".join(
        f"<|im_start|>user\n{user}\n<|im_end|>\n<|im_start|>assistant\n{assistant}\n<|im_end|>\n"
        for user, assistant in history
    )

def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
    logger.debug(f"Received prediction request: message='{message}', system_prompt='{system_prompt}'")
    # Pass the UI sampling settings through to the transformers pipeline:
    # LLMChain forwards llm_kwargs to the LLM's generate call, and
    # HuggingFacePipeline applies per-call overrides given under "pipeline_kwargs".
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        llm_kwargs={
            "pipeline_kwargs": {
                "temperature": temperature,
                "max_new_tokens": max_new_tokens,
                "top_k": top_k,
                "repetition_penalty": repetition_penalty,
                "top_p": top_p,
                "do_sample": temperature > 0,  # temperature 0 means greedy decoding
                "return_full_text": False,  # return only the completion, not the prompt
            }
        },
    )
    try:
        formatted_history = format_history(history)
        for chunk in chain.stream({"system_prompt": system_prompt, "history": formatted_history, "human_input": message}):
            yield chunk["text"]
        logger.debug(f"Prediction completed successfully for message: '{message}'")
    except Exception as e:
        logger.exception(f"Error during prediction: {str(e)}")
        yield "An error occurred during processing."

# Load the model 4-bit quantized and wrap the transformers pipeline for LangChain.
# The ChatML markup is already built by hand in `template`, so the plain
# text-generation pipeline is used rather than a chat wrapper that would apply
# the tokenizer's chat template a second time.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map="auto",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
llm = HuggingFacePipeline(pipeline=pipeline("text-generation", model=model, tokenizer=tokenizer))
logger.debug("Model and tokenizer loaded successfully")

demo = gr.ChatInterface(
    predict,
    title=f"🤖 {MODEL_NAME}",
    description=f"This is the {MODEL_NAME} model, designed for coding assistance and general AI tasks.",
    examples=[
        ["Can you solve the equation 2x + 3 = 11 for x in Python?"],
        ["Write a Java program that checks if a number is even or odd."],
        ["How can I reverse a string in JavaScript?"],
        ["Create a C++ function to find the factorial of a number."],
        ["Write a Python list comprehension to generate a list of squares of numbers from 1 to 10."],
    ],
    additional_inputs=[
        gr.Textbox("You are a code assistant.", label="System prompt"),
        gr.Slider(0, 1, 0.3, label="Temperature"),
        gr.Slider(128, 4096, 1024, label="Max new tokens"),
        gr.Slider(1, 80, 40, label="Top K sampling"),
        gr.Slider(0, 2, 1.1, label="Repetition penalty"),
        gr.Slider(0, 1, 0.95, label="Top P sampling"),
    ],
    theme=gr.themes.Soft(primary_hue="blue"),
)
logger.debug("Chat interface initialized")
demo.queue().launch()  # launch() blocks, so log before calling it