import os
import logging
from logging.handlers import RotatingFileHandler
import gradio as gr
from transformers import AutoTokenizer, BitsAndBytesConfig
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# Logging setup
log_file = '/tmp/app_debug.log'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = RotatingFileHandler(log_file, maxBytes=10*1024*1024, backupCount=5)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(file_handler)
logger.debug("Application started")
MODEL_ID = "Qwen/Qwen2.5-Coder-7B-Instruct"
MODEL_NAME = MODEL_ID.split("/")[-1]
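
# ChatML-style prompt template (the <|im_start|>/<|im_end|> format used by Qwen instruct models):
# system message first, then prior turns, then the new user message, with an open assistant turn for the reply.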
template = """<|im_start|>system\n{system_prompt}\n<|im_end|>\n{history}<|im_start|>user\n{human_input}\n<|im_end|>\n<|im_start|>assistant\n"""
prompt = PromptTemplate(template=template, input_variables=["system_prompt", "history", "human_input"])
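
# Serialize prior (user, assistant) turns into ChatML blocks for the {history} slot of the template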
def format_history(history):
    return "".join([f"<|im_start|>user\n{h[0]}\n<|im_end|>\n<|im_start|>assistant\n{h[1]}\n<|im_end|>\n" for h in history])
def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
    logger.debug(f"Received prediction request: message='{message}', system_prompt='{system_prompt}'")
    # Apply the sampling parameters chosen in the UI before building the chain
    chat_model.temperature = temperature
    chat_model.max_new_tokens = max_new_tokens
    chat_model.top_k = top_k
    chat_model.repetition_penalty = repetition_penalty
    chat_model.top_p = top_p
    chain = LLMChain(llm=chat_model, prompt=prompt)
    try:
        formatted_history = format_history(history)
        for chunk in chain.stream({"system_prompt": system_prompt, "history": formatted_history, "human_input": message}):
            yield chunk["text"]
        logger.debug(f"Prediction completed successfully for message: '{message}'")
    except Exception as e:
        logger.exception(f"Error during prediction: {str(e)}")
        yield "An error occurred during processing."
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Assumption: ChatHuggingFace from langchain_huggingface expects a wrapped `llm`, so the model
# is loaded through HuggingFacePipeline.from_model_id and then wrapped for chat use.
llm = HuggingFacePipeline.from_model_id(
    model_id=MODEL_ID,
    task="text-generation",
    model_kwargs={
        "device_map": "auto",
        "quantization_config": BitsAndBytesConfig(load_in_4bit=True),
    },
)
chat_model = ChatHuggingFace(llm=llm, tokenizer=tokenizer)
logger.debug("Model and tokenizer loaded successfully")
gr.ChatInterface(
    predict,
    title=f"🤖 {MODEL_NAME}",
    description=f"This is the {MODEL_NAME} model designed for coding assistance and general AI tasks.",
    examples=[
        ["Can you solve the equation 2x + 3 = 11 for x in Python?"],
        ["Write a Java program that checks if a number is even or odd."],
        ["How can I reverse a string in JavaScript?"],
        ["Create a C++ function to find the factorial of a number."],
        ["Write a Python list comprehension to generate a list of squares of numbers from 1 to 10."],
    ],
    additional_inputs=[
        gr.Textbox("You are a code assistant.", label="System prompt"),
        gr.Slider(0, 1, 0.3, label="Temperature"),
        gr.Slider(128, 4096, 1024, label="Max new tokens"),
        gr.Slider(1, 80, 40, label="Top K sampling"),
        gr.Slider(0, 2, 1.1, label="Repetition penalty"),
        gr.Slider(0, 1, 0.95, label="Top P sampling"),
    ],
    theme=gr.themes.Soft(primary_hue="blue"),
).queue().launch()
logger.debug("Chat interface initialized and launched") |