from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the tokenizer and model from the repository path.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,
        )
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # Extract the prompt text and any optional generation parameters.
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None) or {}

        # Build a single-turn chat and tokenize it with the model's chat template.
        messages = [{"role": "user", "content": inputs}]
        input_ids = self.tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(self.device)

        # Generate, forwarding any extra kwargs (e.g. max_new_tokens) supplied
        # via "parameters" in the request payload.
        outputs = self.model.generate(
            input_ids,
            num_return_sequences=1,
            eos_token_id=self.tokenizer.eos_token_id,
            **parameters,
        )

        # Strip the prompt tokens and decode only the newly generated continuation.
        prediction = self.tokenizer.decode(
            outputs[0][len(input_ids[0]):], skip_special_tokens=True
        )
        return [{"generated_text": prediction}]
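

# --- Usage sketch (local smoke test, not part of the deployed handler) ---
# A minimal example of how the handler is invoked, assuming a local model
# checkout at "./model" (a placeholder path) whose tokenizer ships a chat
# template. The payload shape mirrors the Inference Endpoints convention
# ({"inputs": ..., "parameters": ...}); the parameter values are illustrative.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")
    response = handler(
        {
            "inputs": "Explain beam search in one sentence.",
            "parameters": {"max_new_tokens": 64, "do_sample": False},
        }
    )
    print(response[0]["generated_text"])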