# Custom handler.py for a Hugging Face Inference Endpoints deployment:
# loads a causal LM once at startup and serves chat-style text generation.
from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the tokenizer and model from the repository path supplied by
        # the Inference Endpoints runtime.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map="auto",          # let accelerate place the weights
            torch_dtype=torch.float16,  # halve memory vs. float32
            trust_remote_code=True,
        )
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # Payloads look like {"inputs": "...", "parameters": {...}}; if no
        # "inputs" key is present, treat the whole payload as the prompt.
        prompt = data.pop("inputs", data)
        parameters = data.pop("parameters", None) or {}

        # Wrap the prompt in a single-turn chat and render it with the
        # model's chat template, ending with the assistant generation prompt.
        messages = [{"role": "user", "content": prompt}]
        input_ids = self.tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(self.device)

        # Forward any caller-supplied generation parameters (max_new_tokens,
        # temperature, ...) straight through to generate().
        outputs = self.model.generate(
            input_ids,
            num_return_sequences=1,
            eos_token_id=self.tokenizer.eos_token_id,
            **parameters,
        )

        # Decode only the newly generated tokens, skipping the echoed prompt.
        prediction = self.tokenizer.decode(
            outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
        )
        return [{"generated_text": prediction}]
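

# A minimal local smoke test, included as a sketch only; the Inference
# Endpoints runtime instantiates EndpointHandler itself and never runs this
# block. The repo id below is a hypothetical placeholder: substitute any chat
# model you can load, and note that float16 weights want a GPU in practice.
if __name__ == "__main__":
    handler = EndpointHandler(path="your-org/your-chat-model")  # placeholder repo id
    response = handler(
        {
            "inputs": "Write a two-line poem about the sea.",
            "parameters": {"max_new_tokens": 64, "do_sample": False},
        }
    )
    print(response[0]["generated_text"])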