from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, path=""):
        # Load the tokenizer and model from the given path.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,
        )
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # Extract the prompt and optional generation parameters from the request.
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None) or {}
        messages = [{"role": "user", "content": inputs}]

        # Preprocess: build the chat-formatted prompt and move it to the model's device.
        input_ids = self.tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(self.device)

        # Generate, forwarding any user-supplied parameters (e.g. max_new_tokens).
        outputs = self.model.generate(
            input_ids,
            num_return_sequences=1,
            eos_token_id=self.tokenizer.eos_token_id,
            **parameters,
        )

        # Postprocess: decode only the newly generated tokens, skipping the prompt.
        prediction = self.tokenizer.decode(
            outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
        )

        return [{"generated_text": prediction}]