WinterGYC committed on
Commit
c22d76e
1 Parent(s): a4a5581

Update handler.py


Switch from text-generation pipeline to chat interface.

Files changed (1)
  1. handler.py +5 -14
handler.py CHANGED
@@ -9,19 +9,10 @@ dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.
 class EndpointHandler:
     def __init__(self, path=""):
         # load the model
-        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
-        model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", torch_dtype=dtype, trust_remote_code=True)
-        # create inference pipeline
-        self.pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+        self.model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", torch_dtype=dtype, trust_remote_code=True)
 
     def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
-        inputs = data.pop("inputs", data)
-        parameters = data.pop("parameters", None)
-
-        # pass inputs with all kwargs in data
-        if parameters is not None:
-            prediction = self.pipeline(inputs, **parameters)
-        else:
-            prediction = self.pipeline(inputs)
-        # postprocess the prediction
-        return prediction
+        # ignoring parameters! Default to configs in generation_config.json.
+        messages = [{"role": "user", "content": data}]
+        return self.model.chat(self.tokenizer, messages)
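
For reference, a minimal sketch of how the updated handler might be exercised locally. The "./model" path is hypothetical (not part of the commit), and the example assumes the checkpoint's remote code exposes the model.chat() method that the new __call__ relies on; on Inference Endpoints the repository path is supplied to EndpointHandler by the platform.

from handler import EndpointHandler

# Hypothetical local directory holding the downloaded model weights.
handler = EndpointHandler(path="./model")

# __call__ now wraps the raw payload in a single user message; sampling
# parameters are no longer accepted and generation falls back to the
# settings in generation_config.json.
reply = handler("Hello! Please introduce yourself.")
print(reply)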