slplab committed
Commit 828ccc6
Parent: 6f0b942

Update ffmpeg_handler.py

Files changed (1)
  1. ffmpeg_handler.py +3 -20
ffmpeg_handler.py CHANGED
@@ -1,40 +1,23 @@
 from typing import Dict, Any, List
-from transformers import WhisperForConditionalGeneration, AutoProcessor, WhisperTokenizer, WhisperProcessor, pipeline, WhisperFeatureExtractor
+from transformers import pipeline
 import torch
 from transformers.pipelines.audio_utils import ffmpeg_read
-#import io
 
-
-#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+#ffmpeg
 
 
 class EndpointHandler:
     def __init__(self, path=""):
-        #tokenizer = WhisperTokenizer.from_pretrained('openai/whisper-large', language="korean", task='transcribe')
-        #model = WhisperForConditionalGeneration.from_pretrained(path)
-        #self.tokenizer = WhisperTokenizer.from_pretrained(path)
-        #self.processor = WhisperProcessor.from_pretrained(path, language="korean", task='transcribe')
-        #processor = AutoProcessor.from_pretrained(path)
-        #self.pipe = pipeline("automatic-speech-recognition", model=model, tokenizer=processor.feature_extractor, feature_extractor=processor.feature_extractor)
-        #feature_extractor = WhisperFeatureExtractor.from_pretrained('openai/whisper-large')
         self.pipe = pipeline(task='automatic-speech-recognition', model=path)
 
-
-
-        # Move model to device
-        # self.model.to(device)
-
     def __call__(self, data: Any) -> List[Dict[str, str]]:
-        print('==========NEW PROCESS=========')
 
         inputs = data.pop("inputs", data)
         audio_nparray = ffmpeg_read(inputs, 16000)
         audio_tensor= torch.from_numpy(audio_nparray)
-
 
         transcribe = self.pipe
         transcribe.model.config.forced_decoder_ids = transcribe.tokenizer.get_decoder_prompt_ids(language="ko", task="transcribe")
-        result = transcribe(audio_tensor)
-
+        result = transcribe(audio_tensor)
 
         return result
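A note on the logic this commit keeps: assigning transcribe.tokenizer.get_decoder_prompt_ids(language="ko", task="transcribe") to model.config.forced_decoder_ids pins every generation to Korean transcription rather than letting Whisper auto-detect the language. On recent transformers releases the same effect can be requested per call through generate_kwargs; a version-dependent sketch, not a drop-in for the committed code:

result = transcribe(audio_tensor, generate_kwargs={"language": "korean", "task": "transcribe"})

Separately, several transformers releases validate the ASR pipeline input against np.ndarray (raising "We expect a numpy ndarray as input" otherwise), so passing audio_nparray directly instead of the torch tensor may be the safer choice.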
 
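For a quick local sanity check of the updated handler, a minimal driver along these lines could be used; the checkpoint name and audio path are placeholders, and ffmpeg must be installed so that ffmpeg_read can decode the raw bytes:

# Hypothetical smoke test; the checkpoint and audio file below are placeholders.
from ffmpeg_handler import EndpointHandler

handler = EndpointHandler(path="openai/whisper-large")  # any Whisper checkpoint
with open("sample.wav", "rb") as f:
    raw_bytes = f.read()  # the handler expects raw audio bytes under "inputs"
result = handler({"inputs": raw_bytes})
print(result)  # pipeline output, e.g. {"text": "..."}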