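# FastAPI service exposing Whisper JAX (openai/whisper-large-v3) transcription over HTTP.
# Audio can be sent either as a multipart file upload or as a base64-encoded string.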
import logging
import math
import time
import base64
import os
from typing import Dict, Any
from functools import wraps
from fastapi import FastAPI, Depends, HTTPException, File, UploadFile, Form, Header
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
import jax.numpy as jnp
import numpy as np
from transformers.pipelines.audio_utils import ffmpeg_read
from whisper_jax import FlaxWhisperPipline
app = FastAPI(title="Whisper JAX: The Fastest Whisper API ⚡️")
logger = logging.getLogger("whisper-jax-app")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s", "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
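# Model checkpoint and inference settings; FILE_LIMIT_MB caps the accepted upload size.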
checkpoint = "openai/whisper-large-v3"
BATCH_SIZE = 32
CHUNK_LENGTH_S = 30
NUM_PROC = 32
FILE_LIMIT_MB = 10000
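# Load the Whisper JAX pipeline once at startup (bfloat16 weights, fixed batch size).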
pipeline = FlaxWhisperPipline(checkpoint, dtype=jnp.bfloat16, batch_size=BATCH_SIZE)
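# Convert chunk/stride lengths from seconds to samples; consecutive chunks overlap by
# stride_left + stride_right samples, so chunk start indices advance by `step`.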
stride_length_s = CHUNK_LENGTH_S / 6
chunk_len = round(CHUNK_LENGTH_S * pipeline.feature_extractor.sampling_rate)
stride_left = stride_right = round(stride_length_s * pipeline.feature_extractor.sampling_rate)
step = chunk_len - stride_left - stride_right
# Pre-compile step
logger.debug("Compiling forward call...")
start = time.time()
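# A dummy batch of ones runs the forward pass once so JAX JIT-compiles it before the first real request.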
random_inputs = {
    "input_features": np.ones(
        (BATCH_SIZE, pipeline.model.config.num_mel_bins, 2 * pipeline.model.config.max_source_positions)
    )
}
random_timestamps = pipeline.forward(random_inputs, batch_size=BATCH_SIZE, return_timestamps=True)
compile_time = time.time() - start
logger.debug(f"Compiled in {compile_time}s")
class TranscribeAudioRequest(BaseModel):
    audio_base64: str
    task: str = "transcribe"
    return_timestamps: bool = False
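
# Decorator that times an async endpoint and attaches the elapsed seconds to the
# returned dict under 'total_execution_time'.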
def timeit(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()
        result = await func(*args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        if isinstance(result, dict):
            result['total_execution_time'] = execution_time
        else:
            result = {'result': result, 'total_execution_time': execution_time}
        return result
    return wrapper
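
# FastAPI dependency: compares the x-api-key request header against the WHISPER_API_KEY environment variable.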
def check_api_key(x_api_key: str = Header(...)):
    api_key = os.environ.get("WHISPER_API_KEY")
    if not api_key or x_api_key != api_key:
        raise HTTPException(status_code=401, detail="Invalid or missing API key")
    return x_api_key
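
# Multipart upload endpoint: reads the raw file bytes and hands them to process_audio.
# Example request (assumptions: server run with uvicorn on localhost:8000, WHISPER_API_KEY exported):
#   curl -X POST http://localhost:8000/transcribe_audio_file \
#     -H "x-api-key: $WHISPER_API_KEY" \
#     -F "file=@audio.mp3" -F "task=transcribe" -F "return_timestamps=true"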
@app.post("/transcribe_audio_file")
@timeit
async def transcribe_audio_file(
    file: UploadFile = File(...),
    task: str = Form("transcribe"),
    return_timestamps: bool = Form(False),
    api_key: str = Depends(check_api_key)
) -> Dict[str, Any]:
    logger.debug("Starting transcribe_audio_file function")
    logger.debug(f"Received parameters - task: {task}, return_timestamps: {return_timestamps}")
    try:
        audio_data = await file.read()
        file_size = len(audio_data)
        file_size_mb = file_size / (1024 * 1024)
        logger.debug(f"Audio file size: {file_size} bytes ({file_size_mb:.2f}MB)")
    except Exception as e:
        logger.error(f"Error reading audio file: {str(e)}", exc_info=True)
        raise HTTPException(status_code=400, detail=f"Error reading audio file: {str(e)}")
    return await process_audio(audio_data, file_size_mb, task, return_timestamps)
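
# JSON endpoint: audio is sent base64-encoded in the request body, e.g.
#   {"audio_base64": "<base64-encoded bytes>", "task": "transcribe", "return_timestamps": false}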
@app.post("/transcribe_audio_base64")
@timeit
async def transcribe_audio_base64(
    request: TranscribeAudioRequest,
    api_key: str = Depends(check_api_key)
) -> Dict[str, Any]:
    logger.debug("Starting transcribe_audio_base64 function")
    logger.debug(f"Received parameters - task: {request.task}, return_timestamps: {request.return_timestamps}")
    try:
        audio_data = base64.b64decode(request.audio_base64)
        file_size = len(audio_data)
        file_size_mb = file_size / (1024 * 1024)
        logger.debug(f"Decoded audio data size: {file_size} bytes ({file_size_mb:.2f}MB)")
    except Exception as e:
        logger.error(f"Error decoding base64 audio data: {str(e)}", exc_info=True)
        raise HTTPException(status_code=400, detail=f"Error decoding base64 audio data: {str(e)}")
    return await process_audio(audio_data, file_size_mb, request.task, request.return_timestamps)
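
# Shared path for both endpoints: enforce the size limit, decode the audio with ffmpeg, then transcribe.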
async def process_audio(audio_data: bytes, file_size_mb: float, task: str, return_timestamps: bool) -> Dict[str, Any]:
    if file_size_mb > FILE_LIMIT_MB:
        logger.warning(f"Max file size exceeded: {file_size_mb:.2f}MB > {FILE_LIMIT_MB}MB")
        raise HTTPException(status_code=400, detail=f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB.")
    try:
        logger.debug("Performing ffmpeg read on audio data")
        inputs = ffmpeg_read(audio_data, pipeline.feature_extractor.sampling_rate)
        inputs = {"array": inputs, "sampling_rate": pipeline.feature_extractor.sampling_rate}
        logger.debug("ffmpeg read completed successfully")
    except Exception as e:
        logger.error(f"Error in ffmpeg read: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error processing audio data: {str(e)}")
    logger.debug("Calling tqdm_generate to transcribe audio")
    try:
        text, runtime, timing_info = tqdm_generate(inputs, task=task, return_timestamps=return_timestamps)
        logger.debug(f"Transcription completed. Runtime: {runtime:.2f}s")
    except Exception as e:
        logger.error(f"Error in tqdm_generate: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error transcribing audio: {str(e)}")
    logger.debug("Audio processing completed successfully")
    return jsonable_encoder({
        "text": text,
        "runtime": runtime,
        "timing_info": timing_info
    })
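
# Core transcription loop: split the audio into overlapping chunks, run batched forward
# passes through the pipeline, post-process, and assemble the final transcript.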
def tqdm_generate(inputs: dict, task: str, return_timestamps: bool):
    start_time = time.time()
    logger.debug(f"Starting tqdm_generate - task: {task}, return_timestamps: {return_timestamps}")
    inputs_len = inputs["array"].shape[0]
    logger.debug(f"Input array length: {inputs_len}")
    all_chunk_start_idx = np.arange(0, inputs_len, step)
    num_samples = len(all_chunk_start_idx)
    num_batches = math.ceil(num_samples / BATCH_SIZE)
    logger.debug(f"Number of samples: {num_samples}, Number of batches: {num_batches}")
    logger.debug("Preprocessing audio for inference")
    try:
        dataloader = pipeline.preprocess_batch(inputs, chunk_length_s=CHUNK_LENGTH_S, batch_size=BATCH_SIZE)
        logger.debug("Preprocessing completed successfully")
    except Exception as e:
        logger.error(f"Error in preprocessing: {str(e)}", exc_info=True)
        raise
    model_outputs = []
    transcription_start_time = time.time()
    logger.debug("Starting transcription...")
    try:
        for i, batch in enumerate(dataloader):
logger.debug(f"Processing batch {i+1}/{num_batches} with {len(batch)} samples")
            batch_output = pipeline.forward(batch, batch_size=BATCH_SIZE, task=task, return_timestamps=True)
            model_outputs.append(batch_output)
            logger.debug(f"Batch {i+1} processed successfully")
    except Exception as e:
        logger.error(f"Error during batch processing: {str(e)}", exc_info=True)
        raise
    transcription_runtime = time.time() - transcription_start_time
    logger.debug(f"Transcription completed in {transcription_runtime:.2f}s")
    logger.debug("Post-processing transcription results")
    try:
        post_processed = pipeline.postprocess(model_outputs, return_timestamps=True)
        logger.debug("Post-processing completed successfully")
    except Exception as e:
        logger.error(f"Error in post-processing: {str(e)}", exc_info=True)
        raise
    text = post_processed["text"]
    if return_timestamps:
        timestamps = post_processed.get("chunks")
        timestamps = [
            f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
            for chunk in timestamps
        ]
        text = "\n".join(str(feature) for feature in timestamps)
    total_processing_time = time.time() - start_time
    logger.debug("tqdm_generate function completed successfully")
    return text, transcription_runtime, {
        "transcription_time": transcription_runtime,
        "total_processing_time": total_processing_time
    }
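
# Format a timestamp in seconds as [hh:]mm:ss.mmm; malformed (None) timestamps are returned as-is.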
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
    if seconds is not None:
        milliseconds = round(seconds * 1000.0)
        hours = milliseconds // 3_600_000
        milliseconds -= hours * 3_600_000
        minutes = milliseconds // 60_000
        milliseconds -= minutes * 60_000
        seconds = milliseconds // 1_000
        milliseconds -= seconds * 1_000
        hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
        return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
    else:
        # we have a malformed timestamp so just return it as is
        return seconds