from fastapi import FastAPI
from fastapi.responses import JSONResponse, StreamingResponse
import uvicorn
from pydantic import BaseModel
import io
import soundfile as sf
import base64

from asr import transcribe, ASR_LANGUAGES
from tts import synthesize, TTS_LANGUAGES
from lid import identify

app = FastAPI(title="MMS: Scaling Speech Technology to 1000+ languages")


class TTSRequest(BaseModel):
    text: str
    language: str
    speed: float


class AudioRequest(BaseModel):
    audio: str  # Base64-encoded audio data
    language: str
@app.post("/transcribe")
async def transcribe_audio(request: AudioRequest):
audio_bytes = base64.b64decode(request.audio)
audio_array, sample_rate = sf.read(io.BytesIO(audio_bytes))
result = transcribe(audio_array, request.language)
return JSONResponse(content={"transcription": result})
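
# Example client call for /transcribe (a sketch, not part of the service; it
# assumes the server runs on localhost:8000, that sample.wav exists locally,
# and that "eng" is one of the codes returned by /asr_languages):
#
#   import base64, requests
#   with open("sample.wav", "rb") as f:
#       payload = {"audio": base64.b64encode(f.read()).decode("ascii"),
#                  "language": "eng"}
#   resp = requests.post("http://localhost:8000/transcribe", json=payload)
#   print(resp.json()["transcription"])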
@app.post("/synthesize")
async def synthesize_speech(request: TTSRequest):
audio, filtered_text = synthesize(request.text, request.language, request.speed)
# Convert numpy array to bytes
buffer = io.BytesIO()
sf.write(buffer, audio, 22050, format='wav')
buffer.seek(0)
return FileResponse(
buffer,
media_type="audio/wav",
headers={"Content-Disposition": "attachment; filename=synthesized_audio.wav"}
)
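
# Example client call for /synthesize (a sketch; the host, text, language code,
# and speed value are assumptions, and requests is not a dependency of this file):
#
#   resp = requests.post("http://localhost:8000/synthesize",
#                        json={"text": "Hello world", "language": "eng", "speed": 1.0})
#   with open("synthesized_audio.wav", "wb") as f:
#       f.write(resp.content)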
@app.post("/identify")
async def identify_language(request: AudioRequest):
audio_bytes = base64.b64decode(request.audio)
audio_array, sample_rate = sf.read(io.BytesIO(audio_bytes))
result = identify(audio_array)
return JSONResponse(content={"language_identification": result})
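
# /identify accepts the same base64 AudioRequest payload as /transcribe; the
# "language" field is carried in the model but not used by this endpoint.
# Client sketch, reusing the payload built in the /transcribe example above:
#
#   resp = requests.post("http://localhost:8000/identify", json=payload)
#   print(resp.json()["language_identification"])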
@app.get("/asr_languages")
async def get_asr_languages():
return JSONResponse(content=ASR_LANGUAGES)
@app.get("/tts_languages")
async def get_tts_languages():
return JSONResponse(content=TTS_LANGUAGES) |