from fastapi import FastAPI, UploadFile, File, Form
from fastapi.responses import JSONResponse, Response
import uvicorn
from pydantic import BaseModel
import io
import soundfile as sf

from asr import transcribe, ASR_LANGUAGES
from tts import synthesize, TTS_LANGUAGES
from lid import identify

app = FastAPI(title="MMS: Scaling Speech Technology to 1000+ languages")


class TTSRequest(BaseModel):
    text: str
    language: str
    speed: float


@app.post("/transcribe")
async def transcribe_audio(audio: UploadFile = File(...), language: str = Form(...)):
    # Decode the uploaded file into a numpy array before handing it to the ASR model.
    contents = await audio.read()
    audio_array, sample_rate = sf.read(io.BytesIO(contents))
    result = transcribe(audio_array, language)
    return JSONResponse(content={"transcription": result})


@app.post("/synthesize")
async def synthesize_speech(request: TTSRequest):
    audio, filtered_text = synthesize(request.text, request.language, request.speed)
    # Encode the numpy array as an in-memory WAV file. FileResponse expects a
    # path on disk, so the raw bytes are returned via a plain Response instead.
    buffer = io.BytesIO()
    sf.write(buffer, audio, 22050, format="WAV")
    buffer.seek(0)
    return Response(
        content=buffer.read(),
        media_type="audio/wav",
        headers={"Content-Disposition": "attachment; filename=synthesized_audio.wav"},
    )


@app.post("/identify")
async def identify_language(audio: UploadFile = File(...)):
    contents = await audio.read()
    audio_array, sample_rate = sf.read(io.BytesIO(contents))
    result = identify(audio_array)
    return JSONResponse(content={"language_identification": result})


@app.get("/asr_languages")
async def get_asr_languages():
    return JSONResponse(content=ASR_LANGUAGES)


@app.get("/tts_languages")
async def get_tts_languages():
    return JSONResponse(content=TTS_LANGUAGES)
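
# Example client (a minimal sketch): shows how the endpoints above can be
# exercised end to end. The base URL/port, the "eng" language code, the
# "sample.wav" input file, and the `requests` dependency are assumptions,
# not part of this service; adjust them to your deployment.
def _example_client(base_url: str = "http://localhost:8000") -> None:
    import requests

    # ASR: the audio goes up as a multipart file, the language as a form field.
    with open("sample.wav", "rb") as f:
        resp = requests.post(
            f"{base_url}/transcribe",
            files={"audio": ("sample.wav", f, "audio/wav")},
            data={"language": "eng"},
        )
    print(resp.json()["transcription"])

    # TTS: the JSON body must match the TTSRequest model; the response body
    # is the raw WAV file produced by /synthesize.
    resp = requests.post(
        f"{base_url}/synthesize",
        json={"text": "Hello world", "language": "eng", "speed": 1.0},
    )
    with open("synthesized_audio.wav", "wb") as f:
        f.write(resp.content)

    # LID: audio only; the server returns the language identification result.
    with open("sample.wav", "rb") as f:
        resp = requests.post(f"{base_url}/identify", files={"audio": f})
    print(resp.json()["language_identification"])


if __name__ == "__main__":
    # Serve the API with uvicorn when run directly (host/port are assumptions).
    uvicorn.run(app, host="0.0.0.0", port=8000)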