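"""FastAPI server wrapping the MMS (Massively Multilingual Speech) models.

Endpoints:
    POST /transcribe     - speech-to-text for a base64-encoded audio clip
    POST /synthesize     - text-to-speech; returns a WAV attachment
    POST /identify       - spoken language identification
    GET  /asr_languages  - languages supported for ASR
    GET  /tts_languages  - languages supported for TTS

Illustrative client call (the "eng" language code and the local URL are
assumptions, not part of this module):

    import base64, requests
    audio_b64 = base64.b64encode(open("sample.wav", "rb").read()).decode()
    resp = requests.post("http://localhost:8000/transcribe",
                         json={"audio": audio_b64, "language": "eng"})
    print(resp.json()["transcription"])
"""
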
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
import uvicorn
from pydantic import BaseModel
import io
import soundfile as sf
import base64
from asr import transcribe, ASR_LANGUAGES
from tts import synthesize, TTS_LANGUAGES
from lid import identify
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="MMS: Scaling Speech Technology to 1000+ languages")

class TTSRequest(BaseModel):
    text: str
    language: str
    speed: float

class AudioRequest(BaseModel):
    audio: str  # Base64 encoded audio data
    language: str

@app.post("/transcribe")
async def transcribe_audio(request: AudioRequest):
    try:
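        # Decode the base64 payload into a waveform that soundfile can read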
        audio_bytes = base64.b64decode(request.audio)
        audio_array, sample_rate = sf.read(io.BytesIO(audio_bytes))
        
        result = transcribe(audio_array, request.language)
        return JSONResponse(content={"transcription": result})
    except Exception as e:
        logger.error(f"Error in transcribe_audio: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")

@app.post("/synthesize")
async def synthesize_speech(request: TTSRequest):
    try:
        audio, filtered_text = synthesize(request.text, request.language, request.speed)
        
        # Encode the waveform as an in-memory WAV file
        # (22050 Hz is assumed to match the TTS model's output rate)
        buffer = io.BytesIO()
        sf.write(buffer, audio, 22050, format='WAV')
        buffer.seek(0)

        # Stream the buffer back as a downloadable WAV attachment;
        # StreamingResponse accepts a file-like object, unlike FileResponse,
        # which requires a filesystem path
        return StreamingResponse(
            buffer,
            media_type="audio/wav",
            headers={"Content-Disposition": "attachment; filename=synthesized_audio.wav"}
        )
    except Exception as e:
        logger.error(f"Error in synthesize_speech: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")

@app.post("/identify")
async def identify_language(request: AudioRequest):
    try:
        audio_bytes = base64.b64decode(request.audio)
        audio_array, sample_rate = sf.read(io.BytesIO(audio_bytes))
        
        result = identify(audio_array)
        return JSONResponse(content={"language_identification": result})
    except Exception as e:
        logger.error(f"Error in identify_language: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")

@app.get("/asr_languages")
async def get_asr_languages():
    try:
        return JSONResponse(content=ASR_LANGUAGES)
    except Exception as e:
        logger.error(f"Error in get_asr_languages: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")

@app.get("/tts_languages")
async def get_tts_languages():
    try:
        return JSONResponse(content=TTS_LANGUAGES)
    except Exception as e:
        logger.error(f"Error in get_tts_languages: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
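

# Assumed entrypoint so the module can be run directly with the uvicorn
# import above; host and port are illustrative defaults, not confirmed config.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)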