from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import numpy as np
import io
import soundfile as sf
import base64
import logging
import torch
import librosa
from pathlib import Path
import magic  # python-magic, for MIME type detection (requires the libmagic system library)
from pydub import AudioSegment
import traceback
from logging.handlers import RotatingFileHandler

# Import functions from other modules
from asr import transcribe, ASR_LANGUAGES, ASR_SAMPLING_RATE
from tts import synthesize, TTS_LANGUAGES
from lid import identify

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Add a file handler
file_handler = RotatingFileHandler('app.log', maxBytes=10000000, backupCount=5)
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

app = FastAPI(title="MMS: Scaling Speech Technology to 1000+ languages")

# Define request models
class AudioRequest(BaseModel):
    audio: str  # Base64 encoded audio or video data
    language: str

class TTSRequest(BaseModel):
    text: str
    language: str
    speed: float
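
# Illustrative request payloads (placeholder values, not taken from a real client;
# the language string is assumed to start with an ISO code, e.g. "eng (English)"):
#   POST /transcribe   {"audio": "<base64 audio or webm bytes>", "language": "eng (English)"}
#   POST /identify     {"audio": "<base64 audio or webm bytes>", "language": "eng (English)"}
#   POST /synthesize   {"text": "Hello world", "language": "eng (English)", "speed": 1.0}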

def detect_mime_type(input_bytes):
    mime = magic.Magic(mime=True)
    return mime.from_buffer(input_bytes)

def extract_audio(input_bytes):
    mime_type = detect_mime_type(input_bytes)
    
    if mime_type.startswith('audio/'):
        return sf.read(io.BytesIO(input_bytes))
    elif mime_type.startswith('video/webm'):
        audio = AudioSegment.from_file(io.BytesIO(input_bytes), format="webm")
        samples = np.array(audio.get_array_of_samples())
        # pydub returns interleaved integer samples; reshape to (frames, channels)
        # and scale to [-1.0, 1.0] so both branches return float audio.
        if audio.channels > 1:
            samples = samples.reshape((-1, audio.channels))
        audio_array = samples.astype(np.float32) / float(1 << (8 * audio.sample_width - 1))
        return audio_array, audio.frame_rate
    else:
        raise ValueError(f"Unsupported MIME type: {mime_type}")

@app.post("/transcribe")
async def transcribe_audio(request: AudioRequest):
    try:
        input_bytes = base64.b64decode(request.audio)
        audio_array, sample_rate = extract_audio(input_bytes)

        # Convert to mono if stereo
        if len(audio_array.shape) > 1:
            audio_array = audio_array.mean(axis=1)

        # Ensure audio_array is float32
        audio_array = audio_array.astype(np.float32)

        # Resample if necessary
        if sample_rate != ASR_SAMPLING_RATE:
            audio_array = librosa.resample(audio_array, orig_sr=sample_rate, target_sr=ASR_SAMPLING_RATE)

        result = transcribe(audio_array, request.language)
        return JSONResponse(content={"transcription": result})
    except Exception as e:
        logger.error(f"Error in transcribe_audio: {str(e)}", exc_info=True)
        error_details = {
            "error": str(e),
            "traceback": traceback.format_exc()
        }
        return JSONResponse(
            status_code=500,
            content={"message": "An error occurred during transcription", "details": error_details}
        )
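
# Minimal client sketch for /transcribe (commented out). Assumptions not taken from
# this file: the server is reachable at http://localhost:8000, the `requests` package
# is installed, and sample.wav is any local audio file.
#
#   import base64, requests
#   payload = {
#       "audio": base64.b64encode(open("sample.wav", "rb").read()).decode(),
#       "language": "eng (English)",
#   }
#   print(requests.post("http://localhost:8000/transcribe", json=payload).json())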

@app.post("/synthesize")
async def synthesize_speech(request: TTSRequest):
    logger.info(f"Synthesize request received: text='{request.text}', language='{request.language}', speed={request.speed}")
    try:
        # Extract the ISO code from the full language name
        lang_code = request.language.split()[0].strip()
        
        # Input validation
        if not request.text:
            raise ValueError("Text cannot be empty")
        if lang_code not in TTS_LANGUAGES:
            raise ValueError(f"Unsupported language: {request.language}")
        if not 0.5 <= request.speed <= 2.0:
            raise ValueError(f"Speed must be between 0.5 and 2.0, got {request.speed}")
        
        logger.info(f"Calling synthesize with language '{request.language}' (code: {lang_code})")
        result, filtered_text = synthesize(request.text, request.language, request.speed)
        logger.info(f"Synthesize function completed. Filtered text: '{filtered_text}'")
        
        if result is None:
            logger.error("Synthesize function returned None")
            raise ValueError("Synthesis failed to produce audio")
        
        sample_rate, audio = result
        logger.info(f"Synthesis result: sample_rate={sample_rate}, audio_shape={audio.shape if isinstance(audio, np.ndarray) else 'not numpy array'}, audio_dtype={audio.dtype if isinstance(audio, np.ndarray) else type(audio)}")
        
        logger.info("Converting audio to numpy array")
        audio = np.array(audio, dtype=np.float32)
        logger.info(f"Converted audio shape: {audio.shape}, dtype: {audio.dtype}")
        
        logger.info("Normalizing audio")
        max_value = np.max(np.abs(audio))
        if max_value == 0:
            logger.warning("Audio array is all zeros")
            raise ValueError("Generated audio is silent (all zeros)")
        audio = audio / max_value
        logger.info(f"Normalized audio range: [{audio.min()}, {audio.max()}]")
        
        logger.info("Converting to int16")
        audio = (audio * 32767).astype(np.int16)
        logger.info(f"Int16 audio shape: {audio.shape}, dtype: {audio.dtype}")
        
        logger.info("Writing audio to buffer")
        buffer = io.BytesIO()
        sf.write(buffer, audio, sample_rate, format='wav')
        buffer.seek(0)
        logger.info(f"Buffer size: {buffer.getbuffer().nbytes} bytes")
        
        logger.info("Preparing StreamingResponse")
        response = StreamingResponse(
            buffer, 
            media_type="audio/wav",
            headers={
                "Content-Disposition": "attachment; filename=synthesized_audio.wav"
            }
        )
        logger.info("StreamingResponse prepared successfully")
        
        return response

    except ValueError as ve:
        logger.error(f"ValueError in synthesize_speech: {str(ve)}", exc_info=True)
        return JSONResponse(
            status_code=400,
            content={"message": "Invalid input", "details": str(ve)}
        )
    except Exception as e:
        logger.error(f"Unexpected error in synthesize_speech: {str(e)}", exc_info=True)
        error_details = {
            "error": str(e),
            "type": type(e).__name__,
            "traceback": traceback.format_exc()
        }
        return JSONResponse(
            status_code=500,
            content={"message": "An unexpected error occurred during speech synthesis", "details": error_details}
        )
    finally:
        logger.info("Synthesize request completed")
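
# Minimal client sketch for /synthesize (commented out; same assumptions as the
# /transcribe sketch above: local server on port 8000 and the `requests` package).
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/synthesize",
#       json={"text": "Hello world", "language": "eng (English)", "speed": 1.0},
#   )
#   open("synthesized_audio.wav", "wb").write(resp.content)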

@app.post("/identify")
async def identify_language(request: AudioRequest):
    try:
        input_bytes = base64.b64decode(request.audio)
        audio_array, sample_rate = extract_audio(input_bytes)
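        # The decoded array is passed to identify() unchanged; any resampling or
        # mono conversion is assumed to happen inside lid.identify.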
        result = identify(audio_array)
        return JSONResponse(content={"language_identification": result})
    except Exception as e:
        logger.error(f"Error in identify_language: {str(e)}", exc_info=True)
        error_details = {
            "error": str(e),
            "traceback": traceback.format_exc()
        }
        return JSONResponse(
            status_code=500,
            content={"message": "An error occurred during language identification", "details": error_details}
        )

@app.get("/asr_languages")
async def get_asr_languages():
    try:
        return JSONResponse(content=ASR_LANGUAGES)
    except Exception as e:
        logger.error(f"Error in get_asr_languages: {str(e)}", exc_info=True)
        error_details = {
            "error": str(e),
            "traceback": traceback.format_exc()
        }
        return JSONResponse(
            status_code=500,
            content={"message": "An error occurred while fetching ASR languages", "details": error_details}
        )

@app.get("/tts_languages")
async def get_tts_languages():
    try:
        return JSONResponse(content=TTS_LANGUAGES)
    except Exception as e:
        logger.error(f"Error in get_tts_languages: {str(e)}", exc_info=True)
        error_details = {
            "error": str(e),
            "traceback": traceback.format_exc()
        }
        return JSONResponse(
            status_code=500,
            content={"message": "An error occurred while fetching TTS languages", "details": error_details}
        )
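

# Local development entry point (an added convenience sketch: it assumes uvicorn is
# installed, and a hosted environment such as a Hugging Face Space may launch the
# app differently).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)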