import gradio as gr
import torch
import numpy as np
import librosa
from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification
from safetensors.torch import load_file

# Path to the fine-tuned model and its processor (local 'results' directory)
model_name = 'results'
processor = Wav2Vec2Processor.from_pretrained(model_name)

# Load the model weights from the safetensors file
state_dict = load_file(f"{model_name}/model.safetensors")
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name, state_dict=state_dict)
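# Note: recent transformers versions pick up model.safetensors from the directory on their own,
# so the explicit load_file call mainly makes the weight-loading step visible.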

def classify_accent(audio):
    if audio is None:
        return "Error: No se recibió audio"

    try:
        # Check whether the audio input is a file path
        if isinstance(audio, str):
            audio_array, sample_rate = librosa.load(audio, sr=None)
        else:
            raise ValueError("Unexpected audio format.")

        print(f"Forma del audio: {audio_array.shape}, Frecuencia de muestreo: {sample_rate}")

        # Convert the audio to float32
        audio_array = audio_array.astype(np.float32)

        # Resample to 16 kHz if necessary (Wav2Vec2 models expect 16 kHz input)
        if sample_rate != 16000:
            audio_array = librosa.resample(audio_array, orig_sr=sample_rate, target_sr=16000)

        input_values = processor(audio_array, return_tensors="pt", sampling_rate=16000).input_values

        # Run inference without tracking gradients
        with torch.no_grad():
            logits = model(input_values).logits
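        # Take the class with the highest logit as the prediction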
        predicted_ids = torch.argmax(logits, dim=-1).item()

        # Map the predicted class id to an accent label
        labels = ["Español", "Otro"]
        return labels[predicted_ids]

    except Exception as e:
        return f"Error al procesar el audio: {str(e)}"

# HTML description shown in the Gradio interface
description_html = """
<p>Prueba con grabación o cargando un archivo de audio. Para probar, recomiendo una palabra.</p>
<p>Ramon Mayor Martins: <a href="https://rmayormartins.github.io/" target="_blank">Website</a> | <a href="https://huggingface.co/rmayormartins" target="_blank">Spaces</a></p>
"""

# Build the Gradio interface
interface = gr.Interface(
    fn=classify_accent,
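    # type="filepath" makes Gradio pass a temporary file path (str), which classify_accent loads with librosa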
    inputs=gr.Audio(type="filepath"),
    outputs="label",
    title="Clasificador de Sotaques (Español vs Otro)",
    description=description_html
)

interface.launch(debug=True)
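# Quick local sanity check (hypothetical file name; assumes such a WAV file exists):
# print(classify_accent("sample.wav"))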