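# Gradio demo: speech emotion recognition for Russian speech, using the
# Aniemore wav2vec2-xlsr-53 model from the Hugging Face Hub.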
import gradio as gr
import torch
import torch.nn.functional as F
import torchaudio
from transformers import AutoConfig, AutoModel, Wav2Vec2FeatureExtractor
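# Inference helpers: the model emits one logit per emotion label, and predict()
# turns those into percentage scores keyed by the label names in config.id2label.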
def resample(speech_array, sampling_rate):
    # Gradio delivers the waveform as a NumPy array; convert it to a float
    # tensor before resampling to the model's expected 16 kHz rate.
    speech = torch.from_numpy(speech_array).float()
    resampler = torchaudio.transforms.Resample(sampling_rate, SR)
    return resampler(speech).squeeze()
def predict(speech_array, sampling_rate):
    # Resample, extract features, and score the clip in a single forward pass.
    speech = resample(speech_array, sampling_rate)
    inputs = feature_extractor(speech, sampling_rate=SR, return_tensors="pt", padding=True)
    inputs = {key: inputs[key].to(device) for key in inputs}
    with torch.no_grad():
        logits = model(**inputs).logits
    scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
    return [
        {"Emotion": config.id2label[i], "Score": f"{score * 100:.1f}%"}
        for i, score in enumerate(scores)
    ]
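# Load the config, model, and feature extractor once at startup.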
TRUST = True  # the Aniemore repo ships custom model code, so remote code must be trusted
SR = 16000    # target sampling rate expected by wav2vec2

MODEL_ID = "Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition"
config = AutoConfig.from_pretrained(MODEL_ID, trust_remote_code=TRUST)
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=TRUST)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_ID)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # inference only: disable dropout
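# Gradio wiring: with type="numpy", the Audio input hands the callback a
# (sample_rate, waveform) tuple.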
def recognize(audio):
    sr, speech = audio
    return predict(speech, sr)
def get_emotion_interface():
    return gr.Interface(
        fn=recognize,
        inputs=[
            gr.inputs.Audio(source="upload", type="numpy")
        ],
        outputs=[
            "textbox"
        ],
    )
interfaces = [
    get_emotion_interface()
]
names = [
"Russian Emotion Recognition"
]
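# Bind to 0.0.0.0 so the app is reachable from outside the container
# (e.g. when hosted as a Hugging Face Space).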
gr.TabbedInterface(interfaces, names).launch(server_name="0.0.0.0", enable_queue=False)