import gradio as gr
import numpy as np
import torch
from datasets import load_dataset

from transformers import (
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    SpeechT5Processor,
    pipeline,
    VitsModel,
    VitsTokenizer,
)


device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load the speech recognition checkpoint (Whisper handles both transcription and translation)
asr_pipe = pipeline(
    "automatic-speech-recognition", model="openai/whisper-base", device=device
)
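# (gr.Audio with type="filepath" hands the pipeline a path on disk; the
# pipeline returns a dict of the form {"text": "..."})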


# speecht5
# load text-to-speech checkpoint and speaker embeddings
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
# vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)

# embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
# speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

# MMS (VITS) text-to-speech checkpoint for French
model = VitsModel.from_pretrained("Matthijs/mms-tts-fra").to(device)
tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-fra")
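# (the "Matthijs/..." checkpoints were pre-release uploads; if they ever
# disappear, the same model should be available as "facebook/mms-tts-fra")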


# Keep the main speech_to_speech_translation function unchanged;
# only the translate and synthesise functions are updated as needed.
def translate(audio):
    # Whisper's task="translate" only targets English; forcing
    # task="transcribe" with language="fr" makes it emit French text instead
    outputs = asr_pipe(
        audio,
        max_new_tokens=256,
        generate_kwargs={"task": "transcribe", "language": "fr"},
    )
    print(outputs)  # debug: inspect the raw pipeline output
    return outputs["text"]


# speecht5
# def synthesise(text):
#     inputs = processor(text=text, return_tensors="pt")
#     speech = model.generate_speech(
#         inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder
#     )
#     return speech.cpu()


def synthesise(text):
    inputs = tokenizer(text, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)

    # VITS is non-autoregressive: a single forward pass returns the waveform
    with torch.no_grad():
        outputs = model(input_ids)

    # VitsModelOutput exposes the generated audio as `waveform`
    speech = outputs.waveform[0]

    return speech.cpu()


def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    # scale the float waveform in [-1, 1] to 16-bit PCM; MMS generates 16 kHz audio
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech
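# Minimal local smoke test, a sketch for exercising the cascade without the
# Gradio UI (assumes soundfile is installed and that the bundled example.wav
# referenced below exists); uncomment to run:
#
# import soundfile as sf
# rate, audio = speech_to_speech_translation("./example.wav")
# sf.write("translated.wav", audio, rate)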


title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in French. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Meta's
[MMS TTS](https://huggingface.co/Matthijs/mms-tts-fra) model for text-to-speech:

![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""

demo = gr.Blocks()

mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(source="upload", type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])

# demo.launch(share=True)
demo.launch()