import gradio as gr
from TTS.api import TTS

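# Load the multilingual XTTS v1 model and move it to the GPU for inference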
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1")
tts.to("cuda")


def predict(prompt, language, audio_file_pth):
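    """Synthesise `prompt` in `language`, cloning the voice from the reference clip.

    The synthesised audio is written to output.wav and returned as a waveform video.
    """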
    tts.tts_to_file(
        text=prompt,
        file_path="output.wav",
        speaker_wav=audio_file_pth,
        language=language,
    )

    return gr.make_waveform(
        audio="output.wav",
    )


title = "Coqui🐸 XTTS"

description = """
<p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to a GPU via the settings.
<br/>
<a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
XTTS is a voice generation model that lets you clone voices into different languages using just a quick 3-second audio clip.
Built on Tortoise, XTTS introduces important model changes that make cross-language voice cloning and multi-lingual speech generation super easy.
<br/>
This is the same model that powers Coqui Studio and the Coqui API; however, we apply a few tricks to make it faster and to support streaming inference.
"""

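# Wire up the UI: text prompt + target language + reference audio in, waveform video out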
gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(
            label="Text Prompt",
            info="One or two sentences at a time is better",
            placeholder="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
        ),
        gr.Dropdown(
            label="Language",
            info="Select an output language for the synthesised speech",
            choices=[
                "en",
                "es",
                "fr",
                "de",
                "it",
                "pt",
                "pl",
                "tr",
                "ru",
                "nl",
                "cz",
                "ar",
                "zh",
            ],
            max_choices=1,
            value="en"
        ),
        gr.Audio(
            label="Reference Audio",
            info="Upload a reference audio for target speaker voice",
            type="filepath",
            value="examples/en_speaker_6.wav"
        ),
    ],
    outputs=[
        gr.Video(label="Synthesised Speech"),
    ],
    title=title,
    description=description,
).launch(debug=True)