File size: 3,761 Bytes
66baa21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ab40649
66baa21
 
 
 
 
 
ff3710a
66baa21
 
 
ff3710a
66baa21
 
ff3710a
66baa21
 
 
5eb978f
 
66baa21
 
3857b9c
66baa21
 
 
 
 
 
 
 
 
 
 
 
 
e69710d
66baa21
ab40649
66baa21
f52ccf3
 
66baa21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f922a17
66baa21
 
 
 
 
 
 
5eb978f
66baa21
 
 
 
 
 
 
 
 
 
 
 
 
f52ccf3
66baa21
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import torch

from transformers import pipeline

import numpy as np
import gradio as gr

def _grab_best_device(use_gpu=True):
    if torch.cuda.device_count() > 0 and use_gpu:
        device = "cuda"
    else:
        device = "cpu"
    return device

# Resolved compute device ("cuda" or "cpu").
# NOTE(review): computed but never used below — the pipeline() calls hard-code
# device=0 (first GPU), which will fail on CPU-only hosts; confirm intent.
device = _grab_best_device()

# Default TTS checkpoint for each supported language.
default_model_per_language = {
    "marathi": "ylacombe/mms-mar-finetuned-monospeaker"
}

# All selectable checkpoints per language (feeds the model dropdown).
models_per_language = {
    "marathi": ["ylacombe/mms-mar-finetuned-monospeaker"]
}

# Hugging Face Hub path of the checkpoint loaded at startup.
HUB_PATH = "ylacombe/mms-mar-finetuned-monospeaker"


# Mutable app-wide state: cached pipelines plus the currently selected
# model/language. "pipe" and "original_pipe" start out as the same checkpoint;
# only "original_pipe" and "language" are updated later (see generate_audio).
pipe_dict = {
    "current_model": "ylacombe/mms-mar-finetuned-monospeaker",
    "pipe":  pipeline("text-to-speech", model=HUB_PATH, device=0),
    "original_pipe": pipeline("text-to-speech", model=default_model_per_language["marathi"], device=0),
    "language": "marathi",
}

# Markdown shown at the top of the demo page (rendered by gr.Markdown below).
# Fixed user-facing typos: "Parakinsons" -> "Parkinson's", article/grammar.
title = """
Marathi Parkinson Enabler: Speaking is a big challenge during Parkinson's. Patients show slurred speech and cannot communicate effectively. 
This is a Marathi text-to-speech model for Parkinson's users who want to communicate in Marathi. 
            """

# The finetuned MMS checkpoint is mono-speaker.
max_speakers = 1


# Inference
def generate_audio(text, model_id, language):
    """Synthesise *text* with the cached TTS pipeline for *language*.

    Parameters
    ----------
    text : str
        Sentence to synthesise.
    model_id : str
        Checkpoint selected in the model dropdown. NOTE(review): currently
        unused — kept so the Gradio click-handler signature stays unchanged.
    language : str
        Key into ``default_model_per_language``; when it differs from the
        cached language, the default pipeline for that language is reloaded.

    Returns
    -------
    gr.Audio
        Visible audio component carrying ``(sampling_rate, waveform)``,
        set to autoplay.
    """
    # Lazily swap in the default pipeline when the user picks a new language.
    if pipe_dict["language"] != language:
        gr.Warning(f"Language has changed - loading new default model: {default_model_per_language[language]}")
        pipe_dict["language"] = language
        # NOTE(review): device=0 hard-codes the first GPU; consider using the
        # module-level `device` so CPU-only hosts still work.
        pipe_dict["original_pipe"] = pipeline("text-to-speech", model=default_model_per_language[language], device=0)

    # Removed dead code from the original: an unused `num_speakers` read and
    # an unused `out = []` accumulator.
    output = pipe_dict["original_pipe"](text)
    return gr.Audio(
        value=(output["sampling_rate"], output["audio"].squeeze()),
        type="numpy",
        autoplay=True,
        label=f"Finetuned model prediction {default_model_per_language[language]}",
        show_label=True,
        visible=True,
    )


# Page-level CSS: centre the main container and the intro header.
css = """
#container{
    margin: 0 auto;
    max-width: 80rem;
}
#intro{
    max-width: 100%;
    text-align: center;
    margin: 0 auto;
}
"""
# Gradio blocks demo    
with gr.Blocks(css=css) as demo_blocks:
    # Intro text at the top of the page.
    gr.Markdown(title, elem_id="intro")

    with gr.Row():
        # Left column: text input, trigger button, language + model pickers.
        with gr.Column():
            inp_text = gr.Textbox(label="Input Text", info="What sentence would you like to synthesise?")
            btn = gr.Button("Generate Audio!")
            language = gr.Dropdown(
                default_model_per_language.keys(),
                value = "marathi",
                label = "language",
                info = "Language that you want to test"
            )
            
            # Model choices for the initially selected language; refreshed by
            # the language.change handler below.
            model_id = gr.Dropdown(
                    models_per_language["marathi"],
                    value="ylacombe/mms-mar-finetuned-monospeaker", 
                    label="Model", 
                    info="Model you want to test",
                    )
                
        # Right column: result player. Starts hidden; generate_audio returns a
        # gr.Audio with visible=True, which reveals it on first generation.
        with gr.Column():
            output = gr.Audio(type="numpy", autoplay=False, label=f"Generated Audio", show_label=True, visible=False)

    with gr.Accordion("Datasets and models details", open=False):
        gr.Markdown("""
        
### Marathi
* **Model**: [Marathi MMS TTS](https://huggingface.co/facebook/mms-tts-mar).
* **Datasets**:
    - [Marathi TTS dataset](https://huggingface.co/datasets/ylacombe/google-chilean-marathi). 
                    """) 


    # Repopulate the model dropdown whenever the language selection changes.
    language.change(lambda language: gr.Dropdown(
                    models_per_language[language],
                    value=models_per_language[language][0], 
                    label="Model", 
                    info="Model you want to test",
                    ),
                    language,
                    model_id
                   )
    
    # Wire the button to inference: inputs -> generate_audio -> audio player.
    btn.click(generate_audio, [inp_text, model_id, language], output)
    

# Enable request queuing and start the web server (blocks until shutdown).
demo_blocks.queue().launch()