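# Gradio demo for pronunciation assessment: the user reads a prompt aloud,
# the recording is transcribed into phonemes and phonological attributes,
# errors against the expected sequence are highlighted, and per-attribute
# contours can be plotted over the audio's spectrogram.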
import json
import random

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Levenshtein import editops
from scipy.io import wavfile
from scipy.signal import spectrogram

import Phonemize
import transcriber


# Speech-attribute transcription engine and grapheme-to-phoneme front end.
engine = transcriber.transcribe_SA(model_path='models/SA', verbose=0)
phonemizer = Phonemize.phonemization()

# Pool of practice prompts, one per line.
prompts = np.loadtxt('data/prompts.txt', dtype=str)

Attributes = engine.att_list
# Most recent recognition table; written by recognize_audio, read by get_assessment.
df_output = None

def select_prompt():
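    """Return a randomly chosen practice prompt."""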
    return random.choice(prompts)

def phonemize_prompt(prompt):
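    """Convert a prompt to a lower-case, space-separated CMU phoneme string."""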
    return ' '.join(phonemizer.cmu_phonemize(prompt)).lower()

# Example of the (token, label) format consumed by gr.HighlightedText; not wired into the UI.
def diff_fn():
    return [('H', '+'), ('E', '-'), ('N', None), ('\n', None), ('F', '-'), ('Fgo', '-'), ('M', '+')]

def recognize_audio(audio_file, attributes):
    """Transcribe the audio and render phonemes plus attribute patterns as an HTML table."""
    global df_output
    output = engine.transcribe(audio_file, attributes=tuple(attributes),
                               phonological_matrix_file='data/p2att_en_us-arpa.csv',
                               human_readable=False)
    # First row: recognized phoneme symbols; following rows: one +/- pattern per attribute.
    d = json.loads(output)
    records = [['Phoneme'] + d['Phoneme']['symbols']]
    for att in d['Attributes']:
        records.append([att['Name']] + att['Pattern'])
    df = pd.DataFrame.from_records(records)
    df.fillna('', inplace=True)
    df_output = df
    return df.to_html(header=False, index=False)
    
# Align the expected sequence with the recognized one via edit operations and return
# (token, label) pairs for gr.HighlightedText: 'S' substitution, 'I' insertion, 'D' deletion.
def get_error(exp_list, rec_list):
    exp_list = list(exp_list)
    rec_list = list(rec_list)
    vocab = set(exp_list+rec_list)
    w2c = dict(zip(vocab,range(len(vocab))))
    
    exp_out = [[a,None] for a in exp_list]
    rec_out = [[a,None] for a in rec_list]  
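    # editops works on strings, so encode each distinct token as a single character.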
    exp_enc = ''.join([chr(w2c[c]) for c in exp_list])
    rec_enc = ''.join([chr(w2c[c]) for c in rec_list])

    for op, exp_i, rec_i in editops(exp_enc, rec_enc):
        if op == 'replace':
            exp_out[exp_i][1] = 'S'
            rec_out[rec_i][1] = 'S'
        elif op == 'insert':
            rec_out[rec_i][1] = 'I'
        elif op == 'delete':
            exp_out[exp_i][1] = 'D'
    
    diff_list = [['Expected:\t', None]] + exp_out + [['\n',None]] + [['Recognized:\t', None]] + rec_out
    return diff_list


def scale_vector(vector, new_min, new_max):
    """Linearly rescale the values in vector to the range [new_min, new_max]."""
    min_val = min(vector)
    max_val = max(vector)
    if max_val == min_val:
        # A constant contour carries no shape to preserve; pin it to the bottom of the range.
        return [new_min for _ in vector]
    return [((val - min_val) * (new_max - new_min) / (max_val - min_val)) + new_min
            for val in vector]



def create_spectrogram_with_att(wav_file, att_contour, att):
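    """Plot the spectrogram of wav_file and overlay att_contour on top of it."""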
    # Read the WAV file; mix multi-channel recordings down to mono first
    sampling_rate, data = wavfile.read(wav_file)
    if data.ndim > 1:
        data = data.mean(axis=1)

    # Calculate the spectrogram
    f, t, Sxx = spectrogram(data, fs=sampling_rate)
    fig, ax = plt.subplots(figsize=(10, 5))

    # Plot the spectrogram
    ax.pcolormesh(t, f, 10 * np.log10(Sxx + 1e-10), shading='gouraud')  # epsilon avoids log(0)
    ax.set_ylabel('Frequency (Hz)')
    ax.set_xlabel('Time (s)')
    ax.set_title(f'Spectrogram with {att} Contour')
    ax.set_ylim(0, 8000)  # Adjust the frequency range if necessary

    # Plot the attribute contour; frames are assumed to be 20 ms apart
    time_axis = np.arange(len(att_contour)) * 0.02
    ax.plot(time_axis, att_contour, color='blue', label=f'{att} Contour')
    ax.legend()

    return fig

def plot_contour(audio_file, att):
    """Overlay the frame-level contour of attribute att on the audio's spectrogram."""
    # Per-frame logit of the positive ('p_') token for this attribute, as a 1-D series.
    indx = engine.processor.tokenizer.convert_tokens_to_ids(f'p_{att}')
    att_contour = engine.logits.squeeze()[:, indx]
    att_contour = scale_vector(att_contour, 0, 6000)
    fig = create_spectrogram_with_att(audio_file, att_contour, att)
    return fig


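# Build the three-tab UI: Main (record and transcribe), Assessment (error
# highlighting), and Analysis (attribute contour over the spectrogram).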
with gr.Blocks() as gui:
    with gr.Tab("Main"):
        prompt = gr.Textbox(label='Prompt', value=select_prompt)
        get_prompt = gr.Button("Get Prompt")
        get_prompt.click(fn=select_prompt, outputs=prompt)
        
        prompt_phonemes = gr.Textbox(label="Expected Phonemes", interactive=False)
        get_phoneme = gr.Button("Get Phonemes")
        get_phoneme.click(fn=phonemize_prompt, inputs=prompt, outputs=prompt_phonemes)
        
        record_audio = gr.Audio(sources=["microphone","upload"], type="filepath")
        att_list = gr.Dropdown(label="Select Attributes", choices=Attributes, value=['vowel', 'voiced', 'consonant'], multiselect=True)
        process = gr.Button("Process Audio")
        
        recognition = gr.HTML(label='Output')
        
        process.click(fn=recognize_audio, inputs=[record_audio, att_list], outputs=recognition)
        
    
        
    with gr.Tab("Assessment"):
        assess = gr.Button("Assessment")
        # One HighlightedText box per row of the recognition table:
        # index 0 for phonemes, then one per attribute; all hidden until assessed.
        diff = []
        for i in range(len(Attributes) + 1):
            diff.append(gr.HighlightedText(
                    combine_adjacent=False,
                    show_legend=True,
                    color_map={"S": "red", "I": "green", "D": "blue"}, visible=False))
            
        def get_assessment(prompt_phonemes):
            """Compare the expected phonemes and attributes against the last recognition."""
            if df_output is None:
                raise gr.Error("Process an audio recording first.")
            outputs = [gr.HighlightedText(visible=False)] * (len(Attributes) + 1)
            # Row 0 of df_output holds the recognized phoneme sequence.
            outputs[0] = gr.HighlightedText(label="Phoneme Assessment",
                                            value=get_error(prompt_phonemes.split(), df_output.iloc[0].values[1:]),
                                            visible=True)
            # Each remaining row holds the recognized +/- pattern for one attribute.
            for i, (_, r) in enumerate(df_output.iloc[1:].iterrows(), start=1):
                att = r.iloc[0]
                # Expected pattern: '+' if the phoneme carries the attribute, '-' otherwise.
                convert = lambda ph: '-' if f'n_{att}' in engine.p2att_map[ph] else '+'
                exp_att = [convert(ph) for ph in prompt_phonemes.split()]
                rec_att = r.iloc[1:].values
                outputs[i] = gr.HighlightedText(label=f"{att} Assessment",
                                                value=get_error(exp_att, rec_att),
                                                visible=True)

            return outputs

        assess.click(fn=get_assessment, inputs=[prompt_phonemes], outputs=diff)

    with gr.Tab("Analysis"):
        selected_att = gr.Dropdown(Attributes, label="Select an Attribute to plot", value='voiced', interactive=True)
        do_plot = gr.Button('Plot')
        plot_block = gr.Plot(label='Spectrogram with Attribute Contour')
        do_plot.click(plot_contour, inputs=[record_audio,selected_att], outputs=plot_block)

gui.launch()