Create app.py
app.py
ADDED
@@ -0,0 +1,75 @@
# Pronunciation practice app: the user picks a sentence, records or uploads
# audio, and the app scores how close the transcript is to the sentence.

import gradio as gr
import speech_recognition as sr
from Levenshtein import ratio
import tempfile
import numpy as np
import soundfile as sf
import pandas as pd

# Sample dataframe with sentences ordered from easy to hard
data = {
    "Sentences": [
        "A stitch in time saves nine.",
        "To be or not to be, that is the question.",
        "Five cats were living in safe caves.",
        "Hives give shelter to bees in large caves.",
        "His decision to plant a rose was amazing.",
        "She sells sea shells by the sea shore.",
        "The colorful parrot likes rolling berries.",
        "Time flies like an arrow; fruit flies like a banana.",
        "Good things come to those who wait.",
        "All human beings are born free and equal in dignity and rights."
    ]
}
df = pd.DataFrame(data)

def transcribe_audio(file_info):
    # file_info comes from gr.Audio(type="numpy") as a (sample_rate, data) tuple.
    r = sr.Recognizer()
    with tempfile.NamedTemporaryFile(delete=True, suffix=".wav") as tmpfile:
        # Write the recording to a temporary WAV file at its original sample rate
        # so speech_recognition can read it back.
        sf.write(tmpfile.name, data=file_info[1], samplerate=file_info[0], format='WAV')
        with sr.AudioFile(tmpfile.name) as source:
            audio_data = r.record(source)
        try:
            # Free Google Web Speech API; needs outbound network access.
            text = r.recognize_google(audio_data)
            return text
        except sr.UnknownValueError:
            return "Could not understand audio"
        except sr.RequestError as e:
            return f"Could not request results; {e}"
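
# For reference, a made-up input in the shape gr.Audio(type="numpy") delivers,
# i.e. a (sample_rate, data) tuple (values are illustrative only):
#
#   file_info = (48000, np.zeros(48000, dtype=np.int16))  # one second of silence
#   transcribe_audio(file_info)  # silence typically lands in the UnknownValueError branch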

def pronunciation_correction(expected_text, file_info):
    user_spoken_text = transcribe_audio(file_info)
    # Levenshtein ratio: 1.0 is an exact match, 0.0 is no similarity at all.
    similarity = ratio(expected_text.lower(), user_spoken_text.lower())
    score = round(similarity, 2)

    if similarity >= 0.9:
        feedback = "Excellent pronunciation!"
    elif similarity >= 0.7:
        feedback = "Good pronunciation!"
    elif similarity >= 0.5:
        feedback = "Needs improvement."
    else:
        feedback = "Poor pronunciation, try to focus more on clarity."

    return feedback, score
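
# A rough sketch of how the ratio maps onto the feedback tiers above
# (the mis-transcriptions are invented for illustration):
#
#   ratio("good things come to those who wait.",
#         "good things come to those who weight.")   # small edit -> high score
#   ratio("good things come to those who wait.",
#         "could not understand audio")               # failed transcription -> low score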

with gr.Blocks() as app:
    with gr.Row():
        sentence_dropdown = gr.Dropdown(choices=df['Sentences'].tolist(), label="Select a Sentence")
        selected_sentence_output = gr.Textbox(label="Selected Text", interactive=False)
    audio_input = gr.Audio(label="Upload Audio File", type="numpy")
    check_pronunciation_button = gr.Button("Check Pronunciation")
    pronunciation_feedback = gr.Textbox(label="Pronunciation Feedback")
    pronunciation_score = gr.Number(label="Pronunciation Accuracy Score: 0 (No Match) ~ 1 (Perfect)")

    sentence_dropdown.change(lambda x: x, inputs=sentence_dropdown, outputs=selected_sentence_output)
    check_pronunciation_button.click(
        pronunciation_correction,
        inputs=[sentence_dropdown, audio_input],
        outputs=[pronunciation_feedback, pronunciation_score]
    )

app.launch(debug=True)
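
app.py pulls in six third-party packages, so the Space presumably also needs a requirements.txt next to it. A minimal sketch, assuming the usual PyPI names for these imports (the exact entries are an assumption, not taken from this repo):

    gradio
    SpeechRecognition
    Levenshtein
    soundfile
    numpy
    pandas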