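"""Gradio app: Konkani speech recognition with a fine-tuned Whisper small model."""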
import os

import gradio as gr
import torch
from transformers import WhisperTokenizer, pipeline

# Tokenizer from the base Whisper checkpoint (language="marathi", task="transcribe" can be pinned here).
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small")

# Fine-tuned Konkani model; earlier checkpoints: "thak123/whisper-small-LDC-V1", "thak123/whisper-small-gom".
pipe = pipeline(
    task="automatic-speech-recognition",
    model="thak123/gom-stt-v3",
    tokenizer=tokenizer,
)
# To force Marathi transcription prompts, uncomment:
# pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(
#     language="marathi", task="transcribe"
# )
def transcribe(audio):
    # Run the ASR pipeline once and return only the transcribed text.
    return pipe(audio)["text"]
iface = gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(sources=["microphone", "upload"], type="filepath")],
    outputs="text",
    examples=[
        [os.path.join("audio", "chalyaami.mp3")],
        [os.path.join("audio", "ekdonteen.flac")],
        [os.path.join("audio", "heyatachadjaale.mp3")],
    ],
    title="Whisper Konkani",
    description="Realtime demo for Konkani speech recognition using a fine-tuned Whisper small model.",
)
iface.launch()