import whisper

import gradio as gr
# Load the Whisper checkpoint once at module import so the Gradio handler
# reuses it across requests instead of reloading per call. "base" is a
# small, CPU-friendly model.
model = whisper.load_model('base')
def transcribe(inputs, task="transcribe"):
    """Transcribe an audio file with the module-level Whisper model.

    Args:
        inputs: Filesystem path to the audio file. The Gradio Audio
            component passes a path because it uses type="filepath".
        task: Whisper decoding task, "transcribe" or "translate".
            Defaults to "transcribe" so the function also works when the
            UI supplies only the audio input (previously this parameter
            was required, so gr.Interface's single-input call would fail).

    Returns:
        The recognized text as a single string.

    Raises:
        gr.Error: If no audio was uploaded or recorded.
    """
    if inputs is None:
        raise gr.Error(
            "No audio file submitted! Please upload or record an audio "
            "file before submitting your request."
        )
    # Forward the task so "translate" actually takes effect; previously
    # the parameter was accepted but ignored.
    result = model.transcribe(inputs, task=task)
    return result["text"]
# Build the web UI. The Audio component hands transcribe() a temporary
# file path (type="filepath"); the output is the plain transcript text.
interface = gr.Interface(
    fn=transcribe,
    # Enable file upload alongside the microphone, matching the
    # description's "microphone or audio inputs".
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs="text",
    # Title matches the checkpoint actually loaded above ("base"),
    # not the previously claimed "Large V3".
    title="Whisper Base: Transcribe Audio",
    description=(
        "Transcribe long-form microphone or audio inputs with the click "
        "of a button! Demo uses the OpenAI Whisper 'base' checkpoint."
    ),
)

if __name__ == "__main__":
    # Launch only when run as a script, so importing this module (e.g.
    # for testing) does not start a server.
    interface.launch()