# Source: Hugging Face Space by Nitzantry1 — commit "Update app.py" (dd2fd57, verified).
# (The three header lines above were web-page artifacts, preserved here as a comment
# so the file is valid Python.)
import os

import gradio as gr
from faster_whisper import WhisperModel
from pyannote.audio import Pipeline

# NOTE(review): the original ran `os.system('pip install pyannote.audio')` *after*
# importing pyannote.audio — a missing package would already have crashed the
# import above, so the call could never help. Dependencies belong in
# requirements.txt; the install line was removed.

# Speaker-diarization pipeline. The model is gated on the Hub, so forward an
# auth token when one is configured; passing None is equivalent to omitting it.
pipeline = Pipeline.from_pretrained(
    "pyannote/speaker-diarization",
    use_auth_token=os.environ.get("HF_TOKEN"),
)

# faster-whisper takes a CTranslate2 model size or path (e.g. "large-v2"),
# NOT the transformers repo id "openai/whisper-large" the original used —
# that repo has no CT2 weights and the load fails.
# device="cuda" assumes a GPU runtime — TODO confirm the Space hardware.
whisper_model = WhisperModel("large-v2", device="cuda")
# Run diarization, then transcribe each speaker turn.
def diarize_and_transcribe(audio_file):
    """Diarize an audio file and transcribe each speaker turn with Whisper.

    Parameters
    ----------
    audio_file : str
        Path to the audio file to process.

    Returns
    -------
    str
        One line per speaker turn, formatted as
        ``Speaker <label>: <text> (from <start> to <end>)``.
    """
    # Step 1: diarization — who spoke when.
    diarization = pipeline(audio_file)

    # Accumulates one formatted line per speaker turn.
    full_transcription = []

    # itertracks(yield_label=True) yields (segment, track_name, speaker_label).
    for turn, _, speaker in diarization.itertracks(yield_label=True):
        start_time = turn.start
        end_time = turn.end

        # Step 2: transcribe only this turn. faster-whisper's transcribe() has
        # no `segment_start`/`segment_end` kwargs (the original call raised
        # TypeError on every turn); `clip_timestamps=[start, end]` is the
        # supported way to restrict transcription to a time window.
        segments, _ = whisper_model.transcribe(
            audio_file,
            language="he",
            task="transcribe",
            clip_timestamps=[start_time, end_time],
        )
        transcription = " ".join(seg.text for seg in segments)

        full_transcription.append(
            f"Speaker {speaker}: {transcription} (from {start_time:.2f} to {end_time:.2f})"
        )

    # Full transcript, one speaker turn per line.
    return "\n".join(full_transcription)
# Build the Gradio UI. The deprecated `source="upload"` kwarg was dropped:
# upload is already the default input source, and Gradio 4.x renamed the
# parameter to `sources`, so passing `source=` crashes on current releases.
interface = gr.Interface(
    fn=diarize_and_transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Speaker Diarization and Transcription",
    description="Upload an audio file to perform both speaker diarization and transcription.",
)

# Launch the app. Kept unconditional (no __main__ guard) because Hugging Face
# Spaces executes this file as the entry point.
interface.launch()