import os
import glob
import gradio as gr
import shutil
from ultralytics import YOLO
# Initialize YOLO model
model = YOLO('./best.pt')
def check_ffmpeg():
    """Check if ffmpeg is installed."""
    if shutil.which("ffmpeg") is None:
        raise EnvironmentError("ffmpeg is not installed or not found in the system PATH. Please install ffmpeg to proceed.")
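
# --- Optional sketch (assumption, not part of the original file): a variant that surfaces
# the failure directly in the Gradio UI via gr.Error instead of a plain EnvironmentError.
# `check_ffmpeg_ui` is a hypothetical name introduced here for illustration only.
def check_ffmpeg_ui():
    """Raise gr.Error so the message is shown in the web interface."""
    if shutil.which("ffmpeg") is None:
        raise gr.Error("ffmpeg is not installed or not on PATH. Please install ffmpeg to proceed.")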
def process_video(video_path, output_option):
    # Check if ffmpeg is installed
    check_ffmpeg()

    # Run the YOLO model on the video, specifying the tracker configuration
    results = model.track(video_path, save=True, tracker="bytetrack.yaml", persist=True)

    # Get the video file name without extension and directory
    video_name = os.path.splitext(os.path.basename(video_path))[0]

    # Find the latest directory created in 'runs/detect/'
    output_dir = max(glob.glob('./runs/detect/*'), key=os.path.getmtime)

    # Find the saved video file in the latest directory with the specific video name
    video_files = glob.glob(os.path.join(output_dir, f'{video_name}*.avi'))
    if not video_files:
        raise Exception(f"No .avi video files found in directory {output_dir} for {video_name}")

    # Convert the video to mp4
    mp4_files = []
    for file in video_files:
        mp4_file = f"{file[:-4]}.mp4"
        # Quote the file paths to handle spaces and overwrite without prompting
        os.system(f'ffmpeg -y -i "{file}" -vcodec libx264 "{mp4_file}"')
        os.remove(file)  # Remove the original .avi file after conversion
        mp4_files.append(mp4_file)

    # Check again for the converted .mp4 video files specifically for the processed video
    matched_mp4_files = [file for file in mp4_files if video_name in file]
    if not matched_mp4_files:
        raise Exception(f"No .mp4 video files found in directory {output_dir} after conversion for {video_name}.")

    # Count the number of unique tracked objects across all frames
    # (model.track returns one Results object per frame; boxes.id holds the track IDs)
    track_ids = set()
    for frame_result in results:
        if frame_result.boxes.id is not None:
            track_ids.update(frame_result.boxes.id.int().cpu().tolist())
    object_count = len(track_ids)

    # Always return both values so they match the two Gradio output components,
    # filling the unused one with None based on the user's choice
    if output_option == "Count":
        return object_count, None
    elif output_option == "Video":
        return None, matched_mp4_files[0]
    else:  # "Both"
        return object_count, matched_mp4_files[0]
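
# --- Optional sketch (assumption, not part of the original file): converting with
# subprocess.run instead of os.system avoids manual shell quoting of file paths.
# `convert_to_mp4` is a hypothetical helper introduced here for illustration only.
import subprocess

def convert_to_mp4(avi_path):
    """Convert an .avi file to .mp4 with libx264 and return the new path."""
    mp4_path = f"{avi_path[:-4]}.mp4"
    subprocess.run(["ffmpeg", "-y", "-i", avi_path, "-vcodec", "libx264", mp4_path], check=True)
    return mp4_path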
# Define Gradio inputs
video_input = gr.Video()
output_option_input = gr.Radio(choices=["Count", "Video", "Both"], label="Select output type")
video_interface = gr.Interface(
    fn=process_video,
    inputs=[video_input, output_option_input],
    outputs=[gr.Textbox(label="Object Count"), gr.Video(label="Tracked Video")],  # Two outputs: count and processed video
    description="YOLO Video Tracking with Output Options",
)
# Deploy the interface with share enabled
gr.TabbedInterface([video_interface], ["Track Video"]).launch(share=True)
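
# --- Optional sketch (assumption, not part of the original app): summarising per-frame
# detections from the tracking results. `summarize_frames` is a hypothetical helper
# introduced here for illustration only; it relies on the same ultralytics Results API
# used in process_video above.
def summarize_frames(results):
    """Return (frame_index, number_of_boxes) pairs from a list of tracking Results."""
    return [(i, len(frame.boxes)) for i, frame in enumerate(results)]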