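# Gradio demo: split a track into vocals and accompaniment with Spleeter,
# convert the vocal with an RVC voice model (via the r3gm/rvc_zero Space),
# pitch-shift the accompaniment, and mix everything back into a single song.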
from spleeter.separator import Separator
import gradio as gr
import shutil
import librosa
import soundfile
import os
from gradio_client import Client, file
import subprocess
from constant import weights, indices

# Remove any output left over from a previous run.
try:
    shutil.rmtree("output")
except FileNotFoundError:
    pass


def spleeter(separator, aud, model_dropdown, model_file, index_file, pitch):
    filename = os.path.basename(aud).split('.')[0]
    accompaniment_filename = f"./output/audio_example/{filename}_accompaniment.wav"
    vocal_filename = f"./output/audio_example/{filename}_vocals.wav"
    song_filename = f"./output/audio_example/{filename}_song.wav"

    # Split the uploaded track into vocals and accompaniment.
    separator.separate_to_file(aud, "output/", filename_format="audio_example/%s_{instrument}.wav" % filename)

    # Pitch-shift the accompaniment by the requested number of semitones.
    y, sr = librosa.load(accompaniment_filename)
    new_accompaniment = librosa.effects.pitch_shift(y, sr=sr, n_steps=pitch)
    soundfile.write(accompaniment_filename, new_accompaniment, sr)

    # Send the vocal to the RVC voice-conversion Space:
    # https://huggingface.co/spaces/r3gm/rvc_zero
    model = model_file if model_dropdown is None else weights[model_dropdown]
    index = index_file if model_dropdown is None else indices[model_dropdown]
    client = Client("r3gm/rvc_zero")
    result = client.predict(
        audio_files=[file(vocal_filename)],
        file_m=file(model),
        pitch_alg="rmvpe+",
        pitch_lvl=pitch,
        file_index=file(index),
        index_inf=0.75,
        r_m_f=3,
        e_r=0.25,
        c_b_p=0.5,
        active_noise_reduce=False,
        audio_effects=False,
        api_name="/run",
    )[0]

    # Mix the converted vocal and the shifted accompaniment into one file.
    command = f"""ffmpeg -y -i "{result}" -i "{accompaniment_filename}" -filter_complex amix=inputs=2:duration=longest "{song_filename}" """
    subprocess.call(command, shell=True)
    return song_filename


inputs = [
    gr.Audio(sources=["upload"], label="Input Audio File", type="filepath"),
    gr.Dropdown(
        choices=list(weights.keys()),
        value=list(weights.keys())[0],
        label="Existing voice model",
    ),
    gr.File(
        label="Model file (use this if the voice model you want is not in the dropdown)",
        type="filepath",
        height=130,
    ),
    gr.File(
        label="Index file (use this if the voice model you want is not in the dropdown)",
        type="filepath",
        height=130,
    ),
    gr.Slider(
        label="Pitch level",
        minimum=-24,
        maximum=24,
        step=1,
        value=0,
        visible=True,
        interactive=True,
    ),
]
outputs = [
    gr.Audio(label="Output Audio", type="filepath"),
]
title = "Music Spleeter"
description = "Clearing a musical composition of the performer's voice is a common task. It is solved well, for example, by professional audio file editing programs. AI algorithms have also been gaining ground recently."
article = "<div style='text-align: center; max-width:800px; margin:10px auto;'><p>In this case we use Deezer's Spleeter with ready pretrained models. It can leave as an output both just the music and just the performer's voice.</p><p>Sources: <a href='https://github.com/deezer/spleeter/' target='_blank'>Spleeter</a>: a Fast and Efficient Music Source Separation Tool with Pre-Trained Models</p><p style='text-align: center'><a href='https://starstat.yt/cat/music' target='_blank'>StarStat Music</a>: Youtubers Net Worth in category Music</p></div>"

if __name__ == '__main__':
    separator = Separator('spleeter:2stems')
    lambda_spleeter = lambda aud, model_dropdown, model_file, index_file, pitch: spleeter(separator, aud=aud, model_dropdown=model_dropdown, model_file=model_file, index_file=index_file, pitch=pitch)
    gr.Interface(lambda_spleeter, inputs, outputs, title=title, description=description, article=article).launch()
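# On Hugging Face Spaces this file runs automatically; to try it locally,
# run it with Python and open the URL that Gradio prints.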