# https://huggingface.co/spaces/asigalov61/MIDI-Search
import os
import copy
import random
import pickle
import zlib
import time as reqtime
import datetime

import numpy as np
import gradio as gr
import matplotlib.pyplot as plt
from pytz import timezone
# Missing in the original: the app calls SentenceTransformer(...) and
# util.cos_sim(...) but never imported them — NameError at startup.
from sentence_transformers import SentenceTransformer, util

import TMIDIX
from midi_to_colab_audio import midi_to_colab_audio
#==========================================================================================================
def find_midi(title, artist):
    """Semantic search over the MIDI corpus.

    Encodes the free-text query "<title> by <artist>" with the
    sentence-transformer model, finds the closest corpus entry by cosine
    similarity, decodes its token score into a MIDI file, renders audio and
    a scatter plot, and yields the four Gradio outputs:
    (matched title, midi file path, (sample_rate, audio), matplotlib plot).

    Relies on module-level globals bound in ``__main__``: PDT, model,
    corpus_embeddings, all_MIDI_files_names, soundfont_path.
    """
    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('-' * 70)
    print('Req title:', title)
    print('Req artist:', artist)
    print('-' * 70)

    # Build the query string; either part may be empty.
    input_text = ''
    if title != '':
        input_text += title
    if artist != '':
        input_text += ' by ' + artist

    print('Searching...')

    query_embedding = model.encode([input_text])

    # Cosine similarity between the query and every corpus embedding.
    similarities = util.cos_sim(query_embedding, corpus_embeddings)

    # Index (and score) of the most similar corpus sentence.
    closest_index = np.argmax(similarities)
    closest_index_match_ratio = max(similarities[0]).tolist()

    best_corpus_match = all_MIDI_files_names[closest_index]

    print('Done!')
    print('=' * 70)
    print('Match corpus index', closest_index)
    print('Match corpus ratio', closest_index_match_ratio)
    print('=' * 70)
    print('Done!')
    print('=' * 70)

    song_artist = best_corpus_match[0]     # key into the scores shard
    zlib_file_name = best_corpus_match[1]  # zlib-compressed pickle of scores

    print('Fetching MIDI score...')

    with open(zlib_file_name, 'rb') as f:
        compressed_data = f.read()

    # Decompress the data
    decompressed_data = zlib.decompress(compressed_data)
    # NOTE(review): pickle.loads is only acceptable because these shards ship
    # with the app — never point this at untrusted data.
    scores_data = pickle.loads(decompressed_data)

    fnames = [s[0] for s in scores_data]
    fnameidx = fnames.index(song_artist)

    MIDI_score_data = scores_data[fnameidx][1]

    print('Sample INTs', MIDI_score_data[:12])
    print('=' * 70)

    # BUGFIX: the original tested the undefined name `outy`; the decoded
    # token list fetched above is MIDI_score_data.
    if len(MIDI_score_data) != 0:

        song = MIDI_score_data
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [-1] * 16
        channels = [0] * 16
        channels[9] = 1  # channel 9 reserved for drums

        # Decode the token stream (Giant Music Transformer encoding):
        #   [0, 256)      -> delta time, 16 ticks per step
        #   [256, 2304)   -> duration / velocity pair
        #   [2304, 18945) -> patch / pitch pair
        for ss in song:

            if 0 <= ss < 256:
                time += ss * 16

            if 256 <= ss < 2304:
                dur = ((ss - 256) // 8) * 16
                vel = (((ss - 256) % 8) + 1) * 15

            if 2304 <= ss < 18945:
                patch = (ss - 2304) // 129

                if patch < 128:
                    if patch not in patches:
                        # Assign the patch to the first free channel,
                        # falling back to channel 15 when all are taken.
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:  # token patch 128 == drums
                    channel = 9

                pitch = (ss - 2304) % 129

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        # Scatter plot of the decoded notes, one color per MIDI channel.
        colors = ['red', 'yellow', 'green', 'cyan',
                  'blue', 'pink', 'orange', 'purple',
                  'gray', 'white', 'gold', 'silver',
                  'lightgreen', 'indigo', 'maroon', 'turquoise']

        notes = [m for m in song_f if m[0] == 'note']
        x = [s[1] for s in notes]
        y = [s[4] for s in notes]
        c = [colors[s[3]] for s in notes]

        plt.close()
        plt.figure(figsize=(14, 5))
        ax = plt.axes(title='MIDI Match Plot')
        ax.set_facecolor('black')
        plt.scatter(x, y, c=c)
        plt.xlabel("Time in MIDI ticks")
        plt.ylabel("MIDI Pitch")

        # BUGFIX: the original read AUX_DATA[search_index][0] — both names
        # are undefined in this file; the matched title is song_artist.
        output_signature = song_artist
        track_name = 'Project Los Angeles'
        text_encoding = 'ISO-8859-1'

        list_of_MIDI_patches = [0, 24, 32, 40, 42, 46, 56, 71, 73, 0, 53, 19, 0, 0, 0, 0]

        output_header = [1000,
                         [['set_tempo', 0, 1000000],
                          ['time_signature', 0, 4, 2, 24, 8],
                          ['track_name', 0, bytes(output_signature, text_encoding)]]]

        # One patch_change per channel, then the track-name event
        # (replaces the 16 hand-written patch_change lines).
        patch_list = [['patch_change', 0, ch, list_of_MIDI_patches[ch]]
                      for ch in range(16)]
        patch_list.append(['track_name', 0, bytes(track_name, text_encoding)])

        output = output_header + [patch_list + song_f]

        midi_file_name = "MIDI-Search-Sample.mid"
        with open(midi_file_name, 'wb') as f:
            # BUGFIX: the original called MIDI.score2midi, but no MIDI module
            # is imported; TMIDIX bundles the same score2midi converter.
            f.write(TMIDIX.score2midi(output))

        # BUGFIX: the original called undefined `synthesis(MIDI.score2opus(...))`;
        # render through the imported midi_to_colab_audio helper instead.
        # TODO(review): confirm keyword names against midi_to_colab_audio.py.
        audio = midi_to_colab_audio(midi_file_name,
                                    soundfont_path=soundfont_path,
                                    sample_rate=44100,
                                    output_for_gradio=True)

        print('-' * 70)
        print('Req execution time:', (reqtime.time() - start_time), 'sec')

        yield song_artist, midi_file_name, (44100, audio), plt
#==========================================================================================================
if __name__ == "__main__":

    # All names bound here (PDT, soundfont_path, all_MIDI_files_names,
    # corpus_embeddings, model) are read as globals by find_midi.
    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    soundfont_path = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"

    print('Loading files list...')
    all_MIDI_files_names = TMIDIX.Tegridy_Any_Pickle_File_Reader('all_MIDI_files_names')
    print('Done!')
    print('=' * 70)

    print('Loading clean_midi corpus...')
    # NOTE(review): loaded for parity with the original app but never read
    # afterwards in this file — confirm before removing.
    clean_midi_artist_song_description_summaries_lyrics_score = TMIDIX.Tegridy_Any_Pickle_File_Reader('clean_midi_artist_song_description_summaries_lyrics_scores')
    print('Done!')
    print('=' * 70)

    print('Loading MIDI corpus embeddings...')
    corpus_embeddings = np.load('MIDI_corpus_embeddings_all-mpnet-base-v2.npz')['data']
    print('Done!')
    print('=' * 70)

    print('Loading Sentence Transformer model...')
    model = SentenceTransformer('all-mpnet-base-v2')
    print('Done!')
    print('=' * 70)

    # --- Gradio UI -------------------------------------------------------
    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Advanced MIDI Search</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Search and explore 179k+ MIDI titles</h1>")
        gr.Markdown("![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.MIDI-Search&style=flat)\n\n"
                    "Giant Music Transformer Aux Data Demo\n\n"
                    "Please see [Giant Music Transformer](https://github.com/asigalov61/Giant-Music-Transformer) for more information and features\n\n"
                    "[Open In Colab]"
                    "(https://colab.research.google.com/github/asigalov61/Giant-Music-Transformer/blob/main/Giant_Music_Transformer_TTM.ipynb)"
                    " for all features"
                    )

        title = gr.Textbox(label="Desired Song Title", value="Family Guy")
        artist = gr.Textbox(label="Desired Song Artist", value="TV Themes")
        submit = gr.Button()

        gr.Markdown("# Search results")

        output_midi_seq = gr.Textbox(label="Found MIDI search title")
        output_audio = gr.Audio(label="Output MIDI search sample audio", format="mp3", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI search sample plot")
        output_midi = gr.File(label="Output MIDI search sample MIDI", file_types=[".mid"])

        # find_midi is a generator; gradio streams its yielded outputs.
        run_event = submit.click(find_midi, [title, artist],
                                 [output_midi_seq, output_midi, output_audio, output_plot])

    app.launch()