require "csv"       # NOTE: lowercase "csv" — require "CSV" breaks on case-sensitive filesystems
require "open-uri"

# Root directory for all downloaded MIREX data.
# EDIT THIS TO BE YOUR OWN DESIRED PATH. IT WILL NEED TO HOLD ROUGHLY 70 MB OF DATA.
mirex_path = "/Users/jordan/Desktop/MIREX_data"

# Download the resource at +uri+ and save it to +filename+.
#
# Uses URI.open because Kernel#open no longer accepts URLs as of Ruby 3.0,
# and writes in binary mode ('wb') so archive payloads are not corrupted by
# newline translation (matches the Part 2 download loop below).
def url_download(uri, filename=".")
  File.open(filename, 'wb') do |out|
    out.print URI.open(uri).read
  end
end

# Split one downloaded MIREX result document (a .js/JSON file) into two
# 2-column text annotations written next to it:
#   "<base>_gt.txt"   — the ground-truth annotation
#   "<base>_pred.txt" — the algorithm's predicted segmentation
def convert_file(filename)
  ann_out_file = filename[0..-4] + "_gt.txt"   # filename[0..-4] strips the ".js" extension
  alg_out_file = filename[0..-4] + "_pred.txt"
  # Read the document with "," as the line separator, then split on square
  # brackets to isolate the two JSON arrays (annotation at index 2,
  # algorithm output at index 4).
  text = File.open(filename, 'r') { |f| f.readlines(",") }.join("").split(/[\[\]]/)
  ann = text[2].split(/[\{\}]/)
  alg = text[4].split(/[\{\}]/)
  # Block form closes each handle even if json_2_text raises.
  File.open(ann_out_file, 'w') { |f| f.write(json_2_text(ann)) }
  File.open(alg_out_file, 'w') { |f| f.write(json_2_text(alg)) }
end

# Convert the brace-split fragments of one JSON segment array into a
# 2-column "<onset>\t<label>" annotation string, terminated by a final
# "<offset>\tEnd" row taken from the last segment.
#
# +json+ alternates separators (even indices) and segment bodies of the form
# '"onset": T, "offset": T, "label": "NAME"' (odd indices).
def json_2_text(json)
  txt = []
  # Odd indices hold the segment bodies. The exclusive range (1...length)
  # avoids indexing one past the end when the array has odd length.
  (1...json.length).step(2).each do |indx|
    els = json[indx].split(",")
    # Make a LAB-style annotation (3-column):
    # txt.push([els[0].split(" ")[-1].to_f, els[1].split(" ")[-1].to_f, els[2].split("\"")[-1]].join("\t"))
    # Make a TXT-style annotation (2-column): onset time, label.
    txt.push([els[0].split(" ")[-1].to_f, els[2].split("\"")[-1]].join("\t"))
  end
  # Close the annotation with the offset of the final segment.
  txt.push([json[-1].split(",")[1].split(" ")[-1].to_f, "End"].join("\t"))
  txt.join("\n")
end

# # # # PART 1: DOWNLOAD ALL THE STRUCTURAL ANALYSIS EVALUATION
# # # # PART 1: DOWNLOAD ALL THE STRUCTURAL ANALYSIS EVALUATION DATA PUBLISHED BY MIREX

# Algorithms and datasets evaluated in the MIREX structural segmentation task:
algos = ["SP1", "SMGA2", "MHRAF1", "SMGA1", "SBV1", "KSP2", "OYZS1", "KSP3", "KSP1"]
datasets = ["mrx09", "mrx10_1", "mrx10_2", "sal"]
year = "2012"
puts "Thanks for starting the script! Stay tuned for periodic updates."

# Build the local directory tree and fetch one per-track CSV per (dataset, algorithm) pair:
Dir.mkdir(mirex_path) unless File.directory?(mirex_path)
puts("Downloading CSV files...\n")
datasets.each do |dataset_id|
  # Make dataset directory:
  dataset_dir = File.join(mirex_path, dataset_id)
  Dir.mkdir(dataset_dir) unless File.directory?(dataset_dir)
  algos.each do |algo_id|
    # Make algorithm directory:
    algo_dir = File.join(mirex_path, dataset_id, algo_id)
    Dir.mkdir(algo_dir) unless File.directory?(algo_dir)
    # Download this algorithm's per-track results spreadsheet into it:
    local_csv = File.join(algo_dir, "per_track_results.csv")
    remote_csv = File.join("http://nema.lis.illinois.edu/nema_out/mirex#{year}", "/results/struct", dataset_id, algo_id, "per_track_results.csv")
    url_download(remote_csv, local_csv)
  end
end

puts "..done with that."

puts "Now we will download all the files output by each algorithm. This could take a while depending on your connection."
puts "Since this script points to #{datasets.length} datasets and #{algos.length} algorithms, you should expect to wait however long it takes between each of the next lines to appear, times #{datasets.length * algos.length}."
# Read each CSV file and download all the json files it points to:
datasets.each do |dataset_id|
  algos.each do |algo_id|
    puts("Starting to download #{dataset_id} dataset for #{algo_id} algorithm...\n")
    results_dir = File.join(mirex_path, dataset_id, algo_id)
    rows = File.read(File.join(results_dir, "per_track_results.csv")).split("\n")
    rows.shift # discard the CSV header row
    # Each remaining row names one song; extract its id and fetch the
    # corresponding json result document.
    rows.each do |row|
      song_id = row.split(",")[1]
      remote_js = "http://nema.lis.illinois.edu/nema_out/mirex#{year}/results/struct/#{dataset_id}/#{algo_id.downcase}segments#{song_id.delete('_')}.js"
      url_download(remote_js, File.join(results_dir, song_id + ".js"))
    end
  end
  puts("Done with #{dataset_id} dataset!\n")
end

puts "..done with that."

puts "Now, a much faster step: turning all the json files you downloaded into simpler text files."
# Scan for all the json files, and convert each one into two text files,
# one for the algorithm output, one for the annotation:
Dir.glob(File.join(mirex_path, "*", "*", "*.js")).each do |json_file|
  convert_file(json_file)
  puts json_file
end

puts "..done with that."

puts "Now, PART 2 of the script: we download all the zip files (from various websites) that contain the public collections of ground truth files. This will only take a couple minutes, depending on connection speed (it's about 4 MB total)."
jordan@6: jordan@6: jordan@6: # # # # PART 2: GET (AND CONVERT) THE ANNOTATION DATA PUBLISHED BY OTHERS jordan@6: jordan@6: # Download and unzip all public annotations jordan@6: list_of_db_urls = ["https://staff.aist.go.jp/m.goto/RWC-MDB/AIST-Annotation/AIST.RWC-MDB-P-2001.CHORUS.zip", "https://staff.aist.go.jp/m.goto/RWC-MDB/AIST-Annotation/AIST.RWC-MDB-C-2001.CHORUS.zip", "https://staff.aist.go.jp/m.goto/RWC-MDB/AIST-Annotation/AIST.RWC-MDB-J-2001.CHORUS.zip", "https://staff.aist.go.jp/m.goto/RWC-MDB/AIST-Annotation/AIST.RWC-MDB-G-2001.CHORUS.zip", "http://www.music.mcgill.ca/~jordan/salami/releases/SALAMI_data_v1.2.zip", "http://www.ifs.tuwien.ac.at/mir/audiosegmentation/dl/ep_groundtruth_excl_Paulus.zip", "http://musicdata.gforge.inria.fr/IRISA.RWC-MDB-P-2012.SEMLAB_v003_full.zip", "http://musicdata.gforge.inria.fr/IRISA.RWC-MDB-P-2012.SEMLAB_v003_reduced.zip", "http://musicdata.gforge.inria.fr/IRISA.RWC-MDB-P-2001.BLOCKS_v001.zip", "http://www.isophonics.net/files/annotations/The%20Beatles%20Annotations.tar.gz", "http://www.isophonics.net/files/annotations/Carole%20King%20Annotations.tar.gz", "http://www.isophonics.net/files/annotations/Queen%20Annotations.tar.gz", "http://www.isophonics.net/files/annotations/Michael%20Jackson%20Annotations.tar.gz", "http://www.isophonics.net/files/annotations/Zweieck%20Annotations.tar.gz", "http://www.cs.tut.fi/sgn/arg/paulus/beatles_sections_TUT.zip", "http://www.iua.upf.edu/~perfe/annotations/sections/beatles/structure_Beatles.rar"] jordan@6: jordan@6: public_data_path = File.join(mirex_path,"public_data") jordan@6: Dir.mkdir(public_data_path) unless File.directory?(public_data_path) jordan@6: list_of_db_urls.each do |db_url| jordan@6: open(File.join(public_data_path,File.basename(db_url)), 'wb') do |foo| jordan@6: foo.print open(db_url).read jordan@6: end jordan@6: end jordan@6: jordan@6: # # # # NOW, PLEASE EXIT THE SCRIPT, AND UNZIP ALL THOSE PACKAGES. 
# # # # WHEN YOU'RE DONE, GO ONTO THE PARENT MATLAB FILE TO RUN THE ANALYSES.
puts "..done with that.\n\n"
# Fixed typo in the user-facing message: "apppears" -> "appears".
puts "Script appears to have ended successfully. All files were downloaded and saved to " + public_data_path + "."
puts "To continue please unpack all zip files, start MATLAB, and run 2-generate_smith2013_ismir.m. You can read more on README."
puts "Important: be sure that the zip files unpack into the correct file structure. Again, see README for details."