diff pyspark/sonic-annotator-notimeside/test_sonic_annotator_notimeside.py @ 0:e34cf1b6fe09 tip

commit
author Daniel Wolff
date Sat, 20 Feb 2016 18:14:24 +0100
parents
children
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyspark/sonic-annotator-notimeside/test_sonic_annotator_notimeside.py	Sat Feb 20 18:14:24 2016 +0100
@@ -0,0 +1,76 @@
+# Part of DML (Digital Music Laboratory)
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+# 
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+#!/usr/local/spark-1.0.0-bin-hadoop2/bin/spark-submit
+# -*- coding: utf-8 -*-
+__author__="wolffd"
+__date__ ="$11-Jul-2014 15:31:01$"
+
+from pyspark import SparkConf, SparkContext
+import sys
+import os
+from sonic_annotator_vamp import *
+
+# this is the main routine to be submitted as a spark job
+#
+#
+# Running python applications through ./bin/pyspark is deprecated as of Spark 1.0.
+# Use ./bin/spark-submit --py-files sonic_annotator_vamp.py <python file>
+# you can also provide a zip of all necessary python files
+#
+# @param string audiopath root of the folder structure to be traversed
+# @param string transform_file path to the .n3 turtle file describing the transform
+#def main(audiopath = '/home/wolffd/Documents/python/dml/TimeSide/tests/samples/',
+#         transform_file = '/home/wolffd/Documents/python/dml/pyspark/sonic-annotator-notimeside/silvet_settings.n3',
+#         masterip = '10.2.165.101'):
+def main(audiopath = '/CHARM-Collection',
+         transform_file = 'bbc_speechmusic.n3',
+         masterip = '0.0.0.0'):
+    # Walk audiopath recursively, collect every .wav/.mp3/.flac file,
+    # parallelise the file list on the Spark cluster at masterip:7077 and
+    # map sonic_annotator_vamp.transform() over each file.  Results are
+    # collected on the driver but not otherwise used here.
+    #
+    # @param string audiopath root of the folder structure to be traversed
+    # @param string transform_file path to the .n3 turtle file describing the transform
+    # @param string masterip IP of the Spark master (port 7077 is assumed)
+    print "PySpark Telemeta and Vamp Test"
+
+    # configure Spark; caveat: the commented-out "local" profile uses just 1 core
+    conf = (SparkConf()
+            #.setMaster("local")
+            .setMaster("spark://" + masterip + ":7077")
+            .setAppName("Sonic Annotating")
+            .set("spark.executor.memory", "40g")
+            .set("spark.cores.max", "35"));
+    sc = SparkContext(conf = conf)
+    
+    # traverse the folder structure, gathering every supported audio file
+    data = []
+    for (dirpath, dirnames, filenames) in os.walk(audiopath):
+        for file in filenames:
+            if file.endswith(".wav") or file.endswith(".mp3") or file.endswith(".flac"):
+                data.append(os.path.join(dirpath, file))
+    njobs = len(data)
+    donejobs = sc.accumulator(0)   # NOTE(review): this accumulator is never incremented or read
+    print "Total: " + str(njobs) + " files"
+
+    # define distributed dataset (one element per audio file path)
+    distData = sc.parallelize(data)
+    
+    # define map: run the transform on every file (transform() comes from sonic_annotator_vamp)
+    m1 = distData.map(lambda x: transform(wav_file=x,transform_file=transform_file))
+
+    # reduce (just do the maps ;) ) -- collect() only forces execution; the value is discarded
+    result = m1.collect()
+
+if __name__ == "__main__":
+    # CLI: <audiopath> <transform_file>; otherwise fall back to the defaults in main().
+    # NOTE(review): sys.argv[0] is the script name, so two user arguments make
+    # len(sys.argv) == 3; any extra arguments are silently ignored and masterip
+    # can only be changed via its default value.
+    if len(sys.argv) >= 3:
+        main(sys.argv[1],sys.argv[2])
+    else:
+        main()