dml-open-backendtools: pyspark/sonic-annotator-notimeside/test_sonic_annotator_notimeside.py @ 0:e34cf1b6fe09 (tip)

| author   | Daniel Wolff                    |
|----------|---------------------------------|
| date     | Sat, 20 Feb 2016 18:14:24 +0100 |
| parents  |                                 |
| children |                                 |
comparison: -1:000000000000 → 0:e34cf1b6fe09 (file added in this revision)
# Part of DML (Digital Music Laboratory)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#!/usr/local/spark-1.0.0-bin-hadoop2/bin/spark-submit
# -*- coding: utf-8 -*-
__author__ = "wolffd"
__date__ = "$11-Jul-2014 15:31:01$"

from pyspark import SparkConf, SparkContext
import sys
import os
# only the transform() routine is used from the helper module
from sonic_annotator_vamp import transform

# This is the main routine to be submitted as a Spark job.
#
#
# Running Python applications through ./bin/pyspark is deprecated as of Spark 1.0.
# Use ./bin/spark-submit --py-files sonic_annotator_vamp.py <python file>;
# you can also provide a zip of all necessary Python files.
#
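# A sketch of the full command under the defaults below (the paths and the
# script name come from this file and commit; exact flag placement may
# differ between Spark versions):
#
#   ./bin/spark-submit --py-files sonic_annotator_vamp.py \
#       test_sonic_annotator_notimeside.py /CHARM-Collection bbc_speechmusic.n3
#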
# @param string audiopath       root of the folder structure to be traversed
# @param string transform_file  path to the .n3 Turtle file describing the transform
# def main(audiopath='/home/wolffd/Documents/python/dml/TimeSide/tests/samples/',
#          transform_file='/home/wolffd/Documents/python/dml/pyspark/sonic-annotator-notimeside/silvet_settings.n3',
#          masterip='10.2.165.101'):
def main(audiopath='/CHARM-Collection',
         transform_file='bbc_speechmusic.n3',
         masterip='0.0.0.0'):
    print("PySpark Telemeta and Vamp Test")

    # configure Spark; caveat: the plain "local" master profile uses just one core
    conf = (SparkConf()
            # .setMaster("local")
            .setMaster("spark://" + masterip + ":7077")
            .setAppName("Sonic Annotating")
            .set("spark.executor.memory", "40g")
            .set("spark.cores.max", "35"))
    sc = SparkContext(conf=conf)

    # traverse the folder structure, collecting all audio files
    data = []
    for (dirpath, dirnames, filenames) in os.walk(audiopath):
        for file in filenames:
            if file.endswith((".wav", ".mp3", ".flac")):
                data.append(os.path.join(dirpath, file))
    njobs = len(data)
    donejobs = sc.accumulator(0)  # progress counter (never incremented below)
    print("Total: " + str(njobs) + " files")

    # define distributed dataset
    distData = sc.parallelize(data)

    # define map
    m1 = distData.map(lambda x: transform(wav_file=x, transform_file=transform_file))

    # reduce (just do the maps ;) )
    result = m1.collect()

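    # The donejobs accumulator above is never incremented. A minimal sketch of
    # how the tasks could update it (hypothetical wrapper, not in the original):
    #
    #   def transform_and_count(path):
    #       out = transform(wav_file=path, transform_file=transform_file)
    #       donejobs.add(1)  # accumulator updates are sent back to the driver
    #       return out
    #
    #   result = distData.map(transform_and_count).collect()
    #   print("Done: " + str(donejobs.value) + " of " + str(njobs) + " files")
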
if __name__ == "__main__":
    # optional command-line arguments: audiopath and transform_file
    if len(sys.argv) >= 3:
        main(sys.argv[1], sys.argv[2])
    else:
        main()