#!/usr/local/spark-1.0.0-bin-hadoop2/bin/spark-submit
# -*- coding: utf-8 -*-
#
# Part of DML (Digital Music Laboratory)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

__author__ = "wolffd"
__date__ = "$11-Jul-2014 15:31:01$"

import os
import sys

from pyspark import SparkConf, SparkContext

# only transform() is used below, so import it explicitly instead of "*"
from sonic_annotator_vamp import transform

# This is the main routine to be submitted as a Spark job.
#
# Running Python applications through ./bin/pyspark is deprecated as of Spark 1.0.
# Use ./bin/spark-submit --py-files sonic_annotator_vamp.py <python file>
# (the --py-files option must precede the application file); you can also
# provide a zip of all necessary Python files.
#
# @param string audiopath      root of the folder structure to be traversed
# @param string transform_file path to the .n3 Turtle file describing the transform
#
# Previous development defaults:
# def main(audiopath = '/home/wolffd/Documents/python/dml/TimeSide/tests/samples/',
#          transform_file = '/home/wolffd/Documents/python/dml/pyspark/sonic-annotator-notimeside/silvet_settings.n3',
#          masterip = '10.2.165.101'):
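#
# Example invocation (a sketch; "spark_annotate.py" stands in for whatever this
# file is named, and the spark-submit path matches the shebang above):
#   /usr/local/spark-1.0.0-bin-hadoop2/bin/spark-submit \
#       --py-files sonic_annotator_vamp.py \
#       spark_annotate.py /CHARM-Collection bbc_speechmusic.n3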
def main(audiopath='/CHARM-Collection',
         transform_file='bbc_speechmusic.n3',
         masterip='0.0.0.0'):
    print "PySpark Telemeta and Vamp Test"

    # configure Spark; beware: the "local" profile uses just 1 core
    conf = (SparkConf()
            # .setMaster("local")
            .setMaster("spark://" + masterip + ":7077")
            .setAppName("Sonic Annotating")
            .set("spark.executor.memory", "40g")
            .set("spark.cores.max", "35"))
    sc = SparkContext(conf=conf)

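    # For single-machine testing, a "local[*]" master (all local cores) could
    # be used instead of the standalone cluster URL above; a minimal sketch:
    #   conf = SparkConf().setMaster("local[*]").setAppName("Sonic Annotating")
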
    # traverse the folder structure, collecting the audio files to process
    data = []
    for (dirpath, dirnames, filenames) in os.walk(audiopath):
        for filename in filenames:
            if filename.endswith((".wav", ".mp3", ".flac")):
                data.append(os.path.join(dirpath, filename))
    njobs = len(data)
    # accumulator for counting completed jobs (not otherwise used below)
    donejobs = sc.accumulator(0)
    print "Total: " + str(njobs) + " files"

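    # A possible way to put the accumulator to work (a sketch, left inactive;
    # transform_and_count is a hypothetical wrapper): each worker bumps the
    # counter after finishing a file, and the driver can read donejobs.value.
    # def transform_and_count(path):
    #     res = transform(wav_file=path, transform_file=transform_file)
    #     donejobs.add(1)
    #     return res
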
    # define the distributed dataset
    distData = sc.parallelize(data)

    # define the map: run the Vamp transform on each audio file
    m1 = distData.map(lambda x: transform(wav_file=x, transform_file=transform_file))

    # no reduce step is needed; collect() is the action that triggers the lazy map
    result = m1.collect()
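    # Note: for large collections, writing results from the workers, e.g. via
    # m1.saveAsTextFile(outdir) (outdir being a hypothetical output directory),
    # would avoid pulling everything back to the driver; collect() is kept
    # here to match the original flow.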

if __name__ == "__main__":
    if len(sys.argv) >= 3:
        main(sys.argv[1], sys.argv[2])
    else:
        main()