Mercurial > hg > simscene-py
comparison python/simscene.py @ 51:ebf92ed7d680 tip master
Added -fd (--full-duration) argument.
author | Emmanouil Theofanis Chourdakis <e.t.chourdakis@qmul.ac.uk> |
---|---|
date | Sun, 30 Sep 2018 13:21:49 +0100 |
parents | b11264117ddb |
children |
comparison
equal
deleted
inserted
replaced
50:275d04483bf7 | 51:ebf92ed7d680 |
---|---|
351 | 351 |
352 if 'annot_format' in kwargs: | 352 if 'annot_format' in kwargs: |
353 annot_format = kwargs['annot_format'] | 353 annot_format = kwargs['annot_format'] |
354 else: | 354 else: |
355 annot_format = 'sed_eval' | 355 annot_format = 'sed_eval' |
356 | |
357 if 'full_duration' in kwargs: | |
358 full_duration = True | |
359 else: | |
360 full_duration = False | |
356 | 361 |
357 # Stores the starting and ending times of every track for visualization | 362 # Stores the starting and ending times of every track for visualization |
358 # purposes | 363 # purposes |
359 scene_starting_times = [] | 364 scene_starting_times = [] |
360 scene_ending_times = [] | 365 scene_ending_times = [] |
606 | 611 |
607 else: | 612 else: |
608 # If 0, then start next sample after this one (set it to the duration of the sample) | 613 # If 0, then start next sample after this one (set it to the duration of the sample) |
609 if mean_time_between_instances == 0: | 614 if mean_time_between_instances == 0: |
610 mean_time_between_instances = len(wav)/float(SR) | 615 mean_time_between_instances = len(wav)/float(SR) |
616 | |
617 # If we are using -fd (full_duration) for each event then mean_time_between_instances denotes time AFTER | |
618 # the end of the previous event. | |
619 if full_duration and mean_time_between_instances > 0: | |
620 mean_time_between_instances += len(wav)/float(SR) | |
611 | 621 |
612 # Store the successive starting and ending times of the events (given e.g. the model) | 622 # Store the successive starting and ending times of the events (given e.g. the model) |
613 # in the following lists. | 623 # in the following lists. |
614 start_times = [events_df['start_time'].loc[n]] | 624 start_times = [events_df['start_time'].loc[n]] |
615 end_times = [start_times[-1]+len(wav)/float(SR)] | 625 end_times = [start_times[-1]+len(wav)/float(SR)] |
898 "`abstract': values are computed from an abstract representation of an existing acoustic scene. " | 908 "`abstract': values are computed from an abstract representation of an existing acoustic scene. " |
899 "`replicate': values are replicated from an existing acoustic scene. (NOT IMPLEMENTED)", | 909 "`replicate': values are replicated from an existing acoustic scene. (NOT IMPLEMENTED)", |
900 choices=['generate', 'abstract', 'replicate'] | 910 choices=['generate', 'abstract', 'replicate'] |
901 ) | 911 ) |
902 time_mode = 'generate' | 912 time_mode = 'generate' |
903 | 913 |
914 argparser.add_argument( | |
915 '-fd', '--full-duration', | |
916 action='store_true', | |
917 help="If enabled, times specified in the recipe refer to the time after the previous file finishes." |
918 ) | |
919 full_duration = False | |
920 | |
904 argparser.add_argument( | 921 argparser.add_argument( |
905 '-R', '--ebr-mode', | 922 '-R', '--ebr-mode', |
906 type=str, | 923 type=str, |
907 help="Mode for Event to Background power level ratio. `generate': values must be set for each track in the " | 924 help="Mode for Event to Background power level ratio. `generate': values must be set for each track in the " |
908 "score files. `abstract': values are computed from an abstract representation of an existing acoustic " | 925 "score files. `abstract': values are computed from an abstract representation of an existing acoustic " |
991 input_path = args.input_path | 1008 input_path = args.input_path |
992 logging.debug("Using `{}' as input path".format(input_path)) | 1009 logging.debug("Using `{}' as input path".format(input_path)) |
993 if args.output_path: | 1010 if args.output_path: |
994 output_path = args.output_path | 1011 output_path = args.output_path |
995 logging.debug("Saving to `{}'".format(output_path)) | 1012 logging.debug("Saving to `{}'".format(output_path)) |
1013 if args.full_duration: | |
1014 full_duration = True | |
996 if args.scene_duration: | 1015 if args.scene_duration: |
997 if not (args.score_backgrounds or args.score_events): | 1016 if not (args.score_backgrounds or args.score_events): |
998 print("You must provide one of -e or -b") | 1017 print("You must provide one of -e or -b") |
999 else: | 1018 else: |
1000 if args.image_format: | 1019 if args.image_format: |
1044 annotation_file=annotation_file, | 1063 annotation_file=annotation_file, |
1045 audio_file=audio_file, | 1064 audio_file=audio_file, |
1046 figure_verbosity=figure_verbosity, | 1065 figure_verbosity=figure_verbosity, |
1047 end_cut=end_cut, | 1066 end_cut=end_cut, |
1048 image_format=image_format, | 1067 image_format=image_format, |
1049 append_to_filename=append_to_filename) | 1068 append_to_filename=append_to_filename, |
1069 full_duration=full_duration) | |
1050 else: | 1070 else: |
1051 for n in range(generate_n): | 1071 for n in range(generate_n): |
1052 if tag: | 1072 if tag: |
1053 append_to_filename = '{}_{}'.format(tag, n) | 1073 append_to_filename = '{}_{}'.format(tag, n) |
1054 else: | 1074 else: |
1063 annotation_file=annotation_file, | 1083 annotation_file=annotation_file, |
1064 audio_file=audio_file, | 1084 audio_file=audio_file, |
1065 figure_verbosity=min(figure_verbosity, 1), | 1085 figure_verbosity=min(figure_verbosity, 1), |
1066 end_cut=end_cut, | 1086 end_cut=end_cut, |
1067 image_format=image_format, | 1087 image_format=image_format, |
1068 append_to_filename=append_to_filename) | 1088 append_to_filename=append_to_filename, |
1089 full_duration=full_duration) |