import os
import sys
import math
from array import array
from tempfile import NamedTemporaryFile, TemporaryDirectory
import filecmp
from unittest import TestCase
from genty import genty, genty_dataset
from auditok.io import (
    DATA_FORMAT,
    AudioIOError,
    AudioParameterError,
    BufferAudioSource,
    check_audio_data,
    _get_audio_parameters,
    _array_to_bytes,
    _mix_audio_channels,
    _extract_selected_channel,
    _load_raw,
    _load_wave,
    _load_with_pydub,
    from_file,
    _save_raw,
    _save_wave,
    to_file,
)


if sys.version_info >= (3, 0):
    PYTHON_3 = True
    from unittest.mock import patch, Mock
else:
    PYTHON_3 = False
    from mock import patch, Mock

AUDIO_PARAMS_SHORT = {"sr": 16000, "sw": 2, "ch": 1}
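# Short-form audio parameter keys ("sr", "sw", "ch") as passed to
# auditok.io._get_audio_parameters in the missing-parameter tests below.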


def _sample_generator(*data_buffers):
    """
    Takes a list of many mono audio data buffers and makes a sample generator
    of interleaved audio samples, one sample from each channel. The resulting
    generator can be used to build a multichannel audio buffer.
    >>> gen = _sample_generator("abcd", "ABCD")
    >>> list(gen)
    ['a', 'A', 'b', 'B', 'c', 'C', 'd', 'D']
    """
    frame_gen = zip(*data_buffers)
    return (sample for frame in frame_gen for sample in frame)

def _generate_pure_tone(
    frequency, duration_sec=1, sampling_rate=16000, sample_width=2, volume=1e4
):
    """
    Generates a pure tone with the given frequency.
    """
    assert frequency <= sampling_rate / 2
    max_value = (2 ** (sample_width * 8) // 2) - 1
    if volume > max_value:
        volume = max_value
    fmt = DATA_FORMAT[sample_width]
    total_samples = int(sampling_rate * duration_sec)
    step = frequency / sampling_rate
    two_pi_step = 2 * math.pi * step
    data = array(
        fmt,
        (
            int(math.sin(two_pi_step * i) * volume)
            for i in range(total_samples)
        ),
    )
    return data

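# Each sample produced by _generate_pure_tone above is
# int(sin(2 * pi * frequency / sampling_rate * i) * volume), with volume
# clipped to the largest value a sample of the given width can hold
# (e.g. 32767 for 2-byte samples). The dictionary below caches a few such
# tones keyed by frequency.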
PURE_TONE_DICT = {
    freq: _generate_pure_tone(freq, 1, 16000, 2) for freq in (400, 800, 1600)
}
PURE_TONE_DICT.update(
    {
        freq: _generate_pure_tone(freq, 0.1, 16000, 2)
        for freq in (600, 1150, 2400, 7220)
    }
)

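# The tests below compare loaded and saved data against the pre-generated
# tones above and against the fixture files under tests/data/, whose names
# encode their contents (e.g. test_16KHZ_3channel_400-800-1600Hz.wav).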
@genty
class TestIO(TestCase):
    @genty_dataset(
        valid_mono=(b"\0" * 113, 1, 1),
        valid_stereo=(b"\0" * 160, 1, 2),
        invalid_mono_sw_2=(b"\0" * 113, 2, 1, False),
        invalid_stereo_sw_1=(b"\0" * 113, 1, 2, False),
        invalid_stereo_sw_2=(b"\0" * 158, 2, 2, False),
    )
    def test_check_audio_data(self, data, sample_width, channels, valid=True):

        if not valid:
            with self.assertRaises(AudioParameterError):
                check_audio_data(data, sample_width, channels)
        else:
            self.assertIsNone(check_audio_data(data, sample_width, channels))

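    # check_audio_data treats a buffer as valid when its size is a whole
    # number of frames, i.e. a multiple of sample_width * channels: 113 bytes
    # is fine for 1-byte mono but not for 2-byte mono or 1-byte stereo.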
    @genty_dataset(
        mono_1byte=([400], 1),
        stereo_1byte=([400, 600], 1),
        three_channel_1byte=([400, 600, 2400], 1),
        mono_2byte=([400], 2),
        stereo_2byte=([400, 600], 2),
        three_channel_2byte=([400, 600, 1150], 2),
        mono_4byte=([400], 4),
        stereo_4byte=([400, 600], 4),
        four_channel_4byte=([400, 600, 1150, 7220], 4),
    )
    def test_mix_audio_channels(self, frequencies, sample_width):
        sampling_rate = 16000
        channels = len(frequencies)
        mono_channels = [
            _generate_pure_tone(
                freq,
                duration_sec=0.1,
                sampling_rate=sampling_rate,
                sample_width=sample_width,
            )
            for freq in frequencies
        ]
        fmt = DATA_FORMAT[sample_width]
        expected = _array_to_bytes(
            array(
                fmt,
                (sum(samples) // channels for samples in zip(*mono_channels)),
            )
        )
        data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
        mixed = _mix_audio_channels(data, channels, sample_width)
        self.assertEqual(mixed, expected)

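    # "mix" down-mixing is the per-frame integer average of all channels
    # (sum(samples) // channels), which is exactly how the expected buffer
    # above is built.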
    @genty_dataset(
        mono_1byte=([400], 1, 0),
        stereo_1byte_2nd_channel=([400, 600], 1, 1),
        mono_2byte=([400], 2, 0),
        stereo_2byte_1st_channel=([400, 600], 2, 0),
        stereo_2byte_2nd_channel=([400, 600], 2, 1),
        three_channel_2byte_last_negative_idx=([400, 600, 1150], 2, -1),
        three_channel_2byte_2nd_negative_idx=([400, 600, 1150], 2, -2),
        three_channel_2byte_1st_negative_idx=([400, 600, 1150], 2, -3),
        three_channel_4byte_1st=([400, 600, 1150], 4, 0),
        three_channel_4byte_last_negative_idx=([400, 600, 1150], 4, -1),
    )
    def test_extract_selected_channel(
        self, frequencies, sample_width, use_channel
    ):

        mono_channels = [
            _generate_pure_tone(
                freq,
                duration_sec=0.1,
                sampling_rate=16000,
                sample_width=sample_width,
            )
            for freq in frequencies
        ]
        channels = len(frequencies)
        fmt = DATA_FORMAT[sample_width]
        expected = _array_to_bytes(mono_channels[use_channel])
        data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
        selected_channel = _extract_selected_channel(
            data, channels, sample_width, use_channel
        )
        self.assertEqual(selected_channel, expected)

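    # Negative use_channel values follow Python list indexing (-1 is the last
    # channel), as implied by the expected buffer mono_channels[use_channel].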
    @genty_dataset(
        raw_with_audio_format=(
            "audio",
            "raw",
            "_load_raw",
            AUDIO_PARAMS_SHORT,
        ),
        raw_with_extension=(
            "audio.raw",
            None,
            "_load_raw",
            AUDIO_PARAMS_SHORT,
        ),
        wave_with_audio_format=("audio", "wave", "_load_wave"),
        wav_with_audio_format=("audio", "wav", "_load_wave"),
        wav_with_extension=("audio.wav", None, "_load_wave"),
        format_and_extension_both_given=("audio.dat", "wav", "_load_wave"),
        format_and_extension_both_given_b=("audio.raw", "wave", "_load_wave"),
        no_format_nor_extension=("audio", None, "_load_with_pydub"),
        other_formats_ogg=("audio.ogg", None, "_load_with_pydub"),
        other_formats_webm=("audio", "webm", "_load_with_pydub"),
    )
    def test_from_file(
        self, filename, audio_format, function_name, kwargs=None
    ):
        function_name = "auditok.io." + function_name
        if kwargs is None:
            kwargs = {}
        with patch(function_name) as patch_function:
            from_file(filename, audio_format, **kwargs)
            self.assertTrue(patch_function.called)

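    # Dispatch expected by the dataset above: a "raw" format or .raw extension
    # goes to _load_raw (and requires sampling rate, sample width and
    # channels), "wav"/"wave" go to _load_wave, and anything else falls back
    # to _load_with_pydub.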
    def test_from_file_no_pydub(self):
        with patch("auditok.io._WITH_PYDUB", False):
            with self.assertRaises(AudioIOError):
                from_file("audio", "mp3")

    @genty_dataset(
        raw_first_channel=("raw", 0, 400),
        raw_second_channel=("raw", 1, 800),
        raw_third_channel=("raw", 2, 1600),
        raw_left_channel=("raw", "left", 400),
        raw_right_channel=("raw", "right", 800),
        wav_first_channel=("wav", 0, 400),
        wav_second_channel=("wav", 1, 800),
        wav_third_channel=("wav", 2, 1600),
        wav_left_channel=("wav", "left", 400),
        wav_right_channel=("wav", "right", 800),
    )
    def test_from_file_multichannel_audio(
        self, audio_format, use_channel, frequency
    ):
        expected = PURE_TONE_DICT[frequency]
        filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.{}".format(
            audio_format
        )
        sample_width = 2
        audio_source = from_file(
            filename,
            sampling_rate=16000,
            sample_width=sample_width,
            channels=3,
            use_channel=use_channel,
        )
        fmt = DATA_FORMAT[sample_width]
        data = array(fmt, audio_source._buffer)
        self.assertEqual(data, expected)

    @genty_dataset(
        raw_mono=("raw", "mono_400Hz", (400,)),
        raw_3channel=("raw", "3channel_400-800-1600Hz", (400, 800, 1600)),
        wav_mono=("wav", "mono_400Hz", (400,)),
        wav_3channel=("wav", "3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_from_file_multichannel_audio_mix(
        self, audio_format, filename_suffix, frequencies
    ):
        sampling_rate = 16000
        sample_width = 2
        channels = len(frequencies)
        mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
        fmt = DATA_FORMAT[sample_width]
        expected = _array_to_bytes(
            array(
                fmt,
                (sum(samples) // channels for samples in zip(*mono_channels)),
            )
        )
        filename = "tests/data/test_16KHZ_{}.{}".format(
            filename_suffix, audio_format
        )
        audio_source = from_file(
            filename,
            use_channel="mix",
            sampling_rate=sampling_rate,
            sample_width=2,
            channels=channels,
        )
        mixed = audio_source._buffer
        self.assertEqual(mixed, expected)

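    # The compressed-format tests below patch auditok.io.BufferAudioSource,
    # presumably so that from_file can run against mocked pydub AudioSegment
    # objects without building a real audio source from their fake data.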
    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg_first_channel=("ogg", 0, "from_ogg"),
        ogg_second_channel=("ogg", 1, "from_ogg"),
        ogg_mix=("ogg", "mix", "from_ogg"),
        ogg_default=("ogg", None, "from_ogg"),
        mp3_left_channel=("mp3", "left", "from_mp3"),
        mp3_right_channel=("mp3", "right", "from_mp3"),
        flac_first_channel=("flac", 0, "from_file"),
        flac_second_channel=("flac", 1, "from_file"),
        flv_left_channel=("flv", "left", "from_flv"),
        webm_right_channel=("webm", "right", "from_file"),
    )
    def test_from_file_multichannel_audio_compressed(
        self, audio_format, use_channel, function, *mocks
    ):
        filename = "audio.{}".format(audio_format)
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = 2
        segment_mock._data = b"abcd"
        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                from_file(filename, use_channel=use_channel)
                self.assertTrue(open_func.called)
                self.assertTrue(ext_mock.called)

                use_channel = {"left": 0, "right": 1, None: 0}.get(
                    use_channel, use_channel
                )
                ext_mock.assert_called_with(
                    segment_mock._data,
                    segment_mock.channels,
                    segment_mock.sample_width,
                    use_channel,
                )

        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                segment_mock.channels = 1
                open_func.return_value = segment_mock
                from_file(filename, use_channel=use_channel)
                self.assertTrue(open_func.called)
                self.assertFalse(ext_mock.called)

    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg=("ogg", "from_ogg"),
        mp3=("mp3", "from_mp3"),
        flac=("flac", "from_file"),
    )
    def test_from_file_multichannel_audio_mix_compressed(
        self, audio_format, function, *mocks
    ):
        filename = "audio.{}".format(audio_format)
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = 2
        segment_mock._data = b"abcd"
        with patch("auditok.io._mix_audio_channels") as mix_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                from_file(filename, use_channel="mix")
                self.assertTrue(open_func.called)
                mix_mock.assert_called_with(
                    segment_mock._data,
                    segment_mock.channels,
                    segment_mock.sample_width,
                )

    @genty_dataset(
        default_first_channel=(None, 400),
        first_channel=(0, 400),
        second_channel=(1, 800),
        third_channel=(2, 1600),
        negative_first_channel=(-3, 400),
        negative_second_channel=(-2, 800),
        negative_third_channel=(-1, 1600),
    )
    def test_load_raw(self, use_channel, frequency):
        filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.raw"
        if use_channel is not None:
            audio_source = _load_raw(
                filename,
                sampling_rate=16000,
                sample_width=2,
                channels=3,
                use_channel=use_channel,
            )
        else:
            audio_source = _load_raw(
                filename, sampling_rate=16000, sample_width=2, channels=3
            )
        self.assertIsInstance(audio_source, BufferAudioSource)
        self.assertEqual(audio_source.sampling_rate, 16000)
        self.assertEqual(audio_source.sample_width, 2)
        self.assertEqual(audio_source.channels, 1)
        # the expected pure tone at the given frequency
        expected = PURE_TONE_DICT[frequency]
        # compare with the data read from the file
        fmt = DATA_FORMAT[2]
        data = array(fmt, audio_source._buffer)
        self.assertEqual(data, expected)

    @genty_dataset(
        mono=("mono_400Hz", (400,)),
        three_channel=("3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_load_raw_mix(self, filename_suffix, frequencies):
        sampling_rate = 16000
        sample_width = 2
        channels = len(frequencies)
        mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]

        fmt = DATA_FORMAT[sample_width]
        expected = _array_to_bytes(
            array(
                fmt,
                (sum(samples) // channels for samples in zip(*mono_channels)),
            )
        )
        filename = "tests/data/test_16KHZ_{}.raw".format(filename_suffix)
        audio_source = _load_raw(
            filename,
            use_channel="mix",
            sampling_rate=sampling_rate,
            sample_width=2,
            channels=channels,
        )
        mixed = audio_source._buffer
        self.assertEqual(mixed, expected)
        self.assertIsInstance(audio_source, BufferAudioSource)
        self.assertEqual(audio_source.sampling_rate, sampling_rate)
        self.assertEqual(audio_source.sample_width, sample_width)
        self.assertEqual(audio_source.channels, 1)

    @genty_dataset(
        missing_sampling_rate=("sr",),
        missing_sample_width=("sw",),
        missing_channels=("ch",),
    )
    def test_load_raw_missing_audio_param(self, missing_param):
        with self.assertRaises(AudioParameterError):
            params = AUDIO_PARAMS_SHORT.copy()
            del params[missing_param]
            srate, swidth, channels, _ = _get_audio_parameters(params)
            _load_raw("audio", srate, swidth, channels)

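    # The AudioParameterError is presumably raised by _get_audio_parameters as
    # soon as one of "sr", "sw" or "ch" is missing, before _load_raw is ever
    # reached; test_save_wave_missing_audio_param below uses the same pattern.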
    @genty_dataset(
        default_first_channel=(None, 400),
        first_channel=(0, 400),
        second_channel=(1, 800),
        third_channel=(2, 1600),
        negative_first_channel=(-3, 400),
        negative_second_channel=(-2, 800),
        negative_third_channel=(-1, 1600),
    )
    def test_load_wave(self, use_channel, frequency):
        filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.wav"
        if use_channel is not None:
            audio_source = _load_wave(filename, use_channel=use_channel)
        else:
            audio_source = _load_wave(filename)
        self.assertIsInstance(audio_source, BufferAudioSource)
        self.assertEqual(audio_source.sampling_rate, 16000)
        self.assertEqual(audio_source.sample_width, 2)
        self.assertEqual(audio_source.channels, 1)
        # the expected pure tone at the given frequency
        expected = PURE_TONE_DICT[frequency]
        # compare with the data read from the file
        fmt = DATA_FORMAT[2]
        data = array(fmt, audio_source._buffer)
        self.assertEqual(data, expected)

    @genty_dataset(
        mono=("mono_400Hz", (400,)),
        three_channel=("3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_load_wave_mix(self, filename_suffix, frequencies):
        sampling_rate = 16000
        sample_width = 2
        channels = len(frequencies)
        mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
        fmt = DATA_FORMAT[sample_width]
        expected = _array_to_bytes(
            array(
                fmt,
                (sum(samples) // channels for samples in zip(*mono_channels)),
            )
        )
        filename = "tests/data/test_16KHZ_{}.wav".format(filename_suffix)
        audio_source = _load_wave(filename, use_channel="mix")
        mixed = audio_source._buffer
        self.assertEqual(mixed, expected)
        self.assertIsInstance(audio_source, BufferAudioSource)
        self.assertEqual(audio_source.sampling_rate, sampling_rate)
        self.assertEqual(audio_source.sample_width, sample_width)
        self.assertEqual(audio_source.channels, 1)

    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg_default_first_channel=("ogg", 2, None, "from_ogg"),
        ogg_first_channel=("ogg", 1, 0, "from_ogg"),
        ogg_second_channel=("ogg", 2, 1, "from_ogg"),
        ogg_mix_channels=("ogg", 3, "mix", "from_ogg"),
        mp3_left_channel=("mp3", 1, "left", "from_mp3"),
        mp3_right_channel=("mp3", 2, "right", "from_mp3"),
        mp3_mix_channels=("mp3", 3, "mix", "from_mp3"),
        flac_first_channel=("flac", 2, 0, "from_file"),
        flac_second_channel=("flac", 2, 1, "from_file"),
        flv_left_channel=("flv", 1, "left", "from_flv"),
        webm_right_channel=("webm", 2, "right", "from_file"),
        webm_mix_channels=("webm", 4, "mix", "from_file"),
    )
    def test_load_with_pydub(
        self, audio_format, channels, use_channel, function, *mocks
    ):
        filename = "audio.{}".format(audio_format)
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = channels
        segment_mock._data = b"abcdefgh"
        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                use_channel = {"left": 0, "right": 1, None: 0}.get(
                    use_channel, use_channel
                )
                _load_with_pydub(filename, audio_format, use_channel)
                self.assertTrue(open_func.called)
                if channels > 1:
                    self.assertTrue(ext_mock.called)
                    ext_mock.assert_called_with(
                        segment_mock._data,
                        segment_mock.channels,
                        segment_mock.sample_width,
                        use_channel,
                    )
                else:
                    self.assertFalse(ext_mock.called)

    @genty_dataset(
        mono=("mono_400Hz.raw", (400,)),
        three_channel=("3channel_400-800-1600Hz.raw", (400, 800, 1600)),
    )
    def test_save_raw(self, filename, frequencies):
        filename = "tests/data/test_16KHZ_{}".format(filename)
        sample_width = 2
        fmt = DATA_FORMAT[sample_width]
        mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
        data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
        tmpfile = NamedTemporaryFile()
        _save_raw(data, tmpfile.name)
        self.assertTrue(filecmp.cmp(tmpfile.name, filename, shallow=False))

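    # Raw output is headerless, so _save_raw only needs the data and a target
    # file name; _save_wave below also takes the sampling rate, sample width
    # and channel count needed for the WAV header.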
    @genty_dataset(
        mono=("mono_400Hz.wav", (400,)),
        three_channel=("3channel_400-800-1600Hz.wav", (400, 800, 1600)),
    )
    def test_save_wave(self, filename, frequencies):
        filename = "tests/data/test_16KHZ_{}".format(filename)
        sampling_rate = 16000
        sample_width = 2
        channels = len(frequencies)
        fmt = DATA_FORMAT[sample_width]
        mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
        data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
        tmpfile = NamedTemporaryFile()
        _save_wave(data, tmpfile.name, sampling_rate, sample_width, channels)
        self.assertTrue(filecmp.cmp(tmpfile.name, filename, shallow=False))

    @genty_dataset(
        missing_sampling_rate=("sr",),
        missing_sample_width=("sw",),
        missing_channels=("ch",),
    )
    def test_save_wave_missing_audio_param(self, missing_param):
        with self.assertRaises(AudioParameterError):
            params = AUDIO_PARAMS_SHORT.copy()
            del params[missing_param]
            srate, swidth, channels, _ = _get_audio_parameters(params)
            _save_wave(b"\0\0", "audio", srate, swidth, channels)

    @genty_dataset(
        raw_with_audio_format=("audio", "raw"),
        raw_with_extension=("audio.raw", None),
        raw_with_audio_format_and_extension=("audio.mp3", "raw"),
        raw_no_audio_format_nor_extension=("audio", None),
    )
    def test_to_file_raw(self, filename, audio_format):
        exp_filename = "tests/data/test_16KHZ_mono_400Hz.raw"
        tmpdir = TemporaryDirectory()
        filename = os.path.join(tmpdir.name, filename)
        data = _array_to_bytes(PURE_TONE_DICT[400])
        to_file(data, filename, audio_format=audio_format)
        self.assertTrue(filecmp.cmp(filename, exp_filename, shallow=False))
        tmpdir.cleanup()

    @genty_dataset(
        wav_with_audio_format=("audio", "wav"),
        wav_with_extension=("audio.wav", None),
        wav_with_audio_format_and_extension=("audio.mp3", "wav"),
        wave_with_audio_format=("audio", "wave"),
        wave_with_extension=("audio.wave", None),
        wave_with_audio_format_and_extension=("audio.mp3", "wave"),
    )
    def test_to_file_wave(self, filename, audio_format):
        exp_filename = "tests/data/test_16KHZ_mono_400Hz.wav"
        tmpdir = TemporaryDirectory()
        filename = os.path.join(tmpdir.name, filename)
        data = _array_to_bytes(PURE_TONE_DICT[400])
        to_file(
            data,
            filename,
            audio_format=audio_format,
            sampling_rate=16000,
            sample_width=2,
            channels=1,
        )
        self.assertTrue(filecmp.cmp(filename, exp_filename, shallow=False))
        tmpdir.cleanup()