amine@106
|
1 import os
|
amine@106
|
2 import sys
|
amine@106
|
3 import math
|
amine@107
|
4 from array import array
|
amine@110
|
5 from tempfile import NamedTemporaryFile
|
amine@110
|
6 import filecmp
|
amine@108
|
7 from unittest import TestCase
|
amine@108
|
8 from genty import genty, genty_dataset
|
amine@110
|
9 from auditok.io import (
|
amine@126
|
10 DATA_FORMAT,
|
amine@121
|
11 AudioIOError,
|
amine@110
|
12 AudioParameterError,
|
amine@126
|
13 BufferAudioSource,
|
amine@110
|
14 check_audio_data,
|
amine@128
|
15 _get_audio_parameters,
|
amine@116
|
16 _array_to_bytes,
|
amine@118
|
17 _mix_audio_channels,
|
amine@119
|
18 _extract_selected_channel,
|
amine@126
|
19 _load_raw,
|
amine@129
|
20 _load_wave,
|
amine@131
|
21 _load_with_pydub,
|
amine@120
|
22 from_file,
|
amine@111
|
23 _save_raw,
|
amine@110
|
24 _save_wave,
|
amine@110
|
25 )
|
amine@106
|
26
|
amine@106
|
27
|
amine@106
|
28 if sys.version_info >= (3, 0):
|
amine@106
|
29 PYTHON_3 = True
|
amine@124
|
30 from unittest.mock import patch, Mock
|
amine@106
|
31 else:
|
amine@106
|
32 PYTHON_3 = False
|
amine@124
|
33 from mock import patch, Mock
|
amine@120
|
34
|
amine@120
|
35 AUDIO_PARAMS_SHORT = {"sr": 16000, "sw": 2, "ch": 1}
|
amine@106
|
36
|
amine@106
|
37
|
amine@106
|
38 def _sample_generator(*data_buffers):
|
amine@106
|
39 """
|
amine@106
|
40 Takes a list of many mono audio data buffers and makes a sample generator
|
amine@106
|
41 of interleaved audio samples, one sample from each channel. The resulting
|
amine@106
|
42 generator can be used to build a multichannel audio buffer.
|
amine@106
|
43 >>> gen = _sample_generator("abcd", "ABCD")
|
amine@106
|
44 >>> list(gen)
|
amine@106
|
45 ["a", "A", "b", "B", "c", "C", "d", "D"]
|
amine@106
|
46 """
|
amine@106
|
47 frame_gen = zip(*data_buffers)
|
amine@106
|
48 return (sample for frame in frame_gen for sample in frame)
|
amine@106
|
49
|
amine@106
|
50
|
amine@107
|
def _generate_pure_tone(
    frequency, duration_sec=1, sampling_rate=16000, sample_width=2, volume=1e4
):
    """
    Generate a pure sine tone of the given frequency.

    :param frequency: tone frequency in Hz; must satisfy Nyquist
        (frequency <= sampling_rate / 2, enforced by assertion).
    :param duration_sec: tone duration in seconds.
    :param sampling_rate: number of samples per second.
    :param sample_width: bytes per sample; selects the array format and
        the maximum representable amplitude.
    :param volume: peak amplitude, clamped to the maximum signed value
        representable with `sample_width` bytes.
    :returns: an `array.array` of signed integer samples.
    """
    assert frequency <= sampling_rate / 2
    max_value = (2 ** (sample_width * 8) // 2) - 1
    if volume > max_value:
        volume = max_value
    fmt = DATA_FORMAT[sample_width]
    total_samples = int(sampling_rate * duration_sec)
    # Fix: force true division. Under Python 2 (which this module still
    # supports via the PYTHON_3 branch) int / int truncates, making
    # `step` 0 and every generated sample silently zero.
    step = frequency / float(sampling_rate)
    two_pi_step = 2 * math.pi * step
    data = array(
        fmt,
        (
            int(math.sin(two_pi_step * i) * volume)
            for i in range(total_samples)
        ),
    )
    return data
|
amine@107
|
73
|
amine@107
|
74
|
amine@107
|
# Pre-computed tone fixtures shared by the tests below: 1-second tones
# for the frequencies used as full-length references, 0.1-second tones
# for the short ones.
PURE_TONE_DICT = dict(
    (freq, _generate_pure_tone(freq, 1, 16000, 2)) for freq in (400, 800, 1600)
)
PURE_TONE_DICT.update(
    dict(
        (freq, _generate_pure_tone(freq, 0.1, 16000, 2))
        for freq in (600, 1150, 2400, 7220)
    )
)
|
amine@108
|
84
|
amine@108
|
85
|
amine@108
|
86 @genty
|
amine@108
|
87 class TestIO(TestCase):
|
amine@108
|
88 @genty_dataset(
|
amine@108
|
89 valid_mono=(b"\0" * 113, 1, 1),
|
amine@108
|
90 valid_stereo=(b"\0" * 160, 1, 2),
|
amine@108
|
91 invalid_mono_sw_2=(b"\0" * 113, 2, 1, False),
|
amine@108
|
92 invalid_stereo_sw_1=(b"\0" * 113, 1, 2, False),
|
amine@108
|
93 invalid_stereo_sw_2=(b"\0" * 158, 2, 2, False),
|
amine@108
|
94 )
|
amine@108
|
95 def test_check_audio_data(self, data, sample_width, channels, valid=True):
|
amine@108
|
96
|
amine@108
|
97 if not valid:
|
amine@108
|
98 with self.assertRaises(AudioParameterError):
|
amine@108
|
99 check_audio_data(data, sample_width, channels)
|
amine@108
|
100 else:
|
amine@108
|
101 self.assertIsNone(check_audio_data(data, sample_width, channels))
|
amine@110
|
102
|
amine@110
|
103 @genty_dataset(
|
amine@118
|
104 mono_1byte=([400], 1),
|
amine@118
|
105 stereo_1byte=([400, 600], 1),
|
amine@118
|
106 three_channel_1byte=([400, 600, 2400], 1),
|
amine@118
|
107 mono_2byte=([400], 2),
|
amine@118
|
108 stereo_2byte=([400, 600], 2),
|
amine@118
|
109 three_channel_2byte=([400, 600, 1150], 2),
|
amine@118
|
110 mono_4byte=([400], 4),
|
amine@118
|
111 stereo_4byte=([400, 600], 4),
|
amine@118
|
112 four_channel_2byte=([400, 600, 1150, 7220], 4),
|
amine@118
|
113 )
|
amine@118
|
114 def test_mix_audio_channels(self, frequencies, sample_width):
|
amine@118
|
115 sampling_rate = 16000
|
amine@118
|
116 sample_width = 2
|
amine@118
|
117 channels = len(frequencies)
|
amine@118
|
118 mono_channels = [
|
amine@118
|
119 _generate_pure_tone(
|
amine@118
|
120 freq,
|
amine@118
|
121 duration_sec=0.1,
|
amine@118
|
122 sampling_rate=sampling_rate,
|
amine@118
|
123 sample_width=sample_width,
|
amine@118
|
124 )
|
amine@118
|
125 for freq in frequencies
|
amine@118
|
126 ]
|
amine@118
|
127 fmt = DATA_FORMAT[sample_width]
|
amine@118
|
128 expected = _array_to_bytes(
|
amine@118
|
129 array(
|
amine@118
|
130 fmt,
|
amine@118
|
131 (sum(samples) // channels for samples in zip(*mono_channels)),
|
amine@118
|
132 )
|
amine@118
|
133 )
|
amine@118
|
134 data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
|
amine@118
|
135 mixed = _mix_audio_channels(data, channels, sample_width)
|
amine@118
|
136 self.assertEqual(mixed, expected)
|
amine@118
|
137
|
amine@118
|
138 @genty_dataset(
|
amine@119
|
139 mono_1byte=([400], 1, 0),
|
amine@119
|
140 stereo_1byte_2st_channel=([400, 600], 1, 1),
|
amine@119
|
141 mono_2byte=([400], 2, 0),
|
amine@119
|
142 stereo_2byte_1st_channel=([400, 600], 2, 0),
|
amine@119
|
143 stereo_2byte_2nd_channel=([400, 600], 2, 1),
|
amine@119
|
144 three_channel_2byte_last_negative_idx=([400, 600, 1150], 2, -1),
|
amine@119
|
145 three_channel_2byte_2nd_negative_idx=([400, 600, 1150], 2, -2),
|
amine@119
|
146 three_channel_2byte_1st_negative_idx=([400, 600, 1150], 2, -3),
|
amine@119
|
147 three_channel_4byte_1st=([400, 600, 1150], 4, 0),
|
amine@119
|
148 three_channel_4byte_last_negative_idx=([400, 600, 1150], 4, -1),
|
amine@119
|
149 )
|
amine@119
|
150 def test_extract_selected_channel(
|
amine@119
|
151 self, frequencies, sample_width, use_channel
|
amine@119
|
152 ):
|
amine@119
|
153
|
amine@119
|
154 mono_channels = [
|
amine@119
|
155 _generate_pure_tone(
|
amine@119
|
156 freq,
|
amine@119
|
157 duration_sec=0.1,
|
amine@119
|
158 sampling_rate=16000,
|
amine@119
|
159 sample_width=sample_width,
|
amine@119
|
160 )
|
amine@119
|
161 for freq in frequencies
|
amine@119
|
162 ]
|
amine@119
|
163 channels = len(frequencies)
|
amine@119
|
164 fmt = DATA_FORMAT[sample_width]
|
amine@119
|
165 expected = _array_to_bytes(mono_channels[use_channel])
|
amine@119
|
166 data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
|
amine@119
|
167 selected_channel = _extract_selected_channel(
|
amine@119
|
168 data, channels, sample_width, use_channel
|
amine@119
|
169 )
|
amine@119
|
170 self.assertEqual(selected_channel, expected)
|
amine@119
|
171
|
amine@119
|
172 @genty_dataset(
|
amine@120
|
173 raw_with_audio_format=(
|
amine@120
|
174 "audio",
|
amine@120
|
175 "raw",
|
amine@120
|
176 "_load_raw",
|
amine@120
|
177 AUDIO_PARAMS_SHORT,
|
amine@120
|
178 ),
|
amine@120
|
179 raw_with_extension=(
|
amine@120
|
180 "audio.raw",
|
amine@120
|
181 None,
|
amine@120
|
182 "_load_raw",
|
amine@120
|
183 AUDIO_PARAMS_SHORT,
|
amine@120
|
184 ),
|
amine@120
|
185 wave_with_audio_format=("audio", "wave", "_load_wave"),
|
amine@120
|
186 wav_with_audio_format=("audio", "wave", "_load_wave"),
|
amine@120
|
187 wav_with_extension=("audio.wav", None, "_load_wave"),
|
amine@120
|
188 format_and_extension_both_given=("audio.dat", "wav", "_load_wave"),
|
amine@120
|
189 format_and_extension_both_given_b=("audio.raw", "wave", "_load_wave"),
|
amine@120
|
190 no_format_nor_extension=("audio", None, "_load_with_pydub"),
|
amine@120
|
191 other_formats_ogg=("audio.ogg", None, "_load_with_pydub"),
|
amine@120
|
192 other_formats_webm=("audio", "webm", "_load_with_pydub"),
|
amine@120
|
193 )
|
amine@120
|
194 def test_from_file(
|
amine@120
|
195 self, filename, audio_format, funtion_name, kwargs=None
|
amine@120
|
196 ):
|
amine@120
|
197 funtion_name = "auditok.io." + funtion_name
|
amine@120
|
198 if kwargs is None:
|
amine@120
|
199 kwargs = {}
|
amine@120
|
200 with patch(funtion_name) as patch_function:
|
amine@120
|
201 from_file(filename, audio_format, **kwargs)
|
amine@120
|
202 self.assertTrue(patch_function.called)
|
amine@120
|
203
|
amine@121
|
204 def test_from_file_no_pydub(self):
|
amine@121
|
205 with patch("auditok.io._WITH_PYDUB", False):
|
amine@121
|
206 with self.assertRaises(AudioIOError):
|
amine@121
|
207 from_file("audio", "mp3")
|
amine@121
|
208
|
amine@111
|
209 @genty_dataset(
|
amine@122
|
210 raw_first_channel=("raw", 0, 400),
|
amine@122
|
211 raw_second_channel=("raw", 1, 800),
|
amine@122
|
212 raw_third_channel=("raw", 2, 1600),
|
amine@122
|
213 raw_left_channel=("raw", "left", 400),
|
amine@122
|
214 raw_right_channel=("raw", "right", 800),
|
amine@122
|
215 wav_first_channel=("wav", 0, 400),
|
amine@122
|
216 wav_second_channel=("wav", 1, 800),
|
amine@122
|
217 wav_third_channel=("wav", 2, 1600),
|
amine@122
|
218 wav_left_channel=("wav", "left", 400),
|
amine@122
|
219 wav_right_channel=("wav", "right", 800),
|
amine@122
|
220 )
|
amine@122
|
221 def test_from_file_multichannel_audio(
|
amine@122
|
222 self, audio_format, use_channel, frequency
|
amine@122
|
223 ):
|
amine@122
|
224 expected = PURE_TONE_DICT[frequency]
|
amine@122
|
225 filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.{}".format(
|
amine@122
|
226 audio_format
|
amine@122
|
227 )
|
amine@122
|
228 sample_width = 2
|
amine@122
|
229 audio_source = from_file(
|
amine@122
|
230 filename,
|
amine@122
|
231 sampling_rate=16000,
|
amine@122
|
232 sample_width=sample_width,
|
amine@122
|
233 channels=3,
|
amine@122
|
234 use_channel=use_channel,
|
amine@122
|
235 )
|
amine@122
|
236 fmt = DATA_FORMAT[sample_width]
|
amine@122
|
237 data = array(fmt, audio_source._buffer)
|
amine@122
|
238 self.assertEqual(data, expected)
|
amine@122
|
239
|
amine@122
|
240 @genty_dataset(
|
amine@123
|
241 raw_mono=("raw", "mono_400Hz", (400,)),
|
amine@123
|
242 raw_3channel=("raw", "3channel_400-800-1600Hz", (400, 800, 1600)),
|
amine@123
|
243 wav_mono=("wav", "mono_400Hz", (400,)),
|
amine@123
|
244 wav_3channel=("wav", "3channel_400-800-1600Hz", (400, 800, 1600)),
|
amine@123
|
245 )
|
amine@123
|
246 def test_from_file_multichannel_audio_mix(
|
amine@123
|
247 self, audio_format, filename_suffix, frequencies
|
amine@123
|
248 ):
|
amine@123
|
249 sampling_rate = 16000
|
amine@123
|
250 sample_width = 2
|
amine@123
|
251 channels = len(frequencies)
|
amine@123
|
252 mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
|
amine@123
|
253 channels = len(frequencies)
|
amine@123
|
254 fmt = DATA_FORMAT[sample_width]
|
amine@123
|
255 expected = _array_to_bytes(
|
amine@123
|
256 array(
|
amine@123
|
257 fmt,
|
amine@123
|
258 (sum(samples) // channels for samples in zip(*mono_channels)),
|
amine@123
|
259 )
|
amine@123
|
260 )
|
amine@123
|
261 filename = "tests/data/test_16KHZ_{}.{}".format(
|
amine@123
|
262 filename_suffix, audio_format
|
amine@123
|
263 )
|
amine@123
|
264 audio_source = from_file(
|
amine@123
|
265 filename,
|
amine@123
|
266 use_channel="mix",
|
amine@123
|
267 sampling_rate=sampling_rate,
|
amine@123
|
268 sample_width=2,
|
amine@123
|
269 channels=channels,
|
amine@123
|
270 )
|
amine@123
|
271 mixed = audio_source._buffer
|
amine@123
|
272 self.assertEqual((mixed), expected)
|
amine@123
|
273
|
amine@124
|
    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg_first_channel=("ogg", 0, "from_ogg"),
        ogg_second_channel=("ogg", 1, "from_ogg"),
        ogg_mix=("ogg", "mix", "from_ogg"),
        ogg_default=("ogg", None, "from_ogg"),
        mp3_left_channel=("mp3", "left", "from_mp3"),
        mp3_right_channel=("mp3", "right", "from_mp3"),
        flac_first_channel=("flac", 0, "from_file"),
        flac_second_channel=("flac", 1, "from_file"),
        flv_left_channel=("flv", "left", "from_flv"),
        webm_right_channel=("webm", "right", "from_file"),
    )
    def test_from_file_multichannel_audio_compressed(
        self, audio_format, use_channel, function, *mocks
    ):
        # Purpose: from_file should delegate decoding of compressed formats
        # to the matching pydub AudioSegment constructor, then extract the
        # requested channel from the decoded data.
        filename = "audio.{}".format(audio_format)
        # Stand-in for a decoded pydub segment: 2 bytes/sample, stereo.
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = 2
        segment_mock._data = b"abcd"
        # Case 1: multichannel segment -> _extract_selected_channel must
        # be called with the decoded data and the resolved channel index.
        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                from_file(filename, use_channel=use_channel)
                self.assertTrue(open_func.called)
                self.assertTrue(ext_mock.called)

        # "left"/"right"/None are expected to resolve to concrete indices
        # (0, 1 and 0 respectively) before channel extraction.
        use_channel = {"left": 0, "right": 1, None: 0}.get(
            use_channel, use_channel
        )
        ext_mock.assert_called_with(
            segment_mock._data,
            segment_mock.channels,
            segment_mock.sample_width,
            use_channel,
        )

        # Case 2: mono segment -> no channel extraction should happen.
        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                segment_mock.channels = 1
                open_func.return_value = segment_mock
                from_file(filename, use_channel=use_channel)
                self.assertTrue(open_func.called)
                self.assertFalse(ext_mock.called)
|
amine@124
|
324
|
amine@125
|
    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg=("ogg", "from_ogg"),
        mp3=("mp3", "from_mp3"),
        flac=("flac", "from_file"),
    )
    def test_from_file_multichannel_audio_mix_compressed(
        self, audio_format, function, *mocks
    ):
        # Purpose: with use_channel="mix", from_file should decode the
        # compressed file via pydub and hand the raw decoded data to
        # _mix_audio_channels with the channel count and sample width.
        filename = "audio.{}".format(audio_format)
        # Stand-in for a decoded pydub segment: 2 bytes/sample, stereo.
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = 2
        segment_mock._data = b"abcd"
        with patch("auditok.io._mix_audio_channels") as mix_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                from_file(filename, use_channel="mix")
                self.assertTrue(open_func.called)
                mix_mock.assert_called_with(
                    segment_mock._data,
                    segment_mock.channels,
                    segment_mock.sample_width,
                )
|
amine@125
|
352
|
amine@123
|
353 @genty_dataset(
|
amine@126
|
354 dafault_first_channel=(None, 400),
|
amine@126
|
355 first_channel=(0, 400),
|
amine@126
|
356 second_channel=(1, 800),
|
amine@126
|
357 third_channel=(2, 1600),
|
amine@126
|
358 negative_first_channel=(-3, 400),
|
amine@126
|
359 negative_second_channel=(-2, 800),
|
amine@126
|
360 negative_third_channel=(-1, 1600),
|
amine@126
|
361 )
|
amine@126
|
362 def test_load_raw(self, use_channel, frequency):
|
amine@126
|
363 filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.raw"
|
amine@126
|
364 if use_channel is not None:
|
amine@126
|
365 audio_source = _load_raw(
|
amine@126
|
366 filename,
|
amine@126
|
367 sampling_rate=16000,
|
amine@126
|
368 sample_width=2,
|
amine@126
|
369 channels=3,
|
amine@126
|
370 use_channel=use_channel,
|
amine@126
|
371 )
|
amine@126
|
372 else:
|
amine@126
|
373 audio_source = _load_raw(
|
amine@126
|
374 filename, sampling_rate=16000, sample_width=2, channels=3
|
amine@126
|
375 )
|
amine@126
|
376 self.assertIsInstance(audio_source, BufferAudioSource)
|
amine@126
|
377 self.assertEqual(audio_source.sampling_rate, 16000)
|
amine@126
|
378 self.assertEqual(audio_source.sample_width, 2)
|
amine@126
|
379 self.assertEqual(audio_source.channels, 1)
|
amine@126
|
380 # generate a pure sine wave tone of the given frequency
|
amine@126
|
381 expected = PURE_TONE_DICT[frequency]
|
amine@126
|
382 # compre with data read from file
|
amine@126
|
383 fmt = DATA_FORMAT[2]
|
amine@126
|
384 data = array(fmt, audio_source._buffer)
|
amine@126
|
385 self.assertEqual(data, expected)
|
amine@126
|
386
|
amine@126
|
387 @genty_dataset(
|
amine@127
|
388 mono=("mono_400Hz", (400,)),
|
amine@127
|
389 three_channel=("3channel_400-800-1600Hz", (400, 800, 1600)),
|
amine@127
|
390 )
|
amine@127
|
391 def test_load_raw_mix(self, filename_suffix, frequencies):
|
amine@127
|
392 sampling_rate = 16000
|
amine@127
|
393 sample_width = 2
|
amine@127
|
394 channels = len(frequencies)
|
amine@127
|
395 mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
|
amine@127
|
396
|
amine@127
|
397 fmt = DATA_FORMAT[sample_width]
|
amine@127
|
398 expected = _array_to_bytes(
|
amine@127
|
399 array(
|
amine@127
|
400 fmt,
|
amine@127
|
401 (sum(samples) // channels for samples in zip(*mono_channels)),
|
amine@127
|
402 )
|
amine@127
|
403 )
|
amine@127
|
404 filename = "tests/data/test_16KHZ_{}.raw".format(filename_suffix)
|
amine@127
|
405 audio_source = _load_raw(
|
amine@127
|
406 filename,
|
amine@127
|
407 use_channel="mix",
|
amine@127
|
408 sampling_rate=sampling_rate,
|
amine@127
|
409 sample_width=2,
|
amine@127
|
410 channels=channels,
|
amine@127
|
411 )
|
amine@127
|
412 mixed = audio_source._buffer
|
amine@127
|
413 self.assertEqual(mixed, expected)
|
amine@127
|
414 self.assertIsInstance(audio_source, BufferAudioSource)
|
amine@127
|
415 self.assertEqual(audio_source.sampling_rate, sampling_rate)
|
amine@127
|
416 self.assertEqual(audio_source.sample_width, sample_width)
|
amine@127
|
417 self.assertEqual(audio_source.channels, 1)
|
amine@127
|
418
|
amine@127
|
419 @genty_dataset(
|
amine@128
|
420 missing_sampling_rate=("sr",),
|
amine@128
|
421 missing_sample_width=("sw",),
|
amine@128
|
422 missing_channels=("ch",),
|
amine@128
|
423 )
|
amine@128
|
424 def test_load_raw_missing_audio_param(self, missing_param):
|
amine@128
|
425 with self.assertRaises(AudioParameterError):
|
amine@128
|
426 params = AUDIO_PARAMS_SHORT.copy()
|
amine@128
|
427 del params[missing_param]
|
amine@128
|
428 srate, swidth, channels, _ = _get_audio_parameters(params)
|
amine@128
|
429 _load_raw("audio", srate, swidth, channels)
|
amine@128
|
430
|
amine@128
|
431 @genty_dataset(
|
amine@129
|
432 dafault_first_channel=(None, 400),
|
amine@129
|
433 first_channel=(0, 400),
|
amine@129
|
434 second_channel=(1, 800),
|
amine@129
|
435 third_channel=(2, 1600),
|
amine@129
|
436 negative_first_channel=(-3, 400),
|
amine@129
|
437 negative_second_channel=(-2, 800),
|
amine@129
|
438 negative_third_channel=(-1, 1600),
|
amine@129
|
439 )
|
amine@129
|
440 def test_load_wave(self, use_channel, frequency):
|
amine@129
|
441 filename = "tests/data/test_16KHZ_3channel_400-800-1600Hz.wav"
|
amine@129
|
442 if use_channel is not None:
|
amine@129
|
443 audio_source = _load_wave(filename, use_channel=use_channel)
|
amine@129
|
444 else:
|
amine@129
|
445 audio_source = _load_wave(filename)
|
amine@129
|
446 self.assertIsInstance(audio_source, BufferAudioSource)
|
amine@129
|
447 self.assertEqual(audio_source.sampling_rate, 16000)
|
amine@129
|
448 self.assertEqual(audio_source.sample_width, 2)
|
amine@129
|
449 self.assertEqual(audio_source.channels, 1)
|
amine@129
|
450 # generate a pure sine wave tone of the given frequency
|
amine@129
|
451 expected = PURE_TONE_DICT[frequency]
|
amine@129
|
452 # compre with data read from file
|
amine@129
|
453 fmt = DATA_FORMAT[2]
|
amine@129
|
454 data = array(fmt, audio_source._buffer)
|
amine@129
|
455 self.assertEqual(data, expected)
|
amine@129
|
456
|
amine@129
|
457 @genty_dataset(
|
amine@130
|
458 mono=("mono_400Hz", (400,)),
|
amine@130
|
459 three_channel=("3channel_400-800-1600Hz", (400, 800, 1600)),
|
amine@130
|
460 )
|
amine@130
|
461 def test_load_wave_mix(self, filename_suffix, frequencies):
|
amine@130
|
462 sampling_rate = 16000
|
amine@130
|
463 sample_width = 2
|
amine@130
|
464 channels = len(frequencies)
|
amine@130
|
465 mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
|
amine@130
|
466 fmt = DATA_FORMAT[sample_width]
|
amine@130
|
467 expected = _array_to_bytes(
|
amine@130
|
468 array(
|
amine@130
|
469 fmt,
|
amine@130
|
470 (sum(samples) // channels for samples in zip(*mono_channels)),
|
amine@130
|
471 )
|
amine@130
|
472 )
|
amine@130
|
473 filename = "tests/data/test_16KHZ_{}.wav".format(filename_suffix)
|
amine@130
|
474 audio_source = _load_wave(filename, use_channel="mix")
|
amine@130
|
475 mixed = audio_source._buffer
|
amine@130
|
476 self.assertEqual(mixed, expected)
|
amine@130
|
477 self.assertIsInstance(audio_source, BufferAudioSource)
|
amine@130
|
478 self.assertEqual(audio_source.sampling_rate, sampling_rate)
|
amine@130
|
479 self.assertEqual(audio_source.sample_width, sample_width)
|
amine@130
|
480 self.assertEqual(audio_source.channels, 1)
|
amine@130
|
481
|
amine@131
|
    @patch("auditok.io._WITH_PYDUB", True)
    @patch("auditok.io.BufferAudioSource")
    @genty_dataset(
        ogg_default_first_channel=("ogg", 2, None, "from_ogg"),
        ogg_first_channel=("ogg", 1, 0, "from_ogg"),
        ogg_second_channel=("ogg", 2, 1, "from_ogg"),
        ogg_mix_channels=("ogg", 3, "mix", "from_ogg"),
        mp3_left_channel=("mp3", 1, "left", "from_mp3"),
        mp3_right_channel=("mp3", 2, "right", "from_mp3"),
        mp3_mix_channels=("mp3", 3, "mix", "from_mp3"),
        flac_first_channel=("flac", 2, 0, "from_file"),
        flac_second_channel=("flac", 2, 1, "from_file"),
        flv_left_channel=("flv", 1, "left", "from_flv"),
        webm_right_channel=("webm", 2, "right", "from_file"),
        webm_mix_channels=("webm", 4, "mix", "from_file"),
    )
    def test_load_with_pydub(
        self, audio_format, channels, use_channel, function, *mocks
    ):
        # Purpose: _load_with_pydub should decode via the matching pydub
        # AudioSegment constructor and call _extract_selected_channel only
        # when the decoded segment has more than one channel.
        filename = "audio.{}".format(audio_format)
        # Stand-in for a decoded pydub segment with a dataset-driven
        # channel count.
        segment_mock = Mock()
        segment_mock.sample_width = 2
        segment_mock.channels = channels
        segment_mock._data = b"abcdefgh"
        with patch("auditok.io._extract_selected_channel") as ext_mock:
            with patch(
                "auditok.io.AudioSegment.{}".format(function)
            ) as open_func:
                open_func.return_value = segment_mock
                # "left"/"right"/None are mapped to indices 0/1/0 up
                # front, mirroring the resolution _load_with_pydub is
                # expected to perform.
                use_channel = {"left": 0, "right": 1, None: 0}.get(
                    use_channel, use_channel
                )
                _load_with_pydub(filename, audio_format, use_channel)
                self.assertTrue(open_func.called)
                if channels > 1:
                    # multichannel: one channel must be extracted from
                    # the decoded data
                    self.assertTrue(ext_mock.called)
                    ext_mock.assert_called_with(
                        segment_mock._data,
                        segment_mock.channels,
                        segment_mock.sample_width,
                        use_channel,
                    )
                else:
                    # mono: decoded data is used as-is, no extraction
                    self.assertFalse(ext_mock.called)
|
amine@131
|
526
|
amine@130
|
527 @genty_dataset(
|
amine@132
|
528 mono=("mono_400Hz.raw", (400,)),
|
amine@132
|
529 three_channel=("3channel_400-800-1600Hz.raw", (400, 800, 1600)),
|
amine@132
|
530 )
|
amine@132
|
531 def test_save_raw(self, filename, frequencies):
|
amine@132
|
532 filename = "tests/data/test_16KHZ_{}".format(filename)
|
amine@132
|
533 sample_width = 2
|
amine@132
|
534 fmt = DATA_FORMAT[sample_width]
|
amine@132
|
535 mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
|
amine@132
|
536 data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
|
amine@132
|
537 tmpfile = NamedTemporaryFile()
|
amine@132
|
538 _save_raw(tmpfile.name, data)
|
amine@132
|
539 self.assertTrue(filecmp.cmp(tmpfile.name, filename, shallow=False))
|
amine@132
|
540
|
amine@132
|
541 @genty_dataset(
|
amine@110
|
542 mono=("mono_400Hz.wav", (400,)),
|
amine@110
|
543 three_channel=("3channel_400-800-1600Hz.wav", (400, 800, 1600)),
|
amine@110
|
544 )
|
amine@110
|
545 def test_save_wave(self, filename, frequencies):
|
amine@110
|
546 filename = "tests/data/test_16KHZ_{}".format(filename)
|
amine@110
|
547 sampling_rate = 16000
|
amine@110
|
548 sample_width = 2
|
amine@110
|
549 channels = len(frequencies)
|
amine@110
|
550 fmt = DATA_FORMAT[sample_width]
|
amine@110
|
551 mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
|
amine@110
|
552 data = _array_to_bytes(array(fmt, _sample_generator(*mono_channels)))
|
amine@110
|
553 tmpfile = NamedTemporaryFile()
|
amine@110
|
554 _save_wave(tmpfile.name, data, sampling_rate, sample_width, channels)
|
amine@110
|
555 self.assertTrue(filecmp.cmp(tmpfile.name, filename, shallow=False))
|
amine@132
|
556
|
amine@132
|
557 @genty_dataset(
|
amine@132
|
558 missing_sampling_rate=("sr",),
|
amine@132
|
559 missing_sample_width=("sw",),
|
amine@132
|
560 missing_channels=("ch",),
|
amine@132
|
561 )
|
amine@132
|
562 def test_save_wave_missing_audio_param(self, missing_param):
|
amine@132
|
563 with self.assertRaises(AudioParameterError):
|
amine@132
|
564 params = AUDIO_PARAMS_SHORT.copy()
|
amine@132
|
565 del params[missing_param]
|
amine@132
|
566 srate, swidth, channels, _ = _get_audio_parameters(params)
|
amine@132
|
567 _save_wave("audio", b"\0\0", srate, swidth, channels)
|