comparison DEPENDENCIES/mingw32/Python27/Lib/site-packages/numpy/lib/function_base.py @ 87:2a2c65a20a8b
Add Python libs and headers
author:   Chris Cannam
date:     Wed, 25 Feb 2015 14:05:22 +0000
parents:
children:
comparison
86:413a9d26189e | 87:2a2c65a20a8b
1 from __future__ import division, absolute_import, print_function | |
2 | |
3 import warnings | |
4 import sys | |
5 import collections | |
6 import operator | |
7 | |
8 import numpy as np | |
9 import numpy.core.numeric as _nx | |
10 from numpy.core import linspace, atleast_1d, atleast_2d | |
11 from numpy.core.numeric import ( | |
12 ones, zeros, arange, concatenate, array, asarray, asanyarray, empty, | |
13 empty_like, ndarray, around, floor, ceil, take, dot, where, intp, | |
14 integer, isscalar | |
15 ) | |
16 from numpy.core.umath import ( | |
17 pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, | |
18 mod, exp, log10 | |
19 ) | |
20 from numpy.core.fromnumeric import ( | |
21 ravel, nonzero, sort, partition, mean | |
22 ) | |
23 from numpy.core.numerictypes import typecodes, number | |
24 from numpy.lib.twodim_base import diag | |
25 from .utils import deprecate | |
26 from ._compiled_base import _insert, add_docstring | |
27 from ._compiled_base import digitize, bincount, interp as compiled_interp | |
28 from ._compiled_base import add_newdoc_ufunc | |
29 from numpy.compat import long | |
30 | |
31 # Force range to be a generator, for np.delete's usage. | |
32 if sys.version_info[0] < 3: | |
33 range = xrange | |
34 | |
35 | |
36 __all__ = [ | |
37 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', | |
38 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', | |
39 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', | |
40 'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef', | |
41 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', | |
42 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', | |
43 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc' | |
44 ] | |
45 | |
46 | |
47 def iterable(y): | |
48 """ | |
49 Check whether or not an object can be iterated over. | |
50 | |
51 Parameters | |
52 ---------- | |
53 y : object | |
54 Input object. | |
55 | |
56 Returns | |
57 ------- | |
58 b : {0, 1} | |
59 Return 1 if the object has an iterator method or is a sequence, | |
60 and 0 otherwise. | |
61 | |
62 | |
63 Examples | |
64 -------- | |
65 >>> np.iterable([1, 2, 3]) | |
66 1 | |
67 >>> np.iterable(2) | |
68 0 | |
69 | |
70 """ | |
71 try: | |
72 iter(y) | |
73 except: | |
74 return 0 | |
75 return 1 | |
76 | |
77 | |
78 def histogram(a, bins=10, range=None, normed=False, weights=None, | |
79 density=None): | |
80 """ | |
81 Compute the histogram of a set of data. | |
82 | |
83 Parameters | |
84 ---------- | |
85 a : array_like | |
86 Input data. The histogram is computed over the flattened array. | |
87 bins : int or sequence of scalars, optional | |
88 If `bins` is an int, it defines the number of equal-width | |
89 bins in the given range (10, by default). If `bins` is a sequence, | |
90 it defines the bin edges, including the rightmost edge, allowing | |
91 for non-uniform bin widths. | |
92 range : (float, float), optional | |
93 The lower and upper range of the bins. If not provided, range | |
94 is simply ``(a.min(), a.max())``. Values outside the range are | |
95 ignored. | |
96 normed : bool, optional | |
97 This keyword is deprecated in Numpy 1.6 due to confusing/buggy | |
98 behavior. It will be removed in Numpy 2.0. Use the density keyword | |
99 instead. | |
100 If False, the result will contain the number of samples | |
101 in each bin. If True, the result is the value of the | |
102 probability *density* function at the bin, normalized such that | |
103 the *integral* over the range is 1. Note that this latter behavior is | |
104 known to be buggy with unequal bin widths; use `density` instead. | |
105 weights : array_like, optional | |
106 An array of weights, of the same shape as `a`. Each value in `a` | |
107 only contributes its associated weight towards the bin count | |
108 (instead of 1). If `normed` is True, the weights are normalized, | |
109 so that the integral of the density over the range remains 1. | |
110 density : bool, optional | |
111 If False, the result will contain the number of samples | |
112 in each bin. If True, the result is the value of the | |
113 probability *density* function at the bin, normalized such that | |
114 the *integral* over the range is 1. Note that the sum of the | |
115 histogram values will not be equal to 1 unless bins of unity | |
116 width are chosen; it is not a probability *mass* function. | |
117 Overrides the `normed` keyword if given. | |
118 | |
119 Returns | |
120 ------- | |
121 hist : array | |
122 The values of the histogram. See `normed` and `weights` for a | |
123 description of the possible semantics. | |
124 bin_edges : array of dtype float | |
125 Return the bin edges ``(length(hist)+1)``. | |
126 | |
127 | |
128 See Also | |
129 -------- | |
130 histogramdd, bincount, searchsorted, digitize | |
131 | |
132 Notes | |
133 ----- | |
134 All but the last (righthand-most) bin is half-open. In other words, if | |
135 `bins` is:: | |
136 | |
137 [1, 2, 3, 4] | |
138 | |
139 then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the | |
140 second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* | |
141 4. | |
142 | |
143 Examples | |
144 -------- | |
145 >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) | |
146 (array([0, 2, 1]), array([0, 1, 2, 3])) | |
147 >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) | |
148 (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) | |
149 >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) | |
150 (array([1, 4, 1]), array([0, 1, 2, 3])) | |
151 | |
152 >>> a = np.arange(5) | |
153 >>> hist, bin_edges = np.histogram(a, density=True) | |
154 >>> hist | |
155 array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) | |
156 >>> hist.sum() | |
157 2.4999999999999996 | |
158 >>> np.sum(hist*np.diff(bin_edges)) | |
159 1.0 | |
160 | |
161 """ | |
162 | |
163 a = asarray(a) | |
164 if weights is not None: | |
165 weights = asarray(weights) | |
166 if np.any(weights.shape != a.shape): | |
167 raise ValueError( | |
168 'weights should have the same shape as a.') | |
169 weights = weights.ravel() | |
170 a = a.ravel() | |
171 | |
172 if (range is not None): | |
173 mn, mx = range | |
174 if (mn > mx): | |
175 raise AttributeError( | |
176 'max must be larger than min in range parameter.') | |
177 | |
178 if not iterable(bins): | |
179 if np.isscalar(bins) and bins < 1: | |
180 raise ValueError( | |
181 '`bins` should be a positive integer.') | |
182 if range is None: | |
183 if a.size == 0: | |
184 # handle empty arrays. Can't determine range, so use 0-1. | |
185 range = (0, 1) | |
186 else: | |
187 range = (a.min(), a.max()) | |
188 mn, mx = [mi + 0.0 for mi in range] | |
189 if mn == mx: | |
190 mn -= 0.5 | |
191 mx += 0.5 | |
192 bins = linspace(mn, mx, bins + 1, endpoint=True) | |
193 else: | |
194 bins = asarray(bins) | |
195 if (np.diff(bins) < 0).any(): | |
196 raise AttributeError( | |
197 'bins must increase monotonically.') | |
198 | |
199 # Histogram is an integer or a float array depending on the weights. | |
200 if weights is None: | |
201 ntype = int | |
202 else: | |
203 ntype = weights.dtype | |
204 n = np.zeros(bins.shape, ntype) | |
205 | |
206 block = 65536 | |
207 if weights is None: | |
208 for i in arange(0, len(a), block): | |
209 sa = sort(a[i:i+block]) | |
210 n += np.r_[sa.searchsorted(bins[:-1], 'left'), | |
211 sa.searchsorted(bins[-1], 'right')] | |
212 else: | |
213 zero = array(0, dtype=ntype) | |
214 for i in arange(0, len(a), block): | |
215 tmp_a = a[i:i+block] | |
216 tmp_w = weights[i:i+block] | |
217 sorting_index = np.argsort(tmp_a) | |
218 sa = tmp_a[sorting_index] | |
219 sw = tmp_w[sorting_index] | |
220 cw = np.concatenate(([zero, ], sw.cumsum())) | |
221 bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), | |
222 sa.searchsorted(bins[-1], 'right')] | |
223 n += cw[bin_index] | |
224 | |
225 n = np.diff(n) | |
226 | |
227 if density is not None: | |
228 if density: | |
229 db = array(np.diff(bins), float) | |
230 return n/db/n.sum(), bins | |
231 else: | |
232 return n, bins | |
233 else: | |
234 # deprecated, buggy behavior. Remove for Numpy 2.0 | |
235 if normed: | |
236 db = array(np.diff(bins), float) | |
237 return n/(n*db).sum(), bins | |
238 else: | |
239 return n, bins | |
240 | |
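# --- Editor's note: an illustrative sketch, not part of the file above. It
# shows the sorted-block counting trick histogram() relies on: for sorted
# data, searchsorted against the bin edges gives cumulative counts, and their
# differences are the per-bin counts (last bin right-inclusive). The variable
# names below are hypothetical.
import numpy as np

data = np.array([0.1, 0.4, 1.2, 2.0, 2.5, 3.0])
edges = np.array([0.0, 1.0, 2.0, 3.0])
sa = np.sort(data)
cum = np.r_[sa.searchsorted(edges[:-1], 'left'),
            sa.searchsorted(edges[-1], 'right')]
counts = np.diff(cum)                                   # array([2, 1, 3])
assert (counts == np.histogram(data, bins=edges)[0]).all()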
241 | |
242 def histogramdd(sample, bins=10, range=None, normed=False, weights=None): | |
243 """ | |
244 Compute the multidimensional histogram of some data. | |
245 | |
246 Parameters | |
247 ---------- | |
248 sample : array_like | |
249 The data to be histogrammed. It must be an (N,D) array or data | |
250 that can be converted to such. The rows of the resulting array | |
251 are the coordinates of points in a D dimensional polytope. | |
252 bins : sequence or int, optional | |
253 The bin specification: | |
254 | |
255 * A sequence of arrays describing the bin edges along each dimension. | |
256 * The number of bins for each dimension (nx, ny, ... =bins) | |
257 * The number of bins for all dimensions (nx=ny=...=bins). | |
258 | |
259 range : sequence, optional | |
260 A sequence of lower and upper bin edges to be used if the edges are | |
261 not given explicitly in `bins`. Defaults to the minimum and maximum | |
262 values along each dimension. | |
263 normed : bool, optional | |
264 If False, returns the number of samples in each bin. If True, | |
265 returns the bin density ``bin_count / sample_count / bin_volume``. | |
266 weights : array_like (N,), optional | |
267 An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. | |
268 Weights are normalized to 1 if normed is True. If normed is False, | |
269 the values of the returned histogram are equal to the sum of the | |
270 weights belonging to the samples falling into each bin. | |
271 | |
272 Returns | |
273 ------- | |
274 H : ndarray | |
275 The multidimensional histogram of sample x. See normed and weights | |
276 for the different possible semantics. | |
277 edges : list | |
278 A list of D arrays describing the bin edges for each dimension. | |
279 | |
280 See Also | |
281 -------- | |
282 histogram: 1-D histogram | |
283 histogram2d: 2-D histogram | |
284 | |
285 Examples | |
286 -------- | |
287 >>> r = np.random.randn(100,3) | |
288 >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) | |
289 >>> H.shape, edges[0].size, edges[1].size, edges[2].size | |
290 ((5, 8, 4), 6, 9, 5) | |
291 | |
292 """ | |
293 | |
294 try: | |
295 # Sample is an ND-array. | |
296 N, D = sample.shape | |
297 except (AttributeError, ValueError): | |
298 # Sample is a sequence of 1D arrays. | |
299 sample = atleast_2d(sample).T | |
300 N, D = sample.shape | |
301 | |
302 nbin = empty(D, int) | |
303 edges = D*[None] | |
304 dedges = D*[None] | |
305 if weights is not None: | |
306 weights = asarray(weights) | |
307 | |
308 try: | |
309 M = len(bins) | |
310 if M != D: | |
311 raise AttributeError( | |
312 'The dimension of bins must be equal to the dimension of the ' | |
313 'sample x.') | |
314 except TypeError: | |
315 # bins is an integer | |
316 bins = D*[bins] | |
317 | |
318 # Select range for each dimension | |
319 # Used only if number of bins is given. | |
320 if range is None: | |
321 # Handle empty input. Range can't be determined in that case, use 0-1. | |
322 if N == 0: | |
323 smin = zeros(D) | |
324 smax = ones(D) | |
325 else: | |
326 smin = atleast_1d(array(sample.min(0), float)) | |
327 smax = atleast_1d(array(sample.max(0), float)) | |
328 else: | |
329 smin = zeros(D) | |
330 smax = zeros(D) | |
331 for i in arange(D): | |
332 smin[i], smax[i] = range[i] | |
333 | |
334 # Make sure the bins have a finite width. | |
335 for i in arange(len(smin)): | |
336 if smin[i] == smax[i]: | |
337 smin[i] = smin[i] - .5 | |
338 smax[i] = smax[i] + .5 | |
339 | |
340 # avoid rounding issues for comparisons when dealing with inexact types | |
341 if np.issubdtype(sample.dtype, np.inexact): | |
342 edge_dt = sample.dtype | |
343 else: | |
344 edge_dt = float | |
345 # Create edge arrays | |
346 for i in arange(D): | |
347 if isscalar(bins[i]): | |
348 if bins[i] < 1: | |
349 raise ValueError( | |
350 "Element at index %s in `bins` should be a positive " | |
351 "integer." % i) | |
352 nbin[i] = bins[i] + 2 # +2 for outlier bins | |
353 edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt) | |
354 else: | |
355 edges[i] = asarray(bins[i], edge_dt) | |
356 nbin[i] = len(edges[i]) + 1 # +1 for outlier bins | |
357 dedges[i] = diff(edges[i]) | |
358 if np.any(np.asarray(dedges[i]) <= 0): | |
359 raise ValueError( | |
360 "Found bin edge of size <= 0. Did you specify `bins` with" | |
361 "non-monotonic sequence?") | |
362 | |
363 nbin = asarray(nbin) | |
364 | |
365 # Handle empty input. | |
366 if N == 0: | |
367 return np.zeros(nbin-2), edges | |
368 | |
369 # Compute the bin number each sample falls into. | |
370 Ncount = {} | |
371 for i in arange(D): | |
372 Ncount[i] = digitize(sample[:, i], edges[i]) | |
373 | |
374 # Using digitize, values that fall on an edge are put in the right bin. | |
375 # For the rightmost bin, we want values equal to the right edge to be | |
376 # counted in the last bin, and not as an outlier. | |
377 for i in arange(D): | |
378 # Rounding precision | |
379 mindiff = dedges[i].min() | |
380 if not np.isinf(mindiff): | |
381 decimal = int(-log10(mindiff)) + 6 | |
382 # Find which points are on the rightmost edge. | |
383 not_smaller_than_edge = (sample[:, i] >= edges[i][-1]) | |
384 on_edge = (around(sample[:, i], decimal) == | |
385 around(edges[i][-1], decimal)) | |
386 # Shift these points one bin to the left. | |
387 Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1 | |
388 | |
389 # Flattened histogram matrix (1D) | |
390 # Reshape is used so that overlarge arrays | |
391 # will raise an error. | |
392 hist = zeros(nbin, float).reshape(-1) | |
393 | |
394 # Compute the sample indices in the flattened histogram matrix. | |
395 ni = nbin.argsort() | |
396 xy = zeros(N, int) | |
397 for i in arange(0, D-1): | |
398 xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() | |
399 xy += Ncount[ni[-1]] | |
400 | |
401 # Compute the number of repetitions in xy and assign it to the | |
402 # flattened histmat. | |
403 if len(xy) == 0: | |
404 return zeros(nbin-2, int), edges | |
405 | |
406 flatcount = bincount(xy, weights) | |
407 a = arange(len(flatcount)) | |
408 hist[a] = flatcount | |
409 | |
410 # Shape into a proper matrix | |
411 hist = hist.reshape(sort(nbin)) | |
412 for i in arange(nbin.size): | |
413 j = ni.argsort()[i] | |
414 hist = hist.swapaxes(i, j) | |
415 ni[i], ni[j] = ni[j], ni[i] | |
416 | |
417 # Remove outliers (indices 0 and -1 for each dimension). | |
418 core = D*[slice(1, -1)] | |
419 hist = hist[core] | |
420 | |
421 # Normalize if normed is True | |
422 if normed: | |
423 s = hist.sum() | |
424 for i in arange(D): | |
425 shape = ones(D, int) | |
426 shape[i] = nbin[i] - 2 | |
427 hist = hist / dedges[i].reshape(shape) | |
428 hist /= s | |
429 | |
430 if (hist.shape != nbin - 2).any(): | |
431 raise RuntimeError( | |
432 "Internal Shape Error") | |
433 return hist, edges | |
434 | |
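# --- Editor's note: an illustrative sketch, not part of the file above. It
# shows the per-axis digitize step used by histogramdd(): values map to bins
# 1..len(edges)-1, while 0 and len(edges) act as the outlier bins that the
# function later strips via the slice(1, -1) "core" view. Names are
# hypothetical.
import numpy as np

edges = np.array([0.0, 1.0, 2.0])
vals = np.array([-0.5, 0.3, 1.7, 2.5])
print(np.digitize(vals, edges))   # [0 1 2 3] -- 0 and 3 are outlier bins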
435 | |
436 def average(a, axis=None, weights=None, returned=False): | |
437 """ | |
438 Compute the weighted average along the specified axis. | |
439 | |
440 Parameters | |
441 ---------- | |
442 a : array_like | |
443 Array containing data to be averaged. If `a` is not an array, a | |
444 conversion is attempted. | |
445 axis : int, optional | |
446 Axis along which to average `a`. If `None`, averaging is done over | |
447 the flattened array. | |
448 weights : array_like, optional | |
449 An array of weights associated with the values in `a`. Each value in | |
450 `a` contributes to the average according to its associated weight. | |
451 The weights array can either be 1-D (in which case its length must be | |
452 the size of `a` along the given axis) or of the same shape as `a`. | |
453 If `weights=None`, then all data in `a` are assumed to have a | |
454 weight equal to one. | |
455 returned : bool, optional | |
456 Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) | |
457 is returned, otherwise only the average is returned. | |
458 If `weights=None`, `sum_of_weights` is equivalent to the number of | |
459 elements over which the average is taken. | |
460 | |
461 | |
462 Returns | |
463 ------- | |
464 average, [sum_of_weights] : {array_type, double} | |
465 Return the average along the specified axis. When returned is `True`, | |
466 return a tuple with the average as the first element and the sum | |
467 of the weights as the second element. The return type is `Float` | |
468 if `a` is of integer type, otherwise it is of the same type as `a`. | |
469 `sum_of_weights` is of the same type as `average`. | |
470 | |
471 Raises | |
472 ------ | |
473 ZeroDivisionError | |
474 When all weights along axis are zero. See `numpy.ma.average` for a | |
475 version robust to this type of error. | |
476 TypeError | |
477 When the length of 1D `weights` is not the same as the shape of `a` | |
478 along axis. | |
479 | |
480 See Also | |
481 -------- | |
482 mean | |
483 | |
484 ma.average : average for masked arrays -- useful if your data contains | |
485 "missing" values | |
486 | |
487 Examples | |
488 -------- | |
489 >>> data = range(1,5) | |
490 >>> data | |
491 [1, 2, 3, 4] | |
492 >>> np.average(data) | |
493 2.5 | |
494 >>> np.average(range(1,11), weights=range(10,0,-1)) | |
495 4.0 | |
496 | |
497 >>> data = np.arange(6).reshape((3,2)) | |
498 >>> data | |
499 array([[0, 1], | |
500 [2, 3], | |
501 [4, 5]]) | |
502 >>> np.average(data, axis=1, weights=[1./4, 3./4]) | |
503 array([ 0.75, 2.75, 4.75]) | |
504 >>> np.average(data, weights=[1./4, 3./4]) | |
505 Traceback (most recent call last): | |
506 ... | |
507 TypeError: Axis must be specified when shapes of a and weights differ. | |
508 | |
509 """ | |
510 if not isinstance(a, np.matrix): | |
511 a = np.asarray(a) | |
512 | |
513 if weights is None: | |
514 avg = a.mean(axis) | |
515 scl = avg.dtype.type(a.size/avg.size) | |
516 else: | |
517 a = a + 0.0 | |
518 wgt = np.array(weights, dtype=a.dtype, copy=0) | |
519 | |
520 # Sanity checks | |
521 if a.shape != wgt.shape: | |
522 if axis is None: | |
523 raise TypeError( | |
524 "Axis must be specified when shapes of a and weights " | |
525 "differ.") | |
526 if wgt.ndim != 1: | |
527 raise TypeError( | |
528 "1D weights expected when shapes of a and weights differ.") | |
529 if wgt.shape[0] != a.shape[axis]: | |
530 raise ValueError( | |
531 "Length of weights not compatible with specified axis.") | |
532 | |
533 # setup wgt to broadcast along axis | |
534 wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) | |
535 | |
536 scl = wgt.sum(axis=axis) | |
537 if (scl == 0.0).any(): | |
538 raise ZeroDivisionError( | |
539 "Weights sum to zero, can't be normalized") | |
540 | |
541 avg = np.multiply(a, wgt).sum(axis)/scl | |
542 | |
543 if returned: | |
544 scl = np.multiply(avg, 0) + scl | |
545 return avg, scl | |
546 else: | |
547 return avg | |
548 | |
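# --- Editor's note: an illustrative sketch, not part of the file above. The
# weighted path of average() reduces to a multiply-sum divided by the weight
# sum, as checked here with hypothetical data.
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
w = np.array([4.0, 3.0, 2.0, 1.0])
assert np.allclose(np.average(a, weights=w), (a * w).sum() / w.sum())  # 2.0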
549 | |
550 def asarray_chkfinite(a, dtype=None, order=None): | |
551 """ | |
552 Convert the input to an array, checking for NaNs or Infs. | |
553 | |
554 Parameters | |
555 ---------- | |
556 a : array_like | |
557 Input data, in any form that can be converted to an array. This | |
558 includes lists, lists of tuples, tuples, tuples of tuples, tuples | |
559 of lists and ndarrays. Success requires no NaNs or Infs. | |
560 dtype : data-type, optional | |
561 By default, the data-type is inferred from the input data. | |
562 order : {'C', 'F'}, optional | |
563 Whether to use row-major ('C') or column-major ('FORTRAN') memory | |
564 representation. Defaults to 'C'. | |
565 | |
566 Returns | |
567 ------- | |
568 out : ndarray | |
569 Array interpretation of `a`. No copy is performed if the input | |
570 is already an ndarray. If `a` is a subclass of ndarray, a base | |
571 class ndarray is returned. | |
572 | |
573 Raises | |
574 ------ | |
575 ValueError | |
576 Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). | |
577 | |
578 See Also | |
579 -------- | |
580 asarray : Create an array. | |
581 asanyarray : Similar function which passes through subclasses. | |
582 ascontiguousarray : Convert input to a contiguous array. | |
583 asfarray : Convert input to a floating point ndarray. | |
584 asfortranarray : Convert input to an ndarray with column-major | |
585 memory order. | |
586 fromiter : Create an array from an iterator. | |
587 fromfunction : Construct an array by executing a function on grid | |
588 positions. | |
589 | |
590 Examples | |
591 -------- | |
592 Convert a list into an array. If all elements are finite | |
593 ``asarray_chkfinite`` is identical to ``asarray``. | |
594 | |
595 >>> a = [1, 2] | |
596 >>> np.asarray_chkfinite(a, dtype=float) | |
597 array([1., 2.]) | |
598 | |
599 Raises ValueError if array_like contains Nans or Infs. | |
600 | |
601 >>> a = [1, 2, np.inf] | |
602 >>> try: | |
603 ... np.asarray_chkfinite(a) | |
604 ... except ValueError: | |
605 ... print('ValueError') | |
606 ... | |
607 ValueError | |
608 | |
609 """ | |
610 a = asarray(a, dtype=dtype, order=order) | |
611 if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): | |
612 raise ValueError( | |
613 "array must not contain infs or NaNs") | |
614 return a | |
615 | |
616 | |
617 def piecewise(x, condlist, funclist, *args, **kw): | |
618 """ | |
619 Evaluate a piecewise-defined function. | |
620 | |
621 Given a set of conditions and corresponding functions, evaluate each | |
622 function on the input data wherever its condition is true. | |
623 | |
624 Parameters | |
625 ---------- | |
626 x : ndarray | |
627 The input domain. | |
628 condlist : list of bool arrays | |
629 Each boolean array corresponds to a function in `funclist`. Wherever | |
630 `condlist[i]` is True, `funclist[i](x)` is used as the output value. | |
631 | |
632 Each boolean array in `condlist` selects a piece of `x`, | |
633 and should therefore be of the same shape as `x`. | |
634 | |
635 The length of `condlist` must correspond to that of `funclist`. | |
636 If one extra function is given, i.e. if | |
637 ``len(funclist) - len(condlist) == 1``, then that extra function | |
638 is the default value, used wherever all conditions are false. | |
639 funclist : list of callables, f(x,*args,**kw), or scalars | |
640 Each function is evaluated over `x` wherever its corresponding | |
641 condition is True. It should take an array as input and give an array | |
642 or a scalar value as output. If, instead of a callable, | |
643 a scalar is provided then a constant function (``lambda x: scalar``) is | |
644 assumed. | |
645 args : tuple, optional | |
646 Any further arguments given to `piecewise` are passed to the functions | |
647 upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then | |
648 each function is called as ``f(x, 1, 'a')``. | |
649 kw : dict, optional | |
650 Keyword arguments used in calling `piecewise` are passed to the | |
651 functions upon execution, i.e., if called | |
652 ``piecewise(..., ..., lambda=1)``, then each function is called as | |
653 ``f(x, lambda=1)``. | |
654 | |
655 Returns | |
656 ------- | |
657 out : ndarray | |
658 The output is the same shape and type as x and is found by | |
659 calling the functions in `funclist` on the appropriate portions of `x`, | |
660 as defined by the boolean arrays in `condlist`. Portions not covered | |
661 by any condition have a default value of 0. | |
662 | |
663 | |
664 See Also | |
665 -------- | |
666 choose, select, where | |
667 | |
668 Notes | |
669 ----- | |
670 This is similar to choose or select, except that functions are | |
671 evaluated on elements of `x` that satisfy the corresponding condition from | |
672 `condlist`. | |
673 | |
674 The result is:: | |
675 | |
676 |-- | |
677 |funclist[0](x[condlist[0]]) | |
678 out = |funclist[1](x[condlist[1]]) | |
679 |... | |
680 |funclist[n2](x[condlist[n2]]) | |
681 |-- | |
682 | |
683 Examples | |
684 -------- | |
685 Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. | |
686 | |
687 >>> x = np.linspace(-2.5, 2.5, 6) | |
688 >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) | |
689 array([-1., -1., -1., 1., 1., 1.]) | |
690 | |
691 Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for | |
692 ``x >= 0``. | |
693 | |
694 >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) | |
695 array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) | |
696 | |
697 """ | |
698 x = asanyarray(x) | |
699 n2 = len(funclist) | |
700 if (isscalar(condlist) or not (isinstance(condlist[0], list) or | |
701 isinstance(condlist[0], ndarray))): | |
702 condlist = [condlist] | |
703 condlist = array(condlist, dtype=bool) | |
704 n = len(condlist) | |
705 # This is a hack to work around problems with NumPy's | |
706 # handling of 0-d arrays and boolean indexing with | |
707 # numpy.bool_ scalars | |
708 zerod = False | |
709 if x.ndim == 0: | |
710 x = x[None] | |
711 zerod = True | |
712 if condlist.shape[-1] != 1: | |
713 condlist = condlist.T | |
714 if n == n2 - 1: # compute the "otherwise" condition. | |
715 totlist = np.logical_or.reduce(condlist, axis=0) | |
716 condlist = np.vstack([condlist, ~totlist]) | |
717 n += 1 | |
718 if (n != n2): | |
719 raise ValueError( | |
720 "function list and condition list must be the same") | |
721 | |
722 y = zeros(x.shape, x.dtype) | |
723 for k in range(n): | |
724 item = funclist[k] | |
725 if not isinstance(item, collections.Callable): | |
726 y[condlist[k]] = item | |
727 else: | |
728 vals = x[condlist[k]] | |
729 if vals.size > 0: | |
730 y[condlist[k]] = item(vals, *args, **kw) | |
731 if zerod: | |
732 y = y.squeeze() | |
733 return y | |
734 | |
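# --- Editor's note: an illustrative sketch, not part of the file above. When
# funclist has one more entry than condlist, piecewise() uses that extra
# entry wherever no condition holds (the "otherwise" branch built with
# ~logical_or.reduce). The data below is hypothetical.
import numpy as np

x = np.array([-2.0, -0.5, 0.5, 2.0])
y = np.piecewise(x, [x < -1, x > 1], [-1.0, 1.0, 0.0])
print(y)   # [-1.  0.  0.  1.] -- 0.0 fills the uncovered middle points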
735 | |
736 def select(condlist, choicelist, default=0): | |
737 """ | |
738 Return an array drawn from elements in choicelist, depending on conditions. | |
739 | |
740 Parameters | |
741 ---------- | |
742 condlist : list of bool ndarrays | |
743 The list of conditions which determine from which array in `choicelist` | |
744 the output elements are taken. When multiple conditions are satisfied, | |
745 the first one encountered in `condlist` is used. | |
746 choicelist : list of ndarrays | |
747 The list of arrays from which the output elements are taken. It has | |
748 to be of the same length as `condlist`. | |
749 default : scalar, optional | |
750 The element inserted in `output` when all conditions evaluate to False. | |
751 | |
752 Returns | |
753 ------- | |
754 output : ndarray | |
755 The output at position m is the m-th element of the array in | |
756 `choicelist` where the m-th element of the corresponding array in | |
757 `condlist` is True. | |
758 | |
759 See Also | |
760 -------- | |
761 where : Return elements from one of two arrays depending on condition. | |
762 take, choose, compress, diag, diagonal | |
763 | |
764 Examples | |
765 -------- | |
766 >>> x = np.arange(10) | |
767 >>> condlist = [x<3, x>5] | |
768 >>> choicelist = [x, x**2] | |
769 >>> np.select(condlist, choicelist) | |
770 array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) | |
771 | |
772 """ | |
773 # Check the size of condlist and choicelist are the same, or abort. | |
774 if len(condlist) != len(choicelist): | |
775 raise ValueError( | |
776 'list of cases must be same length as list of conditions') | |
777 | |
778 # Now that the dtype is known, handle the deprecated select([], []) case | |
779 if len(condlist) == 0: | |
780 warnings.warn("select with an empty condition list is not possible " | |
781 "and will be deprecated", | |
782 DeprecationWarning) | |
783 return np.asarray(default)[()] | |
784 | |
785 choicelist = [np.asarray(choice) for choice in choicelist] | |
786 choicelist.append(np.asarray(default)) | |
787 | |
788 # need to get the result type before broadcasting for correct scalar | |
789 # behaviour | |
790 dtype = np.result_type(*choicelist) | |
791 | |
792 # Convert conditions to arrays and broadcast conditions and choices | |
793 # as the shape is needed for the result. Doing it separately optimizes | |
794 # for example when all choices are scalars. | |
795 condlist = np.broadcast_arrays(*condlist) | |
796 choicelist = np.broadcast_arrays(*choicelist) | |
797 | |
798 # If cond array is not an ndarray in boolean format or scalar bool, abort. | |
799 deprecated_ints = False | |
800 for i in range(len(condlist)): | |
801 cond = condlist[i] | |
802 if cond.dtype.type is not np.bool_: | |
803 if np.issubdtype(cond.dtype, np.integer): | |
804 # A previous implementation accepted int ndarrays accidentally. | |
805 # Supported here deliberately, but deprecated. | |
806 condlist[i] = condlist[i].astype(bool) | |
807 deprecated_ints = True | |
808 else: | |
809 raise ValueError( | |
810 'invalid entry in choicelist: should be boolean ndarray') | |
811 | |
812 if deprecated_ints: | |
813 msg = "select condlists containing integer ndarrays is deprecated " \ | |
814 "and will be removed in the future. Use `.astype(bool)` to " \ | |
815 "convert to bools." | |
816 warnings.warn(msg, DeprecationWarning) | |
817 | |
818 if choicelist[0].ndim == 0: | |
819 # This may be common, so avoid the call. | |
820 result_shape = condlist[0].shape | |
821 else: | |
822 result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape | |
823 | |
824 result = np.full(result_shape, choicelist[-1], dtype) | |
825 | |
826 # Use np.copyto to burn each choicelist array onto result, using the | |
827 # corresponding condlist as a boolean mask. This is done in reverse | |
828 # order since the first choice should take precedence. | |
829 choicelist = choicelist[-2::-1] | |
830 condlist = condlist[::-1] | |
831 for choice, cond in zip(choicelist, condlist): | |
832 np.copyto(result, choice, where=cond) | |
833 | |
834 return result | |
835 | |
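# --- Editor's note: an illustrative sketch, not part of the file above. It
# demonstrates that select() gives precedence to the first matching
# condition, which is why the implementation applies np.copyto in reverse
# order. The data below is hypothetical.
import numpy as np

x = np.arange(6)
out = np.select([x < 4, x < 2], [x, x * 100], default=-1)
print(out)   # [ 0  1  2  3 -1 -1] -- x < 4 wins even where x < 2 also holds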
836 | |
837 def copy(a, order='K'): | |
838 """ | |
839 Return an array copy of the given object. | |
840 | |
841 Parameters | |
842 ---------- | |
843 a : array_like | |
844 Input data. | |
845 order : {'C', 'F', 'A', 'K'}, optional | |
846 Controls the memory layout of the copy. 'C' means C-order, | |
847 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, | |
848 'C' otherwise. 'K' means match the layout of `a` as closely | |
849 as possible. (Note that this function and :meth:`ndarray.copy` are very | |
850 similar, but have different default values for their order= | |
851 arguments.) | |
852 | |
853 Returns | |
854 ------- | |
855 arr : ndarray | |
856 Array interpretation of `a`. | |
857 | |
858 Notes | |
859 ----- | |
860 This is equivalent to | |
861 | |
862 >>> np.array(a, copy=True) #doctest: +SKIP | |
863 | |
864 Examples | |
865 -------- | |
866 Create an array x, with a reference y and a copy z: | |
867 | |
868 >>> x = np.array([1, 2, 3]) | |
869 >>> y = x | |
870 >>> z = np.copy(x) | |
871 | |
872 Note that, when we modify x, y changes, but not z: | |
873 | |
874 >>> x[0] = 10 | |
875 >>> x[0] == y[0] | |
876 True | |
877 >>> x[0] == z[0] | |
878 False | |
879 | |
880 """ | |
881 return array(a, order=order, copy=True) | |
882 | |
883 # Basic operations | |
884 | |
885 | |
886 def gradient(f, *varargs, **kwargs): | |
887 """ | |
888 Return the gradient of an N-dimensional array. | |
889 | |
890 The gradient is computed using second order accurate central differences | |
891 in the interior and either first differences or second order accurate | |
892 one-sided (forward or backwards) differences at the boundaries. The | |
893 returned gradient hence has the same shape as the input array. | |
894 | |
895 Parameters | |
896 ---------- | |
897 f : array_like | |
898 An N-dimensional array containing samples of a scalar function. | |
899 varargs : list of scalar, optional | |
900 N scalars specifying the sample distances for each dimension, | |
901 i.e. `dx`, `dy`, `dz`, ... Default distance: 1. | |
902 edge_order : {1, 2}, optional | |
903 Gradient is calculated using N\ :sup:`th` order accurate differences | |
904 at the boundaries. Default: 1. | |
905 | |
906 .. versionadded:: 1.9.1 | |
907 | |
908 Returns | |
909 ------- | |
910 gradient : ndarray | |
911 N arrays of the same shape as `f` giving the derivative of `f` with | |
912 respect to each dimension. | |
913 | |
914 Examples | |
915 -------- | |
916 >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) | |
917 >>> np.gradient(x) | |
918 array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) | |
919 >>> np.gradient(x, 2) | |
920 array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) | |
921 | |
922 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) | |
923 [array([[ 2., 2., -1.], | |
924 [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], | |
925 [ 1. , 1. , 1. ]])] | |
926 | |
927 >>> x = np.array([0, 1, 2, 3, 4]) | |
928 >>> dx = np.gradient(x) | |
929 >>> y = x**2 | |
930 >>> np.gradient(y, dx, edge_order=2) | |
931 array([-0., 2., 4., 6., 8.]) | |
932 """ | |
933 f = np.asanyarray(f) | |
934 N = len(f.shape) # number of dimensions | |
935 n = len(varargs) | |
936 if n == 0: | |
937 dx = [1.0]*N | |
938 elif n == 1: | |
939 dx = [varargs[0]]*N | |
940 elif n == N: | |
941 dx = list(varargs) | |
942 else: | |
943 raise SyntaxError( | |
944 "invalid number of arguments") | |
945 | |
946 edge_order = kwargs.pop('edge_order', 1) | |
947 if kwargs: | |
948 raise TypeError('"{}" are not valid keyword arguments.'.format( | |
949 '", "'.join(kwargs.keys()))) | |
950 if edge_order > 2: | |
951 raise ValueError("'edge_order' greater than 2 not supported") | |
952 | |
953 # use central differences on interior and one-sided differences on the | |
954 # endpoints. This preserves second order-accuracy over the full domain. | |
955 | |
956 outvals = [] | |
957 | |
958 # create slice objects --- initially all are [:, :, ..., :] | |
959 slice1 = [slice(None)]*N | |
960 slice2 = [slice(None)]*N | |
961 slice3 = [slice(None)]*N | |
962 slice4 = [slice(None)]*N | |
963 | |
964 otype = f.dtype.char | |
965 if otype not in ['f', 'd', 'F', 'D', 'm', 'M']: | |
966 otype = 'd' | |
967 | |
968 # Difference of datetime64 elements results in timedelta64 | |
969 if otype == 'M': | |
970 # Need to use the full dtype name because it contains unit information | |
971 otype = f.dtype.name.replace('datetime', 'timedelta') | |
972 elif otype == 'm': | |
973 # Needs to keep the specific units, can't be a general unit | |
974 otype = f.dtype | |
975 | |
976 # Convert datetime64 data into ints. Make dummy variable `y` | |
977 # that is a view of ints if the data is datetime64, otherwise | |
978 # just set y equal to the array `f`. | |
979 if f.dtype.char in ["M", "m"]: | |
980 y = f.view('int64') | |
981 else: | |
982 y = f | |
983 | |
984 for axis in range(N): | |
985 | |
986 if y.shape[axis] < 2: | |
987 raise ValueError( | |
988 "Shape of array too small to calculate a numerical gradient, " | |
989 "at least two elements are required.") | |
990 | |
991 # Numerical differentiation: 1st order edges, 2nd order interior | |
992 if y.shape[axis] == 2 or edge_order == 1: | |
993 # Use first order differences for time data | |
994 out = np.empty_like(y, dtype=otype) | |
995 | |
996 slice1[axis] = slice(1, -1) | |
997 slice2[axis] = slice(2, None) | |
998 slice3[axis] = slice(None, -2) | |
999 # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 | |
1000 out[slice1] = (y[slice2] - y[slice3])/2.0 | |
1001 | |
1002 slice1[axis] = 0 | |
1003 slice2[axis] = 1 | |
1004 slice3[axis] = 0 | |
1005 # 1D equivalent -- out[0] = (y[1] - y[0]) | |
1006 out[slice1] = (y[slice2] - y[slice3]) | |
1007 | |
1008 slice1[axis] = -1 | |
1009 slice2[axis] = -1 | |
1010 slice3[axis] = -2 | |
1011 # 1D equivalent -- out[-1] = (y[-1] - y[-2]) | |
1012 out[slice1] = (y[slice2] - y[slice3]) | |
1013 | |
1014 # Numerical differentiation: 2nd order edges, 2nd order interior | |
1015 else: | |
1016 # Use second order differences where possible | |
1017 out = np.empty_like(y, dtype=otype) | |
1018 | |
1019 slice1[axis] = slice(1, -1) | |
1020 slice2[axis] = slice(2, None) | |
1021 slice3[axis] = slice(None, -2) | |
1022 # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0 | |
1023 out[slice1] = (y[slice2] - y[slice3])/2.0 | |
1024 | |
1025 slice1[axis] = 0 | |
1026 slice2[axis] = 0 | |
1027 slice3[axis] = 1 | |
1028 slice4[axis] = 2 | |
1029 # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0 | |
1030 out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 | |
1031 | |
1032 slice1[axis] = -1 | |
1033 slice2[axis] = -1 | |
1034 slice3[axis] = -2 | |
1035 slice4[axis] = -3 | |
1036 # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0 | |
1037 out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0 | |
1038 | |
1039 # divide by step size | |
1040 out /= dx[axis] | |
1041 outvals.append(out) | |
1042 | |
1043 # reset the slice object in this dimension to ":" | |
1044 slice1[axis] = slice(None) | |
1045 slice2[axis] = slice(None) | |
1046 slice3[axis] = slice(None) | |
1047 slice4[axis] = slice(None) | |
1048 | |
1049 if N == 1: | |
1050 return outvals[0] | |
1051 else: | |
1052 return outvals | |
1053 | |
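# --- Editor's note: an illustrative sketch, not part of the file above. The
# interior points of gradient() are plain central differences,
# (y[2:] - y[:-2]) / (2*dx), as checked here with hypothetical data.
import numpy as np

y = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
g = np.gradient(y)                        # dx defaults to 1
assert np.allclose(g[1:-1], (y[2:] - y[:-2]) / 2.0)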
1054 | |
1055 def diff(a, n=1, axis=-1): | |
1056 """ | |
1057 Calculate the n-th order discrete difference along given axis. | |
1058 | |
1059 The first order difference is given by ``out[n] = a[n+1] - a[n]`` along | |
1060 the given axis, higher order differences are calculated by using `diff` | |
1061 recursively. | |
1062 | |
1063 Parameters | |
1064 ---------- | |
1065 a : array_like | |
1066 Input array | |
1067 n : int, optional | |
1068 The number of times values are differenced. | |
1069 axis : int, optional | |
1070 The axis along which the difference is taken, default is the last axis. | |
1071 | |
1072 Returns | |
1073 ------- | |
1074 diff : ndarray | |
1075 The `n` order differences. The shape of the output is the same as `a` | |
1076 except along `axis` where the dimension is smaller by `n`. | |
1077 | |
1078 See Also | |
1079 -------- | |
1080 gradient, ediff1d, cumsum | |
1081 | |
1082 Examples | |
1083 -------- | |
1084 >>> x = np.array([1, 2, 4, 7, 0]) | |
1085 >>> np.diff(x) | |
1086 array([ 1, 2, 3, -7]) | |
1087 >>> np.diff(x, n=2) | |
1088 array([ 1, 1, -10]) | |
1089 | |
1090 >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) | |
1091 >>> np.diff(x) | |
1092 array([[2, 3, 4], | |
1093 [5, 1, 2]]) | |
1094 >>> np.diff(x, axis=0) | |
1095 array([[-1, 2, 0, -2]]) | |
1096 | |
1097 """ | |
1098 if n == 0: | |
1099 return a | |
1100 if n < 0: | |
1101 raise ValueError( | |
1102 "order must be non-negative but got " + repr(n)) | |
1103 a = asanyarray(a) | |
1104 nd = len(a.shape) | |
1105 slice1 = [slice(None)]*nd | |
1106 slice2 = [slice(None)]*nd | |
1107 slice1[axis] = slice(1, None) | |
1108 slice2[axis] = slice(None, -1) | |
1109 slice1 = tuple(slice1) | |
1110 slice2 = tuple(slice2) | |
1111 if n > 1: | |
1112 return diff(a[slice1]-a[slice2], n-1, axis=axis) | |
1113 else: | |
1114 return a[slice1]-a[slice2] | |
1115 | |
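# --- Editor's note: an illustrative sketch, not part of the file above.
# Higher-order differences are computed by recursion, so diff(x, n=2) equals
# diff applied twice.
import numpy as np

x = np.array([1, 2, 4, 7, 0])
assert (np.diff(x, n=2) == np.diff(np.diff(x))).all()   # [  1   1 -10]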
1116 | |
1117 def interp(x, xp, fp, left=None, right=None): | |
1118 """ | |
1119 One-dimensional linear interpolation. | |
1120 | |
1121 Returns the one-dimensional piecewise linear interpolant to a function | |
1122 with given values at discrete data-points. | |
1123 | |
1124 Parameters | |
1125 ---------- | |
1126 x : array_like | |
1127 The x-coordinates of the interpolated values. | |
1128 | |
1129 xp : 1-D sequence of floats | |
1130 The x-coordinates of the data points, must be increasing. | |
1131 | |
1132 fp : 1-D sequence of floats | |
1133 The y-coordinates of the data points, same length as `xp`. | |
1134 | |
1135 left : float, optional | |
1136 Value to return for `x < xp[0]`, default is `fp[0]`. | |
1137 | |
1138 right : float, optional | |
1139 Value to return for `x > xp[-1]`, default is `fp[-1]`. | |
1140 | |
1141 Returns | |
1142 ------- | |
1143 y : {float, ndarray} | |
1144 The interpolated values, same shape as `x`. | |
1145 | |
1146 Raises | |
1147 ------ | |
1148 ValueError | |
1149 If `xp` and `fp` have different lengths | |
1150 | |
1151 Notes | |
1152 ----- | |
1153 Does not check that the x-coordinate sequence `xp` is increasing. | |
1154 If `xp` is not increasing, the results are nonsense. | |
1155 A simple check for increasing is:: | |
1156 | |
1157 np.all(np.diff(xp) > 0) | |
1158 | |
1159 | |
1160 Examples | |
1161 -------- | |
1162 >>> xp = [1, 2, 3] | |
1163 >>> fp = [3, 2, 0] | |
1164 >>> np.interp(2.5, xp, fp) | |
1165 1.0 | |
1166 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) | |
1167 array([ 3. , 3. , 2.5 , 0.56, 0. ]) | |
1168 >>> UNDEF = -99.0 | |
1169 >>> np.interp(3.14, xp, fp, right=UNDEF) | |
1170 -99.0 | |
1171 | |
1172 Plot an interpolant to the sine function: | |
1173 | |
1174 >>> x = np.linspace(0, 2*np.pi, 10) | |
1175 >>> y = np.sin(x) | |
1176 >>> xvals = np.linspace(0, 2*np.pi, 50) | |
1177 >>> yinterp = np.interp(xvals, x, y) | |
1178 >>> import matplotlib.pyplot as plt | |
1179 >>> plt.plot(x, y, 'o') | |
1180 [<matplotlib.lines.Line2D object at 0x...>] | |
1181 >>> plt.plot(xvals, yinterp, '-x') | |
1182 [<matplotlib.lines.Line2D object at 0x...>] | |
1183 >>> plt.show() | |
1184 | |
1185 """ | |
1186 if isinstance(x, (float, int, number)): | |
1187 return compiled_interp([x], xp, fp, left, right).item() | |
1188 elif isinstance(x, np.ndarray) and x.ndim == 0: | |
1189 return compiled_interp([x], xp, fp, left, right).item() | |
1190 else: | |
1191 return compiled_interp(x, xp, fp, left, right) | |
1192 | |
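# --- Editor's note: an illustrative sketch, not part of the file above. For
# a point between two knots, interp() returns the usual linear blend of the
# neighbouring fp values; xp, fp and x below are hypothetical.
import numpy as np

xp = np.array([1.0, 2.0, 3.0])
fp = np.array([3.0, 2.0, 0.0])
x = 2.5
t = (x - xp[1]) / (xp[2] - xp[1])
assert np.isclose(np.interp(x, xp, fp), (1 - t) * fp[1] + t * fp[2])  # 1.0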
1193 | |
1194 def angle(z, deg=0): | |
1195 """ | |
1196 Return the angle of the complex argument. | |
1197 | |
1198 Parameters | |
1199 ---------- | |
1200 z : array_like | |
1201 A complex number or sequence of complex numbers. | |
1202 deg : bool, optional | |
1203 Return angle in degrees if True, radians if False (default). | |
1204 | |
1205 Returns | |
1206 ------- | |
1207 angle : {ndarray, scalar} | |
1208 The counterclockwise angle from the positive real axis on | |
1209 the complex plane, with dtype as numpy.float64. | |
1210 | |
1211 See Also | |
1212 -------- | |
1213 arctan2 | |
1214 absolute | |
1215 | |
1216 | |
1217 | |
1218 Examples | |
1219 -------- | |
1220 >>> np.angle([1.0, 1.0j, 1+1j]) # in radians | |
1221 array([ 0. , 1.57079633, 0.78539816]) | |
1222 >>> np.angle(1+1j, deg=True) # in degrees | |
1223 45.0 | |
1224 | |
1225 """ | |
1226 if deg: | |
1227 fact = 180/pi | |
1228 else: | |
1229 fact = 1.0 | |
1230 z = asarray(z) | |
1231 if (issubclass(z.dtype.type, _nx.complexfloating)): | |
1232 zimag = z.imag | |
1233 zreal = z.real | |
1234 else: | |
1235 zimag = 0 | |
1236 zreal = z | |
1237 return arctan2(zimag, zreal) * fact | |
1238 | |
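# --- Editor's note: an illustrative sketch, not part of the file above.
# angle() is arctan2 of the imaginary and real parts, optionally scaled to
# degrees; z below is hypothetical.
import numpy as np

z = np.array([1 + 1j, -1 + 0j])
assert np.allclose(np.angle(z), np.arctan2(z.imag, z.real))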
1239 | |
1240 def unwrap(p, discont=pi, axis=-1): | |
1241 """ | |
1242 Unwrap by changing deltas between values to 2*pi complement. | |
1243 | |
1244 Unwrap radian phase `p` by changing absolute jumps greater than | |
1245 `discont` to their 2*pi complement along the given axis. | |
1246 | |
1247 Parameters | |
1248 ---------- | |
1249 p : array_like | |
1250 Input array. | |
1251 discont : float, optional | |
1252 Maximum discontinuity between values, default is ``pi``. | |
1253 axis : int, optional | |
1254 Axis along which unwrap will operate, default is the last axis. | |
1255 | |
1256 Returns | |
1257 ------- | |
1258 out : ndarray | |
1259 Output array. | |
1260 | |
1261 See Also | |
1262 -------- | |
1263 rad2deg, deg2rad | |
1264 | |
1265 Notes | |
1266 ----- | |
1267 If the discontinuity in `p` is smaller than ``pi``, but larger than | |
1268 `discont`, no unwrapping is done because taking the 2*pi complement | |
1269 would only make the discontinuity larger. | |
1270 | |
1271 Examples | |
1272 -------- | |
1273 >>> phase = np.linspace(0, np.pi, num=5) | |
1274 >>> phase[3:] += np.pi | |
1275 >>> phase | |
1276 array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) | |
1277 >>> np.unwrap(phase) | |
1278 array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) | |
1279 | |
1280 """ | |
1281 p = asarray(p) | |
1282 nd = len(p.shape) | |
1283 dd = diff(p, axis=axis) | |
1284 slice1 = [slice(None, None)]*nd # full slices | |
1285 slice1[axis] = slice(1, None) | |
1286 ddmod = mod(dd + pi, 2*pi) - pi | |
1287 _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0)) | |
1288 ph_correct = ddmod - dd | |
1289 _nx.copyto(ph_correct, 0, where=abs(dd) < discont) | |
1290 up = array(p, copy=True, dtype='d') | |
1291 up[slice1] = p[slice1] + ph_correct.cumsum(axis) | |
1292 return up | |
1293 | |
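# --- Editor's note: an illustrative sketch, not part of the file above.
# unwrap() keeps the first sample fixed and shifts later samples by exact
# multiples of 2*pi so that successive jumps are no larger than pi in
# magnitude; the phase array below is hypothetical.
import numpy as np

phase = np.array([0.0, 0.2, 6.4])       # jump of 6.2 > pi between samples
u = np.unwrap(phase)
assert u[0] == phase[0]
assert np.all(np.abs(np.diff(u)) <= np.pi)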
1294 | |
1295 def sort_complex(a): | |
1296 """ | |
1297 Sort a complex array using the real part first, then the imaginary part. | |
1298 | |
1299 Parameters | |
1300 ---------- | |
1301 a : array_like | |
1302 Input array | |
1303 | |
1304 Returns | |
1305 ------- | |
1306 out : complex ndarray | |
1307 Always returns a sorted complex array. | |
1308 | |
1309 Examples | |
1310 -------- | |
1311 >>> np.sort_complex([5, 3, 6, 2, 1]) | |
1312 array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) | |
1313 | |
1314 >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) | |
1315 array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) | |
1316 | |
1317 """ | |
1318 b = array(a, copy=True) | |
1319 b.sort() | |
1320 if not issubclass(b.dtype.type, _nx.complexfloating): | |
1321 if b.dtype.char in 'bhBH': | |
1322 return b.astype('F') | |
1323 elif b.dtype.char == 'g': | |
1324 return b.astype('G') | |
1325 else: | |
1326 return b.astype('D') | |
1327 else: | |
1328 return b | |
1329 | |
1330 | |
1331 def trim_zeros(filt, trim='fb'): | |
1332 """ | |
1333 Trim the leading and/or trailing zeros from a 1-D array or sequence. | |
1334 | |
1335 Parameters | |
1336 ---------- | |
1337 filt : 1-D array or sequence | |
1338 Input array. | |
1339 trim : str, optional | |
1340 A string with 'f' representing trim from front and 'b' to trim from | |
1341 back. Default is 'fb', trim zeros from both front and back of the | |
1342 array. | |
1343 | |
1344 Returns | |
1345 ------- | |
1346 trimmed : 1-D array or sequence | |
1347 The result of trimming the input. The input data type is preserved. | |
1348 | |
1349 Examples | |
1350 -------- | |
1351 >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) | |
1352 >>> np.trim_zeros(a) | |
1353 array([1, 2, 3, 0, 2, 1]) | |
1354 | |
1355 >>> np.trim_zeros(a, 'b') | |
1356 array([0, 0, 0, 1, 2, 3, 0, 2, 1]) | |
1357 | |
1358 The input data type is preserved, list/tuple in means list/tuple out. | |
1359 | |
1360 >>> np.trim_zeros([0, 1, 2, 0]) | |
1361 [1, 2] | |
1362 | |
1363 """ | |
1364 first = 0 | |
1365 trim = trim.upper() | |
1366 if 'F' in trim: | |
1367 for i in filt: | |
1368 if i != 0.: | |
1369 break | |
1370 else: | |
1371 first = first + 1 | |
1372 last = len(filt) | |
1373 if 'B' in trim: | |
1374 for i in filt[::-1]: | |
1375 if i != 0.: | |
1376 break | |
1377 else: | |
1378 last = last - 1 | |
1379 return filt[first:last] | |
1380 | |
1381 | |
1382 @deprecate | |
1383 def unique(x): | |
1384 """ | |
1385 This function is deprecated. Use numpy.lib.arraysetops.unique() | |
1386 instead. | |
1387 """ | |
1388 try: | |
1389 tmp = x.flatten() | |
1390 if tmp.size == 0: | |
1391 return tmp | |
1392 tmp.sort() | |
1393 idx = concatenate(([True], tmp[1:] != tmp[:-1])) | |
1394 return tmp[idx] | |
1395 except AttributeError: | |
1396 items = sorted(set(x)) | |
1397 return asarray(items) | |
1398 | |
1399 | |
1400 def extract(condition, arr): | |
1401 """ | |
1402 Return the elements of an array that satisfy some condition. | |
1403 | |
1404 This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If | |
1405 `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. | |
1406 | |
1407 Parameters | |
1408 ---------- | |
1409 condition : array_like | |
1410 An array whose nonzero or True entries indicate the elements of `arr` | |
1411 to extract. | |
1412 arr : array_like | |
1413 Input array of the same size as `condition`. | |
1414 | |
1415 Returns | |
1416 ------- | |
1417 extract : ndarray | |
1418 Rank 1 array of values from `arr` where `condition` is True. | |
1419 | |
1420 See Also | |
1421 -------- | |
1422 take, put, copyto, compress | |
1423 | |
1424 Examples | |
1425 -------- | |
1426 >>> arr = np.arange(12).reshape((3, 4)) | |
1427 >>> arr | |
1428 array([[ 0, 1, 2, 3], | |
1429 [ 4, 5, 6, 7], | |
1430 [ 8, 9, 10, 11]]) | |
1431 >>> condition = np.mod(arr, 3)==0 | |
1432 >>> condition | |
1433 array([[ True, False, False, True], | |
1434 [False, False, True, False], | |
1435 [False, True, False, False]], dtype=bool) | |
1436 >>> np.extract(condition, arr) | |
1437 array([0, 3, 6, 9]) | |
1438 | |
1439 | |
1440 If `condition` is boolean: | |
1441 | |
1442 >>> arr[condition] | |
1443 array([0, 3, 6, 9]) | |
1444 | |
1445 """ | |
1446 return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) | |
1447 | |
1448 | |
1449 def place(arr, mask, vals): | |
1450 """ | |
1451 Change elements of an array based on conditional and input values. | |
1452 | |
1453 Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that | |
1454 `place` uses the first N elements of `vals`, where N is the number of | |
1455 True values in `mask`, while `copyto` uses the elements where `mask` | |
1456 is True. | |
1457 | |
1458 Note that `extract` does the exact opposite of `place`. | |
1459 | |
1460 Parameters | |
1461 ---------- | |
1462 arr : array_like | |
1463 Array to put data into. | |
1464 mask : array_like | |
1465 Boolean mask array. Must have the same size as `arr`. | |
1466 vals : 1-D sequence | |
1467 Values to put into `arr`. Only the first N elements are used, where | |
1468 N is the number of True values in `mask`. If `vals` is smaller | |
1469 than N it will be repeated. | |
1470 | |
1471 See Also | |
1472 -------- | |
1473 copyto, put, take, extract | |
1474 | |
1475 Examples | |
1476 -------- | |
1477 >>> arr = np.arange(6).reshape(2, 3) | |
1478 >>> np.place(arr, arr>2, [44, 55]) | |
1479 >>> arr | |
1480 array([[ 0, 1, 2], | |
1481 [44, 55, 44]]) | |
1482 | |
1483 """ | |
1484 return _insert(arr, mask, vals) | |
1485 | |
1486 | |
1487 def disp(mesg, device=None, linefeed=True): | |
1488 """ | |
1489 Display a message on a device. | |
1490 | |
1491 Parameters | |
1492 ---------- | |
1493 mesg : str | |
1494 Message to display. | |
1495 device : object | |
1496 Device to write message. If None, defaults to ``sys.stdout`` which is | |
1497 very similar to ``print``. `device` needs to have ``write()`` and | |
1498 ``flush()`` methods. | |
1499 linefeed : bool, optional | |
1500 Option whether to print a line feed or not. Defaults to True. | |
1501 | |
1502 Raises | |
1503 ------ | |
1504 AttributeError | |
1505 If `device` does not have a ``write()`` or ``flush()`` method. | |
1506 | |
1507 Examples | |
1508 -------- | |
1509 Besides ``sys.stdout``, a file-like object can also be used as it has | |
1510 both required methods: | |
1511 | |
1512 >>> from StringIO import StringIO | |
1513 >>> buf = StringIO() | |
1514 >>> np.disp('"Display" in a file', device=buf) | |
1515 >>> buf.getvalue() | |
1516 '"Display" in a file\\n' | |
1517 | |
1518 """ | |
1519 if device is None: | |
1520 device = sys.stdout | |
1521 if linefeed: | |
1522 device.write('%s\n' % mesg) | |
1523 else: | |
1524 device.write('%s' % mesg) | |
1525 device.flush() | |
1526 return | |
1527 | |
1528 | |
1529 class vectorize(object): | |
1530 """ | |
1531 vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False) | |
1532 | |
1533 Generalized function class. | |
1534 | |
1535 Define a vectorized function which takes a nested sequence | |
1536 of objects or numpy arrays as inputs and returns a | |
1537 numpy array as output. The vectorized function evaluates `pyfunc` over | |
1538 successive tuples of the input arrays like the python map function, | |
1539 except it uses the broadcasting rules of numpy. | |
1540 | |
1541 The data type of the output of `vectorized` is determined by calling | |
1542 the function with the first element of the input. This can be avoided | |
1543 by specifying the `otypes` argument. | |
1544 | |
1545 Parameters | |
1546 ---------- | |
1547 pyfunc : callable | |
1548 A python function or method. | |
1549 otypes : str or list of dtypes, optional | |
1550 The output data type. It must be specified as either a string of | |
1551 typecode characters or a list of data type specifiers. There should | |
1552 be one data type specifier for each output. | |
1553 doc : str, optional | |
1554 The docstring for the function. If `None`, the docstring will be the | |
1555 ``pyfunc.__doc__``. | |
1556 excluded : set, optional | |
1557 Set of strings or integers representing the positional or keyword | |
1558 arguments for which the function will not be vectorized. These will be | |
1559 passed directly to `pyfunc` unmodified. | |
1560 | |
1561 .. versionadded:: 1.7.0 | |
1562 | |
1563 cache : bool, optional | |
1564 If `True`, then cache the first function call that determines the number | |
1565 of outputs if `otypes` is not provided. | |
1566 | |
1567 .. versionadded:: 1.7.0 | |
1568 | |
1569 Returns | |
1570 ------- | |
1571 vectorized : callable | |
1572 Vectorized function. | |
1573 | |
1574 Examples | |
1575 -------- | |
1576 >>> def myfunc(a, b): | |
1577 ... "Return a-b if a>b, otherwise return a+b" | |
1578 ... if a > b: | |
1579 ... return a - b | |
1580 ... else: | |
1581 ... return a + b | |
1582 | |
1583 >>> vfunc = np.vectorize(myfunc) | |
1584 >>> vfunc([1, 2, 3, 4], 2) | |
1585 array([3, 4, 1, 2]) | |
1586 | |
1587 The docstring is taken from the input function to `vectorize` unless it | |
1588 is specified | |
1589 | |
1590 >>> vfunc.__doc__ | |
1591 'Return a-b if a>b, otherwise return a+b' | |
1592 >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') | |
1593 >>> vfunc.__doc__ | |
1594 'Vectorized `myfunc`' | |
1595 | |
1596 The output type is determined by evaluating the first element of the input, | |
1597 unless it is specified | |
1598 | |
1599 >>> out = vfunc([1, 2, 3, 4], 2) | |
1600 >>> type(out[0]) | |
1601 <type 'numpy.int32'> | |
1602 >>> vfunc = np.vectorize(myfunc, otypes=[np.float]) | |
1603 >>> out = vfunc([1, 2, 3, 4], 2) | |
1604 >>> type(out[0]) | |
1605 <type 'numpy.float64'> | |
1606 | |
1607 The `excluded` argument can be used to prevent vectorizing over certain | |
1608 arguments. This can be useful for array-like arguments of a fixed length | |
1609 such as the coefficients for a polynomial as in `polyval`: | |
1610 | |
1611 >>> def mypolyval(p, x): | |
1612 ... _p = list(p) | |
1613 ... res = _p.pop(0) | |
1614 ... while _p: | |
1615 ... res = res*x + _p.pop(0) | |
1616 ... return res | |
1617 >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) | |
1618 >>> vpolyval(p=[1, 2, 3], x=[0, 1]) | |
1619 array([3, 6]) | |
1620 | |
1621 Positional arguments may also be excluded by specifying their position: | |
1622 | |
1623 >>> vpolyval.excluded.add(0) | |
1624 >>> vpolyval([1, 2, 3], x=[0, 1]) | |
1625 array([3, 6]) | |
1626 | |
1627 Notes | |
1628 ----- | |
1629 The `vectorize` function is provided primarily for convenience, not for | |
1630 performance. The implementation is essentially a for loop. | |
1631 | |
1632 If `otypes` is not specified, then a call to the function with the | |
1633 first argument will be used to determine the number of outputs. The | |
1634 results of this call will be cached if `cache` is `True` to prevent | |
1635 calling the function twice. However, to implement the cache, the | |
1636 original function must be wrapped which will slow down subsequent | |
1637 calls, so only do this if your function is expensive. | |
1638 | |
1639 The new keyword argument interface and `excluded` argument support | |
1640 further degrades performance. | |
1641 | |
1642 """ | |
1643 | |
1644 def __init__(self, pyfunc, otypes='', doc=None, excluded=None, | |
1645 cache=False): | |
1646 self.pyfunc = pyfunc | |
1647 self.cache = cache | |
1648 self._ufunc = None # Caching to improve default performance | |
1649 | |
1650 if doc is None: | |
1651 self.__doc__ = pyfunc.__doc__ | |
1652 else: | |
1653 self.__doc__ = doc | |
1654 | |
1655 if isinstance(otypes, str): | |
1656 self.otypes = otypes | |
1657 for char in self.otypes: | |
1658 if char not in typecodes['All']: | |
1659 raise ValueError( | |
1660 "Invalid otype specified: %s" % (char,)) | |
1661 elif iterable(otypes): | |
1662 self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) | |
1663 else: | |
1664 raise ValueError( | |
1665 "Invalid otype specification") | |
1666 | |
1667 # Excluded variable support | |
1668 if excluded is None: | |
1669 excluded = set() | |
1670 self.excluded = set(excluded) | |
1671 | |
1672 def __call__(self, *args, **kwargs): | |
1673 """ | |
1674 Return arrays with the results of `pyfunc` broadcast (vectorized) over | |
1675 `args` and `kwargs` not in `excluded`. | |
1676 """ | |
1677 excluded = self.excluded | |
1678 if not kwargs and not excluded: | |
1679 func = self.pyfunc | |
1680 vargs = args | |
1681 else: | |
1682 # The wrapper accepts only positional arguments: we use `names` and | |
1683 # `inds` to mutate `the_args` and `kwargs` to pass to the original | |
1684 # function. | |
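# For example (illustration only, `vobj` is a placeholder): with
# excluded == {'p'} and a call vobj([0, 1], q=2, p=[1, 2, 3]), `inds`
# is [0] and `names` is ['q'], so only args[0] and kwargs['q'] are
# broadcast; 'p' is passed through unchanged on every element-wise call.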
1685 nargs = len(args) | |
1686 | |
1687 names = [_n for _n in kwargs if _n not in excluded] | |
1688 inds = [_i for _i in range(nargs) if _i not in excluded] | |
1689 the_args = list(args) | |
1690 | |
1691 def func(*vargs): | |
1692 for _n, _i in enumerate(inds): | |
1693 the_args[_i] = vargs[_n] | |
1694 kwargs.update(zip(names, vargs[len(inds):])) | |
1695 return self.pyfunc(*the_args, **kwargs) | |
1696 | |
1697 vargs = [args[_i] for _i in inds] | |
1698 vargs.extend([kwargs[_n] for _n in names]) | |
1699 | |
1700 return self._vectorize_call(func=func, args=vargs) | |
1701 | |
1702 def _get_ufunc_and_otypes(self, func, args): | |
1703 """Return (ufunc, otypes).""" | |
1704 # frompyfunc will fail if args is empty | |
1705 if not args: | |
1706 raise ValueError('args can not be empty') | |
1707 | |
1708 if self.otypes: | |
1709 otypes = self.otypes | |
1710 nout = len(otypes) | |
1711 | |
1712 # Note logic here: We only *use* self._ufunc if func is self.pyfunc | |
1713 # even though we set self._ufunc regardless. | |
1714 if func is self.pyfunc and self._ufunc is not None: | |
1715 ufunc = self._ufunc | |
1716 else: | |
1717 ufunc = self._ufunc = frompyfunc(func, len(args), nout) | |
1718 else: | |
1719 # Get number of outputs and output types by calling the function on | |
1720 # the first entries of args. We also cache the result to prevent | |
1721 # the subsequent call when the ufunc is evaluated. | |
1722 # Assumes that ufunc first evaluates the 0th elements in the input | |
1723 # arrays (the input values are not checked to ensure this) | |
1724 inputs = [asarray(_a).flat[0] for _a in args] | |
1725 outputs = func(*inputs) | |
1726 | |
1727 # Performance note: profiling indicates that -- for simple | |
1728 # functions at least -- this wrapping can almost double the | |
1729 # execution time. | |
1730 # Hence we make it optional. | |
1731 if self.cache: | |
1732 _cache = [outputs] | |
1733 | |
1734 def _func(*vargs): | |
1735 if _cache: | |
1736 return _cache.pop() | |
1737 else: | |
1738 return func(*vargs) | |
1739 else: | |
1740 _func = func | |
1741 | |
1742 if isinstance(outputs, tuple): | |
1743 nout = len(outputs) | |
1744 else: | |
1745 nout = 1 | |
1746 outputs = (outputs,) | |
1747 | |
1748 otypes = ''.join([asarray(outputs[_k]).dtype.char | |
1749 for _k in range(nout)]) | |
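# For instance (illustration only): if the first call returned the tuple
# (2.0, 3), then nout == 2 and otypes is a two-character string of dtype
# codes such as 'dl' ('d' for float64, 'l' for the platform's default
# integer).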
1750 | |
1751 # Performance note: profiling indicates that creating the ufunc is | |
1752 # not a significant cost compared with wrapping so it seems not | |
1753 # worth trying to cache this. | |
1754 ufunc = frompyfunc(_func, len(args), nout) | |
1755 | |
1756 return ufunc, otypes | |
1757 | |
1758 def _vectorize_call(self, func, args): | |
1759 """Vectorized call to `func` over positional `args`.""" | |
1760 if not args: | |
1761 _res = func() | |
1762 else: | |
1763 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) | |
1764 | |
1765 # Convert args to object arrays first | |
1766 inputs = [array(_a, copy=False, subok=True, dtype=object) | |
1767 for _a in args] | |
1768 | |
1769 outputs = ufunc(*inputs) | |
1770 | |
1771 if ufunc.nout == 1: | |
1772 _res = array(outputs, | |
1773 copy=False, subok=True, dtype=otypes[0]) | |
1774 else: | |
1775 _res = tuple([array(_x, copy=False, subok=True, dtype=_t) | |
1776 for _x, _t in zip(outputs, otypes)]) | |
1777 return _res | |
1778 | |
1779 | |
1780 def cov(m, y=None, rowvar=1, bias=0, ddof=None): | |
1781 """ | |
1782 Estimate a covariance matrix, given data. | |
1783 | |
1784 Covariance indicates the level to which two variables vary together. | |
1785 If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, | |
1786 then the covariance matrix element :math:`C_{ij}` is the covariance of | |
1787 :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance | |
1788 of :math:`x_i`. | |
1789 | |
1790 Parameters | |
1791 ---------- | |
1792 m : array_like | |
1793 A 1-D or 2-D array containing multiple variables and observations. | |
1794 Each row of `m` represents a variable, and each column a single | |
1795 observation of all those variables. Also see `rowvar` below. | |
1796 y : array_like, optional | |
1797 An additional set of variables and observations. `y` has the same | |
1798 form as that of `m`. | |
1799 rowvar : int, optional | |
1800 If `rowvar` is non-zero (default), then each row represents a | |
1801 variable, with observations in the columns. Otherwise, the relationship | |
1802 is transposed: each column represents a variable, while the rows | |
1803 contain observations. | |
1804 bias : int, optional | |
1805 Default normalization is by ``(N - 1)``, where ``N`` is the number of | |
1806 observations given (unbiased estimate). If `bias` is 1, then | |
1807 normalization is by ``N``. These values can be overridden by using | |
1808 the keyword ``ddof`` in numpy versions >= 1.5. | |
1809 ddof : int, optional | |
1810 .. versionadded:: 1.5 | |
1811 If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is | |
1812 the number of observations; this overrides the value implied by | |
1813 ``bias``. The default value is ``None``. | |
1814 | |
1815 Returns | |
1816 ------- | |
1817 out : ndarray | |
1818 The covariance matrix of the variables. | |
1819 | |
1820 See Also | |
1821 -------- | |
1822 corrcoef : Normalized covariance matrix | |
1823 | |
1824 Examples | |
1825 -------- | |
1826 Consider two variables, :math:`x_0` and :math:`x_1`, which | |
1827 correlate perfectly, but in opposite directions: | |
1828 | |
1829 >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T | |
1830 >>> x | |
1831 array([[0, 1, 2], | |
1832 [2, 1, 0]]) | |
1833 | |
1834 Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance | |
1835 matrix shows this clearly: | |
1836 | |
1837 >>> np.cov(x) | |
1838 array([[ 1., -1.], | |
1839 [-1., 1.]]) | |
1840 | |
1841 Note that element :math:`C_{0,1}`, which shows the correlation between | |
1842 :math:`x_0` and :math:`x_1`, is negative. | |
1843 | |
1844 Further, note how `x` and `y` are combined: | |
1845 | |
1846 >>> x = [-2.1, -1, 4.3] | |
1847 >>> y = [3, 1.1, 0.12] | |
1848 >>> X = np.vstack((x,y)) | |
1849 >>> print np.cov(X) | |
1850 [[ 11.71 -4.286 ] | |
1851 [ -4.286 2.14413333]] | |
1852 >>> print np.cov(x, y) | |
1853 [[ 11.71 -4.286 ] | |
1854 [ -4.286 2.14413333]] | |
1855 >>> print np.cov(x) | |
1856 11.71 | |
1857 | |
1858 """ | |
1859 # Check inputs | |
1860 if ddof is not None and ddof != int(ddof): | |
1861 raise ValueError( | |
1862 "ddof must be integer") | |
1863 | |
1864 # Handles complex arrays too | |
1865 m = np.asarray(m) | |
1866 if y is None: | |
1867 dtype = np.result_type(m, np.float64) | |
1868 else: | |
1869 y = np.asarray(y) | |
1870 dtype = np.result_type(m, y, np.float64) | |
1871 X = array(m, ndmin=2, dtype=dtype) | |
1872 | |
1873 if X.shape[0] == 1: | |
1874 rowvar = 1 | |
1875 if rowvar: | |
1876 N = X.shape[1] | |
1877 axis = 0 | |
1878 else: | |
1879 N = X.shape[0] | |
1880 axis = 1 | |
1881 | |
1882 # check ddof | |
1883 if ddof is None: | |
1884 if bias == 0: | |
1885 ddof = 1 | |
1886 else: | |
1887 ddof = 0 | |
1888 fact = float(N - ddof) | |
1889 if fact <= 0: | |
1890 warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning) | |
1891 fact = 0.0 | |
1892 | |
1893 if y is not None: | |
1894 y = array(y, copy=False, ndmin=2, dtype=dtype) | |
1895 X = concatenate((X, y), axis) | |
1896 | |
1897 X -= X.mean(axis=1-axis, keepdims=True) | |
1898 if not rowvar: | |
1899 return (dot(X.T, X.conj()) / fact).squeeze() | |
1900 else: | |
1901 return (dot(X, X.T.conj()) / fact).squeeze() | |
1902 | |
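# A minimal sketch of what cov() above computes for a 2-D `m` with the default
# rowvar=1 and bias=0 (illustrative only, not part of the module's API):
#
#     X = np.asarray(m, dtype=float)
#     X = X - X.mean(axis=1, keepdims=True)       # centre each variable (row)
#     C = X.dot(X.T.conj()) / (X.shape[1] - 1.0)  # normalize by N - ddof, ddof=1
#
# i.e. C[i, j] estimates E[(x_i - E[x_i]) * conj(x_j - E[x_j])] from the N
# observations.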
1903 | |
1904 def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None): | |
1905 """ | |
1906 Return correlation coefficients. | |
1907 | |
1908 Please refer to the documentation for `cov` for more detail. The | |
1909 relationship between the correlation coefficient matrix, `P`, and the | |
1910 covariance matrix, `C`, is | |
1911 | |
1912 .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } } | |
1913 | |
1914 The values of `P` are between -1 and 1, inclusive. | |
1915 | |
1916 Parameters | |
1917 ---------- | |
1918 x : array_like | |
1919 A 1-D or 2-D array containing multiple variables and observations. | |
1920 Each row of `x` represents a variable, and each column a single | |
1921 observation of all those variables. Also see `rowvar` below. | |
1922 y : array_like, optional | |
1923 An additional set of variables and observations. `y` has the same | |
1924 shape as `x`. | |
1925 rowvar : int, optional | |
1926 If `rowvar` is non-zero (default), then each row represents a | |
1927 variable, with observations in the columns. Otherwise, the relationship | |
1928 is transposed: each column represents a variable, while the rows | |
1929 contain observations. | |
1930 bias : int, optional | |
1931 Default normalization is by ``(N - 1)``, where ``N`` is the number of | |
1932 observations (unbiased estimate). If `bias` is 1, then | |
1933 normalization is by ``N``. These values can be overridden by using | |
1934 the keyword ``ddof`` in numpy versions >= 1.5. | |
1935 ddof : {None, int}, optional | |
1936 .. versionadded:: 1.5 | |
1937 If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is | |
1938 the number of observations; this overrides the value implied by | |
1939 ``bias``. The default value is ``None``. | |
1940 | |
1941 Returns | |
1942 ------- | |
1943 out : ndarray | |
1944 The correlation coefficient matrix of the variables. | |
1945 | |
1946 See Also | |
1947 -------- | |
1948 cov : Covariance matrix | |
1949 | |
1950 """ | |
1951 c = cov(x, y, rowvar, bias, ddof) | |
1952 try: | |
1953 d = diag(c) | |
1954 except ValueError: # scalar covariance | |
1955 # nan if incorrect value (nan, inf, 0), 1 otherwise | |
1956 return c / c | |
1957 return c / sqrt(multiply.outer(d, d)) | |
1958 | |
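# Sketch of the normalization performed by corrcoef() above (illustrative):
# with C = np.cov(x, y),
#
#     d = np.diag(C)                    # the variances C_ii
#     P = C / np.sqrt(np.outer(d, d))   # P_ij = C_ij / sqrt(C_ii * C_jj)
#
# so, for nonzero variances, every diagonal element of P is 1 and the
# off-diagonal elements lie in [-1, 1] up to rounding error.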
1959 | |
1960 def blackman(M): | |
1961 """ | |
1962 Return the Blackman window. | |
1963 | |
1964 The Blackman window is a taper formed by using the first three | |
1965 terms of a summation of cosines. It was designed to have close to the | |
1966 minimal leakage possible. It is close to optimal, only slightly worse | |
1967 than a Kaiser window. | |
1968 | |
1969 Parameters | |
1970 ---------- | |
1971 M : int | |
1972 Number of points in the output window. If zero or less, an empty | |
1973 array is returned. | |
1974 | |
1975 Returns | |
1976 ------- | |
1977 out : ndarray | |
1978 The window, with the maximum value normalized to one (the value one | |
1979 appears only if the number of samples is odd). | |
1980 | |
1981 See Also | |
1982 -------- | |
1983 bartlett, hamming, hanning, kaiser | |
1984 | |
1985 Notes | |
1986 ----- | |
1987 The Blackman window is defined as | |
1988 | |
1989 .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) | |
1990 | |
1991 Most references to the Blackman window come from the signal processing | |
1992 literature, where it is used as one of many windowing functions for | |
1993 smoothing values. It is also known as an apodization (which means | |
1994 "removing the foot", i.e. smoothing discontinuities at the beginning | |
1995 and end of the sampled signal) or tapering function. It is known as a | |
1996 "near optimal" tapering function, almost as good (by some measures) | |
1997 as the Kaiser window. | |
1998 | |
1999 References | |
2000 ---------- | |
2001 Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, | |
2002 Dover Publications, New York. | |
2003 | |
2004 Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. | |
2005 Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. | |
2006 | |
2007 Examples | |
2008 -------- | |
2009 >>> np.blackman(12) | |
2010 array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, | |
2011 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, | |
2012 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, | |
2013 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) | |
2014 | |
2015 | |
2016 Plot the window and the frequency response: | |
2017 | |
2018 >>> from numpy.fft import fft, fftshift | |
2019 >>> window = np.blackman(51) | |
2020 >>> plt.plot(window) | |
2021 [<matplotlib.lines.Line2D object at 0x...>] | |
2022 >>> plt.title("Blackman window") | |
2023 <matplotlib.text.Text object at 0x...> | |
2024 >>> plt.ylabel("Amplitude") | |
2025 <matplotlib.text.Text object at 0x...> | |
2026 >>> plt.xlabel("Sample") | |
2027 <matplotlib.text.Text object at 0x...> | |
2028 >>> plt.show() | |
2029 | |
2030 >>> plt.figure() | |
2031 <matplotlib.figure.Figure object at 0x...> | |
2032 >>> A = fft(window, 2048) / 25.5 | |
2033 >>> mag = np.abs(fftshift(A)) | |
2034 >>> freq = np.linspace(-0.5, 0.5, len(A)) | |
2035 >>> response = 20 * np.log10(mag) | |
2036 >>> response = np.clip(response, -100, 100) | |
2037 >>> plt.plot(freq, response) | |
2038 [<matplotlib.lines.Line2D object at 0x...>] | |
2039 >>> plt.title("Frequency response of Blackman window") | |
2040 <matplotlib.text.Text object at 0x...> | |
2041 >>> plt.ylabel("Magnitude [dB]") | |
2042 <matplotlib.text.Text object at 0x...> | |
2043 >>> plt.xlabel("Normalized frequency [cycles per sample]") | |
2044 <matplotlib.text.Text object at 0x...> | |
2045 >>> plt.axis('tight') | |
2046 (-0.5, 0.5, -100.0, ...) | |
2047 >>> plt.show() | |
2048 | |
2049 """ | |
2050 if M < 1: | |
2051 return array([]) | |
2052 if M == 1: | |
2053 return ones(1, float) | |
2054 n = arange(0, M) | |
2055 return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) | |
2056 | |
2057 | |
2058 def bartlett(M): | |
2059 """ | |
2060 Return the Bartlett window. | |
2061 | |
2062 The Bartlett window is very similar to a triangular window, except | |
2063 that the end points are at zero. It is often used in signal | |
2064 processing for tapering a signal, without generating too much | |
2065 ripple in the frequency domain. | |
2066 | |
2067 Parameters | |
2068 ---------- | |
2069 M : int | |
2070 Number of points in the output window. If zero or less, an | |
2071 empty array is returned. | |
2072 | |
2073 Returns | |
2074 ------- | |
2075 out : array | |
2076 The triangular window, with the maximum value normalized to one | |
2077 (the value one appears only if the number of samples is odd), with | |
2078 the first and last samples equal to zero. | |
2079 | |
2080 See Also | |
2081 -------- | |
2082 blackman, hamming, hanning, kaiser | |
2083 | |
2084 Notes | |
2085 ----- | |
2086 The Bartlett window is defined as | |
2087 | |
2088 .. math:: w(n) = \\frac{2}{M-1} \\left( | |
2089 \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| | |
2090 \\right) | |
2091 | |
2092 Most references to the Bartlett window come from the signal | |
2093 processing literature, where it is used as one of many windowing | |
2094 functions for smoothing values. Note that convolution with this | |
2095 window produces linear interpolation. It is also known as an | |
2096 apodization (which means "removing the foot", i.e. smoothing | |
2097 discontinuities at the beginning and end of the sampled signal) or | |
2098 tapering function. The Fourier transform of the Bartlett window is the | |
2099 product of two sinc functions. | |
2100 Note the excellent discussion in Kanasewich. | |
2101 | |
2102 References | |
2103 ---------- | |
2104 .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", | |
2105 Biometrika 37, 1-16, 1950. | |
2106 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", | |
2107 The University of Alberta Press, 1975, pp. 109-110. | |
2108 .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal | |
2109 Processing", Prentice-Hall, 1999, pp. 468-471. | |
2110 .. [4] Wikipedia, "Window function", | |
2111 http://en.wikipedia.org/wiki/Window_function | |
2112 .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, | |
2113 "Numerical Recipes", Cambridge University Press, 1986, page 429. | |
2114 | |
2115 | |
2116 Examples | |
2117 -------- | |
2118 >>> np.bartlett(12) | |
2119 array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, | |
2120 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, | |
2121 0.18181818, 0. ]) | |
2122 | |
2123 Plot the window and its frequency response (requires matplotlib): | |
2124 | |
2125 >>> from numpy.fft import fft, fftshift | |
2126 >>> window = np.bartlett(51) | |
2127 >>> plt.plot(window) | |
2128 [<matplotlib.lines.Line2D object at 0x...>] | |
2129 >>> plt.title("Bartlett window") | |
2130 <matplotlib.text.Text object at 0x...> | |
2131 >>> plt.ylabel("Amplitude") | |
2132 <matplotlib.text.Text object at 0x...> | |
2133 >>> plt.xlabel("Sample") | |
2134 <matplotlib.text.Text object at 0x...> | |
2135 >>> plt.show() | |
2136 | |
2137 >>> plt.figure() | |
2138 <matplotlib.figure.Figure object at 0x...> | |
2139 >>> A = fft(window, 2048) / 25.5 | |
2140 >>> mag = np.abs(fftshift(A)) | |
2141 >>> freq = np.linspace(-0.5, 0.5, len(A)) | |
2142 >>> response = 20 * np.log10(mag) | |
2143 >>> response = np.clip(response, -100, 100) | |
2144 >>> plt.plot(freq, response) | |
2145 [<matplotlib.lines.Line2D object at 0x...>] | |
2146 >>> plt.title("Frequency response of Bartlett window") | |
2147 <matplotlib.text.Text object at 0x...> | |
2148 >>> plt.ylabel("Magnitude [dB]") | |
2149 <matplotlib.text.Text object at 0x...> | |
2150 >>> plt.xlabel("Normalized frequency [cycles per sample]") | |
2151 <matplotlib.text.Text object at 0x...> | |
2152 >>> plt.axis('tight') | |
2153 (-0.5, 0.5, -100.0, ...) | |
2154 >>> plt.show() | |
2155 | |
2156 """ | |
2157 if M < 1: | |
2158 return array([]) | |
2159 if M == 1: | |
2160 return ones(1, float) | |
2161 n = arange(0, M) | |
2162 return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1)) | |
2163 | |
2164 | |
2165 def hanning(M): | |
2166 """ | |
2167 Return the Hanning window. | |
2168 | |
2169 The Hanning window is a taper formed by using a weighted cosine. | |
2170 | |
2171 Parameters | |
2172 ---------- | |
2173 M : int | |
2174 Number of points in the output window. If zero or less, an | |
2175 empty array is returned. | |
2176 | |
2177 Returns | |
2178 ------- | |
2179 out : ndarray, shape(M,) | |
2180 The window, with the maximum value normalized to one (the value | |
2181 one appears only if `M` is odd). | |
2182 | |
2183 See Also | |
2184 -------- | |
2185 bartlett, blackman, hamming, kaiser | |
2186 | |
2187 Notes | |
2188 ----- | |
2189 The Hanning window is defined as | |
2190 | |
2191 .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right) | |
2192 \\qquad 0 \\leq n \\leq M-1 | |
2193 | |
2194 The Hanning window was named for Julius von Hann, an Austrian meteorologist. | |
2195 It is also known as the Cosine Bell. Some authors prefer that it be | |
2196 called a Hann window, to help avoid confusion with the very similar | |
2197 Hamming window. | |
2198 | |
2199 Most references to the Hanning window come from the signal processing | |
2200 literature, where it is used as one of many windowing functions for | |
2201 smoothing values. It is also known as an apodization (which means | |
2202 "removing the foot", i.e. smoothing discontinuities at the beginning | |
2203 and end of the sampled signal) or tapering function. | |
2204 | |
2205 References | |
2206 ---------- | |
2207 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power | |
2208 spectra, Dover Publications, New York. | |
2209 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", | |
2210 The University of Alberta Press, 1975, pp. 106-108. | |
2211 .. [3] Wikipedia, "Window function", | |
2212 http://en.wikipedia.org/wiki/Window_function | |
2213 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, | |
2214 "Numerical Recipes", Cambridge University Press, 1986, page 425. | |
2215 | |
2216 Examples | |
2217 -------- | |
2218 >>> np.hanning(12) | |
2219 array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, | |
2220 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, | |
2221 0.07937323, 0. ]) | |
2222 | |
2223 Plot the window and its frequency response: | |
2224 | |
2225 >>> from numpy.fft import fft, fftshift | |
2226 >>> window = np.hanning(51) | |
2227 >>> plt.plot(window) | |
2228 [<matplotlib.lines.Line2D object at 0x...>] | |
2229 >>> plt.title("Hann window") | |
2230 <matplotlib.text.Text object at 0x...> | |
2231 >>> plt.ylabel("Amplitude") | |
2232 <matplotlib.text.Text object at 0x...> | |
2233 >>> plt.xlabel("Sample") | |
2234 <matplotlib.text.Text object at 0x...> | |
2235 >>> plt.show() | |
2236 | |
2237 >>> plt.figure() | |
2238 <matplotlib.figure.Figure object at 0x...> | |
2239 >>> A = fft(window, 2048) / 25.5 | |
2240 >>> mag = np.abs(fftshift(A)) | |
2241 >>> freq = np.linspace(-0.5, 0.5, len(A)) | |
2242 >>> response = 20 * np.log10(mag) | |
2243 >>> response = np.clip(response, -100, 100) | |
2244 >>> plt.plot(freq, response) | |
2245 [<matplotlib.lines.Line2D object at 0x...>] | |
2246 >>> plt.title("Frequency response of the Hann window") | |
2247 <matplotlib.text.Text object at 0x...> | |
2248 >>> plt.ylabel("Magnitude [dB]") | |
2249 <matplotlib.text.Text object at 0x...> | |
2250 >>> plt.xlabel("Normalized frequency [cycles per sample]") | |
2251 <matplotlib.text.Text object at 0x...> | |
2252 >>> plt.axis('tight') | |
2253 (-0.5, 0.5, -100.0, ...) | |
2254 >>> plt.show() | |
2255 | |
2256 """ | |
2257 if M < 1: | |
2258 return array([]) | |
2259 if M == 1: | |
2260 return ones(1, float) | |
2261 n = arange(0, M) | |
2262 return 0.5 - 0.5*cos(2.0*pi*n/(M-1)) | |
2263 | |
2264 | |
2265 def hamming(M): | |
2266 """ | |
2267 Return the Hamming window. | |
2268 | |
2269 The Hamming window is a taper formed by using a weighted cosine. | |
2270 | |
2271 Parameters | |
2272 ---------- | |
2273 M : int | |
2274 Number of points in the output window. If zero or less, an | |
2275 empty array is returned. | |
2276 | |
2277 Returns | |
2278 ------- | |
2279 out : ndarray | |
2280 The window, with the maximum value normalized to one (the value | |
2281 one appears only if the number of samples is odd). | |
2282 | |
2283 See Also | |
2284 -------- | |
2285 bartlett, blackman, hanning, kaiser | |
2286 | |
2287 Notes | |
2288 ----- | |
2289 The Hamming window is defined as | |
2290 | |
2291 .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right) | |
2292 \\qquad 0 \\leq n \\leq M-1 | |
2293 | |
2294 The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey, | |
2295 and is described in Blackman and Tukey. It was recommended for | |
2296 smoothing the truncated autocovariance function in the time domain. | |
2297 Most references to the Hamming window come from the signal processing | |
2298 literature, where it is used as one of many windowing functions for | |
2299 smoothing values. It is also known as an apodization (which means | |
2300 "removing the foot", i.e. smoothing discontinuities at the beginning | |
2301 and end of the sampled signal) or tapering function. | |
2302 | |
2303 References | |
2304 ---------- | |
2305 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power | |
2306 spectra, Dover Publications, New York. | |
2307 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The | |
2308 University of Alberta Press, 1975, pp. 109-110. | |
2309 .. [3] Wikipedia, "Window function", | |
2310 http://en.wikipedia.org/wiki/Window_function | |
2311 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, | |
2312 "Numerical Recipes", Cambridge University Press, 1986, page 425. | |
2313 | |
2314 Examples | |
2315 -------- | |
2316 >>> np.hamming(12) | |
2317 array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, | |
2318 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, | |
2319 0.15302337, 0.08 ]) | |
2320 | |
2321 Plot the window and the frequency response: | |
2322 | |
2323 >>> from numpy.fft import fft, fftshift | |
2324 >>> window = np.hamming(51) | |
2325 >>> plt.plot(window) | |
2326 [<matplotlib.lines.Line2D object at 0x...>] | |
2327 >>> plt.title("Hamming window") | |
2328 <matplotlib.text.Text object at 0x...> | |
2329 >>> plt.ylabel("Amplitude") | |
2330 <matplotlib.text.Text object at 0x...> | |
2331 >>> plt.xlabel("Sample") | |
2332 <matplotlib.text.Text object at 0x...> | |
2333 >>> plt.show() | |
2334 | |
2335 >>> plt.figure() | |
2336 <matplotlib.figure.Figure object at 0x...> | |
2337 >>> A = fft(window, 2048) / 25.5 | |
2338 >>> mag = np.abs(fftshift(A)) | |
2339 >>> freq = np.linspace(-0.5, 0.5, len(A)) | |
2340 >>> response = 20 * np.log10(mag) | |
2341 >>> response = np.clip(response, -100, 100) | |
2342 >>> plt.plot(freq, response) | |
2343 [<matplotlib.lines.Line2D object at 0x...>] | |
2344 >>> plt.title("Frequency response of Hamming window") | |
2345 <matplotlib.text.Text object at 0x...> | |
2346 >>> plt.ylabel("Magnitude [dB]") | |
2347 <matplotlib.text.Text object at 0x...> | |
2348 >>> plt.xlabel("Normalized frequency [cycles per sample]") | |
2349 <matplotlib.text.Text object at 0x...> | |
2350 >>> plt.axis('tight') | |
2351 (-0.5, 0.5, -100.0, ...) | |
2352 >>> plt.show() | |
2353 | |
2354 """ | |
2355 if M < 1: | |
2356 return array([]) | |
2357 if M == 1: | |
2358 return ones(1, float) | |
2359 n = arange(0, M) | |
2360 return 0.54 - 0.46*cos(2.0*pi*n/(M-1)) | |
2361 | |
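# The three raised-cosine windows above (hanning, hamming, blackman) differ
# only in their coefficients; a hypothetical helper could express them all as
# (sketch only, assuming M >= 2 and the same edge handling as above):
#
#     def _cosine_window(M, coeffs):
#         n = arange(0, M)
#         return sum(((-1) ** k) * c * cos(2.0 * k * pi * n / (M - 1))
#                    for k, c in enumerate(coeffs))
#
#     # hanning(M)  == _cosine_window(M, [0.5, 0.5])
#     # hamming(M)  == _cosine_window(M, [0.54, 0.46])
#     # blackman(M) == _cosine_window(M, [0.42, 0.5, 0.08])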
2362 ## Code from cephes for i0 | |
2363 | |
2364 _i0A = [ | |
2365 -4.41534164647933937950E-18, | |
2366 3.33079451882223809783E-17, | |
2367 -2.43127984654795469359E-16, | |
2368 1.71539128555513303061E-15, | |
2369 -1.16853328779934516808E-14, | |
2370 7.67618549860493561688E-14, | |
2371 -4.85644678311192946090E-13, | |
2372 2.95505266312963983461E-12, | |
2373 -1.72682629144155570723E-11, | |
2374 9.67580903537323691224E-11, | |
2375 -5.18979560163526290666E-10, | |
2376 2.65982372468238665035E-9, | |
2377 -1.30002500998624804212E-8, | |
2378 6.04699502254191894932E-8, | |
2379 -2.67079385394061173391E-7, | |
2380 1.11738753912010371815E-6, | |
2381 -4.41673835845875056359E-6, | |
2382 1.64484480707288970893E-5, | |
2383 -5.75419501008210370398E-5, | |
2384 1.88502885095841655729E-4, | |
2385 -5.76375574538582365885E-4, | |
2386 1.63947561694133579842E-3, | |
2387 -4.32430999505057594430E-3, | |
2388 1.05464603945949983183E-2, | |
2389 -2.37374148058994688156E-2, | |
2390 4.93052842396707084878E-2, | |
2391 -9.49010970480476444210E-2, | |
2392 1.71620901522208775349E-1, | |
2393 -3.04682672343198398683E-1, | |
2394 6.76795274409476084995E-1 | |
2395 ] | |
2396 | |
2397 _i0B = [ | |
2398 -7.23318048787475395456E-18, | |
2399 -4.83050448594418207126E-18, | |
2400 4.46562142029675999901E-17, | |
2401 3.46122286769746109310E-17, | |
2402 -2.82762398051658348494E-16, | |
2403 -3.42548561967721913462E-16, | |
2404 1.77256013305652638360E-15, | |
2405 3.81168066935262242075E-15, | |
2406 -9.55484669882830764870E-15, | |
2407 -4.15056934728722208663E-14, | |
2408 1.54008621752140982691E-14, | |
2409 3.85277838274214270114E-13, | |
2410 7.18012445138366623367E-13, | |
2411 -1.79417853150680611778E-12, | |
2412 -1.32158118404477131188E-11, | |
2413 -3.14991652796324136454E-11, | |
2414 1.18891471078464383424E-11, | |
2415 4.94060238822496958910E-10, | |
2416 3.39623202570838634515E-9, | |
2417 2.26666899049817806459E-8, | |
2418 2.04891858946906374183E-7, | |
2419 2.89137052083475648297E-6, | |
2420 6.88975834691682398426E-5, | |
2421 3.36911647825569408990E-3, | |
2422 8.04490411014108831608E-1 | |
2423 ] | |
2424 | |
2425 | |
2426 def _chbevl(x, vals): | |
2427 b0 = vals[0] | |
2428 b1 = 0.0 | |
2429 | |
2430 for i in range(1, len(vals)): | |
2431 b2 = b1 | |
2432 b1 = b0 | |
2433 b0 = x*b1 - b2 + vals[i] | |
2434 | |
2435 return 0.5*(b0 - b2) | |
2436 | |
2437 | |
2438 def _i0_1(x): | |
2439 return exp(x) * _chbevl(x/2.0-2, _i0A) | |
2440 | |
2441 | |
2442 def _i0_2(x): | |
2443 return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) | |
2444 | |
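# _chbevl above evaluates a Chebyshev series via Clenshaw's recurrence, and
# the two helpers wrap it for the interval split described in the i0()
# docstring below (a sketch of the mapping, inferred from the coefficients):
# for 0 <= x <= 8, _i0_1 maps x to x/2 - 2, which lies in [-2, 2], and
# rescales by exp(x); for x > 8, _i0_2 uses 32/x - 2 plus a 1/sqrt(x) factor.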
2445 | |
2446 def i0(x): | |
2447 """ | |
2448 Modified Bessel function of the first kind, order 0. | |
2449 | |
2450 Usually denoted :math:`I_0`. This function does broadcast, but will *not* | |
2451 "up-cast" int dtype arguments unless accompanied by at least one float or | |
2452 complex dtype argument (see Raises below). | |
2453 | |
2454 Parameters | |
2455 ---------- | |
2456 x : array_like, dtype float or complex | |
2457 Argument of the Bessel function. | |
2458 | |
2459 Returns | |
2460 ------- | |
2461 out : ndarray, shape = x.shape, dtype = x.dtype | |
2462 The modified Bessel function evaluated at each of the elements of `x`. | |
2463 | |
2464 Raises | |
2465 ------ | |
2466 TypeError: array cannot be safely cast to required type | |
2467 If argument consists exclusively of int dtypes. | |
2468 | |
2469 See Also | |
2470 -------- | |
2471 scipy.special.iv, scipy.special.ive | |
2472 | |
2473 Notes | |
2474 ----- | |
2475 We use the algorithm published by Clenshaw [1]_ and referenced by | |
2476 Abramowitz and Stegun [2]_, for which the function domain is | |
2477 partitioned into the two intervals [0,8] and (8,inf), and Chebyshev | |
2478 polynomial expansions are employed in each interval. Relative error on | |
2479 the domain [0,30] using IEEE arithmetic is documented [3]_ as having a | |
2480 peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). | |
2481 | |
2482 References | |
2483 ---------- | |
2484 .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in | |
2485 *National Physical Laboratory Mathematical Tables*, vol. 5, London: | |
2486 Her Majesty's Stationery Office, 1962. | |
2487 .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical | |
2488 Functions*, 10th printing, New York: Dover, 1964, pp. 379. | |
2489 http://www.math.sfu.ca/~cbm/aands/page_379.htm | |
2490 .. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html | |
2491 | |
2492 Examples | |
2493 -------- | |
2494 >>> np.i0([0.]) | |
2495 array(1.0) | |
2496 >>> np.i0([0., 1. + 2j]) | |
2497 array([ 1.00000000+0.j , 0.18785373+0.64616944j]) | |
2498 | |
2499 """ | |
2500 x = atleast_1d(x).copy() | |
2501 y = empty_like(x) | |
2502 ind = (x < 0) | |
2503 x[ind] = -x[ind] | |
2504 ind = (x <= 8.0) | |
2505 y[ind] = _i0_1(x[ind]) | |
2506 ind2 = ~ind | |
2507 y[ind2] = _i0_2(x[ind2]) | |
2508 return y.squeeze() | |
2509 | |
2510 ## End of cephes code for i0 | |
2511 | |
2512 | |
2513 def kaiser(M, beta): | |
2514 """ | |
2515 Return the Kaiser window. | |
2516 | |
2517 The Kaiser window is a taper formed by using a Bessel function. | |
2518 | |
2519 Parameters | |
2520 ---------- | |
2521 M : int | |
2522 Number of points in the output window. If zero or less, an | |
2523 empty array is returned. | |
2524 beta : float | |
2525 Shape parameter for window. | |
2526 | |
2527 Returns | |
2528 ------- | |
2529 out : array | |
2530 The window, with the maximum value normalized to one (the value | |
2531 one appears only if the number of samples is odd). | |
2532 | |
2533 See Also | |
2534 -------- | |
2535 bartlett, blackman, hamming, hanning | |
2536 | |
2537 Notes | |
2538 ----- | |
2539 The Kaiser window is defined as | |
2540 | |
2541 .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} | |
2542 \\right)/I_0(\\beta) | |
2543 | |
2544 with | |
2545 | |
2546 .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, | |
2547 | |
2548 where :math:`I_0` is the modified zeroth-order Bessel function. | |
2549 | |
2550 The Kaiser window was named for Jim Kaiser, who discovered a simple | |
2551 approximation to the DPSS window based on Bessel functions. The Kaiser | |
2552 window is a very good approximation to the Digital Prolate Spheroidal | |
2553 Sequence, or Slepian window, which maximizes the energy in the main | |
2554 lobe of the window relative to the total energy. | |
2555 | |
2556 The Kaiser can approximate many other windows by varying the beta | |
2557 parameter. | |
2558 | |
2559 ==== ======================= | |
2560 beta Window shape | |
2561 ==== ======================= | |
2562 0 Rectangular | |
2563 5 Similar to a Hamming | |
2564 6 Similar to a Hanning | |
2565 8.6 Similar to a Blackman | |
2566 ==== ======================= | |
2567 | |
2568 A beta value of 14 is probably a good starting point. Note that as beta | |
2569 gets large, the window narrows, and so the number of samples needs to be | |
2570 large enough to sample the increasingly narrow spike, otherwise NaNs will | |
2571 get returned. | |
2572 | |
2573 Most references to the Kaiser window come from the signal processing | |
2574 literature, where it is used as one of many windowing functions for | |
2575 smoothing values. It is also known as an apodization (which means | |
2576 "removing the foot", i.e. smoothing discontinuities at the beginning | |
2577 and end of the sampled signal) or tapering function. | |
2578 | |
2579 References | |
2580 ---------- | |
2581 .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by | |
2582 digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. | |
2583 John Wiley and Sons, New York, (1966). | |
2584 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The | |
2585 University of Alberta Press, 1975, pp. 177-178. | |
2586 .. [3] Wikipedia, "Window function", | |
2587 http://en.wikipedia.org/wiki/Window_function | |
2588 | |
2589 Examples | |
2590 -------- | |
2591 >>> np.kaiser(12, 14) | |
2592 array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, | |
2593 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, | |
2594 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, | |
2595 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) | |
2596 | |
2597 | |
2598 Plot the window and the frequency response: | |
2599 | |
2600 >>> from numpy.fft import fft, fftshift | |
2601 >>> window = np.kaiser(51, 14) | |
2602 >>> plt.plot(window) | |
2603 [<matplotlib.lines.Line2D object at 0x...>] | |
2604 >>> plt.title("Kaiser window") | |
2605 <matplotlib.text.Text object at 0x...> | |
2606 >>> plt.ylabel("Amplitude") | |
2607 <matplotlib.text.Text object at 0x...> | |
2608 >>> plt.xlabel("Sample") | |
2609 <matplotlib.text.Text object at 0x...> | |
2610 >>> plt.show() | |
2611 | |
2612 >>> plt.figure() | |
2613 <matplotlib.figure.Figure object at 0x...> | |
2614 >>> A = fft(window, 2048) / 25.5 | |
2615 >>> mag = np.abs(fftshift(A)) | |
2616 >>> freq = np.linspace(-0.5, 0.5, len(A)) | |
2617 >>> response = 20 * np.log10(mag) | |
2618 >>> response = np.clip(response, -100, 100) | |
2619 >>> plt.plot(freq, response) | |
2620 [<matplotlib.lines.Line2D object at 0x...>] | |
2621 >>> plt.title("Frequency response of Kaiser window") | |
2622 <matplotlib.text.Text object at 0x...> | |
2623 >>> plt.ylabel("Magnitude [dB]") | |
2624 <matplotlib.text.Text object at 0x...> | |
2625 >>> plt.xlabel("Normalized frequency [cycles per sample]") | |
2626 <matplotlib.text.Text object at 0x...> | |
2627 >>> plt.axis('tight') | |
2628 (-0.5, 0.5, -100.0, ...) | |
2629 >>> plt.show() | |
2630 | |
2631 """ | |
2632 from numpy.dual import i0 | |
2633 if M == 1: | |
2634 return np.array([1.]) | |
2635 n = arange(0, M) | |
2636 alpha = (M-1)/2.0 | |
2637 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) | |
2638 | |
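# Note on the return expression of kaiser() above (illustrative): with
# alpha = (M - 1) / 2 and m = n - alpha (so -alpha <= m <= alpha),
#
#     ((n - alpha) / alpha) ** 2  ==  4.0 * m ** 2 / (M - 1) ** 2
#
# which is exactly the argument of I_0 in the docstring's definition.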
2639 | |
2640 def sinc(x): | |
2641 """ | |
2642 Return the sinc function. | |
2643 | |
2644 The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`. | |
2645 | |
2646 Parameters | |
2647 ---------- | |
2648 x : ndarray | |
2649 Array (possibly multi-dimensional) of values for which to | |
2650 calculate ``sinc(x)``. | |
2651 | |
2652 Returns | |
2653 ------- | |
2654 out : ndarray | |
2655 ``sinc(x)``, which has the same shape as the input. | |
2656 | |
2657 Notes | |
2658 ----- | |
2659 ``sinc(0)`` is the limit value 1. | |
2660 | |
2661 The name sinc is short for "sine cardinal" or "sinus cardinalis". | |
2662 | |
2663 The sinc function is used in various signal processing applications, | |
2664 including in anti-aliasing, in the construction of a Lanczos resampling | |
2665 filter, and in interpolation. | |
2666 | |
2667 For bandlimited interpolation of discrete-time signals, the ideal | |
2668 interpolation kernel is proportional to the sinc function. | |
2669 | |
2670 References | |
2671 ---------- | |
2672 .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web | |
2673 Resource. http://mathworld.wolfram.com/SincFunction.html | |
2674 .. [2] Wikipedia, "Sinc function", | |
2675 http://en.wikipedia.org/wiki/Sinc_function | |
2676 | |
2677 Examples | |
2678 -------- | |
2679 >>> x = np.linspace(-4, 4, 41) | |
2680 >>> np.sinc(x) | |
2681 array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, | |
2682 -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, | |
2683 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, | |
2684 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, | |
2685 -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, | |
2686 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, | |
2687 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, | |
2688 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, | |
2689 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, | |
2690 -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, | |
2691 -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, | |
2692 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, | |
2693 -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, | |
2694 -4.92362781e-02, -3.89804309e-17]) | |
2695 | |
2696 >>> plt.plot(x, np.sinc(x)) | |
2697 [<matplotlib.lines.Line2D object at 0x...>] | |
2698 >>> plt.title("Sinc Function") | |
2699 <matplotlib.text.Text object at 0x...> | |
2700 >>> plt.ylabel("Amplitude") | |
2701 <matplotlib.text.Text object at 0x...> | |
2702 >>> plt.xlabel("X") | |
2703 <matplotlib.text.Text object at 0x...> | |
2704 >>> plt.show() | |
2705 | |
2706 It works in 2-D as well: | |
2707 | |
2708 >>> x = np.linspace(-4, 4, 401) | |
2709 >>> xx = np.outer(x, x) | |
2710 >>> plt.imshow(np.sinc(xx)) | |
2711 <matplotlib.image.AxesImage object at 0x...> | |
2712 | |
2713 """ | |
2714 x = np.asanyarray(x) | |
2715 y = pi * where(x == 0, 1.0e-20, x) | |
2716 return sin(y)/y | |
2717 | |
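# Note on sinc() above: x == 0 is replaced by the tiny value 1.0e-20 before
# multiplying by pi, so sin(y)/y evaluates to 1.0 (to double precision) at
# the origin instead of dividing by zero.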
2718 | |
2719 def msort(a): | |
2720 """ | |
2721 Return a copy of an array sorted along the first axis. | |
2722 | |
2723 Parameters | |
2724 ---------- | |
2725 a : array_like | |
2726 Array to be sorted. | |
2727 | |
2728 Returns | |
2729 ------- | |
2730 sorted_array : ndarray | |
2731 Array of the same type and shape as `a`. | |
2732 | |
2733 See Also | |
2734 -------- | |
2735 sort | |
2736 | |
2737 Notes | |
2738 ----- | |
2739 ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. | |
2740 | |
2741 """ | |
2742 b = array(a, subok=True, copy=True) | |
2743 b.sort(0) | |
2744 return b | |
2745 | |
2746 | |
2747 def _ureduce(a, func, **kwargs): | |
2748 """ | |
2749 Internal Function. | |
2750 Call `func` with `a` as first argument swapping the axes to use extended | |
2751 axis on functions that don't support it natively. | |
2752 | |
2753 Returns result and a.shape with axis dims set to 1. | |
2754 | |
2755 Parameters | |
2756 ---------- | |
2757 a : array_like | |
2758 Input array or object that can be converted to an array. | |
2759 func : callable | |
2760 Reduction function capable of receiving an axis argument. | |
2761 It is called with `a` as first argument followed by `kwargs`. | |
2762 kwargs : keyword arguments | |
2763 additional keyword arguments to pass to `func`. | |
2764 | |
2765 Returns | |
2766 ------- | |
2767 result : tuple | |
2768 Result of func(a, **kwargs) and a.shape with axis dims set to 1 | |
2769 which can be used to reshape the result to the same shape a ufunc with | |
2770 keepdims=True would produce. | |
2771 | |
2772 """ | |
2773 a = np.asanyarray(a) | |
2774 axis = kwargs.get('axis', None) | |
2775 if axis is not None: | |
2776 keepdim = list(a.shape) | |
2777 nd = a.ndim | |
2778 try: | |
2779 axis = operator.index(axis) | |
2780 if axis >= nd or axis < -nd: | |
2781 raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim)) | |
2782 keepdim[axis] = 1 | |
2783 except TypeError: | |
2784 sax = set() | |
2785 for x in axis: | |
2786 if x >= nd or x < -nd: | |
2787 raise IndexError("axis %d out of bounds (%d)" % (x, nd)) | |
2788 if x in sax: | |
2789 raise ValueError("duplicate value in axis") | |
2790 sax.add(x % nd) | |
2791 keepdim[x] = 1 | |
2792 keep = sax.symmetric_difference(frozenset(range(nd))) | |
2793 nkeep = len(keep) | |
2794 # swap axis that should not be reduced to front | |
2795 for i, s in enumerate(sorted(keep)): | |
2796 a = a.swapaxes(i, s) | |
2797 # merge reduced axis | |
2798 a = a.reshape(a.shape[:nkeep] + (-1,)) | |
2799 kwargs['axis'] = -1 | |
2800 else: | |
2801 keepdim = [1] * a.ndim | |
2802 | |
2803 r = func(a, **kwargs) | |
2804 return r, keepdim | |
2805 | |
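# Sketch of how callers use _ureduce's second return value (illustrative,
# mirroring median() below):
#
#     r, k = _ureduce(a, func=_median, axis=axis)
#     if keepdims:
#         r = r.reshape(k)   # k is a.shape with the reduced axes set to 1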
2806 | |
2807 def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): | |
2808 """ | |
2809 Compute the median along the specified axis. | |
2810 | |
2811 Returns the median of the array elements. | |
2812 | |
2813 Parameters | |
2814 ---------- | |
2815 a : array_like | |
2816 Input array or object that can be converted to an array. | |
2817 axis : int or sequence of int, optional | |
2818 Axis along which the medians are computed. The default (axis=None) | |
2819 is to compute the median along a flattened version of the array. | |
2820 A sequence of axes is supported since version 1.9.0. | |
2821 out : ndarray, optional | |
2822 Alternative output array in which to place the result. It must have | |
2823 the same shape and buffer length as the expected output, but the | |
2824 type (of the output) will be cast if necessary. | |
2825 overwrite_input : bool, optional | |
2826 If True, then allow use of memory of input array (a) for | |
2827 calculations. The input array will be modified by the call to | |
2828 median. This will save memory when you do not need to preserve the | |
2829 contents of the input array. Treat the input as undefined, but it | |
2830 will probably be fully or partially sorted. Default is False. Note | |
2831 that, if `overwrite_input` is True and the input is not already an | |
2832 ndarray, an error will be raised. | |
2833 keepdims : bool, optional | |
2834 If this is set to True, the axes which are reduced are left | |
2835 in the result as dimensions with size one. With this option, | |
2836 the result will broadcast correctly against the original `arr`. | |
2837 | |
2838 .. versionadded:: 1.9.0 | |
2839 | |
2840 | |
2841 Returns | |
2842 ------- | |
2843 median : ndarray | |
2844 A new array holding the result (unless `out` is specified, in which | |
2845 case that array is returned instead). If the input contains | |
2846 integers, or floats of smaller precision than 64, then the output | |
2847 data-type is float64. Otherwise, the output data-type is the same | |
2848 as that of the input. | |
2849 | |
2850 See Also | |
2851 -------- | |
2852 mean, percentile | |
2853 | |
2854 Notes | |
2855 ----- | |
2856 Given a vector V of length N, the median of V is the middle value of | |
2857 a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is | |
2858 odd. When N is even, it is the average of the two middle values of | |
2859 ``V_sorted``. | |
2860 | |
2861 Examples | |
2862 -------- | |
2863 >>> a = np.array([[10, 7, 4], [3, 2, 1]]) | |
2864 >>> a | |
2865 array([[10, 7, 4], | |
2866 [ 3, 2, 1]]) | |
2867 >>> np.median(a) | |
2868 3.5 | |
2869 >>> np.median(a, axis=0) | |
2870 array([ 6.5, 4.5, 2.5]) | |
2871 >>> np.median(a, axis=1) | |
2872 array([ 7., 2.]) | |
2873 >>> m = np.median(a, axis=0) | |
2874 >>> out = np.zeros_like(m) | |
2875 >>> np.median(a, axis=0, out=m) | |
2876 array([ 6.5, 4.5, 2.5]) | |
2877 >>> m | |
2878 array([ 6.5, 4.5, 2.5]) | |
2879 >>> b = a.copy() | |
2880 >>> np.median(b, axis=1, overwrite_input=True) | |
2881 array([ 7., 2.]) | |
2882 >>> assert not np.all(a==b) | |
2883 >>> b = a.copy() | |
2884 >>> np.median(b, axis=None, overwrite_input=True) | |
2885 3.5 | |
2886 >>> assert not np.all(a==b) | |
2887 | |
2888 """ | |
2889 r, k = _ureduce(a, func=_median, axis=axis, out=out, | |
2890 overwrite_input=overwrite_input) | |
2891 if keepdims: | |
2892 return r.reshape(k) | |
2893 else: | |
2894 return r | |
2895 | |
2896 def _median(a, axis=None, out=None, overwrite_input=False): | |
2897 # can't reasonably be implemented in terms of percentile as we have to | |
2898 # call mean to not break astropy | |
2899 a = np.asanyarray(a) | |
2900 if axis is not None and axis >= a.ndim: | |
2901 raise IndexError( | |
2902 "axis %d out of bounds (%d)" % (axis, a.ndim)) | |
2903 | |
2904 if overwrite_input: | |
2905 if axis is None: | |
2906 part = a.ravel() | |
2907 sz = part.size | |
2908 if sz % 2 == 0: | |
2909 szh = sz // 2 | |
2910 part.partition((szh - 1, szh)) | |
2911 else: | |
2912 part.partition((sz - 1) // 2) | |
2913 else: | |
2914 sz = a.shape[axis] | |
2915 if sz % 2 == 0: | |
2916 szh = sz // 2 | |
2917 a.partition((szh - 1, szh), axis=axis) | |
2918 else: | |
2919 a.partition((sz - 1) // 2, axis=axis) | |
2920 part = a | |
2921 else: | |
2922 if axis is None: | |
2923 sz = a.size | |
2924 else: | |
2925 sz = a.shape[axis] | |
2926 if sz % 2 == 0: | |
2927 part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis) | |
2928 else: | |
2929 part = partition(a, (sz - 1) // 2, axis=axis) | |
2930 if part.shape == (): | |
2931 # make 0-D arrays work | |
2932 return part.item() | |
2933 if axis is None: | |
2934 axis = 0 | |
2935 indexer = [slice(None)] * part.ndim | |
2936 index = part.shape[axis] // 2 | |
2937 if part.shape[axis] % 2 == 1: | |
2938 # index with slice to allow mean (below) to work | |
2939 indexer[axis] = slice(index, index+1) | |
2940 else: | |
2941 indexer[axis] = slice(index-1, index+1) | |
2942 # Use mean in both the odd and even case to coerce the data type | |
2943 # and to check and use the out array. | |
2944 return mean(part[indexer], axis=axis, out=out) | |
2945 | |
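# Illustration of the slicing in _median() above (assumed 1-D example): for an
# input of size 6 the two middle order statistics end up at indices 2 and 3
# after the partition, so indexer[axis] = slice(2, 4) and mean() averages
# them; for size 7 the single middle value sits at index 3 and
# indexer[axis] = slice(3, 4) keeps the axis, so the same mean() call covers
# both cases.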
2946 | |
2947 def percentile(a, q, axis=None, out=None, | |
2948 overwrite_input=False, interpolation='linear', keepdims=False): | |
2949 """ | |
2950 Compute the qth percentile of the data along the specified axis. | |
2951 | |
2952 Returns the qth percentile of the array elements. | |
2953 | |
2954 Parameters | |
2955 ---------- | |
2956 a : array_like | |
2957 Input array or object that can be converted to an array. | |
2958 q : float in range of [0,100] (or sequence of floats) | |
2959 Percentile to compute which must be between 0 and 100 inclusive. | |
2960 axis : int or sequence of int, optional | |
2961 Axis along which the percentiles are computed. The default (None) | |
2962 is to compute the percentiles along a flattened version of the array. | |
2963 A sequence of axes is supported since version 1.9.0. | |
2964 out : ndarray, optional | |
2965 Alternative output array in which to place the result. It must | |
2966 have the same shape and buffer length as the expected output, | |
2967 but the type (of the output) will be cast if necessary. | |
2968 overwrite_input : bool, optional | |
2969 If True, then allow use of memory of input array `a` for | |
2970 calculations. The input array will be modified by the call to | |
2971 percentile. This will save memory when you do not need to preserve | |
2972 the contents of the input array. In this case you should not make | |
2973 any assumptions about the content of the passed in array `a` after | |
2974 this function completes -- treat it as undefined. Default is False. | |
2975 Note that, if the `a` input is not already an array, this parameter | |
2976 will have no effect; `a` will be converted to an array internally | |
2977 regardless of the value of this parameter. | |
2978 interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} | |
2979 This optional parameter specifies the interpolation method to use, | |
2980 when the desired quantile lies between two data points `i` and `j`: | |
2981 * linear: `i + (j - i) * fraction`, where `fraction` is the | |
2982 fractional part of the index surrounded by `i` and `j`. | |
2983 * lower: `i`. | |
2984 * higher: `j`. | |
2985 * nearest: `i` or `j` whichever is nearest. | |
2986 * midpoint: (`i` + `j`) / 2. | |
2987 | |
2988 .. versionadded:: 1.9.0 | |
2989 keepdims : bool, optional | |
2990 If this is set to True, the axes which are reduced are left | |
2991 in the result as dimensions with size one. With this option, | |
2992 the result will broadcast correctly against the original `arr`. | |
2993 | |
2994 .. versionadded:: 1.9.0 | |
2995 | |
2996 Returns | |
2997 ------- | |
2998 percentile : scalar or ndarray | |
2999 If a single percentile `q` is given and axis=None a scalar is | |
3000 returned. If multiple percentiles `q` are given an array holding | |
3001 the result is returned. The results are listed in the first axis. | |
3002 If `out` is specified, that array is returned | |
3003 instead. If the input contains integers, or floats of smaller | |
3004 precision than 64, then the output data-type is float64. Otherwise, | |
3005 the output data-type is the same as that of the input. | |
3006 | |
3007 See Also | |
3008 -------- | |
3009 mean, median | |
3010 | |
3011 Notes | |
3012 ----- | |
3013 Given a vector V of length N, the q-th percentile of V is the q-th ranked | |
3014 value in a sorted copy of V. The values and distances of the two | |
3015 nearest neighbors as well as the `interpolation` parameter will | |
3016 determine the percentile if the normalized ranking does not match q | |
3017 exactly. This function is the same as the median if ``q=50``, the same | |
3018 as the minimum if ``q=0`` and the same as the maximum if ``q=100``. | |
3019 | |
3020 Examples | |
3021 -------- | |
3022 >>> a = np.array([[10, 7, 4], [3, 2, 1]]) | |
3023 >>> a | |
3024 array([[10, 7, 4], | |
3025 [ 3, 2, 1]]) | |
3026 >>> np.percentile(a, 50) | |
3027 array([ 3.5]) | |
3028 >>> np.percentile(a, 50, axis=0) | |
3029 array([[ 6.5, 4.5, 2.5]]) | |
3030 >>> np.percentile(a, 50, axis=1) | |
3031 array([[ 7.], | |
3032 [ 2.]]) | |
3033 | |
3034 >>> m = np.percentile(a, 50, axis=0) | |
3035 >>> out = np.zeros_like(m) | |
3036 >>> np.percentile(a, 50, axis=0, out=m) | |
3037 array([[ 6.5, 4.5, 2.5]]) | |
3038 >>> m | |
3039 array([[ 6.5, 4.5, 2.5]]) | |
3040 | |
3041 >>> b = a.copy() | |
3042 >>> np.percentile(b, 50, axis=1, overwrite_input=True) | |
3043 array([[ 7.], | |
3044 [ 2.]]) | |
3045 >>> assert not np.all(a==b) | |
3046 >>> b = a.copy() | |
3047 >>> np.percentile(b, 50, axis=None, overwrite_input=True) | |
3048 array([ 3.5]) | |
3049 | |
3050 """ | |
3051 q = array(q, dtype=np.float64, copy=True) | |
3052 r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out, | |
3053 overwrite_input=overwrite_input, | |
3054 interpolation=interpolation) | |
3055 if keepdims: | |
3056 if q.ndim == 0: | |
3057 return r.reshape(k) | |
3058 else: | |
3059 return r.reshape([len(q)] + k) | |
3060 else: | |
3061 return r | |
3062 | |
3063 | |
3064 def _percentile(a, q, axis=None, out=None, | |
3065 overwrite_input=False, interpolation='linear', keepdims=False): | |
3066 a = asarray(a) | |
3067 if q.ndim == 0: | |
3068 # Do not allow 0-d arrays because the following code fails for scalars | |
3069 zerod = True | |
3070 q = q[None] | |
3071 else: | |
3072 zerod = False | |
3073 | |
3074 # avoid expensive reductions, relevant for arrays with < O(1000) elements | |
3075 if q.size < 10: | |
3076 for i in range(q.size): | |
3077 if q[i] < 0. or q[i] > 100.: | |
3078 raise ValueError("Percentiles must be in the range [0,100]") | |
3079 q[i] /= 100. | |
3080 else: | |
3081 # faster than any() | |
3082 if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.): | |
3083 raise ValueError("Percentiles must be in the range [0,100]") | |
3084 q /= 100. | |
3085 | |
3086 # prepare a for partitioning | |
3087 if overwrite_input: | |
3088 if axis is None: | |
3089 ap = a.ravel() | |
3090 else: | |
3091 ap = a | |
3092 else: | |
3093 if axis is None: | |
3094 ap = a.flatten() | |
3095 else: | |
3096 ap = a.copy() | |
3097 | |
3098 if axis is None: | |
3099 axis = 0 | |
3100 | |
3101 Nx = ap.shape[axis] | |
3102 indices = q * (Nx - 1) | |
3103 | |
3104 # round fractional indices according to interpolation method | |
3105 if interpolation == 'lower': | |
3106 indices = floor(indices).astype(intp) | |
3107 elif interpolation == 'higher': | |
3108 indices = ceil(indices).astype(intp) | |
3109 elif interpolation == 'midpoint': | |
3110 indices = floor(indices) + 0.5 | |
3111 elif interpolation == 'nearest': | |
3112 indices = around(indices).astype(intp) | |
3113 elif interpolation == 'linear': | |
3114 pass # keep index as fraction and interpolate | |
3115 else: | |
3116 raise ValueError( | |
3117 "interpolation can only be 'linear', 'lower' 'higher', " | |
3118 "'midpoint', or 'nearest'") | |
3119 | |
3120 if indices.dtype == intp: # take the points along axis | |
3121 ap.partition(indices, axis=axis) | |
3122 # ensure axis with qth is first | |
3123 ap = np.rollaxis(ap, axis, 0) | |
3124 axis = 0 | |
3125 | |
3126 if zerod: | |
3127 indices = indices[0] | |
3128 r = take(ap, indices, axis=axis, out=out) | |
3129 else: # weight the points above and below the indices | |
3130 indices_below = floor(indices).astype(intp) | |
3131 indices_above = indices_below + 1 | |
3132 indices_above[indices_above > Nx - 1] = Nx - 1 | |
3133 | |
3134 weights_above = indices - indices_below | |
3135 weights_below = 1.0 - weights_above | |
3136 | |
3137 weights_shape = [1, ] * ap.ndim | |
3138 weights_shape[axis] = len(indices) | |
3139 weights_below.shape = weights_shape | |
3140 weights_above.shape = weights_shape | |
3141 | |
3142 ap.partition(concatenate((indices_below, indices_above)), axis=axis) | |
3143 x1 = take(ap, indices_below, axis=axis) * weights_below | |
3144 x2 = take(ap, indices_above, axis=axis) * weights_above | |
3145 | |
3146 # ensure axis with qth is first | |
3147 x1 = np.rollaxis(x1, axis, 0) | |
3148 x2 = np.rollaxis(x2, axis, 0) | |
3149 | |
3150 if zerod: | |
3151 x1 = x1.squeeze(0) | |
3152 x2 = x2.squeeze(0) | |
3153 | |
3154 if out is not None: | |
3155 r = add(x1, x2, out=out) | |
3156 else: | |
3157 r = add(x1, x2) | |
3158 | |
3159 return r | |
3160 | |
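# Worked example of the 'linear' branch above (illustrative): for a sorted
# 1-D array of size 5 and q = 40, the scaled q is 0.4, so
# indices = 0.4 * (5 - 1) = 1.6, indices_below = 1, indices_above = 2,
# weights_above = 0.6 and weights_below = 0.4; the result is
# 0.4 * a_sorted[1] + 0.6 * a_sorted[2].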
3161 | |
3162 def trapz(y, x=None, dx=1.0, axis=-1): | |
3163 """ | |
3164 Integrate along the given axis using the composite trapezoidal rule. | |
3165 | |
3166 Integrate `y` (`x`) along given axis. | |
3167 | |
3168 Parameters | |
3169 ---------- | |
3170 y : array_like | |
3171 Input array to integrate. | |
3172 x : array_like, optional | |
3173 If `x` is None, then spacing between all `y` elements is `dx`. | |
3174 dx : scalar, optional | |
3175 If `x` is None, spacing given by `dx` is assumed. Default is 1. | |
3176 axis : int, optional | |
3177 The axis along which to integrate. Default is the last axis. | |
3178 | |
3179 Returns | |
3180 ------- | |
3181 trapz : float | |
3182 Definite integral as approximated by trapezoidal rule. | |
3183 | |
3184 See Also | |
3185 -------- | |
3186 sum, cumsum | |
3187 | |
3188 Notes | |
3189 ----- | |
3190 Image [2]_ illustrates the trapezoidal rule: the y-axis locations of the | |
3191 points are taken from the `y` array, and by default the x-axis distances | |
3192 between points are 1.0; alternatively they can be provided via the `x` | |
3193 array or the `dx` scalar. The return value equals the combined area under | |
3194 the red lines. | |
3195 | |
3196 | |
3197 References | |
3198 ---------- | |
3199 .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule | |
3200 | |
3201 .. [2] Illustration image: | |
3202 http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png | |
3203 | |
3204 Examples | |
3205 -------- | |
3206 >>> np.trapz([1,2,3]) | |
3207 4.0 | |
3208 >>> np.trapz([1,2,3], x=[4,6,8]) | |
3209 8.0 | |
3210 >>> np.trapz([1,2,3], dx=2) | |
3211 8.0 | |
3212 >>> a = np.arange(6).reshape(2, 3) | |
3213 >>> a | |
3214 array([[0, 1, 2], | |
3215 [3, 4, 5]]) | |
3216 >>> np.trapz(a, axis=0) | |
3217 array([ 1.5, 2.5, 3.5]) | |
3218 >>> np.trapz(a, axis=1) | |
3219 array([ 2., 8.]) | |
3220 | |
3221 """ | |
3222 y = asanyarray(y) | |
3223 if x is None: | |
3224 d = dx | |
3225 else: | |
3226 x = asanyarray(x) | |
3227 if x.ndim == 1: | |
3228 d = diff(x) | |
3229 # reshape to correct shape | |
3230 shape = [1]*y.ndim | |
3231 shape[axis] = d.shape[0] | |
3232 d = d.reshape(shape) | |
3233 else: | |
3234 d = diff(x, axis=axis) | |
3235 nd = len(y.shape) | |
3236 slice1 = [slice(None)]*nd | |
3237 slice2 = [slice(None)]*nd | |
3238 slice1[axis] = slice(1, None) | |
3239 slice2[axis] = slice(None, -1) | |
3240 try: | |
3241 ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis) | |
3242 except ValueError: | |
3243 # Operations didn't work, cast to ndarray | |
3244 d = np.asarray(d) | |
3245 y = np.asarray(y) | |
3246 ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis) | |
3247 return ret | |
3248 | |
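# --- Editorial sketch (not part of the original module) ---------------------
# trapz implements the composite trapezoidal rule: the integral is estimated
# by summing 0.5 * (y[i] + y[i+1]) * (x[i+1] - x[i]) over consecutive sample
# pairs.  The helper name below is hypothetical; it reuses the module-level
# `np` import.
def _trapz_rule_example():
    y = np.array([1.0, 2.0, 3.0])
    x = np.array([4.0, 6.0, 8.0])
    # Sum the individual trapezoid areas by hand ...
    manual = 0.0
    for i in range(len(y) - 1):
        manual += 0.5 * (y[i] + y[i + 1]) * (x[i + 1] - x[i])
    # ... and check against the library routine (matches the docstring value).
    assert manual == 8.0
    assert np.trapz(y, x=x) == manual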
3249 | |
3250 # always succeeds (never raises; see docstring) |
3251 def add_newdoc(place, obj, doc): | |
3252 """Adds documentation to obj which is in module place. | |
3253 | |
3254 If doc is a string, add it to obj as a docstring. |
3255 | |
3256 If doc is a tuple, then the first element is interpreted as | |
3257 an attribute of obj and the second as the docstring | |
3258 (method, docstring) | |
3259 | |
3260 If doc is a list, then each element of the list should be a | |
3261 sequence of length two --> [(method1, docstring1), | |
3262 (method2, docstring2), ...] | |
3263 | |
3264 This routine never raises an error. | |
3265 | |
3266 This routine cannot modify read-only docstrings, such as those that |
3267 appear in new-style classes or built-in functions. Because this |
3268 routine never raises an error, the caller must check manually |
3269 that the docstrings were changed. |
3270 """ | |
3271 try: | |
3272 new = getattr(__import__(place, globals(), {}, [obj]), obj) | |
3273 if isinstance(doc, str): | |
3274 add_docstring(new, doc.strip()) | |
3275 elif isinstance(doc, tuple): | |
3276 add_docstring(getattr(new, doc[0]), doc[1].strip()) | |
3277 elif isinstance(doc, list): | |
3278 for val in doc: | |
3279 add_docstring(getattr(new, val[0]), val[1].strip()) | |
3280 except: | |
3281 pass | |
3282 | |
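# --- Editorial sketch (not part of the original module) ---------------------
# The calls below illustrate the three `doc` shapes accepted by add_newdoc.
# 'some_module' and 'SomeClass' are placeholder names; because add_newdoc
# swallows every error, calls against names that cannot be imported simply
# do nothing, so this sketch is safe to run.
def _add_newdoc_shapes_example():
    # 1) plain string: becomes the docstring of the named object
    add_newdoc('some_module', 'SomeClass', "Class-level docstring.")
    # 2) (attribute, docstring) tuple: documents one attribute or method
    add_newdoc('some_module', 'SomeClass', ('method', "Method docstring."))
    # 3) list of such tuples: documents several attributes in one call
    add_newdoc('some_module', 'SomeClass',
               [('method_a', "Docstring for method_a."),
                ('method_b', "Docstring for method_b.")])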
3283 | |
3284 # Based on scitools meshgrid | |
3285 def meshgrid(*xi, **kwargs): | |
3286 """ | |
3287 Return coordinate matrices from coordinate vectors. | |
3288 | |
3289 Make N-D coordinate arrays for vectorized evaluations of | |
3290 N-D scalar/vector fields over N-D grids, given | |
3291 one-dimensional coordinate arrays x1, x2,..., xn. | |
3292 | |
3293 .. versionchanged:: 1.9 | |
3294 1-D and 0-D cases are allowed. | |
3295 | |
3296 Parameters | |
3297 ---------- | |
3298 x1, x2,..., xn : array_like | |
3299 1-D arrays representing the coordinates of a grid. | |
3300 indexing : {'xy', 'ij'}, optional | |
3301 Cartesian ('xy', default) or matrix ('ij') indexing of output. | |
3302 See Notes for more details. | |
3303 | |
3304 .. versionadded:: 1.7.0 | |
3305 sparse : bool, optional | |
3306 If True a sparse grid is returned in order to conserve memory. | |
3307 Default is False. | |
3308 | |
3309 .. versionadded:: 1.7.0 | |
3310 copy : bool, optional | |
3311 If False, views into the original arrays are returned in order to |
3312 conserve memory. Default is True. Please note that | |
3313 ``sparse=False, copy=False`` will likely return non-contiguous | |
3314 arrays. Furthermore, more than one element of a broadcast array | |
3315 may refer to a single memory location. If you need to write to the | |
3316 arrays, make copies first. | |
3317 | |
3318 .. versionadded:: 1.7.0 | |
3319 | |
3320 Returns | |
3321 ------- | |
3322 X1, X2,..., XN : ndarray | |
3323 For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, |
3324 return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij' | |
3325 or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy' | |
3326 with the elements of `xi` repeated to fill the matrix along | |
3327 the first dimension for `x1`, the second for `x2` and so on. | |
3328 | |
3329 Notes | |
3330 ----- | |
3331 This function supports both indexing conventions through the indexing | |
3332 keyword argument. Giving the string 'ij' returns a meshgrid with | |
3333 matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. | |
3334 In the 2-D case with inputs of length M and N, the outputs are of shape | |
3335 (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case | |
3336 with inputs of length M, N and P, outputs are of shape (N, M, P) for | |
3337 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is | |
3338 illustrated by the following code snippet:: | |
3339 | |
3340 xv, yv = meshgrid(x, y, sparse=False, indexing='ij') | |
3341 for i in range(nx): | |
3342 for j in range(ny): | |
3343 # treat xv[i,j], yv[i,j] | |
3344 | |
3345 xv, yv = meshgrid(x, y, sparse=False, indexing='xy') | |
3346 for i in range(nx): | |
3347 for j in range(ny): | |
3348 # treat xv[j,i], yv[j,i] | |
3349 | |
3350 In the 1-D and 0-D case, the indexing and sparse keywords have no effect. | |
3351 | |
3352 See Also | |
3353 -------- | |
3354 index_tricks.mgrid : Construct a multi-dimensional "meshgrid" | |
3355 using indexing notation. | |
3356 index_tricks.ogrid : Construct an open multi-dimensional "meshgrid" | |
3357 using indexing notation. | |
3358 | |
3359 Examples | |
3360 -------- | |
3361 >>> nx, ny = (3, 2) | |
3362 >>> x = np.linspace(0, 1, nx) | |
3363 >>> y = np.linspace(0, 1, ny) | |
3364 >>> xv, yv = meshgrid(x, y) | |
3365 >>> xv | |
3366 array([[ 0. , 0.5, 1. ], | |
3367 [ 0. , 0.5, 1. ]]) | |
3368 >>> yv | |
3369 array([[ 0., 0., 0.], | |
3370 [ 1., 1., 1.]]) | |
3371 >>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays | |
3372 >>> xv | |
3373 array([[ 0. , 0.5, 1. ]]) | |
3374 >>> yv | |
3375 array([[ 0.], | |
3376 [ 1.]]) | |
3377 | |
3378 `meshgrid` is very useful for evaluating functions on a grid. |
3379 | |
3380 >>> x = np.arange(-5, 5, 0.1) | |
3381 >>> y = np.arange(-5, 5, 0.1) | |
3382 >>> xx, yy = meshgrid(x, y, sparse=True) | |
3383 >>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2) | |
3384 >>> h = plt.contourf(x,y,z) | |
3385 | |
3386 """ | |
3387 ndim = len(xi) | |
3388 | |
3389 copy_ = kwargs.pop('copy', True) | |
3390 sparse = kwargs.pop('sparse', False) | |
3391 indexing = kwargs.pop('indexing', 'xy') | |
3392 | |
3393 if kwargs: | |
3394 raise TypeError("meshgrid() got an unexpected keyword argument '%s'" | |
3395 % (list(kwargs)[0],)) | |
3396 | |
3397 if indexing not in ['xy', 'ij']: | |
3398 raise ValueError( | |
3399 "Valid values for `indexing` are 'xy' and 'ij'.") | |
3400 | |
3401 s0 = (1,) * ndim | |
3402 output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::]) | |
3403 for i, x in enumerate(xi)] | |
3404 | |
3405 shape = [x.size for x in output] | |
3406 | |
3407 if indexing == 'xy' and ndim > 1: | |
3408 # switch first and second axis | |
3409 output[0].shape = (1, -1) + (1,)*(ndim - 2) | |
3410 output[1].shape = (-1, 1) + (1,)*(ndim - 2) | |
3411 shape[0], shape[1] = shape[1], shape[0] | |
3412 | |
3413 if sparse: | |
3414 if copy_: | |
3415 return [x.copy() for x in output] | |
3416 else: | |
3417 return output | |
3418 else: | |
3419 # Return the full N-D matrix (not only the 1-D vector) | |
3420 if copy_: | |
3421 mult_fact = np.ones(shape, dtype=int) | |
3422 return [x * mult_fact for x in output] | |
3423 else: | |
3424 return np.broadcast_arrays(*output) | |
3425 | |
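# --- Editorial sketch (not part of the original module) ---------------------
# Output shapes produced by the indexing and sparse options handled above,
# for 1-D inputs of lengths M = 3 and N = 2.  The helper name is hypothetical
# and reuses the module-level `np` import.
def _meshgrid_shapes_example():
    x = np.linspace(0, 1, 3)   # length M = 3
    y = np.linspace(0, 1, 2)   # length N = 2
    xv, yv = np.meshgrid(x, y)                  # Cartesian 'xy' (default)
    assert xv.shape == (2, 3) and yv.shape == (2, 3)    # (N, M)
    xi, yi = np.meshgrid(x, y, indexing='ij')   # matrix 'ij' indexing
    assert xi.shape == (3, 2) and yi.shape == (3, 2)    # (M, N)
    xs, ys = np.meshgrid(x, y, sparse=True)     # broadcastable sparse grid
    assert xs.shape == (1, 3) and ys.shape == (2, 1)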
3426 | |
3427 def delete(arr, obj, axis=None): | |
3428 """ | |
3429 Return a new array with sub-arrays along an axis deleted. For a one | |
3430 dimensional array, this returns those entries not returned by | |
3431 `arr[obj]`. | |
3432 | |
3433 Parameters | |
3434 ---------- | |
3435 arr : array_like | |
3436 Input array. | |
3437 obj : slice, int or array of ints | |
3438 Indicate which sub-arrays to remove. | |
3439 axis : int, optional | |
3440 The axis along which to delete the subarray defined by `obj`. | |
3441 If `axis` is None, `obj` is applied to the flattened array. | |
3442 | |
3443 Returns | |
3444 ------- | |
3445 out : ndarray | |
3446 A copy of `arr` with the elements specified by `obj` removed. Note | |
3447 that `delete` does not occur in-place. If `axis` is None, `out` is | |
3448 a flattened array. | |
3449 | |
3450 See Also | |
3451 -------- | |
3452 insert : Insert elements into an array. | |
3453 append : Append elements at the end of an array. | |
3454 | |
3455 Notes | |
3456 ----- | |
3457 Often it is preferable to use a boolean mask. For example: | |
3458 | |
3459 >>> mask = np.ones(len(arr), dtype=bool) | |
3460 >>> mask[[0,2,4]] = False | |
3461 >>> result = arr[mask,...] | |
3462 | |
3463 This is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further |
3464 use of `mask`. | |
3465 | |
3466 Examples | |
3467 -------- | |
3468 >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) | |
3469 >>> arr | |
3470 array([[ 1, 2, 3, 4], | |
3471 [ 5, 6, 7, 8], | |
3472 [ 9, 10, 11, 12]]) | |
3473 >>> np.delete(arr, 1, 0) | |
3474 array([[ 1, 2, 3, 4], | |
3475 [ 9, 10, 11, 12]]) | |
3476 | |
3477 >>> np.delete(arr, np.s_[::2], 1) | |
3478 array([[ 2, 4], | |
3479 [ 6, 8], | |
3480 [10, 12]]) | |
3481 >>> np.delete(arr, [1,3,5], None) | |
3482 array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) | |
3483 | |
3484 """ | |
3485 wrap = None | |
3486 if type(arr) is not ndarray: | |
3487 try: | |
3488 wrap = arr.__array_wrap__ | |
3489 except AttributeError: | |
3490 pass | |
3491 | |
3492 arr = asarray(arr) | |
3493 ndim = arr.ndim | |
3494 if axis is None: | |
3495 if ndim != 1: | |
3496 arr = arr.ravel() | |
3497 ndim = arr.ndim | |
3498 axis = ndim - 1 | |
3499 if ndim == 0: | |
3500 warnings.warn( | |
3501 "in the future the special handling of scalars will be removed " | |
3502 "from delete and raise an error", DeprecationWarning) | |
3503 if wrap: | |
3504 return wrap(arr) | |
3505 else: | |
3506 return arr.copy() | |
3507 | |
3508 slobj = [slice(None)]*ndim | |
3509 N = arr.shape[axis] | |
3510 newshape = list(arr.shape) | |
3511 | |
3512 if isinstance(obj, slice): | |
3513 start, stop, step = obj.indices(N) | |
3514 xr = range(start, stop, step) | |
3515 numtodel = len(xr) | |
3516 | |
3517 if numtodel <= 0: | |
3518 if wrap: | |
3519 return wrap(arr.copy()) | |
3520 else: | |
3521 return arr.copy() | |
3522 | |
3523 # Invert if step is negative: | |
3524 if step < 0: | |
3525 step = -step | |
3526 start = xr[-1] | |
3527 stop = xr[0] + 1 | |
3528 | |
3529 newshape[axis] -= numtodel | |
3530 new = empty(newshape, arr.dtype, arr.flags.fnc) | |
3531 # copy initial chunk | |
3532 if start == 0: | |
3533 pass | |
3534 else: | |
3535 slobj[axis] = slice(None, start) | |
3536 new[slobj] = arr[slobj] | |
3537 # copy end chunk |
3538 if stop == N: | |
3539 pass | |
3540 else: | |
3541 slobj[axis] = slice(stop-numtodel, None) | |
3542 slobj2 = [slice(None)]*ndim | |
3543 slobj2[axis] = slice(stop, None) | |
3544 new[slobj] = arr[slobj2] | |
3545 # copy middle pieces | |
3546 if step == 1: | |
3547 pass | |
3548 else: # use array indexing. | |
3549 keep = ones(stop-start, dtype=bool) | |
3550 keep[:stop-start:step] = False | |
3551 slobj[axis] = slice(start, stop-numtodel) | |
3552 slobj2 = [slice(None)]*ndim | |
3553 slobj2[axis] = slice(start, stop) | |
3554 arr = arr[slobj2] | |
3555 slobj2[axis] = keep | |
3556 new[slobj] = arr[slobj2] | |
3557 if wrap: | |
3558 return wrap(new) | |
3559 else: | |
3560 return new | |
3561 | |
3562 _obj = obj | |
3563 obj = np.asarray(obj) | |
3564 # After removing the special handling of booleans and out of | |
3565 # bounds values, the conversion to the array can be removed. | |
3566 if obj.dtype == bool: | |
3567 warnings.warn( | |
3568 "in the future insert will treat boolean arrays and array-likes " | |
3569 "as boolean index instead of casting it to integer", FutureWarning) | |
3570 obj = obj.astype(intp) | |
3571 if isinstance(_obj, (int, long, integer)): | |
3572 # optimization for a single value | |
3573 obj = obj.item() | |
3574 if (obj < -N or obj >= N): | |
3575 raise IndexError( | |
3576 "index %i is out of bounds for axis %i with " | |
3577 "size %i" % (obj, axis, N)) | |
3578 if (obj < 0): | |
3579 obj += N | |
3580 newshape[axis] -= 1 | |
3581 new = empty(newshape, arr.dtype, arr.flags.fnc) | |
3582 slobj[axis] = slice(None, obj) | |
3583 new[slobj] = arr[slobj] | |
3584 slobj[axis] = slice(obj, None) | |
3585 slobj2 = [slice(None)]*ndim | |
3586 slobj2[axis] = slice(obj+1, None) | |
3587 new[slobj] = arr[slobj2] | |
3588 else: | |
3589 if obj.size == 0 and not isinstance(_obj, np.ndarray): | |
3590 obj = obj.astype(intp) | |
3591 if not np.can_cast(obj, intp, 'same_kind'): | |
3592 # obj.size = 1 special case always failed and would just | |
3593 # give superfluous warnings. | |
3594 warnings.warn( | |
3595 "using a non-integer array as obj in delete will result in an " | |
3596 "error in the future", DeprecationWarning) | |
3597 obj = obj.astype(intp) | |
3598 keep = ones(N, dtype=bool) | |
3599 | |
3600 # Test if there are out-of-bounds indices; this is deprecated |
3601 inside_bounds = (obj < N) & (obj >= -N) | |
3602 if not inside_bounds.all(): | |
3603 warnings.warn( | |
3604 "in the future out of bounds indices will raise an error " | |
3605 "instead of being ignored by `numpy.delete`.", | |
3606 DeprecationWarning) | |
3607 obj = obj[inside_bounds] | |
3608 positive_indices = obj >= 0 | |
3609 if not positive_indices.all(): | |
3610 warnings.warn( | |
3611 "in the future negative indices will not be ignored by " | |
3612 "`numpy.delete`.", FutureWarning) | |
3613 obj = obj[positive_indices] | |
3614 | |
3615 keep[obj, ] = False | |
3616 slobj[axis] = keep | |
3617 new = arr[slobj] | |
3618 | |
3619 if wrap: | |
3620 return wrap(new) | |
3621 else: | |
3622 return new | |
3623 | |
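# --- Editorial sketch (not part of the original module) ---------------------
# The boolean-mask equivalence mentioned in the delete docstring, plus the
# slice branch handled above.  The helper name is hypothetical and reuses the
# module-level `np` import.
def _delete_mask_example():
    arr = np.arange(10)
    # Deleting indices [0, 2, 4] keeps exactly the elements a boolean mask
    # with those positions cleared would keep.
    mask = np.ones(arr.shape[0], dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(np.delete(arr, [0, 2, 4]), arr[mask])
    # A slice is expanded to the indices it selects before deletion.
    assert np.array_equal(np.delete(arr, np.s_[::2]), arr[1::2])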
3624 | |
3625 def insert(arr, obj, values, axis=None): | |
3626 """ | |
3627 Insert values along the given axis before the given indices. | |
3628 | |
3629 Parameters | |
3630 ---------- | |
3631 arr : array_like | |
3632 Input array. | |
3633 obj : int, slice or sequence of ints | |
3634 Object that defines the index or indices before which `values` is | |
3635 inserted. | |
3636 | |
3637 .. versionadded:: 1.8.0 | |
3638 | |
3639 Support for multiple insertions when `obj` is a single scalar or a | |
3640 sequence with one element (similar to calling insert multiple | |
3641 times). | |
3642 values : array_like | |
3643 Values to insert into `arr`. If the type of `values` is different | |
3644 from that of `arr`, `values` is converted to the type of `arr`. | |
3645 `values` should be shaped so that ``arr[...,obj,...] = values`` | |
3646 is legal. | |
3647 axis : int, optional | |
3648 Axis along which to insert `values`. If `axis` is None then `arr` | |
3649 is flattened first. | |
3650 | |
3651 Returns | |
3652 ------- | |
3653 out : ndarray | |
3654 A copy of `arr` with `values` inserted. Note that `insert` | |
3655 does not occur in-place: a new array is returned. If | |
3656 `axis` is None, `out` is a flattened array. | |
3657 | |
3658 See Also | |
3659 -------- | |
3660 append : Append elements at the end of an array. | |
3661 concatenate : Join a sequence of arrays together. | |
3662 delete : Delete elements from an array. | |
3663 | |
3664 Notes | |
3665 ----- | |
3666 Note that for higher dimensional inserts `obj=0` behaves very differently |
3667 from `obj=[0]`, just as `arr[:,0,:] = values` is different from |
3668 `arr[:,[0],:] = values`. |
3669 | |
3670 Examples | |
3671 -------- | |
3672 >>> a = np.array([[1, 1], [2, 2], [3, 3]]) | |
3673 >>> a | |
3674 array([[1, 1], | |
3675 [2, 2], | |
3676 [3, 3]]) | |
3677 >>> np.insert(a, 1, 5) | |
3678 array([1, 5, 1, 2, 2, 3, 3]) | |
3679 >>> np.insert(a, 1, 5, axis=1) | |
3680 array([[1, 5, 1], | |
3681 [2, 5, 2], | |
3682 [3, 5, 3]]) | |
3683 | |
3684 Difference between sequence and scalars: | |
3685 >>> np.insert(a, [1], [[1],[2],[3]], axis=1) | |
3686 array([[1, 1, 1], | |
3687 [2, 2, 2], | |
3688 [3, 3, 3]]) | |
3689 >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), | |
3690 ... np.insert(a, [1], [[1],[2],[3]], axis=1)) | |
3691 True | |
3692 | |
3693 >>> b = a.flatten() | |
3694 >>> b | |
3695 array([1, 1, 2, 2, 3, 3]) | |
3696 >>> np.insert(b, [2, 2], [5, 6]) | |
3697 array([1, 1, 5, 6, 2, 2, 3, 3]) | |
3698 | |
3699 >>> np.insert(b, slice(2, 4), [5, 6]) | |
3700 array([1, 1, 5, 2, 6, 2, 3, 3]) | |
3701 | |
3702 >>> np.insert(b, [2, 2], [7.13, False]) # type casting | |
3703 array([1, 1, 7, 0, 2, 2, 3, 3]) | |
3704 | |
3705 >>> x = np.arange(8).reshape(2, 4) | |
3706 >>> idx = (1, 3) | |
3707 >>> np.insert(x, idx, 999, axis=1) | |
3708 array([[ 0, 999, 1, 2, 999, 3], | |
3709 [ 4, 999, 5, 6, 999, 7]]) | |
3710 | |
3711 """ | |
3712 wrap = None | |
3713 if type(arr) is not ndarray: | |
3714 try: | |
3715 wrap = arr.__array_wrap__ | |
3716 except AttributeError: | |
3717 pass | |
3718 | |
3719 arr = asarray(arr) | |
3720 ndim = arr.ndim | |
3721 if axis is None: | |
3722 if ndim != 1: | |
3723 arr = arr.ravel() | |
3724 ndim = arr.ndim | |
3725 axis = ndim - 1 | |
3726 else: | |
3727 if ndim > 0 and (axis < -ndim or axis >= ndim): | |
3728 raise IndexError( | |
3729 "axis %i is out of bounds for an array of " | |
3730 "dimension %i" % (axis, ndim)) | |
3731 if (axis < 0): | |
3732 axis += ndim | |
3733 if (ndim == 0): | |
3734 warnings.warn( | |
3735 "in the future the special handling of scalars will be removed " | |
3736 "from insert and raise an error", DeprecationWarning) | |
3737 arr = arr.copy() | |
3738 arr[...] = values | |
3739 if wrap: | |
3740 return wrap(arr) | |
3741 else: | |
3742 return arr | |
3743 slobj = [slice(None)]*ndim | |
3744 N = arr.shape[axis] | |
3745 newshape = list(arr.shape) | |
3746 | |
3747 if isinstance(obj, slice): | |
3748 # expand the slice into an array of indices |
3749 indices = arange(*obj.indices(N), **{'dtype': intp}) | |
3750 else: | |
3751 # need to copy obj, because indices will be changed in-place | |
3752 indices = np.array(obj) | |
3753 if indices.dtype == bool: | |
3754 # See also delete | |
3755 warnings.warn( | |
3756 "in the future insert will treat boolean arrays and " | |
3757 "array-likes as a boolean index instead of casting it to " | |
3758 "integer", FutureWarning) | |
3759 indices = indices.astype(intp) | |
3760 # Code after warning period: | |
3761 #if obj.ndim != 1: | |
3762 # raise ValueError('boolean array argument obj to insert ' | |
3763 # 'must be one dimensional') | |
3764 #indices = np.flatnonzero(obj) | |
3765 elif indices.ndim > 1: | |
3766 raise ValueError( | |
3767 "index array argument obj to insert must be one dimensional " | |
3768 "or scalar") | |
3769 if indices.size == 1: | |
3770 index = indices.item() | |
3771 if index < -N or index > N: | |
3772 raise IndexError( | |
3773 "index %i is out of bounds for axis %i with " | |
3774 "size %i" % (obj, axis, N)) | |
3775 if (index < 0): | |
3776 index += N | |
3777 | |
3778 # There are some object array corner cases here, but we cannot avoid | |
3779 # that: | |
3780 values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) | |
3781 if indices.ndim == 0: | |
3782 # broadcasting is very different here, since a[:,0,:] = ... behaves | |
3783 # very differently from a[:,[0],:] = ...! This changes values so that |
3784 # it works like the second case. (here a[:,0:1,:]) |
3785 values = np.rollaxis(values, 0, (axis % values.ndim) + 1) | |
3786 numnew = values.shape[axis] | |
3787 newshape[axis] += numnew | |
3788 new = empty(newshape, arr.dtype, arr.flags.fnc) | |
3789 slobj[axis] = slice(None, index) | |
3790 new[slobj] = arr[slobj] | |
3791 slobj[axis] = slice(index, index+numnew) | |
3792 new[slobj] = values | |
3793 slobj[axis] = slice(index+numnew, None) | |
3794 slobj2 = [slice(None)] * ndim | |
3795 slobj2[axis] = slice(index, None) | |
3796 new[slobj] = arr[slobj2] | |
3797 if wrap: | |
3798 return wrap(new) | |
3799 return new | |
3800 elif indices.size == 0 and not isinstance(obj, np.ndarray): | |
3801 # Can safely cast the empty list to intp | |
3802 indices = indices.astype(intp) | |
3803 | |
3804 if not np.can_cast(indices, intp, 'same_kind'): | |
3805 warnings.warn( | |
3806 "using a non-integer array as obj in insert will result in an " | |
3807 "error in the future", DeprecationWarning) | |
3808 indices = indices.astype(intp) | |
3809 | |
3810 indices[indices < 0] += N | |
3811 | |
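    # After the stable sort below, each requested index is increased by its
    # rank in that sort, i.e. by the number of insertions placed before it,
    # so every value ends up immediately ahead of the element it was asked
    # to precede in the enlarged array; values sharing an index keep the
    # order in which they were given.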
3812 numnew = len(indices) | |
3813 order = indices.argsort(kind='mergesort') # stable sort | |
3814 indices[order] += np.arange(numnew) | |
3815 | |
3816 newshape[axis] += numnew | |
3817 old_mask = ones(newshape[axis], dtype=bool) | |
3818 old_mask[indices] = False | |
3819 | |
3820 new = empty(newshape, arr.dtype, arr.flags.fnc) | |
3821 slobj2 = [slice(None)]*ndim | |
3822 slobj[axis] = indices | |
3823 slobj2[axis] = old_mask | |
3824 new[slobj] = values | |
3825 new[slobj2] = arr | |
3826 | |
3827 if wrap: | |
3828 return wrap(new) | |
3829 return new | |
3830 | |
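# --- Editorial sketch (not part of the original module) ---------------------
# A short illustration of the index-offset behaviour implemented above: each
# value is placed directly before the element that originally sat at its
# index, and values sharing an index keep their given order.  The helper name
# is hypothetical; both expected results also appear in the docstring above.
def _insert_offsets_example():
    b = np.array([1, 1, 2, 2, 3, 3])
    # Two insertions before original index 2 land ahead of the first 2,
    # in the order they were supplied.
    assert np.array_equal(np.insert(b, [2, 2], [5, 6]),
                          [1, 1, 5, 6, 2, 2, 3, 3])
    # Distinct indices: 999 is inserted before original columns 1 and 3.
    x = np.arange(8).reshape(2, 4)
    assert np.array_equal(np.insert(x, (1, 3), 999, axis=1)[0],
                          [0, 999, 1, 2, 999, 3])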
3831 | |
3832 def append(arr, values, axis=None): | |
3833 """ | |
3834 Append values to the end of an array. | |
3835 | |
3836 Parameters | |
3837 ---------- | |
3838 arr : array_like | |
3839 Values are appended to a copy of this array. | |
3840 values : array_like | |
3841 These values are appended to a copy of `arr`. It must be of the | |
3842 correct shape (the same shape as `arr`, excluding `axis`). If | |
3843 `axis` is not specified, `values` can be any shape and will be | |
3844 flattened before use. | |
3845 axis : int, optional | |
3846 The axis along which `values` are appended. If `axis` is not | |
3847 given, both `arr` and `values` are flattened before use. | |
3848 | |
3849 Returns | |
3850 ------- | |
3851 append : ndarray | |
3852 A copy of `arr` with `values` appended to `axis`. Note that | |
3853 `append` does not occur in-place: a new array is allocated and | |
3854 filled. If `axis` is None, `out` is a flattened array. | |
3855 | |
3856 See Also | |
3857 -------- | |
3858 insert : Insert elements into an array. | |
3859 delete : Delete elements from an array. | |
3860 | |
3861 Examples | |
3862 -------- | |
3863 >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) | |
3864 array([1, 2, 3, 4, 5, 6, 7, 8, 9]) | |
3865 | |
3866 When `axis` is specified, `values` must have the correct shape. | |
3867 | |
3868 >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) | |
3869 array([[1, 2, 3], | |
3870 [4, 5, 6], | |
3871 [7, 8, 9]]) | |
3872 >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) | |
3873 Traceback (most recent call last): | |
3874 ... | |
3875 ValueError: arrays must have same number of dimensions | |
3876 | |
3877 """ | |
3878 arr = asanyarray(arr) | |
3879 if axis is None: | |
3880 if arr.ndim != 1: | |
3881 arr = arr.ravel() | |
3882 values = ravel(values) | |
3883 axis = arr.ndim-1 | |
3884 return concatenate((arr, values), axis=axis) |
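# --- Editorial sketch (not part of the original module) ---------------------
# append is a thin wrapper around concatenate: with an explicit axis the two
# agree exactly, and with axis=None both inputs are flattened first.  The
# helper name is hypothetical and reuses the module-level `np` import.
def _append_is_concatenate_example():
    a = np.array([[1, 2, 3], [4, 5, 6]])
    b = np.array([[7, 8, 9]])
    # Explicit axis: identical to concatenate along that axis.
    assert np.array_equal(np.append(a, b, axis=0),
                          np.concatenate((a, b), axis=0))
    # axis=None: both arrays are raveled before concatenation.
    assert np.array_equal(np.append(a, [7, 8, 9]),
                          np.concatenate((a.ravel(), [7, 8, 9])))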