#!/usr/bin/python
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF K-means clustering (unary-convex matrix factorization).
"""


import numpy as np
import random

import dist
from nmf import NMF

__all__ = ["Kmeans"]


class Kmeans(NMF):
    """
    Kmeans(data, num_bases=4)

    K-means clustering. Factorize a data matrix into two matrices s.t.
    F = | data - W*H | is minimal. H is restricted to unary vectors, W
    is simply the mean over the corresponding samples in "data".

    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)

    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())

    Example
    -------
    Applying K-means to some rather stupid data set:

    >>> import numpy as np
    >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
    >>> kmeans_mdl = Kmeans(data, num_bases=2)
    >>> kmeans_mdl.factorize(niter=10)

    The basis vectors are now stored in kmeans_mdl.W, the coefficients in kmeans_mdl.H.
    To compute coefficients for an existing set of basis vectors simply copy W
    to kmeans_mdl.W, and set compute_w to False:

    >>> data = np.array([[1.5], [1.2]])
    >>> W = [[1.0, 0.0], [0.0, 1.0]]
    >>> kmeans_mdl = Kmeans(data, num_bases=2)
    >>> kmeans_mdl.W = W
    >>> kmeans_mdl.factorize(niter=1, compute_w=False)

    The result is a set of coefficients kmeans_mdl.H, s.t. data = W * kmeans_mdl.H.
    """

    def init_h(self):
        """Initialize H as an all-zero matrix and fill it via update_h().

        W has to be present before H can be initialized, since update_h()
        assigns every sample to its nearest column of W.
        """
        self.H = np.zeros((self._num_bases, self._num_samples))
        self.update_h()

    def init_w(self):
        """Initialize W by picking random data samples as the initial centers."""
        # Draw num_bases distinct sample indices. `range` (instead of the
        # Python-2-only `xrange`) keeps this working on both Python 2 and 3;
        # random.sample accepts either.
        sel = random.sample(range(self._num_samples), self._num_bases)

        # sort indices, otherwise h5py won't work
        self.W = self.data[:, np.sort(sel)]

    def update_h(self):
        """Assign every sample to its best matching center (unary H).

        Sets self.assigned (cluster index per sample) and rebuilds H so
        that column j has a single 1.0 in row self.assigned[j].
        """
        self.assigned = dist.vq(self.W, self.data)
        self.H = np.zeros(self.H.shape)
        self.H[self.assigned, range(self._num_samples)] = 1.0

    def update_w(self):
        """Move every non-empty cluster's center to the mean of its samples."""
        for i in range(self._num_bases):
            idx = np.where(self.assigned == i)[0]
            n = len(idx)
            # Update whenever the cluster is non-empty. The previous test
            # (`n > 1`) silently skipped singleton clusters, leaving their
            # centers stale; standard k-means sets such a center to its
            # single assigned sample (the mean of one sample).
            if n > 0:
                self.W[:, i] = np.sum(self.data[:, idx], axis=1) / n