# -*- coding: utf-8 -*-
"""
NMF and semi-supervised NMF via Euclidean multiplicative updates.

Created on Wed Oct 26 12:46:13 2016

@author: https://github.com/keik/nmftools/blob/master/nmftools/core.py
"""

import numpy as np


def nmf(Y, R=3, n_iter=50, init_H=None, init_U=None, verbose=False):
    """
    Decompose a non-negative matrix into components and activations with NMF.

        Y ≈ HU
        Y ∈ R (m, n)
        H ∈ R (m, k)
        U ∈ R (k, n)

    parameters
    ----
    Y: target matrix to decompose
    R: number of bases to decompose
    n_iter: number of multiplicative-update iterations
    init_H: initial value of H matrix. default is a random matrix
    init_U: initial value of U matrix. default is a random matrix
    verbose: unused; kept for interface compatibility

    return
    ----
    Array of:
    0: matrix of H
    1: matrix of U
    2: array of cost transition (Euclidean divergence before each update)
    """
    # Tiny constant to keep denominators strictly positive.
    eps = np.spacing(1)

    # Size of the input spectrogram.
    M, N = Y.shape

    # Initialization. Copy user-supplied inits to float arrays so the
    # in-place `*=` updates below never mutate the caller's data and
    # never fail on integer dtypes. An explicit empty init (old default
    # `[]`) still falls through to random initialization.
    if init_U is not None and len(init_U):
        U = np.array(init_U, dtype=float)
        R = U.shape[0]
    else:
        U = np.random.rand(R, N)

    if init_H is not None and len(init_H):
        H = np.array(init_H, dtype=float)
        R = H.shape[1]
    else:
        H = np.random.rand(M, R)

    # Cost (Euclidean divergence) recorded at the start of each iteration.
    cost = np.zeros(n_iter)

    # Current estimate of Y.
    Lambda = np.dot(H, U)

    for i in range(n_iter):
        cost[i] = euclid_divergence(Y, Lambda)

        # Standard Lee–Seung multiplicative updates for the Euclidean cost.
        H *= np.dot(Y, U.T) / (np.dot(np.dot(H, U), U.T) + eps)
        U *= np.dot(H.T, Y) / (np.dot(np.dot(H.T, H), U) + eps)

        Lambda = np.dot(H, U)

    return [H, U, cost]


def ssnmf(Y, R=3, n_iter=50, F=None, init_G=None, init_H=None, init_U=None,
          verbose=False):
    """
    Decompose a non-negative matrix with semi-supervised NMF.

        Y ≈ FG + HU
        Y ∈ R (m, n)
        F ∈ R (m, x)   fixed (supervised) bases
        G ∈ R (x, n)   activations of the fixed bases
        H ∈ R (m, k)   free bases
        U ∈ R (k, n)   activations of the free bases

    parameters
    ----
    Y: target matrix to decompose
    R: number of free bases to decompose
    n_iter: number of multiplicative-update iterations
    F: matrix of supervised base components (required)
    init_G: initial value of G matrix. default is a random matrix
    init_H: initial value of H matrix. default is a random matrix
    init_U: initial value of U matrix. default is a random matrix
    verbose: unused; kept for interface compatibility

    return
    ----
    Array of:
    0: matrix of F
    1: matrix of G
    2: matrix of H
    3: matrix of U
    4: array of cost transition
    """
    if F is None or not len(F):
        raise ValueError("ssnmf requires a non-empty supervised basis matrix F")
    F = np.asarray(F, dtype=float)

    eps = np.spacing(1)

    # Size of the input spectrogram.
    M, N = Y.shape
    X = F.shape[1]

    # Initialization (copies, for the same reasons as in nmf()).
    if init_G is not None and len(init_G):
        G = np.array(init_G, dtype=float)
        X = init_G.shape[1]
    else:
        G = np.random.rand(X, N)

    if init_U is not None and len(init_U):
        U = np.array(init_U, dtype=float)
        R = U.shape[0]
    else:
        U = np.random.rand(R, N)

    if init_H is not None and len(init_H):
        H = np.array(init_H, dtype=float)
        R = H.shape[1]
    else:
        H = np.random.rand(M, R)

    # Cost (Euclidean divergence) recorded at the start of each iteration.
    cost = np.zeros(n_iter)

    # Current estimate of Y.
    Lambda = np.dot(F, G) + np.dot(H, U)

    for it in range(n_iter):
        cost[it] = euclid_divergence(Y, Lambda + eps)

        # Multiplicative updates; the shared reconstruction FG + HU appears
        # in every denominator.
        H *= (np.dot(Y, U.T) + eps) / (
            np.dot(np.dot(H, U) + np.dot(F, G), U.T) + eps)
        U *= (np.dot(H.T, Y) + eps) / (
            np.dot(H.T, np.dot(H, U) + np.dot(F, G)) + eps)
        # Note: the original code applied a no-op row index
        # `[np.arange(G.shape[0])]` to the numerator here; removed.
        G *= (np.dot(F.T, Y) + eps) / (
            np.dot(F.T, np.dot(H, U) + np.dot(F, G)) + eps)

        Lambda = np.dot(H, U) + np.dot(F, G)

    return [F, G, H, U, cost]


def euclid_divergence(V, Vh):
    """Return the Euclidean divergence 0.5 * ||V - Vh||_F^2."""
    return 0.5 * ((V - Vh) ** 2).sum()