/*
 * Copyright (c) 2002, Samer Abdallah, King's College London.
 * All rights reserved.
 *
 * This software is provided AS IS and WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 */

package samer.models;
import samer.core.*;
import samer.core.Agent.*;
import samer.core.types.*;
import samer.tools.*;
import samer.maths.*;

/**
 * ICA with an extra per-component scaling vector k: instead of rescaling
 * the rows of the unmixing matrix W directly, the trainer applies the
 * batched scale/stretch update to k.
 */
public class ICAWithScaler extends ICA
{
    VVector k;          // state: scaling vector
    double[] __k, tmp;  // __k is k's backing array; tmp is scratch space

    public ICAWithScaler(Vec input) { this(input.size()); setInput(input); }
    public ICAWithScaler(int N)
    {
        super(N);
        Shell.push(node);
        k = new VVector("k", n);
        Shell.pop();
        __k = k.array();
        tmp = new double[n];

        for (int i=0; i<n; i++) __k[i] = 1;  // [loop body lost in source; unit scaling assumed]
    }

    /* [Lost span: the original text from the "<" in the loop above to the
       ">" in the condition below was stripped as markup. It covered the
       end of the constructor, any intervening methods, and the trainer
       class declaration; the signature below is inferred from how w is
       used in the body.] */

        public void accumulate(double w)
        {
            if (/* lost expression */ > thresh) return;
            batch += w;

            double[] phi = sourceModel.getGradient();
            for (int i=0; i<_n; i++) _G[i] += w*(phi[i]*_s[i] - 1);
        }

        public void flush()
        {
            if (batch==0) return;

            G.changed();

            // do differential learning on trace & diagonal of G
            double alpha = scaleRate.value/batch;   // rate for the overall scale (trace part)
            double beta  = stretchRate.value/batch; // rate for per-component stretch

            // compute factors and scale each row of W
            double mu = Mathx.sum(_G)/n, dl;
            for (int i=0; i<_n; i++) {
                dl = alpha*mu + beta*(_G[i]-mu);
                double tmp = Math.exp(-dl);
                if (Double.isNaN(tmp)) throw new Error("alt: NaN"+i);
                __k[i] *= tmp;  // instead of Mathx.mul(_W[i],tmp);
            }
            k.changed();

            reset();  // ready for next batch
        }
    }
}
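
/*
 * A minimal, self-contained sketch of the multiplicative update applied in
 * flush() above, written against plain java.lang so it runs on its own.
 * It assumes only what the code shows: given batch statistics
 * G[i] ~ sum of w*(phi(s_i)*s_i - 1) with mean mu over components, each
 * scale k[i] is multiplied by exp(-(alpha*mu + beta*(G[i]-mu))), so alpha
 * drives the overall (trace) scale and beta the per-component stretch.
 * The class and method names here (ScalerUpdateDemo, update) are
 * illustrative, not part of the original library.
 */
class ScalerUpdateDemo
{
    static void update(double[] k, double[] G, double alpha, double beta)
    {
        double mu = 0;
        for (double g : G) mu += g;
        mu /= G.length;                          // trace part: mean of G

        for (int i = 0; i < k.length; i++) {
            double dl = alpha*mu + beta*(G[i] - mu); // differential step
            k[i] *= Math.exp(-dl);               // multiplicative scale update
        }
    }

    public static void main(String[] args)
    {
        double[] k = { 1, 1, 1 };
        double[] G = { 0.2, -0.1, 0.5 };         // toy batch statistics
        update(k, G, 0.05, 0.1);
        System.out.println(java.util.Arrays.toString(k));
    }
}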