/*
 *	Copyright (c) 2002, Samer Abdallah, King's College London.
 *	All rights reserved.
 *
 *	This software is provided AS IS and WITHOUT ANY WARRANTY;
 *	without even the implied warranty of MERCHANTABILITY or
 *	FITNESS FOR A PARTICULAR PURPOSE.
 */
|
samer@0
|
9
|
samer@0
|
10 package samer.models;
|
samer@0
|
11
|
samer@0
|
12 import samer.core.*;
|
samer@0
|
13 import samer.core.types.*;
|
samer@0
|
14 import samer.maths.*;
|
samer@0
|
15 import samer.maths.opt.*;
|
samer@0
|
16 import samer.tools.*;
|
samer@0
|
17
|
samer@0
|
/**
	Automatic gain control for a given input vector.
	Output appears in out. Also generates a trace of
	'loudness' of input signal.
*/
|
samer@0
|
23
|
samer@0
|
public class Scaler extends NullTask implements Model
{
	private Model M;            // downstream model fed with the scaled signal
	private int n;              // dimensionality of the input and output vectors
	private Vec x;              // input vector (null until setInput is called)
	private VVector s;          // output vector: s[i] = (x[i] - offset)/multiplier
	private VDouble multiplier; // gain parameter k; the centred input is divided by it
	private VDouble offset;     // value subtracted from the input before scaling
	private double logK;        // cached Math.log(multiplier.value); used by getEnergy()

	// Backing arrays of x, s and the gradient. NOTE(review): the field phi is only
	// ever assigned null and is shadowed by locals in every trainer — looks unused.
	double [] _x, _s, _g, phi;

	/** Creates a scaler over the given input and wires its output into model M. */
	public Scaler( Vec input, Model M) { this(input); setOutputModel(M); M.setInput(s); }
	/** Creates a scaler reading from the given input vector. */
	public Scaler( Vec input) { this(input.size()); setInput(input); }
	/** Creates an N-dimensional scaler; the input must be supplied later via setInput(). */
	public Scaler( int N)
	{
		n = N;

		x = null;
		s = new VVector("output",n);
		multiplier = new VDouble("scale",1.0,VDouble.SIGNAL);
		offset = new VDouble("offset",0.0,VDouble.SIGNAL);

		_s = s.array();
		_g = new double[n];
		phi = null;
		reset();
		logK=Math.log(multiplier.value);
	}

	public int getSize() { return n; }
	/** The scaled output vector. */
	public VVector output() { return s; }
	/** The gain parameter ("scale"). */
	public VDouble getScale() { return multiplier; }
	/** The additive offset parameter. */
	public VDouble getOffset() { return offset; }
	public Model getOutputModel() { return M; }
	public void setOutputModel(Model m) { M=m; }
	public void setInput(Vec in) { x=in; _x=x.array(); }
	public void reset() {
		// multiplier.load(Shell.env());
		// offset.load(Shell.env());
	}

	public String toString() { return "Scaler:"+x; } // +"->"+s; }
	/** Releases the parameter viewables and the output vector. */
	public void dispose()
	{
		offset.dispose();
		multiplier.dispose();
		s.dispose();
		super.dispose();
	}

	/** Computes s[i] = (x[i] - offset)/multiplier and signals that s changed. */
	public void infer() {
		double a=offset.value, k=1/multiplier.value;
		for (int i=0; i<n; i++) _s[i] = k*(_x[i]-a);
		s.changed();
	}

	/** Chain rule: the gradient w.r.t. x is the downstream gradient divided by the scale. */
	public void compute() {
		Mathx.mul(_g,M.getGradient(),1/multiplier.value);
	}

	/** Downstream energy plus n*log(k) — presumably the log-Jacobian of the rescaling. */
	public double getEnergy() { return M.getEnergy() + n*logK; }
	public double [] getGradient() { return _g; }

	/** Wraps the downstream model's function, applying the affine rescaling first. */
	public Functionx functionx() {
		return new Functionx() {
			Functionx fM=M.functionx();
			double [] s=new double[n]; // scratch buffer; deliberately shadows the outer field s

			public void dispose() { fM.dispose(); }
			public void evaluate(Datum P) { P.f=evaluate(P.x,P.g); }
			public double evaluate(double [] x, double [] g) {
				double a=offset.value, k=1/multiplier.value;
				for (int i=0; i<n; i++) s[i] = k*(x[i]-a);
				double E=fM.evaluate(s,g);
				Mathx.mul(g,k); // chain rule: rescale the downstream gradient
				return E+n*logK;
			}
		};
	}

	// Task lifecycle: refresh the cached log(k) on start; running the task infers.
	public void starting() { logK=Math.log(multiplier.value); }
	public void stopping() {}
	public void run() { infer(); }

	public Trainer getTrainer() { return new Trainer(); }
	public OffsetTrainer getOffsetTrainer() { return new OffsetTrainer(); }
	public ScaleTrainer getScaleTrainer() { return new ScaleTrainer(); }

	/** Adapts both scale and offset from the downstream model's gradient. */
	public class Trainer extends AnonymousTask implements Model.Trainer
	{
		VDouble rate1=new VDouble("scaleRate",0.001);     // learning rate for the scale
		VDouble rate2=new VDouble("offsetRate",0.000001); // learning rate for the offset
		double G,H,count; // G: scale statistic, H: sum of downstream gradient, count: total weight
		double [] _s;
		int n;


		public Trainer() { _s = s.array(); n=Scaler.this.n; }

		public void reset() { count=0; G=0; H=0; }
		public String toString() { return "Trainer:"+Scaler.this; }

		public VDouble getScaleRate() { return rate1; }
		public VDouble getOffsetRate() { return rate2; }

		public void accumulate() { accumulate(1); }
		/** Accumulates weighted statistics for the pending parameter update. */
		public void accumulate(double w) {
			// if (M.getEnergy() > 8000) return;
			double [] phi=M.getGradient();
			double g=0;
			// sum(phi[i]*s[i]) - n; presumably the descent direction in log(scale) — TODO confirm
			for (int i=0; i<n; i++) g += phi[i]*_s[i] - 1;
			G += w*g;
			H += w*Mathx.sum(phi);
			count+=w;
		}

		/** Applies the averaged updates: multiplicative for scale, additive for offset. */
		public void flush() {
			if (count==0) return; // nothing to do

			double k=multiplier.value;
			double mu=offset.value;

			mu += (rate2.value/count)*k*H/n;
			G *= rate1.value/(n*count);
			k *= Math.exp(G); // update in log-space keeps the scale positive
			multiplier.set(k);
			offset.set(mu);
			logK+=G; // keep the cached log(k) consistent with the new scale
			reset();
		}

		/** Single-sample update: equivalent in effect to accumulate(1) then flush(). */
		public void oneshot() {
			double [] phi=M.getGradient();
			G=0; H = Mathx.sum(phi);
			for (int i=0; i<n; i++) G += phi[i]*_s[i] - 1;

			double k=multiplier.value;
			double mu=offset.value;

			mu += rate2.value*k*H/n;
			G *= rate1.value/n;
			k *= Math.exp(G);
			multiplier.set(k);
			offset.set(mu);
			logK+=G;
		}


		public void dispose() { rate1.dispose(); rate2.dispose(); }
		public void starting() { reset(); logK=Math.log(multiplier.value); }
		public void run() { oneshot(); }
	}

	/** Adapts only the scale, leaving the offset fixed. */
	public class ScaleTrainer extends AnonymousTask implements Model.Trainer
	{
		VDouble rate1=new VDouble("scaleRate",0.001); // learning rate for the scale
		double G,count; // G: accumulated scale statistic, count: total weight
		double [] _s;
		int n;


		public ScaleTrainer() { _s = s.array(); n=Scaler.this.n; }
		public String toString() { return "ScaleTrainer:"+Scaler.this; }

		public void reset() { count=0; G=0; }

		public void accumulate() { accumulate(1); }
		/** Accumulates the weighted scale statistic sum(phi[i]*s[i]) - n. */
		public void accumulate(double w) {
			double [] phi=M.getGradient();
			double g=0;
			for (int i=0; i<n; i++) g += phi[i]*_s[i] - 1;
			G += w*g;
			count+=w;
		}

		/** Applies the averaged multiplicative scale update. */
		public void flush() {
			if (count==0) return; // nothing to do

			double k=multiplier.value;

			G *= rate1.value/(n*count);
			k *= Math.exp(G); // log-space update keeps the scale positive
			multiplier.set(k);
			logK+=G; // keep the cached log(k) consistent
			reset();
		}

		/** Single-sample update: equivalent in effect to accumulate(1) then flush(). */
		public void oneshot() {
			double [] phi=M.getGradient();
			G=0;
			for (int i=0; i<n; i++) G += phi[i]*_s[i] - 1;

			double k=multiplier.value;

			G *= rate1.value/n;
			k *= Math.exp(G);
			multiplier.set(k);
			logK+=G;
		}


		public void dispose() { rate1.dispose(); }
		public void starting() { reset(); logK=Math.log(multiplier.value); }
		public void run() { oneshot(); }
	}

	/** This trains only the offset, not the scale */
	public class OffsetTrainer extends AnonymousTask implements Model.Trainer
	{
		VDouble rate2=new VDouble("offsetRate",0.000001); // learning rate for the offset
		double H, count; // H: accumulated sum of downstream gradient, count: total weight
		int n;

		public OffsetTrainer() { n=Scaler.this.n; }
		public String toString() { return "OffsetTrainer:"+Scaler.this; }

		public void reset() { count=0; H=0; }

		public void accumulate() { accumulate(1); }
		/** Accumulates the weighted sum of the downstream gradient. */
		public void accumulate(double w) {
			double [] phi=M.getGradient();
			H += w*Mathx.sum(phi);
			count+=w;
		}

		/** Applies the averaged additive offset update. */
		public void flush() {
			if (count==0) return; // nothing to do
			// NOTE(review): writes offset.value directly then calls changed(), unlike
			// Trainer.flush() which uses offset.set(mu) — confirm the two are equivalent.
			offset.value += (rate2.value/count)*multiplier.value*H/n;
			offset.changed();
			reset();
		}

		/** Single-sample update: equivalent in effect to accumulate(1) then flush(). */
		public void oneshot() {
			double [] phi=M.getGradient();
			H = Mathx.sum(phi);
			offset.value += rate2.value*multiplier.value*H/n;
			offset.changed();
		}

		public void dispose() { rate2.dispose(); }
		public void starting() { reset(); }
		public void run() { oneshot(); }
	}
	// could have alternative trainers if prior is Gaussian or Laplacian,
	// in which case, parameters can be estimated in closed form
}
|
samer@0
|
271
|