samer@0
|
1 package samer.models;
|
samer@0
|
2
|
samer@0
|
3 import samer.maths.*;
|
samer@0
|
4 import samer.maths.opt.*;
|
samer@0
|
5 import samer.tools.*;
|
samer@0
|
6 import samer.core.*;
|
samer@0
|
7 import samer.core.types.*;
|
samer@0
|
8 import java.util.*;
|
samer@0
|
9
|
samer@0
|
/**
	Non-adaptive generalised exponential factorial prior: the pointy
	bit of the usual GeneralisedExponential has been smoothed out
	by blending with a quadratic.
*/

16 public class SmoothGeneralisedExponential extends NamedTask implements Model, Observer {
|
samer@0
|
17 Vec input;
|
samer@0
|
18 VVector alpha, e, grad;
|
samer@0
|
19 int N;
|
samer@0
|
20 VDouble E;
|
samer@0
|
21 double[] x, g, e0, _e, a, eps;
|
samer@0
|
22 double last_eps;
|
samer@0
|
23
|
samer@0
|
24 public SmoothGeneralisedExponential(Vec x) { this(x.size()); setInput(x); }
|
samer@0
|
25 public SmoothGeneralisedExponential(int n) {
|
samer@0
|
26 super("genExp");
|
samer@0
|
27 Shell.push(node);
|
samer@0
|
28 N=n;
|
samer@0
|
29 E=new VDouble("E");
|
samer@0
|
30 e=new VVector("e",N);
|
samer@0
|
31 grad=new VVector("phi",N);
|
samer@0
|
32 alpha=new VVector("alpha",N);
|
samer@0
|
33 eps=new double[N];
|
samer@0
|
34 alpha.addSaver();
|
samer@0
|
35 Shell.pop();
|
samer@0
|
36
|
samer@0
|
37 alpha.addObserver(this);
|
samer@0
|
38 g=grad.array(); // new double[N];
|
samer@0
|
39 e0=new double[N];
|
samer@0
|
40 a=alpha.array();
|
samer@0
|
41 _e=e.array();
|
samer@0
|
42
|
samer@0
|
43 Mathx.setAll(a,1.0);
|
samer@0
|
44 setEps(0.1);
|
samer@0
|
45 }
|
samer@0
|
46
|
samer@0
|
47 public void update(Observable obs, Object arg) {
|
samer@0
|
48 setEps(last_eps);
|
samer@0
|
49 }
|
samer@0
|
50 public void setEps(double e) {
|
samer@0
|
51 last_eps=e;
|
samer@0
|
52 for (int i=0; i<N; i++) { eps[i]=Math.pow(e,2-a[i]); }
|
samer@0
|
53 }
|
samer@0
|
54 public void setInput(Vec in) { input=in; x=input.array(); }
|
samer@0
|
55 public int getSize() { return N; }
|
samer@0
|
56 public void dispose() {
|
samer@0
|
57 alpha.dispose();
|
samer@0
|
58 grad.dispose();
|
samer@0
|
59 E.dispose();
|
samer@0
|
60 }
|
samer@0
|
61
|
samer@0
|
62 public VVector getEnergyVector() { return e; }
|
samer@0
|
63 public VDouble getEnergySignal() { return E; }
|
samer@0
|
64 public double getEnergy() { return E.value; }
|
samer@0
|
65 public double [] getGradient() { return g; }
|
samer@0
|
66 public VVector getAlpha() { return alpha; }
|
samer@0
|
67
|
samer@0
|
68 public void run() { compute(); }
|
samer@0
|
69 public void infer() {}
|
samer@0
|
70 public void compute() {
|
samer@0
|
71 // compute log likelihood
|
samer@0
|
72
|
samer@0
|
73 for (int i=0; i<N; i++) {
|
samer@0
|
74 double t=Math.abs(x[i]);
|
samer@0
|
75 _e[i] = t*t/(Math.pow(t,2-a[i]) + eps[i]);
|
samer@0
|
76 }
|
samer@0
|
77
|
samer@0
|
78 // compute gradient g_i = dL/dx_i
|
samer@0
|
79 for (int i=0; i<N; i++) {
|
samer@0
|
80 if (x[i]==0) g[i]=0;
|
samer@0
|
81 else g[i] = a[i]*(_e[i]/x[i]);
|
samer@0
|
82 }
|
samer@0
|
83
|
samer@0
|
84 e.changed();
|
samer@0
|
85 grad.changed();
|
samer@0
|
86 E.set(Mathx.sum(_e));
|
samer@0
|
87 }
|
samer@0
|
88
|
samer@0
|
89 public Functionx functionx() {
|
samer@0
|
90 return new Functionx() {
|
samer@0
|
91 double [] __e=new double[N];
|
samer@0
|
92 public void dispose() {}
|
samer@0
|
93 public void evaluate(Datum P) { P.f=evaluate(P.x,P.g); }
|
samer@0
|
94 public double evaluate(double [] x, double [] g) {
|
samer@0
|
95 for (int i=0; i<N; i++) {
|
samer@0
|
96 if (x[i]==0) { g[i]=0; __e[i]=0; }
|
samer@0
|
97 else {
|
samer@0
|
98 double t=Math.abs(x[i]);
|
samer@0
|
99 __e[i] = t*t/(Math.pow(t,2-a[i]) + eps[i]);
|
samer@0
|
100 g[i] = a[i]*(__e[i]/x[i]);
|
samer@0
|
101 }
|
samer@0
|
102 }
|
samer@0
|
103 return Mathx.sum(__e); // +L0;
|
samer@0
|
104 }
|
samer@0
|
105 };
|
samer@0
|
106 }
|
samer@0
|
107 }
|