diff src/samer/models/#Model.java# @ 0:bf79fb79ee13

Initial Mercurial check in.
author samer
date Tue, 17 Jan 2012 17:50:20 +0000
parents
children
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/samer/models/#Model.java#	Tue Jan 17 17:50:20 2012 +0000
@@ -0,0 +1,81 @@
+/*
+ *	Copyright (c) 2002, Samer Abdallah, King's College London.
+ *	All rights reserved.
+ *
+ *	This software is provided AS IS and WITHOUT ANY WARRANTY;
+ *	without even the implied warranty of MERCHANTABILITY or
+ *	FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+package samer.models;
+
+import samer.core.*;
+import samer.maths.*;
+import samer.maths.opt.*;
+import samer.tools.*;
+
+public interface Model
+{
+	/** return size of vector this model expects */
+	int getSize();
+	
+	/** model should begin observing this vector */
+	void setInput(Vec x);
+
+	/** should infer values of latent variables */
+	void infer();
+
+	/**
+		The contract is that getEnergy and getGradient
+		must return correct values for the current x after
+		infer and compute have been called, but not necessarily
+		before.
+
+
+
+		This is to give model an opportunity to
+		cache values of energy and gradient to avoid repeated
+		computations.
+	*/
+	void compute();
+
+	/** return E = -log p(x) */
+	double getEnergy();
+
+	/** return dE/dx */
+	double [] getGradient();
+
+	public void dispose();
+
+	/** This presents a more functional interface to the model
+	so that it can be driven by an optimiser. See classes
+	Functionx and MinimiserBase in package samer.maths.opt. */
+
+	public Functionx functionx();
+
+	/** This represents a training algorithm for a Model.
+		The Trainer is responsible for counting calls to accumulate()
+		between flushes.
+	*/
+
+	public interface Trainer {
+		/** collect statistics for parameter update */
+		public void accumulate();
+
+		/** weighted accumulate */
+		public void accumulate(double w);
+
+		/** use collected stats to update parameters and reset */
+		public void flush();
+
+		/** Must be equivalent to reset(); accumulate(); flush();
+			but can be optimised for non-batched training */
+		public void oneshot();
+
+		/** reset accumulators without updating parameters */
+		public void reset();
+
+		public void dispose();
+	}
+}
+