changeset 480:175e51ae78eb

Untabify, indent, tidy
author Chris Cannam <cannam@all-day-breakfast.com>
date Fri, 31 May 2019 10:53:39 +0100
parents 7e52c034cf62
children de5f557a270f
files dsp/segmentation/ClusterMeltSegmenter.cpp dsp/segmentation/ClusterMeltSegmenter.h dsp/segmentation/Segmenter.cpp dsp/segmentation/Segmenter.h dsp/segmentation/cluster_melt.c dsp/segmentation/cluster_melt.h dsp/segmentation/cluster_segmenter.c dsp/segmentation/cluster_segmenter.h dsp/segmentation/segment.h
diffstat 9 files changed, 486 insertions(+), 527 deletions(-)
--- a/dsp/segmentation/ClusterMeltSegmenter.cpp	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/ClusterMeltSegmenter.cpp	Fri May 31 10:53:39 2019 +0100
@@ -36,7 +36,7 @@
     fmin(params.fmin),
     fmax(params.fmax),
     nbins(params.nbins),
-    ncomponents(params.ncomponents),	// NB currently not passed - no. of PCA components is set in cluser_segmenter.c
+    ncomponents(params.ncomponents),    // NB currently not passed - no. of PCA components is set in cluser_segmenter.c
     nHMMStates(params.nHMMStates),
     nclusters(params.nclusters),
     histogramLength(params.histogramLength),
@@ -212,7 +212,7 @@
         fft->forward(frame, real, imag);
         
         constq->process(real, imag, cqre, cqim);
-	
+        
         for (int i = 0; i < ncoeff; ++i) {
             cq[i] += sqrt(cqre[i] * cqre[i] + cqim[i] * cqim[i]);
         }
@@ -287,7 +287,7 @@
         }
 
         mfcc->process(frame, ccout);
-	
+        
         for (int i = 0; i < ncoeff; ++i) {
             cc[i] += ccout[i];
         }
@@ -337,40 +337,40 @@
               << " features with " << features[0].size() << " coefficients (ncoeff = " << ncoeff << ", ncomponents = " << ncomponents << ")" << std::endl;
 */
     // copy the features to a native array and use the existing C segmenter...
-    double** arrFeatures = new double*[features.size()];	
-    for (int i = 0; i < sz; i++)
-    {
+    double** arrFeatures = new double*[features.size()];        
+    for (int i = 0; i < sz; i++) {
         if (featureType == FEATURE_TYPE_UNKNOWN) {
             arrFeatures[i] = new double[features[0].size()];
             for (int j = 0; j < int(features[0].size()); j++) {
                 arrFeatures[i][j] = features[i][j];
             }
         } else {
-            arrFeatures[i] = new double[ncoeff+1];	// allow space for the normalised envelope
+            arrFeatures[i] = new double[ncoeff+1];      // allow space for the normalised envelope
             for (int j = 0; j < ncoeff; j++) {
                 arrFeatures[i][j] = features[i][j];
             }
         }
     }
-	
+        
     q = new int[features.size()];
-	
+        
     if (featureType == FEATURE_TYPE_UNKNOWN ||
-        featureType == FEATURE_TYPE_MFCC)
+        featureType == FEATURE_TYPE_MFCC) {
         cluster_segment(q, arrFeatures, features.size(), features[0].size(), nHMMStates, histogramLength, 
                         nclusters, neighbourhoodLimit);
-    else
+    } else {
         constq_segment(q, arrFeatures, features.size(), nbins, ncoeff, featureType, 
                        nHMMStates, histogramLength, nclusters, neighbourhoodLimit);
-	
+    }
+
     // convert the cluster assignment sequence to a segmentation
-    makeSegmentation(q, features.size());		
-	
+    makeSegmentation(q, features.size());               
+        
     // de-allocate arrays
     delete [] q;
     for (int i = 0; i < int(features.size()); i++) delete [] arrFeatures[i];
     delete [] arrFeatures;
-	
+        
     // clear the features
     clear();
 }
@@ -380,15 +380,13 @@
     segmentation.segments.clear();
     segmentation.nsegtypes = nclusters;
     segmentation.samplerate = samplerate;
-	
+        
     Segment segment;
     segment.start = 0;
     segment.type = q[0];
-	
-    for (int i = 1; i < len; i++)
-    {
-        if (q[i] != q[i-1])
-        {
+        
+    for (int i = 1; i < len; i++) {
+        if (q[i] != q[i-1]) {
             segment.end = i * getHopsize();
             segmentation.segments.push_back(segment);
             segment.type = q[i];
--- a/dsp/segmentation/ClusterMeltSegmenter.h	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/ClusterMeltSegmenter.h	Fri May 31 10:53:39 2019 +0100
@@ -39,13 +39,13 @@
         fmax(16000), 
         nbins(8),
         ncomponents(20),
- 	nHMMStates(40),
+        nHMMStates(40),
         nclusters(10),
         histogramLength(15),
         neighbourhoodLimit(20) { }
     feature_types featureType;
-    double hopSize;    	// in secs
-    double windowSize;	// in secs
+    double hopSize;     // in secs
+    double windowSize;  // in secs
     int fmin;
     int fmax;
     int nbins;
@@ -65,14 +65,14 @@
     virtual int getWindowsize();
     virtual int getHopsize();
     virtual void extractFeatures(const double* samples, int nsamples);
-    void setFeatures(const vector<vector<double> >& f);		// provide the features yourself
-    virtual void segment();		// segment into default number of segment-types
-    void segment(int m);		// segment into m segment-types
+    void setFeatures(const vector<vector<double> >& f);         // provide the features yourself
+    virtual void segment();             // segment into default number of segment-types
+    void segment(int m);                // segment into m segment-types
     int getNSegmentTypes() { return nclusters; }
 
 protected:
     void makeSegmentation(int* q, int len);
-	
+        
     void extractFeaturesConstQ(const double *, int);
     void extractFeaturesMFCC(const double *, int);
 
@@ -80,13 +80,13 @@
     FFTReal *fft;
     ConstantQ* constq; 
     MFCC* mfcc;
-    model_t* model;				// the HMM
-    int* q;					// the decoded HMM state sequence
-    vector<vector<double> > histograms;	
+    model_t* model;                             // the HMM
+    int* q;                                     // the decoded HMM state sequence
+    vector<vector<double> > histograms; 
     
-    feature_types featureType;	
-    double hopSize;		// in seconds
-    double windowSize;	// in seconds
+    feature_types featureType;  
+    double hopSize;             // in seconds
+    double windowSize;  // in seconds
     
     // constant-Q parameters
     int fmin;
--- a/dsp/segmentation/Segmenter.cpp	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/Segmenter.cpp	Fri May 31 10:53:39 2019 +0100
@@ -19,11 +19,11 @@
 ostream& operator<<(ostream& os, const Segmentation& s)
 {
     os << "structure_name : begin_time end_time\n";
-	
+        
     for (int i = 0; i < int(s.segments.size()); i++) {
-	Segment seg = s.segments[i];
-	os << std::fixed << seg.type << ':' << '\t' << std::setprecision(6) << seg.start / static_cast<double>(s.samplerate) 
-	   << '\t' << std::setprecision(6) << seg.end / static_cast<double>(s.samplerate) << "\n";
+        Segment seg = s.segments[i];
+        os << std::fixed << seg.type << ':' << '\t' << std::setprecision(6) << seg.start / static_cast<double>(s.samplerate) 
+           << '\t' << std::setprecision(6) << seg.end / static_cast<double>(s.samplerate) << "\n";
     }
     
     return os;
--- a/dsp/segmentation/Segmenter.h	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/Segmenter.h	Fri May 31 10:53:39 2019 +0100
@@ -25,17 +25,17 @@
 class Segment
 {
 public:
-	int start;		// in samples
-	int end;
-	int type;
+    int start;              // in samples
+    int end;
+    int type;
 };
 
 class Segmentation
 {
 public:
-	int nsegtypes;		// number of segment types, so possible types are {0,1,...,nsegtypes-1}
-	int samplerate;
-	vector<Segment> segments;	
+    int nsegtypes;          // number of segment types, so possible types are {0,1,...,nsegtypes-1}
+    int samplerate;
+    vector<Segment> segments;       
 };
 
 ostream& operator<<(ostream& os, const Segmentation& s);
@@ -43,20 +43,20 @@
 class Segmenter
 {
 public:
-	Segmenter() {}
-	virtual ~Segmenter() {}
-	virtual void initialise(int samplerate) = 0;	// must be called before any other methods
-	virtual int getWindowsize() = 0;				// required window size for calls to extractFeatures()
-	virtual int getHopsize() = 0;					// required hop size for calls to extractFeatures()
-	virtual void extractFeatures(const double* samples, int nsamples) = 0;
-	virtual void segment() = 0;						// call once all the features have been extracted
-	virtual void segment(int m) = 0;				// specify desired number of segment-types
-	virtual void clear() { features.clear(); }
-	const Segmentation& getSegmentation() const { return segmentation; } 
+    Segmenter() {}
+    virtual ~Segmenter() {}
+    virtual void initialise(int samplerate) = 0;    // must be called before any other methods
+    virtual int getWindowsize() = 0;                                // required window size for calls to extractFeatures()
+    virtual int getHopsize() = 0;                                   // required hop size for calls to extractFeatures()
+    virtual void extractFeatures(const double* samples, int nsamples) = 0;
+    virtual void segment() = 0;                                             // call once all the features have been extracted
+    virtual void segment(int m) = 0;                                // specify desired number of segment-types
+    virtual void clear() { features.clear(); }
+    const Segmentation& getSegmentation() const { return segmentation; } 
 protected:
-	vector<vector<double> > features;
-	Segmentation segmentation;
-	int samplerate;
+    vector<vector<double> > features;
+    Segmentation segmentation;
+    int samplerate;
 };
 
 #endif
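
Note: the comments in Segmenter.h above spell out the intended call order for any Segmenter implementation. The sketch below is illustrative only and not part of this changeset; the driver function, buffer names, and the assumption that whole analysis windows of audio are already in memory are hypothetical.

    // Hypothetical driver illustrating the call order documented in Segmenter.h.
    #include "ClusterMeltSegmenter.h"
    #include <iostream>

    void segmentAudio(const double *audio, int nsamples, int samplerate)
    {
        ClusterMeltSegmenterParams params;        // defaults as initialised above
        ClusterMeltSegmenter segmenter(params);

        segmenter.initialise(samplerate);         // must be called before any other method

        int window = segmenter.getWindowsize();   // required block size for extractFeatures()
        int hop    = segmenter.getHopsize();

        for (int i = 0; i + window <= nsamples; i += hop) {
            segmenter.extractFeatures(audio + i, window);
        }

        segmenter.segment();                      // default number of segment-types
        std::cout << segmenter.getSegmentation(); // operator<< from Segmenter.cpp
    }
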
--- a/dsp/segmentation/cluster_melt.c	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/cluster_melt.c	Fri May 31 10:53:39 2019 +0100
@@ -21,205 +21,192 @@
 #define DEFAULT_LIMIT 20;
 
 double kldist(double* a, double* b, int n) {
-	/* NB assume that all a[i], b[i] are non-negative
-	because a, b represent probability distributions */
-	double q, d;
-	int i;
-	
-	d = 0;
-	for (i = 0; i < n; i++)
-	{
-		q = (a[i] + b[i]) / 2.0;
-		if (q > 0)
-		{
-			if (a[i] > 0)
-				d += a[i] * log(a[i] / q);
-			if (b[i] > 0)
-				d += b[i] * log(b[i] / q);
-		}
-	}
-	return d;		
-}	
+    /* NB assume that all a[i], b[i] are non-negative
+       because a, b represent probability distributions */
+    double q, d;
+    int i;
+        
+    d = 0;
+    for (i = 0; i < n; i++) {
+        q = (a[i] + b[i]) / 2.0;
+        if (q > 0) {
+            if (a[i] > 0) {
+                d += a[i] * log(a[i] / q);
+            }
+            if (b[i] > 0) {
+                d += b[i] * log(b[i] / q);
+            }
+        }
+    }
+    return d;               
+}       
 
 void cluster_melt(double *h, int m, int n, double *Bsched, int t, int k, int l, int *c) {
-	double lambda, sum, beta, logsumexp, maxlp;
-	int i, j, a, b, b0, b1, limit, /* B, */ it, maxiter, maxiter0, maxiter1;
-	double** cl;	/* reference histograms for each cluster */
-	int** nc;	/* neighbour counts for each histogram */
-	double** lp;	/* soft assignment probs for each histogram */
-	int* oldc;	/* previous hard assignments (to check convergence) */
-	
-	/* NB h is passed as a 1d row major array */
-	
-	/* parameter values */
-	lambda = DEFAULT_LAMBDA;
-	if (l > 0)
-		limit = l;
-	else
-		limit = DEFAULT_LIMIT;		/* use default if no valid neighbourhood limit supplied */
-//	B = 2 * limit + 1;
-	maxiter0 = 20;	/* number of iterations at initial temperature */
-	maxiter1 = 5;	/* number of iterations at subsequent temperatures */
-	
-	/* allocate memory */	
-	cl = (double**) malloc(k*sizeof(double*));
-	for (i= 0; i < k; i++)
-		cl[i] = (double*) malloc(m*sizeof(double));
-	
-	nc = (int**) malloc(n*sizeof(int*));
-	for (i= 0; i < n; i++)
-		nc[i] = (int*) malloc(k*sizeof(int));
-	
-	lp = (double**) malloc(n*sizeof(double*));
-	for (i= 0; i < n; i++)
-		lp[i] = (double*) malloc(k*sizeof(double));
-	
-	oldc = (int*) malloc(n * sizeof(int));
-	
-	/* initialise */
-	for (i = 0; i < k; i++)
-	{
-		sum = 0;
-		for (j = 0; j < m; j++)
-		{
-			cl[i][j] = rand();	/* random initial reference histograms */
-			sum += cl[i][j] * cl[i][j];
-		}
-		sum = sqrt(sum);
-		for (j = 0; j < m; j++)
-		{
-			cl[i][j] /= sum;	/* normalise */
-		}
-	}	
-	//print_array(cl, k, m);
-	
-	for (i = 0; i < n; i++)
-		c[i] = 1;	/* initially assign all histograms to cluster 1 */
-	
-	for (a = 0; a < t; a++)
-	{
-		beta = Bsched[a];
-		
-		if (a == 0)
-			maxiter = maxiter0;
-		else
-			maxiter = maxiter1;
-		
-		for (it = 0; it < maxiter; it++)
-		{
-			//if (it == maxiter - 1)
-			//	mexPrintf("hasn't converged after %d iterations\n", maxiter);
-			
-			for (i = 0; i < n; i++)
-			{
-				/* save current hard assignments */
-				oldc[i] = c[i];
-				
-				/* calculate soft assignment logprobs for each cluster */
-				sum = 0;
-				for (j = 0; j < k; j++)
-				{
-					lp[i][ j] = -beta * kldist(cl[j], &h[i*m], m);
-					
-					/* update matching neighbour counts for this histogram, based on current hard assignments */
-					/* old version:
-					nc[i][j] = 0;	
-					if (i >= limit && i <= n - 1 - limit)
-					{
-							for (b = i - limit; b <= i + limit; b++)
-							{
-								if (c[b] == j+1)
-									nc[i][j]++;
-							}
-							nc[i][j] = B - nc[i][j];
-					}
-					*/
-					b0 = i - limit;
-					if (b0 < 0)
-						b0 = 0;
-					b1 = i + limit;
-					if (b1 >= n)
-						b1 = n - 1;
-					nc[i][j] = b1 - b0 + 1;		/* = B except at edges */
-					for (b = b0; b <= b1; b++)
-						if (c[b] == j+1)
-							nc[i][j]--;
-					
-					sum += exp(lp[i][j]);
-				}
-				
-				/* normalise responsibilities and add duration logprior */
-				logsumexp = log(sum);
-				for (j = 0; j < k; j++)
-					lp[i][j] -= logsumexp + lambda * nc[i][j];				
-			}
-			//print_array(lp, n, k);
-			/*
-			for (i = 0; i < n; i++)
-			{
-				 for (j = 0; j < k; j++)
-					 mexPrintf("%d ", nc[i][j]);
-				 mexPrintf("\n");
-			} 
-			*/
-			
-			
-			/* update the assignments now that we know the duration priors
-			based on the current assignments */
-			for (i = 0; i < n; i++)
-			{
-				maxlp = lp[i][0];
-				c[i] = 1;
-				for (j = 1; j < k; j++)
-					if (lp[i][j] > maxlp)
-					{
-						maxlp = lp[i][j];
-						c[i] = j+1;
-					}
-			}
-				
-			/* break if assignments haven't changed */
-			i = 0;
-			while (i < n && oldc[i] == c[i])
-				i++;
-			if (i == n)
-				break;
-			
-			/* update reference histograms now we know new responsibilities */
-			for (j = 0; j < k; j++)
-			{
-				for (b = 0; b < m; b++)
-				{
-					cl[j][b] = 0;
-					for (i = 0; i < n; i++)
-					{
-						cl[j][b] += exp(lp[i][j]) * h[i*m+b];
-					}	
-				}
-				
-				sum = 0;				
-				for (i = 0; i < n; i++)
-					sum += exp(lp[i][j]);
-				for (b = 0; b < m; b++)
-					cl[j][b] /= sum;	/* normalise */
-			}	
-			
-			//print_array(cl, k, m);
-			//mexPrintf("\n\n");
-		}
-	}
-		
-	/* free memory */
-	for (i = 0; i < k; i++)
-		free(cl[i]);
-	free(cl);
-	for (i = 0; i < n; i++)
-		free(nc[i]);
-	free(nc);
-	for (i = 0; i < n; i++)
-		free(lp[i]);
-	free(lp);
-	free(oldc);	
+    double lambda, sum, beta, logsumexp, maxlp;
+    int i, j, a, b, b0, b1, limit, /* B, */ it, maxiter, maxiter0, maxiter1;
+    double** cl;    /* reference histograms for each cluster */
+    int** nc;       /* neighbour counts for each histogram */
+    double** lp;    /* soft assignment probs for each histogram */
+    int* oldc;      /* previous hard assignments (to check convergence) */
+        
+    /* NB h is passed as a 1d row major array */
+        
+    /* parameter values */
+    lambda = DEFAULT_LAMBDA;
+    if (l > 0) {
+        limit = l;
+    } else {
+        limit = DEFAULT_LIMIT;          /* use default if no valid neighbourhood limit supplied */
+    }
+
+    maxiter0 = 20;  /* number of iterations at initial temperature */
+    maxiter1 = 5;   /* number of iterations at subsequent temperatures */
+        
+    /* allocate memory */   
+    cl = (double**) malloc(k*sizeof(double*));
+    for (i= 0; i < k; i++) {
+        cl[i] = (double*) malloc(m*sizeof(double));
+    }
+        
+    nc = (int**) malloc(n*sizeof(int*));
+    for (i= 0; i < n; i++) {
+        nc[i] = (int*) malloc(k*sizeof(int));
+    }
+        
+    lp = (double**) malloc(n*sizeof(double*));
+    for (i= 0; i < n; i++) {
+        lp[i] = (double*) malloc(k*sizeof(double));
+    }
+        
+    oldc = (int*) malloc(n * sizeof(int));
+        
+    /* initialise */
+    for (i = 0; i < k; i++) {
+        sum = 0;
+        for (j = 0; j < m; j++) {
+            cl[i][j] = rand();      /* random initial reference histograms */
+            sum += cl[i][j] * cl[i][j];
+        }
+        sum = sqrt(sum);
+        for (j = 0; j < m; j++) {
+            cl[i][j] /= sum;        /* normalise */
+        }
+    }       
+        
+    for (i = 0; i < n; i++) {
+        c[i] = 1;       /* initially assign all histograms to cluster 1 */
+    }
+        
+    for (a = 0; a < t; a++) {
+        
+        beta = Bsched[a];
+                
+        if (a == 0) {
+            maxiter = maxiter0;
+        } else {
+            maxiter = maxiter1;
+        }
+                
+        for (it = 0; it < maxiter; it++) {
+
+            //if (it == maxiter - 1)
+            //      mexPrintf("hasn't converged after %d iterations\n", maxiter);
+                        
+            for (i = 0; i < n; i++) {
+
+                /* save current hard assignments */
+                oldc[i] = c[i];
+                                
+                /* calculate soft assignment logprobs for each cluster */
+                sum = 0;
+
+                for (j = 0; j < k; j++) {
+
+                    lp[i][ j] = -beta * kldist(cl[j], &h[i*m], m);
+                                        
+                    /* update matching neighbour counts for this histogram, based on current hard assignments */
+
+                    b0 = i - limit;
+                    if (b0 < 0) {
+                        b0 = 0;
+                    }
+                    b1 = i + limit;
+                    if (b1 >= n) {
+                        b1 = n - 1;
+                    }
+                    nc[i][j] = b1 - b0 + 1;         /* = B except at edges */
+                    for (b = b0; b <= b1; b++) {
+                        if (c[b] == j+1) {
+                            nc[i][j]--;
+                        }
+                    }
+                                        
+                    sum += exp(lp[i][j]);
+                }
+                                
+                /* normalise responsibilities and add duration logprior */
+                logsumexp = log(sum);
+                for (j = 0; j < k; j++) {
+                    lp[i][j] -= logsumexp + lambda * nc[i][j];
+                }
+            }
+                        
+            /* update the assignments now that we know the duration priors
+               based on the current assignments */
+            for (i = 0; i < n; i++) {
+                maxlp = lp[i][0];
+                c[i] = 1;
+                for (j = 1; j < k; j++) {
+                    if (lp[i][j] > maxlp) {
+                        maxlp = lp[i][j];
+                        c[i] = j+1;
+                    }
+                }
+            }
+                                
+            /* break if assignments haven't changed */
+            i = 0;
+            while (i < n && oldc[i] == c[i]) {
+                i++;
+            }
+            if (i == n) {
+                break;
+            }
+                        
+            /* update reference histograms now we know new responsibilities */
+            for (j = 0; j < k; j++) {
+                for (b = 0; b < m; b++) {
+                    cl[j][b] = 0;
+                    for (i = 0; i < n; i++) {
+                        cl[j][b] += exp(lp[i][j]) * h[i*m+b];
+                    }       
+                }
+                                
+                sum = 0;                                
+                for (i = 0; i < n; i++) {
+                    sum += exp(lp[i][j]);
+                }
+                for (b = 0; b < m; b++) {
+                    cl[j][b] /= sum;        /* normalise */
+                }
+            }       
+        }
+    }
+                
+    /* free memory */
+    for (i = 0; i < k; i++) {
+        free(cl[i]);
+    }
+    free(cl);
+    for (i = 0; i < n; i++) {
+        free(nc[i]);
+    }
+    free(nc);
+    for (i = 0; i < n; i++) {
+        free(lp[i]);
+    }
+    free(lp);
+    free(oldc);     
 }
 
 
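Note: the distance computed by kldist() above is the Kullback-Leibler divergence of each histogram against their pointwise average, summed over both directions; for normalised inputs this equals twice the Jensen-Shannon divergence. Terms with a zero component are skipped, consistent with the convention 0 log 0 = 0:

    d(a, b) = \sum_{i=1}^{n} \left[ a_i \log\frac{a_i}{q_i} + b_i \log\frac{b_i}{q_i} \right],
    \qquad q_i = \frac{a_i + b_i}{2}
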
--- a/dsp/segmentation/cluster_melt.h	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/cluster_melt.h	Fri May 31 10:53:39 2019 +0100
@@ -1,5 +1,3 @@
-#ifndef _CLUSTER_MELT_H
-#define _CLUSTER_MELT_H
 /*
  *  cluster_melt.h
  *  cluster_melt
@@ -15,6 +13,9 @@
  *
  */
 
+#ifndef _CLUSTER_MELT_H
+#define _CLUSTER_MELT_H
+
 #include <stdlib.h>
 #include <math.h>
 
@@ -22,15 +23,15 @@
 extern "C" {
 #endif
 
-void cluster_melt(double *h,		/* normalised histograms, as a vector in row major order */
-				  int m,			/* number of dimensions (i.e. histogram bins) */
-				  int n,			/* number of histograms */
-				  double *Bsched,	/* inverse temperature schedule */
-				  int t,			/* length of schedule */
-				  int k,			/* number of clusters */
-				  int l,			/* neighbourhood limit (supply zero to use default value) */
-				  int *c			/* sequence of cluster assignments */
-);
+void cluster_melt(double *h,            /* normalised histograms, as a vector in row major order */
+                  int m,                        /* number of dimensions (i.e. histogram bins) */
+                  int n,                        /* number of histograms */
+                  double *Bsched,       /* inverse temperature schedule */
+                  int t,                        /* length of schedule */
+                  int k,                        /* number of clusters */
+                  int l,                        /* neighbourhood limit (supply zero to use default value) */
+                  int *c                        /* sequence of cluster assignments */
+    );
 
 #ifdef __cplusplus
 }
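
Note: the arguments documented above are set up in cluster_segmenter.c. A minimal sketch of a call using the same geometric inverse-temperature schedule follows; the wrapper function and its argument names are hypothetical, while the constants 20, 100 and 0.7 mirror cluster_segment().

    // Hypothetical C++ wrapper around cluster_melt(); cluster_melt.h already
    // carries its own extern "C" guards, so it can be included directly.
    #include <vector>
    #include "cluster_melt.h"

    // h holds n histograms of m bins each, flattened in row-major order;
    // on return c[i] is the cluster assignment (1..k) for histogram i.
    void meltHistograms(std::vector<double> &h, int m, int n, int k, std::vector<int> &c)
    {
        const int nbsched = 20;                   // schedule length, as in cluster_segment()
        std::vector<double> bsched(nbsched);
        bsched[0] = 100;                          // initial inverse temperature
        for (int i = 1; i < nbsched; i++) {
            bsched[i] = 0.7 * bsched[i-1];        // geometric cooling, alpha = 0.7
        }
        c.assign(n, 0);
        cluster_melt(h.data(), m, n, bsched.data(), nbsched, k,
                     0 /* supply zero to use the default neighbourhood limit */, c.data());
    }
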
--- a/dsp/segmentation/cluster_segmenter.c	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/cluster_segmenter.c	Fri May 31 10:53:39 2019 +0100
@@ -5,11 +5,11 @@
  *  Created by Mark Levy on 06/04/2006.
  *  Copyright 2006 Centre for Digital Music, Queen Mary, University of London.
 
-    This program is free software; you can redistribute it and/or
-    modify it under the terms of the GNU General Public License as
-    published by the Free Software Foundation; either version 2 of the
-    License, or (at your option) any later version.  See the file
-    COPYING included with this distribution for more information.
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.  See the file
+ COPYING included with this distribution for more information.
  *
  */
 
@@ -21,69 +21,56 @@
 /* converts constant-Q features to normalised chroma */
 void cq2chroma(double** cq, int nframes, int ncoeff, int bins, double** chroma)
 {
-	int noct = ncoeff / bins;	/* number of complete octaves in constant-Q */
-	int t, b, oct, ix;
-	//double maxchroma;	/* max chroma value at each time, for normalisation */
-	//double sum;		/* for normalisation */
-	
-	for (t = 0; t < nframes; t++)
-	{
-		for (b = 0; b < bins; b++)
-			chroma[t][b] = 0;
-		for (oct = 0; oct < noct; oct++)
-		{
-			ix = oct * bins;
-			for (b = 0; b < bins; b++)
-				chroma[t][b] += fabs(cq[t][ix+b]);
-		}
-		/* normalise to unit sum
-		sum = 0;
-		for (b = 0; b < bins; b++)
-			sum += chroma[t][b];
-		for (b = 0; b < bins; b++)
-			chroma[t][b] /= sum;
-		*/
-		/* normalise to unit max - NO this made results much worse!
-		maxchroma = 0;
-		for (b = 0; b < bins; b++)
-			if (chroma[t][b] > maxchroma)
-				maxchroma = chroma[t][b];
-		if (maxchroma > 0)
-			for (b = 0; b < bins; b++)
-				chroma[t][b] /= maxchroma;	
-		*/
-	}
+    int noct = ncoeff / bins;       /* number of complete octaves in constant-Q */
+    int t, b, oct, ix;
+        
+    for (t = 0; t < nframes; t++) {
+        for (b = 0; b < bins; b++) {
+            chroma[t][b] = 0;
+        }
+        for (oct = 0; oct < noct; oct++) {
+            ix = oct * bins;
+            for (b = 0; b < bins; b++) {
+                chroma[t][b] += fabs(cq[t][ix+b]);
+            }
+        }
+    }
 }
 
 /* applies MPEG-7 normalisation to constant-Q features, storing normalised envelope (norm) in last feature dimension */
 void mpeg7_constq(double** features, int nframes, int ncoeff)
 {
-	int i, j;
-	double ss;
-	double env;
-	double maxenv = 0;
-	
-	/* convert const-Q features to dB scale */
-	for (i = 0; i < nframes; i++)
-		for (j = 0; j < ncoeff; j++)
-			features[i][j] = 10.0 * log10(features[i][j]+DBL_EPSILON);
-	
-	/* normalise each feature vector and add the norm as an extra feature dimension */	
-	for (i = 0; i < nframes; i++)
-	{
-		ss = 0;
-		for (j = 0; j < ncoeff; j++)
-			ss += features[i][j] * features[i][j];
-		env = sqrt(ss);
-		for (j = 0; j < ncoeff; j++)
-			features[i][j] /= env;
-		features[i][ncoeff] = env;
-		if (env > maxenv)
-			maxenv = env;
-	} 
-	/* normalise the envelopes */
-	for (i = 0; i < nframes; i++)
-		features[i][ncoeff] /= maxenv;	
+    int i, j;
+    double ss;
+    double env;
+    double maxenv = 0;
+        
+    /* convert const-Q features to dB scale */
+    for (i = 0; i < nframes; i++) {
+        for (j = 0; j < ncoeff; j++) {
+            features[i][j] = 10.0 * log10(features[i][j]+DBL_EPSILON);
+        }
+    }
+        
+    /* normalise each feature vector and add the norm as an extra feature dimension */      
+    for (i = 0; i < nframes; i++) {
+        ss = 0;
+        for (j = 0; j < ncoeff; j++) {
+            ss += features[i][j] * features[i][j];
+        }
+        env = sqrt(ss);
+        for (j = 0; j < ncoeff; j++) {
+            features[i][j] /= env;
+        }
+        features[i][ncoeff] = env;
+        if (env > maxenv) {
+            maxenv = env;
+        }
+    } 
+    /* normalise the envelopes */
+    for (i = 0; i < nframes; i++) {
+        features[i][ncoeff] /= maxenv;
+    }
 }
 
 /* return histograms h[nx*m] of data x[nx] into m bins using a sliding window of length h_len (MUST BE ODD) */
@@ -91,194 +78,175 @@
 /* for historical reasons we normalise the histograms by their norm (not to sum to one) */
 void create_histograms(int* x, int nx, int m, int hlen, double* h)
 {
-	int i, j, t;
-	double norm;
+    int i, j, t;
+    double norm;
 
-	for (i = 0; i < nx*m; i++) 
-	        h[i] = 0;
+    for (i = 0; i < nx*m; i++) {
+        h[i] = 0;
+    }
 
-	for (i = hlen/2; i < nx-hlen/2; i++)
-	{
-		for (j = 0; j < m; j++)
-			h[i*m+j] = 0;
-		for (t = i-hlen/2; t <= i+hlen/2; t++)
-			++h[i*m+x[t]];
-		norm = 0;
-		for (j = 0; j < m; j++)
-			norm += h[i*m+j] * h[i*m+j];
-		for (j = 0; j < m; j++)
-			h[i*m+j] /= norm;
-	}
-	
-	/* duplicate histograms at beginning and end to create one histogram for each data value supplied */
-	for (i = 0; i < hlen/2; i++)
-		for (j = 0; j < m; j++)
-			h[i*m+j] = h[hlen/2*m+j];
-	for (i = nx-hlen/2; i < nx; i++)
-		for (j = 0; j < m; j++)
-			h[i*m+j] = h[(nx-hlen/2-1)*m+j];
+    for (i = hlen/2; i < nx-hlen/2; i++) {
+        for (j = 0; j < m; j++) {
+            h[i*m+j] = 0;
+        }
+        for (t = i-hlen/2; t <= i+hlen/2; t++) {
+            ++h[i*m+x[t]];
+        }
+        norm = 0;
+        for (j = 0; j < m; j++) {
+            norm += h[i*m+j] * h[i*m+j];
+        }
+        for (j = 0; j < m; j++) {
+            h[i*m+j] /= norm;
+        }
+    }
+        
+    /* duplicate histograms at beginning and end to create one histogram for each data value supplied */
+    for (i = 0; i < hlen/2; i++) {
+        for (j = 0; j < m; j++) {
+            h[i*m+j] = h[hlen/2*m+j];
+        }
+    }
+    for (i = nx-hlen/2; i < nx; i++) {
+        for (j = 0; j < m; j++) {
+            h[i*m+j] = h[(nx-hlen/2-1)*m+j];
+        }
+    }
 }
 
 /* segment using HMM and then histogram clustering */
 void cluster_segment(int* q, double** features, int frames_read, int feature_length, int nHMM_states, 
-					 int histogram_length, int nclusters, int neighbour_limit)
+                     int histogram_length, int nclusters, int neighbour_limit)
 {
-	int i, j;
-	
-	/*****************************/
-	if (0) {
-	/* try just using the predominant bin number as a 'decoded state' */
-	nHMM_states = feature_length + 1;	/* allow a 'zero' state */
-	double chroma_thresh = 0.05;
-	double maxval;
-	int maxbin;
-	for (i = 0; i < frames_read; i++)
-	{
-		maxval = 0;
-		for (j = 0; j < feature_length; j++)
-		{
-			if (features[i][j] > maxval) 
-			{
-				maxval = features[i][j];
-				maxbin = j;
-			}				
-		}
-		if (maxval > chroma_thresh)
-			q[i] = maxbin;
-		else
-			q[i] = feature_length;
-	}
-	
-	}
-	if (1) {
-	/*****************************/
-		
-	
-	/* scale all the features to 'balance covariances' during HMM training */
-	double scale = 10;
-	for (i = 0; i < frames_read; i++)
-		for (j = 0; j < feature_length; j++)
-			features[i][j] *= scale;
-	
-	/* train an HMM on the features */
-	
-	/* create a model */
-	model_t* model = hmm_init(features, frames_read, feature_length, nHMM_states);
-	
-	/* train the model */
-	hmm_train(features, frames_read, model);
-/*	
-	printf("\n\nafter training:\n");
-	hmm_print(model);
-*/	
-	/* decode the hidden state sequence */
-	viterbi_decode(features, frames_read, model, q);  
-	hmm_close(model);
-	
-	/*****************************/
-	}
-	/*****************************/
-	
+    int i, j;
+        
+    /*****************************/
+    if (0) {
+        /* try just using the predominant bin number as a 'decoded state' */
+        nHMM_states = feature_length + 1;       /* allow a 'zero' state */
+        double chroma_thresh = 0.05;
+        double maxval;
+        int maxbin;
+        for (i = 0; i < frames_read; i++) {
+            maxval = 0;
+            for (j = 0; j < feature_length; j++) {
+                if (features[i][j] > maxval) {
+                    maxval = features[i][j];
+                    maxbin = j;
+                }                               
+            }
+            if (maxval > chroma_thresh) {
+                q[i] = maxbin;
+            } else {
+                q[i] = feature_length;
+            }
+        }
+        
+    }
+    if (1) {
+        /*****************************/
+        
+        /* scale all the features to 'balance covariances' during HMM training */
+        double scale = 10;
+        for (i = 0; i < frames_read; i++)
+            for (j = 0; j < feature_length; j++)
+                features[i][j] *= scale;
+        
+        /* train an HMM on the features */
+        
+        /* create a model */
+        model_t* model = hmm_init(features, frames_read, feature_length, nHMM_states);
+        
+        /* train the model */
+        hmm_train(features, frames_read, model);
+/*      
+        printf("\n\nafter training:\n");
+        hmm_print(model);
+*/      
+        /* decode the hidden state sequence */
+        viterbi_decode(features, frames_read, model, q);  
+        hmm_close(model);
+        
+        /*****************************/
+    }
+    /*****************************/
     
 /*
-	fprintf(stderr, "HMM state sequence:\n");
-	for (i = 0; i < frames_read; i++)
-		fprintf(stderr, "%d ", q[i]);
-	fprintf(stderr, "\n\n");
+  fprintf(stderr, "HMM state sequence:\n");
+  for (i = 0; i < frames_read; i++)
+  fprintf(stderr, "%d ", q[i]);
+  fprintf(stderr, "\n\n");
 */
-	
-	/* create histograms of states */
-	double* h = (double*) malloc(frames_read*nHMM_states*sizeof(double));	/* vector in row major order */
-	create_histograms(q, frames_read, nHMM_states, histogram_length, h);
-	
-	/* cluster the histograms */
-	int nbsched = 20;	/* length of inverse temperature schedule */
-	double* bsched = (double*) malloc(nbsched*sizeof(double));	/* inverse temperature schedule */
-	double b0 = 100;
-	double alpha = 0.7;
-	bsched[0] = b0;
-	for (i = 1; i < nbsched; i++)
-		bsched[i] = alpha * bsched[i-1];
-	cluster_melt(h, nHMM_states, frames_read, bsched, nbsched, nclusters, neighbour_limit, q);
-	
-	/* now q holds a sequence of cluster assignments */
-	
-	free(h);  
-	free(bsched);
+        
+    /* create histograms of states */
+    double* h = (double*) malloc(frames_read*nHMM_states*sizeof(double));   /* vector in row major order */
+    create_histograms(q, frames_read, nHMM_states, histogram_length, h);
+        
+    /* cluster the histograms */
+    int nbsched = 20;       /* length of inverse temperature schedule */
+    double* bsched = (double*) malloc(nbsched*sizeof(double));      /* inverse temperature schedule */
+    double b0 = 100;
+    double alpha = 0.7;
+    bsched[0] = b0;
+    for (i = 1; i < nbsched; i++) {
+        bsched[i] = alpha * bsched[i-1];
+    }
+    cluster_melt(h, nHMM_states, frames_read, bsched, nbsched, nclusters, neighbour_limit, q);
+        
+    /* now q holds a sequence of cluster assignments */
+        
+    free(h);  
+    free(bsched);
 }
 
 /* segment constant-Q or chroma features */
 void constq_segment(int* q, double** features, int frames_read, int bins, int ncoeff, int feature_type, 
-			 int nHMM_states, int histogram_length, int nclusters, int neighbour_limit)
+                    int nHMM_states, int histogram_length, int nclusters, int neighbour_limit)
 {
-	int feature_length;
-	double** chroma;
-	int i;
-	
-	if (feature_type == FEATURE_TYPE_CONSTQ)
-	{
-/*		fprintf(stderr, "Converting to dB and normalising...\n");
- */		
-		mpeg7_constq(features, frames_read, ncoeff);
-/*		
-		fprintf(stderr, "Running PCA...\n");
-*/		
-		/* do PCA on the features (but not the envelope) */
-		int ncomponents = 20;
-		pca_project(features, frames_read, ncoeff, ncomponents);
-		
-		/* copy the envelope so that it immediatly follows the chosen components */
-		for (i = 0; i < frames_read; i++)
-			features[i][ncomponents] = features[i][ncoeff];	
-		
-		feature_length = ncomponents + 1;
-		
-		/**************************************
-		//TEST
-		// feature file name
-		char* dir = "/Users/mark/documents/semma/audio/";
-		char* file_name = (char*) malloc((strlen(dir) + strlen(trackname) + strlen("_features_c20r8h0.2f0.6.mat") + 1)*sizeof(char));
-		strcpy(file_name, dir);
-		strcat(file_name, trackname);
-		strcat(file_name, "_features_c20r8h0.2f0.6.mat");
-		
-		// get the features from Matlab from mat-file
-		int frames_in_file;
-		readmatarray_size(file_name, 2, &frames_in_file, &feature_length);
-		readmatarray(file_name, 2, frames_in_file, feature_length, features);
-		// copy final frame to ensure that we get as many as we expected
-		int missing_frames = frames_read - frames_in_file;
-		while (missing_frames > 0)
-		{
-			for (i = 0; i < feature_length; i++)
-				features[frames_read-missing_frames][i] = features[frames_read-missing_frames-1][i];
-			--missing_frames;
-		}
-		
-		free(file_name);
-		******************************************/
-	
-		cluster_segment(q, features, frames_read, feature_length, nHMM_states, histogram_length, nclusters, neighbour_limit);
-	}
-	
-	if (feature_type == FEATURE_TYPE_CHROMA)
-	{
-/*
-		fprintf(stderr, "Converting to chroma features...\n");
-*/		
-		/* convert constant-Q to normalised chroma features */
-		chroma = (double**) malloc(frames_read*sizeof(double*));
-		for (i = 0; i < frames_read; i++)
-			chroma[i] = (double*) malloc(bins*sizeof(double));
-		cq2chroma(features, frames_read, ncoeff, bins, chroma);
-		feature_length = bins;
-		
-		cluster_segment(q, chroma, frames_read, feature_length, nHMM_states, histogram_length, nclusters, neighbour_limit);
-	
-		for (i = 0; i < frames_read; i++)
-			free(chroma[i]);
-		free(chroma);
-	}
+    int feature_length;
+    double** chroma;
+    int i;
+        
+    if (feature_type == FEATURE_TYPE_CONSTQ) {
+
+        mpeg7_constq(features, frames_read, ncoeff);
+
+        /* do PCA on the features (but not the envelope) */
+        int ncomponents = 20;
+        pca_project(features, frames_read, ncoeff, ncomponents);
+                
+        /* copy the envelope so that it immediatly follows the chosen components */
+        for (i = 0; i < frames_read; i++) {
+            features[i][ncomponents] = features[i][ncoeff];
+        }
+                
+        feature_length = ncomponents + 1;
+        
+        cluster_segment(q, features, frames_read, feature_length,
+                        nHMM_states, histogram_length, nclusters,
+                        neighbour_limit);
+    }
+        
+    if (feature_type == FEATURE_TYPE_CHROMA) {
+
+        /* convert constant-Q to normalised chroma features */
+        chroma = (double**) malloc(frames_read*sizeof(double*));
+        for (i = 0; i < frames_read; i++) {
+            chroma[i] = (double*) malloc(bins*sizeof(double));
+        }
+        
+        cq2chroma(features, frames_read, ncoeff, bins, chroma);
+        
+        feature_length = bins;
+                
+        cluster_segment(q, chroma, frames_read, feature_length,
+                        nHMM_states, histogram_length, nclusters,
+                        neighbour_limit);
+        
+        for (i = 0; i < frames_read; i++)
+            free(chroma[i]);
+        free(chroma);
+    }
 }
 
 
--- a/dsp/segmentation/cluster_segmenter.h	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/cluster_segmenter.h	Fri May 31 10:53:39 2019 +0100
@@ -1,6 +1,3 @@
-#ifndef _CLUSTER_SEGMENTER_H
-#define _CLUSTER_SEGMENTER_H
-
 /*
  *  cluster_segmenter.h
  *  soundbite
@@ -16,6 +13,9 @@
  *
  */
 
+#ifndef _CLUSTER_SEGMENTER_H
+#define _CLUSTER_SEGMENTER_H
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
@@ -30,7 +30,8 @@
 extern "C" {
 #endif
 
-/* applies MPEG-7 normalisation to constant-Q features, storing normalised envelope (norm) in last feature dimension */
+/* applies MPEG-7 normalisation to constant-Q features,
+   storing normalised envelope (norm) in last feature dimension */
 void mpeg7_constq(double** features, int nframes, int ncoeff);
 
 /* converts constant-Q features to normalised chroma */
@@ -38,11 +39,15 @@
 
 void create_histograms(int* x, int nx, int m, int hlen, double* h);
 
-void cluster_segment(int* q, double** features, int frames_read, int feature_length, int nHMM_states, 
-					 int histogram_length, int nclusters, int neighbour_limit);
+void cluster_segment(int* q, double** features, int frames_read,
+                     int feature_length, int nHMM_states, 
+                     int histogram_length, int nclusters,
+                     int neighbour_limit);
 
-void constq_segment(int* q, double** features, int frames_read, int bins, int ncoeff, int feature_type, 
-			 int nHMM_states, int histogram_length, int nclusters, int neighbour_limit);
+void constq_segment(int* q, double** features, int frames_read,
+                    int bins, int ncoeff, int feature_type, 
+                    int nHMM_states, int histogram_length,
+                    int nclusters, int neighbour_limit);
 
 #ifdef __cplusplus
 }
--- a/dsp/segmentation/segment.h	Fri May 31 10:35:08 2019 +0100
+++ b/dsp/segmentation/segment.h	Fri May 31 10:53:39 2019 +0100
@@ -1,10 +1,3 @@
-#ifndef _SEGMENT_H
-#define _SEGMENT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /*
  *  segment.h
  *
@@ -19,27 +12,34 @@
  *
  */
 
+#ifndef _SEGMENT_H
+#define _SEGMENT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 typedef struct segment_t
 {
-	long start;			/* in samples */
-	long end;
-	int type;
+    long start;                     /* in samples */
+    long end;
+    int type;
 } segment_t;
 
 typedef struct segmentation_t
 {
-	int nsegs;			/* number of segments */
-	int nsegtypes;		/* number of segment types, so possible types are {0,1,...,nsegtypes-1} */
-	int samplerate;
-	segment_t* segments;
+    int nsegs; /* number of segments */
+    int nsegtypes; /* number of segment types, so possible types are {0,1,...,nsegtypes-1} */
+    int samplerate;
+    segment_t* segments;
 } segmentation_t;
 
 typedef enum 
 { 
-	FEATURE_TYPE_UNKNOWN = 0, 
-	FEATURE_TYPE_CONSTQ = 1, 
-	FEATURE_TYPE_CHROMA = 2,
-	FEATURE_TYPE_MFCC = 3
+    FEATURE_TYPE_UNKNOWN = 0, 
+    FEATURE_TYPE_CONSTQ = 1, 
+    FEATURE_TYPE_CHROMA = 2,
+    FEATURE_TYPE_MFCC = 3
 } feature_types;
 
 #ifdef __cplusplus