/*
 *  hmm.c
 *  soundbite
 *
 *  Created by Mark Levy on 12/02/2006.
 *  Copyright 2006 Centre for Digital Music, Queen Mary, University of London. All rights reserved.
 *
 */

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <float.h>
#include <time.h>                /* to seed random number generator */
#include "clapack.h"             /* LAPACK for matrix inversion */
#ifdef _MAC_OS_X
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>               /* BLAS for matrix multiplication */
#endif

#include "hmm.h"

model_t* hmm_init(double** x, int T, int L, int N)
{
    int i, j, d, e, t;
    double s, ss;

    model_t* model;
    model = (model_t*) malloc(sizeof(model_t));
    model->N = N;
    model->L = L;
    model->p0 = (double*) malloc(N*sizeof(double));
    model->a = (double**) malloc(N*sizeof(double*));
    model->mu = (double**) malloc(N*sizeof(double*));
    for (i = 0; i < N; i++)
    {
        model->a[i] = (double*) malloc(N*sizeof(double));
        model->mu[i] = (double*) malloc(L*sizeof(double));
    }
    model->cov = (double**) malloc(L*sizeof(double*));
    for (i = 0; i < L; i++)
        model->cov[i] = (double*) malloc(L*sizeof(double));

    srand(time(0));
    double* global_mean = (double*) malloc(L*sizeof(double));

    /* find global mean */
    for (d = 0; d < L; d++)
    {
        global_mean[d] = 0;
        for (t = 0; t < T; t++)
            global_mean[d] += x[t][d];
        global_mean[d] /= T;
    }

    /* calculate global diagonal covariance */
    for (d = 0; d < L; d++)
    {
        for (e = 0; e < L; e++)
            model->cov[d][e] = 0;
        for (t = 0; t < T; t++)
            model->cov[d][d] += (x[t][d] - global_mean[d]) * (x[t][d] - global_mean[d]);
        model->cov[d][d] /= T-1;
    }

    /* set all means close to global mean */
    for (i = 0; i < N; i++)
    {
        for (d = 0; d < L; d++)
        {
            /* add some random noise related to covariance */
            /* ideally the random number would be Gaussian(0,1), as a hack we make it uniform on [-0.25,0.25] */
            model->mu[i][d] = global_mean[d] + (0.5 * rand() / (double) RAND_MAX - 0.25) * sqrt(model->cov[d][d]);
        }
    }

    /* random initial and transition probs */
    s = 0;
    for (i = 0; i < N; i++)
    {
        model->p0[i] = 1 + rand() / (double) RAND_MAX;
        s += model->p0[i];
        ss = 0;
        for (j = 0; j < N; j++)
        {
            model->a[i][j] = 1 + rand() / (double) RAND_MAX;
            ss += model->a[i][j];
        }
        for (j = 0; j < N; j++)
        {
            model->a[i][j] /= ss;
        }
    }
    for (i = 0; i < N; i++)
        model->p0[i] /= s;

    free(global_mean);

    return model;
}

void hmm_close(model_t* model)
{
    int i;

    for (i = 0; i < model->N; i++)
    {
        free(model->a[i]);
        free(model->mu[i]);
    }
    free(model->a);
    free(model->mu);
    for (i = 0; i < model->L; i++)
        free(model->cov[i]);
    free(model->cov);
    free(model);
}

void hmm_train(double** x, int T, model_t* model)
{
    int i, t;
    double loglik;    /* overall log-likelihood at each iteration */

    int N = model->N;
    int L = model->L;
    double* p0 = model->p0;
    double** a = model->a;
    double** mu = model->mu;
    double** cov = model->cov;

    /* allocate memory */
    double** gamma = (double**) malloc(T*sizeof(double*));
    double*** xi = (double***) malloc(T*sizeof(double**));
    for (t = 0; t < T; t++)
    {
        gamma[t] = (double*) malloc(N*sizeof(double));
        xi[t] = (double**) malloc(N*sizeof(double*));
        for (i = 0; i < N; i++)
            xi[t][i] = (double*) malloc(N*sizeof(double));
    }

    /* temporary memory */
    double* gauss_y = (double*) malloc(L*sizeof(double));
    double* gauss_z = (double*) malloc(L*sizeof(double));

    /* obs probs P(j|{x}) */
    double** b = (double**) malloc(T*sizeof(double*));
    for (t = 0; t < T; t++)
        b[t] = (double*) malloc(N*sizeof(double));

    /* inverse covariance and its determinant */
    double** icov = (double**) malloc(L*sizeof(double*));
    for (i = 0; i < L; i++)
        icov[i] = (double*) malloc(L*sizeof(double));
    double detcov;

    double thresh = 0.0001;
    int niter = 50;
    int iter = 0;
    double loglik1, loglik2;
    while (iter < niter && !(iter > 1 && (loglik - loglik1) < thresh * (loglik1 - loglik2)))
    {
        ++iter;

        fprintf(stderr, "calculating obsprobs...\n");
        fflush(stderr);

        /* precalculate obs probs */
        invert(cov, L, icov, &detcov);

        for (t = 0; t < T; t++)
        {
            //int allzero = 1;
            for (i = 0; i < N; i++)
            {
                b[t][i] = exp(loggauss(x[t], L, mu[i], icov, detcov, gauss_y, gauss_z));

                //if (b[t][i] != 0)
                //    allzero = 0;
            }
            /*
            if (allzero)
            {
                printf("all the b[t][i] were zero for t = %d, correcting...\n", t);
                for (i = 0; i < N; i++)
                {
                    b[t][i] = 0.00001;
                }
            }
            */
        }

        fprintf(stderr, "forwards-backwards...\n");
        fflush(stderr);

        forward_backwards(xi, gamma, &loglik, &loglik1, &loglik2, iter, N, T, p0, a, b);

        fprintf(stderr, "iteration %d: loglik = %f\n", iter, loglik);
        fprintf(stderr, "re-estimation...\n");
        fflush(stderr);

        baum_welch(p0, a, mu, cov, N, T, L, x, xi, gamma);

        /*
        printf("a:\n");
        for (i = 0; i < model->N; i++)
        {
            for (j = 0; j < model->N; j++)
                printf("%f ", model->a[i][j]);
            printf("\n");
        }
        printf("\n\n");
        */
        //hmm_print(model);
    }

    /* deallocate memory */
    for (t = 0; t < T; t++)
    {
        free(gamma[t]);
        free(b[t]);
        for (i = 0; i < N; i++)
            free(xi[t][i]);
        free(xi[t]);
    }
    free(gamma);
    free(xi);
    free(b);

    for (i = 0; i < L; i++)
        free(icov[i]);
    free(icov);

    free(gauss_y);
    free(gauss_z);
}

void mlss_reestimate(double* p0, double** a, double** mu, double** cov, int N, int T, int L, int* q, double** x)
{
    /* fit a single Gaussian to observations in each state */

    /* calculate the mean observation in each state */

    /* calculate the overall covariance */

    /* count transitions */

    /* estimate initial probs from transitions (???) */
}

void baum_welch(double* p0, double** a, double** mu, double** cov, int N, int T, int L, double** x, double*** xi, double** gamma)
{
    int i, j, t;

    double* sum_gamma = (double*) malloc(N*sizeof(double));

    /* temporary memory */
    double* u = (double*) malloc(L*L*sizeof(double));
    double* yy = (double*) malloc(T*L*sizeof(double));
    double* yy2 = (double*) malloc(T*L*sizeof(double));

    /* re-estimate transition probs */
    for (i = 0; i < N; i++)
    {
        sum_gamma[i] = 0;
        for (t = 0; t < T-1; t++)
            sum_gamma[i] += gamma[t][i];
    }

    for (i = 0; i < N; i++)
    {
        if (sum_gamma[i] == 0)
        {
            fprintf(stderr, "sum_gamma[%d] was zero...\n", i);
        }
        //double s = 0;
        for (j = 0; j < N; j++)
        {
            a[i][j] = 0;
            for (t = 0; t < T-1; t++)
                a[i][j] += xi[t][i][j];
            //s += a[i][j];
            a[i][j] /= sum_gamma[i];
        }
        /*
        for (j = 0; j < N; j++)
        {
            a[i][j] /= s;
        }
        */
    }

    /* NB: now we need to sum gamma over all t */
    for (i = 0; i < N; i++)
        sum_gamma[i] += gamma[T-1][i];

    /* re-estimate initial probs */
    for (i = 0; i < N; i++)
        p0[i] = gamma[0][i];

    /* re-estimate covariance */
    int d, e;
    double sum_sum_gamma = 0;
    for (i = 0; i < N; i++)
        sum_sum_gamma += sum_gamma[i];

    /*
    for (d = 0; d < L; d++)
    {
        for (e = d; e < L; e++)
        {
            cov[d][e] = 0;
            for (t = 0; t < T; t++)
                for (j = 0; j < N; j++)
                    cov[d][e] += gamma[t][j] * (x[t][d] - mu[j][d]) * (x[t][e] - mu[j][e]);

            cov[d][e] /= sum_sum_gamma;

            if (isnan(cov[d][e]))
            {
                printf("cov[%d][%d] was nan\n", d, e);
                for (j = 0; j < N; j++)
                    for (i = 0; i < L; i++)
                        if (isnan(mu[j][i]))
                            printf("mu[%d][%d] was nan\n", j, i);
                for (t = 0; t < T; t++)
                    for (j = 0; j < N; j++)
                        if (isnan(gamma[t][j]))
                            printf("gamma[%d][%d] was nan\n", t, j);
                exit(-1);
            }
        }
    }
    for (d = 0; d < L; d++)
        for (e = 0; e < d; e++)
            cov[d][e] = cov[e][d];
    */
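    /* The pooled covariance computed below is
           cov = (1/T) * sum_j sum_t gamma[t][j] * (x[t] - mu[j]) * (x[t] - mu[j])'
       For each state j, yy holds the centred observations (x[t] - mu[j]) in
       column-major order, yy2 holds the same rows weighted by gamma[t][j], and a
       single dgemm forms their product yy' * yy2, which is accumulated into cov. */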
    /* using BLAS */
    for (d = 0; d < L; d++)
        for (e = 0; e < L; e++)
            cov[d][e] = 0;

    for (j = 0; j < N; j++)
    {
        for (d = 0; d < L; d++)
            for (t = 0; t < T; t++)
            {
                yy[d*T+t] = x[t][d] - mu[j][d];
                yy2[d*T+t] = gamma[t][j] * (x[t][d] - mu[j][d]);
            }

        cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, L, L, T, 1.0, yy, T, yy2, T, 0, u, L);

        for (e = 0; e < L; e++)
            for (d = 0; d < L; d++)
                cov[d][e] += u[e*L+d];
    }

    for (d = 0; d < L; d++)
        for (e = 0; e < L; e++)
            cov[d][e] /= T; /* sum_sum_gamma; */

    //printf("sum_sum_gamma = %f\n", sum_sum_gamma);    /* fine, = T  IS THIS ALWAYS TRUE with pooled cov?? */

    /* re-estimate means */
    for (j = 0; j < N; j++)
    {
        for (d = 0; d < L; d++)
        {
            mu[j][d] = 0;
            for (t = 0; t < T; t++)
                mu[j][d] += gamma[t][j] * x[t][d];
            mu[j][d] /= sum_gamma[j];
        }
    }

    /* deallocate memory */
    free(sum_gamma);
    free(yy);
    free(yy2);
    free(u);
}

void forward_backwards(double*** xi, double** gamma, double* loglik, double* loglik1, double* loglik2, int iter, int N, int T, double* p0, double** a, double** b)
{
    /* forwards-backwards with scaling */
    int i, j, t;

    double** alpha = (double**) malloc(T*sizeof(double*));
    double** beta = (double**) malloc(T*sizeof(double*));
    for (t = 0; t < T; t++)
    {
        alpha[t] = (double*) malloc(N*sizeof(double));
        beta[t] = (double*) malloc(N*sizeof(double));
    }

    /* scaling coefficients */
    double* c = (double*) malloc(T*sizeof(double));

    /* calculate forward probs and scale coefficients */
    c[0] = 0;
    for (i = 0; i < N; i++)
    {
        alpha[0][i] = p0[i] * b[0][i];
        c[0] += alpha[0][i];

        //printf("p0[%d] = %f, b[0][%d] = %f\n", i, p0[i], i, b[0][i]);
    }
    c[0] = 1 / c[0];
    for (i = 0; i < N; i++)
    {
        alpha[0][i] *= c[0];

        //printf("alpha[0][%d] = %f\n", i, alpha[0][i]);    /* OK agrees with Matlab */
    }

    *loglik1 = *loglik;
    *loglik = -log(c[0]);
    if (iter == 2)
        *loglik2 = *loglik;

    for (t = 1; t < T; t++)
    {
        c[t] = 0;
        for (j = 0; j < N; j++)
        {
            alpha[t][j] = 0;
            for (i = 0; i < N; i++)
                alpha[t][j] += alpha[t-1][i] * a[i][j];
            alpha[t][j] *= b[t][j];

            c[t] += alpha[t][j];
        }

        /*
        if (c[t] == 0)
        {
            printf("c[%d] = 0, going to blow up so exiting\n", t);
            for (i = 0; i < N; i++)
                if (b[t][i] == 0)
                    fprintf(stderr, "b[%d][%d] was zero\n", t, i);
            fprintf(stderr, "x[t] was \n");
            for (i = 0; i < L; i++)
                fprintf(stderr, "%f ", x[t][i]);
            fprintf(stderr, "\n\n");
            exit(-1);
        }
        */

        c[t] = 1 / c[t];
        for (j = 0; j < N; j++)
            alpha[t][j] *= c[t];

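        /* Each c[t] is the reciprocal of the summed unscaled forward probabilities at
           time t, so the overall log-likelihood log P(x_1..x_T) accumulates below as
           -sum_t log(c[t]). */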
//printf("c[%d] = %e\n", t, c[t]); cannam@16: cannam@16: *loglik -= log(c[t]); cannam@16: } cannam@16: cannam@16: /* calculate backwards probs using same coefficients */ cannam@16: for (i = 0; i < N; i++) cannam@16: beta[T-1][i] = 1; cannam@16: t = T - 1; cannam@16: while (1) cannam@16: { cannam@16: for (i = 0; i < N; i++) cannam@16: beta[t][i] *= c[t]; cannam@16: cannam@16: if (t == 0) cannam@16: break; cannam@16: cannam@16: for (i = 0; i < N; i++) cannam@16: { cannam@16: beta[t-1][i] = 0; cannam@16: for (j = 0; j < N; j++) cannam@16: beta[t-1][i] += a[i][j] * b[t][j] * beta[t][j]; cannam@16: } cannam@16: cannam@16: t--; cannam@16: } cannam@16: cannam@16: /* cannam@16: printf("alpha:\n"); cannam@16: for (t = 0; t < T; t++) cannam@16: { cannam@16: for (i = 0; i < N; i++) cannam@16: printf("%4.4e\t\t", alpha[t][i]); cannam@16: printf("\n"); cannam@16: } cannam@16: printf("\n\n");printf("beta:\n"); cannam@16: for (t = 0; t < T; t++) cannam@16: { cannam@16: for (i = 0; i < N; i++) cannam@16: printf("%4.4e\t\t", beta[t][i]); cannam@16: printf("\n"); cannam@16: } cannam@16: printf("\n\n"); cannam@16: */ cannam@16: cannam@16: /* calculate posterior probs */ cannam@16: double tot; cannam@16: for (t = 0; t < T; t++) cannam@16: { cannam@16: tot = 0; cannam@16: for (i = 0; i < N; i++) cannam@16: { cannam@16: gamma[t][i] = alpha[t][i] * beta[t][i]; cannam@16: tot += gamma[t][i]; cannam@16: } cannam@16: for (i = 0; i < N; i++) cannam@16: { cannam@16: gamma[t][i] /= tot; cannam@16: cannam@16: //printf("gamma[%d][%d] = %f\n", t, i, gamma[t][i]); cannam@16: } cannam@16: } cannam@16: cannam@16: for (t = 0; t < T-1; t++) cannam@16: { cannam@16: tot = 0; cannam@16: for (i = 0; i < N; i++) cannam@16: { cannam@16: for (j = 0; j < N; j++) cannam@16: { cannam@16: xi[t][i][j] = alpha[t][i] * a[i][j] * b[t+1][j] * beta[t+1][j]; cannam@16: tot += xi[t][i][j]; cannam@16: } cannam@16: } cannam@16: for (i = 0; i < N; i++) cannam@16: for (j = 0; j < N; j++) cannam@16: xi[t][i][j] /= tot; cannam@16: } cannam@16: cannam@16: /* cannam@16: // CHECK - fine cannam@16: // gamma[t][i] = \sum_j{xi[t][i][j]} cannam@16: tot = 0; cannam@16: for (j = 0; j < N; j++) cannam@16: tot += xi[3][1][j]; cannam@16: printf("gamma[3][1] = %f, sum_j(xi[3][1][j]) = %f\n", gamma[3][1], tot); cannam@16: */ cannam@16: cannam@16: for (t = 0; t < T; t++) cannam@16: { cannam@16: free(alpha[t]); cannam@16: free(beta[t]); cannam@16: } cannam@16: free(alpha); cannam@16: free(beta); cannam@16: free(c); cannam@16: } cannam@16: cannam@16: void viterbi_decode(double** x, int T, model_t* model, int* q) cannam@16: { cannam@16: int i, j, t; cannam@16: double max; cannam@16: cannam@16: int N = model->N; cannam@16: int L = model->L; cannam@16: double* p0 = model->p0; cannam@16: double** a = model->a; cannam@16: double** mu = model->mu; cannam@16: double** cov = model->cov; cannam@16: cannam@16: /* inverse covariance and its determinant */ cannam@16: double** icov = (double**) malloc(L*sizeof(double*)); cannam@16: for (i = 0; i < L; i++) cannam@16: icov[i] = (double*) malloc(L*sizeof(double)); cannam@16: double detcov; cannam@16: cannam@16: double** logb = (double**) malloc(T*sizeof(double*)); cannam@16: double** phi = (double**) malloc(T*sizeof(double*)); cannam@16: int** psi = (int**) malloc(T*sizeof(int*)); cannam@16: for (t = 0; t < T; t++) cannam@16: { cannam@16: logb[t] = (double*) malloc(N*sizeof(double)); cannam@16: phi[t] = (double*) malloc(N*sizeof(double)); cannam@16: psi[t] = (int*) malloc(N*sizeof(int)); cannam@16: } cannam@16: cannam@16: /* 
    /* temporary memory */
    double* gauss_y = (double*) malloc(L*sizeof(double));
    double* gauss_z = (double*) malloc(L*sizeof(double));

    /* calculate observation logprobs */
    invert(cov, L, icov, &detcov);
    for (t = 0; t < T; t++)
        for (i = 0; i < N; i++)
            logb[t][i] = loggauss(x[t], L, mu[i], icov, detcov, gauss_y, gauss_z);

    /* initialise */
    for (i = 0; i < N; i++)
    {
        phi[0][i] = log(p0[i]) + logb[0][i];
        psi[0][i] = 0;
    }

    for (t = 1; t < T; t++)
    {
        for (j = 0; j < N; j++)
        {
            max = -1000000;    // TODO: what should this be?? = smallest possible sumlogprob
            psi[t][j] = 0;
            for (i = 0; i < N; i++)
            {
                if (phi[t-1][i] + log(a[i][j]) > max)
                {
                    max = phi[t-1][i] + log(a[i][j]);
                    phi[t][j] = max + logb[t][j];
                    psi[t][j] = i;
                }
            }
        }
    }

    /* find maximising state at time T-1 */
    max = phi[T-1][0];
    q[T-1] = 0;
    for (i = 1; i < N; i++)
    {
        if (phi[T-1][i] > max)
        {
            max = phi[T-1][i];
            q[T-1] = i;
        }
    }

    /* track back */
    t = T - 2;
    while (t >= 0)
    {
        q[t] = psi[t+1][q[t+1]];
        t--;
    }

    /* de-allocate memory */
    for (i = 0; i < L; i++)
        free(icov[i]);
    free(icov);
    for (t = 0; t < T; t++)
    {
        free(logb[t]);
        free(phi[t]);
        free(psi[t]);
    }
    free(logb);
    free(phi);
    free(psi);

    free(gauss_y);
    free(gauss_z);
}

/* invert matrix and calculate determinant using LAPACK */
void invert(double** cov, int L, double** icov, double* detcov)
{
    /* copy square matrix into a vector in column-major order */
    double* a = (double*) malloc(L*L*sizeof(double));
    int i, j;
    for (j = 0; j < L; j++)
        for (i = 0; i < L; i++)
            a[j*L+i] = cov[i][j];

    long M = (long) L;
    long* ipiv = (long*) malloc(L*sizeof(long));    /* one pivot index per row */
    long ret;

    /* LU decomposition */
    dgetrf_(&M, &M, a, &M, ipiv, &ret);    /* ret is zero on success, non-zero if cov is singular or an argument is invalid */
    if (ret != 0)
    {
        fprintf(stderr, "Covariance matrix was singular, couldn't invert\n");
        exit(-1);
    }

    /* find determinant */
    double det = 1;
    for (i = 0; i < L; i++)
        det *= a[i*L+i];
    // TODO: get this to work!!! If detcov < 0 then cov is bad anyway...
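    /* Note: LAPACK's ipiv uses 1-based row indices, so the sign correction sketched in
       the commented-out block below would need to test ipiv[i] != i+1 rather than
       ipiv[i] != i.  For a well-formed covariance matrix the determinant is positive
       anyway, so taking the absolute value below is sufficient here. */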
    /*
    int sign = 1;
    for (i = 0; i < L; i++)
        if (ipiv[i] != i)
            sign = -sign;
    det *= sign;
    */
    if (det < 0)
        det = -det;
    *detcov = det;

    /* allocate required working storage */
    long lwork = -1;
    double lwbest;
    dgetri_(&M, a, &M, ipiv, &lwbest, &lwork, &ret);
    lwork = (long) lwbest;
    double* work = (double*) malloc(lwork*sizeof(double));

    /* find inverse */
    dgetri_(&M, a, &M, ipiv, work, &lwork, &ret);

    for (j = 0; j < L; j++)
        for (i = 0; i < L; i++)
            icov[i][j] = a[j*L+i];

    free(work);
    free(ipiv);
    free(a);
}

/* probability of multivariate Gaussian given mean, inverse and determinant of covariance */
double gauss(double* x, int L, double* mu, double** icov, double detcov, double* y, double* z)
{
    int i, j;
    double s = 0;
    for (i = 0; i < L; i++)
        y[i] = x[i] - mu[i];
    for (i = 0; i < L; i++)
    {
        //z[i] = 0;
        //for (j = 0; j < L; j++)
        //    z[i] += icov[i][j] * y[j];
        z[i] = cblas_ddot(L, &icov[i][0], 1, y, 1);
    }
    s = cblas_ddot(L, z, 1, y, 1);
    //for (i = 0; i < L; i++)
    //    s += z[i] * y[i];

    return exp(-s/2.0) / (pow(2*PI, L/2.0) * sqrt(detcov));
}

/* log probability of multivariate Gaussian given mean, inverse and determinant of covariance */
double loggauss(double* x, int L, double* mu, double** icov, double detcov, double* y, double* z)
{
    int i, j;
    double s = 0;
    double ret;
    for (i = 0; i < L; i++)
        y[i] = x[i] - mu[i];
    for (i = 0; i < L; i++)
    {
        //z[i] = 0;
        //for (j = 0; j < L; j++)
        //    z[i] += icov[i][j] * y[j];
        z[i] = cblas_ddot(L, &icov[i][0], 1, y, 1);
    }
    s = cblas_ddot(L, z, 1, y, 1);
    //for (i = 0; i < L; i++)
    //    s += z[i] * y[i];

    ret = -0.5 * (s + L * log(2*PI) + log(detcov));

    /*
    // TEST
    if (isinf(ret) > 0)
        printf("loggauss returning infinity\n");
    if (isinf(ret) < 0)
        printf("loggauss returning -infinity\n");
    if (isnan(ret))
        printf("loggauss returning nan\n");
    */

    return ret;
}

void hmm_print(model_t* model)
{
    int i, j;
    printf("p0:\n");
    for (i = 0; i < model->N; i++)
        printf("%f ", model->p0[i]);
    printf("\n\n");
    printf("a:\n");
    for (i = 0; i < model->N; i++)
    {
        for (j = 0; j < model->N; j++)
            printf("%f ", model->a[i][j]);
        printf("\n");
    }
    printf("\n\n");
    printf("mu:\n");
    for (i = 0; i < model->N; i++)
    {
        for (j = 0; j < model->L; j++)
            printf("%f ", model->mu[i][j]);
        printf("\n");
    }
    printf("\n\n");
    printf("cov:\n");
    for (i = 0; i < model->L; i++)
    {
        for (j = 0; j < model->L; j++)
            printf("%f ", model->cov[i][j]);
printf("\n"); cannam@16: } cannam@16: printf("\n\n"); cannam@16: } cannam@16: cannam@16: