diff training-sessions/jacob.harrison/mapping.py @ 0:246d5546657c
initial commit, needs cleanup

author: Emmanouil Theofanis Chourdakis <e.t.chourdakis@qmul.ac.uk>
date:   Wed, 14 Dec 2016 13:15:48 +0000
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/training-sessions/jacob.harrison/mapping.py	Wed Dec 14 13:15:48 2016 +0000
@@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 13:03:01 2016

@author: Emmanouil Theofanis Chourdakis
"""

from scipy.optimize import minimize_scalar
from numpy import sqrt, array, polyval, exp, log, log10, mean, sum, var, loadtxt, cos, pi, arange

ga = sqrt(2)

# Limits for the low-level (filter coefficient) parameters.
g1_min = 0.001
g1_max = 0.999

d1_min = 0.010
d1_max = 0.900

da_min = 0.006
da_max = 0.012

gc_min = 0.001
gc_max = 0.999

G_min = 0.001
G_max = 0.999

# Limits for the high-level parameters, defined by the _min/_max variables below.
T60_min = 0.2
T60_max = 4

D_min = 1000
D_max = 10000

C_min = -20
C_max = 10

Tc_min = 0.01
Tc_max = 2.0

SC_min = 200
SC_max = 11025


def normalize(f, fmin, fmax):
    """Scale f linearly to [0, 1] given its minimum and maximum."""
    return (f - fmin)/(fmax - fmin)


def normalize_param(p):
    """Normalize a (T60, D, C, Tc) vector to [0, 1] using the limits above."""
    return array([
        normalize(p[0], T60_min, T60_max),
        normalize(p[1], D_min, D_max),
        normalize(p[2], C_min, C_max),
        normalize(p[3], Tc_min, Tc_max)
        ])


def get_high_param_norm(x):
    # Note: expects callables T60, D, C and Tc to be available in the calling
    # scope; they are not defined in this module.
    return array([
        normalize(T60(x), T60_min, T60_max),
        normalize(D(x), D_min, D_max),
        normalize(C(x), C_min, C_max),
        normalize(Tc(x), Tc_min, Tc_max)
        ])


def T601d(d1, g1, gc, G):
    """Reverberation time T60 implied by the coefficients (d1, g1, gc, G)."""
    g1 = abs(g1)
    return -d1/log(g1)*(log(G) + log(1 - gc) + log(ga) + 3*log(10))


def D1d(d1, da, t=0.1):
    """High-level parameter D from the delays d1 and da."""
    kS = 20.78125
    return kS*t/d1/da


def C1d(g1, gc, G):
    """High-level parameter C (in dB) from g1, gc and G."""
    s_ = 0
    for k in range(1, 7):
        s_ += g1**(2*1.5**(1 - k))/(1 - g1**(2*1.5**(1 - k)))

    return -10*log10(G**2*(1 - gc)/(1 + gc)*s_)


def Tc1d(d1, da, g1):
    """High-level parameter Tc from d1, da and g1."""
    gk = lambda k: g1**(1.5)**(1 - k)
    dk = lambda k: d1*1.5**(1 - k)

    sum1 = 0
    sum2 = 0
    for k in range(1, 7):
        s1 = gk(k)**2/(1 - gk(k)**2)
        sum2 += s1
        sum1 += dk(k)*s1/(1 - gk(k)**2)

    return sum1/sum2 + da


def Tc_g1_d1_D(d1, g1, D, t=0.1):
    S1 = sum([1.5**(-k + 1)*d1*g1**(2*1.5**(-k + 1))/(-g1**(2*1.5**(-k + 1)) + 1)**2 for k in range(1, 7)])
    S2 = sum([g1**(2*1.5**(-k + 1))/(-g1**(2*1.5**(-k + 1)) + 1) for k in range(1, 7)])
    Tc = S1/S2 + 20.78125*t/(D*d1)
    return Tc


def T60_d1_g1_C(d1, g1, gc, C):
    g1 = abs(g1)
    s_ = 0
    for k in range(1, 7):
        s_ += g1**(2*1.5**(1 - k))/(1 - g1**(2*1.5**(1 - k)))

    T60 = d1*log(0.001/ga*exp(C*log(10)/20)/(sqrt((gc + 1)/((-gc + 1)*s_))*(-gc + 1)))/log(g1)
    return T60


def d1_g1_T60_C(g1, gc, T60, C):
    g1 = abs(g1)
    s_ = 0
    for k in range(1, 7):
        s_ += g1**(2*1.5**(1 - k))/(1 - g1**(2*1.5**(1 - k)))

    d1 = T60*log(g1)/log(0.001/ga*exp(C*log(10)/20)/(sqrt((gc + 1)/((-gc + 1)*s_))*(-gc + 1)))
    return d1


def G_g1_C(g1, gc, C):
    s_ = 0
    for k in range(1, 7):
        s_ += g1**(2*1.5**(1 - k))/(1 - g1**(2*1.5**(1 - k)))

    G = sqrt(exp(-C/10*log(10))/(1 - gc)*(1 + gc)/s_)
    return G


def da_d1_D(d1, gc, D, t=0.1):
    return 20.78125*t/d1/D


def Tc_g1_T60_D_C(g1, gc, T60, D, C, t=0.1, SR=44100.0):
    # The original signature omitted t, which the body uses; it is added here
    # with the same default as the other helpers.
    S1 = sum([g1**(2*1.5**(-k + 1))/(-g1**(2*1.5**(-k + 1)) + 1) for k in range(1, 7)])
    L1 = log(0.001*exp(C*log(10)/20)/(ga*sqrt((gc + 1)/((-gc + 1)*S1))*(-gc + 1)))
    S2 = sum([1.5**(-k + 1)*T60*g1**(2*1.5**(-k + 1))*log(g1)/((-g1**(2*1.5**(-k + 1)) + 1)**2*L1) for k in range(1, 7)])
    Tc = S2/S1 + 20.78125*t*L1/(D*T60*log(g1))
    return Tc


def f_gc(sc, SR=44100):
    """Map a spectral centroid value sc to gc using the per-sample-rate
    polynomial coefficients stored in scmapping.csv."""
    T = loadtxt('scmapping.csv')
    SampleRates = T[:, 0]
    P = T[:, 1:]
    p = P[SampleRates == SR][0]
    return polyval(p, sc)


def errorfunc(g1, Target, gc, weights=(1.0, 1.0, 1.0, 1.0), SR=44100.0):
    """Weighted squared error between the high-level parameters implied by g1
    (with gc fixed) and the target (T60, D, C, Tc) values."""
    (kT60, kD, kC, kTc) = Target
    G = G_g1_C(g1, gc, kC)

    # Clamp the estimated coefficients to their allowed ranges.
    if G > G_max:
        G = G_max
    if G < G_min:
        G = G_min

    d1 = d1_g1_T60_C(g1, gc, kT60, kC)
    if d1 > d1_max:
        d1 = d1_max
    if d1 < d1_min:
        d1 = d1_min

    da = da_d1_D(d1, gc, kD)
    if da > da_max:
        da = da_max
    if da < da_min:
        da = da_min

    T_60_est = T601d(d1, g1, gc, G)
    D_est = D1d(d1, da)
    C_est = C1d(g1, gc, G)
    Tc_est = Tc1d(d1, da, g1)

    est_vect = normalize_param(array([T_60_est, D_est, C_est, Tc_est]))
    orig_vect = normalize_param([kT60, kD, kC, kTc])

    errvec = abs(est_vect - orig_vect)
    w1, w2, w3, w4 = weights
    errvec = array([w1*errvec[0], w2*errvec[1], w3*errvec[2], w4*errvec[3]])

    return mean(errvec**2) + var(errvec**2)**2  # + skew(abs(est_vect-orig_vect)**2)


def SC1d(gc, SR=44100):
    """Spectral centroid of the one-pole (gc) lowpass power response,
    evaluated at 1 Hz steps up to SR/2."""
    lut = 1 - 2*gc*cos(2*pi*arange(0, int(SR/2) + 1)/SR) + gc**2
    fgcn = lambda n: 1/lut[n]

    sum1 = 0
    sum2 = 0
    for n in range(0, int(SR/2)):
        fgcn_ = fgcn(n)
        sum1 += n*fgcn_
        sum2 += fgcn_

    return sum1/sum2


def forward_mapping(d1, da, g1, gc, G):
    """Map the low-level coefficients (d1, da, g1, gc, G) to the high-level
    parameters (T60, D, C, Tc, SC)."""
    SC = SC1d(gc)
    T60 = T601d(d1, g1, gc, G)
    D = D1d(d1, da)
    C = C1d(g1, gc, G)
    Tc = Tc1d(d1, da, g1)

    return (T60, D, C, Tc, SC)


def inverse_mapping(T60, D, C, Tc, SC, SR=44100):
    """Returns a vector (d1, da, g1, gc, G) of filter coefficients."""

    # 1. Estimate gc from the spectral centroid.
    gc = f_gc(SC, SR)
    print(gc)

    # 2. Estimate g1 by minimising the error against the remaining targets.
    g1 = minimize_scalar(lambda x: errorfunc(x, (T60, D, C, Tc), gc),
                         bounds=(g1_min, g1_max), method='bounded').x

    # 4. a. Estimate G from G_g1_C.
    G = G_g1_C(g1, gc, C)
    if G > G_max:
        G = G_max
    elif G < G_min:
        G = G_min

    # 4. b. Estimate d1 from d1_g1_T60_C.
    d1 = d1_g1_T60_C(g1, gc, T60, C)
    if d1 > d1_max:
        d1 = d1_max
    elif d1 < d1_min:
        d1 = d1_min

    # 4. c. Estimate da from da_d1_D.
    da = da_d1_D(d1, gc, D)
    if da > da_max:
        da = da_max
    elif da < da_min:
        da = da_min

    return (d1, da, g1, gc, G)
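

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original commit). The target
# values below are hypothetical, and f_gc() needs 'scmapping.csv' in the
# working directory, so treat this only as a demonstration of the intended
# call pattern.
if __name__ == '__main__':
    # Hypothetical high-level targets within the limits defined above.
    T60, D, C, Tc, SC = 1.5, 5000, -3.0, 0.08, 4000

    # Estimate the low-level filter coefficients from the high-level targets.
    d1, da, g1, gc, G = inverse_mapping(T60, D, C, Tc, SC)
    print((d1, da, g1, gc, G))

    # Round-trip: map the estimated coefficients back to high-level parameters.
    print(forward_mapping(d1, da, g1, gc, G))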