Surface NMR processing and inversion GUI

cadapt.pyx 20KB

import numpy as np
from numpy.linalg import lstsq
from numpy.linalg import norm
from numpy import fft
import pylab

from scipy.signal import correlate

def autocorr(x):
    #result = np.correlate(x, x, mode='full')
    result = correlate(x, x, mode='full')
    return result[result.size//2:]      # integer division so the index is valid in Python 3

class AdaptiveFilter:

    def __init__(self, mu):
        self.mu = mu

    def adapt_filt_Ref(self, x, R, M, mu, PCA, lambda2=0.95, H0=0):
        """ Taken from .m file
            This function is written to allow the user to filter an input signal
            with an adaptive filter that utilizes 2 reference signals instead of
            the standard method which allows for only 1 reference signal.
            Author: Rob Clemens     Date: 3/16/06
            Modified and ported to Python, now takes an arbitrary number of reference channels.
        """
        #from akvo.tressel import pca
        import akvo.tressel.cpca as pca

        if np.shape(x) != np.shape(R[0]):  # or np.shape(x) != np.shape(rx1):
            print("Error, non aligned")
            exit(1)

        if PCA == "Yes":
            # PCA decomposition on ref channels so signals are less related
            R, K, means = pca.pca( R )

        if np.all(H0 == 0):                # handles the scalar default as well as an array H0
            H = np.zeros( (len(R)*M) )
            #print ("resetting filter")
        else:
            H = H0

        Rn = np.ones(len(R)*M) / mu
        r_ = np.zeros( (len(R), M) )
        e = np.zeros(len(x))               # error, desired output
        ilambda = lambda2**-1

        cdef int z
        cdef int ir
        for z in range(0, len(x)):
            # Only look forwards, to avoid distorting the late times
            # (run backwards if you want the opposite and don't care about distorting very late time.)
            for ir in range(len(R)):
                if z < M:
                    r_[ir, 0:z] = R[ir][0:z]
                    r_[ir, z:M] = 0
                else:
                    # TODO, use np.delete and np.append to speed this up
                    r_[ir, :] = R[ir][z-M:z]
            # reshape
            r_n = np.reshape(r_, -1)   #concatenate((r_v, r_h ))
            #K = np.dot( np.diag(Rn,0), r_n) / (lambda2 + np.dot(r_n*Rn, r_n)) # Create/update K
            K = (Rn * r_n) / (lambda2 + np.dot(r_n*Rn, r_n))       # Create/update K
            e[z] = x[z] - np.dot(r_n.T, H)                         # e is the filtered signal, input - r(n) * Filter Coefs
            H += K*e[z]                                            # Update Filter Coefficients
            Rn = ilambda*Rn - ilambda*np.dot(np.dot(K, r_n.T), Rn) # Update R(n)
        return e, H
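
    # A minimal usage sketch (hypothetical arrays, not part of the original file): the data
    # channel x and every reference channel in R must have the same length, M is the filter
    # length, and mu sets the initial inverse power estimate (Rn = 1/mu).
    #
    #   filt = AdaptiveFilter(mu=0.1)
    #   e, H = filt.adapt_filt_Ref(x, [rx0, rx1], M=100, mu=0.1, PCA="No", lambda2=0.98)
    #   # e is the filtered data channel, H the converged filter coefficients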
    def transferFunctionFFT(self, D, R, reg=1e-2):
        """
            Computes the transfer function (H) between a Data channel and
            a number of Reference channels. The Matrices D and R are
            expected to be in the frequency domain on input.
            | R1'R1  R1'R2  R1'R3 |   |h1|   |R1'D|
            | R2'R1  R2'R2  R2'R3 | * |h2| = |R2'D|
            | R3'R1  R3'R2  R3'R3 |   |h3|   |R3'D|
            Returns the corrected array
        """
        from akvo.tressel import pca

        # PCA decomposition on ref channels so signals are less related
        #transMatrix, K, means = pca.pca( np.array([rx0, rx1]))
        #RR = np.zeros(( np.shape(R[0])[0]*np.shape(R[0])[1], len(R)))
        # RR = np.zeros(( len(R), np.shape(R[0])[0]*np.shape(R[0])[1] ))
        # for ir in range(len(R)):
        #     RR[ir,:] = np.reshape(R[ir], -1)
        # transMatrix, K, means = pca.pca(RR)
        # #R rx0 = transMatrix[0,:]
        # # rx1 = transMatrix[1,:]
        # for ir in range(len(R)):
        #     R[ir] = transMatrix[ir,0]

        import scipy.linalg
        import akvo.tressel.pca as pca

        # Compute as many transfer functions as len(R)
        # A*H = B
        nref = len(R)
        H = np.zeros( (np.shape(D)[1], len(R)), dtype=complex )
        for iw in range(np.shape(D)[1]):
            A = np.zeros( (nref, nref), dtype=complex )
            B = np.zeros( (nref), dtype=complex )
            for ii in range(nref):
                for jj in range(nref):
                    # build A
                    A[ii, jj] = np.dot(R[ii][:, iw], R[jj][:, iw])
                # build B
                B[ii] = np.dot( R[ii][:, iw], D[:, iw] )

            # compute H(iw)
            #linalg.solve(a,b) if a is square
            #print "A", A
            #print "B", B
            # TODO, regularise this solve step? So as to not fit the spurious noise
            #print np.shape(B), np.shape(A)
            #H[iw, :] = scipy.linalg.solve(A,B)
            H[iw, :] = scipy.linalg.lstsq(A, B, cond=reg)[0]
            #print "lstqt", np.shape(scipy.linalg.lstsq(A,B))
            #print "solve", scipy.linalg.solve(A,B)
            #H[iw,:] = scipy.linalg.lstsq(A,B) # otherwise
            #H = np.zeros( (np.shape(D)[1], ) )
            #print H #A, B

        Error = np.zeros(np.shape(D), dtype=complex)
        for ir in range(nref):
            for q in range( np.shape(D)[0] ):
                #print "dimcheck", np.shape(H[:,ir]), np.shape(R[ir][q,:] )
                Error[q, :] += H[:, ir]*R[ir][q, :]

        return D - Error
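
    # A minimal usage sketch (hypothetical names, not part of the original file): D and each
    # entry of R are (records x frequencies) arrays, e.g. the rFFT of stacked time records,
    # and the return value is the data with the reference-predicted part subtracted.
    #
    #   D  = np.fft.rfft(dataRecords, axis=1)          # shape (nRecords, nFreq)
    #   R  = [np.fft.rfft(refRecords, axis=1)]         # one entry per reference channel
    #   Dc = filt.transferFunctionFFT(D, R, reg=1e-2)
    #   cleaned = np.fft.irfft(Dc, axis=1)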
    def adapt_filt_tworefFreq(self, x, rx0, rx1, M, lambda2=0.95):
        """ Frequency domain version of above
        """
        from akvo.tressel import pca

        pylab.figure()
        pylab.plot(rx0)
        pylab.plot(rx1)

        # PCA decomposition on ref channels so signals are less related
        transMatrix, K, means = pca.pca( np.array([rx0, rx1]) )
        rx0 = transMatrix[:, 0]
        rx1 = transMatrix[:, 1]

        pylab.plot(rx0)
        pylab.plot(rx1)
        pylab.show()
        exit()

        if np.shape(x) != np.shape(rx0) or np.shape(x) != np.shape(rx1):
            print("Error, non aligned")
            exit(1)

        wx  = fft.rfft(x)
        wr0 = fft.rfft(rx0)
        wr1 = fft.rfft(rx1)

        H = np.zeros( (2*M), dtype=complex )
        ident_mat = np.eye( (2*M) )
        Rn = ident_mat / 0.1
        r_v = np.zeros( (M), dtype=complex )
        r_h = np.zeros( (M), dtype=complex )
        e = np.zeros(len(x), dtype=complex )
        ilambda = lambda2**-1

        for z in range(0, len(wx)):
            # TODO pad with zeros or truncate if M >,< arrays
            r_v = wr0[::-1][:M]
            r_h = wr1[::-1][:M]
            r_n = np.concatenate((r_v, r_h))
            K = np.dot(Rn, r_n) / (lambda2 + np.dot(np.dot(r_n.T, Rn), r_n))  # Create/update K
            e[z] = wx[z] - np.dot(r_n, H)     # e is the filtered signal, input - r(n) * Filter Coefs
            H += K * e[z]                     # Update Filter Coefficients
            Rn = ilambda*Rn - ilambda*K*r_n.T*Rn  # Update R(n)
        return fft.irfft(e)
    # NB: this single-reference definition is shadowed by the two-reference method of the
    # same name defined immediately below it.
    def iter_filt_refFreq(self, x, rx0, Ahat=.05, Bhat=.5, k=0.05):

        X   = np.fft.rfft(x)
        X0  = np.copy(X)
        RX0 = np.fft.rfft(rx0)

        # step 0
        Abs2HW = []
        alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2)
        betai  = k * (1. / (np.abs(Bhat)**2))
        Hw = ((1.+alphai) * np.abs(X)**2 ) / (np.abs(X)**2 + betai*(np.abs(RX0)**2))
        H = np.abs(Hw)**2

        pylab.ion()
        pylab.figure()
        for i in range(10):
            #print "alphai", alphai
            #print "betai", betai
            #print "Hw", Hw
            alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2) * np.prod(H, axis=0)
            betai  = k * (1. / np.abs(Bhat)**2) * np.prod(H, axis=0)
            # update signal
            Hw = ((1.+alphai) * np.abs(X)**2) / (np.abs(X)**2 + betai*np.abs(RX0)**2)
            Hw = np.nan_to_num(Hw)
            X *= Hw
            H = np.vstack( (H, np.abs(Hw)**2) )
            #print "Hw", Hw
            pylab.cla()
            pylab.plot(Hw)
            #pylab.plot(np.abs(X))
            #pylab.plot(np.abs(RX0))
            pylab.draw()
            input("wait")

        pylab.cla()
        pylab.ioff()

        #return np.fft.irfft(X0-X)
        return np.fft.irfft(X)
    def iter_filt_refFreq(self, x, rx0, rx1, Ahat=.1, Bhat=1., k=0.001):

        X   = np.fft.rfft(x)
        X0  = np.copy(X)
        RX0 = np.fft.rfft(rx0)
        RX1 = np.fft.rfft(rx1)

        # step 0
        alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2)
        betai  = k * (1. / (np.abs(Bhat)**2))
        #Hw = ((1.+alphai) * np.abs(X)**2 ) / (np.abs(X)**2 + betai*(np.abs(RX0)**2))
        H = np.ones(len(X))  # abs(Hw)**2

        #pylab.ion()
        #pylab.figure(334)
        for i in range(1000):
            #print "alphai", alphai
            #print "betai", betai
            #print "Hw", Hw
            alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2) * np.prod(H, axis=0)
            betai  = k * (1. / np.abs(Bhat)**2) * np.prod(H, axis=0)
            # update signal
            Hw = ((1.+alphai) * np.abs(X)**2) / (np.abs(X)**2 + betai*np.abs(RX0)**2)
            Hw = np.nan_to_num(Hw)
            X *= Hw  #.conjugate
            #H = np.vstack((H, np.abs(Hw)**2) )
            H = np.vstack((H, np.abs(Hw)))
            #print "Hw", Hw
            #pylab.cla()
            #pylab.plot(Hw)
            #pylab.plot(np.abs(X))
            #pylab.plot(np.abs(RX0))
            #pylab.draw()
            #raw_input("wait")

        #pylab.cla()
        #pylab.ioff()

        return np.fft.irfft(X0-X)
        #return np.fft.irfft(X)
    def Tdomain_DFT(self, desired, input, S):
        """ Lifted from Adaptive filtering toolbox. Modified to accept more than one input
            vector
        """

        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]//2 + 1    # integer division so array sizes and slices stay ints
        nIterations   = len(desired)

        # Pre Allocations
        errorVector  = np.zeros(nIterations, dtype='complex')
        outputVector = np.zeros(nIterations, dtype='complex')

        # Initial State
        coefficientVectorDFT = np.fft.rfft(S["initialCoefficients"])/np.sqrt(float(nCoefficients))
        desiredDFT  = np.fft.rfft(desired)
        powerVector = S["initialPower"]*np.ones(nCoefficients)

        # Improve source code regularity, pad with zeros
        # TODO, confirm zeros(nCoeffics) not nCoeffics-1
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1), np.array(input)])

        # Body
        pylab.ion()
        pylab.figure(11)

        for it in range(nIterations):  # = 1:nIterations,
            regressorDFT = np.fft.rfft(prefixedInput[it:it+nCoefficients][::-1]) /\
                np.sqrt(float(nCoefficients))

            # Summing two column vectors
            powerVector = S["alpha"] * (regressorDFT*np.conjugate(regressorDFT)) + \
                (1.-S["alpha"])*(powerVector)

            pylab.cla()
            #pylab.plot(prefixedInput[::-1], 'b')
            #pylab.plot(prefixedInput[it:it+nCoefficients][::-1], 'g', linewidth=3)
            #pylab.plot(regressorDFT.real)
            #pylab.plot(regressorDFT.imag)
            pylab.plot(powerVector.real)
            pylab.plot(powerVector.imag)
            #pylab.plot(outputVector)
            #pylab.plot(errorVector.real)
            #pylab.plot(errorVector.imag)
            pylab.draw()
            #raw_input("wait")

            outputVector[it] = np.dot(coefficientVectorDFT.T, regressorDFT)

            #errorVector[it] = desired[it] - outputVector[it]
            errorVector[it] = desiredDFT[it] - outputVector[it]
            #print errorVector[it], desired[it], outputVector[it]

            # Vectorized
            coefficientVectorDFT += (S["step"]*np.conjugate(errorVector[it])*regressorDFT) /\
                (S['gamma']+powerVector)

        return np.real(np.fft.irfft(errorVector))
        #coefficientVector = ifft(coefficientVectorDFT)*sqrt(nCoefficients);
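
    # A minimal sketch of the parameter dictionary the transform-domain LMS routines expect
    # (Tdomain_DFT above, Tdomain_DCT below). The example values are hypothetical; only the
    # keys are taken from the code.
    #
    #   S = {"filterOrderNo":       100,
    #        "initialCoefficients": np.zeros(101),
    #        "initialPower":        0.0,
    #        "alpha":               0.9,    # power-estimate forgetting factor
    #        "step":                0.1,    # LMS step size
    #        "gamma":               1e-8}   # regularizer in the normalized update
    #   filtered = filt.Tdomain_DFT(desired, input_, S)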
    def Tdomain_DCT(self, desired, input, S):
        """ Lifted from Adaptive filtering toolbox. Modified to accept more than one input
            vector. Uses cosine transform
        """
        from scipy.fftpack import dct

        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]+1
        nIterations   = len(desired)

        # Pre Allocations
        errorVector  = np.zeros(nIterations)
        outputVector = np.zeros(nIterations)

        # Initial State
        coefficientVectorDCT = dct(S["initialCoefficients"])  #/np.sqrt(float(nCoefficients))
        desiredDCT  = dct(desired)
        powerVector = S["initialPower"]*np.ones(nCoefficients)

        # Improve source code regularity, pad with zeros
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1), np.array(input)])

        # Body
        #pylab.figure(11)
        #pylab.ion()
        for it in range(0, nIterations):  # = 1:nIterations,
            regressorDCT = dct(prefixedInput[it:it+nCoefficients][::-1], type=2)
            #regressorDCT = dct(prefixedInput[it+nCoefficients:it+nCoefficients*2+1])#[::-1])

            # Summing two column vectors
            powerVector = S["alpha"]*(regressorDCT) + (1.-S["alpha"])*(powerVector)
            #pylab.cla()
            #pylab.plot(powerVector)
            #pylab.draw()

            outputVector[it] = np.dot(coefficientVectorDCT.T, regressorDCT)

            #errorVector[it] = desired[it] - outputVector[it]
            errorVector[it] = desiredDCT[it] - outputVector[it]

            # Vectorized
            coefficientVectorDCT += (S["step"]*errorVector[it]*regressorDCT)  #/\
                #(S['gamma']+powerVector)

        #pylab.plot(errorVector)
        #pylab.show()
        return dct(errorVector, type=3)
        #coefficientVector = ifft(coefficientVectorDCT)*sqrt(nCoefficients);
    def Tdomain_CORR(self, desired, input, S):

        from scipy.linalg import toeplitz
        from scipy.signal import correlate

        # Autocorrelation
        ac = np.correlate(input, input, mode='full')
        ac = ac[ac.size//2:]
        R = toeplitz(ac)

        # cross correlation
        r = np.correlate(desired, input, mode='full')
        r = r[r.size//2:]
        #r = np.correlate(desired, input, mode='valid')

        print("R", np.shape(R))
        print("r", np.shape(r))
        print("solving")
        #H = np.linalg.solve(R,r)
        H = np.linalg.lstsq(R, r, rcond=.01)[0]
        #return desired - np.dot(H,input)
        print("done solving")

        pylab.figure()
        pylab.plot(H)
        pylab.title("H")

        #return desired - np.convolve(H, input, mode='valid')
        #return desired - np.convolve(H, input, mode='same')
        #return np.convolve(H, input, mode='same')
        return desired - np.dot(toeplitz(H), input)
        #return np.dot(R, H)

        # T = toeplitz(input)
        # print "shapes", np.shape(T), np.shape(desired)
        # h = np.linalg.lstsq(T, desired)[0]
        # print "shapes", np.shape(h), np.shape(input)
        # #return np.convolve(h, input, mode='same')
        # return desired - np.dot(T,h)
    def Fdomain_CORR(self, desired, input, dt, freq):

        from scipy.linalg import toeplitz

        # Fourier domain
        Input   = np.fft.rfft(input)
        Desired = np.fft.rfft(desired)

        T = toeplitz(Input)
        #H = np.linalg.solve(T, Desired)
        H = np.linalg.lstsq(T, Desired)[0]

        # ac = np.correlate(Input, Input, mode='full')
        # ac = ac[ac.size/2:]
        # R = toeplitz(ac)
        #
        # r = np.correlate(Desired, Input, mode='full')
        # r = r[r.size/2:]
        #
        # #r = np.correlate(desired, input, mode='valid')
        # print "R", np.shape(R)
        # print "r", np.shape(r)
        # print "solving"
        # H = np.linalg.solve(R,r)
        # #H = np.linalg.lstsq(R,r)
        # #return desired - np.dot(H,input)
        # print "done solving"

        pylab.figure()
        pylab.plot(H.real)
        pylab.plot(H.imag)
        pylab.plot(Input.real)
        pylab.plot(Input.imag)
        pylab.plot(Desired.real)
        pylab.plot(Desired.imag)
        pylab.legend(["hr","hi","ir","ii","dr","di"])
        pylab.title("H")

        #return desired - np.fft.irfft(Input*H)
        return np.fft.irfft(H*Input)
    def Tdomain_RLS(self, desired, input, S):
        """
            A DFT is first performed on the data. Then an RLS algorithm is carried out
            for noise cancellation. Related to the RLS_Alt Algorithm 5.3 in the Diniz book.
            The desired and input signals are assumed to be real time series data.
        """

        # Transform data into frequency domain
        Input   = np.fft.rfft(input)
        Desired = np.fft.rfft(desired)

        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]+1
        nIterations   = len(Desired)

        # Pre Allocations
        errorVector      = np.zeros(nIterations, dtype="complex")
        outputVector     = np.zeros(nIterations, dtype="complex")
        errorVectorPost  = np.zeros(nIterations, dtype="complex")
        outputVectorPost = np.zeros(nIterations, dtype="complex")
        coefficientVector = np.zeros( (nCoefficients, nIterations+1), dtype="complex" )

        # Initial State
        coefficientVector[:,1] = S["initialCoefficients"]
        S_d = S["delta"]*np.eye(nCoefficients)

        # Improve source code regularity, pad with zeros
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1, dtype="complex"),
                                        np.array(Input)])
        invLambda = 1./S["lambda"]

        # Body
        pylab.ion()
        pylab.figure(11)

        for it in range(nIterations):

            regressor = prefixedInput[it:it+nCoefficients][::-1]

            # a priori estimated output
            outputVector[it] = np.dot(coefficientVector[:,it].T, regressor)

            # a priori error
            errorVector[it] = Desired[it] - outputVector[it]

            psi = np.dot(S_d, regressor)
            if np.isnan(psi).any():
                print("psi", psi)
                exit(1)

            pylab.cla()
            #pylab.plot(psi)
            pylab.plot(regressor.real)
            pylab.plot(regressor.imag)
            pylab.plot(coefficientVector[:,it].real)
            pylab.plot(coefficientVector[:,it].imag)
            pylab.legend(["rr","ri", "cr", "ci"])
            pylab.draw()
            input("paws")

            S_d = invLambda * (S_d - np.dot(psi, psi.T) /\
                S["lambda"] + np.dot(psi.T, regressor))

            coefficientVector[:,it+1] = coefficientVector[:,it] + \
                np.conjugate(errorVector[it])*np.dot(S_d, regressor)

            # A posteriori estimated output
            outputVectorPost[it] = np.dot(coefficientVector[:,it+1].T, regressor)

            # A posteriori error
            errorVectorPost[it] = Desired[it] - outputVectorPost[it]

        errorVectorPost = np.nan_to_num(errorVectorPost)

        pylab.figure(11)
        print(np.shape(errorVectorPost))
        pylab.plot(errorVectorPost.real)
        pylab.plot(errorVectorPost.imag)
        pylab.show()

        print(errorVectorPost)
        #return np.fft.irfft(Desired)
        return np.fft.irfft(errorVectorPost)
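
    # A minimal sketch of the parameter dictionary Tdomain_RLS expects (example values are
    # hypothetical, only the keys are taken from the code above):
    #
    #   S = {"filterOrderNo":       100,
    #        "initialCoefficients": np.zeros(101),
    #        "delta":               1e-3,   # initial inverse-correlation scaling
    #        "lambda":              0.98}   # RLS forgetting factor
    #   cleaned = filt.Tdomain_RLS(desired, input_, S)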
if __name__ == "__main__":

    def noise(nu, t, phi):
        return np.sin(nu*2.*np.pi*t + phi)

    import matplotlib.pyplot as plt

    print("Test driver for adaptive filtering")
    Filt = AdaptiveFilter(.1)
    t = np.arange(0, .5, 1e-4)
    omega = 2000 * 2.*np.pi
    T2 = .100
    n1 = noise(60, t, .2)
    n2 = noise(61, t, .514)
    x = np.sin(omega*t)*np.exp(-t/T2) + 2.3*noise(60, t, .34) + 1.783*noise(31, t, 2.1)

    # The original driver called adapt_filt_tworef, which no longer exists in this file;
    # adapt_filt_Ref is its multi-reference replacement and takes the references as a list.
    e, H = Filt.adapt_filt_Ref(x, [n1, n2], 200, .1, "No", .98)

    plt.plot(t, x)
    plt.plot(t, n1)
    plt.plot(t, n2)
    plt.plot(t, e)
    plt.show()
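
    # A second, optional check (not in the original driver): remove the 60 Hz reference with
    # the frequency-domain transfer-function estimate. The record splitting below is arbitrary
    # and only serves to give the least-squares fit more than one observation per frequency.
    nSeg = 10
    D  = np.fft.rfft(x.reshape(nSeg, -1), axis=1)      # data records, shape (nSeg, nFreq)
    R  = [np.fft.rfft(n1.reshape(nSeg, -1), axis=1)]   # one reference channel
    Dc = Filt.transferFunctionFFT(D, R, reg=1e-2)
    xc = np.fft.irfft(Dc, axis=1).reshape(-1)          # corrected series, back in the time domain
    plt.plot(t, xc)
    plt.title("transferFunctionFFT demo")
    plt.show()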