Surface NMR processing and inversion GUI

import numpy as np
from numpy.linalg import lstsq
from numpy.linalg import norm
from numpy import fft
import pylab
from scipy.signal import correlate

def autocorr(x):
    """ Autocorrelation of x, keeping only the non-negative lags. """
    #result = np.correlate(x, x, mode='full')
    result = correlate(x, x, mode='full')
    return result[result.size//2:]   # integer division for Python 3 indexing

class AdaptiveFilter:

    def __init__(self, mu):
        self.mu = mu

    def adapt_filt_Ref(self, x, R, M, mu, PCA, lambda2=0.95, H0=0):
        """ Taken from .m file
        This function is written to allow the user to filter an input signal
        with an adaptive filter that utilizes 2 reference signals instead of
        the standard method which allows for only 1 reference signal.
        Author: Rob Clemens     Date: 3/16/06
        Modified and ported to Python; now takes an arbitrary number of reference signals.
        """
        #from akvo.tressel import pca
        import akvo.tressel.pca as pca

        if np.shape(x) != np.shape(R[0]): # or np.shape(x) != np.shape(rx1):
            print("Error, non aligned")
            exit(1)

        if PCA == "Yes":
            # PCA decomposition on ref channels so signals are less related
            R, K, means = pca.pca(R)

        if np.all(H0 == 0):   # accepts the scalar default (H0=0) as well as an array
            H = np.zeros(len(R)*M)
            #print ("resetting filter")
        else:
            H = H0

        Rn = np.ones(len(R)*M) / mu

        r_ = np.zeros((len(R), M))
        e = np.zeros(len(x))        # error, desired output
        ilambda = lambda2**-1
        for z in range(0, len(x)):
            # Only look forwards, to avoid distorting the latest times
            # (run backwards if you want the opposite and don't care about distorting very late time.)
            for ir in range(len(R)):
                if z < M:
                    r_[ir, 0:z] = R[ir][0:z]
                    r_[ir, z:M] = 0
                else:
                    # TODO, use np.delete and np.append to speed this up
                    r_[ir, :] = R[ir][z-M:z]
            # reshape the per-channel taps into a single stacked regressor
            r_n = np.reshape(r_, -1)   # concatenate((r_v, r_h))
            #K = np.dot( np.diag(Rn,0), r_n) / (lambda2 + np.dot(r_n*Rn, r_n)) # Create/update K
            K = (Rn * r_n) / (lambda2 + np.dot(r_n*Rn, r_n))   # Create/update K
            e[z] = x[z] - np.dot(r_n.T, H)   # e is the filtered signal, input - r(n) * Filter Coefs
            H += K*e[z]                      # Update Filter Coefficients
            Rn = ilambda*Rn - ilambda*np.dot(np.dot(K, r_n.T), Rn)   # Update R(n)
        return e, H
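
    # Minimal usage sketch (hypothetical array names, not part of the original
    # module): remove harmonic noise from a signal channel using two reference
    # channels sampled on the same time base, skipping the PCA pre-step.
    #
    #   filt = AdaptiveFilter(mu=0.1)
    #   clean, taps = filt.adapt_filt_Ref(sig, [ref0, ref1], M=200, mu=0.1,
    #                                     PCA="No", lambda2=0.98)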

    def transferFunctionFFT(self, D, R, reg=1e-2):
        """
        Computes the transfer function (H) between a Data channel and
        a number of Reference channels. The matrices D and R are
        expected to be in the frequency domain on input.

            | R1'R1  R1'R2  R1'R3 |   |h1|   |R1'D|
            | R2'R1  R2'R2  R2'R3 | * |h2| = |R2'D|
            | R3'R1  R3'R2  R3'R3 |   |h3|   |R3'D|

        Returns the corrected array.
        """
        from akvo.tressel import pca
        # PCA decomposition on ref channels so signals are less related
        #transMatrix, K, means = pca.pca( np.array([rx0, rx1]))
        #RR = np.zeros(( np.shape(R[0])[0]*np.shape(R[0])[1], len(R)))
        # RR = np.zeros(( len(R), np.shape(R[0])[0]*np.shape(R[0])[1] ))
        # for ir in range(len(R)):
        #     RR[ir,:] = np.reshape(R[ir], -1)
        # transMatrix, K, means = pca.pca(RR)
        # #R rx0 = transMatrix[0,:]
        # # rx1 = transMatrix[1,:]
        # for ir in range(len(R)):
        #     R[ir] = transMatrix[ir,0]
        import scipy.linalg
        import akvo.tressel.pca as pca

        # Compute as many transfer functions as len(R)
        # A*H = B
        nref = len(R)
        H = np.zeros((np.shape(D)[1], len(R)), dtype=complex)
        for iw in range(np.shape(D)[1]):
            A = np.zeros((nref, nref), dtype=complex)
            B = np.zeros((nref), dtype=complex)
            for ii in range(nref):
                for jj in range(nref):
                    # build A
                    A[ii,jj] = np.dot(R[ii][:,iw], R[jj][:,iw])
                # build B
                B[ii] = np.dot(R[ii][:,iw], D[:,iw])
            # compute H(iw)
            #linalg.solve(a,b) if a is square
            #print "A", A
            #print "B", B
            # TODO, regularise this solve step? So as to not fit the spurious noise
            #print np.shape(B), np.shape(A)
            #H[iw, :] = scipy.linalg.solve(A,B)
            H[iw, :] = scipy.linalg.lstsq(A, B, cond=reg)[0]
            #print "lstqt", np.shape(scipy.linalg.lstsq(A,B))
            #print "solve", scipy.linalg.solve(A,B)
            #H[iw,:] = scipy.linalg.lstsq(A,B) # otherwise
            #H = np.zeros( (np.shape(D)[1], ) )
            #print H #A, B

        Error = np.zeros(np.shape(D), dtype=complex)
        for ir in range(nref):
            for q in range(np.shape(D)[0]):
                #print "dimcheck", np.shape(H[:,ir]), np.shape(R[ir][q,:] )
                Error[q,:] += H[:,ir]*R[ir][q,:]
        return D - Error
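
    # Minimal usage sketch for transferFunctionFFT (hypothetical names; FD,
    # FR0 and FR1 are frequency-domain records, e.g. np.fft.rfft applied
    # record-by-record so that axis 1 is frequency):
    #
    #   corrected = filt.transferFunctionFFT(FD, [FR0, FR1], reg=1e-2)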

    def adapt_filt_tworefFreq(self, x, rx0, rx1, M, lambda2=0.95):
        """ Frequency domain version of above
        """
        from akvo.tressel import pca

        pylab.figure()
        pylab.plot(rx0)
        pylab.plot(rx1)

        # PCA decomposition on ref channels so signals are less related
        transMatrix, K, means = pca.pca(np.array([rx0, rx1]))
        rx0 = transMatrix[:,0]
        rx1 = transMatrix[:,1]

        pylab.plot(rx0)
        pylab.plot(rx1)
        pylab.show()
        exit()   # NOTE: everything below this early exit is currently unreachable

        if np.shape(x) != np.shape(rx0) or np.shape(x) != np.shape(rx1):
            print("Error, non aligned")
            exit(1)

        wx = fft.rfft(x)
        wr0 = fft.rfft(rx0)
        wr1 = fft.rfft(rx1)

        H = np.zeros((2*M), dtype=complex)
        ident_mat = np.eye((2*M))
        Rn = ident_mat / 0.1
        r_v = np.zeros((M), dtype=complex)
        r_h = np.zeros((M), dtype=complex)
        e = np.zeros(len(x), dtype=complex)
        ilambda = lambda2**-1

        for z in range(0, len(wx)):
            # TODO pad with zeros or truncate if M is larger/smaller than the arrays
            r_v = wr0[::-1][:M]
            r_h = wr1[::-1][:M]
            r_n = np.concatenate((r_v, r_h))
            K = np.dot(Rn, r_n) / (lambda2 + np.dot(np.dot(r_n.T, Rn), r_n))   # Create/update K
            e[z] = wx[z] - np.dot(r_n, H)   # e is the filtered signal, input - r(n) * Filter Coefs
            H += K * e[z]                   # Update Filter Coefficients
            Rn = ilambda*Rn - ilambda*K*r_n.T*Rn   # Update R(n)

        return fft.irfft(e)

    def iter_filt_refFreq(self, x, rx0, Ahat=.05, Bhat=.5, k=0.05):

        X = np.fft.rfft(x)
        X0 = np.copy(X)
        RX0 = np.fft.rfft(rx0)

        # step 0
        Abs2HW = []
        alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2)
        betai = k * (1. / (np.abs(Bhat)**2))
        Hw = ((1.+alphai) * np.abs(X)**2) / (np.abs(X)**2 + betai*(np.abs(RX0)**2))
        H = np.abs(Hw)**2

        pylab.ion()
        pylab.figure()
        for i in range(10):
            #print "alphai", alphai
            #print "betai", betai
            #print "Hw", Hw
            alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2) * np.prod(H, axis=0)
            betai = k * (1. / np.abs(Bhat)**2) * np.prod(H, axis=0)
            # update signal
            Hw = ((1.+alphai) * np.abs(X)**2) / (np.abs(X)**2 + betai*np.abs(RX0)**2)
            Hw = np.nan_to_num(Hw)
            X *= Hw
            H = np.vstack((H, np.abs(Hw)**2))
            #print "Hw", Hw
            pylab.cla()
            pylab.plot(Hw)
            #pylab.plot(np.abs(X))
            #pylab.plot(np.abs(RX0))
            pylab.draw()
            input("wait")   # raw_input in the original Python 2 code

        pylab.cla()
        pylab.ioff()

        #return np.fft.irfft(X0-X)
        return np.fft.irfft(X)
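
    # Both definitions of iter_filt_refFreq apply an iterative, Wiener-like
    # spectral gain.  Each pass computes
    #
    #     Hw = (1 + alpha_i)*|X|^2 / (|X|^2 + beta_i*|RX0|^2)
    #
    # with alpha_i and beta_i rescaled by the accumulated product of earlier
    # gains, then shrinks the spectrum as X <- Hw*X.  Ahat, Bhat and k are
    # user-supplied shape parameters.  Note that the two-reference definition
    # below shadows the single-reference one above, so only the version below
    # is callable on an AdaptiveFilter instance.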

    def iter_filt_refFreq(self, x, rx0, rx1, Ahat=.1, Bhat=1., k=0.001):

        X = np.fft.rfft(x)
        X0 = np.copy(X)
        RX0 = np.fft.rfft(rx0)
        RX1 = np.fft.rfft(rx1)

        # step 0
        alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2)
        betai = k * (1. / (np.abs(Bhat)**2))
        #Hw = ((1.+alphai) * np.abs(X)**2 ) / (np.abs(X)**2 + betai*(np.abs(RX0)**2))
        H = np.ones(len(X))   # abs(Hw)**2

        #pylab.ion()
        #pylab.figure(334)
        for i in range(1000):
            #print "alphai", alphai
            #print "betai", betai
            #print "Hw", Hw
            alphai = k * (np.abs(Ahat)**2 / np.abs(Bhat)**2) * np.prod(H, axis=0)
            betai = k * (1. / np.abs(Bhat)**2) * np.prod(H, axis=0)
            # update signal
            Hw = ((1.+alphai) * np.abs(X)**2) / (np.abs(X)**2 + betai*np.abs(RX0)**2)
            Hw = np.nan_to_num(Hw)
            X *= Hw   #.conjugate
            #H = np.vstack((H, np.abs(Hw)**2) )
            H = np.vstack((H, np.abs(Hw)))
            #print "Hw", Hw
            #pylab.cla()
            #pylab.plot(Hw)
            #pylab.plot(np.abs(X))
            #pylab.plot(np.abs(RX0))
            #pylab.draw()
            #raw_input("wait")

        #pylab.cla()
        #pylab.ioff()

        return np.fft.irfft(X0-X)
        #return np.fft.irfft(X)

    def Tdomain_DFT(self, desired, input, S):
        """ Lifted from Adaptive Filtering toolbox. Modified to accept more than one input
            vector.
        """
        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]//2 + 1   # integer count of rfft bins
        nIterations = len(desired)

        # Pre Allocations
        errorVector = np.zeros(nIterations, dtype='complex')
        outputVector = np.zeros(nIterations, dtype='complex')

        # Initial State
        coefficientVectorDFT = np.fft.rfft(S["initialCoefficients"])/np.sqrt(float(nCoefficients))
        desiredDFT = np.fft.rfft(desired)
        powerVector = S["initialPower"]*np.ones(nCoefficients)

        # Improve source code regularity, pad with zeros
        # TODO, confirm zeros(nCoefficients) not nCoefficients-1
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1), np.array(input)])

        # Body
        pylab.ion()
        pylab.figure(11)

        for it in range(nIterations):   # = 1:nIterations,
            regressorDFT = np.fft.rfft(prefixedInput[it:it+nCoefficients][::-1]) /\
                np.sqrt(float(nCoefficients))

            # Summing two column vectors
            powerVector = S["alpha"] * (regressorDFT*np.conjugate(regressorDFT)) + \
                (1.-S["alpha"])*(powerVector)

            pylab.cla()
            #pylab.plot(prefixedInput[::-1], 'b')
            #pylab.plot(prefixedInput[it:it+nCoefficients][::-1], 'g', linewidth=3)
            #pylab.plot(regressorDFT.real)
            #pylab.plot(regressorDFT.imag)
            pylab.plot(powerVector.real)
            pylab.plot(powerVector.imag)
            #pylab.plot(outputVector)
            #pylab.plot(errorVector.real)
            #pylab.plot(errorVector.imag)
            pylab.draw()
            #raw_input("wait")

            outputVector[it] = np.dot(coefficientVectorDFT.T, regressorDFT)

            #errorVector[it] = desired[it] - outputVector[it]
            errorVector[it] = desiredDFT[it] - outputVector[it]
            #print errorVector[it], desired[it], outputVector[it]

            # Vectorized
            coefficientVectorDFT += (S["step"]*np.conjugate(errorVector[it])*regressorDFT) /\
                (S['gamma']+powerVector)

        return np.real(np.fft.irfft(errorVector))
        #coefficientVector = ifft(coefficientVectorDFT)*sqrt(nCoefficients);

    def Tdomain_DCT(self, desired, input, S):
        """ Lifted from Adaptive Filtering toolbox. Modified to accept more than one input
            vector. Uses the cosine transform.
        """
        from scipy.fftpack import dct

        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]+1
        nIterations = len(desired)

        # Pre Allocations
        errorVector = np.zeros(nIterations)
        outputVector = np.zeros(nIterations)

        # Initial State
        coefficientVectorDCT = dct(S["initialCoefficients"])   #/np.sqrt(float(nCoefficients))
        desiredDCT = dct(desired)
        powerVector = S["initialPower"]*np.ones(nCoefficients)

        # Improve source code regularity, pad with zeros
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1), np.array(input)])

        # Body
        #pylab.figure(11)
        #pylab.ion()
        for it in range(0, nIterations):   # = 1:nIterations,
            regressorDCT = dct(prefixedInput[it:it+nCoefficients][::-1], type=2)
            #regressorDCT = dct(prefixedInput[it+nCoefficients:it+nCoefficients*2+1])#[::-1])

            # Summing two column vectors
            powerVector = S["alpha"]*(regressorDCT) + (1.-S["alpha"])*(powerVector)
            #pylab.cla()
            #pylab.plot(powerVector)
            #pylab.draw()

            outputVector[it] = np.dot(coefficientVectorDCT.T, regressorDCT)

            #errorVector[it] = desired[it] - outputVector[it]
            errorVector[it] = desiredDCT[it] - outputVector[it]

            # Vectorized
            coefficientVectorDCT += (S["step"]*errorVector[it]*regressorDCT)   #/\
                #(S['gamma']+powerVector)

        #pylab.plot(errorVector)
        #pylab.show()
        return dct(errorVector, type=3)
        #coefficientVector = ifft(coefficientVectorDCT)*sqrt(nCoefficients);
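
    # Hypothetical parameter dictionary for Tdomain_DCT (the key names are the
    # ones read by the code above and by Tdomain_DFT, which additionally reads
    # "gamma"; the values are only illustrative):
    #
    #   S = {"filterOrderNo": 32,
    #        "initialCoefficients": np.zeros(33),
    #        "initialPower": 0.0,
    #        "alpha": 0.95,
    #        "step": 1e-2}
    #   residual = filt.Tdomain_DCT(desired, reference, S)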

    def Tdomain_CORR(self, desired, input, S):
        from scipy.linalg import toeplitz
        from scipy.signal import correlate

        # Autocorrelation
        ac = np.correlate(input, input, mode='full')
        ac = ac[ac.size//2:]
        R = toeplitz(ac)

        # cross correlation
        r = np.correlate(desired, input, mode='full')
        r = r[r.size//2:]
        #r = np.correlate(desired, input, mode='valid')

        print("R", np.shape(R))
        print("r", np.shape(r))
        print("solving")
        #H = np.linalg.solve(R,r)
        H = np.linalg.lstsq(R, r, rcond=.01)[0]
        #return desired - np.dot(H,input)
        print("done solving")

        pylab.figure()
        pylab.plot(H)
        pylab.title("H")

        #return desired - np.convolve(H, input, mode='valid')
        #return desired - np.convolve(H, input, mode='same')
        #return np.convolve(H, input, mode='same')
        return desired - np.dot(toeplitz(H), input)
        #return np.dot(R, H)

        # T = toeplitz(input)
        # print "shapes", np.shape(T), np.shape(desired)
        # h = np.linalg.lstsq(T, desired)[0]
        # print "shapes", np.shape(h), np.shape(input)
        # #return np.convolve(h, input, mode='same')
        # return desired - np.dot(T,h)
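
    # Tdomain_CORR above solves a truncated Wiener-Hopf system: R is the
    # Toeplitz matrix built from the autocorrelation of the reference, r is
    # the cross-correlation with the desired channel, and the filter h comes
    # from the regularised least-squares solution of R*h = r (rcond=0.01).
    # The predicted reference contribution is then subtracted from the
    # desired signal.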

    def Fdomain_CORR(self, desired, input, dt, freq):
        from scipy.linalg import toeplitz

        # Fourier domain
        Input = np.fft.rfft(input)
        Desired = np.fft.rfft(desired)

        T = toeplitz(Input)
        #H = np.linalg.solve(T, Desired)
        H = np.linalg.lstsq(T, Desired, rcond=None)[0]

        # ac = np.correlate(Input, Input, mode='full')
        # ac = ac[ac.size/2:]
        # R = toeplitz(ac)
        #
        # r = np.correlate(Desired, Input, mode='full')
        # r = r[r.size/2:]
        #
        # #r = np.correlate(desired, input, mode='valid')
        # print "R", np.shape(R)
        # print "r", np.shape(r)
        # print "solving"
        # H = np.linalg.solve(R,r)
        # #H = np.linalg.lstsq(R,r)
        # #return desired - np.dot(H,input)
        # print "done solving"

        pylab.figure()
        pylab.plot(H.real)
        pylab.plot(H.imag)
        pylab.plot(Input.real)
        pylab.plot(Input.imag)
        pylab.plot(Desired.real)
        pylab.plot(Desired.imag)
        pylab.legend(["hr", "hi", "ir", "ii", "dr", "di"])
        pylab.title("H")

        #return desired - np.fft.irfft(Input*H)
        return np.fft.irfft(H*Input)

    def Tdomain_RLS(self, desired, input, S):
        """
        A DFT is first performed on the data. Then an RLS algorithm is carried out
        for noise cancellation. Related to the RLS_Alt Algorithm 5.3 in the Diniz book.
        The desired and input signals are assumed to be real time-series data.
        """
        # Transform data into frequency domain
        Input = np.fft.rfft(input)
        Desired = np.fft.rfft(desired)

        # Initialisation Procedure
        nCoefficients = S["filterOrderNo"]+1
        nIterations = len(Desired)

        # Pre Allocations
        errorVector = np.zeros(nIterations, dtype="complex")
        outputVector = np.zeros(nIterations, dtype="complex")
        errorVectorPost = np.zeros(nIterations, dtype="complex")
        outputVectorPost = np.zeros(nIterations, dtype="complex")
        coefficientVector = np.zeros((nCoefficients, nIterations+1), dtype="complex")

        # Initial State
        coefficientVector[:,0] = S["initialCoefficients"]   # column 0 is the first column read in the loop
        S_d = S["delta"]*np.eye(nCoefficients)

        # Improve source code regularity, pad with zeros
        prefixedInput = np.concatenate([np.zeros(nCoefficients-1, dtype="complex"),
                                        np.array(Input)])
        invLambda = 1./S["lambda"]

        # Body
        pylab.ion()
        pylab.figure(11)

        for it in range(nIterations):
            regressor = prefixedInput[it:it+nCoefficients][::-1]

            # a priori estimated output
            outputVector[it] = np.dot(coefficientVector[:,it].T, regressor)

            # a priori error
            errorVector[it] = Desired[it] - outputVector[it]

            psi = np.dot(S_d, regressor)
            if np.isnan(psi).any():
                print("psi", psi)
                exit(1)

            pylab.cla()
            #pylab.plot(psi)
            pylab.plot(regressor.real)
            pylab.plot(regressor.imag)
            pylab.plot(coefficientVector[:,it].real)
            pylab.plot(coefficientVector[:,it].imag)
            pylab.legend(["rr", "ri", "cr", "ci"])
            pylab.draw()
            #input("paws")   # interactive pause (raw_input in the Python 2 original)

            S_d = invLambda * (S_d - np.outer(psi, psi) /
                               (S["lambda"] + np.dot(psi.T, regressor)))

            coefficientVector[:,it+1] = coefficientVector[:,it] + \
                np.conjugate(errorVector[it])*np.dot(S_d, regressor)

            # A posteriori estimated output
            outputVectorPost[it] = np.dot(coefficientVector[:,it+1].T, regressor)

            # A posteriori error
            errorVectorPost[it] = Desired[it] - outputVectorPost[it]

        errorVectorPost = np.nan_to_num(errorVectorPost)

        pylab.figure(11)
        print(np.shape(errorVectorPost))
        pylab.plot(errorVectorPost.real)
        pylab.plot(errorVectorPost.imag)
        pylab.show()

        print(errorVectorPost)
        #return np.fft.irfft(Desired)
        return np.fft.irfft(errorVectorPost)
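
    # Hypothetical parameter dictionary for Tdomain_RLS -- key names are those
    # read by the code above, values are only illustrative:
    #
    #   S = {"filterOrderNo": 32,
    #        "initialCoefficients": np.zeros(33),
    #        "delta": 1e3,
    #        "lambda": 0.99}
    #   residual = filt.Tdomain_RLS(desired, reference, S)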

if __name__ == "__main__":

    def noise(nu, t, phi):
        return np.sin(nu*2.*np.pi*t + phi)

    import matplotlib.pyplot as plt

    print("Test driver for adaptive filtering")
    Filt = AdaptiveFilter(.1)
    t = np.arange(0, .5, 1e-4)
    omega = 2000 * 2.*np.pi
    T2 = .100
    n1 = noise(60, t, .2)
    n2 = noise(61, t, .514)
    x = np.sin(omega*t)*np.exp(-t/T2) + 2.3*noise(60, t, .34) + 1.783*noise(31, t, 2.1)
    # adapt_filt_Ref takes a list of reference channels and returns (e, H);
    # mu is chosen to match the constructor value above.
    e, H = Filt.adapt_filt_Ref(x, [n1, n2], 200, .1, "No", .98)
    plt.plot(t, x)
    plt.plot(t, n1)
    plt.plot(t, n2)
    plt.plot(t, e)
    plt.show()