Surface NMR processing and inversion GUI
  1. import numpy, array #,rpy2
  2. from matplotlib import pyplot as plt
  3. import numpy as np
  4. from scipy.optimize import least_squares
  5. #from rpy2.robjects.packages import importr
  6. #import rpy2.robjects as robjects
  7. #import rpy2.robjects.numpy2ri
  8. #import notch
  9. from numpy.fft import fft, fftfreq
  10. # We know/can calculate frequency peak, use this to guess where picks will be.
  11. # maybe have a sliding window that reports peak values.
  12. def peakPicker(data, omega, dt):
  13. # compute window based on omega and dt
  14. # make sure you are not aliased, grab every other peak
  15. window = (2*numpy.pi) / (omega*dt)
  16. data = numpy.array(data)
  17. peaks = []
  18. troughs = []
  19. times = []
  20. times2 = []
  21. indices = []
  22. ws = 0
  23. we = window
  24. ii = 0
  25. for i in range((int)(len(data)/window)):
  26. # initially was just returning this I think avg is better
  27. #times.append( (ws + numpy.abs(data[ws:we]).argmax()) * dt )
  28. peaks.append(numpy.max(data[ws:we]))
  29. times.append( (ws + data[ws:we].argmax()) * dt )
  30. indices.append( ii + data[ws:we].argmax() )
  31. troughs.append(numpy.min(data[ws:we]))
  32. times2.append( (ws + (data[ws:we]).argmin()) * dt )
  33. indices.append( ii + data[ws:we].argmin() )
  34. ws += window
  35. we += window
  36. ii += (int)(we-ws)
  37. #return numpy.array(peaks), numpy.array(times)
  38. # Averaging peaks does a good job of removing bias in noise
  39. return (numpy.array(peaks)-numpy.array(troughs))/2., \
  40. (numpy.array(times)+numpy.array(times2))/2., \
  41. indices
  42. def fun(x, t, y):
  43. """ Cost function for regression, single exponential, no DC term
  44. x[0] = A0
  45. x[1] = zeta
  46. x[2] = df
  47. x[3] = T2
  48. """
  49. # concatenated real and imaginary parts
  50. pre = np.concatenate((-x[0]*np.sin(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3]), \
  51. +x[0]*np.cos(2.*np.pi*x[2]*t + x[1])*np.exp(-t/x[3])))
  52. return y-pre
  53. def fun2(x, t, y):
  54. """ Cost function for regression, single exponential, no DC term
  55. x[0] = A0
  56. x[1] = zeta
  57. x[2] = T2
  58. """
  59. # concatenated real and imaginary parts
  60. pre = np.concatenate((x[0]*np.cos(x[1])*np.exp(-t/x[2]), \
  61. -1.*x[0]*np.sin(x[1])*np.exp(-t/x[2])))
  62. return y-pre
  63. def quadratureDetect2(X, Y, tt, x0="None"):
  64. """ Pure python quadrature detection using Scipy.
  65. X = real part of NMR signal
  66. Y = imaginary component of NMR signal
  67. tt = time
  68. """
  69. print("Pure Python Quad Det", "TODO look at loss functions and method")
  70. # Loss functions, linear, soft_l1, huber, cauchy, arctan
  71. # df
  72. loss = 'cauchy' # 'soft_l1'
  73. method = 'trf' # trf, dogbox, lm
  74. if x0=="None":
  75. x0 = np.array( [1., 0., 0., .2] ) # A0, zeta, df, T2
  76. res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
  77. bounds=( [1., -np.pi, -5, .005] , [1000., np.pi, 5, .800] ),
  78. method=method
  79. )
  80. x = res_lsq.x
  81. print ("A0={} zeta={} df={} T2={}".format(x[0],x[1],x[2],x[3]))
  82. else:
  83. res_lsq = least_squares(fun, x0, args=(tt, np.concatenate((X, Y))), loss=loss, f_scale=1.0,\
  84. bounds=( [1., -np.pi, -5, .005] , [1000., np.pi, 5, .800] ),
  85. method=method
  86. )
  87. #bounds=( [0., 0, -20, .0] , [1., np.pi, 20, .6] ))
  88. x = res_lsq.x
  89. return res_lsq.success, x[0], x[2], x[1], x[3]
  90. # no df
  91. #x = np.array( [1., 0., 0.2] )
  92. #res_lsq = least_squares(fun2, x, args=(tt, np.concatenate((X, Y))), loss='soft_l1', f_scale=0.1)
  93. #x = res_lsq.x
  94. #return conv, E0,df,phi,T2
  95. #return res_lsq.success, x[0], 0, x[1], x[2]
  96. ###################################################################
  97. ###################################################################
  98. ###################################################################
  99. if __name__ == "__main__":
  100. dt = .0001
  101. T2 = .1
  102. omega = 2000.*2*numpy.pi
  103. phi = .0
  104. T = 8.*T2
  105. t = numpy.arange(0, T, dt)
  106. # Synthetic data, simple single decaying sinusoid
  107. # with a single decay parameter and gaussian noise added
  108. data = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) + numpy.random.normal(0,.05,len(t)) \
  109. + numpy.random.randint(-1,2,len(t))*numpy.random.exponential(.2,len(t))
  110. cdata = numpy.exp(-t/T2) * numpy.sin(omega * t + phi) #+ numpy.random.normal(0,.25,len(t))
  111. #data = numpy.random.normal(0,.25,len(t))
  112. sigma2 = numpy.std(data[::-len(data)/4])
  113. #sigma2 = numpy.var(data[::-len(data)/4])
  114. print("sigma2", sigma2)
  115. [peaks,times,indices] = peakPicker(data, omega, dt)
  116. [b1,b2,rT2] = regressCurve(peaks,times)
  117. print("rT2 nonweighted", rT2)
  118. [b1,b2,rT2] = regressCurve(peaks,times,sigma2)
  119. print("rT2 weighted", rT2)
  120. envelope = numpy.exp(-t/T2)
  121. renvelope = numpy.exp(-t/rT2)
  122. #outf = file('regress.txt','w')
  123. #for i in range(len(times)):
  124. # outf.write(str(times[i]) + " " + str(peaks[i]) + "\n")
  125. #outf.close()
  126. plt.plot(t,data, 'b')
  127. plt.plot(t,cdata, 'g', linewidth=1)
  128. plt.plot(t,envelope, color='violet', linewidth=4)
  129. plt.plot(t,renvelope, 'r', linewidth=4)
  130. plt.plot(times, numpy.array(peaks), 'bo', markersize=8, alpha=.25)
  131. plt.legend(['noisy data','clean data','real envelope','regressed env','picks'])
  132. plt.savefig("regression.pdf")
  133. # FFT check
  134. fourier = fft(data)
  135. plt.figure()
  136. freq = fftfreq(len(data), d=dt)
  137. plt.plot(freq, (fourier.real))
  138. plt.show()
  139. # TODO do a bunch in batch mode to see if T2 estimate is better with or without
  140. # weighting and which model is best.
  141. # TODO try with real data
  142. # TODO test filters (median, FFT, notch)
  143. # It looks like weighting is good for relatively low sigma, but for noisy data
  144. # it hurts us. Check