Surface NMR processing and inversion GUI

logbarrier.py 13KB

from __future__ import division
import numpy as np
from scipy.sparse.linalg import iterative as iter
from scipy.sparse import eye as seye
import pylab
import pprint
from scipy.optimize import nnls
import matplotlib.pyplot as plt
from akvo.tressel.SlidesPlot import *

def PhiB(mux, minVal, x):
    """Log-barrier term mu * |sum(log(x - minVal))|, used to keep x above minVal."""
    phib = mux * np.abs( np.sum(np.log( x-minVal)) )
    return phib
def curvaturefd(x, y, t):
    """Finite-difference curvature of the parametric curve (x(t), y(t))."""
    x1 = np.gradient(x,t)
    x2 = np.gradient(x1,t)
    y1 = np.gradient(y,t)
    y2 = np.gradient(y1,t)
    return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)

def curvatureg(x, y):
    """Curvature of the parametric curve (x, y) using Gaussian-derivative smoothing."""
    from scipy.ndimage import gaussian_filter1d
    # first and second derivatives via derivative-of-Gaussian filters
    x1 = gaussian_filter1d(x, sigma=1, order=1)#, mode='constant', cval=x[-1])
    x2 = gaussian_filter1d(x1, sigma=1, order=1)#, mode='constant', cval=y[-1])
    y1 = gaussian_filter1d(y, sigma=1, order=1)#, mode='constant', cval=x1[-1])
    y2 = gaussian_filter1d(y1, sigma=1, order=1)#, mode='constant', cval=y1[-1])
    return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
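
# Both helpers above evaluate the parametric-curve curvature
#     kappa = |x' y'' - y' x''| / (x'^2 + y'^2)^(3/2);
# logBarrier() applies curvaturefd to (log(phi_m), log(phi_d)) to locate the
# corner of the L-curve as the iteration with the largest kappa.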

def logBarrier(A, b, T2Bins, lambdastar, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth="Smallest", MAXITER=70, fignum=1000, sigma=1, callback=None):
    """Implements a log-barrier Tikhonov solution to the linear system of equations
       Ax = b, subject to x_min < x < x_max. A log-barrier term enforces the constraint.
    """
    # TODO input
    minVal = 0.0
    #maxVal = 1e8

    Wd    = (np.eye(len(b)) / (sigma))      # data weighting, Wd = I/sigma
    WdTWd = (np.eye(len(b)) / (sigma**2))   # Wd^T Wd = I/sigma^2
    ATWdTWdA = np.dot(A.conj().transpose(), np.dot( WdTWd, A ))   # TODO, implicit calculation instead?
    N = np.shape(A)[1]   # number of model parameters
    M = np.shape(A)[0]   # number of data

    SIGMA = .25      # lower is more aggressive relaxation of the log barrier
    EPSILON = 1e-25  # 1e-35

    # reference model
    if np.size(xr) == 1:
        xr = np.zeros(N)

    # initial guess
    if np.size(x_0) == 1:
        x = 1e-10 + np.zeros(N)
    else:
        x = 1e-10 + x_0

    # Construct model constraint base
    Phim_base = np.zeros( [N , N] )
    a1 = .05   # smallest too

    # calculate largest term
    D1 = 1./abs(T2Bins[1]-T2Bins[0])
    D2 = 1./abs(T2Bins[2]-T2Bins[1])
    #a2 = 1. #(1./(2.*D1+D2)) # smooth

    if smooth == "Both":
        #print ("Both small and smooth model")
        for ip in range(N):
            D1 = 0.
            D2 = 0.
            if ip > 0:
                #D1 = np.sqrt(1./abs(T2Bins[ip]-T2Bins[ip-1]))**.5
                D1 = (1./abs(T2Bins[ip]-T2Bins[ip-1])) #**2
            if ip < N-1:
                #D2 = np.sqrt(1./abs(T2Bins[ip+1]-T2Bins[ip]))**.5
                D2 = (1./abs(T2Bins[ip+1]-T2Bins[ip])) #**2
            if ip > 0:
                Phim_base[ip,ip-1] = -(D1)
            if ip == 0:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            elif ip == N-1:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            else:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            if ip < N-1:
                Phim_base[ip,ip+1] = -(D2)
        Phim_base /= np.max(Phim_base)   # normalize
        Phim_base += a1*np.eye(N)

    elif smooth == "Smooth":
        #print ("Smooth model")
        for ip in range(N):
            if ip > 0:
                Phim_base[ip,ip-1] = -1    # smooth in log space
            if ip == 0:
                Phim_base[ip,ip  ] = 2.05  # Encourage a little low model
            elif ip == N-1:
                Phim_base[ip,ip  ] = 2.5   # Penalize long decays
            else:
                Phim_base[ip,ip  ] = 2.1   # Smooth and small
            if ip < N-1:
                Phim_base[ip,ip+1] = -1    # smooth in log space

    elif smooth == "Smallest":
        for ip in range(N):
            Phim_base[ip,ip  ] = 1.
    else:
        print("invalid model constraint:", smooth)
        exit()

    Phi_m = alpha*Phim_base
    WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)

    b_pre = np.dot(A, x)
    phid = np.linalg.norm( np.dot(Wd, (b-b_pre)) )**2
    phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
    mu2 = phim
    phib = PhiB(mu1, 0, x)
    mu1 = ((phid + alpha*phim) / phib)

    PHIM = []
    PHID = []
    MOD = []
    ALPHA = []
    ALPHA.append(alpha)
    #ALPHA = np.linspace( alpha, 1, MAXITER )

    print ("{:^5} {:^15} {:^15} {:^15} {:^15} {:^10} {:^10}".format("iter.", "lambda", "phi_d", "phi_m", "phi", "kappa", "kappa dist."), flush=True)
    print ("{:^5} {:>15} {:<15} {:<15} {:<15} {:<10} {:<10}".format("-----", "---------------", "---------------", "---------------", "---------------", "----------", "----------"), flush=True)
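
    # Outer loop: cool the regularization parameter alpha and test the stopping
    # rule; inner loop: damped Newton steps on the log-barrier subproblem until
    # the barrier term is negligible relative to phid + alpha*phim.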
    for i in range(MAXITER):
        #alpha = ALPHA[i]
        Phi_m = alpha*Phim_base

        # reset mu1 at each iteration
        # Calvetti -> No ; Li -> Yes
        # without this, non-monotonic convergence occurs... which is OK if you really trust your noise
        mu1 = ((phid + alpha*phim) / phib)

        WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)
        phid_old = phid
        inner = 0

        First = True    # guarantee entry into the inner loop
        xp = np.copy(x) # prior step x

        # quick and dirty solution
        #b2a = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) - alpha*np.dot(WmTWm,(x-xr))
        #xg = nnls(ATWdTWdA + Phi_m, b2a)
        #x = xg[0]

        #while ( False ): # skip the hard stuff
        while ( (phib / (phid+alpha*phim)) > EPSILON or First==True ):
            First = False

            # Log barrier, keep each element above minVal
            X1 = np.eye(N) * (x-minVal)**-1
            X2 = np.eye(N) * (x-minVal)**-2

            # Log barrier, keep sum below maxVal TODO normalize by component. Don't want to push all down
            #Y1 = np.eye(N) * (maxVal - np.sum(x))**-1
            #Y2 = np.eye(N) * (maxVal - np.sum(x))**-2

            AA = ATWdTWdA + mu1*X2 + Phi_m
            M = np.eye( N ) * (1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))   # Jacobi (diagonal) preconditioner
            #M = seye( N ).dot(1./np.diag(ATWdTWdA + mu1*X2 + Phi_m))

            # Solve system (Newton step) (Li)
            b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
            ztilde = iter.cg(AA, b2, M = M)
            h = (ztilde[0].real)

            # Solve system (direct solution) (Calvetti)
            #b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b)) + 2.*mu1*np.diag(X1) - alpha*np.dot(WmTWm,(x-xr))
            #ztilde = iter.cg(AA, b2, M=M, x0=x)
            #h = (ztilde[0].real - x)

            # step size
            d = np.min( (1, 0.95 * np.min(x/np.abs(h+1e-120))) )

            ##########################################################
            # Update and fix any over/under stepping
            x += d*h

            # Determine mu steps to take
            s1 = mu1 * (np.dot(X2, ztilde[0].real) - 2.*np.diag(X1))
            #s2 = mu2 * (np.dot(Y2, ztilde[0].real) - 2.*np.diag(Y1))

            # determine mu for next step
            mu1 = SIGMA/N * np.abs(np.dot(s1, x))
            #mu2 = SIGMA/N * np.abs(np.dot(s2, x))

            b_pre = np.dot(A, x)
            phid = np.linalg.norm(np.dot(Wd, (b-b_pre)))**2
            phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
            phib = PhiB(mu1, minVal, x)
            inner += 1

        PHIM.append(phim)
        PHID.append(phid)
        MOD.append(np.copy(x))

        tphi = phid + alpha*phim

        # determine alpha
        scale = 1.5*(len(b)/phid)
        #alpha *= np.sqrt(scale)
        alpha *= min(scale, .95)   # was .85...
        #print("alpha", min(scale, 0.99))
        #alpha *= .99 # was .85...
        ALPHA.append(alpha)
        #alpha = ALPHA[i+1]

        #print("inversion progress", i, alpha, np.sqrt(phid/len(b)), phim, flush=True)
        #print ("{:<8} {:<15} {:<10} {:<10}".format(i, alpha, np.sqrt(phid/len(b)), phim), flush=True)
        if i <= 4:
            print ("{:^5} {:>15.4f} {:>15.4f} {:>15.4f} {:>15.4f}".format(i, alpha, np.sqrt(phid/len(b)), phim, tphi ), flush=True)

        #if np.sqrt(phid/len(b)) < 0.97:
        #    ibreak = -1
        #    print ("------------overshot--------------------", alpha, np.sqrt(phid/len(b)), ibreak)
        #    alpha *= 2. #0
        #    x -= d*h
        #    b_pre = np.dot(A, x)
        #    phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
        #    phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )#**2
        #    mu1 = ((phid + alpha*phim) / phib)
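
        # Stopping rules: "discrepency" stops once the normalized RMS misfit
        # sqrt(phid/len(b)) falls below 1 (or alpha collapses below 1e-5);
        # "lcurve" stops once the curvature maximum of the (log phi_m, log phi_d)
        # curve lags the current iteration by more than 4 steps, then restores
        # the model at that corner.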
        if lambdastar == "discrepency":
            if np.sqrt(phid/len(b)) < 1.00 or alpha < 1e-5:
                ibreak = 1
                print ("optimal solution found", alpha, np.sqrt(phid/len(b)), ibreak)
                break

        # slow convergence, bail and use L-curve
        # TI - only use L-curve. Otherwise results for Perlin noise are too spurious for the paper.
        if lambdastar == "lcurve":
            if i > 4:
                kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:i+1]) #ALPHA[0:-1])
                #kappa = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
                #print("max kappa", np.argmax(kappa), "distance from", i-np.argmax(kappa))
                print ("{:^5} {:>15.4f} {:>15.4f} {:>15.4f} {:>15.4f} {:^10} {:^10}".format(i, alpha, np.sqrt(phid/len(b)), phim, tphi, np.argmax(kappa), i-np.argmax(kappa)), flush=True)
            if i > 4 and (i-np.argmax(kappa)) > 4: # ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-4)
                #if np.sqrt(phid/len(b)) < 3.0 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-3):
                ibreak = 1
                MOD = np.array(MOD)
                print ("################################") #slow convergence", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
                print ("Using L-curve criteria")
                #kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
                #kappa2 = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
                #kappa = curvature( np.array(PHIM), np.array(PHID))
                x = MOD[ np.argmax(kappa) ]
                alphastar = ALPHA[ np.argmax(kappa) ]
                b_pre = np.dot(A, x)
                phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
                phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
                mu1 = ((phid + alpha*phim) / phib)
                print ("L-curve selected: iteration=", np.argmax(kappa)) #, " lambda*=", alpha, "phid_old=", np.sqrt(phid_old/len(b)), "phid=", np.sqrt(phid/len(b)), ibreak)
                print ("################################")
                if np.sqrt(phid/len(b)) <= 1:
                    ibreak = 0

                fig = plt.figure( figsize=(pc2in(20.0),pc2in(22.)) )
                ax1 = fig.add_axes( [.2,.15,.6,.7] )
                #plt.plot( (np.array(PHIM)), np.log(np.array(PHID)/len(b)), '.-')
                #plt.plot( ((np.array(PHIM))[np.argmax(kappa)]) , np.log( (np.array(PHID)/len(b))[np.argmax(kappa)] ), '.', markersize=12)
                #plt.axhline()
                lns1 = plt.plot( np.log(np.array(PHIM)), np.log(np.sqrt(np.array(PHID)/len(b))), '.-', label="L curve")
                lns2 = plt.plot( np.log(np.array(PHIM))[np.argmax(kappa)], np.log(np.sqrt(np.array(PHID)/len(b))[np.argmax(kappa)]), '.', markersize=12, label=r"$\lambda^*$")
                ax2 = plt.twinx()
                lns3 = ax2.plot( np.log(np.array(PHIM)), kappa, color='orange', label="curvature" )

                # Single legend
                lns = lns1 + lns3
                labs = [l.get_label() for l in lns]
                ax2.legend(lns, labs, loc=0)

                ax1.set_xlabel(r"$\phi_m$")
                ax1.set_ylabel(r"$\phi_d$")
                ax2.set_ylabel("curvature")

                plt.savefig('lcurve.pdf')
                break

    PHIM = np.array(PHIM)
    PHID = np.array(PHID)

    if (i == MAXITER-1 ):
        ibreak = 2
        #print("Reached max iterations!!", alpha, np.sqrt(phid/len(b)), ibreak)
        #kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
        x = MOD[-1]
        b_pre = np.dot(A, x)
        phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
        phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
        mu1 = ((phid + alpha*phim) / phib)

    if lambdastar == "lcurve":
        #print("Returning L curve result")
        # note: this return path assumes the L-curve break above fired, so that
        # kappa and alphastar are defined
        return x, ibreak, np.sqrt(phid/len(b)), PHIM, PHID/len(b), np.argmax(kappa), Wd, Phim_base, alphastar
    else:
        print("Returning max iteration result")
        return x, ibreak, np.sqrt(phid/len(b))

if __name__ == "__main__":
    print("Test")
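
    # Minimal synthetic usage sketch (an illustrative example, not part of the
    # Akvo workflow): build a multi-exponential kernel A[i,j] = exp(-t_i/T2_j),
    # create noisy data from a two-peak model, and invert with the
    # discrepancy-principle stopping rule. Bin values, noise level, and peak
    # positions are arbitrary choices for demonstration only.
    np.random.seed(0)
    t      = np.linspace(5e-3, 1.0, 200)        # decay-time samples (s)
    T2Bins = np.logspace(-2.5, 0, 30)           # hypothetical T2 bins (s)
    A      = np.exp(-np.outer(t, 1./T2Bins))    # forward kernel
    x_true = np.zeros(len(T2Bins))
    x_true[8], x_true[20] = 1.0, 0.5            # two-peak synthetic model
    sigma  = 1e-2                               # additive noise standard deviation
    b      = np.dot(A, x_true) + sigma*np.random.randn(len(t))

    x_est, ibreak, nrms = logBarrier(A, b, T2Bins, "discrepency",
                                     smooth="Smooth", MAXITER=50, sigma=sigma)
    print("exit flag =", ibreak, " normalized RMS misfit =", nrms)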