- from __future__ import division
- import numpy as np
- from scipy.sparse.linalg import iterative as iter
- import pylab
- import pprint
- from scipy.optimize import nnls
-
- import matplotlib.pyplot as plt
-
def PhiB(mux, muy, minVal, maxVal, x):
    """Return the magnitude of the log-barrier term for the lower bound.

    Computes ``mux * |sum(log(x - minVal))|``.  The ``muy`` and ``maxVal``
    arguments are accepted only for interface compatibility with callers
    (``logBarrier`` passes both barrier weights and both bounds) and do not
    participate in the result.
    """
    log_sum = np.sum(np.log(x - minVal))
    return mux * np.abs(log_sum)
-
def curvaturefd(x, y, t):
    """Curvature of the parametric curve (x(t), y(t)) via finite differences.

    Uses ``np.gradient`` twice for first and second derivatives and returns
    ``|x' y'' - y' x''| / (x'^2 + y'^2)^(3/2)`` evaluated at each sample.
    """
    dx = np.gradient(x, t)
    dy = np.gradient(y, t)
    ddx = np.gradient(dx, t)
    ddy = np.gradient(dy, t)
    numerator = np.abs(dx * ddy - dy * ddx)
    denominator = np.power(dx**2 + dy**2, 1.5)
    return numerator / denominator
-
def curvatureg(x, y):
    """Curvature of the curve (x, y) using Gaussian-smoothed derivatives.

    First and second derivatives are estimated with a first-order
    ``gaussian_filter1d`` (sigma=1) applied successively, then combined as
    ``|x' y'' - y' x''| / (x'^2 + y'^2)^(3/2)``.
    """
    from scipy.ndimage import gaussian_filter1d

    def _d(a):
        # first-order Gaussian derivative, sigma fixed at 1 as in the FD variant
        return gaussian_filter1d(a, sigma=1, order=1)

    dx = _d(x)
    dy = _d(y)
    ddx = _d(dx)
    ddy = _d(dy)
    return np.abs(dx * ddy - dy * ddx) / np.power(dx**2 + dy**2, 1.5)
-
def logBarrier(A, b, T2Bins, lambdastar, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False, MAXITER=70, fignum=1000, sigma=1, callback=None):
    """Implements a log-barrier Tikhonov solution to a linear system of equations
    Ax = b s.t. x_min < x < x_max. A log-barrier term is used for the constraint.

    Parameters
    ----------
    A : (M, N) array_like
        Forward-modelling (kernel) matrix.
    b : (M,) array_like
        Observed data vector.
    T2Bins : (N,) array_like
        Model bin centres; their spacing weights the smoothness operator.
    lambdastar : str
        Regularisation-selection strategy: "discrepency" or "lcurve".
        NOTE(review): "discrepency" is misspelled but is part of the caller
        contract — do not "fix" the string without updating callers.
    x_0 : array_like or scalar, optional
        Initial model guess; a scalar means start from ~zero (1e-10).
    xr : array_like or scalar, optional
        Reference model; a scalar means a zero reference model.
    alpha : float, optional
        Initial Tikhonov regularisation weight; rescaled every outer iteration.
    mu1, mu2 : float, optional
        Initial log-barrier weights; both are recomputed internally, so the
        passed-in values only seed the very first barrier evaluation.
    smooth : str, optional
        Model-constraint style: "Both" (smooth + small), "Smooth", or
        "Smallest". Any other value aborts via exit().
    MAXITER : int, optional
        Maximum number of outer iterations.
    fignum, callback :
        Accepted for interface compatibility; unused in this routine.
    sigma : float or array_like, optional
        Data standard deviation used to build the data-weighting matrix.

    Returns
    -------
    tuple
        If lambdastar == "lcurve":
            (x, ibreak, sqrt(phid/len(b)), PHIM, PHID/len(b), argmax(kappa))
        otherwise:
            (x, ibreak, sqrt(phid/len(b)))
        ibreak: 1 = converged/selected, 0 = L-curve pick already at target
        misfit, 2 = hit MAXITER.
    """
    # TODO input
    minVal = 0.0   # lower bound enforced per-element by the log barrier
    maxVal = 1e8   # upper bound on sum(x) enforced by the second barrier

    Wd =    (np.eye(len(b)) / (sigma))       # data weighting, Wd = eye( sigma )
    WdTWd = (np.eye(len(b)) / (sigma**2))    # Wd^T Wd, precomputed

    ATWdTWdA = np.dot(A.conj().transpose(), np.dot( WdTWd, A ))  # TODO, implicit calculation instead?
    N = np.shape(A)[1]   # number of model parameters
    M = np.shape(A)[0]   # number of data
    SIGMA = .25          # centering factor for the barrier-weight (mu) update
    EPSILON = 1e-35      # inner-loop tolerance on phib/(phid + alpha*phim)

    # reference model: scalar xr means "zero reference model"
    if np.size(xr) == 1:
        xr = np.zeros(N)

    # initial guess: small positive offset keeps the log barrier finite
    if np.size(x_0) == 1:
        x = 1e-10 + np.zeros(N)
    else:
        x = 1e-10 + x_0

    # Construct model constraint base (regularisation operator)
    Phim_base = np.zeros( [N , N] )
    a1 = .05  # weight of the "smallest" (identity) term added in "Both" mode

    # calculate largest term
    D1 = 1./abs(T2Bins[1]-T2Bins[0])
    D2 = 1./abs(T2Bins[2]-T2Bins[1])
    #a2 = 1. #(1./(2.*D1+D2)) # smooth

    if smooth == "Both":
        #print ("Both small and smooth model")
        # Second-difference operator weighted by inverse bin spacing,
        # normalised, plus a small identity ("smallest") contribution.
        for ip in range(N):
            D1 = 0.
            D2 = 0.
            if ip > 0:
                #D1 = np.sqrt(1./abs(T2Bins[ip]-T2Bins[ip-1]))**.5
                D1 = (1./abs(T2Bins[ip]-T2Bins[ip-1])) #**2
            if ip < N-1:
                #D2 = np.sqrt(1./abs(T2Bins[ip+1]-T2Bins[ip]))**.5
                D2 = (1./abs(T2Bins[ip+1]-T2Bins[ip])) #**2
            if ip > 0:
                Phim_base[ip,ip-1] = -(D1)
            if ip == 0:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            elif ip == N-1:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            else:
                Phim_base[ip,ip  ] = 2.*(D1+D2)
            if ip < N-1:
                Phim_base[ip,ip+1] = -(D2)
        Phim_base /= np.max(Phim_base)  # normalize
        Phim_base += a1*np.eye(N)

    elif smooth == "Smooth":
        #print ("Smooth model")
        # Unweighted second-difference operator with slightly asymmetric
        # diagonal to bias the endpoints (see per-line comments).
        for ip in range(N):
            if ip > 0:
                Phim_base[ip,ip-1] = -1    # smooth in log space
            if ip == 0:
                Phim_base[ip,ip  ] = 2.05  # Encourage a little low model
            elif ip == N-1:
                Phim_base[ip,ip  ] = 2.5   # Penalize long decays
            else:
                Phim_base[ip,ip  ] = 2.1   # Smooth and small
            if ip < N-1:
                Phim_base[ip,ip+1] = -1    # smooth in log space

    elif smooth == "Smallest":
        # Identity operator: pure minimum-norm (smallest-model) penalty.
        for ip in range(N):
            Phim_base[ip,ip ] = 1.
    else:
        print("non valid model constraint:", smooth)
        exit()

    Phi_m = alpha*Phim_base
    WmTWm = Phim_base  # np.dot(Phim_base, Phim_base.T)
    b_pre = np.dot(A, x)
    phid = np.linalg.norm( np.dot(Wd, (b-b_pre)) )**2      # data misfit
    phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2  # model norm

    # seed the barrier weights so phib starts comparable to phid + alpha*phim
    mu2 = phim
    phib = PhiB(mu1, mu2, 0, 1e8, x)
    mu1 = ((phid + alpha*phim) / phib)

    # Per-iteration histories used for L-curve selection
    PHIM = []
    PHID = []
    MOD = []

    ALPHA = []
    ALPHA.append(alpha)
    for i in range(MAXITER):

        Phi_m = alpha*Phim_base

        # reset mu1 at each iteration
        # Calvetti -> No ; Li -> Yes
        # without this, non monotonic convergence occurs...which is OK if you really trust your noise
        mu1 = ((phid + alpha*phim) / phib)

        WmTWm = Phim_base  # np.dot(Phim_base, Phim_base.T)
        phid_old = phid
        inner = 0

        First = True  # guarantee entry into the inner while loop

        xp = np.copy(x)  # prior step x
        # NOTE(review): xp does not appear to be used after this point — confirm
        # before removing.

        # Inner loop: damped Newton steps until the barrier term is negligible
        # relative to the total objective.
        while ( (phib / (phid+alpha*phim)) > EPSILON or First==True ):

            First = False
            # Log barrier, keep each element above minVal
            X1 = np.eye(N) * (x-minVal)**-1
            X2 = np.eye(N) * (x-minVal)**-2

            # Log barrier, keep sum below maxVal TODO normalize by component. Don't want to push all down
            Y1 = np.eye(N) * (maxVal - np.sum(x))**-1
            Y2 = np.eye(N) * (maxVal - np.sum(x))**-2

            # Newton system matrix and Jacobi (diagonal) preconditioner.
            # NOTE(review): M here shadows the data count M defined above;
            # harmless as written since the count is never used afterwards,
            # but worth renaming.
            AA = ATWdTWdA + mu1*X2 + mu2*Y2 + Phi_m
            M = np.eye( N ) * (1./np.diag(ATWdTWdA + mu1*X2 + mu2*Y2 + Phi_m))

            # Solve system (newton step) (Li)
            b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) + 2.*mu2*np.diag(Y1) - alpha*np.dot(WmTWm,(x-xr))
            # cg returns (solution, info); only the solution is used here
            ztilde = iter.cg(AA, b2, M = M)
            h = (ztilde[0].real)

            # Solve system (direct solution) (Calvetti)
            #b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b)) + 2.*mu1*np.diag(X1) + 2.*mu2*np.diag(Y1) - alpha*np.dot(WmTWm,(x-xr))
            #ztilde = iter.cg(AA, b2, M=M, x0=x)
            #h = (ztilde[0].real - x)

            # step size: damp so x stays strictly positive (95% of the step
            # that would hit the barrier); 1e-120 guards divide-by-zero
            d = np.min( (1, 0.95 * np.min(x/np.abs(h+1e-120))) )

            ##########################################################
            # Update and fix any over/under stepping
            x += d*h

            # Determine mu steps to take
            s1 = mu1 * (np.dot(X2, ztilde[0].real) - 2.*np.diag(X1))
            s2 = mu2 * (np.dot(Y2, ztilde[0].real) - 2.*np.diag(Y1))

            # determine mu for next step (centering update, scaled by SIGMA/N)
            mu1 = SIGMA/N * np.abs(np.dot(s1, x))
            mu2 = SIGMA/N * np.abs(np.dot(s2, x))

            # re-evaluate objective pieces at the new x
            b_pre = np.dot(A, x)
            phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
            phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
            phib = PhiB(mu1, mu2, minVal, maxVal, x)
            inner += 1

        # record history for L-curve / diagnostics
        PHIM.append(phim)
        PHID.append(phid)
        MOD.append(np.copy(x))

        # determine alpha: cool the regularisation toward the target misfit,
        # never growing it (capped at 0.95 per iteration)
        scale = (len(b)/phid)
        #alpha *= np.sqrt(scale)
        alpha *= min(scale, .95)  # was .85...
        ALPHA.append(alpha)
        #alpha = ALPHA[i+1]


        # if np.sqrt(phid/len(b)) < 0.97:
        #     ibreak = -1
        #     print ("------------overshot--------------------", alpha, np.sqrt(phid/len(b)), ibreak)
        #     alpha *= 2. #0
        #     x -= d*h
        #     b_pre = np.dot(A, x)
        #     phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
        #     phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )#**2
        #     mu1 = ((phid + alpha*phim) / phib)
        if lambdastar == "discrepency":
            # discrepancy principle: stop once RMS misfit reaches 1
            if np.sqrt(phid/len(b)) < 1.00 or alpha < 1e-5:
                ibreak = 1
                print ("optimal solution found", alpha, np.sqrt(phid/len(b)), ibreak)
                break
        # slow convergence, bail and use L-curve
        # TI- only use L-curve. Otherwise results for perlin noise are too spurious for paper.
        if lambdastar == "lcurve":
            if i>20 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-4):
                #if np.sqrt(phid/len(b)) < 3.0 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-3):
                ibreak = 1
                MOD = np.array(MOD)
                print ("###########################")  #slow convergence", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
                print ("Using L-curve criteria")
                # pick the model at the point of maximum curvature of the
                # (log phim, log phid) trade-off curve
                kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
                #kappa2 = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
                #kappa = curvature( np.array(PHIM), np.array(PHID))
                x = MOD[ np.argmax(kappa) ]
                b_pre = np.dot(A, x)
                phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
                phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
                mu1 = ((phid + alpha*phim) / phib)
                print ("L-curve selected", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
                print ("###########################")
                if np.sqrt(phid/len(b)) <= 1:
                    ibreak=0

                #plt.figure()
                #plt.plot( (np.array(PHIM)), np.log(np.array(PHID)/len(b)), '.-')
                #plt.plot( ((np.array(PHIM))[np.argmax(kappa)]) , np.log( (np.array(PHID)/len(b))[np.argmax(kappa)] ), '.', markersize=12)
                #plt.axhline()
                #plt.plot( np.array(PHIM), np.array(PHID), '.-')
                #plt.plot( np.array(PHIM)[np.argmax(kappa)], np.array(PHID)[np.argmax(kappa)], '.', markersize=12)
                #plt.savefig('lcurve.pdf')
                break

    PHIM = np.array(PHIM)
    PHID = np.array(PHID)

    # Hit the iteration cap without triggering a break: fall back to an
    # L-curve pick over everything recorded so far.
    if (i == MAXITER-1 ):
        ibreak = 2
        print("Reached max iterations!!", alpha, np.sqrt(phid/len(b)), ibreak)
        kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
        x = MOD[ np.argmax(kappa) ]
        b_pre = np.dot(A, x)
        phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
        phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
        mu1 = ((phid + alpha*phim) / phib)

    if lambdastar == "lcurve":
        return x, ibreak, np.sqrt(phid/len(b)), PHIM, PHID/len(b), np.argmax(kappa)
    else:
        return x, ibreak, np.sqrt(phid/len(b))
-
-
-
if __name__ == "__main__":
    # Smoke entry point only — this module is intended to be imported for
    # logBarrier(); running it directly just confirms the file loads.
    print("Test")
|