
GJI paper consistency

Branch: master
Trevor Irons committed 6 years ago
Parent commit: 6ac0a32661
7 changed files with 3645 additions and 136 deletions
  1. GUI/borehole2.py  (+1, -1)
  2. GUI/borehole2.ui  (+3420, -0)
  3. GUI/lemma.png  (BIN)
  4. GUI/main.py  (+2, -2)
  5. MRProc.py  (+79, -46)
  6. decay.py  (+3, -2)
  7. logbarrier.py  (+140, -85)

GUI/borehole2.py  (+1, -1)

@@ -2,7 +2,7 @@
 
 # Form implementation generated from reading ui file 'borehole2.ui'
 #
-# Created: Mon Oct 30 10:03:36 2017
+# Created: Mon Oct 30 10:31:57 2017
 #      by: pyside-uic 0.2.15 running on PySide 1.2.2
 #
 # WARNING! All changes made in this file will be lost!

GUI/borehole2.ui  (+3420, -0): file diff suppressed because it is too large


GUI/lemma.png  (BIN)


GUI/main.py  (+2, -2)

@@ -943,8 +943,8 @@ class MyForm(QtGui.QMainWindow): #, threading.Thread):
             dmin = 7672  # Morrow           #
             dmax = 7707  # Morrow Bottom    #
             #################################
-            #dmin = 7672+15.25  # Morrow           #
-            #dmax = 7707-17.25  # Morrow Bottom    #
+            #dmin = 7672+10.25  # Morrow           #
+            #dmax = 7707-10.25  # Morrow Bottom    #
             #dmin = 0 
             #dmax = 7510
             #dmax = 10000

MRProc.py  (+79, -46)

@@ -282,19 +282,8 @@ class MRProc(QObject):
         if self.NS >= 2:
             self.sigma = np.std( np.imag(self.T2N) )
         else:
-            #N = np.real(self.T2D)
             s0 = np.std( np.real(self.T2D) )
-            #sr = ( np.std( (np.exp(-1j*self.zeta)*N).real ))  
-            #si = ( np.std( (np.exp(-1j*self.zeta)*N).imag ))  
-            print("s0", s0)
-            self.sigma = s0 # np.array((.15)) #s0 #+ (sr+si-s0)/2.   
-            #self.phasegain =  (s0 + (sr+si-s0)/2) / s0
-            #print("ratio",  s0,  (s0+(sr+si-s0)/2.)/s0  )
-            #nr = (np.exp(-1j*self.zeta)*N).real
-            #ni = (np.exp(-1j*self.zeta)*N).imag
-            #print ("phasegain", self.phasegain)
-            #self.T2D.real *= self.phasegain 
-            #print("SIGMA CALCULATION HERE", s0, self.sigma)
+            self.sigma = s0 
 
     def computeSTDBurst(self):
         if self.NS >= 2:
@@ -373,13 +362,22 @@ class MRProc(QObject):
 
         if self.burst:
             self.gateIntegrateBurst(gpd, stackEfficiency)
+            print ("BBBBBUUURSSSSTTTTTTTTTTTT DOOOM FIXME MRProc.py!!!!!!!!!!!")
+            # I need to apply gate shifting logic to burst data as well  
+
+        #########################################################################
+        # use artificial time gates so that early times are fully captured at gpd
+        # very minor correction for borehole data. 
+        T2T0 = self.T2T[0]
+        T2TD = self.T2T[0] - (self.T2T[1]-self.T2T[0])
+        self.T2T -= T2TD
 
         # calculate total number of decades
         nd = np.log10(self.T2T[-1]/self.T2T[0])   #np.log10(self.T2T[-1]) -  np.log10(self.T2T[-1])
         tdd = np.logspace( np.log10(self.T2T[0]), np.log10(self.T2T[-1]), (int)(gpd*nd)+1, base=10, endpoint=True) 
         tdl = tdd[0:-1]     # these are left edges
-        tdr = tdd[1::]      # these are left edges
-        td = (tdl+tdr) / 2. # window centres
+        tdr = tdd[1::]      # approximate left edges
+        td = (tdl+tdr) / 2. # approximate window centres
 
         htd = np.zeros( len(td), dtype=complex )
         htn = np.zeros( len(td), dtype=complex )
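The added lines above shift the echo-time axis by one echo spacing (T2TD) before the logarithmic gates are built, so the earliest echoes are fully captured at the requested gates per decade (gpd); the shift is undone again further down the file (self.T2T = td + T2TD). A minimal standalone sketch of that gate construction, with made-up echo times and gpd; nothing below comes from the repository beyond what the hunk itself shows:

    import numpy as np

    # hypothetical CPMG echo train: first echo at 3 ms, 2 ms spacing (illustrative values only)
    T2T = 3e-3 + np.arange(1000) * 2e-3
    gpd = 10                                     # assumed gates per decade

    T2TD = T2T[0] - (T2T[1] - T2T[0])            # shift so the first gate starts one echo spacing in
    T2T = T2T - T2TD

    nd  = np.log10(T2T[-1] / T2T[0])             # total number of decades spanned
    tdd = np.logspace(np.log10(T2T[0]), np.log10(T2T[-1]), int(gpd*nd) + 1, endpoint=True)
    tdl, tdr = tdd[:-1], tdd[1:]                 # left and right gate edges
    td  = 0.5 * (tdl + tdr)                      # provisional gate centres
    print(len(td), "gates; centres are restored to true time with td + T2TD after gating")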
@@ -391,6 +389,9 @@ class MRProc(QObject):
             if ( self.T2T[itd] > tdr[ii] ):
                 if (ii < len(td)-1):
                     ii += 1
+                    # correct window edges to centre about data 
+                    tdr[ii-1] = (self.T2T[itd-1]+self.T2T[itd])*.5 
+                    tdl[ii  ] = (self.T2T[itd-1]+self.T2T[itd])*.5
                 else:
                     break
             isum[ii] += 1
@@ -399,32 +400,52 @@ class MRProc(QObject):
             #htn[ii] += self.T2N[ itd ]
             Vars[ii] += self.sigma**2
             self.emit(SIGNAL("updateProgress(int)"), (int)(1e2*itd/len(self.T2T)))  
-        
+
+        # Correct window centres 
+        td = (tdl+tdr) / 2.             # actual window centres       
+ 
         # Theoretical gates 
         # The 1.5 should be 2 in the theory, the 1.5 compensates for correlated noise, etc. Bascically 
         #       we don't see noise reduce like ideal averaging. 
         self.sigma = np.sqrt(Vars  * (1./isum)**stackEfficiency ) # Var (aX) = a^2 Var(x)
 
         # Bootstrap
-        bootstrap = False # False
-        bs = open("bootstrap.dat", "a")
+        bootstrap = True
+        #bs = open("bootstrap.dat", "a")
+
         if bootstrap:
-            nboot=10000
-            #Means, isumbs = self.bootstrapWindows(np.real(self.T2D), 100, isum[isum!=1])
-            Means, isumbs = self.bootstrapWindows(np.real(self.T2D), nboot, np.arange(1,isum[-1]+1))
-            #ts,sm = self.smooth(isumbs, np.std(Means - np.tile(np.mean(Means,axis=1),(nboot,1)).T  , axis=1, ddof=2)) # unbiased
-            # TODO use MAD instead of STD??
-            ts,sm = self.smooth(isumbs, np.std(Means, axis=1, ddof=1)) # unbiased
-            #scale = (1. + (nboot*isum)/(np.ones(ii+1)*len(self.T2D)))**.125
-            #print (scale) 
-            for item in (sm[isum[isum!=1]-1]-self.sigma[isum!=1]): #/self.sigma[isum!=1]:
-                bs.write( str(item) + " " )
-            bs.write("\n")
+            print("Bootstrappin")
+            nboot=20000
+            Means, isumbs = self.bootstrapWindows(np.real(self.T2D), nboot, isum[isum!=1], adapt=True)
+            #Means, isumbs = self.bootstrapWindows(np.real(self.T2D), nboot, np.arange(1,isum[-1]+1), adapt=True)
+
+            # MAD measure 
+            c = stats.norm.ppf(3/4.) 
+            mad = np.ma.median(np.ma.abs(Means), axis=1)/c
+            #nboot2 = nboot//isumbs
+            #std = np.ma.std(Means, axis=1, ddof=2) # unbiased
+            
+            #iqr = stats.iqr( Means, axis=1 )
+
+            # Smoothing spline
+            #ts,sm = self.smooth(isumbs, std) 
+            #ts,sm = self.smooth(isumbs, std) 
+
+            #for ii, s in enumerate(mad): 
+            #    bs.write( str( (s-self.sigma[isum!=1][ii]) / self.sigma[isum!=1][ii] ) + " " )
+            #for item in (sm[isum[isum!=1]-1]-self.sigma[isum!=1])/self.sigma[isum!=1]:
+            #    bs.write( str(item) + " ")
+            #bs.write("\n")
+
             #ts,sm = self.smooth( isumbs, np.std(Means, axis=1))
             #bias = np.average(Means,axis=1)
-            for it in range(len(self.sigma)):
-                self.sigma[it] = sm[isum[it]-1]
-            print('sigma boot', self.sigma)
+            ii = 0
+            for it in range(1, len(self.sigma)):
+                #self.sigma[it] = sm[isum[it]-1]
+                if isum[it] != 1:
+                    #print ( "assigning mad", it , mad[ii] )
+                    self.sigma[it] = mad[ii]
+                    ii += 1
         # RESET times where isum == 1
         ii = 0
         while (isum[ii] == 1):
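The rewritten bootstrap branch above estimates each gate's noise from the median absolute deviation (MAD) of bootstrapped window means instead of their standard deviation; dividing by stats.norm.ppf(3/4) (about 0.6745) makes the MAD consistent with the Gaussian standard deviation. A self-contained check of that estimator on synthetic zero-mean noise; the noise level, window width, and realization count are made-up values, and plain normal draws stand in for the resampled data windows:

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    sigma, nwin, nboot = 0.1, 16, 20000          # assumed noise level, gate width, realizations

    # bootstrap means of nwin-sample windows of zero-mean noise
    means = rng.normal(0.0, sigma, size=(nboot, nwin)).mean(axis=1)

    c = stats.norm.ppf(3/4.)                     # ~0.6745
    mad_sigma = np.median(np.abs(means)) / c     # robust estimate of the std of a window mean
    print(mad_sigma, sigma/np.sqrt(nwin))        # both close to sigma/sqrt(nwin) = 0.025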
@@ -434,7 +455,7 @@ class MRProc(QObject):
         htd /= isum
         htn /= isum
 
-        self.T2T = td
+        self.T2T = td + T2TD
         self.T2D = htd
 
         self.emit(SIGNAL("plotLOG10ENV()"))   
@@ -442,23 +463,34 @@ class MRProc(QObject):
         self.emit(SIGNAL("enableINV()"))   
         self.emit(SIGNAL("doneStatus()"))  
 
-    def bootstrapWindows(self, N, nboot, isum):
+    def bootstrapWindows(self, N, nboot, isum, adapt=False):
         """ Bootstraps noise as a function of gate width
         """
         nc = np.shape(N)[0]
-
         Means = {}
-        Means = np.zeros( (len(isum), nboot)  )
-        for ii, nwin in enumerate(isum):  
-            for iboot in range(nboot):
-                cs = np.random.randint(0,nc-nwin)
-                #Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
-                Means[ii,iboot] = np.mean( np.random.normal(0, self.sigma[0], nwin))
+
+        if adapt:
+            Means = -9999*np.ones((len(isum), nboot//isum[0]))
+            for ii, nwin in enumerate(isum):  
+                for iboot in range(nboot//isum[ii]):
+                    cs = np.random.randint(0,nc-nwin)
+                    Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
+                    #Means[ii,iboot] = np.mean( np.random.normal(0, self.sigma[0], nwin))
+            Means = np.ma.masked_less(Means, -9995)
+
+        else:
+            Means = np.zeros((len(isum), nboot))
+            for ii, nwin in enumerate(isum):  
+                for iboot in range(nboot):
+                    cs = np.random.randint(0,nc-nwin)
+                    Means[ii,iboot] = np.mean( N[cs:cs+nwin] )
+                    #Means[ii,iboot] = np.mean( np.random.normal(0, self.sigma[0], nwin))
+
         return Means, np.array(isum)
 
     def smooth(self, x, y):
         w = np.ones(len(x))
-        w[0] = 100. # force first time gate
+        #w[0] = 1. # force first time gate
         xs = np.arange(1, x[-1]+1, .1) # resample
         #s = UnivariateSpline( np.log(x), np.log(y), s=2.5, w=w)
         s = UnivariateSpline( np.log(x), np.log(y), s=4.5, w=w)
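bootstrapWindows gains an adapt mode: wide gates get proportionally fewer bootstrap realizations (nboot//isum[ii]), unused slots keep a sentinel value, and masking turns the ragged array into something the masked median/MAD above can digest. A toy illustration of the masking mechanics; the widths and counts are arbitrary, and plain normal draws replace the resampled data windows:

    import numpy as np

    rng = np.random.default_rng(1)
    nboot, isum = 12, np.array([1, 2, 4])                    # made-up gate widths, ascending
    Means = -9999. * np.ones((len(isum), nboot // isum[0]))  # narrowest gate gets the most draws and sets the width
    for ii, nwin in enumerate(isum):
        Means[ii, :nboot // nwin] = rng.normal(0., 1., nboot // nwin)   # fewer draws for wide gates
    Means = np.ma.masked_less(Means, -9995)                  # sentinel entries become masked
    print(np.ma.median(np.ma.abs(Means), axis=1))            # per-gate statistics ignore the mask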
@@ -539,10 +571,11 @@ class MRProc(QObject):
        
         # mask first few echoes, just for detection 
         NM = 2 
-        #[conv,E0,df,phi,T2] = quadratureDetect(np.imag(self.T2D[NM::]), np.real(self.T2D[NM::]), self.T2T[NM::], False, True)
+        [conv,E0,df,phi,T2] = quadratureDetect(np.imag(self.T2D[NM::]), np.real(self.T2D[NM::]), self.T2T[NM::], True, True, False)
         #[conv,E0,df,phi,T2] = quadratureDetect(np.imag(self.T2D[NM::]), np.real(self.T2D[NM::]), self.T2T[NM::], False, False, True)
         #                                                                              #CorrectFreq=False, BiExp=False, CorrectDC=False)
-        [conv,E0,df,phi,T2] = quadratureDetect2(np.imag(self.T2D[NM::]), np.real(self.T2D[NM::]), self.T2T[NM::])
+        
+        #[conv,E0,df,phi,T2] = quadratureDetect2(np.imag(self.T2D[NM::]), np.real(self.T2D[NM::]), self.T2T[NM::])
         self.zeta1 = phi
 
         #if conv: # failed convergence
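Note: the positional flags True, True, False in the reinstated quadratureDetect call map onto the keyword signature shown in the decay.py hunk further down, i.e. CorrectFreq=True, BiExp=True, CorrectDC=False.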
@@ -674,7 +707,7 @@ class MRProc(QObject):
             Timeb.generateGenv()
 
             # invert burst data, always use smooth to encourage fast time inversion
-            modelb  = logBarrier(Timeb.Genv, np.imag(self.T2Db)-dc, Timeb.T2Bins, MAXITER=500, sigma=betaScale*self.sigmaBurst, alpha=1e10, smooth=Smooth) 
+            modelb  = logBarrier(Timeb.Genv, np.imag(self.T2Db)-dc, Timeb.T2Bins, "lcurve", MAXITER=500, sigma=betaScale*self.sigmaBurst, alpha=1e6, smooth=Smooth)[0] 
             modelb2 = np.zeros(nT2)
             modelb2[0:len(modelb)] += modelb 
             #model = modelb2 # TODO remove 
@@ -684,10 +717,10 @@ class MRProc(QObject):
             #model += modelb 
             
             # Pass burt result as reference model
-            model  = logBarrier(Time.Genv, np.imag(self.T2D[mask:])-dc, Time.T2Bins, MAXITER=500, xr=modelb2, sigma=betaScale*self.sigma[mask:], alpha=1e10, smooth=Smooth)
+            model  = logBarrier(Time.Genv, np.imag(self.T2D[mask:])-dc, Time.T2Bins, "lcurve", MAXITER=500, xr=modelb2, sigma=betaScale*self.sigma[mask:], alpha=1e6, smooth=Smooth)[0]
 
         else:
-            model  = logBarrier(Time.Genv, np.imag(self.T2D[mask:])-dc, Time.T2Bins, MAXITER=500, sigma=betaScale*self.sigma[mask:], alpha=1e10, smooth=Smooth) #, smooth=True) #
+            model  = logBarrier(Time.Genv, np.imag(self.T2D[mask:])-dc, Time.T2Bins, "lcurve",  MAXITER=500, sigma=betaScale*self.sigma[mask:], alpha=1e6, smooth=Smooth)[0] #, smooth=True) #
 
         # forward model
         env = np.dot(Time.Genv, model) 
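All three inversion calls now pass the regularization-selection mode as a fourth positional argument ("lcurve", the new lambdastar parameter), start alpha at 1e6 instead of 1e10, and index [0] because logBarrier now returns a tuple rather than a bare model vector. A hedged usage sketch that mirrors this calling convention on a tiny synthetic T2 problem; the kernel, bins, and noise level below are invented for illustration, and only the signature and return shape come from this commit (logbarrier.py is assumed to be importable):

    import numpy as np
    from logbarrier import logBarrier

    T2Bins = np.logspace(-2, 0, 20)                  # hypothetical relaxation-time bins (s)
    t      = np.linspace(5e-3, 2.0, 200)             # decay times (s)
    G      = np.exp(-np.outer(t, 1./T2Bins))         # multi-exponential kernel
    m_true = np.zeros(len(T2Bins)); m_true[8] = 1.0
    d      = G @ m_true + np.random.normal(0, 0.01, len(t))

    model, ibreak, rmse, PHIM, PHID, ilam = logBarrier(
        G, d, T2Bins, "lcurve", MAXITER=100, sigma=0.01, alpha=1e6, smooth="Smooth")
    print(ibreak, rmse)                              # MRProc.py keeps only the model via [...][0]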

decay.py  (+3, -2)

@@ -469,9 +469,10 @@ def quadratureDetect(X, Y, tt, CorrectFreq=False, BiExp=False, CorrectDC=False):
             E0   =  r['$'](report,'par')[0]   # E01
             phi =  r['$'](report,'par')[1]   # phase 
             T2  =  r['$'](report,'par')[2]   # T2
-    #phi = 0.907655876627
+    phi = 0.907655876627
+    df = 0.
     #phi = 0
-    #print ("df", df)# = 0
+    print ("df", df)# = 0
     return conv, E0,df,phi,T2
     
 

logbarrier.py  (+140, -85)

@@ -5,26 +5,29 @@ import pylab
 import pprint 
 from scipy.optimize import nnls 
 
+import matplotlib.pyplot as plt
+
 def PhiB(mux, muy, minVal, maxVal, x):
     phib = mux * np.abs( np.sum(np.log( x-minVal)) )
-    #phib += np.abs( np.log(1. - maxVal / np.sum(x)) )
     return phib
-    #phib += np.log std::log(maxVal - x.segment(ib*block, block).sum());
-    
 
-    #template < typename  Scalar >
-    #Scalar PhiB2 (const Scalar& minVal, const Scalar& maxVal, const VectorXr x,
-    #              const int& block, const int &nblocks) {
-    #    Scalar phib  = std::abs((x.array() - minVal).log().sum());
-    #    //phib      += std::abs((maxVal - x.array()).log().sum()*muy);
-    #    for (int ib=0; ib<nblocks; ++ib) {
-    #        //HiSegments(ib) = x.segment(ib*block, block).sum();
-    #        phib += Scalar(block)*std::log(maxVal - x.segment(ib*block, block).sum());
-    #    }
-    #    return phib;
-    #}
-
-def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False, MAXITER=70, fignum=1000, sigma=1, callback=None):
+def curvaturefd(x, y, t):
+    x1 = np.gradient(x,t) 
+    x2 = np.gradient(x1,t) 
+    y1 = np.gradient(y,t) 
+    y2 = np.gradient(y1,t) 
+    return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
+
+def curvatureg(x, y):
+    from scipy.ndimage import gaussian_filter1d
+    #first and second derivative
+    x1 = gaussian_filter1d(x, sigma=1, order=1)#, mode='constant', cval=x[-1])
+    x2 = gaussian_filter1d(x1, sigma=1, order=1)#, mode='constant', cval=y[-1])
+    y1 = gaussian_filter1d(y, sigma=1, order=1)#, mode='constant', cval=x1[-1])
+    y2 = gaussian_filter1d(y1, sigma=1, order=1)#, mode='constant', cval=y1[-1])
+    return np.abs(x1*y2 - y1*x2) / np.power(x1**2 + y1**2, 3./2)
+
+def logBarrier(A, b, T2Bins, lambdastar, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False, MAXITER=70, fignum=1000, sigma=1, callback=None):
     """Impliments a log barrier Tikhonov solution to a linear system of equations 
         Ax = b  s.t.  x_min < x < x_max. A log-barrier term is used for the constraint
     """
@@ -35,13 +38,11 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
     Wd =    (np.eye(len(b)) / (sigma))        # Wd = eye( sigma )
     WdTWd = (np.eye(len(b)) / (sigma**2))     # Wd = eye( sigma )
 
-    #print ("calculating AT WdT.Wd A ") # AT WdTWd A with known data matrix 
-    #print (" A.shape" , np.shape(A), np.shape(b))
     ATWdTWdA = np.dot(A.conj().transpose(), np.dot( WdTWd, A ))     # TODO, implicit calculation instead?
     N = np.shape(A)[1]                        # number of model
     M = np.shape(A)[0]                        # number of data
-    SIGMA = .25  #.25 #.125 # .25 #.01#3e-1
-    EPSILON = 1e-35 # was 35 
+    SIGMA = .25 
+    EPSILON = 1e-35 
 
     # reference model
     if np.size(xr) == 1:
@@ -54,9 +55,8 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
         x = 1e-10 + x_0
         
     # Construct model constraint base   
-    #print ("constructing Phim")
     Phim_base = np.zeros( [N , N] ) 
-    a1 = 1.0     # smallest
+    a1 = .05     # smallest too
     
     # calculate largest term            
     D1 = 1./abs(T2Bins[1]-T2Bins[0])
@@ -64,56 +64,53 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
     #a2 = 1. #(1./(2.*D1+D2))    # smooth
     
     if smooth == "Both":
-        #print ("SMOOTH")
-        # Smooth model
-        print ("Both small and smooth model")
+        #print ("Both small and smooth model")
         for ip in range(N):
             D1 = 0.
             D2 = 0.
-            DMAX = 2./(T2Bins[1]-T2Bins[0])
             if ip > 0:
-                D1 = 1./abs(T2Bins[ip]-T2Bins[ip-1])
+                #D1 = np.sqrt(1./abs(T2Bins[ip]-T2Bins[ip-1]))**.5
+                D1 = (1./abs(T2Bins[ip]-T2Bins[ip-1])) #**2
             if ip < N-1:
-                D2 = 1./abs(T2Bins[ip+1]-T2Bins[ip])
+                #D2 = np.sqrt(1./abs(T2Bins[ip+1]-T2Bins[ip]))**.5
+                D2 = (1./abs(T2Bins[ip+1]-T2Bins[ip])) #**2
             if ip > 0:
-                Phim_base[ip,ip-1] = -(D1)               # smooth in log space
+                Phim_base[ip,ip-1] =   -(D1)      
             if ip == 0:
-                Phim_base[ip,ip  ] =  (D1+D2) + a1       # Encourage a little low model, no a1
+                Phim_base[ip,ip  ] = 2.*(D1+D2)  
             elif ip == N-1:
-                Phim_base[ip,ip  ] =  (D1+D2) + a1       # Penalize long decays
+                Phim_base[ip,ip  ] = 2.*(D1+D2) 
             else:
-                Phim_base[ip,ip  ] =  (D1+D2) + a1       # Smooth and small
+                Phim_base[ip,ip  ] = 2.*(D1+D2)
             if ip < N-1:
-                Phim_base[ip,ip+1] = -(D2)               # smooth in log space
-
-        #Phim_base /= np.max(Phim_base)
-        #Phim_base += a1*np.eye(N)
+                Phim_base[ip,ip+1] =   -(D2)  
+        Phim_base /= np.max(Phim_base)            # normalize 
+        Phim_base += a1*np.eye(N)
 
     elif smooth == "Smooth":
-        print ("Smooth model")
+        #print ("Smooth model")
         for ip in range(N):
             if ip > 0:
                 Phim_base[ip,ip-1] = -1    # smooth in log space
             if ip == 0:
-                Phim_base[ip,ip  ] = 1.0   # Encourage a little low model
+                Phim_base[ip,ip  ] = 2.05   # Encourage a little low model
            elif ip == N-1:
-                Phim_base[ip,ip  ] = 8.0   # Penalize long decays
+                Phim_base[ip,ip  ] = 2.5   # Penalize long decays
            else:
-                Phim_base[ip,ip  ] = 2.0   # Smooth and small
+                Phim_base[ip,ip  ] = 2.1   # Smooth and small
             if ip < N-1:
                 Phim_base[ip,ip+1] = -1    # smooth in log space
-        #print(Phim_base)    
 
-    else: 
-        print ("SMALLEST")
-        # Smallest model
+    elif smooth == "Smallest":
         for ip in range(N):
             Phim_base[ip,ip  ] = 1.
+    else: 
+        print("non valid model constraint:", smooth)
+        exit()
     
     Phi_m =  alpha*Phim_base
     WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)            
     b_pre = np.dot(A, x)
-
     phid = np.linalg.norm( np.dot(Wd, (b-b_pre)) )**2
     phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
 
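For smooth="Both" the constraint matrix is now a bin-width-weighted second-difference (discrete Laplacian) operator, normalized to a unit maximum entry and then given a small a1 = 0.05 identity contribution as the smallest-model term; an unrecognized smooth string now aborts instead of silently falling back to a smallest-model constraint. A small sketch that builds the same operator for hypothetical bins (only the construction mirrors the hunk, the bin values are invented):

    import numpy as np

    T2Bins = np.logspace(-2, 0, 6)        # hypothetical relaxation-time bins
    N, a1 = len(T2Bins), 0.05
    P = np.zeros((N, N))
    for ip in range(N):
        D1 = 1./abs(T2Bins[ip] - T2Bins[ip-1]) if ip > 0 else 0.
        D2 = 1./abs(T2Bins[ip+1] - T2Bins[ip]) if ip < N-1 else 0.
        if ip > 0:   P[ip, ip-1] = -D1
        if ip < N-1: P[ip, ip+1] = -D2
        P[ip, ip] = 2.*(D1 + D2)
    P /= np.max(P)                        # normalize, as the "Both" branch now does
    P += a1*np.eye(N)                     # small smallest-model contribution
    print(np.round(P, 3))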
@@ -121,24 +118,28 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
     phib = PhiB(mu1, mu2, 0, 1e8, x) 
     mu1 = ((phid + alpha*phim) / phib)
 
-    #print ("iteration", -1, mu1, mu2, phib, phid, phim, len(b))
+    PHIM = []
+    PHID = []
+    MOD = []
+
+    ALPHA = []
+    ALPHA.append(alpha)
     for i in range(MAXITER):
-            
-        b_pre = np.dot(A, x)
-        phid = np.linalg.norm(np.dot(Wd, (b-b_pre)))**2
-        # technically phim should not be on WmTWm matrix. So no need to square result. 
-        phim = np.linalg.norm(np.dot(Phim_base, (x-xr)))#**2
-        phib = PhiB(mu1, mu2, 0, 1e8, x) 
+        
         Phi_m =  alpha*Phim_base
         
+        # reset mu1 at each iteration 
+        # Calvetti -> No ; Li -> Yes   
+        # without this, non monotonic convergence occurs...which is OK if you really trust your noise 
         mu1 = ((phid + alpha*phim) / phib) 
 
         WmTWm = Phim_base # np.dot(Phim_base, Phim_base.T)            
-        #ztilde = x
-        #print("ztilde", ztilde)
         phid_old = phid
         inner = 0
-        First = True
+
+        First = True # guarantee entry 
+
+        xp = np.copy(x) # prior step x 
 
         while ( (phib / (phid+alpha*phim)) > EPSILON  or First==True ):
 
@@ -154,14 +155,14 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
             AA = ATWdTWdA + mu1*X2 + mu2*Y2 + Phi_m 
             M = np.eye( N ) * (1./np.diag(ATWdTWdA + mu1*X2 + mu2*Y2 + Phi_m))
         
-            # Solve system (newton step) 
-            b2 = np.dot(A.transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) + 2.*mu2*np.diag(Y1) - alpha*np.dot(WmTWm,(x-xr))
-            ztilde = iter.cg(AA, b2, M=M) # tol=1e-3*phid, maxiter=200, callback=callback) #, tol=1e-2, maxiter) #, x0=x) #, tol=ttol) #, M=M, x0=x)        
-            h = (ztilde[0]) 
+            # Solve system (newton step) (Li)
+            b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b-b_pre) ) + 2.*mu1*np.diag(X1) + 2.*mu2*np.diag(Y1) - alpha*np.dot(WmTWm,(x-xr))
+            ztilde = iter.cg(AA, b2, M = M) 
+            h = (ztilde[0].real) 
             
-            # Solve system (direct solution) 
+            # Solve system (direct solution) (Calvetti) 
             #b2 = np.dot(A.conj().transpose(), np.dot(WdTWd, b)) + 2.*mu1*np.diag(X1) + 2.*mu2*np.diag(Y1) - alpha*np.dot(WmTWm,(x-xr))
-            #ztilde = iter.cg(AA, b2, x0=x) #, tol=1e-2) #, x0=x) #, tol=ttol) #, M=M, x0=x)        
+            #ztilde = iter.cg(AA, b2, M=M, x0=x) 
             #h = (ztilde[0].real - x) 
 
             # step size
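The Newton step is still solved with a preconditioned conjugate-gradient call; the (Li) variant keeps the incremental right-hand side b - b_pre, uses the conjugate transpose of A, and now explicitly keeps only the real part of the CG solution. iter is presumably scipy.sparse.linalg imported earlier in the file; a standalone sketch of the same kind of Jacobi-preconditioned CG solve on a synthetic SPD system:

    import numpy as np
    import scipy.sparse.linalg as spla

    rng = np.random.default_rng(0)
    N  = 50
    Q  = rng.normal(size=(N, N))
    AA = Q.T @ Q + N*np.eye(N)                # SPD stand-in for ATWdTWdA + barrier terms + Phi_m
    b2 = rng.normal(size=N)
    M  = np.diag(1./np.diag(AA))              # Jacobi preconditioner, as in the hunk above
    h, info = spla.cg(AA, b2, M=M)
    print(info, np.linalg.norm(AA @ h - b2))  # info == 0 on convergence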
@@ -169,8 +170,7 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
             
             ##########################################################
             # Update and fix any over/under stepping
-            x = x+d*h 
-            #x = np.max( ( (minVal+1e-120)*np.ones(N),  x+d*h), 0)
+            x += d*h
         
             # Determine mu steps to take
             s1 = mu1 * (np.dot(X2, ztilde[0].real) - 2.*np.diag(X1))
@@ -181,32 +181,87 @@ def logBarrier(A, b, T2Bins, x_0=0, xr=0, alpha=10, mu1=10, mu2=10, smooth=False
             mu2 = SIGMA/N * np.abs(np.dot(s2, x))
             
             b_pre = np.dot(A, x)
-            phid = np.linalg.norm(np.dot(Wd, (b-b_pre)))**2
-            phim = np.linalg.norm(np.dot(Phim_base, (x-xr)))#**2
+            phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
+            phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
             phib = PhiB(mu1, mu2, minVal, maxVal, x)
             inner += 1
         
+        PHIM.append(phim)      
+        PHID.append(phid)      
+        MOD.append(np.copy(x))  
+
         # determine alpha
         scale = (len(b)/phid)
-        alpha *= scale  #**(1/6) 
-
-        score = np.sqrt(phid/(len(b)+1.)) # unbiased  
- 
-        # check stopping criteria 
-        if score < 1: 
-            print ("*overshot* optimal solution found") #, alpha, score)
-            break
-        if score < 1.1: # or np.linalg.norm(x_old-x) < 1e-5 or phid > phid_old:
-            #print ("overshoot")
-            #alpha *= 10
-            print ("optimal solution found") #, alpha, score)
-            break
-        if i > 10 and (np.sqrt(phid_old/(len(b)+1.)) - score) < 1e-2: # 1e-2
-            print ("slow convergence") #, alpha, score, i, scale, score-np.sqrt(phid_old/len(b)))
-            break
-            
-    print ( "alpha","phid", "iter", "search", "prior" )
-    print ( alpha, score, i, scale, np.sqrt(phid_old/(len(b)+1)))
+        #alpha *= np.sqrt(scale)
+        alpha *= min(scale, .95) # was .85...
+        ALPHA.append(alpha)
+        #alpha = ALPHA[i+1]
+        
+
+#         if np.sqrt(phid/len(b)) < 0.97: 
+#             ibreak = -1
+#             print ("------------overshot--------------------", alpha, np.sqrt(phid/len(b)), ibreak)
+#             alpha *= 2. #0
+#             x -= d*h
+#             b_pre = np.dot(A, x)
+#             phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
+#             phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )#**2
+#             mu1 = ((phid + alpha*phim) / phib)
+        if lambdastar == "discrepency": 
+            if np.sqrt(phid/len(b)) < 1.00 or alpha < 1e-5: 
+                ibreak = 1
+                print ("optimal solution found", alpha, np.sqrt(phid/len(b)), ibreak)
+                break
+        # slow convergence, bail and use L-curve 
+        # TI- only use L-curve. Otherwise results for perlin noise are too spurious for paper.  
+        if lambdastar == "lcurve": 
+            if i>20 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-4): 
+            #if np.sqrt(phid/len(b)) < 3.0 and ((np.sqrt(phid_old/len(b))-np.sqrt(phid/len(b))) < 1e-3): 
+                ibreak = 1
+                MOD = np.array(MOD)
+                print ("###########################") #slow convergence", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
+                print ("Using L-curve criteria") 
+                kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
+                #kappa2 = curvatureg(np.log(np.array(PHIM)), np.log(np.array(PHID)))
+                #kappa = curvature( np.array(PHIM), np.array(PHID))
+                x = MOD[ np.argmax(kappa) ]
+                b_pre = np.dot(A, x)
+                phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
+                phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
+                mu1 = ((phid + alpha*phim) / phib) 
+                print ("L-curve selected", alpha, "phid_old", np.sqrt(phid_old/len(b)), "phid", np.sqrt(phid/len(b)), ibreak)
+                print ("###########################")
+                if np.sqrt(phid/len(b)) <= 1:
+                    ibreak=0
+
+                #plt.figure()
+                #plt.plot( (np.array(PHIM)),  np.log(np.array(PHID)/len(b)), '.-')
+                #plt.plot(  ((np.array(PHIM))[np.argmax(kappa)]) , np.log( (np.array(PHID)/len(b))[np.argmax(kappa)] ), '.', markersize=12)
+                #plt.axhline()
+                #plt.plot( np.array(PHIM), np.array(PHID), '.-')
+                #plt.plot( np.array(PHIM)[np.argmax(kappa)], np.array(PHID)[np.argmax(kappa)], '.', markersize=12)
+                #plt.savefig('lcurve.pdf')
+                break
+
+    PHIM = np.array(PHIM)
+    PHID = np.array(PHID)
+
+    if (i == MAXITER-1 ):
+        ibreak = 2
+        print("Reached max iterations!!", alpha, np.sqrt(phid/len(b)), ibreak)
+        kappa = curvaturefd(np.log(np.array(PHIM)), np.log(np.array(PHID)), ALPHA[0:-1])
+        x = MOD[ np.argmax(kappa) ]
+        b_pre = np.dot(A, x)
+        phid = np.linalg.norm( np.dot(Wd, (b-b_pre)))**2
+        phim = np.linalg.norm( np.dot(Phim_base, (x-xr)) )**2
+        mu1 = ((phid + alpha*phim) / phib) 
+
+    if lambdastar == "lcurve":
+        return x, ibreak, np.sqrt(phid/len(b)), PHIM, PHID/len(b), np.argmax(kappa)
+    else:
+        return x, ibreak, np.sqrt(phid/len(b))
+
 
-    return x
 
+if __name__ == "__main__":
+    print("Test")
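With lambdastar="lcurve" the solver no longer stops at a target misfit: it records phi_d, phi_m, and the model at every outer iteration while alpha is driven steadily downward (multiplied by min(scale, 0.95) each pass), and once convergence stalls (or MAXITER is hit) it keeps the iterate at the corner of the L-curve, i.e. the maximum of curvaturefd applied to log(phi_m) versus log(phi_d). A minimal sketch of that selection step on a synthetic trade-off curve; the arrays below are stand-ins for PHIM, PHID, and ALPHA, and logbarrier.py is assumed to be importable:

    import numpy as np
    from logbarrier import curvaturefd

    alphas = np.logspace(2, -2, 40)                 # decaying regularization, as ALPHA records
    phid   = 1.0 + 50.0*alphas/(1.0 + alphas)       # data misfit falls as alpha decreases
    phim   = 0.1 + 10.0/(1.0 + alphas)              # model norm grows as alpha decreases
    kappa  = curvaturefd(np.log(phim), np.log(phid), alphas)
    ibest  = np.argmax(kappa)
    print("corner of the L-curve at alpha ~", alphas[ibest])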