3D EM based on Schur decomposition

// ===========================================================================
//
//       Filename:  bicgstab.h
//
//    Description:  BiCGStab and related Krylov solvers (explicit, preconditioned,
//                  and implicit Schur-complement forms) built on Eigen.
//
//        Version:  0.0
//        Created:  10/27/2009 03:15:06 PM
//       Revision:  none
//       Compiler:  g++ (c++)
//
//         Author:  Trevor Irons (ti)
//
//   Organisation:  Colorado School of Mines (CSM)
//                  United States Geological Survey (USGS)
//
//          Email:  tirons@mines.edu, tirons@usgs.gov
//
//  This program is free software: you can redistribute it and/or modify
//  it under the terms of the GNU General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  This program is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with this program.  If not, see <http://www.gnu.org/licenses/>.
//
// ===========================================================================
#pragma once

#ifndef  BICGSTAB_INC
#define  BICGSTAB_INC

#include <Eigen/Core>
#include <Eigen/Sparse>

#ifdef CHOLMODPRECONDITION
#include <Eigen/CholmodSupport>
#endif // CHOLMODPRECONDITION

//#include "unsupported/Eigen/IterativeSolvers"
//#include <unsupported/Eigen/SuperLUSupport>

#include <iostream>
#include <iomanip>      // std::setw, std::setprecision (iteration logs)
#include <string>
#include <complex>
#include <fstream>
#include <algorithm>    // std::max

#include <LemmaCore>
#include "timer.h"

using namespace Eigen;
using namespace Lemma;

//typedef Eigen::VectorXcd VectorXcr;
typedef Eigen::SparseMatrix<Complex> SparseMat;
// BiCGStab for A x = b, where A stores the upper triangle of a self-adjoint system
// and M is an explicit (approximate inverse) preconditioner applied as a matrix product.
// On Input
//   A         = system matrix (upper triangle, used via selfadjointView)
//   M         = preconditioner matrix (approximate inverse of A)
//   b         = right hand side
//   x         = initial guess
//   max_it    = maximum number of iterations
//   tol       = relative residual tolerance
// On Output
//   x         = solution vector
//   errorn    = achieved relative residual norm
//   iter_done = number of iterations performed
inline int bicgstab(const SparseMat &A, const SparseMat &M, const VectorXcr &b, VectorXcr &x,
        int &max_it, Real &tol, Real &errorn, int &iter_done,
        const bool& banner = true) {

    Complex omega, rho, rho_1, alpha, beta;
    Real    bnrm2, eps, errmin;
    int     n, iter; //, istat;

    // Determine size of system and init vectors
    n = x.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    VectorXcr p_hat(n);
    VectorXcr s(n);
    VectorXcr s_hat(n);
    VectorXcr t(n);
    VectorXcr xmin(n);

    if (banner) {
        std::cout << "Start BiCGStab, memory needed: "
                  << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";
    }

    // Initialise
    iter_done = 0;
    v.setConstant(0.);   // explicit zeroing; v and t are assigned before first use anyway
    t.setConstant(0.);
    eps = 1e-100;

    bnrm2 = b.norm();
    if (bnrm2 == 0) {
        x.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( x.norm() ) {
        r = b - A.selfadjointView<Eigen::Upper>()*x;
        //r = b - A*x;
    } else {
        r = b;
    }

    errorn = r.norm() / bnrm2;
    omega  = 1.;
    r_tld  = r;
    errmin = 1e30;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if ( abs(rho) < eps ) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p    = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p    = r;
        }

        // Apply the preconditioner (approximate inverse) to get p_hat
        //#pragma omp sections
        p_hat = M*p;
        //v = A*p_hat; // TODO double check
        v = A.selfadjointView<Eigen::Upper>()*p_hat; // TODO double check
        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            x.array() += alpha*p_hat.array();
            return (0);
        }

        s_hat = M*s;
        t = A.selfadjointView<Eigen::Upper>()*s_hat;
        //t = A*s_hat;
        omega = t.dot(s) / t.dot(t);
        x.array() += alpha*p_hat.array() + omega*s_hat.array();
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn < errmin) {
            // remember the model with the smallest norm
            errmin = errorn;
            xmin   = x;
        }

        if ( errorn <= tol )    return (0);
        if ( abs(omega) < eps ) return (0);
        rho_1 = rho;
    }
    return (0);
}
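// Example usage (a minimal sketch, not part of the library API): solve A x = b with a zero
// initial guess, where A holds the upper triangle of a self-adjoint system and M is an
// explicit sparse preconditioner. The function name and arguments here are illustrative only.
inline VectorXcr exampleBicgstab(const SparseMat& A, const SparseMat& M, const VectorXcr& b) {
    VectorXcr x = VectorXcr::Zero(b.size());   // zero initial guess
    int  max_it    = 500;                      // iteration cap
    Real tol       = 1e-8;                     // relative residual tolerance
    Real errorn    = 0;
    int  iter_done = 0;
    bicgstab(A, M, b, x, max_it, tol, errorn, iter_done, false);
    return x;                                  // errorn and iter_done report convergence
}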
// Preconditioned BiCGStab, where the preconditioner exposes a solve() method
// (any Eigen-style preconditioner or factorisation). Returns true on convergence.
template <typename Preconditioner>
bool preconditionedBiCGStab(const SparseMat &A, const Preconditioner &M,
        const Ref< VectorXcr const > b,
        Ref< VectorXcr > x,
        const int &max_it, const Real &tol,
        Real &errorn, int &iter_done) {

    Complex omega, rho, rho_1, alpha, beta;
    Real    bnrm2, eps;
    int     n, iter;
    Real    tol2 = tol*tol;

    // Determine size of system and init vectors
    n = x.size();
    VectorXcd r(n);
    VectorXcd r_tld(n);
    VectorXcd p(n);
    VectorXcd s(n);
    VectorXcd s_hat(n);
    VectorXcd p_hat(n);
    VectorXcd v = VectorXcr::Zero(n);
    VectorXcd t = VectorXcr::Zero(n);

    //std::cout << "Start BiCGStab, memory needed: "
    //          << (sizeof(Complex)*(8+2)*n/(1024.*1024)) / (1024.) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    eps = 1e-100;

    bnrm2 = b.squaredNorm();
    if (bnrm2 == 0) {
        x.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (false);
    }

    // If there is an initial guess
    if ( x.squaredNorm() ) {
        r = b - A.selfadjointView<Eigen::Upper>()*x;
    } else {
        r = b;
    }

    errorn = r.squaredNorm() / bnrm2;
    omega  = 1.;
    r_tld  = r;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if (abs(rho) < eps) {
            std::cerr << "arbitrary orthogonality issue in bicgstab\n";
            std::cerr << "consider restarting the solver\n";
            return (false);
        }

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p    = r + beta*(p-omega*v);
        } else {
            p    = r;
        }

        p_hat = M.solve(p);
        v.noalias() = A.selfadjointView<Eigen::Upper>()*p_hat;
        alpha = rho / r_tld.dot(v);
        s = r - alpha*v;
        errorn = s.squaredNorm()/bnrm2;

        if (errorn < tol2 && iter > 1) {
            x = x + alpha*p_hat;
            errorn = std::sqrt(errorn);
            return (true);
        }

        s_hat = M.solve(s);
        t.noalias() = A.selfadjointView<Eigen::Upper>()*s_hat;
        omega = t.dot(s) / t.dot(t);
        x += alpha*p_hat + omega*s_hat;
        r  = s - omega*t;
        errorn = r.squaredNorm() / bnrm2;
        iter_done = iter;

        if ( errorn <= tol2 || abs(omega) < eps) {
            errorn = std::sqrt(errorn);
            return (true);
        }

        rho_1 = rho;
    }
    return (false);
}
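// Example usage (sketch, illustrative names only): a Jacobi (diagonal) preconditioner is the
// simplest Preconditioner exposing compute()/solve(). A is assumed to store the upper triangle
// of the self-adjoint system, which still supplies the full diagonal.
inline bool examplePreconditionedBiCGStab(const SparseMat& A, const VectorXcr& b, VectorXcr& x) {
    Eigen::DiagonalPreconditioner<Complex> M;
    M.compute(A);                              // extracts the inverse diagonal of A
    Real errorn    = 0;
    int  iter_done = 0;
    return preconditionedBiCGStab(A, M, b, x, 500, Real(1e-8), errorn, iter_done);
}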
// Preconditioned conjugate-gradient style iteration (SCBiCG variant) for A x = b,
// where the preconditioner exposes a solve() method. Returns true on convergence.
template <typename Preconditioner>
bool preconditionedSCBiCG(const SparseMat &A, const Preconditioner &M,
        const Ref< VectorXcr const > b,
        Ref< VectorXcr > x,
        const int &max_iter, const Real &tol,
        Real &errorn, int &iter_done) {

    Real resid;
    VectorXcr p, z, q;
    Complex alpha, beta, rho, rho_1;

    Real normb = b.norm( );
    VectorXcr r = b - A*x;

    if (normb == 0.0) normb = 1;

    if ((resid = r.norm( ) / normb) <= tol) {
        errorn    = resid;
        iter_done = 0;
        return true;                       // already converged
    }

    for (int i = 1; i <= max_iter; i++) {

        z   = M.solve(r);
        rho = r.dot(z);

        if (i == 1) {
            p = z;
        } else {
            beta = rho / rho_1;
            p    = z + beta * p;
        }

        q     = A*p;
        alpha = rho / p.dot(q);

        x += alpha * p;
        r -= alpha * q;

        resid = r.norm( ) / normb;         // residual of the current iterate
        std::cout << "resid\t" << resid << std::endl;

        if (resid <= tol) {
            errorn    = resid;
            iter_done = i;
            return true;                   // converged
        }

        rho_1 = rho;
    }
    errorn = resid;
    return (false);                        // no convergence within max_iter
}
/** \internal Low-level conjugate gradient algorithm
  * \param mat The matrix A
  * \param rhs The right hand side vector b
  * \param x On input an initial solution, on output the computed solution.
  * \param precond A preconditioner able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the max number of iterations, on output the number of performed iterations.
  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
  */
template<typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugateGradient(const SparseMat& mat, const Rhs& rhs, Dest& x,
                       const Preconditioner& precond, int& iters,
                       typename Dest::RealScalar& tol_error)
{
    using std::sqrt;
    using std::abs;
    typedef typename Dest::RealScalar RealScalar;
    typedef typename Dest::Scalar Scalar;
    typedef Matrix<Scalar,Dynamic,1> VectorType;

    RealScalar tol = tol_error;
    int maxIters = iters;

    int n = mat.cols();

    VectorType residual = rhs - mat.selfadjointView<Eigen::Upper>() * x; // initial residual

    RealScalar rhsNorm2 = rhs.squaredNorm();
    if(rhsNorm2 == 0)
    {
        x.setZero();
        iters = 0;
        tol_error = 0;
        return;
    }
    RealScalar threshold = tol*tol*rhsNorm2;
    RealScalar residualNorm2 = residual.squaredNorm();
    if (residualNorm2 < threshold)
    {
        iters = 0;
        tol_error = sqrt(residualNorm2 / rhsNorm2);
        return;
    }

    VectorType p(n);
    p = precond.solve(residual);        // initial search direction

    VectorType z(n), tmp(n);
    RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
    int i = 0;
    while(i < maxIters)
    {
        tmp.noalias() = mat.selfadjointView<Eigen::Upper>() * p; // the bottleneck of the algorithm

        Scalar alpha = absNew / p.dot(tmp);  // the amount we travel on dir
        x += alpha * p;                      // update solution
        residual -= alpha * tmp;             // update residual

        residualNorm2 = residual.squaredNorm();
        if(residualNorm2 < threshold)
            break;

        z = precond.solve(residual);         // approximately solve for "A z = residual"

        RealScalar absOld = absNew;
        absNew = numext::real(residual.dot(z)); // update the absolute value of r
        RealScalar beta = absNew / absOld;      // Gram-Schmidt value used to create the new search direction
        p = z + beta * p;                       // update search direction
        i++;
    }
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    iters = i;
}
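// Example usage (sketch, illustrative names only): run the low-level CG kernel with a diagonal
// preconditioner. mat is assumed to hold the upper triangle of a self-adjoint positive-definite
// system; Real is Lemma's real scalar type.
inline Real exampleConjugateGradient(const SparseMat& mat, const VectorXcr& rhs, VectorXcr& x) {
    Eigen::DiagonalPreconditioner<Complex> precond;
    precond.compute(mat);
    int  iters     = 1000;    // in: iteration cap, out: iterations performed
    Real tol_error = 1e-10;   // in: tolerance, out: achieved relative error
    conjugateGradient(mat, rhs, x, precond, iters, tol_error);
    return tol_error;
}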
//  // Computes implicit
//  VectorXcr implicitDCInvBPhi (const SparseMat& D, const SparseMat& C,
//                  const SparseMat& B, const SparseMat& MC,
//                  const VectorXcr& Phi, Real& tol,
//                  int& max_it) {
//      int iter_done(0);
//      Real errorn(0);
//      VectorXcr b = B*Phi;
//      VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^1*b;
//      bicgstab(C, MC, b, y, max_it, tol, errorn, iter_done, false);
//      //std::cout << "Temp " << errorn << std::endl;
//      return D*y;
//  }

// Computes the implicit product  D * C^-1 * diag(ioms) * D^T * Phi;
// the inner solve C y = b uses the hand-rolled bicgstab above.
inline VectorXcr implicitDCInvBPhi (const SparseMat& D, const SparseMat& C,
        const VectorXcr& ioms, const SparseMat& MC,
        const VectorXcr& Phi, Real& tol,
        int& max_it) {
    int iter_done(0);
    Real errorn(0);
    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^-1*b;
    bicgstab(C, MC, b, y, max_it, tol, errorn, iter_done, false);
    //std::cout << "Temp " << errorn << std::endl;
    max_it = iter_done;
    return D*y;
}

// Same implicit product, but the inner solve uses Eigen's BiCGSTAB
// (the hand-rolled and direct variants are kept below, commented out).
template <typename Preconditioner>
VectorXcr implicitDCInvBPhi2 (const SparseMat& D, const SparseMat& C,
        const Ref<VectorXcr const> ioms, const Preconditioner& solver,
        const Ref<VectorXcr const> Phi, Real& tol,
        int& max_it) {

    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^-1*b;

    // Home made
    //int iter_done(0);
    //Real errorn(0);
    //preconditionedBiCGStab(C, solver, b, y, max_it, tol, errorn, iter_done); //, false); // Jacobi M
    //max_it = iter_done;

    // Eigen BiCGStab
    Eigen::BiCGSTAB<SparseMatrix<Complex> > BiCG;
    BiCG.compute( C );      // TODO move this out of this loop!
    y = BiCG.solve(b);
    max_it = BiCG.iterations();
    tol = BiCG.error();

    // Direct
    /*
    std::cout << "Computing LLT" << std::endl;
    Eigen::SimplicialLLT<SparseMatrix<Complex>, Eigen::Upper, Eigen::AMDOrdering<int> > LLT;
    LLT.compute(C);
    max_it = 1;
    std::cout << "Computed LLT" << std::endl;
    y = LLT.solve(b);
    */

    return D*y;
}

// Same implicit product, with the inner solve delegated to a prefactored solver
// (anything exposing a solve() method).
template < typename Solver >
inline VectorXcr implicitDCInvBPhi3 (const SparseMat& D, const Solver& solver,
        const Ref<VectorXcr const> ioms,
        const Ref<VectorXcr const> Phi, Real& tol,
        int& max_it) {
    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = solver.solve(b);
    max_it = 0;
    //max_it = solver.iterations();
    //errorn = solver.error();
    return D*y;
}

//  // Simple extraction of the entries listed in idx into the reduced array x1
//  void vmap( const Ref<VectorXcr const> x0, Ref<VectorXcr> x1, const std::vector<int>& idx ) {
//      for (unsigned int ii=0; ii<idx.size(); ++ii) {
//          x1(ii) = x0(idx[ii]);
//      }
//  }

// Simple extraction of the entries listed in idx into the reduced array x1
inline VectorXcr vmap( const Ref<VectorXcr const> x0, const std::vector<int>& idx ) {
    VectorXcr x1 = VectorXcr::Zero( idx.size() );
    for (unsigned int ii=0; ii<idx.size(); ++ii) {
        x1(ii) = x0(idx[ii]);
    }
    return x1;
}

// Reverse of the above: scatter the reduced array x1 back into x0 at the indices in idx
inline void ivmap( Ref<VectorXcr > x0, const Ref<VectorXcr const> x1, const std::vector<int>& idx ) {
    for (unsigned int ii=0; ii<idx.size(); ++ii) {
        x0(idx[ii]) = x1(ii);
    }
}
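// Example (sketch, illustrative values only): gather three entries of a length-5 vector into a
// reduced vector, work on it, and scatter it back. This is the gather/scatter pattern used by
// the reduced-index implicit solver below.
inline void exampleVmapRoundTrip() {
    VectorXcr full = VectorXcr::Constant(5, Complex(1., 0.));
    std::vector<int> idx = {0, 2, 4};      // indices of the active degrees of freedom
    VectorXcr reduced = vmap(full, idx);   // reduced.size() == 3
    reduced *= Complex(2., 0.);            // operate on the reduced system
    ivmap(full, reduced, idx);             // entries 0, 2 and 4 of full are now 2
}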
// Implicit BiCGStab on the reduced index set idx.
// On Input
//   D, ioms   = operators defining the implicit system (see implicitDCInvBPhi3)
//   idx       = indices of the active (reduced) degrees of freedom
//   rhs       = right hand side
//   phi       = initial guess, overwritten with the solution
//   solver    = prefactored solver for the inner C block
//   max_it    = maximum number of iterations
//   tol       = error tolerance
// On Output
//   phi       = solution vector
//   errorn    = achieved relative residual norm
//   iter_done = number of iterations performed
// Returns 0 on normal exit, 2 on non-monotonic (negative) convergence.
template < typename CSolver >
int implicitbicgstab(//const SparseMat& D,
        //const SparseMat& C,
        const Ref< Eigen::SparseMatrix<Complex> const > D,
        const std::vector<int>& idx,
        const Ref< VectorXcr const > ioms,
        const Ref< VectorXcr const > rhs,
        Ref< VectorXcr > phi,
        CSolver& solver,
        int &max_it, const Real &tol, Real &errorn, int &iter_done, std::ofstream& logio) {

    logio << "using the preconditioned implicit solver" << std::endl;

    Complex omega, rho, rho_1, alpha, beta;
    Real    tol2;
    int     iter, max_it2, max_it1;

    // Look at the reduced problem
    VectorXcr rhs2 = vmap(rhs, idx);
    VectorXcr phi2 = vmap(phi, idx);

    // Determine size of system and init vectors
    int n = idx.size();    // was phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr s(n);
    VectorXcr v = VectorXcr::Zero(n);
    VectorXcr t = VectorXcr::Zero(n);

    // TODO, refigure for implicit large system
    //  std::cout << "Start BiCGStab, memory needed: "
    //            << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    Real eps   = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        tol2 = tol;
        max_it2 = 50000;
        //r = rhs - implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2);
        r = rhs2 - vmap( implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2), idx );
    } else {
        r = vmap(rhs, idx);
    }

    jsw_timer timer;

    errorn = r.norm() / bnrm2;
    omega  = 1.;
    r_tld  = r;
    Real errornold = 1e14;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        timer.begin();

        rho = r_tld.dot(r);
        if (abs(rho) < eps) {
            ivmap( phi, phi2, idx );
            return (0);
        }

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p    = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p    = r;
        }

        tol2 = tol;
        max_it2 = 500000;
        //v = implicitDCInvBPhi2(D, C, ioms, solver, p, tol2, max_it2);
        ivmap(phi, p, idx);
        v = vmap(implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2), idx);
        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi2.array() += alpha*p.array();
            ivmap( phi, phi2, idx );
            return (0);
        }

        tol2 = tol;
        max_it1 = 500000;
        //t = implicitDCInvBPhi2(D, C, ioms, solver, s, tol2, max_it1);
        //t = implicitDCInvBPhi3(D, solver, ioms, s, tol2, max_it1);
        ivmap(phi, s, idx);
        t = vmap(implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it1), idx);
        omega = t.dot(s) / t.dot(t);
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn <= tol) {
            ivmap( phi, phi2, idx );
            return (0);
        }

        if (abs(omega) < eps) {
            ivmap( phi, phi2, idx );
            return (0);
        }
        rho_1 = rho;

        logio << "iteration " << std::setw(3) << iter
              << " errorn "   << std::setw(6) << std::setprecision(4) << std::scientific << errorn
            //<< "\timplicit iterations " << std::setw(5) << max_it1+max_it2
              << " time " << std::setw(6) << std::fixed << std::setprecision(2) << timer.end() << std::endl;

        // Check to see how progress is going
        if (errornold - errorn < 0) {
            logio << "Irregular non-monotonic (negative) convergence. Recommend restart.\n";
            ivmap( phi, phi2, idx );
            return (2);
        }

        /*
        if (errornold - errorn < 1e-14) {
            logio << "not making any progress. Giving up\n";
            return (1);
        }
        */

        //std::cout << "|| p-s ||" << (alpha*p - omega*s).norm() << std::endl;

        // only update phi if good things are happening
        phi2.array() += alpha*p.array() + omega*s.array();
        errornold = errorn;
    }
    ivmap( phi, phi2, idx );
    return (0);
}
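// Example usage (sketch, illustrative names only): factor the inner block C once with a sparse
// Cholesky (the same factorisation used in the commented direct branch of implicitDCInvBPhi2)
// and pass it as the inner solver. D, C, ioms, rhs, phi and idx are assumed to come from the
// surrounding 3D EM assembly, with C stored as its upper triangle.
inline int exampleImplicitBicgstab(const SparseMat& D, const SparseMat& C,
        const VectorXcr& ioms, const VectorXcr& rhs, VectorXcr& phi,
        const std::vector<int>& idx, std::ofstream& logio) {
    // Factor C once; the implicit solver reuses the factorisation for every inner solve.
    Eigen::SimplicialLLT< SparseMatrix<Complex>, Eigen::Upper, Eigen::AMDOrdering<int> > CSolver;
    CSolver.compute(C);
    int  max_it    = 200;
    Real errorn    = 0;
    int  iter_done = 0;
    return implicitbicgstab(D, idx, ioms, rhs, phi, CSolver, max_it, Real(1e-6),
                            errorn, iter_done, logio);
}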
// Implicit BiCGStab using an Eigen factorisation/solver for the inner system.
// On Input
//   D, ioms   = operators defining the implicit system (see implicitDCInvBPhi3)
//   rhs       = right hand side
//   phi       = initial guess, overwritten with the solution
//   solver    = prefactored Eigen solver for the inner C block
//   max_it    = maximum number of iterations
//   tol       = error tolerance
// On Output
//   phi       = solution vector
//   errorn    = achieved relative residual norm
//   iter_done = number of iterations performed
// Returns 0 on normal exit, 2 on non-monotonic (negative) convergence.
template < typename Solver >
int implicitbicgstab_ei(const SparseMat& D,
        const Ref< VectorXcr const > ioms,
        const Ref< VectorXcr const > rhs,
        Ref< VectorXcr > phi,
        Solver& solver,
        int &max_it, const Real &tol, Real &errorn, int &iter_done, std::ofstream& logio) {

    logio << "using the preconditioned Eigen implicit solver" << std::endl;

    Complex omega, rho, rho_1, alpha, beta;
    Real    tol2;
    int     iter, max_it2, max_it1;

    // Determine size of system and init vectors
    int n = phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    VectorXcr s(n);
    VectorXcr t(n);

    // Initialise
    iter_done = 0;
    Real eps   = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        tol2 = tol;
        max_it2 = 50000;
        r = rhs - implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2);
    } else {
        r = rhs;
    }

    jsw_timer timer;

    errorn = r.norm() / bnrm2;
    omega  = 1.;
    r_tld  = r;
    Real errornold = 1e14;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        timer.begin();

        rho = r_tld.dot(r);
        if (abs(rho) < eps) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p    = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p    = r;
        }

        tol2 = tol;
        max_it2 = 500000;
        v = implicitDCInvBPhi3(D, solver, ioms, p, tol2, max_it2);
        max_it2 = 0; // solver.iterations();
        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi.array() += alpha*p.array();
            return (0);
        }

        tol2 = tol;
        max_it1 = 500000;
        t = implicitDCInvBPhi3(D, solver, ioms, s, tol2, max_it1);
        max_it1 = 0; // solver.iterations();
        omega = t.dot(s) / t.dot(t);
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn <= tol )   return (0);
        if (abs(omega) < eps) return (0);
        rho_1 = rho;

        logio << "iteration " << std::setw(4) << iter
              << "\terrorn "  << std::setw(6) << std::setprecision(4) << std::scientific << errorn
              << "\timplicit iterations " << std::setw(5) << max_it1+max_it2
              << "\ttime " << std::setw(10) << std::fixed << std::setprecision(2) << timer.end() << std::endl;

        // Check to see how progress is going
        if (errornold - errorn < 0) {
            logio << "irregular (negative) convergence. Try again? \n";
            return (2);
        }

        // only update phi if good things are happening
        phi.array() += alpha*p.array() + omega*s.array();
        errornold = errorn;
    }
    return (0);
}
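// Example usage (sketch, illustrative names only): the inner solver may also be an Eigen
// iterative solver such as BiCGSTAB, mirroring implicitDCInvBPhi2. D, C, ioms, rhs and phi
// are assumed to come from the surrounding 3D EM assembly.
inline int exampleImplicitBicgstabEi(const SparseMat& D, const SparseMat& C,
        const VectorXcr& ioms, const VectorXcr& rhs, VectorXcr& phi, std::ofstream& logio) {
    Eigen::BiCGSTAB< SparseMatrix<Complex> > CSolver;
    CSolver.compute(C);                        // set up the inner solver once
    int  max_it    = 200;
    Real errorn    = 0;
    int  iter_done = 0;
    return implicitbicgstab_ei(D, ioms, rhs, phi, CSolver, max_it, Real(1e-6),
                               errorn, iter_done, logio);
}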
// Non-templated implicit BiCGStab; the inner solve uses the hand-rolled bicgstab
// with an explicit sparse preconditioner MC.
// On Input
//   D, C, ioms = operators defining the implicit system (see implicitDCInvBPhi)
//   MC         = preconditioner for the inner C solve
//   rhs        = right hand side
//   phi        = initial guess, overwritten with the solution
//   max_it     = maximum number of iterations
//   tol        = error tolerance
// On Output
//   phi        = solution vector
//   errorn     = achieved relative residual norm
//   iter_done  = number of iterations performed
// Returns 0 on normal exit, 2 if no progress is being made.
inline int implicitbicgstabnt(const SparseMat& D,
        const SparseMat& C,
        const VectorXcr& ioms,
        const SparseMat& MC,
        Eigen::Ref< VectorXcr > rhs,
        VectorXcr& phi,
        int &max_it, Real &tol, Real &errorn, int &iter_done) {

    Complex omega, rho, rho_1, alpha, beta;
    Real    errmin, tol2;
    int     iter, max_it2;

//      // Cholesky decomp
//      SparseLLT<SparseMatrix<Complex>, Cholmod>
//          CholC(SparseMatrix<Complex> (C.real()) );
//      if(!CholC.succeeded()) {
//          std::cerr << "decomposition failed\n";
//          return EXIT_FAILURE;
//      }

    // Determine size of system and init vectors
    int n = phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    //VectorXcr p_hat(n);
    VectorXcr s(n);
    //VectorXcr s_hat(n);
    VectorXcr t(n);
    VectorXcr xmin(n);

    // TODO, refigure for implicit large system
    //  std::cout << "Start BiCGStab, memory needed: "
    //            << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    v.setConstant(0.);   // explicit zeroing; v and t are assigned before first use anyway
    t.setConstant(0.);
    Real eps   = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        //r = rhs - A*phi;
        tol2 = tol;
        max_it2 = 50000;
        std::cout << "Initial guess " << std::endl;
        r = rhs - implicitDCInvBPhi(D, C, ioms, MC, phi, tol2, max_it2);
        //r = rhs - implicitDCInvBPhi (D, C, B, CholC, phi, tol2, max_it2);
    } else {
        r = rhs;
    }

    errorn = r.norm() / bnrm2;
    //std::cout << "Initial |r| " << r.norm() << "\t" << errorn << std::endl;
    omega  = 1.;
    r_tld  = r;
    errmin = 1e30;
    Real errornold = 1e6;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if (abs(rho) < eps) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p    = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p    = r;
        }

        // Use the implicit operator to get an approximate answer
        //p_hat = p;
        tol2 = std::max(1e-4*errorn, tol);
        tol2 = tol;           // (adaptive tolerance above is currently overridden)
        max_it2 = 500000;
        //v = A*p_hat;
        v = implicitDCInvBPhi(D, C, ioms, MC, p, tol2, max_it2);
        //v = implicitDCInvBPhi(D, C, B, CholC, p, tol2, max_it2);
        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi.array() += alpha*p.array();
            return (0);
        }

        // s_hat = M*s;
        //tol2 = tol;
        tol2 = std::max(1e-4*errorn, tol);
        tol2 = tol;           // (adaptive tolerance above is currently overridden)
        max_it2 = 50000;
        // t = A*s_hat;
        t = implicitDCInvBPhi(D, C, ioms, MC, s, tol2, max_it2);
        //t = implicitDCInvBPhi(D, C, B, CholC, s, tol2, max_it2);
        omega = t.dot(s) / t.dot(t);
        phi.array() += alpha*p.array() + omega*s.array();
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn < errmin) {
            // remember the model with the smallest norm
            errmin = errorn;
            xmin   = phi;
        }

        if (errorn <= tol )   return (0);
        if (abs(omega) < eps) return (0);
        rho_1 = rho;

        std::cout << "iteration " << std::setw(4) << iter << "\terrorn " << std::setw(6) << std::scientific << errorn
                  << "\timplicit iterations " << std::setw(5) << max_it2 << std::endl;

        if (errornold - errorn < 1e-14) {
            std::cout << "not making any progress. Giving up\n";
            return (2);
        }
        errornold = errorn;
    }
    return (0);
}

#endif   // ----- #ifndef BICGSTAB_INC -----

//int bicgstab(const SparseMat &A, Eigen::SparseLU< Eigen::SparseMatrix<Complex, RowMajor> ,