// ===========================================================================
//
//       Filename:  bicgstab.h
//
//    Description:  Complex-valued BiCGStab and preconditioned BiCGStab solvers
//                  (plus a CG kernel) for Eigen sparse systems, including
//                  implicit-operator variants.
//
//        Version:  0.0
//        Created:  10/27/2009 03:15:06 PM
//       Revision:  none
//       Compiler:  g++ (c++)
//
//         Author:  Trevor Irons (ti)
//
//   Organisation:  Colorado School of Mines (CSM)
//                  United States Geological Survey (USGS)
//
//          Email:  tirons@mines.edu, tirons@usgs.gov
//
//  This program is free software: you can redistribute it and/or modify
//  it under the terms of the GNU General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  This program is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with this program.  If not, see <http://www.gnu.org/licenses/>.
//
// ===========================================================================

#pragma once

#include <Eigen/Core>
#include <Eigen/Sparse>

#ifdef CHOLMODPRECONDITION
#include <Eigen/CholmodSupport>
#endif // CHOLMODPRECONDITION

//#include "unsupported/Eigen/IterativeSolvers"
//#include <unsupported/Eigen/SuperLUSupport>

#include <iostream>
#include <iomanip>      // std::setw and std::setprecision used in the logging below
#include <string>
#include <complex>
#include <fstream>
#include "lemma.h"
#include "timer.h"

using namespace Eigen;
using namespace Lemma;

//typedef Eigen::VectorXcd VectorXcr;
typedef Eigen::SparseMatrix<Complex> SparseMat;

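// Note: Complex, Real, and VectorXcr are not defined in this header; they are
// assumed to come from "lemma.h" (presumably Complex = std::complex<Real> and
// VectorXcr an Eigen column vector of Complex). Everything below relies on that.
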
// On Input
//    A         = system matrix (only the upper triangle is referenced,
//                via selfadjointView<Eigen::Upper>)
//    M         = preconditioner, applied by multiplication (an approximate inverse of A)
//    b         = right hand side
//    x         = initial guess, overwritten with the solution
//    max_it    = maximum number of iterations
//    tol       = relative residual tolerance
// On Output
//    x         = solution vector
//    errorn    = achieved relative residual norm
//    iter_done = number of iterations performed
int bicgstab(const SparseMat &A, const SparseMat &M, const VectorXcr &b, VectorXcr &x,
             int &max_it, Real &tol, Real &errorn, int &iter_done,
             const bool& banner = true) {

    Complex omega, rho, rho_1, alpha, beta;
    Real bnrm2, eps, errmin;
    int n, iter; //, istat;

    // Determine size of system and init vectors
    n = x.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    VectorXcr p_hat(n);
    VectorXcr s(n);
    VectorXcr s_hat(n);
    VectorXcr t(n);
    VectorXcr xmin(n);

    if (banner) {
        std::cout << "Start BiCGStab, memory needed: "
                  << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";
    }

    // Initialise
    iter_done = 0;
    v.setConstant(0.); // not necessary I don't think
    t.setConstant(0.);
    eps = 1e-100;

    bnrm2 = b.norm();
    if (bnrm2 == 0) {
        x.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( x.norm() ) {
        r = b - A.selfadjointView<Eigen::Upper>()*x;
        //r = b - A*x;
    } else {
        r = b;
    }

    errorn = r.norm() / bnrm2;
    omega = 1.;
    r_tld = r;
    errmin = 1e30;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if ( abs(rho) < eps) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p = r;
        }

        // Use pseudo inverse to get approximate answer
        //#pragma omp sections
        p_hat = M*p;
        //v = A*p_hat;  // TODO double check
        v = A.selfadjointView<Eigen::Upper>()*p_hat;  // TODO double check

        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            x.array() += alpha*p_hat.array();
            return (0);
        }

        s_hat = M*s;
        t = A.selfadjointView<Eigen::Upper>()*s_hat;
        //t = A*s_hat;

        omega = t.dot(s) / t.dot(t);
        x.array() += alpha*p_hat.array() + omega*s_hat.array();
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn < errmin) {
            // remember the model with the smallest norm
            errmin = errorn;
            xmin = x;
        }

        if ( errorn <= tol ) return (0);
        if ( abs(omega) < eps ) return (0);
        rho_1 = rho;

    }
    return (0);
}
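
// A minimal usage sketch for bicgstab() (illustrative only; the matrix, the
// Jacobi-style multiplicative preconditioner, and the tolerances below are
// assumptions, not part of this header):
//
//     SparseMat A;                                    // upper triangle of a self-adjoint system
//     // ... assemble A and the right hand side b ...
//     VectorXcr b(A.rows());
//     VectorXcr x = VectorXcr::Zero(A.rows());
//     SparseMat M(A.rows(), A.cols());                // M applied by multiplication
//     for (int k = 0; k < A.rows(); ++k) M.insert(k, k) = 1. / A.coeff(k, k);
//     int max_it = 1000, iter_done = 0;
//     Real tol = 1e-8, errorn = 0;
//     bicgstab(A, M, b, x, max_it, tol, errorn, iter_done);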

template <typename Preconditioner>
bool preconditionedBiCGStab(const SparseMat &A, const Preconditioner &M,
        const Ref< VectorXcr const > b,
        Ref <VectorXcr > x,
        const int &max_it, const Real &tol,
        Real &errorn, int &iter_done) {

    Complex omega, rho, rho_1, alpha, beta;
    Real bnrm2, eps;
    int n, iter;
    Real tol2 = tol*tol;

    // Determine size of system and init vectors
    n = x.size();

    VectorXcd r(n);
    VectorXcd r_tld(n);
    VectorXcd p(n);
    VectorXcd s(n);
    VectorXcd s_hat(n);
    VectorXcd p_hat(n);
    VectorXcd v = VectorXcr::Zero(n);
    VectorXcd t = VectorXcr::Zero(n);

    //std::cout << "Start BiCGStab, memory needed: "
    //          << (sizeof(Complex)*(8+2)*n/(1024.*1024)) / (1024.) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    eps = 1e-100;

    bnrm2 = b.squaredNorm();
    if (bnrm2 == 0) {
        x.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (false);
    }

    // If there is an initial guess
    if ( x.squaredNorm() ) {
        r = b - A.selfadjointView<Eigen::Upper>()*x;
    } else {
        r = b;
    }

    errorn = r.squaredNorm() / bnrm2;
    omega = 1.;
    r_tld = r;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if (abs(rho) < eps) {
            std::cerr << "arbitrary orthogonality issue in bicgstab\n";
            std::cerr << "consider eigen restarting\n";
            return (false);
        }

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p = r + beta*(p-omega*v);
        } else {
            p = r;
        }

        p_hat = M.solve(p);
        v.noalias() = A.selfadjointView<Eigen::Upper>()*p_hat;

        alpha = rho / r_tld.dot(v);
        s = r - alpha*v;
        errorn = s.squaredNorm()/bnrm2;

        if (errorn < tol2 && iter > 1) {
            x = x + alpha*p_hat;
            errorn = std::sqrt(errorn);
            return (true);
        }

        s_hat = M.solve(s);
        t.noalias() = A.selfadjointView<Eigen::Upper>()*s_hat;

        omega = t.dot(s) / t.dot(t);
        x += alpha*p_hat + omega*s_hat;
        r = s - omega*t;
        errorn = r.squaredNorm() / bnrm2;
        iter_done = iter;

        if ( errorn <= tol2 || abs(omega) < eps) {
            errorn = std::sqrt(errorn);
            return (true);
        }

        rho_1 = rho;
    }
    return (false);
}
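
// Usage sketch for preconditionedBiCGStab() with one of Eigen's stock
// preconditioners (a hedged example; any type exposing solve() should do):
//
//     Eigen::DiagonalPreconditioner<Complex> M;
//     M.compute(A);                                   // A is the assembled SparseMat
//     Real errorn(0);
//     int iter_done(0);
//     bool converged = preconditionedBiCGStab(A, M, b, x, 500, Real(1e-9),
//                                             errorn, iter_done);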

// Note: despite the name, this is a standard preconditioned conjugate-gradient
// (CG) iteration; M must expose solve(), like the Eigen preconditioners.
// Returns true on convergence, false otherwise.
template <typename Preconditioner>
bool preconditionedSCBiCG(const SparseMat &A, const Preconditioner &M,
        const Ref< VectorXcr const > b,
        Ref <VectorXcr > x,
        const int &max_iter, const Real &tol,
        Real &errorn, int &iter_done) {

    Real resid;
    VectorXcr p, z, q;
    Complex alpha, beta, rho, rho_1;

    Real normb = b.norm( );
    VectorXcr r = b - A*x;

    if (normb == 0.0) normb = 1;

    if ((resid = r.norm( ) / normb) <= tol) {
        errorn = resid;
        iter_done = 0;
        return true;
    }

    for (int i = 1; i <= max_iter; i++) {
        z = M.solve(r);
        rho = r.dot(z);

        if (i == 1) p = z;
        else {
            beta = rho / rho_1;
            p = z + beta * p;
        }

        q = A*p;
        alpha = rho / p.dot(q);

        x += alpha * p;
        r -= alpha * q;

        resid = r.norm( ) / normb;
        std::cout << "resid\t" << resid << std::endl;
        if (resid <= tol) {
            errorn = resid;
            iter_done = i;
            return true;
        }

        rho_1 = rho;
    }

    errorn = resid;

    return (false);
}
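
// preconditionedSCBiCG is called like preconditionedBiCGStab above; a sketch
// (the preconditioner choice and values are illustrative assumptions):
//
//     Eigen::IncompleteLUT<Complex> ilu;
//     ilu.compute(A);
//     bool ok = preconditionedSCBiCG(A, ilu, b, x, 1000, Real(1e-8), errorn, iter_done);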


/** \internal Low-level conjugate gradient algorithm
  * \param mat The matrix A
  * \param rhs The right hand side vector b
  * \param x On input an initial solution, on output the computed solution.
  * \param precond A preconditioner being able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the max number of iterations, on output the number of performed iterations.
  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
  */
template<typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugateGradient(const SparseMat& mat, const Rhs& rhs, Dest& x,
        const Preconditioner& precond, int& iters,
        typename Dest::RealScalar& tol_error)
{
    using std::sqrt;
    using std::abs;
    typedef typename Dest::RealScalar RealScalar;
    typedef typename Dest::Scalar Scalar;
    typedef Matrix<Scalar,Dynamic,1> VectorType;

    RealScalar tol = tol_error;
    int maxIters = iters;

    int n = mat.cols();

    VectorType residual = rhs - mat.selfadjointView<Eigen::Upper>() * x;  // initial residual

    RealScalar rhsNorm2 = rhs.squaredNorm();
    if(rhsNorm2 == 0)
    {
        x.setZero();
        iters = 0;
        tol_error = 0;
        return;
    }
    RealScalar threshold = tol*tol*rhsNorm2;
    RealScalar residualNorm2 = residual.squaredNorm();
    if (residualNorm2 < threshold)
    {
        iters = 0;
        tol_error = sqrt(residualNorm2 / rhsNorm2);
        return;
    }

    VectorType p(n);
    p = precond.solve(residual);                // initial search direction

    VectorType z(n), tmp(n);
    RealScalar absNew = numext::real(residual.dot(p));  // the square of the absolute value of r scaled by invM
    int i = 0;
    while(i < maxIters)
    {
        tmp.noalias() = mat.selfadjointView<Eigen::Upper>() * p;   // the bottleneck of the algorithm

        Scalar alpha = absNew / p.dot(tmp);     // the amount we travel on dir
        x += alpha * p;                         // update solution
        residual -= alpha * tmp;                // update residue

        residualNorm2 = residual.squaredNorm();
        if(residualNorm2 < threshold)
            break;

        z = precond.solve(residual);            // approximately solve for "A z = residual"

        RealScalar absOld = absNew;
        absNew = numext::real(residual.dot(z)); // update the absolute value of r
        RealScalar beta = absNew / absOld;      // calculate the Gram-Schmidt value used to create the new search direction
        p = z + beta * p;                       // update search direction
        i++;
    }
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    iters = i;
}
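
// conjugateGradient() appears adapted from Eigen's internal CG kernel: iters and
// tol_error are in/out parameters. A hedged call sketch (all names are assumptions):
//
//     int iters = 500;                    // in: max iterations, out: iterations used
//     Real tol_error = 1e-10;             // in: tolerance, out: achieved relative error
//     Eigen::DiagonalPreconditioner<Complex> precond;
//     precond.compute(A);
//     conjugateGradient(A, b, x, precond, iters, tol_error);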

// // Computes implicit
// VectorXcr implicitDCInvBPhi (const SparseMat& D, const SparseMat& C,
//         const SparseMat& B, const SparseMat& MC,
//         const VectorXcr& Phi, Real& tol,
//         int& max_it) {
//     int iter_done(0);
//     Real errorn(0);
//     VectorXcr b = B*Phi;
//     VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^1*b;
//     bicgstab(C, MC, b, y, max_it, tol, errorn, iter_done, false);
//     //std::cout << "Temp " << errorn << std::endl;
//     return D*y;
// }

// Computes the implicit product D * C^{-1} * diag(ioms) * D^T * Phi, where the
// inner solve with C is done by the hand-rolled bicgstab() above (MC is its
// multiplicative preconditioner). On return max_it holds the inner iteration count.
VectorXcr implicitDCInvBPhi (const SparseMat& D, const SparseMat& C,
        const VectorXcr& ioms, const SparseMat& MC,
        const VectorXcr& Phi, Real& tol,
        int& max_it) {
    int iter_done(0);
    Real errorn(0);
    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^1*b;
    bicgstab(C, MC, b, y, max_it, tol, errorn, iter_done, false);
    //std::cout << "Temp " << errorn << std::endl;
    max_it = iter_done;
    return D*y;
}

// Same implicit product as above, but the inner solve with C is done with
// Eigen's BiCGSTAB (the passed-in Preconditioner is unused in the active path).
template <typename Preconditioner>
VectorXcr implicitDCInvBPhi2 (const SparseMat& D, const SparseMat& C,
        const Ref<VectorXcr const> ioms, const Preconditioner& solver,
        const Ref<VectorXcr const> Phi, Real& tol,
        int& max_it) {

    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = VectorXcr::Zero(C.rows()) ; // = C^1*b;

    // Home Made
    //int iter_done(0);
    //Real errorn(0);
    //preconditionedBiCGStab(C, solver, b, y, max_it, tol, errorn, iter_done); //, false); // Jacobi M
    //max_it = iter_done;

    // Eigen BiCGStab
    Eigen::BiCGSTAB<SparseMatrix<Complex> > BiCG;
    BiCG.compute( C );  // TODO move this out of this loop!
    y = BiCG.solve(b);
    max_it = BiCG.iterations();
    tol = BiCG.error();

    // Direct
/*
    std::cout << "Computing LLT" << std::endl;
    Eigen::SimplicialLLT<SparseMatrix<Complex>, Eigen::Upper, Eigen::AMDOrdering<int> > LLT;
    LLT.compute(C);
    max_it = 1;
    std::cout << "Computed LLT" << std::endl;
    y = LLT.solve(b);
*/

    return D*y;
}

// Same implicit product again, with the inner solve delegated to an already
// factorised or configured solver object (anything exposing solve()).
//template <typename Solver>
template < typename Solver >
inline VectorXcr implicitDCInvBPhi3 (const SparseMat& D, const Solver& solver,
        const Ref<VectorXcr const> ioms,
        const Ref<VectorXcr const> Phi, Real& tol,
        int& max_it) {
    VectorXcr b = (ioms).asDiagonal() * (D.transpose()*Phi);
    VectorXcr y = solver.solve(b);
    max_it = 0;
    //max_it = solver.iterations();
    //errorn = solver.error();
    return D*y;
}
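
// implicitDCInvBPhi3 is typically handed a pre-factorised sparse solver; a
// hedged sketch using Eigen's SimplicialLLT (the solver choice, matrix names,
// and values are assumptions):
//
//     Eigen::SimplicialLLT< Eigen::SparseMatrix<Complex>, Eigen::Upper > chol;
//     chol.compute(C);                    // factorise once, reuse for every call
//     Real tol = 1e-9;
//     int it = 0;
//     VectorXcr w = implicitDCInvBPhi3(D, chol, ioms, Phi, tol, it);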


// // Simple extraction of indices in idx into reduced array x1
// void vmap( const Ref<VectorXcr const> x0, Ref<VectorXcr> x1, const std::vector<int>& idx ) {
//     for (unsigned int ii=0; ii<idx.size(); ++ii) {
//         x1(ii) = x0(idx[ii]);
//     }
// }

// Simple extraction of the entries listed in idx into the reduced array x1
VectorXcr vmap( const Ref<VectorXcr const> x0, const std::vector<int>& idx ) {
    VectorXcr x1 = VectorXcr::Zero( idx.size() );
    for (unsigned int ii=0; ii<idx.size(); ++ii) {
        x1(ii) = x0(idx[ii]);
    }
    return x1;
}

// Reverse of above: scatter the reduced array x1 back into x0 at the positions in idx
void ivmap( Ref<VectorXcr > x0, const Ref<VectorXcr const> x1, const std::vector<int>& idx ) {
    for (unsigned int ii=0; ii<idx.size(); ++ii) {
        x0(idx[ii]) = x1(ii);
    }
}
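
// Gather/scatter sketch for vmap/ivmap (values are illustrative):
//
//     std::vector<int> idx = {0, 2, 5};
//     VectorXcr full = VectorXcr::Random(6);
//     VectorXcr reduced = vmap(full, idx);   // reduced(k) == full(idx[k])
//     ivmap(full, reduced, idx);             // writes the entries back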


// On Input
//    D         = sparse operator; the system is applied implicitly as
//                D * solver.solve( diag(ioms) * D^T * (.) ), see implicitDCInvBPhi3
//    idx       = indices of the reduced (active) unknowns, see vmap/ivmap
//    ioms      = diagonal scaling vector of the implicit product
//    rhs       = right hand side
//    phi       = initial guess, overwritten with the solution
//    solver    = pre-factorised inner solver
//    max_it    = maximum number of iterations
//    tol       = error tolerance
// On Output
//    phi       = solution vector
//    errorn    = achieved relative residual norm
//    iter_done = number of iterations performed
template < typename CSolver >
int implicitbicgstab(//const SparseMat& D,
        //const SparseMat& C,
        const Ref< Eigen::SparseMatrix<Complex> const > D,
        const std::vector<int>& idx,
        const Ref< VectorXcr const > ioms,
        const Ref< VectorXcr const > rhs,
        Ref <VectorXcr> phi,
        CSolver& solver,
        int &max_it, const Real &tol, Real &errorn, int &iter_done, std::ofstream& logio) {

    logio << "using the preconditioned implicit solver" << std::endl;

    Complex omega, rho, rho_1, alpha, beta;
    Real tol2;
    int iter, max_it2, max_it1;

    // Look at reduced problem
    VectorXcr rhs2 = vmap(rhs, idx);
    VectorXcr phi2 = vmap(phi, idx);

    // Determine size of system and init vectors
    int n = idx.size();  // was phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr s(n);
    VectorXcr v = VectorXcr::Zero(n);
    VectorXcr t = VectorXcr::Zero(n);

// TODO, refigure for implicit large system
//    std::cout << "Start BiCGStab, memory needed: "
//              << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    Real eps = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        tol2 = tol;
        max_it2 = 50000;
        //r = rhs - implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2);
        r = rhs2 - vmap( implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2), idx );
    } else {
        r = vmap(rhs, idx);
    }

    jsw_timer timer;

    errorn = r.norm() / bnrm2;
    omega = 1.;
    r_tld = r;
    Real errornold = 1e14;
    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        timer.begin();

        rho = r_tld.dot(r);
        if (abs(rho) < eps) {
            ivmap( phi, phi2, idx );
            return (0);
        }

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p = r;
        }

        tol2 = tol;
        max_it2 = 500000;
        //v = implicitDCInvBPhi2(D, C, ioms, solver, p, tol2, max_it2);
        ivmap(phi, p, idx);
        v = vmap(implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2), idx);

        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi2.array() += alpha*p.array();
            ivmap( phi, phi2, idx );
            return (0);
        }

        tol2 = tol;
        max_it1 = 500000;
        //t = implicitDCInvBPhi2(D, C, ioms, solver, s, tol2, max_it1);
        //t = implicitDCInvBPhi3(D, solver, ioms, s, tol2, max_it1);
        ivmap(phi, s, idx);
        t = vmap(implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it1), idx);
        omega = t.dot(s) / t.dot(t);

        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn <= tol) {
            ivmap( phi, phi2, idx );
            return (0);
        }

        if (abs(omega) < eps) {
            ivmap( phi, phi2, idx );
            return (0);
        }

        rho_1 = rho;

        logio << "iteration " << std::setw(3) << iter
              << " errorn "   << std::setw(6) << std::setprecision(4) << std::scientific << errorn
              //<< "\timplicit iterations " << std::setw(5) << max_it1+max_it2
              << " time "     << std::setw(6) << std::fixed << std::setprecision(2) << timer.end() << std::endl;

        // Check to see how progress is going
        if (errornold - errorn < 0) {
            logio << "Irregular non-monotonic (negative) convergence. Recommend restart. \n";
            ivmap( phi, phi2, idx );
            return (2);
        }

        /*
        if (errornold - errorn < 1e-14) {
            logio << "not making any progress. Giving up\n";
            return (1);
        }
        */

        //std::cout << "|| p-s ||" << (alpha*p - omega*s).norm() << std::endl;

        // only update phi if good things are happening
        phi2.array() += alpha*p.array() + omega*s.array();
        errornold = errorn;

    }
    ivmap( phi, phi2, idx );
    return (0);
}
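
// Call sketch for the reduced-index implicit solver (everything below is an
// illustrative assumption about the surrounding driver code):
//
//     std::ofstream logio("bicgstab.log");
//     Eigen::SimplicialLLT< Eigen::SparseMatrix<Complex>, Eigen::Upper > chol;
//     chol.compute(C);                    // C is the inner-system matrix
//     int max_it = 200, iter_done = 0;
//     Real errorn = 0;
//     implicitbicgstab(D, idx, ioms, rhs, phi, chol, max_it, Real(1e-8),
//                      errorn, iter_done, logio);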

// On Input
//    D         = sparse operator of the implicit product (see implicitDCInvBPhi3)
//    ioms      = diagonal scaling vector of the implicit product
//    rhs       = right hand side
//    phi       = initial guess, overwritten with the solution
//    solver    = pre-factorised inner solver
//    max_it    = maximum number of iterations
//    tol       = error tolerance
// On Output
//    phi       = solution vector
//    errorn    = achieved relative residual norm
//    iter_done = number of iterations performed
template < typename Solver >
int implicitbicgstab_ei(const SparseMat& D,
        const Ref< VectorXcr const > ioms,
        const Ref< VectorXcr const > rhs,
        Ref <VectorXcr> phi,
        Solver& solver,
        int &max_it, const Real &tol, Real &errorn, int &iter_done, std::ofstream& logio) {

    logio << "using the preconditioned Eigen implicit solver" << std::endl;

    Complex omega, rho, rho_1, alpha, beta;
    Real tol2;
    int iter, max_it2, max_it1;

    // Determine size of system and init vectors
    int n = phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    VectorXcr s(n);
    VectorXcr t(n);

    // Initialise
    iter_done = 0;
    Real eps = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        tol2 = tol;
        max_it2 = 50000;
        r = rhs - implicitDCInvBPhi3(D, solver, ioms, phi, tol2, max_it2);
    } else {
        r = rhs;
    }

    jsw_timer timer;

    errorn = r.norm() / bnrm2;
    omega = 1.;
    r_tld = r;
    Real errornold = 1e14;

    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        timer.begin();

        rho = r_tld.dot(r);
        if (abs(rho) < eps) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p = r;
        }

        tol2 = tol;
        max_it2 = 500000;
        v = implicitDCInvBPhi3(D, solver, ioms, p, tol2, max_it2);
        max_it2 = 0; // solver.iterations();

        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi.array() += alpha*p.array();
            return (0);
        }

        tol2 = tol;
        max_it1 = 500000;
        t = implicitDCInvBPhi3(D, solver, ioms, s, tol2, max_it1);
        max_it1 = 0; //solver.iterations();
        omega = t.dot(s) / t.dot(t);

        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;

        if (errorn <= tol ) return (0);
        if (abs(omega) < eps) return (0);
        rho_1 = rho;

        logio << "iteration " << std::setw(4) << iter
              << "\terrorn " << std::setw(6) << std::setprecision(4) << std::scientific << errorn
              << "\timplicit iterations " << std::setw(5) << max_it1+max_it2
              << "\ttime " << std::setw(10) << std::fixed << std::setprecision(2) << timer.end() << std::endl;

        // Check to see how progress is going
        if (errornold - errorn < 0) {
            logio << "irregular (negative) convergence. Try again? \n";
            return (2);
        }

        // only update phi if good things are happening
        phi.array() += alpha*p.array() + omega*s.array();
        errornold = errorn;

    }
    return (0);
}
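
// implicitbicgstab_ei appears to be the full-vector counterpart of the reduced-index
// version above; it is called the same way, minus the idx argument, e.g.
// implicitbicgstab_ei(D, ioms, rhs, phi, chol, max_it, tol, errorn, iter_done, logio);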


// On Input
//    D, C      = sparse operators of the implicit product D C^{-1} diag(ioms) D^T
//    ioms      = diagonal scaling vector
//    MC        = multiplicative preconditioner handed to the inner bicgstab() solve
//    rhs       = right hand side
//    phi       = initial guess, overwritten with the solution
//    max_it    = maximum number of iterations
//    tol       = error tolerance
// On Output
//    phi       = solution vector
//    errorn    = achieved relative residual norm
//    iter_done = number of iterations performed
int implicitbicgstab(const SparseMat& D,
        const SparseMat& C,
        const VectorXcr& ioms,
        const SparseMat& MC,
        Eigen::Ref< VectorXcr > rhs,
        VectorXcr& phi,
        int &max_it, Real &tol, Real &errorn, int &iter_done) {

    Complex omega, rho, rho_1, alpha, beta;
    Real errmin, tol2;
    int iter, max_it2;

//    // Cholesky decomp
//    SparseLLT<SparseMatrix<Complex>, Cholmod>
//        CholC(SparseMatrix<Complex> (C.real()) );
//    if(!CholC.succeeded()) {
//        std::cerr << "decomposition failed\n";
//        return EXIT_FAILURE;
//    }

    // Determine size of system and init vectors
    int n = phi.size();
    VectorXcr r(n);
    VectorXcr r_tld(n);
    VectorXcr p(n);
    VectorXcr v(n);
    //VectorXcr p_hat(n);
    VectorXcr s(n);
    //VectorXcr s_hat(n);
    VectorXcr t(n);
    VectorXcr xmin(n);

// TODO, refigure for implicit large system
//    std::cout << "Start BiCGStab, memory needed: "
//              << (sizeof(Complex)*(9+2)*n/(1024.*1024*1024)) << " [Gb]\n";

    // Initialise
    iter_done = 0;
    v.setConstant(0.); // not necessary I don't think
    t.setConstant(0.);
    Real eps = 1e-100;

    Real bnrm2 = rhs.norm();
    if (bnrm2 == 0) {
        phi.setConstant(0.0);
        errorn = 0;
        std::cerr << "Trivial case of Ax = b, where b is 0\n";
        return (0);
    }

    // If there is an initial guess
    if ( phi.norm() ) {
        //r = rhs - A*phi;
        tol2 = tol;
        max_it2 = 50000;
        std::cout << "Initial guess " << std::endl;
        r = rhs - implicitDCInvBPhi(D, C, ioms, MC, phi, tol2, max_it2);
        //r = rhs - implicitDCInvBPhi (D, C, B, CholC, phi, tol2, max_it2);
    } else {
        r = rhs;
    }

    errorn = r.norm() / bnrm2;
    //std::cout << "Initial |r| " << r.norm() << "\t" << errorn << std::endl;
    omega = 1.;
    r_tld = r;
    errmin = 1e30;
    Real errornold = 1e6;
    // Get down to business
    for (iter=0; iter<max_it; ++iter) {

        rho = r_tld.dot(r);
        if (abs(rho) < eps) return (0);

        if (iter > 0) {
            beta = (rho/rho_1) * (alpha/omega);
            p = r.array() + beta*(p.array()-omega*v.array()).array();
        } else {
            p = r;
        }

        // Use pseudo inverse to get approximate answer
        //p_hat = p;
        tol2 = std::max(1e-4*errorn, tol);
        tol2 = tol;
        max_it2 = 500000;
        //v = A*p_hat;
        v = implicitDCInvBPhi(D, C, ioms, MC, p, tol2, max_it2);
        //v = implicitDCInvBPhi(D, C, B, CholC, p, tol2, max_it2);

        alpha = rho / r_tld.dot(v);
        s = r.array() - alpha*v.array();
        errorn = s.norm()/bnrm2;

        if (errorn < tol && iter > 1) {
            phi.array() += alpha*p.array();
            return (0);
        }

        // s_hat = M*s;
        //tol2 = tol;
        tol2 = std::max(1e-4*errorn, tol);
        tol2 = tol;
        max_it2 = 50000;
        // t = A*s_hat;
        t = implicitDCInvBPhi(D, C, ioms, MC, s, tol2, max_it2);
        //t = implicitDCInvBPhi(D, C, B, CholC, s, tol2, max_it2);
        omega = t.dot(s) / t.dot(t);
        phi.array() += alpha*p.array() + omega*s.array();
        r = s.array() - omega*t.array();
        errorn = r.norm() / bnrm2;
        iter_done = iter;
        if (errorn < errmin) {
            // remember the model with the smallest norm
            errmin = errorn;
            xmin = phi;
        }

        if (errorn <= tol ) return (0);
        if (abs(omega) < eps) return (0);
        rho_1 = rho;

        std::cout << "iteration " << std::setw(4) << iter << "\terrorn " << std::setw(6) << std::scientific << errorn
                  << "\timplicit iterations " << std::setw(5) << max_it2 << std::endl;
        if (errornold - errorn < 1e-14) {
            std::cout << "not making any progress. Giving up\n";
            return (2);
        }
        errornold = errorn;

    }
    return (0);
}
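
// This last overload nests the hand-rolled bicgstab() (via implicitDCInvBPhi)
// as its inner solve, so every outer iteration pays for a full inner BiCGStab
// run on C; the templated overloads above avoid that cost by reusing a
// factorised or preconditioned solver object instead.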


//int bicgstab(const SparseMat &A, Eigen::SparseLU< Eigen::SparseMatrix<Complex, RowMajor> ,
