1
# -*- coding: utf-8 -*-
4
@license: Copyright (C) 2005
5
Author: Åsmund Ødegård, Ola Skavhaug, Gunnar Staff
7
Simula Research Laboratory AS
9
This file is part of PyCC.
11
PyCC is free software; you can redistribute it and/or modify
12
it under the terms of the GNU General Public License as published by
13
the Free Software Foundation; either version 2 of the License, or
14
(at your option) any later version.
16
PyCC is distributed in the hope that it will be useful,
17
but WITHOUT ANY WARRANTY; without even the implied warranty of
18
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
GNU General Public License for more details.
21
You should have received a copy of the GNU General Public License
22
along with PyCC; if not, write to the Free Software
23
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26
ConjGrad: A generic implementation of the (preconditioned) conjugate gradient method.
30
Solve Ax = b with the Krylov methods.
32
A: Matrix or discrete operator. Must support A*x to operate on x.
34
x: Anything that A can operate on, as well as support inner product
35
and multiplication/addition/subtraction with scalars and vectors. Also
36
incremental operators like __imul__ and __iadd__ might be needed
38
b: Right-hand side, probably the same type as x.
43
#from debug import debug
45
def debug(string, level):
    """Print the diagnostic message *string*.

    NOTE(review): the original body was lost in extraction.  The commented-out
    ``from debug import debug`` at the top of the file suggests this is a
    local stand-in for PyCC's logging helper; *level* is presumably a
    verbosity level (0 = most important) -- confirm against the real debug
    module before relying on filtering behavior.

    @param string: Message text to emit.
    @param level:  Verbosity level of the message.
    @return: None.
    """
    print("[debug:%d] %s" % (level, string))

#debug("Deprecation warning: You have imported Krylov.py. Use Solvers.py instead",level=0)
def inner(u, v):
    """Compute innerproduct of u and v.

    It is not computed here, we just check type of operands and
    call a suitable innerproduct method.

    @param u: Left operand; a numpy ndarray or an object providing ``inner``.
    @param v: Right operand, same kind as u.
    @return: The scalar inner product <u, v>.
    """
    if isinstance(u, _n.ndarray):
        # assume both are numpy arrays: use the C-level dot product
        return _n.dot(u, v)
    # assume that left operand implements inner
    return u.inner(v)
66
"""Conjugate Gradient Methods"""
68
def conjgrad(A, x, b, tolerance=1.0E-05, relativeconv=False, maxit=500):
    """conjgrad(A,x,b): Solve Ax = b with the conjugate gradient method.

    @param A: See module documentation
    @param x: See module documentation (initial guess, updated in place)
    @param b: See module documentation

    @param tolerance: Convergence criterion
    @type tolerance: float

    @param relativeconv: Control relative vs. absolute convergence criterion.
        That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
        convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2
        is used, since that saves a few cycles.
    @type relativeconv: bool

    @param maxit: Maximum number of iterations.
    @type maxit: int

    @return: the solution x.

    NOTE(review): reconstructed from a garbled extraction -- only the
    docstring, the tolerance scaling, the loop condition and the residual
    print survived.  The body is the textbook CG recurrence; verify against
    the PyCC repository.
    """
    r = b - A*x              # initial residual
    p = 1.0*r                # first search direction (a copy of r)
    r0 = inner(r, r)         # squared residual norm ||r||_2^2
    iter = 0
    if relativeconv:
        # measure convergence relative to the right-hand side
        tolerance *= sqrt(inner(b,b))
    while sqrt(r0) > tolerance and iter <= maxit:
        w = A*p
        alpha = r0/inner(p, w)
        x += alpha*p
        r -= alpha*w
        r1 = inner(r, r)
        p = r + (r1/r0)*p    # conjugate direction update
        r0 = r1
        print("rho ", sqrt(r0))
        iter += 1
    #debug("CG finished, iter: %d, ||e||=%e" % (iter,sqrt(r0)))
    return x
110
def precondconjgrad(B, A, x, b, tolerance=1.0E-05, relativeconv=False, maxiter=500):
    r"""Preconditioned Conjugate Gradients method. Algorithm described here;
    http://www.math-linux.com/spip.php?article55

    @param B: Preconditioner; must support B*r.
    @param A: See module documentation.
    @param x: Initial guess, updated in place.
    @param b: Right-hand side.
    @return: the solution x.

    NOTE(review): reconstructed from a garbled extraction -- the loop
    condition, the alpha update and the residual print survived; the rest is
    the textbook PCG recurrence.  Verify against the PyCC repository.
    """
    r = b - A*x              # residual
    z = B*r                  # preconditioned residual
    d = 1.0*z                # first search direction (copy of z)
    rz = inner(r, z)         # <r, B r>
    iter = 0
    if relativeconv: tolerance *= sqrt(rz)
    while sqrt(rz) > tolerance and iter <= maxiter:
        z = A*d              # reuse z for the operator application
        alpha = rz / inner(d,z)
        x += alpha*d
        r -= alpha*z
        z = B*r
        rz_new = inner(r, z)
        d = z + (rz_new/rz)*d
        rz = rz_new
        print("rho ", sqrt(rz))
        iter += 1
    return x
137
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and the original indentation were lost; the interleaved bare numbers are
# line numbers from the source file fused into the text.  Code is left
# byte-identical; only comments are added.  Recover the full body from the
# PyCC repository before editing.
def old_precondconjgrad(B, A, x, b, tolerance=1.0E-05, relativeconv=False, maxiter=500):
139
conjgrad(B,A,x,b): Solve Ax = b with the preconditioned conjugate gradient method.
141
@param B: Preconditioner supporting the __mul__ operator for operating on
144
@param A: Matrix or discrete operator. Must support A*x to operate on x.
146
@param x: Anything that A can operate on, as well as support inner product
147
and multiplication/addition/subtraction with scalar and something of the
150
@param b: Right-hand side, probably the same type as x.
152
@param tolerance: Convergence criterion
153
@type tolerance: float
155
@param relativeconv: Control relative vs. absolute convergence criterion.
156
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
157
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
158
used, since that save a few cycles.
160
@type relativeconv: bool
162
@return: the solution x.
171
# rho tracks the preconditioned residual product; s is presumably B*r,
# computed in a lost line just above -- TODO confirm.
rho = rho0 = inner(s, r)
173
# relative criterion scales the tolerance by the B-weighted norm of b.
tolerance *= sqrt(inner(B*b,b))
175
# NOTE(review): the test uses rho0, so rho0 must be updated each iteration
# in the lost loop body -- confirm the update rule before reuse.
while sqrt(rho0) > tolerance and iter <= maxiter:
188
debug("PCG finished, iter: %d, ||rho||=%e" % (iter,sqrt(rho0)))
192
"""Bi-conjugate gradients method"""
194
def BiCGStab(A, x, b, tolerance=1.0E-05, relativeconv=False, maxiter=1000, info=False):
    """BiCGStab(A,x,b): Solve Ax = b with the Biconjugate gradient stabilized
    method.

    @param A: Matrix or discrete operator. Must support A*x to operate on x.
    @param x: Anything that A can operate on, as well as support inner product
        and multiplication/addition/subtraction with scalars and vectors.
    @param b: Right-hand side, probably the same type as x.

    @param tolerance: Convergence criterion
    @type tolerance: float

    @param relativeconv: Control relative vs. absolute convergence criterion.
        That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
        convergence criterion, else just ||r||_2 is used.
    @type relativeconv: bool

    @param maxiter: Maximum number of iterations.
    @param info: If True, also return the iteration count and final residual
        norm -- reconstructed interpretation, TODO confirm.

    @return: the solution x (or (x, iter, ||r||) when info is True).

    NOTE(review): reconstructed from a garbled extraction; the surviving
    fragments (alpha, w and beta updates, breakdown message, loop condition)
    pin the standard BiCGStab recurrence used below.  Verify against the
    PyCC repository.
    """
    r = b - A*x
    rs = 1.0*r               # shadow residual, kept fixed
    p = 1.0*r
    rr = inner(r, rs)
    iter = 0
    if relativeconv:
        tolerance *= sqrt(inner(b,b))
    debug("r0=" + str(sqrt(inner(r,r))), 0)
    while sqrt(inner(r,r)) > tolerance and iter < maxiter:
        Ap = A*p
        alpha = rr/inner(rs,Ap)
        print("alpha ", alpha)
        s = r - alpha*Ap
        As = A*s
        w = inner(As,s)/inner(As,As)     # stabilization parameter
        x += alpha*p + w*s
        r = s - w*As
        rrn = inner(r, rs)
        beta = (rrn/rr)*(alpha/w)
        if beta == 0.0:
            # new residual orthogonal to the shadow residual: method breaks down
            debug("BiCGStab breakdown, beta=0, at iter=" + str(iter) + " with residual=" + str(sqrt(inner(r,r))), 0)
            break
            # return (x,iter,sqrt(inner(r,r)))
        p = r + beta*(p - w*Ap)
        rr = rrn
        iter += 1
        #debug("r=",sqrt(inner(r,r)))
    debug("BiCGStab finished, iter: " + str(iter) + "|r|= " + str(sqrt(inner(r,r))), 0)
    if info:
        return (x, iter, sqrt(inner(r,r)))
    return x
258
def precondBiCGStab(B, A, x, b, tolerance=1.0E-05, relativeconv=False, maxit=200):
    """precondBiCGStab(B,A,x,b): Solve Ax = b with the preconditioned biconjugate
    gradient stabilized method.

    @param B: Preconditioner supporting the __mul__ operator for operating on
        vectors of the same kind as x.
    @param A: Matrix or discrete operator. Must support A*x to operate on x.
    @param x: Anything that A can operate on, as well as support inner product
        and multiplication/addition/subtraction with scalars and vectors.
    @param b: Right-hand side, probably the same type as x.

    @param tolerance: Convergence criterion
    @type tolerance: float

    @param relativeconv: Control relative vs. absolute convergence criterion.
        If True the tolerance is scaled by the B-weighted norm of the initial
        residual.
    @type relativeconv: bool

    @param maxit: Maximum number of iterations.

    @return: (x, iter, ||r||) -- solution, iteration count, final residual norm.

    NOTE(review): reconstructed from a garbled extraction as a
    right-preconditioned BiCGStab; the surviving fragments (alpha/w/beta
    updates, tolerance scaling by inner(B*r,r), return tuple) pin the
    skeleton but not every intermediate -- verify against the PyCC
    repository.
    """
    r = b - A*x
    debug("b0=%e"%sqrt(inner(b,b)),1)
    rs = 1.0*r               # shadow residual, kept fixed
    p = 1.0*r
    rr = inner(r, rs)
    iter = 0
    if relativeconv:
        tolerance *= sqrt(inner(B*r,r))
    debug("r0=%e"%sqrt(inner(r,r)),1)
    while sqrt(inner(r,r)) > tolerance and iter <= maxit:
        #debug("iter, sqrt(inner(r,r))", iter, sqrt(inner(r,r)))
        Bp = B*p
        Ap = A*Bp
        alpha = rr/inner(rs,Ap)
        s = r - alpha*Ap
        Bs = B*s
        As = A*Bs
        w = inner(As,s)/inner(As,As)     # stabilization parameter
        x += alpha*Bp + w*Bs
        r = s - w*As
        rrn = inner(r, rs)
        beta = (rrn/rr)*(alpha/w)
        if beta == 0.0:
            # breakdown: the new residual is orthogonal to the shadow residual
            debug("BiCGStab breakdown, beta=0, at iter=" + str(iter)+" with residual=" + str(sqrt(inner(r,r))), 0)
            break
            # return (x,iter,sqrt(inner(r,r)))
        p = r + beta*(p - w*Ap)
        rr = rrn
        iter += 1
        debug("|r|_%d = %e " %(iter,sqrt(inner(r,r))), 1)
    debug("precondBiCGStab finished, iter: %d, ||e||=%e" % (iter,sqrt(inner(r,r))),1)
    return (x,iter,sqrt(inner(r,r)))
329
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and indentation were lost; the interleaved bare numbers are source line
# numbers fused into the text.  Code left byte-identical; comments only.
# The BAp/BAs names suggest a LEFT-preconditioned BiCGStab (operator B*A) --
# recover the full body from the PyCC repository before editing.
def precondLBiCGStab(B, A, x, b, tolerance=1.0E-05, relativeconv=False):
331
precondBiCGStab(B,A,x,b): Solve Ax = b with the preconditioned biconjugate
332
gradient stabilized method.
334
@param B: Preconditioner supporting the __mul__ operator for operating on
337
@param A: Matrix or discrete operator. Must support A*x to operate on x.
339
@param x: Anything that A can operate on, as well as support inner product
340
and multiplication/addition/subtraction with scalar and something of the
343
@param b: Right-hand side, probably the same type as x.
345
@param tolerance: Convergence criterion
346
@type tolerance: float
348
@param relativeconv: Control relative vs. absolute convergence criterion.
349
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
350
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
351
used, since that save a few cycles.
353
@type relativeconv: bool
355
@return: the solution x.
365
tolerance *= sqrt(inner(b,b))
367
# NOTE(review): no iteration cap in the surviving condition -- may loop
# forever on stagnation.
while sqrt(inner(r,r)) > tolerance:
370
# BAp is presumably B*(A*p) -- TODO confirm.
alpha = rr/inner(rs,BAp)
374
w = inner(BAs,s)/inner(BAs,BAs)
378
beta = (rrn/rr)*(alpha/w)
382
debug("precondBiCGStab finished, iter: %d, ||e||=%e" % (iter,sqrt(inner(r,r))))
386
"""Conjugate Gradients Method for the normal equations"""
388
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and indentation were lost; the interleaved bare numbers are source line
# numbers fused into the text.  Code left byte-identical; comments only.
# CGN applied to the normal equations A^T A x = A^T b (CGNR); recover the
# full body from the PyCC repository before editing.
def CGN_AA(A, x, b, tolerance=1.0E-05, relativeconv=False):
390
CGN_AA(B,A,x,b): Solve Ax = b with the conjugate
391
gradient method applied to the normal equation.
394
@param A: Matrix or discrete operator. Must support A*x to operate on x.
396
@param x: Anything that A can operate on, as well as support inner product
397
and multiplication/addition/subtraction with scalar and something of the
400
@param b: Right-hand side, probably the same type as x.
402
@param tolerance: Convergence criterion
403
@type tolerance: float
405
@param relativeconv: Control relative vs. absolute convergence criterion.
406
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
407
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
408
used, since that save a few cycles.
410
@type relativeconv: bool
412
@return: the solution x.
424
# ||b||_2, used to normalize the error below.
bnrm2=sqrt(inner(b,b))
431
# Used to compute an estimate of the condition number
435
error = sqrt(rho)/bnrm2
436
debug("error0="+str(error), 0)
438
while error>=tolerance:
441
# p_aa is presumably A^T*(A*p) -- TODO confirm.
alpha = rho/inner(p,p_aa)
447
error = sqrt(rho)/bnrm2
448
debug("error = " + str(error), 0)
450
alpha_v.append(alpha)
458
# NOTE(review): message says CGN_ABBA but this is CGN_AA -- copy/paste slip
# in the original; left unchanged here because strings are behavior.
debug("CGN_ABBA finished, iter: %d, ||e||=%e" % (iter,error))
459
return (x,iter,alpha_v,beta_v)
461
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and indentation were lost; the interleaved bare numbers are source line
# numbers fused into the text.  Code left byte-identical; comments only.
# Uses PyCC's block-matrix API (A.prod, A.data) -- recover the full body
# from the PyCC repository before editing.
def CGN_ABBA(B, A, x, b, tolerance=1.0E-05, relativeconv=False):
463
CGN_ABBA(B,A,x,b): Solve Ax = b with the preconditioned conjugate
464
gradient method applied to the normal equation.
466
@param B: Preconditioner supporting the __mul__ operator for operating on
469
@param A: Matrix or discrete operator. Must support A*x to operate on x.
471
@param x: Anything that A can operate on, as well as support inner product
472
and multiplication/addition/subtraction with scalar and something of the
475
@param b: Right-hand side, probably the same type as x.
477
@param tolerance: Convergence criterion
478
@type tolerance: float
480
@param relativeconv: Control relative vs. absolute convergence criterion.
481
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
482
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
483
used, since that save a few cycles.
485
@type relativeconv: bool
487
@return: the solution x.
501
bnrm2 = sqrt(inner(b,b))
503
# Original author flags a known doubt: B should act as its transpose here.
r_bb = B*r_b #Should be transposed
505
# Block (0,0) product; the True flag on A.prod below presumably requests the
# transposed product -- TODO confirm against the PyCC matrix API.
A.data[0][0].prod(r_bb[0],r_abb[0])
506
A.prod(r_bb,r_abb,True)
508
rho=inner(r_abb,r_abb)
512
# Used to compute an estimate of the condition number
516
error = sqrt(rho)/bnrm2
518
while error>=tolerance:
523
alpha = rho/inner(p,p_abba)
528
A.prod(r_bb,r_abb,True)
529
rho = inner(r_abb,r_abb)
531
error = sqrt(rho)/bnrm2
534
alpha_v.append(alpha)
542
debug("CGN_ABBA finished, iter: %d, ||e||=%e" % (iter,error))
543
return (x,iter,alpha_v,beta_v)
548
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and indentation were lost; the interleaved bare numbers are source line
# numbers fused into the text.  Code left byte-identical; comments only.
# Uses PyCC's block-matrix API (A.prod) -- recover the full body from the
# PyCC repository before editing.
def CGN_BABA(B, A, x, b, tolerance=1.0E-05, relativeconv=False):
550
CGN_BABA(B,A,x,b): Solve Ax = b with the preconditioned conjugate
551
gradient method applied to the normal equation.
553
@param B: Preconditioner supporting the __mul__ operator for operating on
556
@param A: Matrix or discrete operator. Must support A*x to operate on x.
558
@param x: Anything that A can operate on, as well as support inner product
559
and multiplication/addition/subtraction with scalar and something of the
562
@param b: Right-hand side, probably the same type as x.
564
@param tolerance: Convergence criterion
565
@type tolerance: float
567
@param relativeconv: Control relative vs. absolute convergence criterion.
568
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
569
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
570
used, since that save a few cycles.
572
@type relativeconv: bool
574
@return: the solution x.
578
# Is this correct?? Should we not transpose the preconditoner somewhere??
587
bnrm2 = sqrt(inner(b,b))
589
# The True flag on A.prod presumably requests the transposed product --
# TODO confirm against the PyCC matrix API.
A.prod(r_b,r_ab,True)
592
rho = inner(r_bab,r_ab)
596
# Used to compute an estimate of the condition number
600
error = sqrt(rho)/bnrm2
602
while error>=tolerance:
605
A.prod(p_ba,p_aba,True)
607
alpha = rho/inner(p,p_aba)
611
A.prod(r_b,r_ab,True)
613
rho = inner(r_bab,r_ab)
615
error = sqrt(rho)/bnrm2
617
alpha_v.append(alpha)
625
debug("CGN_BABA finished, iter: %d, ||e||=%e" % (iter,error))
626
return (x,iter,alpha_v,beta_v)
630
# NOTE(review): garbled extraction -- docstring delimiters, most statements
# and indentation were lost; the interleaved bare numbers are source line
# numbers fused into the text.  Code left byte-identical; comments only.
# Right-preconditioned CG tracking alpha/beta for condition-number
# estimation -- recover the full body from the PyCC repository before editing.
def precondRconjgrad(B, A, x, b, tolerance=1.0E-05, relativeconv=False):
632
conjgrad(B,A,x,b): Solve Ax = b with the right preconditioned conjugate gradient method.
634
@param B: Preconditioner supporting the __mul__ operator for operating on
637
@param A: Matrix or discrete operator. Must support A*x to operate on x.
639
@param x: Anything that A can operate on, as well as support inner product
640
and multiplication/addition/subtraction with scalar and something of the
643
@param b: Right-hand side, probably the same type as x.
645
@param tolerance: Convergence criterion
646
@type tolerance: float
648
@param relativeconv: Control relative vs. absolute convergence criterion.
649
That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as
650
convergence criterion, else just ||r||_2 is used. Actually ||r||_2^2 is
651
used, since that save a few cycles.
653
@type relativeconv: bool
655
@return: the solution x.
663
# z is presumably B*r (preconditioned residual) -- TODO confirm.
rho = rho0 = inner(z, r)
665
tolerance *= sqrt(inner(B*b,b))
670
# fabs guards against rho0 going (slightly) negative with a nonsymmetric
# preconditioner; note: no iteration cap in the surviving condition.
while sqrt(fabs(rho0)) > tolerance:
672
alpha = rho0/inner(Aq,q)
681
alpha_v.append(alpha)
684
return (x,iter,alpha_v,beta_v)
687
def Richardson(A, x, b, tau=1, tolerance=1.0E-05, relativeconv=False, maxiter=1000, info=False):
    """Solve Ax = b with the stationary Richardson iteration
    x <- x + tau*(b - A*x).

    @param A: Matrix or discrete operator. Must support A*x.
    @param x: Initial guess, updated in place.
    @param b: Right-hand side.
    @param tau: Relaxation (damping) parameter.
    @param tolerance: Convergence criterion on ||r||_2.
    @param relativeconv: If True, scale tolerance by ||b||_2.
    @param maxiter: Maximum number of iterations.
    @param info: If True, also return the iteration count and final residual
        norm -- reconstructed interpretation, TODO confirm.
    @return: the solution x (or (x, iter, ||r||) when info is True).

    NOTE(review): reconstructed from a garbled extraction; the diagnostic
    prints and the loop condition survived, the update step is the
    definitional Richardson iteration.  Verify against the PyCC repository.
    """
    print("b ", b.inner(b))
    print("x ", x.inner(x))
    r = b - A*x
    print("r ", r.inner(r))
    rho = rho0 = inner(r, r)
    iter = 0
    if relativeconv:
        tolerance *= sqrt(inner(b,b))
    print("tolerance ", tolerance)
    while sqrt(rho) > tolerance and iter <= maxiter:
        x += tau*r           # damped correction step
        r = b - A*x
        rho = inner(r, r)
        iter += 1
        print("iteration ", iter, " rho= ", sqrt(rho))
    if info:
        return (x, iter, sqrt(rho))
    return x
710
def precRichardson(B, A, x, b, tau=1, tolerance=1.0E-05, relativeconv=False, maxiter=1000, info=False):
    """Solve Ax = b with the preconditioned stationary Richardson iteration
    x <- x + tau*B*(b - A*x).

    @param B: Preconditioner; must support B*r.
    @param A: Matrix or discrete operator. Must support A*x.
    @param x: Initial guess, updated in place.
    @param b: Right-hand side.
    @param tau: Relaxation (damping) parameter.
    @param tolerance: Convergence criterion on ||r||_2.
    @param relativeconv: If True, scale tolerance by ||b||_2.
    @param maxiter: Maximum number of iterations.
    @param info: If True, also return the iteration count and final residual
        norm -- reconstructed interpretation, TODO confirm.
    @return: the solution x (or (x, iter, ||r||) when info is True).

    NOTE(review): reconstructed from a garbled extraction; mirrors
    Richardson() above with the correction preconditioned by B.  Verify
    against the PyCC repository.
    """
    print("b ", b.inner(b))
    print("x ", x.inner(x))
    r = b - A*x
    print("r ", r.inner(r))
    rho = rho0 = inner(r, r)
    iter = 0
    if relativeconv:
        tolerance *= sqrt(inner(b,b))
    print("tolerance ", tolerance)
    while sqrt(rho) > tolerance and iter <= maxiter:
        x += tau*(B*r)       # preconditioned damped correction step
        r = b - A*x
        rho = inner(r, r)
        iter += 1
        print("iteration ", iter, " rho= ", sqrt(rho))
    if info:
        return (x, iter, sqrt(rho))
    return x