Use LOBPCG with Epetra and an Ifpack preconditioner. This example computes eigenvalues of a generalized eigenvalue problem (by default, those of smallest magnitude) using Anasazi's implementation of the LOBPCG method with Epetra linear algebra. It preconditions LOBPCG with an Ifpack incomplete Cholesky preconditioner.
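// This example builds the generalized eigenproblem K x = lambda M x, where
// K and M are the stiffness and mass matrices of a 2-D Laplace problem on
// the unit square, discretized with quadratic (Q2) elements
// (ModeLaplace2DQ2).  It solves the problem with Anasazi's LOBPCG solver
// manager, optionally preconditioned by an Ifpack incomplete Cholesky
// factorization of K.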
#include "Epetra_CrsMatrix.h"
#include "Epetra_InvOperator.h"
#include "Ifpack.h"
#ifdef HAVE_MPI
#include "Epetra_MpiComm.h"
#include <mpi.h>
#else
#include "Epetra_SerialComm.h"
#endif
#include "Epetra_Map.h"
#include "ModeLaplace2DQ2.h"
int
main (int argc, char *argv[])
{
using namespace Anasazi;
using Teuchos::RCP;
using Teuchos::rcp;
using std::endl;
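
// Initialize MPI, if this example was built with MPI support.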
#ifdef HAVE_MPI
MPI_Init (&argc, &argv);
#endif
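
// Create an Epetra communicator: MPI_COMM_WORLD in MPI builds, a serial
// communicator otherwise.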
#ifdef HAVE_MPI
Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif // HAVE_MPI
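
// Default values for the solver, problem size, and preconditioner options;
// each can be overridden on the command line below.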
int nev = 10;
int blockSize = 5;
int maxIters = 500;
double tol = 1.0e-8;
int numElements = 10;
bool verbose = true;
std::string which ("SM");
bool usePrec = true;
double prec_dropTol = 1e-4;
int prec_lofill = 0;
Teuchos::CommandLineProcessor cmdp(false, true);
cmdp.setOption("nev", &nev, "Number of eigenpairs to compute.");
cmdp.setOption("maxIters", &maxIters, "Maximum number of iterations.");
cmdp.setOption("blockSize", &blockSize, "Block size.");
cmdp.setOption("tol", &tol, "Relative convergence tolerance.");
cmdp.setOption("numElements", &numElements, "Number of elements in the discretization.");
cmdp.setOption("verbose", "quiet", &verbose, "Print messages and results.");
cmdp.setOption("sort", &which, "Targeted eigenvalues (SM or LM).");
cmdp.setOption("usePrec", "noPrec", &usePrec, "Use Ifpack for preconditioning.");
cmdp.setOption("prec_dropTol", &prec_dropTol, "Preconditioner: drop tolerance.");
cmdp.setOption("prec_lofill", &prec_lofill, "Preconditioner: level of fill.");
if (cmdp.parse(argc, argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
MPI_Finalize ();
#endif // HAVE_MPI
return -1;
}
// Verbosity: errors, warnings, and the final summary by default; add
// per-iteration details when --verbose is given (this mix is a choice).
int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
if (verbose) {
verbosity += Anasazi::IterationDetails;
}

// Output manager for all printing in this example.
BasicOutputManager<double> printer;
printer.stream(Errors) << Anasazi_Version() << endl << endl;
// Anasazi template arguments: Epetra multivectors and operators.
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef MultiVecTraits<double, MV> MVT;
printer.stream(Errors) << "Generating problem matrices..." << std::flush;
const int space_dim = 2;
std::vector<double> brick_dim (space_dim);
brick_dim[0] = 1.0;
brick_dim[1] = 1.0;
std::vector<int> elements (space_dim);
elements[0] = numElements;
elements[1] = numElements;
// Create the test problem: a 2-D Laplace eigenproblem with Q2 elements.
RCP<ModalProblem> testCase =
rcp(new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]));

// Get the stiffness matrix K and mass matrix M.  The RCPs are non-owning
// ("false"), since the test case owns the matrices.
RCP<Epetra_CrsMatrix> K =
rcp(const_cast<Epetra_CrsMatrix*>(testCase->getStiffness()), false);
RCP<Epetra_CrsMatrix> M =
rcp(const_cast<Epetra_CrsMatrix*>(testCase->getMass()), false);
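
// Optionally build an Ifpack incomplete Cholesky preconditioner from the
// stiffness matrix K.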
RCP<Ifpack_Preconditioner> prec;
RCP<Epetra_Operator> PrecOp;
if (usePrec) {
printer.stream(Errors) << "Constructing Incomplete Cholesky preconditioner..." << std::flush;
Ifpack precFactory;
std::string precType = "IC stand-alone";
int overlapLevel = 0;
prec = rcp(precFactory.Create(precType, K.get(), overlapLevel));

// Parameters for the incomplete Cholesky factorization.
Teuchos::ParameterList precParams;
precParams.set("fact: drop tolerance", prec_dropTol);
precParams.set("fact: level-of-fill", prec_lofill);
IFPACK_CHK_ERR(prec->SetParameters(precParams));
IFPACK_CHK_ERR(prec->Initialize());
IFPACK_CHK_ERR(prec->Compute());
// Ifpack preconditioners apply their inverse through ApplyInverse(), while
// Anasazi applies a preconditioner through Apply().  Epetra_InvOperator
// wraps prec so that its Apply() calls prec->ApplyInverse().
PrecOp = rcp(new Epetra_InvOperator(&*prec));
}
// Create a block of vectors as the initial guess, filled with random values.
RCP<Epetra_MultiVector> ivec =
rcp(new Epetra_MultiVector(K->OperatorDomainMap(), blockSize));
ivec->Random ();
// Create the generalized eigenproblem K x = lambda M x.
RCP<BasicEigenproblem<double, MV, OP> > MyProblem =
rcp(new BasicEigenproblem<double, MV, OP>(K, M, ivec));

// Inform the eigenproblem that (K, M) is a symmetric (Hermitian) pencil.
MyProblem->setHermitian (true);
if (usePrec) {
MyProblem->setPrec (PrecOp);
}
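
// Set the number of eigenvalues requested, then finish setting up the
// eigenproblem.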
MyProblem->setNEV (nev);
const bool success = MyProblem->setProblem ();
if (! success) {
printer.
print (
Errors,
"Anasazi::BasicEigenproblem::setProblem() reported an error.\n");
#ifdef HAVE_MPI
MPI_Finalize ();
#endif // HAVE_MPI
return -1;
}
// Create the parameter list that configures the LOBPCG solver manager.
Teuchos::ParameterList MyPL;
MyPL.set("Verbosity", verbosity);
MyPL.set("Which", which);
MyPL.set("Block Size", blockSize);
MyPL.set("Maximum Iterations", maxIters);
MyPL.set("Convergence Tolerance", tol);
MyPL.set("Full Ortho", true);
MyPL.set("Use Locking", true);
printer.stream(Errors) << "Solving eigenvalue problem..." << endl;
// Create the LOBPCG solver manager and solve the eigenproblem.
LOBPCGSolMgr<double, MV, OP> MySolverMan(MyProblem, MyPL);
ReturnType returnCode = MySolverMan.solve();

// Retrieve the eigenvalues and eigenvectors from the eigenproblem.
Eigensolution<double, MV> sol = MyProblem->getSolution();
std::vector<Value<double> > evals = sol.Evals;
RCP<MV> evecs = sol.Evecs;

// Compute direct residual norms ||K x_i - lambda_i M x_i||_2.
std::vector<double> normR(sol.numVecs);
if (sol.numVecs > 0) {
// T is a diagonal matrix whose entries are the computed eigenvalues.
Teuchos::SerialDenseMatrix<int, double> T(sol.numVecs, sol.numVecs);
for (int i = 0; i < sol.numVecs; ++i) {
T(i,i) = evals[i].realpart;
}
Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
K->Apply( *evecs, Kvec );
M->Apply( *evecs, Mvec );
// Kvec := K*X - M*X*T, then take the column norms.
MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
MVT::MvNorm( Kvec, normR );
}
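
// Print a summary: convergence status, then each eigenvalue with its
// residual norm scaled by the eigenvalue.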
std::ostringstream os;
os.setf (std::ios_base::right, std::ios_base::adjustfield);
os << "Solver manager returned "
<< (returnCode ==
Converged ?
"converged." :
"unconverged.") << endl;
os << endl;
os << "------------------------------------------------------" << endl;
os << std::setw(16) << "Eigenvalue"
<< std::setw(18) << "Direct Residual"
<< endl;
os << "------------------------------------------------------" << endl;
for (int i = 0; i < sol.numVecs; ++i) {
os << std::setw(16) << evals[i].realpart
<< std::setw(18) << normR[i]/evals[i].realpart
<< endl;
}
os << "------------------------------------------------------" << endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return 0;
}
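The options registered above can be set at run time; for example, passing --nev=10 --blockSize=5 --sort=SM --usePrec --verbose to the built executable (whatever name the build system gives it) requests ten eigenpairs of smallest magnitude with the Ifpack preconditioner enabled and verbose solver output.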