diff --git a/dumux/linear/istlsolverfactorybackend.hh b/dumux/linear/istlsolverfactorybackend.hh
index bcb18ace81c351aab665a4c3edaaab7d7c958b06..700b11ff7e7559283ba0f5afa6d80f41be02101a 100644
--- a/dumux/linear/istlsolverfactorybackend.hh
+++ b/dumux/linear/istlsolverfactorybackend.hh
@@ -37,6 +37,7 @@
 #include <dumux/linear/parallelhelpers.hh>
 #include <dumux/linear/istlsolverregistry.hh>
 #include <dumux/linear/solvercategory.hh>
+#include <dumux/linear/istlsolversmultitype.hh>
 
 namespace Dumux {
 
@@ -69,11 +70,10 @@ int initSolverFactoriesForMultiTypeBlockMatrix()
     return Dune::addRegistryToFactory<TLS>(isfac, Dune::IterativeSolverTag{});
 #else
     using OpTraits = Dune::OperatorTraits<LinearOperator>;
-    auto& sfac = Dune::SolverFactory<LinearOperator>::instance();
     auto& pfac = Dune::PreconditionerFactory<LinearOperator>::instance();
-    Dune::addRegistryToFactory<OpTraits>(sfac, Dumux::MultiTypeBlockMatrixDirectSolverTag{});
     Dune::addRegistryToFactory<OpTraits>(pfac, Dumux::MultiTypeBlockMatrixPreconditionerTag{});
-    return Dune::addRegistryToFactory<OpTraits>(sfac, Dune::SolverTag{});
+    auto& sfac = Dune::SolverFactory<LinearOperator>::instance();
+    return Dune::addRegistryToFactory<OpTraits>(sfac, Dumux::MultiTypeBlockMatrixSolverTag{});
 #endif
 }
 } // end namespace
diff --git a/dumux/linear/istlsolverregistry.hh b/dumux/linear/istlsolverregistry.hh
index 62043b7fefdf8c047256e9c6c7c3100b69fbdb4e..a23dce8fbf83f4fc9ca631299e1ee4d55ea4d408 100644
--- a/dumux/linear/istlsolverregistry.hh
+++ b/dumux/linear/istlsolverregistry.hh
@@ -34,10 +34,30 @@
 DUNE_REGISTRY_PUT(tag, name, __VA_ARGS__); \
 } namespace Dumux { \
 static_assert(true, "Require semicolon after macro call")
 
+/*!
+ * \brief Register a solver from the Dumux namespace
+ *
+ * Use this macro in namespace Dumux.
+ * Example:
+ * DUMUX_REGISTER_SOLVER("mysolver", Dumux::MultiTypeBlockMatrixSolverTag, Dune::defaultIterativeSolverCreator<Dumux::MySolver>());
+ * Explicitly specifying the namespaces is required.
+ * Set parameter Solver.Type to "mysolver" to use it through the factory.
+ *
+ * In the macro implementation, the final static_assert forces implementers
+ * to put a semicolon after every DUMUX_REGISTER_SOLVER macro call (cf. example)
+ * and avoids a compiler warning for an empty line semicolon at the same time
+ */
+#define DUMUX_REGISTER_SOLVER(name, tag, ...) \
+} namespace Dune { \
+DUNE_REGISTRY_PUT(tag, name, __VA_ARGS__); \
+} namespace Dumux { \
+static_assert(true, "Require semicolon after macro call")
+
 namespace Dumux {
 namespace {
 struct MultiTypeBlockMatrixPreconditionerTag {};
 struct MultiTypeBlockMatrixDirectSolverTag {};
+struct MultiTypeBlockMatrixSolverTag {};
 } // end namespace
 } // end namespace Dumux
diff --git a/dumux/linear/istlsolversmultitype.hh b/dumux/linear/istlsolversmultitype.hh
new file mode 100644
index 0000000000000000000000000000000000000000..3996b7a8c3ce99260f063c907f4de3d175ca6d93
--- /dev/null
+++ b/dumux/linear/istlsolversmultitype.hh
@@ -0,0 +1,93 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+//
+// SPDX-FileCopyrightInfo: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+/*!
+ * \file
+ * \ingroup Linear
+ * \brief The specialized Dumux tag for the ISTL registry including only ISTL solvers compatible
+ * with MultiTypeBlockMatrix
+ */
+#ifndef DUMUX_LINEAR_ISTL_SOLVERS_MULTITYPE_HH
+#define DUMUX_LINEAR_ISTL_SOLVERS_MULTITYPE_HH
+
+#include <dune/common/version.hh>
+
+#include <dune/istl/solvers.hh>
+#include <dune/istl/cholmod.hh>
+#include <dune/istl/umfpack.hh>
+
+#include <dumux/linear/istlsolverregistry.hh>
+
+namespace Dumux {
+
+DUMUX_REGISTER_SOLVER("loopsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::LoopSolver>());
+DUMUX_REGISTER_SOLVER("gradientsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::GradientSolver>());
+DUMUX_REGISTER_SOLVER("cgsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::CGSolver>());
+DUMUX_REGISTER_SOLVER("bicgstabsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::BiCGSTABSolver>());
+DUMUX_REGISTER_SOLVER("minressolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::MINRESSolver>());
+DUMUX_REGISTER_SOLVER("restartedgmressolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::RestartedGMResSolver>());
+DUMUX_REGISTER_SOLVER("restartedflexiblegmressolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::RestartedFlexibleGMResSolver>());
+DUMUX_REGISTER_SOLVER("generalizedpcgsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::GeneralizedPCGSolver>());
+DUMUX_REGISTER_SOLVER("restartedfcgsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::RestartedFCGSolver>());
+DUMUX_REGISTER_SOLVER("completefcgsolver", Dumux::MultiTypeBlockMatrixSolverTag, defaultIterativeSolverCreator<Dune::CompleteFCGSolver>());
+#if HAVE_SUITESPARSE_CHOLMOD
+DUMUX_REGISTER_SOLVER("cholmod", Dumux::MultiTypeBlockMatrixSolverTag,
+                      [](auto opTraits, const auto& op, const Dune::ParameterTree& config)
+                      -> std::shared_ptr<typename decltype(opTraits)::solver_type>
+                      {
+                          using OpTraits = decltype(opTraits);
+                          using M = typename OpTraits::matrix_type;
+                          using D = typename OpTraits::domain_type;
+                          // works only for sequential operators
+                          if constexpr (OpTraits::isParallel){
+                              if(opTraits.getCommOrThrow(op).communicator().size() > 1)
+                                  DUNE_THROW(Dune::InvalidStateException, "CholMod works only for sequential operators.");
+                          }
+                          if constexpr (OpTraits::isAssembled &&
+                                        // check whether the Matrix field_type is double or float
+                                        (std::is_same_v<typename FieldTraits<D>::field_type, double> ||
+                                         std::is_same_v<typename FieldTraits<D>::field_type, float>)){
+                              const auto& A = opTraits.getAssembledOpOrThrow(op);
+                              const M& mat = A->getmat();
+                              auto solver = std::make_shared<Dune::Cholmod<D>>();
+                              solver->setMatrix(mat);
+                              return solver;
+                          }
+                          DUNE_THROW(UnsupportedType,
+                                     "Unsupported Type in Cholmod (only double and float supported)");
+                      });
+#endif // HAVE_SUITESPARSE_CHOLMOD
+#if HAVE_SUITESPARSE_UMFPACK && DUNE_VERSION_GTE(DUNE_ISTL, 2, 10)
+DUMUX_REGISTER_SOLVER("umfpack", Dumux::MultiTypeBlockMatrixSolverTag,
+                      [](auto opTraits, const auto& op, const Dune::ParameterTree& config)
+                      -> std::shared_ptr<typename decltype(opTraits)::solver_type>
+                      {
+                          using OpTraits = decltype(opTraits);
+                          // works only for sequential operators
+                          if constexpr (OpTraits::isParallel){
+                              if(opTraits.getCommOrThrow(op).communicator().size() > 1)
+                                  DUNE_THROW(Dune::InvalidStateException, "UMFPack works only for sequential operators.");
+                          }
+                          if constexpr (OpTraits::isAssembled){
+                              using M = typename OpTraits::matrix_type;
+                              // check if UMFPack<M>* is convertible to
+                              // InverseOperator*. This checks compatibility of the
+                              // domain and range types
+                              if constexpr (UMFPackImpl::isValidBlock<OpTraits>::value) {
+                                  const auto& A = opTraits.getAssembledOpOrThrow(op);
+                                  const M& mat = A->getmat();
+                                  int verbose = config.get("verbose", 0);
+                                  return std::make_shared<Dune::UMFPack<M>>(mat,verbose);
+                              }
+                          }
+                          DUNE_THROW(UnsupportedType,
+                                     "Unsupported Type in UMFPack (only double and std::complex<double> supported)");
+                          return nullptr;
+                      });
+#endif // HAVE_SUITESPARSE_UMFPACK && DUNE_VERSION_GTE(DUNE_ISTL, 2, 10)
+} // end namespace Dumux
+
+#endif // DUMUX_LINEAR_ISTL_SOLVERS_MULTITYPE_HH