From 4d91b51f2da4ad6d5b9ed68668abbed6361eca08 Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 13:54:49 +0100 Subject: [PATCH 1/7] [linear] Rename amgtraits to linearsolvertraits and deprecate --- dumux/linear/CMakeLists.txt | 1 + dumux/linear/amgtraits.hh | 141 +---------------------- dumux/linear/linearsolvertraits.hh | 175 +++++++++++++++++++++++++++++ 3 files changed, 179 insertions(+), 138 deletions(-) create mode 100644 dumux/linear/linearsolvertraits.hh diff --git a/dumux/linear/CMakeLists.txt b/dumux/linear/CMakeLists.txt index ff40c00525..1727503eac 100644 --- a/dumux/linear/CMakeLists.txt +++ b/dumux/linear/CMakeLists.txt @@ -2,6 +2,7 @@ install(FILES amgbackend.hh amgparallelhelpers.hh amgtraits.hh +linearsolvertraits.hh linearsolveracceptsmultitypematrix.hh matrixconverter.hh pdesolver.hh diff --git a/dumux/linear/amgtraits.hh b/dumux/linear/amgtraits.hh index 942f68ee65..7e856c7f1d 100644 --- a/dumux/linear/amgtraits.hh +++ b/dumux/linear/amgtraits.hh @@ -24,151 +24,16 @@ #ifndef DUMUX_AMG_TRAITS_HH #define DUMUX_AMG_TRAITS_HH -#include -#include -#include -#include -#include -#include +#warning "This header is deprecated and will be removed after release 3.2. Use linearsolver/linearsolvertraits.hh" -#include +#include "linearsolvertraits.hh" -// TODO: The following is a temporary solution to make the parallel AMG work -// for UGGrid. Once it is resolved upstream -// (https://gitlab.dune-project.org/core/dune-grid/issues/78), -// it should be guarded by a DUNE_VERSION macro and removed later. - -#if HAVE_UG -#include -#endif // HAVE_UG - -namespace Dumux { -namespace Temp { -namespace Capabilities { - -template -struct canCommunicate -{ - static const bool v = false; -}; - -#if HAVE_UG -template -struct canCommunicate, codim> -{ - static const bool v = true; -}; -#endif // HAVE_UG - -} // namespace Capabilities -} // namespace Temp -} // namespace Dumux -// end workaround namespace Dumux { -//! The implementation is specialized for the different discretizations -template struct AmgTraitsImpl; - //! The type traits required for using the AMG backend template -using AmgTraits = AmgTraitsImpl; - -//! NonoverlappingSolverTraits used by discretization with non-overlapping parallel model -template -class NonoverlappingSolverTraits -{ -public: - using Comm = Dune::Amg::SequentialInformation; - using LinearOperator = Dune::MatrixAdapter; - using ScalarProduct = Dune::SeqScalarProduct; - using Smoother = Dune::SeqSSOR; -}; - -#if HAVE_MPI -template -class NonoverlappingSolverTraits -{ -public: - using Comm = Dune::OwnerOverlapCopyCommunication,int>; - using LinearOperator = Dune::NonoverlappingSchwarzOperator; - using ScalarProduct = Dune::NonoverlappingSchwarzScalarProduct; - using Smoother = Dune::NonoverlappingBlockPreconditioner >; -}; -#endif - -//! 
Box: use the non-overlapping AMG -template -struct AmgTraitsImpl -{ - using Grid = typename GridGeometry::GridView::Traits::Grid; - enum { - dofCodim = Grid::dimension, - isNonOverlapping = true, - // TODO: see above for description of this workaround, remove second line if fixed upstream - isParallel = Dune::Capabilities::canCommunicate::v - || Dumux::Temp::Capabilities::canCommunicate::v - }; - using MType = Matrix; - using VType = Dune::BlockVector>; - using SolverTraits = NonoverlappingSolverTraits; - using Comm = typename SolverTraits::Comm; - using LinearOperator = typename SolverTraits::LinearOperator; - using ScalarProduct = typename SolverTraits::ScalarProduct; - using Smoother = typename SolverTraits::Smoother; - - using DofMapper = typename GridGeometry::VertexMapper; -}; - -//! OverlappingSolverTraits used by discretization with overlapping parallel model -template -class OverlappingSolverTraits -{ -public: - using Comm = Dune::Amg::SequentialInformation; - using LinearOperator = Dune::MatrixAdapter; - using ScalarProduct = Dune::SeqScalarProduct; - using Smoother = Dune::SeqSSOR; -}; - -#if HAVE_MPI -template -class OverlappingSolverTraits -{ -public: - using Comm = Dune::OwnerOverlapCopyCommunication,int>; - using LinearOperator = Dune::OverlappingSchwarzOperator; - using ScalarProduct = Dune::OverlappingSchwarzScalarProduct; - using Smoother = Dune::BlockPreconditioner >; -}; -#endif - -//! Cell-centered tpfa: use the overlapping AMG -template -struct AmgTraitsImpl -{ - using Grid = typename GridGeometry::GridView::Traits::Grid; - enum { - dofCodim = 0, - isNonOverlapping = false, - // TODO: see above for description of this workaround, remove second line if fixed upstream - isParallel = Dune::Capabilities::canCommunicate::v - || Dumux::Temp::Capabilities::canCommunicate::v - }; - using MType = Matrix; - using VType = Dune::BlockVector>; - using SolverTraits = OverlappingSolverTraits; - using Comm = typename SolverTraits::Comm; - using LinearOperator = typename SolverTraits::LinearOperator; - using ScalarProduct = typename SolverTraits::ScalarProduct; - using Smoother = typename SolverTraits::Smoother; - - using DofMapper = typename GridGeometry::ElementMapper; -}; - -template -struct AmgTraitsImpl -: public AmgTraitsImpl {}; +using AmgTraits [[deprecated("Use LinearSolverTraits instead. AmgTraits will be removed after 3.2!")]] = LinearSolverTraits; } // end namespace Dumux diff --git a/dumux/linear/linearsolvertraits.hh b/dumux/linear/linearsolvertraits.hh new file mode 100644 index 0000000000..a004995278 --- /dev/null +++ b/dumux/linear/linearsolvertraits.hh @@ -0,0 +1,175 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . 
* + *****************************************************************************/ +/*! + * \file + * \ingroup Linear + * \brief Define traits for linear solvers. + */ +#ifndef DUMUX_LINEAR_SOLVER_TRAITS_HH +#define DUMUX_LINEAR_SOLVER_TRAITS_HH + +#include +#include +#include +#include +#include +#include + +#include + +// TODO: The following is a temporary solution to make the parallel AMG work +// for UGGrid. Once it is resolved upstream +// (https://gitlab.dune-project.org/core/dune-grid/issues/78), +// it should be guarded by a DUNE_VERSION macro and removed later. + +#if HAVE_UG +#include +#endif // HAVE_UG + +namespace Dumux { +namespace Temp { +namespace Capabilities { + +template +struct canCommunicate +{ + static const bool v = false; +}; + +#if HAVE_UG +template +struct canCommunicate, codim> +{ + static const bool v = true; +}; +#endif // HAVE_UG + +} // namespace Capabilities +} // namespace Temp +} // namespace Dumux +// end workaround + +namespace Dumux { + +//! The implementation is specialized for the different discretizations +template struct LinearSolverTraitsImpl; + +//! The type traits required for using the IstlFactoryBackend +template +using LinearSolverTraits = LinearSolverTraitsImpl; + +//! NonoverlappingSolverTraits used by discretization with non-overlapping parallel model +template +class NonoverlappingSolverTraits +{ +public: + using Comm = Dune::Amg::SequentialInformation; + using LinearOperator = Dune::MatrixAdapter; + using ScalarProduct = Dune::SeqScalarProduct; + using Smoother = Dune::SeqSSOR; +}; + +#if HAVE_MPI +template +class NonoverlappingSolverTraits +{ +public: + using Comm = Dune::OwnerOverlapCopyCommunication,int>; + using LinearOperator = Dune::NonoverlappingSchwarzOperator; + using ScalarProduct = Dune::NonoverlappingSchwarzScalarProduct; + using Smoother = Dune::NonoverlappingBlockPreconditioner >; +}; +#endif + +//! Box: use non-overlapping model +template +struct LinearSolverTraitsImpl +{ + using Grid = typename GridGeometry::GridView::Traits::Grid; + enum { + dofCodim = Grid::dimension, + isNonOverlapping = true, + // TODO: see above for description of this workaround, remove second line if fixed upstream + isParallel = Dune::Capabilities::canCommunicate::v + || Dumux::Temp::Capabilities::canCommunicate::v + }; + using MType = Matrix; + using VType = Dune::BlockVector>; + using SolverTraits = NonoverlappingSolverTraits; + using Comm = typename SolverTraits::Comm; + using LinearOperator = typename SolverTraits::LinearOperator; + using ScalarProduct = typename SolverTraits::ScalarProduct; + using Smoother = typename SolverTraits::Smoother; + + using DofMapper = typename GridGeometry::VertexMapper; +}; + +//! OverlappingSolverTraits used by discretization with overlapping parallel model +template +class OverlappingSolverTraits +{ +public: + using Comm = Dune::Amg::SequentialInformation; + using LinearOperator = Dune::MatrixAdapter; + using ScalarProduct = Dune::SeqScalarProduct; + using Smoother = Dune::SeqSSOR; +}; + +#if HAVE_MPI +template +class OverlappingSolverTraits +{ +public: + using Comm = Dune::OwnerOverlapCopyCommunication,int>; + using LinearOperator = Dune::OverlappingSchwarzOperator; + using ScalarProduct = Dune::OverlappingSchwarzScalarProduct; + using Smoother = Dune::BlockPreconditioner >; +}; +#endif + +//! 
Cell-centered tpfa: use overlapping model +template +struct LinearSolverTraitsImpl +{ + using Grid = typename GridGeometry::GridView::Traits::Grid; + enum { + dofCodim = 0, + isNonOverlapping = false, + // TODO: see above for description of this workaround, remove second line if fixed upstream + isParallel = Dune::Capabilities::canCommunicate::v + || Dumux::Temp::Capabilities::canCommunicate::v + }; + using MType = Matrix; + using VType = Dune::BlockVector>; + using SolverTraits = OverlappingSolverTraits; + using Comm = typename SolverTraits::Comm; + using LinearOperator = typename SolverTraits::LinearOperator; + using ScalarProduct = typename SolverTraits::ScalarProduct; + using Smoother = typename SolverTraits::Smoother; + + using DofMapper = typename GridGeometry::ElementMapper; +}; + +template +struct LinearSolverTraitsImpl +: public LinearSolverTraitsImpl {}; + +} // end namespace Dumux + +#endif // DUMUX_LINEAR_SOLVER_TRAITS_HH -- GitLab From 11f09e565b50ac37515f88eac04debbe66346d6c Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 16:52:41 +0100 Subject: [PATCH 2/7] [linear] Rename amgparallelhelpers to parallelhelpers, deprecate and cleanup * createIndexSetAndProjectForAMG -> createParallelIndexSet * getExtendedMatrix -> extendMatrix --- dumux/linear/CMakeLists.txt | 1 + dumux/linear/amgparallelhelpers.hh | 1077 +--------------------------- dumux/linear/parallelhelpers.hh | 1029 ++++++++++++++++++++++++++ 3 files changed, 1032 insertions(+), 1075 deletions(-) create mode 100644 dumux/linear/parallelhelpers.hh diff --git a/dumux/linear/CMakeLists.txt b/dumux/linear/CMakeLists.txt index 1727503eac..115cb9a3ed 100644 --- a/dumux/linear/CMakeLists.txt +++ b/dumux/linear/CMakeLists.txt @@ -5,6 +5,7 @@ amgtraits.hh linearsolvertraits.hh linearsolveracceptsmultitypematrix.hh matrixconverter.hh +parallelhelpers.hh pdesolver.hh scotchbackend.hh seqsolverbackend.hh diff --git a/dumux/linear/amgparallelhelpers.hh b/dumux/linear/amgparallelhelpers.hh index 849d5d868b..e1f9a65ed8 100644 --- a/dumux/linear/amgparallelhelpers.hh +++ b/dumux/linear/amgparallelhelpers.hh @@ -25,1080 +25,7 @@ #ifndef DUMUX_AMGPARALLELHELPERS_HH #define DUMUX_AMGPARALLELHELPERS_HH -#include -#include -#include -#include -#include -#include - -namespace Dumux { - -/*! - * \ingroup Linear - * \brief A parallel helper class providing a nonoverlapping - * decomposition of all degrees of freedom - */ -// operator that resets result to zero at constrained DOFS -template -class ParallelISTLHelper -{ - using DofMapper = typename AmgTraits::DofMapper; - enum { dofCodim = AmgTraits::dofCodim }; - - class BaseGatherScatter - { - public: - BaseGatherScatter(const DofMapper& mapper) - : mapper_(mapper) {} - - template - int index(const EntityType& e) const - { return mapper_.index(e); } - - private: - const DofMapper& mapper_; - }; - - /*! - * \brief GatherScatter implementation that makes a right hand side in the box model consistent. 
- */ - template - class ConsistencyBoxGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF,typename V::block_type> - { - public: - using DataType = typename V::block_type; - - ConsistencyBoxGatherScatter(V& container, const DofMapper& mapper) - : BaseGatherScatter(mapper), container_(container) - {} - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - buff.write(container_[this->index(e)]); - } - - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - typename V::block_type block; - buff.read(block); - container_[this->index(e)]+=block; - } - private: - V& container_; - }; - - - /*! - * \brief Writes 1<<24 to each data item (of the container) that is gathered or scattered - * and is neither interior nor border. - * - * Can be used to mark ghost cells. - */ - class GhostGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF - { - public: - using DataType = std::size_t; - - GhostGatherScatter(std::vector& ranks, const DofMapper& mapper) - : BaseGatherScatter(mapper), ranks_(ranks) - {} - - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - std::size_t& data= ranks_[this->index(e)]; - if (e.partitionType()!=Dune::InteriorEntity && e.partitionType()!=Dune::BorderEntity) - data = (1<<24); - buff.write(data); - } - - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - std::size_t x; - std::size_t& data = ranks_[this->index(e)]; - buff.read(x); - if (e.partitionType()!=Dune::InteriorEntity && e.partitionType()!=Dune::BorderEntity) - data= (1<<24); - } - private: - std::vector& ranks_; - }; - - /*! - * \brief GatherScatter handle that sets 1<<24 for data items neither associated to - * the interior or border and take the minimum when scattering. - * - * Used to compute an owner rank for each unknown. - */ - class InteriorBorderGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF - { - public: - using DataType = std::size_t; - - InteriorBorderGatherScatter(std::vector& ranks, const DofMapper& mapper) - : BaseGatherScatter(mapper), ranks_(ranks) - {} - - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - - std::size_t& data = ranks_[this->index(e)]; - if (e.partitionType()!=Dune::InteriorEntity && e.partitionType()!=Dune::BorderEntity) - data = (1<<24); - buff.write(data); - } - - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - using std::min; - std::size_t x; - std::size_t& data = ranks_[this->index(e)]; - buff.read(x); - if (e.partitionType()!=Dune::InteriorEntity && e.partitionType()!=Dune::BorderEntity) - data = x; - else - data = min(data,x); - } - private: - std::vector& ranks_; - }; - - /*! - * \brief GatherScatter handle for finding out about neighbouring processor ranks. 
- * - */ - struct NeighbourGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF - { - using DataType = int; - - NeighbourGatherScatter(const DofMapper& mapper, int rank_, std::set& neighbours_) - : BaseGatherScatter(mapper), myrank(rank_), neighbours(neighbours_) - {} - - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, const EntityType &e) const - { - buff.write(myrank); - } - - template - void scatter (MessageBuffer& buff, const EntityType &e, size_t n) - { - int x; - buff.read(x); - neighbours.insert(x); - } - int myrank; - std::set& neighbours; - }; - - - /*! - * \brief GatherScatter handle for finding out about neighbouring processor ranks. - * - */ - struct SharedGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF - { - using DataType = int; - - SharedGatherScatter(std::vector& shared, const DofMapper& mapper) - : BaseGatherScatter(mapper), shared_(shared) - {} - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, EntityType& e) const - { - int data=true; - buff.write(data); - } - - template - void scatter (MessageBuffer& buff, const EntityType &e, size_t n) - { - int x; - buff.read(x); - int& data= shared_[this->index(e)]; - data = data || x; - } - private: - std::vector& shared_; - }; - - /*! - * \brief GatherScatter handle for finding out about neighbouring processor ranks. - * - */ - template - struct GlobalIndexGatherScatter - : public BaseGatherScatter, - public Dune::CommDataHandleIF, GI> - { - using DataType = GI; - GlobalIndexGatherScatter(std::vector& gindices, const DofMapper& mapper) - : BaseGatherScatter(mapper), gindices_(gindices) - {} - - bool contains(int dim, int codim) const - { - return dofCodim==codim; - } - - bool fixedsize(int dim, int codim) const - { - return true; - } - - template - size_t size (EntityType& e) const - { - return 1; - } - - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - buff.write(gindices_[this->index(e)]); - } - - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - using std::min; - DataType x; - buff.read(x); - gindices_[this->index(e)] = min(gindices_[this->index(e)], x); - } - private: - std::vector& gindices_; - }; - -public: - - ParallelISTLHelper (const GridView& gridView, const DofMapper& mapper, int verbose=1) - : gridView_(gridView), mapper_(mapper), verbose_(verbose), initialized_(false) - {} - - // \brief Initializes the markers for ghosts and owners with the correct size and values. 
- // - void initGhostsAndOwners() - { - owner_.resize(mapper_.size(), - gridView_.comm().rank()); - isGhost_.resize(mapper_.size(),0.0); - // find out about ghosts - GhostGatherScatter ggs(owner_, mapper_); - - if (gridView_.comm().size()>1) - gridView_.communicate(ggs, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication); - - isGhost_ = owner_; - - // partition interior/border - InteriorBorderGatherScatter dh(owner_, mapper_); - - if (gridView_.comm().size()>1) - gridView_.communicate(dh, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); - - // convert vector into mask vector - for (auto& v : owner_) - v = (v == gridView_.comm().rank()) ? 1.0 : 0.0; - - initialized_=true; - } - - // keep only DOFs assigned to this processor - template - void mask (W& w) const - { - auto v1=w.begin(); - - for(auto v2=owner_.begin(), vend=owner_.end(); v2!=vend; ++v1, ++v2) - v1*=v2; - } - - // access to mask vector - double mask (std::size_t i) const - { - return owner_[i]; - } - - // access to ghost vector - std::size_t ghost (std::size_t i) const - { - return isGhost_[i]; - } - - // \brief Make a vector of the box model consistent. - template - void makeNonOverlappingConsistent(Dune::BlockVector& v) - { -#if HAVE_MPI - ConsistencyBoxGatherScatter > gs(v, mapper_); - if (gridView_.comm().size() > 1) - gridView_.communicate(gs, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); -#endif - } - - -#if HAVE_MPI - - /*! - * \brief Creates a matrix suitable for parallel AMG and the parallel information - * - * \tparam MatrixType The type of the ISTL matrix used. - * \tparam Comm The type of the OwnerOverlapCopyCommunication - * \param m The local matrix. - * \param c The parallel information object providing index set, interfaces and - * communicators. - */ - template - void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c); -#endif - - //! Return the dofMapper - const DofMapper& dofMapper() const - { return mapper_; } - - //! Return the gridView - const GridView& gridView() const - { return gridView_; } - -private: - const GridView gridView_; //!< the grid view - const DofMapper& mapper_; //!< the dof mapper - std::vector owner_; //!< vector to identify unique decomposition - std::vector isGhost_; //!< vector to identify ghost dofs - int verbose_; //!< verbosity - bool initialized_; //!< whether isGhost and owner arrays are initialized - -}; // class ParallelISTLHelper - -/*! - * \ingroup Linear - * \brief Helper class for adding up matrix entries on border. - * \tparam GridView The grid view to work on - * \tparam AmgTraits traits class - */ -template -class EntityExchanger -{ - using Matrix = typename AmgTraits::MType; - enum { dim = GridView::dimension }; - enum { dofCodim = AmgTraits::dofCodim }; - using Grid = typename GridView::Traits::Grid; - using BlockType = typename Matrix::block_type; - using IDS = typename Grid::Traits::GlobalIdSet; - using IdType = typename IDS::IdType; - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; - using DofMapper = typename AmgTraits::DofMapper; - -public: - /*! \brief Constructor. Sets up the local to global relations. 
- \param[in] gridView The gridView on which we are operating - \param[in] mapper The local dof mapper - */ - EntityExchanger(const GridView& gridView, const DofMapper& mapper) - : gridView_(gridView), mapper_(mapper) - { - gid2Index_.clear(); - index2GID_.clear(); - - for (const auto& entity : entities(gridView_, Dune::Codim())) - { - if (entity.partitionType() == Dune::BorderEntity) - { - int localIdx = mapper_.index(entity); - IdType dofIdxGlobal = gridView_.grid().globalIdSet().id(entity); - - std::pair g2iPair(dofIdxGlobal, localIdx); - gid2Index_.insert(g2iPair); - - std::pair i2gPair(localIdx, dofIdxGlobal); - index2GID_.insert(i2gPair); - } - } - } - - /*! - * \brief A DataHandle class to exchange matrix sparsity patterns. - * - * We look at a 2D example with a nonoverlapping grid, - * two processes and no ghosts with Q1 discretization. - * Process 0 has the left part of the domain - * with three cells and eight vertices (1-8), - * Process 1 the right part with three cells - * and eight vertices (2,4,7-12). - *
-     *  1 _ 2        2 _ 9 _ 10
-     *  |   |        |   |   |
-     *  3 _ 4 _ 7    4 _ 7 _ 11
-     *  |   |   |        |   |
-     *  5 _ 6 _ 8        8 _ 12
-     *  
- * If we look at vertex 7 and the corresponding entries in the matrix for P0, - * there will be entries for (7,4) and (7,8), but not for (7,2). - * The MatPatternExchange class will find these entries and returns a vector "sparsity", - * that contains all missing connections. - */ - class MatPatternExchange - : public Dune::CommDataHandleIF - { - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; - public: - //! Export type of data for message buffer - using DataType = IdType; - - /** \brief Constructor - \param[in] mapper The local dof mapper. - \param[in] g2i Global to local index map. - \param[in] i2g Local to global index map. - \param[in] A Matrix to operate on. - \param[in] helper parallel istl helper. - */ - MatPatternExchange (const DofMapper& mapper, - const std::map& g2i, - const std::map& i2g, Matrix& A, - const ParallelISTLHelper& helper) - : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), - sparsity_(A.N()), A_(A), helper_(helper) - {} - - /*! - * \brief Returns true if data for given valid codim should be communicated - */ - bool contains (int dim, int codim) const - { - return (codim==dofCodim); - } - - /*! - * \brief Returns true if size of data per entity of given dim and codim is a constant - */ - bool fixedsize (int dim, int codim) const - { - return false; - } - - /*! - * \brief How many objects of type DataType have to be sent for a given entity - */ - template - size_t size (EntityType& e) const - { - int i = mapper_.index(e); - int n = 0; - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it = index2GID_.find(j.index()); - if (it != index2GID_.end()) - n++; - } - - return n; - } - - /*! - * \brief Pack data from user to message buffer - */ - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - int i = mapper_.index(e); - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it=index2GID_.find(j.index()); - if (it != index2GID_.end()) - buff.write(it->second); - } - - } - - /*! - * \brief Unpack data from message buffer to user - */ - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - int i = mapper_.index(e); - for (size_t k = 0; k < n; k++) - { - IdType id; - buff.read(id); - // only add entries corresponding to border entities - typename std::map::const_iterator it = gid2Index_.find(id); - if (it != gid2Index_.end() - && sparsity_[i].find(it->second) == sparsity_[i].end() - && helper_.ghost(it->second) != 1<<24) - sparsity_[i].insert(it->second); - } - } - - /*! - * \brief Get the communicated sparsity pattern - * \return the vector with the sparsity pattern - */ - std::vector >& sparsity () - { - return sparsity_; - } - - private: - const DofMapper& mapper_; - const std::map& gid2Index_; - const std::map& index2GID_; - std::vector > sparsity_; - Matrix& A_; - const ParallelISTLHelper& helper_; - - }; // class MatPatternExchange - - //! Local matrix blocks associated with the global id set - struct MatEntry - { - IdType first; - BlockType second; - MatEntry (const IdType& f, const BlockType& s) : first(f), second(s) {} - MatEntry () {} - }; - - //! A DataHandle class to exchange matrix entries - class MatEntryExchange - : public Dune::CommDataHandleIF - { - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; - public: - //! Export type of data for message buffer - using DataType = MatEntry; - - /*! 
- * \brief Constructor - * \param[in] mapper The local dof mapper. - * \param[in] g2i Global to local index map. - * \param[in] i2g Local to global index map. - * \param[in] A Matrix to operate on. - */ - MatEntryExchange (const DofMapper& mapper, const std::map& g2i, - const std::map& i2g, - Matrix& A) - : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), A_(A) - {} - - /*! - * \brief Returns true if data for given valid codim should be communicated - */ - bool contains (int dim, int codim) const - { - return (codim==dofCodim); - } - - /*! - * \brief Returns true if size of data per entity of given dim and codim is a constant - */ - bool fixedsize (int dim, int codim) const - { - return false; - } - - /*! - * \brief How many objects of type DataType have to be sent for a given entity - */ - template - size_t size (EntityType& e) const - { - int i = mapper_.index(e); - int n = 0; - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it = index2GID_.find(j.index()); - if (it != index2GID_.end()) - n++; - } - - return n; - } - - /*! - * \brief Pack data from user to message buffer - */ - template - void gather (MessageBuffer& buff, const EntityType& e) const - { - int i = mapper_.index(e); - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it=index2GID_.find(j.index()); - if (it != index2GID_.end()) - buff.write(MatEntry(it->second,*j)); - } - - } - - /*! - * \brief Unpack data from message buffer to user - */ - template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) - { - int i = mapper_.index(e); - for (size_t k = 0; k < n; k++) - { - MatEntry m; - buff.read(m); - // only add entries corresponding to border entities - typename std::map::const_iterator it = gid2Index_.find(m.first); - if (it != gid2Index_.end()) - if (A_[i].find(it->second) != A_[i].end()) - A_[i][it->second] += m.second; - } - } - - private: - const DofMapper& mapper_; - const std::map& gid2Index_; - const std::map& index2GID_; - Matrix& A_; - - }; // class MatEntryExchange - - /*! - * \brief communicates values for the sparsity pattern of the new matrix. - * \param A Matrix to operate on. - * \param helper ParallelelISTLHelper. 
- */ - void getExtendedMatrix (Matrix& A, const ParallelISTLHelper& helper) - { - if (gridView_.comm().size() > 1) - { - Matrix tmp(A); - std::size_t nnz=0; - // get entries from other processes - MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, helper); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - std::vector >& sparsity = datahandle.sparsity(); - // add own entries, count number of nonzeros - for (RowIterator i = A.begin(); i != A.end(); ++i) - { - for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) - { - if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) - { - sparsity[i.index()].insert(j.index()); - } - } - nnz += sparsity[i.index()].size(); - } - - A.setSize(tmp.N(), tmp.N(), nnz); - A.setBuildMode(Matrix::row_wise); - typename Matrix::CreateIterator citer = A.createbegin(); - using Iter = typename std::vector >::const_iterator; - for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer) - { - using SIter = typename std::set::const_iterator; - for (SIter si = i->begin(), send = i->end(); si!=send; ++si) - citer.insert(*si); - } - - // set matrix old values - A = 0; - for (RowIterator i = tmp.begin(); i != tmp.end(); ++i) - for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j) - A[i.index()][j.index()] = tmp[i.index()][j.index()]; - } - } - - /*! - * \brief Sums up the entries corresponding to border vertices. - * \param A Matrix to operate on. - */ - void sumEntries (Matrix& A) - { - if (gridView_.comm().size() > 1) - { - MatEntryExchange datahandle(mapper_, gid2Index_, index2GID_, A); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - } - } - -#if HAVE_MPI - /*! - * \brief Extends the sparsity pattern of the discretization matrix for AMG. - * \param A A reference to the matrix to change. 
- */ - void getExtendedMatrix (Matrix& A) const; -#endif - -private: - const GridView gridView_; - const DofMapper& mapper_; - std::map gid2Index_; - std::map index2GID_; - -}; // class EntityExchanger - -// methods that only exist if MPI is available -#if HAVE_MPI -template -void EntityExchanger::getExtendedMatrix (Matrix& A) const -{ - if (gridView_.comm().size() > 1) - { - Matrix tmp(A); - std::size_t nnz = 0; - // get entries from other processes - MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, *this); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - std::vector >& sparsity = datahandle.sparsity(); - - // add own entries, count number of nonzeros - for (RowIterator i = A.begin(); i != A.end(); ++i) - { - for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) - { - if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) - sparsity[i.index()].insert(j.index()); - } - nnz += sparsity[i.index()].size(); - } - - A.setSize(tmp.N(), tmp.N(), nnz); - A.setBuildMode(Matrix::row_wise); - typename Matrix::CreateIterator citer = A.createbegin(); - using Iter = typename std::vector >::const_iterator; - for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer){ - using SIter = typename std::set::const_iterator; - for (SIter si = i->begin(), send = i->end(); si!=send; ++si) - citer.insert(*si); - } - - // set matrix old values - A = 0; - for (RowIterator i = tmp.begin(); i != tmp.end(); ++i) - for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j) - A[i.index()][j.index()] = tmp[i.index()][j.index()]; - - sumEntries(A); - } - -} // EntityExchanger::getExtendedMatrix - -template -template -void ParallelISTLHelper::createIndexSetAndProjectForAMG(MatrixType& m, Comm& comm) -{ - if(!initialized_) - { - // This is the first time this function is called. - // Therefore we need to initialize the marker vectors for ghosts and - // owned dofs - initGhostsAndOwners(); - } - - // First find out which dofs we share with other processors - std::vector sharedDofs(mapper_.size(), false); - - SharedGatherScatter sgs(sharedDofs, mapper_); - - if (gridView_.comm().size() > 1) - gridView_.communicate(sgs, Dune::All_All_Interface, - Dune::ForwardCommunication); - - // Count shared dofs that we own - using GlobalIndex = typename Comm::ParallelIndexSet::GlobalIndex; - GlobalIndex count = 0; - auto owned = owner_.begin(); - - for (auto v=sharedDofs.begin(), vend=sharedDofs.end(); v != vend; ++v, ++owned) - if(*v && *owned==1) - ++count; - - Dune::dverb << gridView_.comm().rank() << ": shared count is " << count.touint() - << std::endl; - - std::vector counts(gridView_.comm().size()); - gridView_.comm().allgather(&count, 1, &(counts[0])); - - // Compute start index start_p = \sum_{i=0}^{i scalarIndices(mapper_.size(), - std::numeric_limits::max()); - - auto shared = sharedDofs.begin(); - auto index = scalarIndices.begin(); - - for (auto i=owner_.begin(), iend=owner_.end(); i!=iend; ++i, ++shared, ++index) - { - if(*i==1 && *shared) - { - *index=start; - ++start; - } - } - - // publish global indices for the shared DOFS to other processors. 
- GlobalIndexGatherScatter gigs(scalarIndices, mapper_); - if (gridView_.comm().size()>1) - gridView_.communicate(gigs, Dune::All_All_Interface, - Dune::ForwardCommunication); - - - // Setup the index set - comm.indexSet().beginResize(); - index = scalarIndices.begin(); - auto ghost = isGhost_.begin(); - - for (auto i=owner_.begin(), iend=owner_.end(); i!=iend; ++i, ++ghost, ++index) - { - Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr; - if (*index!=std::numeric_limits::max()) - { - // global index exist in index set - if (*i>0) - { - // This dof is managed by us. - attr = Dune::OwnerOverlapCopyAttributeSet::owner; - } -#if DUNE_VERSION_GTE(DUNE_ISTL, 2, 7) - else if ( *ghost==(1<<24) && ( comm.category() == - static_cast(Dune::SolverCategory::nonoverlapping)) ) -#else - else if ( *ghost==(1<<24) && ( comm.getSolverCategory() == - static_cast(Dune::SolverCategory::nonoverlapping)) ) -#endif - { - //use attribute overlap for ghosts in novlp grids - attr = Dune::OwnerOverlapCopyAttributeSet::overlap; - } - else - { - attr = Dune::OwnerOverlapCopyAttributeSet::copy; - } - comm.indexSet().add(*index, typename Comm::ParallelIndexSet::LocalIndex(i-owner_.begin(), attr)); - } - } - comm.indexSet().endResize(); - - // Compute neighbours using communication - std::set neighbours; - NeighbourGatherScatter ngs(mapper_, gridView_.comm().rank(), - neighbours); - - if (gridView_.comm().size() > 1) - gridView_.communicate(ngs, Dune::All_All_Interface, - Dune::ForwardCommunication); - - comm.remoteIndices().setNeighbours(neighbours); - comm.remoteIndices().template rebuild(); - -} // ParallelISTLHelper::createIndexSetAndProjectForAMG - -#endif // HAVE_MPI - -/*! - * \brief Prepare the linear algebra member variables. - * - * At compile time, correct constructor calls have to be chosen, - * depending on whether the setting is parallel or sequential. - * Since several template parameters are present, this cannot be solved - * by a full function template specialization. Instead, class template - * specialization has to be used. - * This adapts example 4 from http://www.gotw.ca/publications/mill17.htm. - * - * This class template implements the function for the sequential case. - * - * \tparam isParallel decides if the setting is parallel or sequential - */ -template -struct LinearAlgebraPreparator -{ - using DofMapper = typename AmgTraits::DofMapper; - using ParallelHelper = ParallelISTLHelper; - using Comm = typename AmgTraits::Comm; - using LinearOperator = typename AmgTraits::LinearOperator; - using ScalarProduct = typename AmgTraits::ScalarProduct; - - template - static void prepareLinearAlgebra(Matrix& A, Vector& b, - int& rank, - std::shared_ptr& comm, - std::shared_ptr& fop, - std::shared_ptr& sp, - ParallelHelper& pHelper, - const bool firstCall) - { - comm = std::make_shared(); - fop = std::make_shared(A); - sp = std::make_shared(); - } -}; - -#if HAVE_MPI -/*! - * \brief Specialization for the parallel case. - */ -template -struct LinearAlgebraPreparator -{ - using DofMapper = typename AmgTraits::DofMapper; - using ParallelHelper = ParallelISTLHelper; - using Comm = typename AmgTraits::Comm; - using LinearOperator = typename AmgTraits::LinearOperator; - using ScalarProduct = typename AmgTraits::ScalarProduct; - - template - static void prepareLinearAlgebra(Matrix& A, Vector& b, - int& rank, - std::shared_ptr& comm, - std::shared_ptr& fop, - std::shared_ptr& sp, - ParallelHelper& pHelper, - const bool firstCall) - { - Dune::SolverCategory::Category category = AmgTraits::isNonOverlapping ? 
- Dune::SolverCategory::nonoverlapping : Dune::SolverCategory::overlapping; - - if(AmgTraits::isNonOverlapping && firstCall) - { - pHelper.initGhostsAndOwners(); - } - - comm = std::make_shared(pHelper.gridView().comm(), category); - - if(AmgTraits::isNonOverlapping) - { - // extend the matrix pattern such that it is usable for AMG - EntityExchanger exchanger(pHelper.gridView(), pHelper.dofMapper()); - exchanger.getExtendedMatrix(A, pHelper); - exchanger.sumEntries(A); - } - pHelper.createIndexSetAndProjectForAMG(A, *comm); - - fop = std::make_shared(A, *comm); - sp = std::make_shared(*comm); - rank = comm->communicator().rank(); - - // Make rhs consistent - if(AmgTraits::isNonOverlapping) - { - pHelper.makeNonOverlappingConsistent(b); - } - } - -}; // parallel LinearAlgebraPreparator - -#endif // HAVE_MPI - -} // end namespace Dumux +#include "parallelhelpers.hh" +#warning "This header is deprecated and will be removed after release 3.2. Use linearsolver/parallelhelpers.hh" #endif // DUMUX_AMGPARALLELHELPERS_HH diff --git a/dumux/linear/parallelhelpers.hh b/dumux/linear/parallelhelpers.hh new file mode 100644 index 0000000000..9a5ee74c3c --- /dev/null +++ b/dumux/linear/parallelhelpers.hh @@ -0,0 +1,1029 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Linear + * \brief Provides a helper class for nonoverlapping + * decomposition. + */ +#ifndef DUMUX_PARALLELHELPERS_HH +#define DUMUX_PARALLELHELPERS_HH + +#include +#include +#include +#include +#include +#include + +namespace Dumux { + +/*! + * \ingroup Linear + * \brief A parallel helper class providing a nonoverlapping + * decomposition of all degrees of freedom + */ +// operator that resets result to zero at constrained DOFS +template +class ParallelISTLHelper +{ + using DofMapper = typename LinearSolverTraits::DofMapper; + enum { dofCodim = LinearSolverTraits::dofCodim }; + + class BaseGatherScatter + { + public: + BaseGatherScatter(const DofMapper& mapper) + : mapper_(mapper) {} + + template + int index(const EntityType& e) const + { return mapper_.index(e); } + + bool contains(int dim, int codim) const + { return dofCodim == codim; } + + bool fixedsize(int dim, int codim) const + { return true; } + + template + size_t size(EntityType& e) const + { return 1; } + + template + bool isNeitherInteriorNorBorderEntity(EntityType& e) const + { return e.partitionType() != Dune::InteriorEntity && e.partitionType() != Dune::BorderEntity; } + + private: + const DofMapper& mapper_; + }; + + /*! + * \brief GatherScatter implementation that makes a right hand side in the box model consistent. 
+ */ + template + class ConsistencyBoxGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF,typename V::block_type> + { + public: + using DataType = typename V::block_type; + using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + ConsistencyBoxGatherScatter(V& container, const DofMapper& mapper) + : BaseGatherScatter(mapper), container_(container) + {} + + template + void gather(MessageBuffer& buff, const EntityType& e) const + { + buff.write(container_[this->index(e)]); + } + + template + void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + { + typename V::block_type block; + buff.read(block); + container_[this->index(e)] += block; + } + private: + V& container_; + }; + + + /*! + * \brief Writes 1<<24 to each data item (of the container) that is gathered or scattered + * and is neither interior nor border. + * + * Can be used to mark ghost cells. + */ + class GhostGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF + { + public: + using DataType = std::size_t; + using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + GhostGatherScatter(std::vector& ranks, const DofMapper& mapper) + : BaseGatherScatter(mapper), ranks_(ranks) + {} + + template + void gather(MessageBuffer& buff, const EntityType& e) const + { + std::size_t& data = ranks_[this->index(e)]; // TODO: reference to size_t ? + if (this->isNeitherInteriorNorBorderEntity(e)) + data = (1<<24); + buff.write(data); + } + + template + void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + { + std::size_t x; + std::size_t& data = ranks_[this->index(e)]; + buff.read(x); // TODO what is this good for? + if (this->isNeitherInteriorNorBorderEntity(e)) + data = (1<<24); + } + private: + std::vector& ranks_; + }; + + /*! + * \brief GatherScatter handle that sets 1<<24 for data items neither associated to + * the interior or border and take the minimum when scattering. + * + * Used to compute an owner rank for each unknown. + */ + class InteriorBorderGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF + { + public: + using DataType = std::size_t; + using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + InteriorBorderGatherScatter(std::vector& ranks, const DofMapper& mapper) + : BaseGatherScatter(mapper), ranks_(ranks) + {} + + template + void gather(MessageBuffer& buff, const EntityType& e) const + { + std::size_t& data = ranks_[this->index(e)]; // TODO: reference to size_t ? + if (this->isNeitherInteriorNorBorderEntity(e)) + data = (1<<24); + buff.write(data); + } + + template + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) + { + using std::min; + std::size_t x; + std::size_t& data = ranks_[this->index(e)]; + buff.read(x); + if (this->isNeitherInteriorNorBorderEntity(e)) + data = x; + else + data = min(data,x); + } + private: + std::vector& ranks_; + }; + + /*! + * \brief GatherScatter handle for finding out about neighbouring processor ranks. + * + */ + struct NeighbourGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF + { + using DataType = int; // TODO: size_t? 
+ using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + NeighbourGatherScatter(const DofMapper& mapper, int rank, std::set& neighbours) + : BaseGatherScatter(mapper), rank_(rank), neighbours_(neighbours) + {} + + template + void gather(MessageBuffer& buff, const EntityType& e) const + { + buff.write(rank_); + } + + template + void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + { + int x; // TODO: size_t? + buff.read(x); + neighbours_.insert(x); + } + private: + int rank_; + std::set& neighbours_; + }; + + + /*! + * \brief GatherScatter handle for finding out about neighbouring processor ranks. + * + */ + struct SharedGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF + { + using DataType = int; + using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + SharedGatherScatter(std::vector& shared, const DofMapper& mapper) + : BaseGatherScatter(mapper), shared_(shared) + {} + + template + void gather(MessageBuffer& buff, EntityType& e) const + { + int data = true; + buff.write(data); + } + + template + void scatter(MessageBuffer& buff, const EntityType &e, size_t n) + { + int x; + buff.read(x); + int& data = shared_[this->index(e)]; + data = data || x; + } + private: + std::vector& shared_; + }; + + /*! + * \brief GatherScatter handle for finding out about neighbouring processor ranks. + * + */ + template + struct GlobalIndexGatherScatter + : public BaseGatherScatter, + public Dune::CommDataHandleIF, GI> + { + using DataType = GI; + using BaseGatherScatter::contains; + using BaseGatherScatter::fixedsize; + using BaseGatherScatter::size; + + GlobalIndexGatherScatter(std::vector& gindices, const DofMapper& mapper) + : BaseGatherScatter(mapper), gindices_(gindices) + {} + + template + void gather(MessageBuffer& buff, const EntityType& e) const + { + buff.write(gindices_[this->index(e)]); + } + + template + void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + { + using std::min; + DataType x; + buff.read(x); + gindices_[this->index(e)] = min(gindices_[this->index(e)], x); + } + private: + std::vector& gindices_; + }; + +public: + + ParallelISTLHelper (const GridView& gridView, const DofMapper& mapper, int verbose = 1) + : gridView_(gridView), mapper_(mapper), verbose_(verbose), initialized_(false) + {} + + // \brief Initializes the markers for ghosts and owners with the correct size and values. + // + void initGhostsAndOwners() + { + owner_.resize(mapper_.size(), + gridView_.comm().rank()); + isGhost_.resize(mapper_.size(),0.0); + // find out about ghosts + GhostGatherScatter ggs(owner_, mapper_); + + if (gridView_.comm().size() > 1) + gridView_.communicate(ggs, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication); + + isGhost_ = owner_; + + // partition interior/border + InteriorBorderGatherScatter dh(owner_, mapper_); + + if (gridView_.comm().size() > 1) + gridView_.communicate(dh, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); + + // convert vector into mask vector + for (auto& v : owner_) + v = (v == gridView_.comm().rank()) ? 
1.0 : 0.0; + + initialized_ = true; + } + + // keep only DOFs assigned to this processor + template + void mask(W& w) const + { + auto v1 = w.begin(); + for (auto v2 = owner_.begin(), vend = owner_.end(); v2 != vend; ++v1, ++v2) + v1 *= v2; + } + + // access to mask vector + double mask(std::size_t i) const + { return owner_[i]; } + + // access to ghost vector + std::size_t ghost (std::size_t i) const + { return isGhost_[i]; } + + // \brief Make a vector of the box model consistent. + template + void makeNonOverlappingConsistent(Dune::BlockVector& v) + { +#if HAVE_MPI + ConsistencyBoxGatherScatter > gs(v, mapper_); + if (gridView_.comm().size() > 1) + gridView_.communicate(gs, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); +#endif + } + + +#if HAVE_MPI + + /*! + * \brief Creates a matrix suitable for parallel AMG and the parallel information + * + * \tparam MatrixType The type of the ISTL matrix used. + * \tparam Comm The type of the OwnerOverlapCopyCommunication + * \param m The local matrix. + * \param c The parallel information object providing index set, interfaces and + * communicators. + */ + template + [[deprecated("Use createParallelIndexSet(comm) instead. Will be removed after 3.1!")]] + void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c) + { createParallelIndexSet(c); } + + template + void createParallelIndexSet(Comm& comm) + { + if (!initialized_) + { + // This is the first time this function is called. + // Therefore we need to initialize the marker vectors for ghosts and + // owned dofs + initGhostsAndOwners(); + } + + // First find out which dofs we share with other processors + std::vector sharedDofs(mapper_.size(), false); + + SharedGatherScatter sgs(sharedDofs, mapper_); + + if (gridView_.comm().size() > 1) + gridView_.communicate(sgs, Dune::All_All_Interface, + Dune::ForwardCommunication); + + // Count shared dofs that we own + using GlobalIndex = typename Comm::ParallelIndexSet::GlobalIndex; + GlobalIndex count = 0; + auto owned = owner_.begin(); + + for (auto v = sharedDofs.begin(), vend=sharedDofs.end(); v != vend; ++v, ++owned) + if (*v && *owned == 1) + ++count; + + Dune::dverb << gridView_.comm().rank() << ": shared count is " << count.touint() + << std::endl; + + std::vector counts(gridView_.comm().size()); + gridView_.comm().allgather(&count, 1, &(counts[0])); + + // Compute start index start_p = \sum_{i=0}^{i scalarIndices(mapper_.size(), + std::numeric_limits::max()); + + auto shared = sharedDofs.begin(); + auto index = scalarIndices.begin(); + + for (auto i = owner_.begin(), iend = owner_.end(); i != iend; ++i, ++shared, ++index) + { + if (*i==1 && *shared) + { + *index = start; + ++start; + } + } + + // publish global indices for the shared DOFS to other processors. + GlobalIndexGatherScatter gigs(scalarIndices, mapper_); + if (gridView_.comm().size() > 1) + gridView_.communicate(gigs, Dune::All_All_Interface, + Dune::ForwardCommunication); + + + // Setup the index set + comm.indexSet().beginResize(); + index = scalarIndices.begin(); + auto ghost = isGhost_.begin(); + + for (auto i = owner_.begin(), iend = owner_.end(); i != iend; ++i, ++ghost, ++index) + { + Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr; + if (*index != std::numeric_limits::max()) + { + // global index exist in index set + if (*i > 0) + { + // This dof is managed by us. 
+ attr = Dune::OwnerOverlapCopyAttributeSet::owner; + } + #if DUNE_VERSION_GTE(DUNE_ISTL, 2, 7) + else if ( *ghost==(1<<24) && ( comm.category() == + static_cast(Dune::SolverCategory::nonoverlapping)) ) + #else + else if ( *ghost==(1<<24) && ( comm.getSolverCategory() == + static_cast(Dune::SolverCategory::nonoverlapping)) ) + #endif + { + //use attribute overlap for ghosts in novlp grids + attr = Dune::OwnerOverlapCopyAttributeSet::overlap; + } + else + { + attr = Dune::OwnerOverlapCopyAttributeSet::copy; + } + comm.indexSet().add(*index, typename Comm::ParallelIndexSet::LocalIndex(i-owner_.begin(), attr)); + } + } + comm.indexSet().endResize(); + + // Compute neighbours using communication + std::set neighbours; + NeighbourGatherScatter ngs(mapper_, gridView_.comm().rank(), + neighbours); + + if (gridView_.comm().size() > 1) + gridView_.communicate(ngs, Dune::All_All_Interface, + Dune::ForwardCommunication); + + comm.remoteIndices().setNeighbours(neighbours); + comm.remoteIndices().template rebuild(); + + } + +#endif + + //! Return the dofMapper + const DofMapper& dofMapper() const + { return mapper_; } + + //! Return the gridView + const GridView& gridView() const + { return gridView_; } + +private: + const GridView gridView_; //!< the grid view + const DofMapper& mapper_; //!< the dof mapper + std::vector owner_; //!< vector to identify unique decomposition + std::vector isGhost_; //!< vector to identify ghost dofs + int verbose_; //!< verbosity + bool initialized_; //!< whether isGhost and owner arrays are initialized + +}; // class ParallelISTLHelper + +/*! + * \ingroup Linear + * \brief Helper class for adding up matrix entries on border. + * \tparam GridView The grid view to work on + * \tparam LinearSolverTraits traits class + */ +template +class EntityExchanger +{ + using Matrix = typename LinearSolverTraits::MType; + enum { dim = GridView::dimension }; + enum { dofCodim = LinearSolverTraits::dofCodim }; + using Grid = typename GridView::Traits::Grid; + using BlockType = typename Matrix::block_type; + using IDS = typename Grid::Traits::GlobalIdSet; + using IdType = typename IDS::IdType; + using RowIterator = typename Matrix::RowIterator; + using ColIterator = typename Matrix::ColIterator; + using DofMapper = typename LinearSolverTraits::DofMapper; + +public: + /*! \brief Constructor. Sets up the local to global relations. + \param[in] gridView The gridView on which we are operating + \param[in] mapper The local dof mapper + */ + EntityExchanger(const GridView& gridView, const DofMapper& mapper) + : gridView_(gridView), mapper_(mapper) + { + gid2Index_.clear(); + index2GID_.clear(); + + for (const auto& entity : entities(gridView_, Dune::Codim())) + { + if (entity.partitionType() == Dune::BorderEntity) + { + int localIdx = mapper_.index(entity); + IdType dofIdxGlobal = gridView_.grid().globalIdSet().id(entity); + + std::pair g2iPair(dofIdxGlobal, localIdx); + gid2Index_.insert(g2iPair); + + std::pair i2gPair(localIdx, dofIdxGlobal); + index2GID_.insert(i2gPair); + } + } + } + + /*! + * \brief A DataHandle class to exchange matrix sparsity patterns. + * + * We look at a 2D example with a nonoverlapping grid, + * two processes and no ghosts with Q1 discretization. + * Process 0 has the left part of the domain + * with three cells and eight vertices (1-8), + * Process 1 the right part with three cells + * and eight vertices (2,4,7-12). + *
+     *  1 _ 2        2 _ 9 _ 10
+     *  |   |        |   |   |
+     *  3 _ 4 _ 7    4 _ 7 _ 11
+     *  |   |   |        |   |
+     *  5 _ 6 _ 8        8 _ 12
+     *  
+ * If we look at vertex 7 and the corresponding entries in the matrix for P0, + * there will be entries for (7,4) and (7,8), but not for (7,2). + * The MatPatternExchange class will find these entries and returns a vector "sparsity", + * that contains all missing connections. + */ + class MatPatternExchange + : public Dune::CommDataHandleIF + { + using RowIterator = typename Matrix::RowIterator; + using ColIterator = typename Matrix::ColIterator; + public: + //! Export type of data for message buffer + using DataType = IdType; + + /** \brief Constructor + \param[in] mapper The local dof mapper. + \param[in] g2i Global to local index map. + \param[in] i2g Local to global index map. + \param[in] A Matrix to operate on. + \param[in] helper parallel istl helper. + */ + MatPatternExchange (const DofMapper& mapper, + const std::map& g2i, + const std::map& i2g, Matrix& A, + const ParallelISTLHelper& helper) + : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), + sparsity_(A.N()), A_(A), helper_(helper) + {} + + /*! + * \brief Returns true if data for given valid codim should be communicated + */ + bool contains (int dim, int codim) const + { + return (codim==dofCodim); + } + + /*! + * \brief Returns true if size of data per entity of given dim and codim is a constant + */ + bool fixedsize (int dim, int codim) const + { + return false; + } + + /*! + * \brief How many objects of type DataType have to be sent for a given entity + */ + template + size_t size (EntityType& e) const + { + int i = mapper_.index(e); + int n = 0; + for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + { + typename std::map::const_iterator it = index2GID_.find(j.index()); + if (it != index2GID_.end()) + n++; + } + + return n; + } + + /*! + * \brief Pack data from user to message buffer + */ + template + void gather (MessageBuffer& buff, const EntityType& e) const + { + int i = mapper_.index(e); + for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + { + typename std::map::const_iterator it=index2GID_.find(j.index()); + if (it != index2GID_.end()) + buff.write(it->second); + } + + } + + /*! + * \brief Unpack data from message buffer to user + */ + template + void scatter (MessageBuffer& buff, const EntityType& e, size_t n) + { + int i = mapper_.index(e); + for (size_t k = 0; k < n; k++) + { + IdType id; + buff.read(id); + // only add entries corresponding to border entities + typename std::map::const_iterator it = gid2Index_.find(id); + if (it != gid2Index_.end() + && sparsity_[i].find(it->second) == sparsity_[i].end() + && helper_.ghost(it->second) != 1<<24) + sparsity_[i].insert(it->second); + } + } + + /*! + * \brief Get the communicated sparsity pattern + * \return the vector with the sparsity pattern + */ + std::vector >& sparsity () + { + return sparsity_; + } + + private: + const DofMapper& mapper_; + const std::map& gid2Index_; + const std::map& index2GID_; + std::vector > sparsity_; + Matrix& A_; + const ParallelISTLHelper& helper_; + + }; // class MatPatternExchange + + //! Local matrix blocks associated with the global id set + struct MatEntry + { + IdType first; + BlockType second; + MatEntry (const IdType& f, const BlockType& s) : first(f), second(s) {} + MatEntry () {} + }; + + //! A DataHandle class to exchange matrix entries + class MatEntryExchange + : public Dune::CommDataHandleIF + { + using RowIterator = typename Matrix::RowIterator; + using ColIterator = typename Matrix::ColIterator; + public: + //! Export type of data for message buffer + using DataType = MatEntry; + + /*! 
+ * \brief Constructor + * \param[in] mapper The local dof mapper. + * \param[in] g2i Global to local index map. + * \param[in] i2g Local to global index map. + * \param[in] A Matrix to operate on. + */ + MatEntryExchange (const DofMapper& mapper, const std::map& g2i, + const std::map& i2g, + Matrix& A) + : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), A_(A) + {} + + /*! + * \brief Returns true if data for given valid codim should be communicated + */ + bool contains (int dim, int codim) const + { + return (codim==dofCodim); + } + + /*! + * \brief Returns true if size of data per entity of given dim and codim is a constant + */ + bool fixedsize (int dim, int codim) const + { + return false; + } + + /*! + * \brief How many objects of type DataType have to be sent for a given entity + */ + template + size_t size (EntityType& e) const + { + int i = mapper_.index(e); + int n = 0; + for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + { + typename std::map::const_iterator it = index2GID_.find(j.index()); + if (it != index2GID_.end()) + n++; + } + + return n; + } + + /*! + * \brief Pack data from user to message buffer + */ + template + void gather (MessageBuffer& buff, const EntityType& e) const + { + int i = mapper_.index(e); + for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + { + typename std::map::const_iterator it=index2GID_.find(j.index()); + if (it != index2GID_.end()) + buff.write(MatEntry(it->second,*j)); + } + + } + + /*! + * \brief Unpack data from message buffer to user + */ + template + void scatter (MessageBuffer& buff, const EntityType& e, size_t n) + { + int i = mapper_.index(e); + for (size_t k = 0; k < n; k++) + { + MatEntry m; + buff.read(m); + // only add entries corresponding to border entities + typename std::map::const_iterator it = gid2Index_.find(m.first); + if (it != gid2Index_.end()) + if (A_[i].find(it->second) != A_[i].end()) + A_[i][it->second] += m.second; + } + } + + private: + const DofMapper& mapper_; + const std::map& gid2Index_; + const std::map& index2GID_; + Matrix& A_; + + }; // class MatEntryExchange + + [[deprecated("Use extendMatrix instead. Will be removed after 3.1!")]] + void getExtendedMatrix (Matrix& A, const ParallelISTLHelper& helper) + { extendMatrix(A, helper); } + + /*! + * \brief communicates values for the sparsity pattern of the new matrix. + * \param A Matrix to operate on. + * \param helper ParallelelISTLHelper. 
+ */ + void extendMatrix(Matrix& A, const ParallelISTLHelper& helper) + { + if (gridView_.comm().size() > 1) + { + Matrix tmp(A); + std::size_t nnz = 0; + // get entries from other processes + MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, helper); + gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); + std::vector >& sparsity = datahandle.sparsity(); + // add own entries, count number of nonzeros + for (RowIterator i = A.begin(); i != A.end(); ++i) + { + for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) + { + if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) + sparsity[i.index()].insert(j.index()); + } + nnz += sparsity[i.index()].size(); + } + + A.setSize(tmp.N(), tmp.N(), nnz); + A.setBuildMode(Matrix::row_wise); + typename Matrix::CreateIterator citer = A.createbegin(); + using Iter = typename std::vector >::const_iterator; + for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer) + { + using SIter = typename std::set::const_iterator; + for (SIter si = i->begin(), send = i->end(); si!=send; ++si) + citer.insert(*si); + } + + // set matrix old values + A = 0; + for (RowIterator i = tmp.begin(); i != tmp.end(); ++i) + for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j) + A[i.index()][j.index()] = tmp[i.index()][j.index()]; + } + } + + /*! + * \brief Sums up the entries corresponding to border vertices. + * \param A Matrix to operate on. + */ + void sumEntries (Matrix& A) + { + if (gridView_.comm().size() > 1) + { + MatEntryExchange datahandle(mapper_, gid2Index_, index2GID_, A); + gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); + } + } + +#if HAVE_MPI + /*! + * \brief Extends the sparsity pattern of the discretization matrix for AMG. + * \param A A reference to the matrix to change. + */ + [[deprecated("Use extendMatrix instead. Will be removed after 3.1!")]] + void getExtendedMatrix (Matrix& A) + { extendMatrix(A); } + /*! + * \brief Extends the sparsity pattern of the discretization matrix for AMG. + * \param A A reference to the matrix to change. 
+ */ + void extendMatrix(Matrix& A) + { + if (gridView_.comm().size() > 1) + { + Matrix tmp(A); + std::size_t nnz = 0; + // get entries from other processes + MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, *this); + gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); + std::vector >& sparsity = datahandle.sparsity(); + + // add own entries, count number of nonzeros + for (RowIterator i = A.begin(); i != A.end(); ++i) + { + for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) + { + if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) + sparsity[i.index()].insert(j.index()); + } + nnz += sparsity[i.index()].size(); + } + + A.setSize(tmp.N(), tmp.N(), nnz); + A.setBuildMode(Matrix::row_wise); + typename Matrix::CreateIterator citer = A.createbegin(); + using Iter = typename std::vector >::const_iterator; + for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer){ + using SIter = typename std::set::const_iterator; + for (SIter si = i->begin(), send = i->end(); si!=send; ++si) + citer.insert(*si); + } + + // set matrix old values + A = 0; + for (RowIterator i = tmp.begin(); i != tmp.end(); ++i) + for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j) + A[i.index()][j.index()] = tmp[i.index()][j.index()]; + + sumEntries(A); + } + } +#endif + +private: + const GridView gridView_; + const DofMapper& mapper_; + std::map gid2Index_; + std::map index2GID_; + +}; // class EntityExchanger + +/*! + * \brief Prepare the linear algebra member variables. + * + * At compile time, correct constructor calls have to be chosen, + * depending on whether the setting is parallel or sequential. + * Since several template parameters are present, this cannot be solved + * by a full function template specialization. Instead, class template + * specialization has to be used. + * This adapts example 4 from http://www.gotw.ca/publications/mill17.htm. + * + * This class template implements the function for the sequential case. + * + * \tparam isParallel decides if the setting is parallel or sequential + */ +template +struct LinearAlgebraPreparator +{ + using DofMapper = typename LinearSolverTraits::DofMapper; + using ParallelHelper = ParallelISTLHelper; + using Comm = typename LinearSolverTraits::Comm; + using LinearOperator = typename LinearSolverTraits::LinearOperator; + using ScalarProduct = typename LinearSolverTraits::ScalarProduct; + + template + static void prepareLinearAlgebra(Matrix& A, Vector& b, + int& rank, + std::shared_ptr& comm, + std::shared_ptr& fop, + std::shared_ptr& sp, + ParallelHelper& pHelper, + const bool firstCall) + { + comm = std::make_shared(); + fop = std::make_shared(A); + sp = std::make_shared(); + } +}; + +#if HAVE_MPI +/*! + * \brief Specialization for the parallel case. + */ +template +struct LinearAlgebraPreparator +{ + using DofMapper = typename LinearSolverTraits::DofMapper; + using ParallelHelper = ParallelISTLHelper; + using Comm = typename LinearSolverTraits::Comm; + using LinearOperator = typename LinearSolverTraits::LinearOperator; + using ScalarProduct = typename LinearSolverTraits::ScalarProduct; + + template + static void prepareLinearAlgebra(Matrix& A, Vector& b, + int& rank, + std::shared_ptr& comm, + std::shared_ptr& fop, + std::shared_ptr& sp, + ParallelHelper& pHelper, + const bool firstCall) + { + const auto category = LinearSolverTraits::isNonOverlapping ? 
+ Dune::SolverCategory::nonoverlapping : Dune::SolverCategory::overlapping; + + if (LinearSolverTraits::isNonOverlapping && firstCall) + pHelper.initGhostsAndOwners(); + + comm = std::make_shared(pHelper.gridView().comm(), category); + + if (LinearSolverTraits::isNonOverlapping) + { + // extend the matrix pattern such that it is usable for the parallel solver + EntityExchanger exchanger(pHelper.gridView(), pHelper.dofMapper()); + exchanger.extendMatrix(A, pHelper); + exchanger.sumEntries(A); + } + pHelper.createParallelIndexSet(*comm); + + fop = std::make_shared(A, *comm); + sp = std::make_shared(*comm); + rank = comm->communicator().rank(); + + // Make rhs consistent + if (LinearSolverTraits::isNonOverlapping) + pHelper.makeNonOverlappingConsistent(b); + } +}; + +#endif // HAVE_MPI + +} // end namespace Dumux + +#endif // DUMUX_PARALLELHELPERS_HH -- GitLab From cec11de7bcb826cc30aaa5bdf664ff6551563d1c Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 16:59:58 +0100 Subject: [PATCH 3/7] [amgbackend] Resolve deprecation warnings --- dumux/linear/amgbackend.hh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dumux/linear/amgbackend.hh b/dumux/linear/amgbackend.hh index a491f22b34..518f926de6 100644 --- a/dumux/linear/amgbackend.hh +++ b/dumux/linear/amgbackend.hh @@ -43,7 +43,7 @@ #include #include -#include +#include namespace Dumux { @@ -217,7 +217,7 @@ private: } // namespace Dumux #include -#include +#include namespace Dumux { @@ -228,9 +228,9 @@ namespace Dumux { * \note This is an adaptor using a TypeTag */ template -using AMGBackend = ParallelAMGBackend, AmgTraits, - GetPropType, - GetPropType>>; +using AMGBackend = ParallelAMGBackend, LinearSolverTraits, + GetPropType, + GetPropType>>; } // namespace Dumux -- GitLab From 4d0333d2092d4264424ed9ffbcd2a10000730402 Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 17:42:49 +0100 Subject: [PATCH 4/7] [parallel] Generalize vectorexchange --- dumux/linear/vectorexchange.hh | 72 +------------ dumux/parallel/CMakeLists.txt | 1 + dumux/parallel/vectorcommdatahandle.hh | 138 +++++++++++++++++++++++++ 3 files changed, 144 insertions(+), 67 deletions(-) create mode 100644 dumux/parallel/vectorcommdatahandle.hh diff --git a/dumux/linear/vectorexchange.hh b/dumux/linear/vectorexchange.hh index c201406aa9..ee32a4b1b7 100644 --- a/dumux/linear/vectorexchange.hh +++ b/dumux/linear/vectorexchange.hh @@ -24,75 +24,13 @@ #ifndef DUMUX_VECTOR_EXCHANGE_HH #define DUMUX_VECTOR_EXCHANGE_HH -#include +#warning "This header is deprecated and will be removed after release 3.2. Use linearsolver/linearsolvertraits.hh" -namespace Dumux { - -/*! - * \ingroup Linear - * \todo why is this needed? is parallel/vectorexhange.hh not the same? - * \brief A data handle class to exchange entries of a vector - */ -template // mapper type and vector type -class VectorExchange - : public Dune::CommDataHandleIF, - typename Vector::value_type> -{ -public: - //! export type of data for message buffer - using DataType = typename Vector::value_type; - - //! returns true if data for this codim should be communicated - bool contains (int dim, int codim) const - { - return (codim == 0); - } - - //! returns true if size per entity of given dim and codim is a constant - bool fixedsize (int dim, int codim) const - { - return true; - } - - /*! - * \brief how many objects of type DataType have to be sent for a given entity - * \note Only the sender side needs to know this size. 
- */ - template - size_t size (Entity& entity) const - { - return 1; - } - - //! pack data from user to message buffer - template - void gather (MessageBuffer& buff, const Entity& entity) const - { - buff.write(dataVector_[mapper_.index(entity)]); - } - - /*! - * \brief unpack data from message buffer to user - * \note n is the number of objects sent by the sender - */ - template - void scatter (MessageBuffer& buff, const Entity& entity, size_t n) - { - DataType x; - buff.read(x); - dataVector_[mapper_.index(entity)] = x; - } - - //! constructor - VectorExchange (const Mapper& mapper, Vector& dataVector) - : mapper_(mapper), dataVector_(dataVector) - {} - -private: - const Mapper& mapper_; - Vector& dataVector_; -}; +#include +namespace Dumux { + template + using VectorExchange [[deprecated("Use VectorCommDataHandleEqual instead. Will be removed after 3.2!")]] = VectorCommDataHandleEqual; } // end namespace Dumux #endif diff --git a/dumux/parallel/CMakeLists.txt b/dumux/parallel/CMakeLists.txt index 633c6f71b4..f773c82b7c 100644 --- a/dumux/parallel/CMakeLists.txt +++ b/dumux/parallel/CMakeLists.txt @@ -1,3 +1,4 @@ install(FILES +vectorcommdatahandle.hh vertexhandles.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/parallel) diff --git a/dumux/parallel/vectorcommdatahandle.hh b/dumux/parallel/vectorcommdatahandle.hh new file mode 100644 index 0000000000..0bcfa538d8 --- /dev/null +++ b/dumux/parallel/vectorcommdatahandle.hh @@ -0,0 +1,138 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Linear + * \brief Contains a class to exchange entries of a vector + */ +#ifndef DUMUX_VECTOR_COMM_DATA_HANDLE_HH +#define DUMUX_VECTOR_COMM_DATA_HANDLE_HH + +#include +#include + +namespace Dumux { + +namespace Detail { + + struct SetEqual + { + template + static void apply(A& a, const B& b) + { a = b; } + }; + + struct Sum + { + template + static void apply(A& a, const B& b) + { a += b; } + }; + + struct Max + { + template + static void apply(A& a, const B& b) + { + using std::max; + a = max(a,b); + } + }; + + struct Min + { + template + static void apply(A& a, const B& b) + { + using std::min; + a = min(a,b); + } + }; +} // end namespace Detail + +/*! + * \ingroup Linear + * \brief A data handle class to exchange entries of a vector + */ +template +class VectorCommDataHandle + : public Dune::CommDataHandleIF, + typename Vector::value_type> +{ +public: + //! export type of data for message buffer + using DataType = typename Vector::value_type; + + VectorCommDataHandle(const Mapper& mapper, Vector& vector) + : mapper_(mapper), vector_(vector) + {} + + //! 
returns true if data for this codim should be communicated + bool contains(int dim, int codim) const + { return (codim == entityCodim); } + + //! returns true if size per entity of given dim and codim is a constant + bool fixedsize(int dim, int codim) const + { return true; } + + /*! + * \brief how many objects of type DataType have to be sent for a given entity + * \note Only the sender side needs to know this size. + */ + template + std::size_t size(Entity& entity) const + { return 1; } + + //! pack data from user to message buffer + template + void gather(MessageBuffer& buff, const Entity& entity) const + { buff.write(vector_[mapper_.index(entity)]); } + + /*! + * \brief unpack data from message buffer to user + * \note n is the number of objects sent by the sender + */ + template + void scatter(MessageBuffer& buff, const Entity& entity, std::size_t n) + { + DataType x; + buff.read(x); + ScatterOperator::apply(vector_[mapper_.index(entity)], x); + } + +protected: + const Mapper& mapper_; + Vector& vector_; +}; + +template +using VectorCommDataHandleEqual = VectorCommDataHandle; + +template +using VectorCommDataHandleSum = VectorCommDataHandle; + +template +using VectorCommDataHandleMin = VectorCommDataHandle; + +template +using VectorCommDataHandleMax = VectorCommDataHandle; + +} // end namespace Dumux + +#endif // DUMUX_VECTOR_COMM_DATA_HANDLE_HH -- GitLab From ac5a38645c6b3d144a08641bbd81881928edd488 Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 19:36:28 +0100 Subject: [PATCH 5/7] [parallel] Deprecate vertexhandles.hh * use specializations of VectorCommDataHandle instead --- dumux/parallel/vertexhandles.hh | 225 +++++++------------------------- 1 file changed, 50 insertions(+), 175 deletions(-) diff --git a/dumux/parallel/vertexhandles.hh b/dumux/parallel/vertexhandles.hh index 5655c1b476..0faa96649f 100644 --- a/dumux/parallel/vertexhandles.hh +++ b/dumux/parallel/vertexhandles.hh @@ -25,188 +25,63 @@ #ifndef DUMUX_VERTEX_HANDLES_HH #define DUMUX_VERTEX_HANDLES_HH -#include +#warning "This header is deprecated and will be removed after release 3.2. Use parallel/vectorcommdatahandle.hh" +#include "vectorcommdatahandle.hh" -namespace Dumux { - -/*! - * \ingroup Assembly - * \brief Data handle for parallel communication which sums up all - * values are attached to vertices - */ -template -class VertexHandleSum - : public Dune::CommDataHandleIF< VertexHandleSum, - FieldType > -{ -public: - VertexHandleSum(Container &container, - const VertexMapper &mapper) - : mapper_(mapper) - , container_(container) - { }; - - bool contains(int dim, int codim) const - { - // only communicate vertices - return codim == dim; - } - - bool fixedsize(int dim, int codim) const - { - // for each vertex we communicate a single field vector which - // has a fixed size - return true; - } - - template - size_t size (const EntityType &e) const - { - // communicate a field type per entity - return 1; - } - - template - void gather(MessageBufferImp &buff, const EntityType &e) const - { - int vIdx = mapper_.index(e); - buff.write(container_[vIdx]); - } - - template - void scatter(MessageBufferImp &buff, const EntityType &e, size_t n) - { - int vIdx = mapper_.index(e); - - FieldType tmp; - buff.read(tmp); - container_[vIdx] += tmp; - } - -private: - const VertexMapper &mapper_; - Container &container_; -}; - -/*! 
- * \brief Data handle for parallel communication which takes the - * maximum of all values that are attached to vertices - */ -template -class VertexHandleMax - : public Dune::CommDataHandleIF< VertexHandleMax, - FieldType > -{ -public: - VertexHandleMax(Container &container, - const VertexMapper &mapper) - : mapper_(mapper) - , container_(container) - { }; - - bool contains(int dim, int codim) const - { - // only communicate vertices - return codim == dim; - } - bool fixedsize(int dim, int codim) const - { - // for each vertex we communicate a single field vector which - // has a fixed size - return true; - } - - template - size_t size (const EntityType &e) const - { - // communicate a field type per entity - return 1; - } - - template - void gather(MessageBufferImp &buff, const EntityType &e) const - { - int vIdx = mapper_.index(e); - buff.write(container_[vIdx]); - } - - template - void scatter(MessageBufferImp &buff, const EntityType &e, size_t n) - { - int vIdx = mapper_.index(e); - - FieldType tmp; - buff.read(tmp); - using std::max; - container_[vIdx] = max(container_[vIdx], tmp); - } - -private: - const VertexMapper &mapper_; - Container &container_; -}; - - -/*! - * \brief Provides data handle for parallel communication which takes - * the minimum of all values that are attached to vertices - */ -template -class VertexHandleMin - : public Dune::CommDataHandleIF< VertexHandleMin, - FieldType > -{ -public: - VertexHandleMin(Container &container, - const VertexMapper &mapper) - : mapper_(mapper) - , container_(container) - { }; - - bool contains(int dim, int codim) const - { - // only communicate vertices - return codim == dim; - } - - bool fixedsize(int dim, int codim) const - { - // for each vertex we communicate a single field vector which - // has a fixed size - return true; - } +namespace Dumux { - template - size_t size (const EntityType &e) const + template + class [[deprecated("Use VectorCommDataHandleSum instead.")]] VertexHandleSum : public VectorCommDataHandleSum { - // communicate a field type per entity - return 1; - } - - template - void gather(MessageBufferImp &buff, const EntityType &e) const + using ParentType = VectorCommDataHandleSum; + public: + VertexHandleSum(Container &container, + const VertexMapper &mapper) + : ParentType(mapper, container) + { }; + + bool contains(int dim, int codim) const + { + // only communicate vertices + return codim == dim; + } + }; + + template + class [[deprecated("Use VectorCommDataHandleMax instead.")]] VertexHandleMax : public VectorCommDataHandleMax { - int vIdx = mapper_.index(e); - buff.write(container_[vIdx]); - } - - template - void scatter(MessageBufferImp &buff, const EntityType &e, size_t n) + using ParentType = VectorCommDataHandleMax; + public: + VertexHandleMax(Container &container, + const VertexMapper &mapper) + : ParentType(mapper, container) + { }; + + bool contains(int dim, int codim) const + { + // only communicate vertices + return codim == dim; + } + }; + + template + class [[deprecated("Use VectorCommDataHandleMin instead.")]] VertexHandleMin : public VectorCommDataHandleMin { - int vIdx = mapper_.index(e); - FieldType tmp; - buff.read(tmp); - using std::min; - container_[vIdx] = min(container_[vIdx], tmp); - } - -private: - const VertexMapper &mapper_; - Container &container_; -}; - + using ParentType = VectorCommDataHandleMin; + public: + VertexHandleMin(Container &container, + const VertexMapper &mapper) + : ParentType(mapper, container) + { }; + + bool contains(int dim, int codim) const + { + // only 
communicate vertices + return codim == dim; + } + }; } // end namespace Dumux #endif -- GitLab From 633d308dbaa847ab10b9d13b24dca1d96bc647e3 Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Wed, 29 Jan 2020 19:46:28 +0100 Subject: [PATCH 6/7] [assembly] Resolve deprecation warnings --- dumux/assembly/fvassembler.hh | 6 +++--- dumux/assembly/partialreassembler.hh | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dumux/assembly/fvassembler.hh b/dumux/assembly/fvassembler.hh index a8f0c94fea..084478e595 100644 --- a/dumux/assembly/fvassembler.hh +++ b/dumux/assembly/fvassembler.hh @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include "jacobianpattern.hh" #include "diffmethod.hh" @@ -191,8 +191,8 @@ public: if (isBox && gridView().comm().size() > 1) { using VertexMapper = typename GridGeometry::VertexMapper; - VertexHandleSum - sumResidualHandle(residual, gridGeometry_->vertexMapper()); + VectorCommDataHandleSum + sumResidualHandle(gridGeometry_->vertexMapper(), residual); gridView().communicate(sumResidualHandle, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); diff --git a/dumux/assembly/partialreassembler.hh b/dumux/assembly/partialreassembler.hh index f164191967..52defa5d3c 100644 --- a/dumux/assembly/partialreassembler.hh +++ b/dumux/assembly/partialreassembler.hh @@ -32,7 +32,7 @@ #include #include -#include +#include #include "entitycolor.hh" @@ -181,8 +181,8 @@ public: // at this point we communicate the yellow vertices to the // neighboring processes because a neigbor process may not see // the red vertex for yellow border vertices - VertexHandleMin, VertexMapper> - minHandle(vertexColor_, vertexMapper); + VectorCommDataHandleMin, dim> + minHandle(vertexMapper, vertexColor_); gridView.communicate(minHandle, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); @@ -235,8 +235,8 @@ public: } // demote the border orange vertices - VertexHandleMax, VertexMapper> - maxHandle(vertexColor_, vertexMapper); + VectorCommDataHandleMax, dim> + maxHandle(vertexMapper, vertexColor_); gridView.communicate(maxHandle, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); -- GitLab From 0fb0600906ff4118378bc56faff0a7994cdda10f Mon Sep 17 00:00:00 2001 From: Kilian Weishaupt Date: Thu, 30 Jan 2020 08:10:35 +0100 Subject: [PATCH 7/7] [parallelHelper] Clean-up internal structure --- dumux/linear/amgbackend.hh | 42 +- dumux/linear/linearsolvertraits.hh | 8 +- dumux/linear/parallelhelpers.hh | 742 +++++++++++++---------------- 3 files changed, 344 insertions(+), 448 deletions(-) diff --git a/dumux/linear/amgbackend.hh b/dumux/linear/amgbackend.hh index 518f926de6..1f35a28577 100644 --- a/dumux/linear/amgbackend.hh +++ b/dumux/linear/amgbackend.hh @@ -134,12 +134,18 @@ public: template bool solve(Matrix& A, Vector& x, Vector& b) { - int rank = 0; std::shared_ptr comm; std::shared_ptr fop; std::shared_ptr sp; - static const bool isParallel = AmgTraits::isParallel; - prepareLinearAlgebra_(A, b, rank, comm, fop, sp); + +#if HAVE_MPI + if constexpr (AmgTraits::isParallel) + prepareLinearAlgebraParallel(A, b, comm, fop, sp, *phelper_, firstCall_); + else + prepareLinearAlgebraSequential(A, comm, fop, sp); +#else + prepareLinearAlgebraSequential(A, comm, fop, sp); +#endif using SmootherArgs = typename Dune::Amg::SmootherTraits::Arguments; using Criterion = Dune::Amg::CoarsenCriterion>; @@ -156,7 +162,7 @@ public: AMGType amg(*fop, criterion, smootherArgs, *comm); Dune::BiCGSTABSolver 
solver(*fop, *sp, amg, this->residReduction(), this->maxIter(), - rank == 0 ? this->verbosity() : 0); + comm->communicator().rank() == 0 ? this->verbosity() : 0); solver.apply(x, b, result_); firstCall_ = false; @@ -181,34 +187,6 @@ public: private: - /*! - * \brief Prepare the linear algebra member variables. - * - * At compile time, correct constructor calls have to be chosen, - * depending on whether the setting is parallel or sequential. - * Since several template parameters are present, this cannot be solved - * by a full function template specialization. Instead, class template - * specialization has to be used. - * This adapts example 4 from http://www.gotw.ca/publications/mill17.htm. - - * The function is called from the solve function. The call is - * forwarded to the corresponding function of a class template. - * - * \tparam Matrix the matrix type - * \tparam Vector the vector type - * \tparam isParallel decides if the setting is parallel or sequential - */ - template - void prepareLinearAlgebra_(Matrix& A, Vector& b, int& rank, - std::shared_ptr& comm, - std::shared_ptr& fop, - std::shared_ptr& sp) - { - LinearAlgebraPreparator - ::prepareLinearAlgebra(A, b, rank, comm, fop, sp, - *phelper_, firstCall_); - } - std::shared_ptr> phelper_; Dune::InverseOperatorResult result_; bool firstCall_; diff --git a/dumux/linear/linearsolvertraits.hh b/dumux/linear/linearsolvertraits.hh index a004995278..dad36f7cb8 100644 --- a/dumux/linear/linearsolvertraits.hh +++ b/dumux/linear/linearsolvertraits.hh @@ -42,9 +42,7 @@ #include #endif // HAVE_UG -namespace Dumux { -namespace Temp { -namespace Capabilities { +namespace Dumux::Temp::Capabilities { template struct canCommunicate @@ -60,9 +58,7 @@ struct canCommunicate, codim> }; #endif // HAVE_UG -} // namespace Capabilities -} // namespace Temp -} // namespace Dumux +} // namespace Dumux::Temp::Capabilities // end workaround namespace Dumux { diff --git a/dumux/linear/parallelhelpers.hh b/dumux/linear/parallelhelpers.hh index 9a5ee74c3c..26f180d085 100644 --- a/dumux/linear/parallelhelpers.hh +++ b/dumux/linear/parallelhelpers.hh @@ -46,6 +46,9 @@ class ParallelISTLHelper using DofMapper = typename LinearSolverTraits::DofMapper; enum { dofCodim = LinearSolverTraits::dofCodim }; + // TODO: this is some large number (replace by limits?) + static constexpr std::size_t ghostMarker_ = 1<<24; + class BaseGatherScatter { public: @@ -63,7 +66,7 @@ class ParallelISTLHelper { return true; } template - size_t size(EntityType& e) const + std::size_t size(EntityType& e) const { return 1; } template @@ -99,7 +102,7 @@ class ParallelISTLHelper } template - void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { typename V::block_type block; buff.read(block); @@ -111,7 +114,7 @@ class ParallelISTLHelper /*! - * \brief Writes 1<<24 to each data item (of the container) that is gathered or scattered + * \brief Writes ghostMarker_ to each data item (of the container) that is gathered or scattered * and is neither interior nor border. * * Can be used to mark ghost cells. @@ -133,27 +136,27 @@ class ParallelISTLHelper template void gather(MessageBuffer& buff, const EntityType& e) const { - std::size_t& data = ranks_[this->index(e)]; // TODO: reference to size_t ? 
+ auto& data = ranks_[this->index(e)]; if (this->isNeitherInteriorNorBorderEntity(e)) - data = (1<<24); + data = ghostMarker_; buff.write(data); } template - void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { std::size_t x; - std::size_t& data = ranks_[this->index(e)]; - buff.read(x); // TODO what is this good for? + buff.read(x); + auto& data = ranks_[this->index(e)]; if (this->isNeitherInteriorNorBorderEntity(e)) - data = (1<<24); + data = ghostMarker_; } private: std::vector& ranks_; }; /*! - * \brief GatherScatter handle that sets 1<<24 for data items neither associated to + * \brief GatherScatter handle that sets ghostMarker_ for data items neither associated to * the interior or border and take the minimum when scattering. * * Used to compute an owner rank for each unknown. @@ -175,23 +178,20 @@ class ParallelISTLHelper template void gather(MessageBuffer& buff, const EntityType& e) const { - std::size_t& data = ranks_[this->index(e)]; // TODO: reference to size_t ? + auto& data = ranks_[this->index(e)]; if (this->isNeitherInteriorNorBorderEntity(e)) - data = (1<<24); + data = ghostMarker_; buff.write(data); } template void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { - using std::min; std::size_t x; - std::size_t& data = ranks_[this->index(e)]; buff.read(x); - if (this->isNeitherInteriorNorBorderEntity(e)) - data = x; - else - data = min(data,x); + auto& data = ranks_[this->index(e)]; + using std::min; + data = this->isNeitherInteriorNorBorderEntity(e) ? x : min(data,x); } private: std::vector& ranks_; @@ -205,7 +205,7 @@ class ParallelISTLHelper : public BaseGatherScatter, public Dune::CommDataHandleIF { - using DataType = int; // TODO: size_t? + using DataType = int; using BaseGatherScatter::contains; using BaseGatherScatter::fixedsize; using BaseGatherScatter::size; @@ -221,9 +221,9 @@ class ParallelISTLHelper } template - void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { - int x; // TODO: size_t? + int x; buff.read(x); neighbours_.insert(x); } @@ -258,11 +258,11 @@ class ParallelISTLHelper } template - void scatter(MessageBuffer& buff, const EntityType &e, size_t n) + void scatter(MessageBuffer& buff, const EntityType &e, std::size_t n) { int x; buff.read(x); - int& data = shared_[this->index(e)]; + auto& data = shared_[this->index(e)]; data = data || x; } private: @@ -273,89 +273,96 @@ class ParallelISTLHelper * \brief GatherScatter handle for finding out about neighbouring processor ranks. 
* */ - template + template struct GlobalIndexGatherScatter : public BaseGatherScatter, - public Dune::CommDataHandleIF, GI> + public Dune::CommDataHandleIF, GlobalIndex> { - using DataType = GI; + using DataType = GlobalIndex; using BaseGatherScatter::contains; using BaseGatherScatter::fixedsize; using BaseGatherScatter::size; - GlobalIndexGatherScatter(std::vector& gindices, const DofMapper& mapper) - : BaseGatherScatter(mapper), gindices_(gindices) + GlobalIndexGatherScatter(std::vector& globalIndices, const DofMapper& mapper) + : BaseGatherScatter(mapper), globalIndices_(globalIndices) {} template void gather(MessageBuffer& buff, const EntityType& e) const { - buff.write(gindices_[this->index(e)]); + buff.write(globalIndices_[this->index(e)]); } template - void scatter(MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { - using std::min; DataType x; buff.read(x); - gindices_[this->index(e)] = min(gindices_[this->index(e)], x); + using std::min; + auto& data = globalIndices_[this->index(e)]; + data = min(data, x); } private: - std::vector& gindices_; + std::vector& globalIndices_; }; public: - ParallelISTLHelper (const GridView& gridView, const DofMapper& mapper, int verbose = 1) - : gridView_(gridView), mapper_(mapper), verbose_(verbose), initialized_(false) + ParallelISTLHelper(const GridView& gridView, const DofMapper& mapper) + : gridView_(gridView), mapper_(mapper), initialized_(false) + {} + + [[deprecated("The verbose argument has no effect. Use ParallelISTLHelper(gridView, mapper) instead. Will be removed after 3.2!")]] + ParallelISTLHelper(const GridView& gridView, const DofMapper& mapper, int verbose) + : gridView_(gridView), mapper_(mapper), initialized_(false) {} // \brief Initializes the markers for ghosts and owners with the correct size and values. // void initGhostsAndOwners() { - owner_.resize(mapper_.size(), - gridView_.comm().rank()); - isGhost_.resize(mapper_.size(),0.0); + const auto rank = gridView_.comm().rank(); + isOwned_.resize(mapper_.size(), rank); // find out about ghosts - GhostGatherScatter ggs(owner_, mapper_); + GhostGatherScatter ggs(isOwned_, mapper_); if (gridView_.comm().size() > 1) gridView_.communicate(ggs, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication); - isGhost_ = owner_; + isGhost_ = isOwned_; // partition interior/border - InteriorBorderGatherScatter dh(owner_, mapper_); + InteriorBorderGatherScatter dh(isOwned_, mapper_); if (gridView_.comm().size() > 1) gridView_.communicate(dh, Dune::InteriorBorder_InteriorBorder_Interface, Dune::ForwardCommunication); // convert vector into mask vector - for (auto& v : owner_) - v = (v == gridView_.comm().rank()) ? 1.0 : 0.0; + for (auto& v : isOwned_) + v = (v == rank) ? 1 : 0; initialized_ = true; } // keep only DOFs assigned to this processor template + [[deprecated("This function has no effect. 
Will be removed after 3.2!")]] void mask(W& w) const - { - auto v1 = w.begin(); - for (auto v2 = owner_.begin(), vend = owner_.end(); v2 != vend; ++v1, ++v2) - v1 *= v2; - } + {} // access to mask vector - double mask(std::size_t i) const - { return owner_[i]; } + [[deprecated("Will be removed after 3.2!")]] + std::size_t mask(std::size_t i) const + { return isOwned_[i]; } // access to ghost vector - std::size_t ghost (std::size_t i) const + [[deprecated("Will be removed after 3.2!")]] + std::size_t ghost(std::size_t i) const { return isGhost_[i]; } + bool isGhost(std::size_t i) const + { return isGhost_[i] == ghostMarker_; } + // \brief Make a vector of the box model consistent. template void makeNonOverlappingConsistent(Dune::BlockVector& v) @@ -371,20 +378,17 @@ public: #if HAVE_MPI + template + [[deprecated("Use createParallelIndexSet(comm) instead. Will be removed after 3.2!")]] + void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c) + { createParallelIndexSet(c); } + /*! - * \brief Creates a matrix suitable for parallel AMG and the parallel information + * \brief Creates a parallel index set * - * \tparam MatrixType The type of the ISTL matrix used. * \tparam Comm The type of the OwnerOverlapCopyCommunication - * \param m The local matrix. - * \param c The parallel information object providing index set, interfaces and * communicators. */ - template - [[deprecated("Use createParallelIndexSet(comm) instead. Will be removed after 3.1!")]] - void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c) - { createParallelIndexSet(c); } - template void createParallelIndexSet(Comm& comm) { @@ -396,105 +400,56 @@ public: initGhostsAndOwners(); } + if (gridView_.comm().size() <= 1) + { + comm.remoteIndices().template rebuild(); + return; + } + // First find out which dofs we share with other processors - std::vector sharedDofs(mapper_.size(), false); + std::vector isShared(mapper_.size(), false); - SharedGatherScatter sgs(sharedDofs, mapper_); - - if (gridView_.comm().size() > 1) - gridView_.communicate(sgs, Dune::All_All_Interface, - Dune::ForwardCommunication); + SharedGatherScatter sgs(isShared, mapper_); + gridView_.communicate(sgs, Dune::All_All_Interface, Dune::ForwardCommunication); // Count shared dofs that we own using GlobalIndex = typename Comm::ParallelIndexSet::GlobalIndex; GlobalIndex count = 0; - auto owned = owner_.begin(); - - for (auto v = sharedDofs.begin(), vend=sharedDofs.end(); v != vend; ++v, ++owned) - if (*v && *owned == 1) + for (std::size_t i = 0; i < isShared.size(); ++i) + if (isShared[i] && isOwned_[i] == 1) ++count; - Dune::dverb << gridView_.comm().rank() << ": shared count is " << count.touint() - << std::endl; - std::vector counts(gridView_.comm().size()); - gridView_.comm().allgather(&count, 1, &(counts[0])); + gridView_.comm().allgather(&count, 1, counts.data()); // Compute start index start_p = \sum_{i=0}^{i scalarIndices(mapper_.size(), - std::numeric_limits::max()); + std::vector globalIndices(mapper_.size(), std::numeric_limits::max()); - auto shared = sharedDofs.begin(); - auto index = scalarIndices.begin(); - - for (auto i = owner_.begin(), iend = owner_.end(); i != iend; ++i, ++shared, ++index) + for (std::size_t i = 0; i < globalIndices.size(); ++i) { - if (*i==1 && *shared) + if (isOwned_[i] == 1 && isShared[i]) { - *index = start; + globalIndices[i] = start; // GlobalIndex does not implement postfix ++ ++start; } } // publish global indices for the shared DOFS to other processors. 
- GlobalIndexGatherScatter gigs(scalarIndices, mapper_); - if (gridView_.comm().size() > 1) - gridView_.communicate(gigs, Dune::All_All_Interface, - Dune::ForwardCommunication); + GlobalIndexGatherScatter gigs(globalIndices, mapper_); + gridView_.communicate(gigs, Dune::All_All_Interface, Dune::ForwardCommunication); - - // Setup the index set - comm.indexSet().beginResize(); - index = scalarIndices.begin(); - auto ghost = isGhost_.begin(); - - for (auto i = owner_.begin(), iend = owner_.end(); i != iend; ++i, ++ghost, ++index) - { - Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr; - if (*index != std::numeric_limits::max()) - { - // global index exist in index set - if (*i > 0) - { - // This dof is managed by us. - attr = Dune::OwnerOverlapCopyAttributeSet::owner; - } - #if DUNE_VERSION_GTE(DUNE_ISTL, 2, 7) - else if ( *ghost==(1<<24) && ( comm.category() == - static_cast(Dune::SolverCategory::nonoverlapping)) ) - #else - else if ( *ghost==(1<<24) && ( comm.getSolverCategory() == - static_cast(Dune::SolverCategory::nonoverlapping)) ) - #endif - { - //use attribute overlap for ghosts in novlp grids - attr = Dune::OwnerOverlapCopyAttributeSet::overlap; - } - else - { - attr = Dune::OwnerOverlapCopyAttributeSet::copy; - } - comm.indexSet().add(*index, typename Comm::ParallelIndexSet::LocalIndex(i-owner_.begin(), attr)); - } - } - comm.indexSet().endResize(); + resizeIndexSet_(comm, globalIndices); // Compute neighbours using communication std::set neighbours; - NeighbourGatherScatter ngs(mapper_, gridView_.comm().rank(), - neighbours); - - if (gridView_.comm().size() > 1) - gridView_.communicate(ngs, Dune::All_All_Interface, - Dune::ForwardCommunication); - + NeighbourGatherScatter ngs(mapper_, gridView_.comm().rank(), neighbours); + gridView_.communicate(ngs, Dune::All_All_Interface, Dune::ForwardCommunication); comm.remoteIndices().setNeighbours(neighbours); - comm.remoteIndices().template rebuild(); + comm.remoteIndices().template rebuild(); } #endif @@ -507,12 +462,49 @@ public: const GridView& gridView() const { return gridView_; } + template + Dune::OwnerOverlapCopyAttributeSet::AttributeSet + getAttribute_(const Comm& comm, const bool isOwned, const bool isGhost) const + { + if (isOwned) + return Dune::OwnerOverlapCopyAttributeSet::owner; +#if DUNE_VERSION_GTE(DUNE_ISTL, 2, 7) + else if (isGhost && ( comm.category() == + static_cast(Dune::SolverCategory::nonoverlapping)) ) +#else + else if (isGhost && ( comm.getSolverCategory() == + static_cast(Dune::SolverCategory::nonoverlapping)) ) +#endif + return Dune::OwnerOverlapCopyAttributeSet::overlap; + else + return Dune::OwnerOverlapCopyAttributeSet::copy; + } + + template + void resizeIndexSet_(Comm& comm, const GlobalIndices& globalIndices) const + { + comm.indexSet().beginResize(); + + for (std::size_t localIndex = 0; localIndex < globalIndices.size(); ++localIndex) + { + const auto globalIndex = globalIndices[localIndex]; + if (globalIndex != std::numeric_limits::max()) + { + const bool isOwned = isOwned_[localIndex] > 0; + const auto attr = getAttribute_(comm, isOwned, isGhost(localIndex)); + using LocalIndex = typename Comm::ParallelIndexSet::LocalIndex; + comm.indexSet().add(globalIndex, LocalIndex{localIndex, attr}); + } + } + + comm.indexSet().endResize(); + } + private: const GridView gridView_; //!< the grid view const DofMapper& mapper_; //!< the dof mapper - std::vector owner_; //!< vector to identify unique decomposition + std::vector isOwned_; //!< vector to identify unique decomposition std::vector isGhost_; //!< 
vector to identify ghost dofs - int verbose_; //!< verbosity bool initialized_; //!< whether isGhost and owner arrays are initialized }; // class ParallelISTLHelper @@ -533,37 +525,8 @@ class EntityExchanger using BlockType = typename Matrix::block_type; using IDS = typename Grid::Traits::GlobalIdSet; using IdType = typename IDS::IdType; - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; using DofMapper = typename LinearSolverTraits::DofMapper; -public: - /*! \brief Constructor. Sets up the local to global relations. - \param[in] gridView The gridView on which we are operating - \param[in] mapper The local dof mapper - */ - EntityExchanger(const GridView& gridView, const DofMapper& mapper) - : gridView_(gridView), mapper_(mapper) - { - gid2Index_.clear(); - index2GID_.clear(); - - for (const auto& entity : entities(gridView_, Dune::Codim())) - { - if (entity.partitionType() == Dune::BorderEntity) - { - int localIdx = mapper_.index(entity); - IdType dofIdxGlobal = gridView_.grid().globalIdSet().id(entity); - - std::pair g2iPair(dofIdxGlobal, localIdx); - gid2Index_.insert(g2iPair); - - std::pair i2gPair(localIdx, dofIdxGlobal); - index2GID_.insert(i2gPair); - } - } - } - /*! * \brief A DataHandle class to exchange matrix sparsity patterns. * @@ -582,63 +545,46 @@ public: * * If we look at vertex 7 and the corresponding entries in the matrix for P0, * there will be entries for (7,4) and (7,8), but not for (7,2). - * The MatPatternExchange class will find these entries and returns a vector "sparsity", + * The MatrixPatternExchange class will find these entries and returns a vector "sparsity", * that contains all missing connections. */ - class MatPatternExchange - : public Dune::CommDataHandleIF + struct MatrixPatternExchange + : public Dune::CommDataHandleIF { - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; - public: //! Export type of data for message buffer using DataType = IdType; - /** \brief Constructor - \param[in] mapper The local dof mapper. - \param[in] g2i Global to local index map. - \param[in] i2g Local to global index map. - \param[in] A Matrix to operate on. - \param[in] helper parallel istl helper. - */ - MatPatternExchange (const DofMapper& mapper, - const std::map& g2i, - const std::map& i2g, Matrix& A, - const ParallelISTLHelper& helper) - : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), - sparsity_(A.N()), A_(A), helper_(helper) + MatrixPatternExchange(const DofMapper& mapper, + const std::map& globalToLocal, + const std::map& localToGlobal, Matrix& A, + const ParallelISTLHelper& helper) + : mapper_(mapper), idToIndex_(globalToLocal), indexToID_(localToGlobal) + , sparsity_(A.N()), A_(A), helper_(helper) {} /*! * \brief Returns true if data for given valid codim should be communicated */ bool contains (int dim, int codim) const - { - return (codim==dofCodim); - } + { return (codim == dofCodim); } /*! * \brief Returns true if size of data per entity of given dim and codim is a constant */ bool fixedsize (int dim, int codim) const - { - return false; - } + { return false; } /*! 
* \brief How many objects of type DataType have to be sent for a given entity */ template - size_t size (EntityType& e) const + std::size_t size(EntityType& e) const { - int i = mapper_.index(e); - int n = 0; - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it = index2GID_.find(j.index()); - if (it != index2GID_.end()) + const auto rowIdx = mapper_.index(e); + std::size_t n = 0; + for (auto colIt = A_[rowIdx].begin(); colIt != A_[rowIdx].end(); ++colIt) + if (indexToID_.count(colIt.index())) n++; - } return n; } @@ -647,35 +593,36 @@ public: * \brief Pack data from user to message buffer */ template - void gather (MessageBuffer& buff, const EntityType& e) const + void gather(MessageBuffer& buff, const EntityType& e) const { - int i = mapper_.index(e); - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + const auto rowIdx = mapper_.index(e); + for (auto colIt = A_[rowIdx].begin(); colIt != A_[rowIdx].end(); ++colIt) { - typename std::map::const_iterator it=index2GID_.find(j.index()); - if (it != index2GID_.end()) + auto it = indexToID_.find(colIt.index()); + if (it != indexToID_.end()) buff.write(it->second); } - } /*! * \brief Unpack data from message buffer to user */ template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { - int i = mapper_.index(e); - for (size_t k = 0; k < n; k++) + const auto rowIdx = mapper_.index(e); + for (std::size_t k = 0; k < n; k++) { IdType id; buff.read(id); // only add entries corresponding to border entities - typename std::map::const_iterator it = gid2Index_.find(id); - if (it != gid2Index_.end() - && sparsity_[i].find(it->second) == sparsity_[i].end() - && helper_.ghost(it->second) != 1<<24) - sparsity_[i].insert(it->second); + const auto it = idToIndex_.find(id); + if (it != idToIndex_.end()) + { + const auto colIdx = it->second; + if (!sparsity_[rowIdx].count(colIdx) && !helper_.isGhost(colIdx)) + sparsity_[rowIdx].insert(colIdx); + } } } @@ -683,83 +630,65 @@ public: * \brief Get the communicated sparsity pattern * \return the vector with the sparsity pattern */ - std::vector >& sparsity () - { - return sparsity_; - } + std::vector>& sparsity() + { return sparsity_; } private: const DofMapper& mapper_; - const std::map& gid2Index_; - const std::map& index2GID_; + const std::map& idToIndex_; + const std::map& indexToID_; std::vector > sparsity_; Matrix& A_; const ParallelISTLHelper& helper_; - }; // class MatPatternExchange + }; // class MatrixPatternExchange //! Local matrix blocks associated with the global id set - struct MatEntry + struct MatrixEntry { IdType first; BlockType second; - MatEntry (const IdType& f, const BlockType& s) : first(f), second(s) {} - MatEntry () {} + MatrixEntry (const IdType& f, const BlockType& s) : first(f), second(s) {} + MatrixEntry () {} }; //! A DataHandle class to exchange matrix entries - class MatEntryExchange - : public Dune::CommDataHandleIF + struct MatrixEntryExchange + : public Dune::CommDataHandleIF { - using RowIterator = typename Matrix::RowIterator; - using ColIterator = typename Matrix::ColIterator; - public: //! Export type of data for message buffer - using DataType = MatEntry; + using DataType = MatrixEntry; - /*! - * \brief Constructor - * \param[in] mapper The local dof mapper. - * \param[in] g2i Global to local index map. - * \param[in] i2g Local to global index map. - * \param[in] A Matrix to operate on. 
- */ - MatEntryExchange (const DofMapper& mapper, const std::map& g2i, - const std::map& i2g, - Matrix& A) - : mapper_(mapper), gid2Index_(g2i), index2GID_(i2g), A_(A) + MatrixEntryExchange(const DofMapper& mapper, + const std::map& globalToLocal, + const std::map& localToGlobal, + Matrix& A) + : mapper_(mapper), idToIndex_(globalToLocal), indexToID_(localToGlobal), A_(A) {} /*! * \brief Returns true if data for given valid codim should be communicated */ - bool contains (int dim, int codim) const - { - return (codim==dofCodim); - } + bool contains(int dim, int codim) const + { return (codim == dofCodim); } /*! * \brief Returns true if size of data per entity of given dim and codim is a constant */ - bool fixedsize (int dim, int codim) const - { - return false; - } + bool fixedsize(int dim, int codim) const + { return false; } /*! * \brief How many objects of type DataType have to be sent for a given entity */ template - size_t size (EntityType& e) const + std::size_t size(EntityType& e) const { - int i = mapper_.index(e); - int n = 0; - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) - { - typename std::map::const_iterator it = index2GID_.find(j.index()); - if (it != index2GID_.end()) + const auto rowIdx = mapper_.index(e); + std::size_t n = 0; + for (auto colIt = A_[rowIdx].begin(); colIt != A_[rowIdx].end(); ++colIt) + if (indexToID_.count(colIt.index())) n++; - } return n; } @@ -768,46 +697,66 @@ public: * \brief Pack data from user to message buffer */ template - void gather (MessageBuffer& buff, const EntityType& e) const + void gather(MessageBuffer& buff, const EntityType& e) const { - int i = mapper_.index(e); - for (ColIterator j = A_[i].begin(); j != A_[i].end(); ++j) + const auto rowIdx = mapper_.index(e); + for (auto colIt = A_[rowIdx].begin(); colIt != A_[rowIdx].end(); ++colIt) { - typename std::map::const_iterator it=index2GID_.find(j.index()); - if (it != index2GID_.end()) - buff.write(MatEntry(it->second,*j)); + auto it = indexToID_.find(colIt.index()); + if (it != indexToID_.end()) + buff.write(MatrixEntry(it->second,*colIt)); } - } /*! 
* \brief Unpack data from message buffer to user */ template - void scatter (MessageBuffer& buff, const EntityType& e, size_t n) + void scatter(MessageBuffer& buff, const EntityType& e, std::size_t n) { - int i = mapper_.index(e); - for (size_t k = 0; k < n; k++) + const auto rowIdx = mapper_.index(e); + for (std::size_t k = 0; k < n; k++) { - MatEntry m; + MatrixEntry m; buff.read(m); // only add entries corresponding to border entities - typename std::map::const_iterator it = gid2Index_.find(m.first); - if (it != gid2Index_.end()) - if (A_[i].find(it->second) != A_[i].end()) - A_[i][it->second] += m.second; + auto it = idToIndex_.find(m.first); + if (it != idToIndex_.end()) + if (A_[rowIdx].find(it->second) != A_[rowIdx].end()) + A_[rowIdx][it->second] += m.second; } } private: const DofMapper& mapper_; - const std::map& gid2Index_; - const std::map& index2GID_; + const std::map& idToIndex_; + const std::map& indexToID_; Matrix& A_; - }; // class MatEntryExchange + }; // class MatrixEntryExchange + +public: + + EntityExchanger(const GridView& gridView, const DofMapper& mapper) + : gridView_(gridView), mapper_(mapper) + { + idToIndex_.clear(); + indexToID_.clear(); + + for (const auto& entity : entities(gridView_, Dune::Codim())) + { + if (entity.partitionType() == Dune::BorderEntity) + { + const int localIdx = mapper_.index(entity); + IdType dofIdxGlobal = gridView_.grid().globalIdSet().id(entity); + + idToIndex_.emplace(dofIdxGlobal, localIdx); + indexToID_.emplace(localIdx, dofIdxGlobal); + } + } + } - [[deprecated("Use extendMatrix instead. Will be removed after 3.1!")]] + [[deprecated("Use extendMatrix instead. Will be removed after 3.2!")]] void getExtendedMatrix (Matrix& A, const ParallelISTLHelper& helper) { extendMatrix(A, helper); } @@ -818,122 +767,118 @@ public: */ void extendMatrix(Matrix& A, const ParallelISTLHelper& helper) { - if (gridView_.comm().size() > 1) + if (gridView_.comm().size() <= 1) + return; + + Matrix tmp(A); + std::size_t nnz = 0; + // get entries from other processes + MatrixPatternExchange datahandle(mapper_, idToIndex_, indexToID_, A, helper); + gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); + std::vector>& sparsity = datahandle.sparsity(); + // add own entries, count number of nonzeros + for (auto rowIt = A.begin(); rowIt != A.end(); ++rowIt) { - Matrix tmp(A); - std::size_t nnz = 0; - // get entries from other processes - MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, helper); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - std::vector >& sparsity = datahandle.sparsity(); - // add own entries, count number of nonzeros - for (RowIterator i = A.begin(); i != A.end(); ++i) - { - for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) - { - if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) - sparsity[i.index()].insert(j.index()); - } - nnz += sparsity[i.index()].size(); - } + for (auto colIt = A[rowIt.index()].begin(); colIt != A[rowIt.index()].end(); ++colIt) + if (!sparsity[rowIt.index()].count(colIt.index())) + sparsity[rowIt.index()].insert(colIt.index()); - A.setSize(tmp.N(), tmp.N(), nnz); - A.setBuildMode(Matrix::row_wise); - typename Matrix::CreateIterator citer = A.createbegin(); - using Iter = typename std::vector >::const_iterator; - for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer) - { - using SIter = typename 
std::set::const_iterator; - for (SIter si = i->begin(), send = i->end(); si!=send; ++si) - citer.insert(*si); - } + nnz += sparsity[rowIt.index()].size(); + } - // set matrix old values - A = 0; - for (RowIterator i = tmp.begin(); i != tmp.end(); ++i) - for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j) - A[i.index()][j.index()] = tmp[i.index()][j.index()]; + A.setSize(tmp.N(), tmp.N(), nnz); + A.setBuildMode(Matrix::row_wise); + auto citer = A.createbegin(); + for (auto i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer) + { + for (auto si = i->begin(), send = i->end(); si!=send; ++si) + citer.insert(*si); } + + // set matrix old values + A = 0; + for (auto rowIt = tmp.begin(); rowIt != tmp.end(); ++rowIt) + for (auto colIt = tmp[rowIt.index()].begin(); colIt != tmp[rowIt.index()].end(); ++colIt) + A[rowIt.index()][colIt.index()] = tmp[rowIt.index()][colIt.index()]; } /*! * \brief Sums up the entries corresponding to border vertices. * \param A Matrix to operate on. */ - void sumEntries (Matrix& A) + void sumEntries(Matrix& A) { - if (gridView_.comm().size() > 1) - { - MatEntryExchange datahandle(mapper_, gid2Index_, index2GID_, A); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - } + if (gridView_.comm().size() <= 1) + return; + + MatrixEntryExchange datahandle(mapper_, idToIndex_, indexToID_, A); + gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); } -#if HAVE_MPI - /*! - * \brief Extends the sparsity pattern of the discretization matrix for AMG. - * \param A A reference to the matrix to change. - */ - [[deprecated("Use extendMatrix instead. Will be removed after 3.1!")]] - void getExtendedMatrix (Matrix& A) - { extendMatrix(A); } - /*! - * \brief Extends the sparsity pattern of the discretization matrix for AMG. - * \param A A reference to the matrix to change. - */ - void extendMatrix(Matrix& A) - { - if (gridView_.comm().size() > 1) - { - Matrix tmp(A); - std::size_t nnz = 0; - // get entries from other processes - MatPatternExchange datahandle(mapper_, gid2Index_, index2GID_, A, *this); - gridView_.communicate(datahandle, Dune::InteriorBorder_InteriorBorder_Interface, - Dune::ForwardCommunication); - std::vector >& sparsity = datahandle.sparsity(); +private: + const GridView gridView_; + const DofMapper& mapper_; + std::map idToIndex_; + std::map indexToID_; - // add own entries, count number of nonzeros - for (RowIterator i = A.begin(); i != A.end(); ++i) - { - for (ColIterator j = A[i.index()].begin(); j != A[i.index()].end(); ++j) - { - if (sparsity[i.index()].find(j.index()) == sparsity[i.index()].end()) - sparsity[i.index()].insert(j.index()); - } - nnz += sparsity[i.index()].size(); - } +}; // class EntityExchanger - A.setSize(tmp.N(), tmp.N(), nnz); - A.setBuildMode(Matrix::row_wise); - typename Matrix::CreateIterator citer = A.createbegin(); - using Iter = typename std::vector >::const_iterator; - for (Iter i = sparsity.begin(), end = sparsity.end(); i!=end; ++i, ++citer){ - using SIter = typename std::set::const_iterator; - for (SIter si = i->begin(), send = i->end(); si!=send; ++si) - citer.insert(*si); - } +#if HAVE_MPI +/*! 
+ * \brief Prepare linear algebra variables for parallel solvers
+ */
+template
+void prepareLinearAlgebraParallel(Matrix& A, Vector& b,
+                                  std::shared_ptr& comm,
+                                  std::shared_ptr& fop,
+                                  std::shared_ptr& sp,
+                                  ParallelHelper& pHelper,
+                                  const bool firstCall)
+{
+    const auto category = LinearSolverTraits::isNonOverlapping ?
+                          Dune::SolverCategory::nonoverlapping
+                        : Dune::SolverCategory::overlapping;
-            // set matrix old values
-            A = 0;
-            for (RowIterator i = tmp.begin(); i != tmp.end(); ++i)
-                for (ColIterator j = tmp[i.index()].begin(); j != tmp[i.index()].end(); ++j)
-                    A[i.index()][j.index()] = tmp[i.index()][j.index()];
+    if (LinearSolverTraits::isNonOverlapping && firstCall)
+        pHelper.initGhostsAndOwners();
-            sumEntries(A);
-        }
+    comm = std::make_shared(pHelper.gridView().comm(), category);
+
+    if (LinearSolverTraits::isNonOverlapping)
+    {
+        // extend the matrix pattern such that it is usable for a parallel solver
+        using GridView = std::decay_t;
+        EntityExchanger exchanger(pHelper.gridView(), pHelper.dofMapper());
+        exchanger.extendMatrix(A, pHelper);
+        exchanger.sumEntries(A);
     }
-#endif
+    pHelper.createParallelIndexSet(*comm);
-private:
-    const GridView gridView_;
-    const DofMapper& mapper_;
-    std::map gid2Index_;
-    std::map index2GID_;
+    fop = std::make_shared(A, *comm);
+    sp = std::make_shared(*comm);
+
+    // make rhs consistent
+    if (LinearSolverTraits::isNonOverlapping)
+        pHelper.makeNonOverlappingConsistent(b);
+}
+#endif // HAVE_MPI
+
+/*!
+ * \brief Prepare linear algebra variables for sequential solvers
+ */
+template
+void prepareLinearAlgebraSequential(Matrix& A,
+                                    std::shared_ptr& comm,
+                                    std::shared_ptr& fop,
+                                    std::shared_ptr& sp)
+{
+    comm = std::make_shared();
+    fop = std::make_shared(A);
+    sp = std::make_shared();
+}
-}; // class EntityExchanger
 /*!
  * \brief Prepare the linear algebra member variables.
@@ -949,16 +894,16 @@ private:
  *
  * \tparam isParallel decides if the setting is parallel or sequential
  */
-template
-struct LinearAlgebraPreparator
+template
+struct LinearAlgebraPreparator // TODO: Deprecating the whole struct always triggers a warning even it is not used. Remove after 3.2
 {
-    using DofMapper = typename LinearSolverTraits::DofMapper;
-    using ParallelHelper = ParallelISTLHelper;
-    using Comm = typename LinearSolverTraits::Comm;
-    using LinearOperator = typename LinearSolverTraits::LinearOperator;
-    using ScalarProduct = typename LinearSolverTraits::ScalarProduct;
+    using ParallelHelper = ParallelISTLHelper;
+    using Comm = typename AmgTraits::Comm;
+    using LinearOperator = typename AmgTraits::LinearOperator;
+    using ScalarProduct = typename AmgTraits::ScalarProduct;
     template
+    [[deprecated("Use free function prepareLinearAlgebraSequential instead. Will be removed after 3.2!")]]
     static void prepareLinearAlgebra(Matrix& A, Vector& b,
                                      int& rank,
                                      std::shared_ptr& comm,
@@ -967,9 +912,7 @@ struct LinearAlgebraPreparator
                                      ParallelHelper& pHelper,
                                      const bool firstCall)
     {
-        comm = std::make_shared();
-        fop = std::make_shared(A);
-        sp = std::make_shared();
+        prepareLinearAlgebraSequential(A, comm, fop, sp);
     }
 };
@@ -977,16 +920,16 @@ struct LinearAlgebraPreparator
 /*!
  * \brief Specialization for the parallel case.
  */
-template
-struct LinearAlgebraPreparator
+template
+struct LinearAlgebraPreparator // TODO: Deprecating the whole struct always triggers a warning even it is not used. Remove after 3.2
 {
-    using DofMapper = typename LinearSolverTraits::DofMapper;
-    using ParallelHelper = ParallelISTLHelper;
-    using Comm = typename LinearSolverTraits::Comm;
-    using LinearOperator = typename LinearSolverTraits::LinearOperator;
-    using ScalarProduct = typename LinearSolverTraits::ScalarProduct;
+    using ParallelHelper = ParallelISTLHelper;
+    using Comm = typename AmgTraits::Comm;
+    using LinearOperator = typename AmgTraits::LinearOperator;
+    using ScalarProduct = typename AmgTraits::ScalarProduct;
     template
+    [[deprecated("Use free function prepareLinearAlgebraParallel instead. Will be removed after 3.2!")]]
     static void prepareLinearAlgebra(Matrix& A, Vector& b,
                                      int& rank,
                                      std::shared_ptr& comm,
@@ -995,32 +938,11 @@ struct LinearAlgebraPreparator
                                      ParallelHelper& pHelper,
                                      const bool firstCall)
     {
-        const auto category = LinearSolverTraits::isNonOverlapping ?
-            Dune::SolverCategory::nonoverlapping : Dune::SolverCategory::overlapping;
-
-        if (LinearSolverTraits::isNonOverlapping && firstCall)
-            pHelper.initGhostsAndOwners();
-
-        comm = std::make_shared(pHelper.gridView().comm(), category);
-
-        if (LinearSolverTraits::isNonOverlapping)
-        {
-            // extend the matrix pattern such that it is usable for the parallel solver
-            EntityExchanger exchanger(pHelper.gridView(), pHelper.dofMapper());
-            exchanger.extendMatrix(A, pHelper);
-            exchanger.sumEntries(A);
-        }
-        pHelper.createParallelIndexSet(*comm);
-
-        fop = std::make_shared(A, *comm);
-        sp = std::make_shared(*comm);
+        prepareLinearAlgebraParallel(A, b, comm, fop, sp, pHelper, firstCall);
         rank = comm->communicator().rank();
-
-        // Make rhs consistent
-        if (LinearSolverTraits::isNonOverlapping)
-            pHelper.makeNonOverlappingConsistent(b);
     }
-};
+
+}; // parallel LinearAlgebraPreparator
 #endif // HAVE_MPI
--
GitLab
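
A minimal usage sketch (not part of the patch): it shows how the new free helper prepareLinearAlgebraSequential might be called from a solver backend, mirroring the call in the deprecated LinearAlgebraPreparator wrapper above. The trait and function names below are assumptions, and explicit template arguments may be required in practice, since the angle-bracketed template parameters are not visible in this rendering of the diff.

#include <memory>
#include <tuple>

// Hypothetical sequential traits: assumed to expose the nested types Comm
// (e.g. Dune::Amg::SequentialInformation), LinearOperator and ScalarProduct,
// in the spirit of the traits introduced by this patch series.
template<class SequentialTraits, class Matrix>
auto setUpSequentialLinearAlgebra(Matrix& A)
{
    using Comm = typename SequentialTraits::Comm;
    using LinearOperator = typename SequentialTraits::LinearOperator;
    using ScalarProduct = typename SequentialTraits::ScalarProduct;

    std::shared_ptr<Comm> comm;
    std::shared_ptr<LinearOperator> fop;
    std::shared_ptr<ScalarProduct> sp;

    // fills comm, fop and sp, as the deprecated sequential wrapper above does;
    // depending on the final signature, the traits may have to be passed as an
    // explicit template argument
    prepareLinearAlgebraSequential(A, comm, fop, sp);

    // hand these to a sequential ISTL solver
    return std::make_tuple(comm, fop, sp);
}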