From b003e1614453b16c5647af58e851b93747021fb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 13:44:10 +0100 Subject: [PATCH 01/27] [box][scvf] harmonize outsideScvIdx() with other scvfs --- dumux/discretization/box/subcontrolvolumeface.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dumux/discretization/box/subcontrolvolumeface.hh b/dumux/discretization/box/subcontrolvolumeface.hh index d2d1e3c8b3..40cd2c9b7a 100644 --- a/dumux/discretization/box/subcontrolvolumeface.hh +++ b/dumux/discretization/box/subcontrolvolumeface.hh @@ -189,9 +189,9 @@ public: return scvIndices_[0]; } - //! index of the outside sub control volume for spatial param evaluation - // This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if (boundary == true and i != 0) or i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { assert(!boundary()); return scvIndices_[1]; -- GitLab From 582e58117cde686fe2324ddd2a305787d3fc86c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 13:44:37 +0100 Subject: [PATCH 02/27] [box][scvf] add numOutsideScvs() function --- dumux/discretization/box/subcontrolvolumeface.hh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/dumux/discretization/box/subcontrolvolumeface.hh b/dumux/discretization/box/subcontrolvolumeface.hh index 40cd2c9b7a..b11a298f3e 100644 --- a/dumux/discretization/box/subcontrolvolumeface.hh +++ b/dumux/discretization/box/subcontrolvolumeface.hh @@ -190,13 +190,19 @@ public: } //! Index of the i-th outside sub control volume or boundary scv index. - // Results in undefined behaviour if (boundary == true and i != 0) or i >= numOutsideScvs() + // Results in undefined behaviour if i >= numOutsideScvs() LocalIndexType outsideScvIdx(int i = 0) const { assert(!boundary()); return scvIndices_[1]; } + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast(!boundary()); + } + //! The local index of this sub control volume face GridIndexType index() const { -- GitLab From 253b9dab9201c9cd734c7479a8bed5646c38f689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 14:04:52 +0100 Subject: [PATCH 03/27] [facet][box][scvf] add numOutsideScvs() --- .../facet/box/subcontrolvolumeface.hh | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/dumux/multidomain/facet/box/subcontrolvolumeface.hh b/dumux/multidomain/facet/box/subcontrolvolumeface.hh index f848eeb2af..88aa660ff9 100644 --- a/dumux/multidomain/facet/box/subcontrolvolumeface.hh +++ b/dumux/multidomain/facet/box/subcontrolvolumeface.hh @@ -131,11 +131,11 @@ public: Scalar area() const { return area_; } - //! returns bolean if the sub control volume face is on the boundary + //! returns true if the sub control volume face is on the boundary bool boundary() const { return boundary_; } - //! returns bolean if the sub control volume face is on an interior boundary + //! returns true if the sub control volume face is on an interior boundary bool interiorBoundary() const { return interiorBoundary_; } @@ -144,7 +144,7 @@ public: const GlobalPosition& unitOuterNormal() const { return unitOuterNormal_; } - //! index of the inside sub control volume for spatial param evaluation + //! 
index of the inside sub control volume LocalIndexType insideScvIdx() const { return scvIndices_[0]; } @@ -152,14 +152,20 @@ public: GridIndexType index() const { return scvfIndex_; } - //! index of the outside sub control volume for spatial param evaluation - //! This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { - assert(!boundary()); + assert(!boundary() && !interiorBoundary()); return scvIndices_[1]; } + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast(!(boundary() || interiorBoundary())); + } + //! returns the element-local index of the facet this scvf is embedded in. //! This is only valid to be called for scvfs on domain/interior boundaries. LocalIndexType facetIndexInElement() const -- GitLab From ac48365eb454af1495aee3d8d50406227d24eb40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 14:04:07 +0100 Subject: [PATCH 04/27] [boxdfm][scvf] add numOutsideScvs() --- .../boxdfm/subcontrolvolumeface.hh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh b/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh index 2be9c716cd..259ea09672 100644 --- a/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh +++ b/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh @@ -210,7 +210,7 @@ public: Scalar area() const { return area_; } - //! returns bolean if the sub control volume face is on the boundary + //! returns true if the sub control volume face is on the boundary bool boundary() const { return boundary_; } @@ -234,18 +234,25 @@ public: typename BoundaryFlag::value_type boundaryFlag() const { return boundaryFlag_.get(); } - //! Index of the inside sub control volume for spatial param evaluation + //! index of the inside sub control volume LocalIndexType insideScvIdx() const { return scvIndices_[0]; } - //! Index of the outside sub control volume for spatial param evaluation - // This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { assert(!boundary()); return scvIndices_[1]; } + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast(!boundary()); + } + + //! Returns a corner of the sub control volume face const GlobalPosition& corner(unsigned int localIdx) const { assert(localIdx < corners_.size() && "provided index exceeds the number of corners"); -- GitLab From 938446b2330940df49e905678bf522f6edd07b18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 13 Jan 2021 13:40:15 +0100 Subject: [PATCH 05/27] [md][gridvars] allow for construction from existing tuple --- dumux/multidomain/fvgridvariables.hh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh index 82f25a464b..bb70715626 100644 --- a/dumux/multidomain/fvgridvariables.hh +++ b/dumux/multidomain/fvgridvariables.hh @@ -69,6 +69,13 @@ public: */ MultiDomainFVGridVariables() = default; + /*! 
+ * \brief Construction from an existing tuple + */ + MultiDomainFVGridVariables(const TupleType& tuple) + : gridVars_(tuple) + {} + /*! * \brief Contruct the grid variables * \param gridGeometries a tuple of grid geometry shared pointers -- GitLab From 20c44da914bf85e8a4faf07e1f20593d4b519072 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:37:16 +0100 Subject: [PATCH 06/27] [fvlocalop] use more generic check if outside entry should be set --- dumux/assembly/fv/localoperator.hh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh index d4374b8df1..06c543461a 100644 --- a/dumux/assembly/fv/localoperator.hh +++ b/dumux/assembly/fv/localoperator.hh @@ -215,7 +215,10 @@ protected: { const auto flux = Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); r[fvGeometry_.scv(scvf.insideScvIdx()).localDofIndex()] += flux; - r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; + + // We expects that for scvf.numOutsideScvs() > 1 outside faces are visited separately! + if (scvf.numOutsideScvs() == 1) + r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; } // boundary faces -- GitLab From 425b57f28232a3bcb35f240f04e0a3ea3b46184d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:36:47 +0100 Subject: [PATCH 07/27] [TEMP] add todo comment --- dumux/assembly/fv/boxlocalassembler.hh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh index 80fd526a1c..bb424adcbc 100644 --- a/dumux/assembly/fv/boxlocalassembler.hh +++ b/dumux/assembly/fv/boxlocalassembler.hh @@ -156,6 +156,7 @@ public: { res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + // TODO: Skip this for explicit schemes? auto& row = jac[scvI.dofIndex()]; for (auto col = row.begin(); col != row.end(); ++col) row[col.index()][eqIdx] = 0.0; @@ -333,6 +334,7 @@ protected: const auto globalI = scvI.dofIndex(); const auto localI = scvI.localDofIndex(); + // TODO: How to handle the case of caching? const auto origCurVolVars = curElemVolVars[scvI]; auto& curVolVars = curElemVolVars[scvI]; -- GitLab From ec037c458e5b79a654847d30cc35e12ead7755a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:45:17 +0100 Subject: [PATCH 08/27] [TEMP][md,cm] provide access to sub-solution TODO: Find alternative way, possibly getting rid of duplicate solution storage in the coupling managers --- dumux/multidomain/couplingmanager.hh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dumux/multidomain/couplingmanager.hh b/dumux/multidomain/couplingmanager.hh index b1fbbb6296..27b458e0d6 100644 --- a/dumux/multidomain/couplingmanager.hh +++ b/dumux/multidomain/couplingmanager.hh @@ -45,6 +45,7 @@ template class CouplingManager { template using SubDomainTypeTag = typename Traits::template SubDomain::TypeTag; + template using SubDomainSolutionVector = GetPropType, Properties::SolutionVector>; template using PrimaryVariables = GetPropType, Properties::PrimaryVariables>; template using GridView = typename GetPropType, Properties::GridGeometry>::GridView; template using Element = typename GridView::template Codim<0>::Entity; @@ -269,6 +270,14 @@ public: DUNE_THROW(Dune::InvalidStateException, "The problem pointer was not set or has already expired. 
Use setSubProblems() before calling this function"); } + /*! + * \brief Return a reference to the current solution in a subdomain + * \param domainIdx The domain index + */ + template + const SubDomainSolutionVector& dofs(Dune::index_constant domainId) const + { return curSol_[domainId]; } + protected: /*! -- GitLab From 4a7540ff4faaf30bb4ab934359506f61568d7f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:47:26 +0100 Subject: [PATCH 09/27] [TEMP][md,grivars] store copy of solution --- dumux/multidomain/fvgridvariables.hh | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh index bb70715626..400b8b7111 100644 --- a/dumux/multidomain/fvgridvariables.hh +++ b/dumux/multidomain/fvgridvariables.hh @@ -41,7 +41,6 @@ namespace Dumux { template class MultiDomainFVGridVariables { - using SolutionVector = typename MDTraits::SolutionVector; static constexpr std::size_t numSubDomains = MDTraits::numSubDomains; template @@ -53,6 +52,9 @@ class MultiDomainFVGridVariables using Problems = typename MDTraits::template TupleOfSharedPtrConst; public: + //! export the solution vector type required for update + using SolutionVector = typename MDTraits::SolutionVector; + //! export base types of the stored type template using Type = typename MDTraits::template SubDomain::GridVariables; @@ -94,6 +96,7 @@ public: //! initialize all variables void init(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -104,6 +107,8 @@ public: //! update all variables void update(const SolutionVector& sol, bool forceFluxCacheUpdate = false) { + // TODO: This should update the coupling manager? + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -114,6 +119,7 @@ public: //! update all variables after grid adaption void updateAfterGridAdaption(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -137,6 +143,7 @@ public: //! resets state to the one before time integration void resetTimeStep(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -164,6 +171,9 @@ public: void set(PtrType p, Dune::index_constant id = Dune::index_constant{}) { Dune::Hybrid::elementAt(gridVars_, id) = p; } + const SolutionVector& dofs() const { return dofs_; } + SolutionVector& dofs() { return dofs_; } + /*! * \brief return the grid variables tuple we are wrapping * \note the copy is not expensive since it is a tuple of shared pointers @@ -172,9 +182,10 @@ public: { return gridVars_; } private: - //! 
a tuple of pointes to all grid variables TupleType gridVars_; + + SolutionVector dofs_; }; } // end namespace Dumux -- GitLab From db28149be0fd6a209ab3cb70b24bd49b2aca64cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:47:47 +0100 Subject: [PATCH 10/27] [TEMP][md,traits] export subdomain local operator --- dumux/multidomain/traits.hh | 1 + 1 file changed, 1 insertion(+) diff --git a/dumux/multidomain/traits.hh b/dumux/multidomain/traits.hh index 22f2552456..314ff3ab2f 100644 --- a/dumux/multidomain/traits.hh +++ b/dumux/multidomain/traits.hh @@ -181,6 +181,7 @@ public: using GridVariables =GetPropType, Properties::GridVariables>; using IOFields = GetPropType, Properties::IOFields>; using SolutionVector = GetPropType, Properties::SolutionVector>; + using LocalOperator = GetPropType, Properties::LocalOperator>; }; //\} -- GitLab From 23450013a441bb2715544c16a8349cd3b19333dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:49:09 +0100 Subject: [PATCH 11/27] [TEMP] add drafty new md assembly --- dumux/multidomain/assembler.hh | 475 ++++++++++++++++ .../multidomain/assembly/boxlocalassembler.hh | 508 ++++++++++++++++++ dumux/multidomain/assembly/localassembler.hh | 57 ++ 3 files changed, 1040 insertions(+) create mode 100644 dumux/multidomain/assembler.hh create mode 100644 dumux/multidomain/assembly/boxlocalassembler.hh create mode 100644 dumux/multidomain/assembly/localassembler.hh diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh new file mode 100644 index 0000000000..936dee20a5 --- /dev/null +++ b/dumux/multidomain/assembler.hh @@ -0,0 +1,475 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup MultiDomain + * \ingroup Assembly + * \brief A linear system assembler (residual and Jacobian) for finite volume schemes + * with multiple domains + */ +#ifndef DUMUX_MULTIDOMAIN_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "couplingjacobianpattern.hh" +#include "subdomaincclocalassembler.hh" +// #include "subdomainboxlocalassembler.hh" +#include "subdomainstaggeredlocalassembler.hh" + +#include +#include "assembly/localassembler.hh" + +namespace Dumux { + +/*! + * \ingroup MultiDomain + * \ingroup Assembly + * \brief A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa, mpfa, ...) 
+ * with multiple domains + * \tparam MDTraits the multidimension traits + * \tparam diffMethod the differentiation method to residual compute derivatives + * \tparam useImplicitAssembly if to use an implicit or explicit time discretization + */ +template +class MultiDomainAssembler +{ + template + using SubDomainTypeTag = typename MDTraits::template SubDomain::TypeTag; + +public: + using Traits = MDTraits; + using CouplingManager = CMType; + + // types related to linear algebra + using Scalar = typename MDTraits::Scalar; + using JacobianMatrix = typename MDTraits::JacobianMatrix; + using SolutionVector = typename MDTraits::SolutionVector; + using ResidualType = SolutionVector; + + // types underlying the subdomains + template using SubDomainLocalOperator = typename MDTraits::template SubDomain::LocalOperator; + template using SubDomainGridVariables = typename MDTraits::template SubDomain::GridVariables; + template using SubDomainGridGeometry = typename MDTraits::template SubDomain::GridGeometry; + template using SubDomainProblem = typename MDTraits::template SubDomain::Problem; + + // The variables that define an evaluation point + using Variables = MultiDomainFVGridVariables; + using GridVariables = Variables; + + // The grid geometry on which it is operated + using GridGeometry = MultiDomainFVGridGeometry; + + //! export the type for parameters of a stage in time integration + using StageParams = MultiStageParams; + +private: + template using SubDomainGridView = typename SubDomainGridGeometry::GridView; + template using SubDomainElemVars = typename SubDomainGridVariables::LocalView; + template using SubDomainElement = typename SubDomainGridView::template Codim<0>::Entity; + + using TimeLoop = TimeLoopBase; + using ThisType = MultiDomainAssembler; + using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst; + +public: + + /*! + * \brief Constructor from a grid geometry tuple for stationary problems + * \note the grid variables might be temporarily changed during assembly (if caching is enabled) + * it is however guaranteed that the state after assembly will be the same as before + */ + MultiDomainAssembler(GridGeometryTuple&& gridGeometry, std::shared_ptr couplingManager) + : gridGeometryTuple_(gridGeometry) + , couplingManager_(couplingManager) + , isImplicit_(true) + { + std::cout << "Instantiated MultiDomainAssembler for a stationary problem." << std::endl; + } + + /*! + * \brief The constructor for instationary problems + * \note the grid variables might be temporarily changed during assembly (if caching is enabled) + * it is however guaranteed that the state after assembly will be the same as before + */ + template + MultiDomainAssembler(GridGeometryTuple&& gridGeometry, + std::shared_ptr couplingManager, + const TimeIntegrationMethod& method) + : gridGeometryTuple_(gridGeometry) + , couplingManager_(couplingManager) + , isImplicit_(method.implicit()) + { + std::cout << "Instantiated MultiDomainAssembler for an instationary problem." << std::endl; + } + + /*! + * \brief Assembles the global Jacobian of the residual + * and the residual for the current solution. 
+ */ + void assembleJacobianAndResidual(const GridVariables& gridVars) + { + ptr_ = &gridVars; + resetJacobian_(); + resetResidual_(); + + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto domainId) + { + auto& jacRow = (*jacobian_)[domainId]; + auto& subRes = (*residual_)[domainId]; + this->assembleJacobianAndResidual_(domainId, jacRow, subRes, gridVars); + }); + + // using namespace Dune::Indices; + // std::cout << "SOL in GV: " << std::endl; + // Dune::printvector(std::cout, gridVars.dofs()[_0], "", "", 1, 10, 5); + // Dune::printvector(std::cout, gridVars.dofs()[_1], "", "", 1, 10, 5); + // + // std::cout << "Linear system: " << std::endl; + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_0], "", "", 10, 5); + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_1], "", "", 10, 5); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_0], "", "", 10, 5); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_1], "", "", 10, 5); + // Dune::printvector(std::cout, (*residual_)[_0], "", "", 1, 10, 5); + // Dune::printvector(std::cout, (*residual_)[_1], "", "", 1, 10, 5); + } + + //! compute the residuals using the internal residual + void assembleResidual(const GridVariables& gridVars) + { + ptr_ = &gridVars; + resetResidual_(); + assembleResidual(*residual_, gridVars); + } + + //! assemble a residual r + void assembleResidual(ResidualType& r, const GridVariables& gridVars) + { + ptr_ = &gridVars; + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(r)), [&](const auto domainId) + { + auto& subRes = r[domainId]; + this->assembleResidual_(domainId, subRes, gridVars); + }); + } + + //! compute the residual and return it's vector norm + //! TODO: this needs to be adapted in parallel + Scalar residualNorm(const GridVariables& gridVars) + { + ptr_ = &gridVars; + ResidualType residual; + setResidualSize(residual); + assembleResidual(residual, gridVars); + + // calculate the square norm of the residual + const Scalar result2 = residual.two_norm2(); + using std::sqrt; + return sqrt(result2); + } + + /*! + * \brief Tells the assembler which jacobian and residual to use. + * This also resizes the containers to the required sizes and sets the + * sparsity pattern of the jacobian matrix. + */ + void setLinearSystem(std::shared_ptr A, + std::shared_ptr r) + { + jacobian_ = A; + residual_ = r; + + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + setResidualSize(*residual_); + } + + /*! + * \brief The version without arguments uses the default constructor to create + * the jacobian and residual objects in this assembler if you don't need them outside this class + */ + void setLinearSystem() + { + jacobian_ = std::make_shared(); + residual_ = std::make_shared(); + + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + setResidualSize(*residual_); + } + + /*! + * \brief Sets the jacobian build mode + */ + void setJacobianBuildMode(JacobianMatrix& jac) const + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto i) + { + forEach(jac[i], [&](auto& jacBlock) + { + using BlockType = std::decay_t; + if (jacBlock.buildMode() == BlockType::BuildMode::unknown) + jacBlock.setBuildMode(BlockType::BuildMode::random); + else if (jacBlock.buildMode() != BlockType::BuildMode::random) + DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment"); + }); + }); + } + + /*! + * \brief Sets the jacobian sparsity pattern. 
+ */ + void setJacobianPattern(JacobianMatrix& jac) const + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto domainI) + { + forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ) + { + const auto pattern = this->getJacobianPattern_(domainI, domainJ); + pattern.exportIdx(jac[domainI][domainJ]); + }); + }); + } + + /*! + * \brief Resizes the residual + */ + void setResidualSize(SolutionVector& res) const + { + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId) + { res[domainId].resize(this->numDofs(domainId)); }); + } + + //! the number of dof locations of domain i + template + std::size_t numDofs(Dune::index_constant domainId) const + { return std::get(gridGeometryTuple_)->numDofs(); } + + //! the problem of domain i + // template + // const auto& problem(Dune::index_constant domainId) const + // { return *std::get(problemTuple_); } + + //! the finite volume grid geometry of domain i + template + const auto& gridGeometry(Dune::index_constant domainId) const + { return *std::get(gridGeometryTuple_); } + + //! the grid view of domain i + template + const auto& gridView(Dune::index_constant domainId) const + { return gridGeometry(domainId).gridView(); } + + //! the grid variables of domain i + template + auto& gridVariables(Dune::index_constant domainId) + { return (*ptr_)[domainId]; } + + //! the grid variables of domain i + template + const auto& gridVariables(Dune::index_constant domainId) const + { return (*ptr_)[domainId]; } + + //! the coupling manager + const CouplingManager& couplingManager() const + { return *couplingManager_; } + + //! the full Jacobian matrix + JacobianMatrix& jacobian() + { return *jacobian_; } + + //! the full residual vector + SolutionVector& residual() + { return *residual_; } + +private: + // reset the residual vector to 0.0 + void resetResidual_() + { + if(!residual_) + { + residual_ = std::make_shared(); + setResidualSize(*residual_); + } + + (*residual_) = 0.0; + } + + // reset the jacobian vector to 0.0 + void resetJacobian_() + { + if(!jacobian_) + { + jacobian_ = std::make_shared(); + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + } + + (*jacobian_) = 0.0; + } + + template + void assembleJacobianAndResidual_(Dune::index_constant domainId, + JacRow& jacRow, SubRes& subRes, + const GridVariables& gridVars) + { + assemble_(domainId, [&](const auto& element) + { + auto ggLocalView = localView(gridGeometry(domainId)); + ggLocalView.bind(element); + auto elemVars = this->prepareElemVariables_(gridVars[domainId], element, ggLocalView); + + using LocalAssembler = Dumux::MultiDomainLocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_, couplingManager_); + localAssembler.assembleJacobianAndResidual(jacRow, subRes); + }); + } + + template + void assembleResidual_(Dune::index_constant domainId, + SubRes& subRes, + const GridVariables& gridVars) + { + assemble_(domainId, [&](const auto& element) + { + auto ggLocalView = localView(gridGeometry(domainId)); + ggLocalView.bind(element); + auto elemVars = this->prepareElemVariables_(gridVars[domainId], element, ggLocalView); + + using LocalAssembler = Dumux::MultiDomainLocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_, couplingManager_); + localAssembler.assembleResidual(subRes); + }); + } + + /*! 
+ * \brief A method assembling something per element + * \note Handles exceptions for parallel runs + * \throws NumericalProblem on all processes if something throwed during assembly + * TODO: assemble in parallel + */ + template + void assemble_(Dune::index_constant domainId, AssembleElementFunc&& assembleElement) const + { + // let the local assembler add the element contributions + for (const auto& element : elements(gridView(domainId))) + assembleElement(element); + } + + // get diagonal block pattern + template = 0> + Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant domainI, + Dune::index_constant domainJ) const + { + const auto& gg = gridGeometry(domainI); + auto pattern = isImplicit_ ? getJacobianPattern(gg) + : getJacobianPattern(gg); + couplingManager_->extendJacobianPattern(domainI, pattern); + return pattern; + } + + // get coupling block pattern + template = 0> + Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant domainI, + Dune::index_constant domainJ) const + { + return isImplicit_ ? getCouplingJacobianPattern(*couplingManager_, + domainI, gridGeometry(domainI), + domainJ, gridGeometry(domainJ)) + : getCouplingJacobianPattern(*couplingManager_, + domainI, gridGeometry(domainI), + domainJ, gridGeometry(domainJ)); + } + + //! prepares the local views on the grid variables for the given element + //! \todo: TODO: when stageparams.skipSpatial() == true, we don't need to bind flux vars caches! + template + std::vector> + prepareElemVariables_(const SubDomainGridVariables& gridVars, + const SubDomainElement& element, + const typename SubDomainGridGeometry::LocalView& ggLocalView) const + { + couplingManager_->bindCouplingContext(Dune::index_constant(), element, *this); + if (!stageParams_) + { + auto elemVars = localView(gridVars); + elemVars.bind(element, ggLocalView); + return {elemVars}; + } + else + { + std::vector> elemVars; + elemVars.reserve(stageParams_->size()); + + for (int i = 0; i < stageParams_->size()-1; ++i) + elemVars.emplace_back(prevStageVariables_[i][Dune::index_constant()]); + elemVars.emplace_back(gridVars); + + for (auto& evv : elemVars) + evv.bind(element, ggLocalView); + + return elemVars; + } + } + + //! the grid geometries of all subdomains + GridGeometryTuple gridGeometryTuple_; + + //! pointer to the coupling manager + std::shared_ptr couplingManager_; + + //! states if an implicit of explicit scheme is used (affects jacobian pattern) + bool isImplicit_; + + //! shared pointers to the jacobian matrix and residual + std::shared_ptr jacobian_; + std::shared_ptr residual_; + + //! parameters containing information on the current stage of time integration + std::shared_ptr stageParams_ = nullptr; + + //! keep track of the states of previous stages within a time integration step + std::vector prevStageVariables_; + + const GridVariables* ptr_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/boxlocalassembler.hh b/dumux/multidomain/assembly/boxlocalassembler.hh new file mode 100644 index 0000000000..4e83ad050c --- /dev/null +++ b/dumux/multidomain/assembly/boxlocalassembler.hh @@ -0,0 +1,508 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. 
* + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution per element + * for the box scheme in the context of multidomain models. + */ +#ifndef DUMUX_MULTIDOMAIN_BOX_LOCAL_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_BOX_LOCAL_ASSEMBLER_HH + +#include +#include + +#include + +#include +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for the box scheme. + * \tparam Assembler The grid-wide assembler type + */ +template +class SubDomainBoxLocalAssembler +{ + using CouplingManager = typename Assembler::CouplingManager; + using GridVariables = typename Assembler::template SubDomainGridVariables; + using GridGeometry = typename Assembler::template SubDomainGridGeometry; + using LocalOperator = typename Assembler::template SubDomainLocalOperator; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::SolutionVector; + + // TODO: remove this assert? Get numEq from somewhere else? + static constexpr int numEq = PrimaryVariables::size(); + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + // static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + static constexpr auto domainId = typename Dune::index_constant(); + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit SubDomainBoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + , cm_(cm) + { + assert(elemVars.size() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. 
+ */ + explicit SubDomainBoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + , cm_(cm) + {} + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacRow& jacRow, SubRes& res) + { + // TODO: treat ghost elements for parallelism + + // assemble the diagonal block + const auto residual = assembleJacobianAndResidual_(jacRow[domainId]); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + // assemble the coupling blocks + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(jacRow)), [&](auto&& j) + { + if constexpr (j != id) + { + static constexpr auto domainJ = Dune::index_constant(); + assembleJacobianCoupling_(j, jacRow[domainJ]); + } + }); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + + // TODO: Skip this for explicit schemes? + auto& jac = jacRow[domainId]; + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + + // TODO: Periodicity + // if (fvGeometry().gridGeometry().dofOnPeriodicBoundary(scvI.dofIndex())) {} + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobian(JacobianBlock& jac) + { + assembleJacobianAndResidual_(jac); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + // TODO: Skip this for explicit schemes? + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Assemble the residual only + */ + template + void assembleResidual(SubRes& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + //! Enforce Dirichlet constraints + template + void enforceDirichletConstraints(const ApplyFunction& applyDirichlet) + { + // enforce Dirichlet boundary conditions + evalDirichletBoundaries(applyDirichlet); + // TODO: take care of internal Dirichlet constraints (if enabled) + // enforceInternalDirichletConstraints(applyDirichlet); + } + + /*! 
+ * \brief Evaluates Dirichlet boundaries + */ + template< typename ApplyDirichletFunctionType > + void evalDirichletBoundaries(ApplyDirichletFunctionType applyDirichlet) + { + for (const auto& scvI : scvs(fvGeometry())) + { + if (!fvGeometry().gridGeometry().dofOnBoundary(scvI.dofIndex())) + continue; + + const auto bcTypes = problem().boundaryTypes(element(), scvI); + if (bcTypes.hasDirichlet()) + { + const auto dirichletValues = problem().dirichlet(element(), scvI); + + // set the Dirichlet conditions in residual and jacobian + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (bcTypes.isDirichlet(eqIdx)) + { + const auto pvIdx = bcTypes.eqToDirichletIndex(eqIdx); + assert(0 <= pvIdx && pvIdx < numEq); + applyDirichlet(scvI, dirichletValues, eqIdx, pvIdx); + } + } + } + } + } + + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. + */ + template + ElementResidualVector assembleJacobianAndResidual_(JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution. + * \note This specialization is for the box scheme with numeric differentiation + */ + template + ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianBlock& A) + { + // get the variables of the current stage + auto& curVariables = elemVariables(); + auto& curElemVolVars = curVariables.elemVolVars(); + const auto& x = curVariables.gridVariables().dofs(); + + const auto origResiduals = evalLocalResidual(); + const auto origElemSol = elementSolution(element(), x, fvGeometry().gridGeometry()); + auto elemSol = origElemSol; + + ////////////////////////////////////////////////////////////////////////////////////////////// + // Calculate derivatives of the residual of all dofs in element with respect to themselves. 
// + ////////////////////////////////////////////////////////////////////////////////////////////// + + ElementResidualVector partialDerivs(fvGeometry().numScv()); + for (const auto& scvI : scvs(fvGeometry())) + { + // dof index and corresponding actual pri vars + const auto globalI = scvI.dofIndex(); + const auto localI = scvI.localDofIndex(); + + const auto origCurVolVars = curElemVolVars[scvI]; + auto& curVolVars = curElemVolVars[scvI]; + + // calculate derivatives w.r.t to the privars at the dof at hand + for (int pvIdx = 0; pvIdx < numEq; pvIdx++) + { + partialDerivs = 0.0; + auto evalResiduals = [&](Scalar priVar) + { + // update the volume variables and compute element residual + elemSol[localI][pvIdx] = priVar; + curVolVars.update(elemSol, problem(), element(), scvI); + cm_->updateCouplingContext(domainId, *this, domainId, scvI.dofIndex(), elemSol[localI], pvIdx); + return evalLocalResidual(); + }; + + // derive the residuals numerically + static const NumericEpsilon eps_{problem().paramGroup()}; + static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod"); + NumericDifferentiation::partialDerivative(evalResiduals, elemSol[localI][pvIdx], partialDerivs, + origResiduals, eps_(elemSol[localI][pvIdx], pvIdx), + numDiffMethod); + + // update the global stiffness matrix with the current partial derivatives + for (const auto& scvJ : scvs(fvGeometry())) + { + const auto globalJ = scvJ.dofIndex(); + const auto localJ = scvJ.localDofIndex(); + + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // A[i][col][eqIdx][pvIdx] is the rate of change of the + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof 'col' + A[globalJ][globalI][eqIdx][pvIdx] += partialDerivs[localJ][eqIdx]; + } + } + + // restore the original element solution & volume variables + elemSol[localI][pvIdx] = origElemSol[localI][pvIdx]; + curVolVars = origCurVolVars; + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[localI], pvIdx); + + // TODO additional dof dependencies + } + } + + return origResiduals; + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobianCoupling_(Dune::index_constant domainJ, JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianCouplingNumeric_(domainJ, A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives with respect to the given element by + * means of numeric differentiation and adds them to the global matrix. + */ + template + void assembleJacobianCouplingNumeric_(Dune::index_constant domainJ, JacobianBlock& A) + { + const auto& problem = elemVariables().elemVolVars().gridVolVars().problem(); + const auto& stencil = cm_->couplingStencil(domainId, element(), domainJ); + const auto& dofs = cm_->dofs(domainJ); + + auto& elemVolVars = elemVariables().elemVolVars(); + auto& elemFluxVarsCache = elemVariables().elemFluxVarsCache(); + + // TODO: How to handle the case of caching? + auto updateCoupledVariables = [&] () + { + // Update ourself after the context has been modified. Depending on the + // type of caching, other objects might have to be updated. All ifs can be optimized away. 
+ cm_->updateCoupledVariables(domainId, *this, elemVolVars, elemFluxVarsCache); + }; + + for (const auto globalJ : stencil) + { + // undeflected privars and privars to be deflected + const auto origPriVarsJ = dofs[globalJ]; + auto priVarsJ = origPriVarsJ; + + // the undeflected coupling residual + const auto origResidual = cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + + for (int pvIdx = 0; pvIdx < JacobianBlock::block_type::cols; ++pvIdx) + { + auto evalCouplingResidual = [&](Scalar priVar) + { + priVarsJ[pvIdx] = priVar; + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + updateCoupledVariables(); + return cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + }; + + // derive the residuals numerically + decltype(evalCouplingResidual(0)) partialDerivs(element().subEntities(GridView::dimension)); + + const auto& paramGroup = cm_->problem(domainJ).paramGroup(); + static const int numDiffMethod = getParamFromGroup(paramGroup, "Assembly.NumericDifferenceMethod"); + static const auto epsCoupl = cm_->numericEpsilon(domainJ, paramGroup); + + NumericDifferentiation::partialDerivative(evalCouplingResidual, origPriVarsJ[pvIdx], partialDerivs, origResidual, + epsCoupl(origPriVarsJ[pvIdx], pvIdx), numDiffMethod); + + // update the global stiffness matrix with the current partial derivatives + for (auto&& scv : scvs(fvGeometry())) + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // A[i][col][eqIdx][pvIdx] is the rate of change of + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof + // 'col'. + + // If the dof is coupled by a Dirichlet condition, + // set the derived value only once (i.e. overwrite existing values). + // For other dofs, add the contribution of the partial derivative. + if (fvGeometry().gridGeometry().dofOnBoundary(scv.dofIndex())) + { + const auto bcTypes = problem.boundaryTypes(element(), scv); + if (bcTypes.isCouplingDirichlet(eqIdx)) + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] = partialDerivs[scv.localDofIndex()][eqIdx]; + else if (bcTypes.isDirichlet(eqIdx)) + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] = 0.0; + } + else + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] += partialDerivs[scv.localDofIndex()][eqIdx]; + } + } + + // restore the current element solution + priVarsJ[pvIdx] = origPriVarsJ[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + } + + // Restore original state of the flux vars cache and/or vol vars. + // This has to be done in case they depend on variables of domainJ before + // we continue with the numeric derivative w.r.t the next globalJ. Otherwise, + // the next "origResidual" will be incorrect. + updateCoupledVariables(); + } + } +public: + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elemVariables_.back(); } + ElementVariables& elemVariables() { return elemVariables_.back(); } + const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } + const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + // TODO: This is currently required by some coupling managers. + // We should get rid of this coupling probabaly. 
+ static constexpr bool isImplicit() { return /*this is buggy*/ true; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector& elemVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; + std::shared_ptr cm_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/localassembler.hh b/dumux/multidomain/assembly/localassembler.hh new file mode 100644 index 0000000000..9d13c000a3 --- /dev/null +++ b/dumux/multidomain/assembly/localassembler.hh @@ -0,0 +1,57 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Helper alias to select a local assembler based on the discretization scheme. + */ +#ifndef DUMUX_MD_LOCAL_ASSEMBLER_HH +#define DUMUX_MD_LOCAL_ASSEMBLER_HH + +#include +#include "boxlocalassembler.hh" + +namespace Dumux { +namespace Impl { + + template + struct MDLocalAssemblerChooser; + + template + struct MDLocalAssemblerChooser + { using type = SubDomainBoxLocalAssembler; }; + + template + using MDLocalAssemblerType = typename MDLocalAssemblerChooser::discMethod, + diffMethod>::type; + +} // end namespace Immpl + +/*! + * \ingroup Assembly + * \brief Helper alias to select the local assembler type from an assembler. 
+ */ +template +using MultiDomainLocalAssembler = Impl::template MDLocalAssemblerType; + +} // namespace Dumux + +#endif -- GitLab From 73e563b49c73f244a9b3b7476f5521f75d852b96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 3 Feb 2021 10:53:51 +0100 Subject: [PATCH 12/27] [TEMP][facet-box-1p-test] make work with new assembly --- .../multidomain/facet/box/couplingmanager.hh | 15 +++-- .../facet/1p_1p/analytical/main.cc | 63 +++++++++++++++---- 2 files changed, 61 insertions(+), 17 deletions(-) diff --git a/dumux/multidomain/facet/box/couplingmanager.hh b/dumux/multidomain/facet/box/couplingmanager.hh index 732705b5b7..5485fdf5c4 100644 --- a/dumux/multidomain/facet/box/couplingmanager.hh +++ b/dumux/multidomain/facet/box/couplingmanager.hh @@ -359,6 +359,11 @@ public: typename LocalResidual::ElementResidualVector res(fvGeometry.numScv()); res = 0.0; + ElementBoundaryTypes elemBCTypes; + elemBCTypes.update(this->problem(bulkId), bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry()); + + LocalResidual localResidual(&(this->problem(bulkId))); + // compute fluxes across the coupling scvfs const auto& couplingScvfs = map.find(bulkContext_.elementIdx)->second.dofToCouplingScvfMap.at(dofIdxGlobalJ); for (auto scvfIdx : couplingScvfs) @@ -368,9 +373,9 @@ public: res[insideScv.localDofIndex()] += evalBulkFluxes_( bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry(), bulkLocalAssembler.curElemVolVars(), - bulkLocalAssembler.elemBcTypes(), + elemBCTypes, bulkLocalAssembler.elemFluxVarsCache(), - bulkLocalAssembler.localResidual(), + localResidual, std::array, 1>({scvfIdx}) ); } @@ -479,7 +484,7 @@ public: { const auto& ldGridGeometry = this->problem(lowDimId).gridGeometry(); - const auto& ldSol = Assembler::isImplicit() ? this->curSol()[lowDimId] : assembler.prevSol()[lowDimId]; + const auto& ldSol = this->curSol()[lowDimId]; const auto elemJ = ldGridGeometry.element(lowDimElemIdx); auto fvGeom = localView(ldGridGeometry); fvGeom.bindElement(elemJ); @@ -538,7 +543,7 @@ public: auto bulkElemVolVars = localView(assembler.gridVariables(bulkId).curGridVolVars()); auto bulkElemFluxVarsCache = localView(assembler.gridVariables(bulkId).gridFluxVarsCache()); - const auto& bulkSol = Assembler::isImplicit() ? this->curSol()[bulkId] : assembler.prevSol()[bulkId]; + const auto& bulkSol = this->curSol()[bulkId]; const auto curBulkElem = bulkGridGeom.element(embedments[i].first); bulkFvGeom.bind(curBulkElem); bulkElemVolVars.bind(curBulkElem, bulkFvGeom, bulkSol); @@ -552,7 +557,7 @@ public: } // finally, set the local residual - lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >(assembler.localResidual(bulkId)); + lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >( &(this->problem(bulkId)) ); } } diff --git a/test/multidomain/facet/1p_1p/analytical/main.cc b/test/multidomain/facet/1p_1p/analytical/main.cc index 4ac82416b1..87ad61b15b 100644 --- a/test/multidomain/facet/1p_1p/analytical/main.cc +++ b/test/multidomain/facet/1p_1p/analytical/main.cc @@ -29,10 +29,22 @@ #include #include +#include + +namespace Dumux::Properties { + // property forward declaration + // TODO: We need this in MDTraits such that the MDAssembler + // can extract the local operator from the sub-typetags. + // Thus, we need to introduce this property for all models? + // Should this be circumvented somehow? (In standard models one can + // select the operator in the main file so far...) 
+ template + struct LocalOperator { using type = UndefinedProperty; }; +} // end namespace Dumux::Properties + #include "problem_bulk.hh" #include "problem_lowdim.hh" -#include #include #include @@ -43,7 +55,7 @@ #include #include -#include +#include #include #include @@ -53,6 +65,10 @@ #include +#include +#include +#include + // obtain/define some types to be used below in the property definitions and in main template< class BulkTypeTag, class LowDimTypeTag > class TestTraits @@ -69,6 +85,24 @@ public: namespace Dumux { namespace Properties { +template +struct SDOperator +{ +private: + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using ImmiscibleOperators = FVImmiscibleOperators; + +public: + using Type = FVLocalOperator; +}; + +template +struct LocalOperator { using type = typename SDOperator::Type; }; +template +struct LocalOperator { using type = typename SDOperator::Type; }; + // set cm property for the box test using BoxTraits = TestTraits; template @@ -77,11 +111,11 @@ template struct CouplingManager { using type = typename BoxTraits::CouplingManager; }; // set cm property for the tpfa test -using TpfaTraits = TestTraits; -template -struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; -template -struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; +// using TpfaTraits = TestTraits; +// template +// struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; +// template +// struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; } // end namespace Properties } // end namespace Dumux @@ -326,10 +360,8 @@ int main(int argc, char** argv) } // the assembler - using Assembler = MultiDomainFVAssembler; - auto assembler = std::make_shared( std::make_tuple(bulkProblem, lowDimProblem), - std::make_tuple(bulkFvGridGeometry, lowDimFvGridGeometry), - std::make_tuple(bulkGridVariables, lowDimGridVariables), + using Assembler = MultiDomainAssembler; + auto assembler = std::make_shared( std::make_tuple(bulkFvGridGeometry, lowDimFvGridGeometry), couplingManager); // the linear solver @@ -341,7 +373,14 @@ int main(int argc, char** argv) auto newtonSolver = std::make_shared(assembler, linearSolver, couplingManager); // linearize & solve - newtonSolver->solve(x); + auto mdGridVars = MultiDomainFVGridVariables(std::make_tuple(bulkGridVariables, lowDimGridVariables)); + mdGridVars.update(x); + newtonSolver->solve(mdGridVars); + + // update solution vector used here in main + // with the new assembly we wouldn't need to care about a + // solution vector, we'd only need to have grid variables + x = mdGridVars.dofs(); // update grid variables for output bulkGridVariables->update(x[bulkId]); -- GitLab From a5826a7acb312019273efc2bc20bd207e51981de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 18:05:51 +0100 Subject: [PATCH 13/27] [TEMP][md][boxlocalassembler] rearrangements --- .../multidomain/assembly/boxlocalassembler.hh | 65 ++++++++++--------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/dumux/multidomain/assembly/boxlocalassembler.hh b/dumux/multidomain/assembly/boxlocalassembler.hh index 4e83ad050c..38f65d2fc7 100644 --- a/dumux/multidomain/assembly/boxlocalassembler.hh +++ b/dumux/multidomain/assembly/boxlocalassembler.hh @@ -33,7 +33,6 @@ #include #include -#include #include namespace Dumux { @@ -84,7 +83,7 @@ public: std::shared_ptr cm) : element_(element) , 
fvGeometry_(fvGeometry) - , elemVariables_(elemVars) + , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(nullptr) , cm_(cm) @@ -104,7 +103,7 @@ public: std::shared_ptr cm) : element_(element) , fvGeometry_(fvGeometry) - , elemVariables_(elemVars) + , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(stageParams) , cm_(cm) @@ -263,7 +262,7 @@ public: for (std::size_t k = 0; k < stageParams_->size(); ++k) { - LocalOperator localOperator(element(), fvGeometry(), elemVariables_[k]); + LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); if (!stageParams_->skipTemporal(k)) residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); @@ -275,6 +274,33 @@ public: } } + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elementVariables_.back(); } + + //! Still needed by some coupling managers + //! \todo TODO: Double check if still necessary after complete + //! integration of new time integration schemes. + const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } + const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + // TODO: This is currently required by some coupling managers. + // We should get rid of this coupling probabaly. + static constexpr bool isImplicit() { return /*this is buggy*/ true; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + ElementVariables& elemVariables_() + { return elementVariables_.back(); } + /*! * \brief Computes the derivatives with respect to the dofs of the given * element and adds them to the global matrix. @@ -299,7 +325,7 @@ public: ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianBlock& A) { // get the variables of the current stage - auto& curVariables = elemVariables(); + auto& curVariables = elemVariables_(); auto& curElemVolVars = curVariables.elemVolVars(); const auto& x = curVariables.gridVariables().dofs(); @@ -388,12 +414,11 @@ public: template void assembleJacobianCouplingNumeric_(Dune::index_constant domainJ, JacobianBlock& A) { - const auto& problem = elemVariables().elemVolVars().gridVolVars().problem(); const auto& stencil = cm_->couplingStencil(domainId, element(), domainJ); const auto& dofs = cm_->dofs(domainJ); - auto& elemVolVars = elemVariables().elemVolVars(); - auto& elemFluxVarsCache = elemVariables().elemFluxVarsCache(); + auto& elemVolVars = elemVariables_().elemVolVars(); + auto& elemFluxVarsCache = elemVariables_().elemFluxVarsCache(); // TODO: How to handle the case of caching? auto updateCoupledVariables = [&] () @@ -447,7 +472,7 @@ public: // For other dofs, add the contribution of the partial derivative. 
if (fvGeometry().gridGeometry().dofOnBoundary(scv.dofIndex())) { - const auto bcTypes = problem.boundaryTypes(element(), scv); + const auto bcTypes = problem().boundaryTypes(element(), scv); if (bcTypes.isCouplingDirichlet(eqIdx)) A[scv.dofIndex()][globalJ][eqIdx][pvIdx] = partialDerivs[scv.localDofIndex()][eqIdx]; else if (bcTypes.isDirichlet(eqIdx)) @@ -472,31 +497,11 @@ public: updateCoupledVariables(); } } -public: - //! Return references to the local views - const Element& element() const { return element_; } - const FVElementGeometry& fvGeometry() const { return fvGeometry_; } - const ElementVariables& elemVariables() const { return elemVariables_.back(); } - ElementVariables& elemVariables() { return elemVariables_.back(); } - const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } - const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } - - //! Returns if a stationary problem is assembled - bool isStationary() const { return !stageParams_; } - - // TODO: This is currently required by some coupling managers. - // We should get rid of this coupling probabaly. - static constexpr bool isImplicit() { return /*this is buggy*/ true; } - - //! Return a reference to the underlying problem - //! TODO: Should grid vars return problem directly!? - const auto& problem() const - { return elemVariables().gridVariables().gridVolVars().problem(); } private: const Element& element_; const FVElementGeometry& fvGeometry_; - std::vector& elemVariables_; + std::vector& elementVariables_; bool elementIsGhost_; std::shared_ptr stageParams_; -- GitLab From 05b9ae61299fb9e4dd4ff33478ea12a416b4e07f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 19:56:44 +0100 Subject: [PATCH 14/27] [TEMP][md] add draft for cc local assembler --- dumux/multidomain/assembler.hh | 2 +- .../multidomain/assembly/cclocalassembler.hh | 534 ++++++++++++++++++ dumux/multidomain/assembly/localassembler.hh | 9 + 3 files changed, 544 insertions(+), 1 deletion(-) create mode 100644 dumux/multidomain/assembly/cclocalassembler.hh diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh index 936dee20a5..f20290a200 100644 --- a/dumux/multidomain/assembler.hh +++ b/dumux/multidomain/assembler.hh @@ -44,7 +44,7 @@ #include #include "couplingjacobianpattern.hh" -#include "subdomaincclocalassembler.hh" +// #include "subdomaincclocalassembler.hh" // #include "subdomainboxlocalassembler.hh" #include "subdomainstaggeredlocalassembler.hh" diff --git a/dumux/multidomain/assembly/cclocalassembler.hh b/dumux/multidomain/assembly/cclocalassembler.hh new file mode 100644 index 0000000000..6809c0ebf1 --- /dev/null +++ b/dumux/multidomain/assembly/cclocalassembler.hh @@ -0,0 +1,534 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution per element + * for cell-centered schemes in the context of multidomain models. + */ +#ifndef DUMUX_MULTIDOMAIN_CC_LOCAL_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_CC_LOCAL_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. + * \tparam Assembler The grid-wide assembler type + */ +template +class SubDomainCCLocalAssembler +{ + using CouplingManager = typename Assembler::CouplingManager; + using GridVariables = typename Assembler::template SubDomainGridVariables; + using GridGeometry = typename Assembler::template SubDomainGridGeometry; + using LocalOperator = typename Assembler::template SubDomainLocalOperator; + + using Operators = typename LocalOperator::Operators; + using NumEqVector = typename Operators::NumEqVector; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::SolutionVector; + + // TODO: remove this assert? Get numEq from somewhere else? + static constexpr int numEq = PrimaryVariables::size(); + static constexpr int maxElementStencilSize = GridGeometry::maxElementStencilSize; + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + // static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + static constexpr auto domainId = typename Dune::index_constant(); + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit SubDomainCCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + , cm_(cm) + { + assert(elemVars.size() == 1); + assert(fvGeometry_.numScvs() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit SubDomainCCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + , cm_(cm) + { assert(fvGeometry_.numScvs() == 1); } + + /*! 
+ * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacRow& jacRow, SubRes& res) + { + // TODO: treat ghost elements for parallelism + + // assemble the diagonal block + const auto residual = assembleJacobianAndResidual_(jacRow[domainId]); + res[scvs(fvGeometry()).begin()->dofIndex()] += residual; + + // assemble the coupling blocks + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(jacRow)), [&](auto&& j) + { + if constexpr (j != id) + { + static constexpr auto domainJ = Dune::index_constant(); + assembleJacobianCoupling_(j, jacRow[domainJ]); + } + }); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobian(JacobianBlock& jac) + { + assembleJacobianAndResidual_(jac); + } + + /*! + * \brief Assemble the residual only + */ + template + void assembleResidual(SubRes& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elementVariables_.back(); } + + //! Still needed by some coupling managers + //! \todo TODO: Double check if still necessary after complete + //! integration of new time integration schemes. + const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } + const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + // TODO: This is currently required by some coupling managers. + // We should get rid of this coupling probabaly. + static constexpr bool isImplicit() { return /*this is buggy*/ true; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + ElementVariables& elemVariables_() + { return elementVariables_.back(); } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. 
+ */ + template + NumEqVector assembleJacobianAndResidual_(JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution. + * \note This specialization is for the box scheme with numeric differentiation + */ + template + NumEqVector assembleJacobianAndResidualNumeric_(JacobianBlock& A) + { + using Problem = std::decay_t; + + // get the variables of the current stage + auto& curVariables = elemVariables_(); + auto& curElemVolVars = curVariables.elemVolVars(); + auto& curElemFluxVarsCache = curVariables.elemFluxVarsCache(); + const auto& x = curVariables.gridVariables().dofs(); + + // get stencil informations + const auto& gridGeometry = fvGeometry().gridGeometry(); + const auto& connectivityMap = gridGeometry.connectivityMap(); + const auto globalI = gridGeometry.elementMapper().index(element_); + const auto numNeighbors = connectivityMap[globalI].size(); + + // container to store the neighboring elements + Dune::ReservedVector neighborElements; + neighborElements.resize(numNeighbors); + + // assemble the undeflected residual + using Residuals = ReservedBlockVector; + Residuals origResiduals(numNeighbors + 1); origResiduals = 0.0; + origResiduals[0] = evalLocalResidual()[0]; + + // lambda for convenient evaluation of the fluxes across scvfs in the neighbors + // if the neighbor is a ghost we don't want to add anything to their residual + // so we return 0 and omit computing the flux + auto evalNeighborFlux = [&] (const auto& neighbor, const auto& scvf) + { + if (neighbor.partitionType() == Dune::GhostEntity) + return NumEqVector(0.0); + else + return Operators::flux(problem(), neighbor, fvGeometry(), + curElemVolVars, curElemFluxVarsCache, scvf); + }; + + // get the elements in which we need to evaluate the fluxes + // and calculate these in the undeflected state + unsigned int j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + { + neighborElements[j-1] = gridGeometry.element(dataJ.globalJ); + for (const auto scvfIdx : dataJ.scvfsJ) + origResiduals[j] += evalNeighborFlux(neighborElements[j-1], fvGeometry().scvf(scvfIdx)); + ++j; + } + + // reference to the element's scv (needed later) and corresponding vol vars + // TODO: Support the case of caching?! 
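+        // (Sketch of one possible way to support caching, not implemented here:
+        //  with grid-wide volume variable caching enabled, curElemVolVars[scv]
+        //  would refer to the global cache, so the deflect/restore pattern below
+        //  would have to modify the grid volume variables instead of the local view.)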
+ const auto& scv = fvGeometry().scv(globalI); + auto& curVolVars = curElemVolVars[scv]; + + // save a copy of the original privars and vol vars in order + // to restore the original solution after deflection + const auto origPriVars = x[globalI]; + const auto origVolVars = curVolVars; + + // element solution container to be deflected + auto elemSol = elementSolution(element_, x, gridGeometry); + + // derivatives in the neighbors with repect to the current elements + // in index 0 we save the derivative of the element residual with respect to it's own dofs + Residuals partialDerivs(numNeighbors + 1); + for (int pvIdx = 0; pvIdx < numEq; ++pvIdx) + { + partialDerivs = 0.0; + + auto evalResiduals = [&](Scalar priVar) + { + Residuals partialDerivsTmp(numNeighbors + 1); + partialDerivsTmp = 0.0; + + // update the volume variables and the flux var cache + elemSol[0][pvIdx] = priVar; + curVolVars.update(elemSol, problem(), element_, scv); + curElemFluxVarsCache.update(element_, fvGeometry(), curElemVolVars); + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[0], pvIdx); + + // TODO: UPDATE GLOBAL FLUX VARS CACHE HERE? + + // calculate the residual with the deflected primary variables + partialDerivsTmp[0] = evalLocalResidual()[0]; + + // calculate the fluxes in the neighbors with the deflected primary variables + for (std::size_t k = 0; k < numNeighbors; ++k) + for (auto scvfIdx : connectivityMap[globalI][k].scvfsJ) + partialDerivsTmp[k+1] += evalNeighborFlux(neighborElements[k], fvGeometry().scvf(scvfIdx)); + + return partialDerivsTmp; + }; + + // derive the residuals numerically + static const NumericEpsilon eps_{problem().paramGroup()}; + static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod"); + NumericDifferentiation::partialDerivative(evalResiduals, elemSol[0][pvIdx], partialDerivs, origResiduals, + eps_(elemSol[0][pvIdx], pvIdx), numDiffMethod); + + // Correct derivative for ghost elements, i.e. set a 1 for the derivative w.r.t. the + // current primary variable and a 0 elsewhere. As we always solve for a delta of the + // solution with repect to the initial one, this results in a delta of 0 for ghosts. + if (elementIsGhost_) + { + partialDerivs[0] = 0.0; + partialDerivs[0][pvIdx] = 1.0; + } + + // add the current partial derivatives to the global jacobian matrix + // no special treatment is needed if globalJ is a ghost because then derivatives have been assembled to 0 above + if constexpr (Problem::enableInternalDirichletConstraints()) + { + // check if own element has internal Dirichlet constraint + const auto internalDirichletConstraintsOwnElement = this->problem().hasInternalDirichletConstraint(element_, scv); + const auto dirichletValues = this->problem().internalDirichlet(element_, scv); + + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (internalDirichletConstraintsOwnElement[eqIdx]) + { + origResiduals[0][eqIdx] = origVolVars.priVars()[eqIdx] - dirichletValues[eqIdx]; + A[globalI][globalI][eqIdx][pvIdx] = (eqIdx == pvIdx) ? 
1.0 : 0.0; + } + else + A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx]; + } + + // off-diagonal entries + j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + { + const auto& neighborElement = neighborElements[j-1]; + const auto& neighborScv = fvGeometry().scv(dataJ.globalJ); + const auto internalDirichletConstraintsNeighbor = problem().hasInternalDirichletConstraint(neighborElement, neighborScv); + + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (internalDirichletConstraintsNeighbor[eqIdx]) + A[dataJ.globalJ][globalI][eqIdx][pvIdx] = 0.0; + else + A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j][eqIdx]; + } + + ++j; + } + } + else // no internal Dirichlet constraints specified + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // the diagonal entries + A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx]; + + // off-diagonal entries + j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j++][eqIdx]; + } + } + + // restore the original state of the scv's volume variables + curVolVars = origVolVars; + + // restore the current element solution + elemSol[0][pvIdx] = origPriVars[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[0], pvIdx); + } + + // restore original state of the flux vars cache in case of global caching. + // This has to be done in order to guarantee that everything is in an undeflected + // state before the assembly of another element is called. In the case of local caching + // this is obsolete because the elemFluxVarsCache used here goes out of scope after this. + // We only have to do this for the last primary variable, for all others the flux var cache + // is updated with the correct element volume variables before residual evaluations + // TODO: RESET GLOBAL FLUX VARS CACHE HERE + + // return the original residual + return origResiduals[0]; + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobianCoupling_(Dune::index_constant domainJ, JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianCouplingNumeric_(domainJ, A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives with respect to the given element by + * means of numeric differentiation and adds them to the global matrix. + */ + template + void assembleJacobianCouplingNumeric_(Dune::index_constant domainJ, JacobianBlock& A) + { + using Problem = std::decay_t; + + const auto& stencil = cm_->couplingStencil(domainId, element(), domainJ); + const auto& dofs = cm_->dofs(domainJ); + + auto& elemVolVars = elemVariables_().elemVolVars(); + auto& elemFluxVarsCache = elemVariables_().elemFluxVarsCache(); + + // TODO: How to handle the case of caching? + auto updateCoupledVariables = [&] () + { + // Update ourself after the context has been modified. Depending on the + // type of caching, other objects might have to be updated. All ifs can be optimized away. 
+ cm_->updateCoupledVariables(domainId, *this, elemVolVars, elemFluxVarsCache); + }; + + for (const auto globalJ : stencil) + { + // undeflected privars and privars to be deflected + const auto origPriVarsJ = dofs[globalJ]; + auto priVarsJ = origPriVarsJ; + + // the undeflected coupling residual + const auto origResidual = cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + + for (int pvIdx = 0; pvIdx < JacobianBlock::block_type::cols; ++pvIdx) + { + auto evalCouplingResidual = [&](Scalar priVar) + { + priVarsJ[pvIdx] = priVar; + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + updateCoupledVariables(); + return cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + }; + + // derive the residuals numerically + decltype(evalCouplingResidual(0)) partialDerivs(element().subEntities(GridView::dimension)); + + const auto& paramGroup = cm_->problem(domainJ).paramGroup(); + static const int numDiffMethod = getParamFromGroup(paramGroup, "Assembly.NumericDifferenceMethod"); + static const auto epsCoupl = cm_->numericEpsilon(domainJ, paramGroup); + + NumericDifferentiation::partialDerivative(evalCouplingResidual, origPriVarsJ[pvIdx], partialDerivs, origResidual, + epsCoupl(origPriVarsJ[pvIdx], pvIdx), numDiffMethod); + + if constexpr (Problem::enableInternalDirichletConstraints()) + DUNE_THROW(Dune::NotImplemented, "Internal Dirichlet"); + + // update the global stiffness matrix with the current partial derivatives + for (auto&& scv : scvs(fvGeometry())) + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + + // A[i][col][eqIdx][pvIdx] is the rate of change of + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof + // 'col'. + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] += partialDerivs[scv.localDofIndex()][eqIdx]; + } + } + + // restore the current element solution + priVarsJ[pvIdx] = origPriVarsJ[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + } + + // Restore original state of the flux vars cache and/or vol vars. + // This has to be done in case they depend on variables of domainJ before + // we continue with the numeric derivative w.r.t the next globalJ. Otherwise, + // the next "origResidual" will be incorrect. 
+ updateCoupledVariables(); + } + } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector& elementVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; + std::shared_ptr cm_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/localassembler.hh b/dumux/multidomain/assembly/localassembler.hh index 9d13c000a3..7b0a644740 100644 --- a/dumux/multidomain/assembly/localassembler.hh +++ b/dumux/multidomain/assembly/localassembler.hh @@ -26,6 +26,7 @@ #include #include "boxlocalassembler.hh" +#include "cclocalassembler.hh" namespace Dumux { namespace Impl { @@ -37,6 +38,14 @@ namespace Impl { struct MDLocalAssemblerChooser { using type = SubDomainBoxLocalAssembler; }; + template + struct MDLocalAssemblerChooser + { using type = SubDomainCCLocalAssembler; }; + + template + struct MDLocalAssemblerChooser + { using type = SubDomainCCLocalAssembler; }; + template using MDLocalAssemblerType = typename MDLocalAssemblerChooser Date: Mon, 8 Feb 2021 19:57:15 +0100 Subject: [PATCH 15/27] [TEMP][facet-tpfa-1p-test] make work with new assembly --- .../facet/cellcentered/tpfa/couplingmanager.hh | 13 +++++++------ test/multidomain/facet/1p_1p/analytical/main.cc | 15 ++++++++++----- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh b/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh index 19fca678a6..b90cec7465 100644 --- a/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh +++ b/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh @@ -314,13 +314,15 @@ public: assert(map.find(bulkContext_.elementIdx) != map.end()); assert(bulkContext_.elementIdx == this->problem(bulkId).gridGeometry().elementMapper().index(bulkLocalAssembler.element())); + LocalResidual localResidual(&(this->problem(bulkId))); + typename LocalResidual::ElementResidualVector res(1); res = 0.0; res[0] = evalBulkFluxes(bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry(), bulkLocalAssembler.curElemVolVars(), bulkLocalAssembler.elemFluxVarsCache(), - bulkLocalAssembler.localResidual(), + localResidual, map.find(bulkContext_.elementIdx)->second.dofToCouplingScvfMap.at(dofIdxGlobalJ)); return res; } @@ -433,7 +435,7 @@ public: for (const auto lowDimElemIdx : elementStencil) { - const auto& ldSol = Assembler::isImplicit() ? this->curSol()[lowDimId] : assembler.prevSol()[lowDimId]; + const auto& ldSol = this->curSol()[lowDimId]; const auto& ldProblem = this->problem(lowDimId); const auto& ldGridGeometry = this->problem(lowDimId).gridGeometry(); @@ -496,12 +498,11 @@ public: // then simply bind the local views of that first neighbor auto bulkFvGeom = localView(bulkGridGeom); - auto bulkElemVolVars = Assembler::isImplicit() ? localView(assembler.gridVariables(bulkId).curGridVolVars()) - : localView(assembler.gridVariables(bulkId).prevGridVolVars()); + auto bulkElemVolVars = localView(assembler.gridVariables(bulkId).curGridVolVars()); auto bulkElemFluxVarsCache = localView(assembler.gridVariables(bulkId).gridFluxVarsCache()); // evaluate variables on old/new time level depending on time disc scheme - const auto& bulkSol = Assembler::isImplicit() ? 
this->curSol()[bulkId] : assembler.prevSol()[bulkId]; + const auto& bulkSol = this->curSol()[bulkId]; bulkFvGeom.bind(bulkElem); bulkElemVolVars.bind(bulkElem, bulkFvGeom, bulkSol); bulkElemFluxVarsCache.bind(bulkElem, bulkFvGeom, bulkElemVolVars); @@ -510,7 +511,7 @@ public: lowDimContext_.bulkFvGeometry = std::make_unique< FVElementGeometry >( std::move(bulkFvGeom) ); lowDimContext_.bulkElemVolVars = std::make_unique< ElementVolumeVariables >( std::move(bulkElemVolVars) ); lowDimContext_.bulkElemFluxVarsCache = std::make_unique< ElementFluxVariablesCache >( std::move(bulkElemFluxVarsCache) ); - lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >(assembler.localResidual(bulkId)); + lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >( &(this->problem(bulkId)) ); } } diff --git a/test/multidomain/facet/1p_1p/analytical/main.cc b/test/multidomain/facet/1p_1p/analytical/main.cc index 87ad61b15b..6b1491afd2 100644 --- a/test/multidomain/facet/1p_1p/analytical/main.cc +++ b/test/multidomain/facet/1p_1p/analytical/main.cc @@ -103,6 +103,11 @@ struct LocalOperator { using type = typename SDOpera template struct LocalOperator { using type = typename SDOperator::Type; }; +template +struct LocalOperator { using type = typename SDOperator::Type; }; +template +struct LocalOperator { using type = typename SDOperator::Type; }; + // set cm property for the box test using BoxTraits = TestTraits; template @@ -111,11 +116,11 @@ template struct CouplingManager { using type = typename BoxTraits::CouplingManager; }; // set cm property for the tpfa test -// using TpfaTraits = TestTraits; -// template -// struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; -// template -// struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; +using TpfaTraits = TestTraits; +template +struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; +template +struct CouplingManager { using type = typename TpfaTraits::CouplingManager; }; } // end namespace Properties } // end namespace Dumux -- GitLab From 878ab1725f0b1ed01408fb3024dd072e9efc857e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 20:36:52 +0100 Subject: [PATCH 16/27] [TEMP][md][assembler] add stage stuff to assembler --- dumux/multidomain/assembler.hh | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh index f20290a200..fca0911232 100644 --- a/dumux/multidomain/assembler.hh +++ b/dumux/multidomain/assembler.hh @@ -318,6 +318,46 @@ public: SolutionVector& residual() { return *residual_; } + /*! + * \brief Prepare for a new stage within a time integration step. + * This caches the given grid variables, which are then used as a + * representation of the previous stage. Moreover, the given grid + * variables are then updated to the time level of the upcoming stage. + * \param gridVars the grid variables representing the previous stage + * \param params the parameters with the weights to be used in the upcoming stage + * \todo TODO: This function does two things, namely caching and then updating. + * Should we split/delegate this, or is the current name descriptive enough? + * When used from outside, one would expect the gridvars to be prepared maybe, + * and that is what's done. Caching might not be expected from the outside but + * it is also not important that that is known from there? 
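+     * \note A rough usage sketch of one stage within a time step (as it is e.g.
+     *       driven by the MultiStageTimeStepper together with the PDE solver;
+     *       variable names are only illustrative):
+     * \code
+     * assembler.prepareStage(gridVars, stageParams); // cache stage, update time level
+     * // ... assemble & solve this stage, updating gridVars ...
+     * assembler.clearStages();                       // once the last stage is done
+     * \endcode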
+ */ + void prepareStage(GridVariables& gridVars, + std::shared_ptr params) + { + stageParams_ = params; + const auto curStage = params->size() - 1; + + // we keep track of previous stages, they are needed for residual assembly + prevStageVariables_.push_back(gridVars); + + // Now we update the time level of the given grid variables + const auto t = params->timeAtStage(curStage); + const auto prevT = params->timeAtStage(0); + const auto dtFraction = params->timeStepFraction(curStage); + TimeLevel timeLevel(t, prevT, dtFraction); + + gridVars.updateTime(timeLevel); + } + + /*! + * \brief Remove traces from stages within a time integration step. + */ + void clearStages() + { + prevStageVariables_.clear(); + stageParams_ = nullptr; + } + private: // reset the residual vector to 0.0 void resetResidual_() -- GitLab From 787dee6f366d3f87c17776e4c86adc0741af9bb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 20:38:07 +0100 Subject: [PATCH 17/27] [TEMP][md][geomech] add operators --- dumux/geomechanics/fvoperators.hh | 115 ++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 dumux/geomechanics/fvoperators.hh diff --git a/dumux/geomechanics/fvoperators.hh b/dumux/geomechanics/fvoperators.hh new file mode 100644 index 0000000000..c43ee24006 --- /dev/null +++ b/dumux/geomechanics/fvoperators.hh @@ -0,0 +1,115 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup PorousmediumflowModels + * \brief Sub-control entity-local evaluation of the operators + * within in the systems of equations of n-phase immiscible models. + */ +#ifndef DUMUX_FV_GEOMECHANICS_OPERATORS_HH +#define DUMUX_FV_GEOMECHANICS_OPERATORS_HH + +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup PorousmediumflowModels + * \brief Sub-control entity-local evaluation of the operators + * within in the systems of equations of n-phase immiscible models. + * \tparam ModelTraits defines model-related types and variables (e.g. number of phases) + * \tparam ElementVariables the type of element-local view on the grid variables + * \tparam StressType the constitutive model for stress computation at scv faces. 
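+ * \note This is a first sketch for the quasi-static case: the momentum balance is
+ *       assumed to carry no storage term (storage() below simply returns zero), and
+ *       the "flux" across a sub-control volume face is the mechanical force obtained
+ *       from the given StressType.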
+ */ +template +class FVGeomechanicsOperators +: public FVOperators +{ + using ParentType = FVOperators; + + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using Problem = std::decay_t().gridVolVars().problem())>; + using Indices = typename ModelTraits::Indices; + + static constexpr int numPhases = ModelTraits::numFluidPhases(); + static constexpr int conti0EqIdx = ModelTraits::Indices::conti0EqIdx; + +public: + //! export the type used to store scalar values for all equations + using typename ParentType::NumEqVector; + + // export the types of the flux/source/storage terms + using typename ParentType::FluxTerm; + using typename ParentType::SourceTerm; + using typename ParentType::StorageTerm; + + /*! + * \brief Compute the storage term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param scv The sub-control volume + * \param volVars The primary & secondary variables evaluated for the scv + * \note This must be overloaded by the implementation + */ + static StorageTerm storage(const Problem& problem, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { return StorageTerm(0.0); } + + // TODO: overload source here to add bulk gravity term! + + /*! 
+ * \brief Compute the flux term of the equations for the given sub-control volume face + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param elemFluxVarsCache The element-local view on the grid flux variables cache + * \param scvf The sub-control volume face for which the flux term is to be computed + * \note This must be overloaded by the implementation + */ + static FluxTerm flux(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const ElementFluxVariablesCache& elemFluxVarsCache, + const SubControlVolumeFace& scvf) + { return StressType::force(problem, element, fvGeometry, elemVolVars, scvf, elemFluxVarsCache); } +}; + +} // end namespace Dumux + +#endif -- GitLab From 8d960e3a5e1ae4e03dd4df473f3162daae2b09c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 20:37:11 +0100 Subject: [PATCH 18/27] [TEMP][md][gridvars] add time info --- dumux/multidomain/fvgridvariables.hh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh index 400b8b7111..fd4d1c5d41 100644 --- a/dumux/multidomain/fvgridvariables.hh +++ b/dumux/multidomain/fvgridvariables.hh @@ -66,6 +66,12 @@ public: //! export type of tuple of pointers using TupleType = typename MDTraits::template Tuple; + //! export underlying scalar type + using Scalar = typename MDTraits::Scalar; + + //! export the time representation + using TimeLevel = Dumux::TimeLevel; + /*! * \brief The default constructor */ @@ -104,6 +110,16 @@ public: }); } + //! update the time level only + void updateTime(const TimeLevel& timeLevel) + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto&& id) + { + elementAt(gridVars_, id)->updateTime(timeLevel); + }); + } + //! update all variables void update(const SolutionVector& sol, bool forceFluxCacheUpdate = false) { -- GitLab From f74ce1b1b2882bfddecd1ed7bef199ffeffd6d52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 20:37:48 +0100 Subject: [PATCH 19/27] [TEMP][test][el2p] make work with new assembly --- .../poroelastic/couplingmanager.hh | 27 +---- test/multidomain/poromechanics/el2p/main.cc | 106 +++++++++++++----- 2 files changed, 83 insertions(+), 50 deletions(-) diff --git a/dumux/geomechanics/poroelastic/couplingmanager.hh b/dumux/geomechanics/poroelastic/couplingmanager.hh index a49e3e79e4..01d64ebb75 100644 --- a/dumux/geomechanics/poroelastic/couplingmanager.hh +++ b/dumux/geomechanics/poroelastic/couplingmanager.hh @@ -308,26 +308,14 @@ public: * and the storage term here. 
*/ template< class PMFlowLocalAssembler > - typename LocalResidual::ElementResidualVector + auto evalCouplingResidual(Dune::index_constant pmFlowDomainId, const PMFlowLocalAssembler& pmFlowLocalAssembler, Dune::index_constant poroMechDomainId, GridIndexType dofIdxGlobalJ) { - auto res = pmFlowLocalAssembler.localResidual().evalFluxAndSource(pmFlowLocalAssembler.element(), - pmFlowLocalAssembler.fvGeometry(), - pmFlowLocalAssembler.curElemVolVars(), - pmFlowLocalAssembler.elemFluxVarsCache(), - pmFlowLocalAssembler.elemBcTypes()); - - // If the residual instationary, evaluate storage - if (!pmFlowLocalAssembler.localResidual().isStationary()) - res += pmFlowLocalAssembler.localResidual().evalStorage(pmFlowLocalAssembler.element(), - pmFlowLocalAssembler.fvGeometry(), - pmFlowLocalAssembler.prevElemVolVars(), - pmFlowLocalAssembler.curElemVolVars()); - - return res; + // TODO: efficiency! + return pmFlowLocalAssembler.evalLocalResidual(); } /*! @@ -337,17 +325,14 @@ public: * the fluxes as well as the source term here. */ template< class PoroMechLocalAssembler > - typename LocalResidual::ElementResidualVector + auto evalCouplingResidual(Dune::index_constant poroMechDomainId, const PoroMechLocalAssembler& poroMechLocalAssembler, Dune::index_constant pmFlowDomainId, GridIndexType dofIdxGlobalJ) { - return poroMechLocalAssembler.localResidual().evalFluxAndSource(poroMechLocalAssembler.element(), - poroMechLocalAssembler.fvGeometry(), - poroMechLocalAssembler.curElemVolVars(), - poroMechLocalAssembler.elemFluxVarsCache(), - poroMechLocalAssembler.elemBcTypes()); + // TODO: efficiency! + return poroMechLocalAssembler.evalLocalResidual(); } //! Return the porous medium flow variables an element/scv of the poromech domain couples to diff --git a/test/multidomain/poromechanics/el2p/main.cc b/test/multidomain/poromechanics/el2p/main.cc index 7291bae011..e8c849da4d 100644 --- a/test/multidomain/poromechanics/el2p/main.cc +++ b/test/multidomain/poromechanics/el2p/main.cc @@ -33,6 +33,18 @@ #include "problem_poroelastic.hh" #include + +namespace Dumux::Properties { + // property forward declaration + // TODO: We need this in MDTraits such that the MDAssembler + // can extract the local operator from the sub-typetags. + // Thus, we need to introduce this property for all models? + // Should this be circumvented somehow? (In standard models one can + // select the operator in the main file so far...) 
+ template + struct LocalOperator { using type = UndefinedProperty; }; +} // end namespace Dumux::Properties + #include #include @@ -40,9 +52,15 @@ #include #include -#include +#include +#include #include +#include +#include +#include +#include + #include #include @@ -71,6 +89,33 @@ private: public: using type = PoroMechanicsCouplingManager< Traits >; }; + +template +struct LocalOperator +{ +private: + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using ImmiscibleOperators = FVImmiscibleOperators; + +public: + using type = FVLocalOperator; +}; + +template +struct LocalOperator +{ +private: + using ModelTraits = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using StressType = GetPropType; + using GeomechanicsOperators = FVGeomechanicsOperators; + +public: + using type = FVLocalOperator; +}; + } // end namespace Properties } // end namespace Dumux @@ -139,7 +184,6 @@ int main(int argc, char** argv) x[poroMechId].resize(poroMechFvGridGeometry->numDofs()); twoPProblem->applyInitialSolution(x[twoPId]); poroMechProblem->applyInitialSolution(x[poroMechId]); - SolutionVector xOld = x; // initialize the coupling manager couplingManager->init(twoPProblem, poroMechProblem, x); @@ -178,12 +222,16 @@ int main(int argc, char** argv) auto timeLoop = std::make_shared>(0.0, dt, tEnd); timeLoop->setMaxTimeStepSize(maxDT); + // use implicit Euler for time integration + auto timeMethod = std::make_shared>(); + // the assembler - using Assembler = MultiDomainFVAssembler; - auto assembler = std::make_shared( std::make_tuple(twoPProblem, poroMechProblem), - std::make_tuple(twoPFvGridGeometry, poroMechFvGridGeometry), - std::make_tuple(twoPGridVariables, poroMechGridVariables), - couplingManager, timeLoop, xOld); + using Assembler = MultiDomainAssembler; + auto assembler = std::make_shared( std::make_tuple(twoPFvGridGeometry, poroMechFvGridGeometry), + couplingManager, *timeMethod); + + auto mdGridVars = MultiDomainFVGridVariables(std::make_tuple(twoPGridVariables, poroMechGridVariables)); + mdGridVars.update(x); // the linear solver using LinearSolver = UMFPackBackend; @@ -191,21 +239,22 @@ int main(int argc, char** argv) // the non-linear solver using NewtonSolver = Dumux::MultiDomainNewtonSolver; - NewtonSolver nonLinearSolver(assembler, linearSolver, couplingManager); + auto nonLinearSolver = std::make_shared(assembler, linearSolver, couplingManager); + + // the time stepper for time integration + using TimeStepper = MultiStageTimeStepper; + TimeStepper timeStepper(nonLinearSolver, timeMethod); // time loop timeLoop->start(); do { - // solve the non-linear system with time step control - nonLinearSolver.solve(x, *timeLoop); - - // make the new solution the old solution - xOld = x; + // do time integraiton + timeStepper.step(mdGridVars, timeLoop->time(), timeLoop->timeStepSize()); // advance to the time loop to the next step timeLoop->advanceTimeStep(); - twoPGridVariables->advanceTimeStep(); - poroMechGridVariables->advanceTimeStep(); + // twoPGridVariables->advanceTimeStep(); + // poroMechGridVariables->advanceTimeStep(); // write vtk output twoPVtkWriter.write(timeLoop->time()); @@ -214,23 +263,22 @@ int main(int argc, char** argv) // report statistics of this time step timeLoop->reportTimeStep(); - using TwoPPrimaryVariables = GetPropType; - TwoPPrimaryVariables storage(0); - const auto& twoPLocalResidual = assembler->localResidual(twoPId); - for (const auto& element : elements(leafGridView, 
Dune::Partitions::interior)) - { - auto storageVec = twoPLocalResidual.evalStorage(*twoPProblem, element, *twoPFvGridGeometry, *twoPGridVariables, x[twoPId]); - storage += storageVec[0]; - } - std::cout << "time, mass CO2 (kg), mass brine (kg):" << std::endl; - std::cout << timeLoop->time() << " , " << storage[1] << " , " << storage[0] << std::endl; - std::cout << "***************************************" << std::endl; + // using TwoPPrimaryVariables = GetPropType; + // TwoPPrimaryVariables storage(0); + // const auto& twoPLocalResidual = assembler->localResidual(twoPId); + // for (const auto& element : elements(leafGridView, Dune::Partitions::interior)) + // { + // auto storageVec = twoPLocalResidual.evalStorage(*twoPProblem, element, *twoPFvGridGeometry, *twoPGridVariables, x[twoPId]); + // storage += storageVec[0]; + // } + // std::cout << "time, mass CO2 (kg), mass brine (kg):" << std::endl; + // std::cout << timeLoop->time() << " , " << storage[1] << " , " << storage[0] << std::endl; + // std::cout << "***************************************" << std::endl; } while (!timeLoop->finished()); - - // output some Newton statistics - nonLinearSolver.report(); + // // output some Newton statistics + // nonLinearSolver.report(); timeLoop->finalize(leafGridView.comm()); -- GitLab From 71d9bf7b4785b2dfa6dddd171bcaa4f8560dd2b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Tue, 9 Feb 2021 18:18:58 +0100 Subject: [PATCH 20/27] [TEMP][md] try using contexts for stages --- .../poroelastic/couplingmanager.hh | 198 +++++++++++++++--- dumux/multidomain/assembler.hh | 61 ++++-- .../multidomain/assembly/boxlocalassembler.hh | 7 + .../multidomain/assembly/cclocalassembler.hh | 11 +- test/multidomain/poromechanics/el2p/main.cc | 4 +- .../poromechanics/el2p/spatialparams_2p.hh | 2 +- 6 files changed, 223 insertions(+), 60 deletions(-) diff --git a/dumux/geomechanics/poroelastic/couplingmanager.hh b/dumux/geomechanics/poroelastic/couplingmanager.hh index 01d64ebb75..d43db70739 100644 --- a/dumux/geomechanics/poroelastic/couplingmanager.hh +++ b/dumux/geomechanics/poroelastic/couplingmanager.hh @@ -73,6 +73,11 @@ class PoroMechanicsCouplingManager : public virtual CouplingManager< MDTraits > template using GridIndexType = typename GridView::IndexSet::IndexType; template using Element = typename GridView::template Codim<0>::Entity; template using GlobalPosition = typename Element::Geometry::GlobalCoordinate; + template using SubSolutionVector = typename GridVariables::SolutionVector; + template + using ElementSolution = std::decay_t>(), + std::declval>(), + std::declval>()))>; //! we assume that the two sub-problem operate on the same grid static_assert(std::is_same< GridView, GridView >::value, @@ -103,14 +108,35 @@ class PoroMechanicsCouplingManager : public virtual CouplingManager< MDTraits > */ struct PoroMechanicsCouplingContext { - // We need unique ptrs because the local views have no default constructor + // We need optionals because the local views have no default constructor Element pmFlowElement; - std::unique_ptr< FVElementGeometry > pmFlowFvGeometry; - std::unique_ptr< ElementVolumeVariables > pmFlowElemVolVars; + std::optional< FVElementGeometry > pmFlowFvGeometry; + std::optional< ElementVolumeVariables > pmFlowElemVolVars; + }; + + /*! + * \brief Poromechanical domain data required for the residual calculation of an + * element of the porous medium flow domain. 
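+     *
+     *        The indices, vertex dof indices and element-local displacement
+     *        solutions of all poromechanical elements in a flow element's stencil
+     *        are cached here, so that e.g. the deformation-dependent porosity can
+     *        be evaluated without access to the poromechanical solution vector
+     *        (see getPoroMechElemSol() below).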
+ */ + struct PorousMediumFlowCouplingContext + { + using Index = GridIndexType; + using ElemSol = ElementSolution; + using ElemDofs = std::vector; + + std::vector poroMechElemIndices; + std::vector poroMechElemDofs; + std::vector poroMechElemSols; }; public: + // export coupling context types + template + using CouplingContext = std::conditional_t; + // export the domain ids static constexpr auto pmFlowId = Dune::index_constant(); static constexpr auto poroMechId = Dune::index_constant(); @@ -170,33 +196,106 @@ public: return CouplingStencilType{ {eIdx} }; } - //! Pull up the base class' default implementation - using ParentType::bindCouplingContext; + //! Construct the coupling context for an element of the porous medium flow domain + template + std::shared_ptr + makeCouplingContext(Dune::index_constant pmFlowDomainId, + const Element& element, + const GridVariables& gridVars) const + { + const auto& poroMechGridVars = gridVars[poroMechId]; + const auto& poroMechGridGeom = poroMechGridVars.gridGeometry(); - /*! - * \brief For the assembly of the element residual of an element of the poro-mechanics domain, - * we have to prepare the element variables of the porous medium flow domain. - */ - template< class Assembler > - void bindCouplingContext(Dune::index_constant poroMechDomainId, - const Element& element, - const Assembler& assembler) + const auto globalI = gridVars[pmFlowId].gridGeometry().elementMapper().index(element); + const auto& connectivityMapI = gridVars[pmFlowId].gridGeometry().connectivityMap()[globalI]; + + std::vector< GridIndexType > elemIndices; + std::vector< ElementSolution > elemSols; + std::vector< std::vector> > elemDofs; + + elemIndices.reserve(connectivityMapI.size()+1); + elemSols.reserve(connectivityMapI.size()+1); + elemDofs.reserve(connectivityMapI.size()+1); + + static constexpr int dim = GridView::dimension; + { + const auto& elementI = poroMechGridGeom.element(globalI); + elemIndices.push_back(globalI); + elemSols.push_back(elementSolution(elementI, poroMechGridVars.dofs(), poroMechGridGeom)); + + elemDofs.push_back({}); + elemDofs.back().reserve(elementI.subEntities(dim)); + for (int i = 0; i < elementI.subEntities(dim); ++i) + elemDofs.back().push_back(poroMechGridGeom.vertexMapper().subIndex(elementI, i, dim)); + } + + for (const auto& dataJ : connectivityMapI) + { + const auto& elementJ = poroMechGridGeom.element(dataJ.globalJ); + elemIndices.push_back(dataJ.globalJ); + elemSols.push_back(elementSolution(elementJ, poroMechGridVars.dofs(), poroMechGridGeom)); + + elemDofs.push_back({}); + elemDofs.back().reserve(elementJ.subEntities(dim)); + for (int i = 0; i < elementJ.subEntities(dim); ++i) + elemDofs.back().push_back(poroMechGridGeom.vertexMapper().subIndex(elementJ, i, dim)); + } + + using C = PorousMediumFlowCouplingContext; + return std::make_shared(C{std::move(elemIndices), std::move(elemDofs), std::move(elemSols)}); + } + + //! 
Construct the coupling context for an element of the poromechanical domain + template + std::shared_ptr + makeCouplingContext(Dune::index_constant poroMechDomainId, + const Element& element, + const GridVariables& gridVars) const { - // first reset the context - poroMechCouplingContext_.pmFlowFvGeometry.reset(nullptr); - poroMechCouplingContext_.pmFlowElemVolVars.reset(nullptr); + PoroMechanicsCouplingContext result; - // prepare the fvGeometry and the element volume variables - // these quantities will be used later to obtain the effective pressure - auto fvGeometry = localView( this->problem(pmFlowId).gridGeometry() ); - auto elemVolVars = localView( assembler.gridVariables(Dune::index_constant()).curGridVolVars() ); + const auto& pmFlowGridVars = gridVars[Dune::index_constant()]; + const auto& pmFlowGridGeom = pmFlowGridVars.gridGeometry(); + const auto& pmFlowGridVolVars = pmFlowGridVars.gridVolVars(); + auto fvGeometry = localView( pmFlowGridGeom ); + auto elemVolVars = localView( pmFlowGridVolVars ); fvGeometry.bindElement(element); - elemVolVars.bindElement(element, fvGeometry, this->curSol()[Dune::index_constant()]); + elemVolVars.bindElement(element, fvGeometry, pmFlowGridVars.dofs()); - poroMechCouplingContext_.pmFlowElement = element; - poroMechCouplingContext_.pmFlowFvGeometry = std::make_unique< FVElementGeometry >(fvGeometry); - poroMechCouplingContext_.pmFlowElemVolVars = std::make_unique< ElementVolumeVariables >(elemVolVars); + result.pmFlowElement = element; + result.pmFlowFvGeometry.emplace(std::move(fvGeometry)); + result.pmFlowElemVolVars.emplace(std::move(elemVolVars)); + + return std::make_shared(result); + } + + void setCouplingContext(std::shared_ptr context) + { pmFlowCouplingContext_ = context; } + + void setCouplingContext(std::shared_ptr context) + { poroMechCouplingContext_ = context; } + + // TODO: This would be called when assembling both domains, thus, in the case + // of assembly of a poromech element, we don't have the correct context! + ElementSolution getPoroMechElemSol(const Element& element) const + { + const auto eIdx = this->problem(pmFlowId).gridGeometry().elementMapper().index(element); + + if (pmFlowCouplingContext_) + { + auto begin = pmFlowCouplingContext_->poroMechElemIndices.begin(); + auto end = pmFlowCouplingContext_->poroMechElemIndices.end(); + const auto it = std::find(begin, end, eIdx); + + if (it != end) + return pmFlowCouplingContext_->poroMechElemSols[std::distance(begin, it)]; + } + + // make elem sol from stored solution. + // TODO: How to resolve such circular dependencies? + const auto poroMechElement = this->problem(poroMechId).gridGeometry().element(eIdx); + return elementSolution(poroMechElement, this->curSol()[poroMechId], this->problem(poroMechId).gridGeometry()); } /*! @@ -213,12 +312,18 @@ public: unsigned int pvIdxJ) { // communicate the deflected pm flow domain primary variable + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(poroMechDomainId, poroMechLocalAssembler, pmFlowDomainId, dofIdxGlobalJ, priVarsJ, pvIdxJ); // now, update the coupling context (i.e. 
elemVolVars) - const auto& element = poroMechCouplingContext_.pmFlowElement; - const auto& fvGeometry = *poroMechCouplingContext_.pmFlowFvGeometry; - poroMechCouplingContext_.pmFlowElemVolVars->bindElement(element, fvGeometry, this->curSol()[pmFlowDomainId]); + const auto& element = poroMechCouplingContext_->pmFlowElement; + const auto& fvGeometry = *(poroMechCouplingContext_->pmFlowFvGeometry); + + // TODO: Here the cm-internal solution is still used. + // This still works because the deflected solution is always the one of the current + // stage, and this function is only called to deflect the variables of the current stage. + const auto& solution = this->curSol()[pmFlowId]; + poroMechCouplingContext_->pmFlowElemVolVars->bindElement(element, fvGeometry, solution); } /*! @@ -235,13 +340,18 @@ public: const PrimaryVariables& priVarsJ, unsigned int pvIdxJ) { - // communicate the deflected displacement + // // communicate the deflected displacement + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(poroMechDomainIdI, poroMechLocalAssembler, poroMechDomainIdJ, dofIdxGlobalJ, priVarsJ, pvIdxJ); // now, update the coupling context (i.e. elemVolVars) - (*poroMechCouplingContext_.pmFlowElemVolVars).bindElement(poroMechCouplingContext_.pmFlowElement, - *poroMechCouplingContext_.pmFlowFvGeometry, - this->curSol()[Dune::index_constant()]); + // TODO: Here the cm-internal solution is still used. + // This still works because the deflected solution is always the one of the current + // stage, and this function is only called to deflect the variables of the current stage. + const auto& solution = this->curSol()[pmFlowId]; + poroMechCouplingContext_->pmFlowElemVolVars->bindElement(poroMechCouplingContext_->pmFlowElement, + *(poroMechCouplingContext_->pmFlowFvGeometry), + solution); } /*! @@ -257,7 +367,26 @@ public: unsigned int pvIdxJ) { // communicate the deflected displacement + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(pmFlowDomainId, pmFlowLocalAssembler, domainIdJ, dofIdxGlobalJ, priVarsJ, pvIdxJ); + + // TODO: This is now much more expensive than the simple update before... + // Could an alternative be to store all stage solutions in the cm + // and not rely on a context here? + if constexpr (j == PoroMechId) + { + for (int i = 0; i < pmFlowCouplingContext_->poroMechElemIndices.size(); ++i) + { + const auto& contextDofs = pmFlowCouplingContext_->poroMechElemDofs[i]; + const auto it = std::find(contextDofs.begin(), contextDofs.end(), dofIdxGlobalJ); + + if (it != contextDofs.end()) + { + const auto idxInContext = std::distance(contextDofs.begin(), it); + pmFlowCouplingContext_->poroMechElemSols[i][idxInContext][pvIdxJ] = priVarsJ[pvIdxJ]; + } + } + } } //! Pull up the base class' default implementation @@ -340,7 +469,7 @@ public: { //! If we do not yet have the queried object, build it first const auto eIdx = this->problem(poroMechId).gridGeometry().elementMapper().index(element); - return (*poroMechCouplingContext_.pmFlowElemVolVars)[eIdx]; + return (*poroMechCouplingContext_->pmFlowElemVolVars)[eIdx]; } /*! 
@@ -397,8 +526,9 @@ private: // Container for storing the coupling element stencils for the pm flow domain std::vector< CouplingStencilType > pmFlowCouplingMap_; - // the coupling context of the poromechanics domain - PoroMechanicsCouplingContext poroMechCouplingContext_; + // the coupling contexts + std::shared_ptr poroMechCouplingContext_ = nullptr; + std::shared_ptr pmFlowCouplingContext_ = nullptr; }; } //end namespace Dumux diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh index fca0911232..8d71e4f84c 100644 --- a/dumux/multidomain/assembler.hh +++ b/dumux/multidomain/assembler.hh @@ -103,6 +103,9 @@ private: using ThisType = MultiDomainAssembler; using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst; + template + using Context = typename CouplingManager::template CouplingContext; + public: /*! @@ -152,18 +155,18 @@ public: this->assembleJacobianAndResidual_(domainId, jacRow, subRes, gridVars); }); - // using namespace Dune::Indices; + using namespace Dune::Indices; // std::cout << "SOL in GV: " << std::endl; // Dune::printvector(std::cout, gridVars.dofs()[_0], "", "", 1, 10, 5); // Dune::printvector(std::cout, gridVars.dofs()[_1], "", "", 1, 10, 5); // // std::cout << "Linear system: " << std::endl; - // Dune::printmatrix(std::cout, (*jacobian_)[_0][_0], "", "", 10, 5); - // Dune::printmatrix(std::cout, (*jacobian_)[_0][_1], "", "", 10, 5); - // Dune::printmatrix(std::cout, (*jacobian_)[_1][_0], "", "", 10, 5); - // Dune::printmatrix(std::cout, (*jacobian_)[_1][_1], "", "", 10, 5); - // Dune::printvector(std::cout, (*residual_)[_0], "", "", 1, 10, 5); - // Dune::printvector(std::cout, (*residual_)[_1], "", "", 1, 10, 5); + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_0], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_1], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_0], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_1], "", "", 10, 2); + // Dune::printvector(std::cout, (*residual_)[_0], "", "", 1, 10, 2); + // Dune::printvector(std::cout, (*residual_)[_1], "", "", 1, 10, 2); } //! compute the residuals using the internal residual @@ -393,10 +396,10 @@ private: { auto ggLocalView = localView(gridGeometry(domainId)); ggLocalView.bind(element); - auto elemVars = this->prepareElemVariables_(gridVars[domainId], element, ggLocalView); + auto [elemVars, contexts] = this->prepareElemVariables_(gridVars, element, ggLocalView); using LocalAssembler = Dumux::MultiDomainLocalAssembler; - LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_, couplingManager_); + LocalAssembler localAssembler(element, ggLocalView, contexts, elemVars, stageParams_, couplingManager_); localAssembler.assembleJacobianAndResidual(jacRow, subRes); }); } @@ -410,10 +413,10 @@ private: { auto ggLocalView = localView(gridGeometry(domainId)); ggLocalView.bind(element); - auto elemVars = this->prepareElemVariables_(gridVars[domainId], element, ggLocalView); + auto [elemVars, contexts] = this->prepareElemVariables_(gridVars, element, ggLocalView); using LocalAssembler = Dumux::MultiDomainLocalAssembler; - LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_, couplingManager_); + LocalAssembler localAssembler(element, ggLocalView, contexts, elemVars, stageParams_, couplingManager_); localAssembler.assembleResidual(subRes); }); } @@ -460,31 +463,47 @@ private: //! prepares the local views on the grid variables for the given element //! 
\todo: TODO: when stageparams.skipSpatial() == true, we don't need to bind flux vars caches! template - std::vector> - prepareElemVariables_(const SubDomainGridVariables& gridVars, + std::pair< std::vector>, + std::vector>> > + prepareElemVariables_(const Variables& gridVars, const SubDomainElement& element, const typename SubDomainGridGeometry::LocalView& ggLocalView) const { - couplingManager_->bindCouplingContext(Dune::index_constant(), element, *this); + static constexpr auto domainId = Dune::index_constant(); + if (!stageParams_) { - auto elemVars = localView(gridVars); + auto context = couplingManager_->makeCouplingContext(domainId, element, gridVars); + auto elemVars = localView(gridVars[domainId]); elemVars.bind(element, ggLocalView); - return {elemVars}; + return std::make_pair(std::vector>{{elemVars}}, + std::vector>>{{context}}); } else { std::vector> elemVars; + std::vector>> contexts; + elemVars.reserve(stageParams_->size()); + contexts.reserve(stageParams_->size()); for (int i = 0; i < stageParams_->size()-1; ++i) - elemVars.emplace_back(prevStageVariables_[i][Dune::index_constant()]); - elemVars.emplace_back(gridVars); + { + elemVars.emplace_back(prevStageVariables_[i][domainId]); + contexts.emplace_back(couplingManager_->makeCouplingContext(domainId, element, prevStageVariables_[i])); + } + + elemVars.emplace_back(gridVars[domainId]); + contexts.emplace_back(couplingManager_->makeCouplingContext(domainId, element, gridVars)); - for (auto& evv : elemVars) - evv.bind(element, ggLocalView); + for (int i = 0; i < elemVars.size(); ++i) + { + couplingManager_->setCouplingContext(contexts[i]); + elemVars[i].bind(element, ggLocalView); + } - return elemVars; + return std::make_pair(std::vector>{elemVars}, + std::vector>>{contexts}); } } diff --git a/dumux/multidomain/assembly/boxlocalassembler.hh b/dumux/multidomain/assembly/boxlocalassembler.hh index 38f65d2fc7..4e799356e4 100644 --- a/dumux/multidomain/assembly/boxlocalassembler.hh +++ b/dumux/multidomain/assembly/boxlocalassembler.hh @@ -58,6 +58,7 @@ class SubDomainBoxLocalAssembler using GridView = typename GridGeometry::GridView; using Element = typename GridView::template Codim<0>::Entity; + using Context = typename CouplingManager::template CouplingContext; using JacobianMatrix = typename Assembler::JacobianMatrix; using ResidualVector = typename Assembler::SolutionVector; @@ -79,10 +80,12 @@ public: */ explicit SubDomainBoxLocalAssembler(const Element& element, const FVElementGeometry& fvGeometry, + std::vector>& contexts, std::vector& elemVars, std::shared_ptr cm) : element_(element) , fvGeometry_(fvGeometry) + , contexts_(contexts) , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(nullptr) @@ -98,11 +101,13 @@ public: */ explicit SubDomainBoxLocalAssembler(const Element& element, const FVElementGeometry& fvGeometry, + std::vector>& contexts, std::vector& elemVars, std::shared_ptr stageParams, std::shared_ptr cm) : element_(element) , fvGeometry_(fvGeometry) + , contexts_(contexts) , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(stageParams) @@ -262,6 +267,7 @@ public: for (std::size_t k = 0; k < stageParams_->size(); ++k) { + cm_->setCouplingContext(contexts_[k]); LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); if (!stageParams_->skipTemporal(k)) @@ -501,6 +507,7 @@ private: private: const Element& element_; const FVElementGeometry& fvGeometry_; + std::vector>& contexts_; 
std::vector& elementVariables_; bool elementIsGhost_; diff --git a/dumux/multidomain/assembly/cclocalassembler.hh b/dumux/multidomain/assembly/cclocalassembler.hh index 6809c0ebf1..651fb44acb 100644 --- a/dumux/multidomain/assembly/cclocalassembler.hh +++ b/dumux/multidomain/assembly/cclocalassembler.hh @@ -63,6 +63,7 @@ class SubDomainCCLocalAssembler using GridView = typename GridGeometry::GridView; using Element = typename GridView::template Codim<0>::Entity; + using Context = typename CouplingManager::template CouplingContext; using JacobianMatrix = typename Assembler::JacobianMatrix; using ResidualVector = typename Assembler::SolutionVector; @@ -85,17 +86,19 @@ public: */ explicit SubDomainCCLocalAssembler(const Element& element, const FVElementGeometry& fvGeometry, + std::vector>& contexts, std::vector& elemVars, std::shared_ptr cm) : element_(element) , fvGeometry_(fvGeometry) + , contexts_(contexts) , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(nullptr) , cm_(cm) { assert(elemVars.size() == 1); - assert(fvGeometry_.numScvs() == 1); + assert(fvGeometry_.numScv() == 1); } /*! @@ -105,16 +108,18 @@ public: */ explicit SubDomainCCLocalAssembler(const Element& element, const FVElementGeometry& fvGeometry, + std::vector>& contexts, std::vector& elemVars, std::shared_ptr stageParams, std::shared_ptr cm) : element_(element) , fvGeometry_(fvGeometry) + , contexts_(contexts) , elementVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) , stageParams_(stageParams) , cm_(cm) - { assert(fvGeometry_.numScvs() == 1); } + { assert(fvGeometry_.numScv() == 1); } /*! * \brief Computes the derivatives with respect to the given element and adds @@ -181,6 +186,7 @@ public: for (std::size_t k = 0; k < stageParams_->size(); ++k) { + cm_->setCouplingContext(contexts_[k]); LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); if (!stageParams_->skipTemporal(k)) @@ -522,6 +528,7 @@ private: private: const Element& element_; const FVElementGeometry& fvGeometry_; + std::vector>& contexts_; std::vector& elementVariables_; bool elementIsGhost_; diff --git a/test/multidomain/poromechanics/el2p/main.cc b/test/multidomain/poromechanics/el2p/main.cc index e8c849da4d..c715beffe5 100644 --- a/test/multidomain/poromechanics/el2p/main.cc +++ b/test/multidomain/poromechanics/el2p/main.cc @@ -215,8 +215,8 @@ int main(int argc, char** argv) PoroMechOutputFields::initOutputModule(poroMechVtkWriter); // write initial solution - twoPVtkWriter.write(0.0); - poroMechVtkWriter.write(0.0); + // twoPVtkWriter.write(0.0); + // poroMechVtkWriter.write(0.0); //instantiate time loop auto timeLoop = std::make_shared>(0.0, dt, tEnd); diff --git a/test/multidomain/poromechanics/el2p/spatialparams_2p.hh b/test/multidomain/poromechanics/el2p/spatialparams_2p.hh index ec9573bb9f..f190094cb1 100644 --- a/test/multidomain/poromechanics/el2p/spatialparams_2p.hh +++ b/test/multidomain/poromechanics/el2p/spatialparams_2p.hh @@ -86,7 +86,7 @@ public: static constexpr auto poroMechId = CouplingManager::poroMechId; const auto& poroMechGridGeom = couplingManagerPtr_->problem(poroMechId).gridGeometry(); - const auto poroMechElemSol = elementSolution(element, couplingManagerPtr_->curSol()[poroMechId], poroMechGridGeom); + const auto& poroMechElemSol = couplingManagerPtr_->getPoroMechElemSol(element); // evaluate the deformation-dependent porosity at the scv center return PorosityDeformation::evaluatePorosity(poroMechGridGeom, element, 
scv.center(), poroMechElemSol, initPorosity_);
-- 
GitLab


From 28c18d86d5f8c48bfab4049c0850bfd595cb54ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?=
Date: Wed, 10 Feb 2021 19:13:16 +0100
Subject: [PATCH 21/27] [TEMP][md][assembler] add todo

---
 dumux/multidomain/assembler.hh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh
index 8d71e4f84c..733034fa2f 100644
--- a/dumux/multidomain/assembler.hh
+++ b/dumux/multidomain/assembler.hh
@@ -118,6 +118,8 @@ public:
     , couplingManager_(couplingManager)
     , isImplicit_(true)
     {
+        // TODO: Store that instantiation happened for stationary problem for later error catch?
+        //       Or maybe we need another mechanism to tell the assembler if implicit/explicit...
         std::cout << "Instantiated MultiDomainAssembler for a stationary problem." << std::endl;
     }
 
-- 
GitLab


From 7440e8378774ad318be89f0b85191bb79c38aadb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?=
Date: Wed, 10 Feb 2021 19:18:35 +0100
Subject: [PATCH 22/27] [md][fvgridvars] add todo

---
 dumux/multidomain/fvgridvariables.hh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh
index fd4d1c5d41..665b17688e 100644
--- a/dumux/multidomain/fvgridvariables.hh
+++ b/dumux/multidomain/fvgridvariables.hh
@@ -99,6 +99,8 @@ public:
         });
     }
 
+    //! TODO: Adjust to all interfaces of FVGridVariables
+
     //! initialize all variables
     void init(const SolutionVector& sol)
     {
-- 
GitLab


From c40a2052b18b4ef44478bfa2c1d2d68788850801 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?=
Date: Wed, 10 Feb 2021 19:16:46 +0100
Subject: [PATCH 23/27] [TEMP][md][cclocalassembler] fix scaling

---
 dumux/multidomain/assembly/cclocalassembler.hh | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/dumux/multidomain/assembly/cclocalassembler.hh b/dumux/multidomain/assembly/cclocalassembler.hh
index 651fb44acb..44da06244a 100644
--- a/dumux/multidomain/assembly/cclocalassembler.hh
+++ b/dumux/multidomain/assembly/cclocalassembler.hh
@@ -354,6 +354,14 @@ private:
                 partialDerivs[0][pvIdx] = 1.0;
             }
 
+            // For instationary simulations, scale the coupling
+            // fluxes of the current stage correctly
+            if (stageParams_)
+            {
+                for (std::size_t k = 0; k < numNeighbors; ++k)
+                    partialDerivs[k+1] *= stageParams_->spatialWeight(stageParams_->size()-1);
+            }
+
             // add the current partial derivatives to the global jacobian matrix
             // no special treatment is needed if globalJ is a ghost because then derivatives have been assembled to 0 above
             if constexpr (Problem::enableInternalDirichletConstraints())
-- 
GitLab


From eb6097a19ce420fd19ffec9a4157b0f3fe845542 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?=
Date: Wed, 10 Feb 2021 19:17:13 +0100
Subject: [PATCH 24/27] [TEMP][md][boxlocalassembler] fix update order

---
 dumux/multidomain/assembly/boxlocalassembler.hh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dumux/multidomain/assembly/boxlocalassembler.hh b/dumux/multidomain/assembly/boxlocalassembler.hh
index 4e799356e4..71b3548c59 100644
--- a/dumux/multidomain/assembly/boxlocalassembler.hh
+++ b/dumux/multidomain/assembly/boxlocalassembler.hh
@@ -361,8 +361,8 @@ private:
             {
                 // update the volume variables and compute element residual
                 elemSol[localI][pvIdx] = priVar;
-                curVolVars.update(elemSol, problem(), element(), scvI);
                 cm_->updateCouplingContext(domainId, *this, domainId, scvI.dofIndex(), elemSol[localI], pvIdx);
+                curVolVars.update(elemSol,
problem(), element(), scvI); return evalLocalResidual(); }; -- GitLab From 937995e5a73407f7b395700c1b0b0e970ab07c1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Tue, 16 Feb 2021 14:20:11 +0100 Subject: [PATCH 25/27] [TEMP][md][gridvars] implement deep copy mechanism --- dumux/multidomain/assembler.hh | 2 +- dumux/multidomain/fvgridvariables.hh | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh index 733034fa2f..56c5c9f30c 100644 --- a/dumux/multidomain/assembler.hh +++ b/dumux/multidomain/assembler.hh @@ -343,7 +343,7 @@ public: const auto curStage = params->size() - 1; // we keep track of previous stages, they are needed for residual assembly - prevStageVariables_.push_back(gridVars); + prevStageVariables_.push_back(gridVars.deepCopy()); // Now we update the time level of the given grid variables const auto t = params->timeAtStage(curStage); diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh index 665b17688e..ddaddb36d0 100644 --- a/dumux/multidomain/fvgridvariables.hh +++ b/dumux/multidomain/fvgridvariables.hh @@ -41,6 +41,7 @@ namespace Dumux { template class MultiDomainFVGridVariables { + using ThisType = MultiDomainFVGridVariables; static constexpr std::size_t numSubDomains = MDTraits::numSubDomains; template @@ -84,6 +85,15 @@ public: : gridVars_(tuple) {} + /*! + * \brief Construction from an existing tuple and solution + */ + MultiDomainFVGridVariables(const TupleType& tuple, + const SolutionVector& sol) + : gridVars_(tuple) + , dofs_(sol) + {} + /*! * \brief Contruct the grid variables * \param gridGeometries a tuple of grid geometry shared pointers @@ -199,6 +209,21 @@ public: TupleType getTuple() { return gridVars_; } + //! make a deep copy of this class + ThisType deepCopy() const + { + TupleType tupleCopy; + + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto&& id) + { + constexpr auto i = std::decay_t::value; + elementAt(tupleCopy, id) = std::make_shared>( *(std::get(gridVars_)) ); + }); + + return {tupleCopy, dofs_}; + } + private: //! 
a tuple of pointes to all grid variables TupleType gridVars_; -- GitLab From ca6202b5b2ad59f869c68941d8018c05222d1c7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Tue, 16 Feb 2021 14:34:13 +0100 Subject: [PATCH 26/27] [TEMP][test][el2p] fix output solution --- test/multidomain/poromechanics/el2p/main.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/multidomain/poromechanics/el2p/main.cc b/test/multidomain/poromechanics/el2p/main.cc index c715beffe5..08852b7108 100644 --- a/test/multidomain/poromechanics/el2p/main.cc +++ b/test/multidomain/poromechanics/el2p/main.cc @@ -253,6 +253,8 @@ int main(int argc, char** argv) // advance to the time loop to the next step timeLoop->advanceTimeStep(); + x[twoPId] = mdGridVars[twoPId].dofs(); + x[poroMechId] = mdGridVars[poroMechId].dofs(); // twoPGridVariables->advanceTimeStep(); // poroMechGridVariables->advanceTimeStep(); -- GitLab From c9e3588361f8d7b1bfae059971ffc1d5f23a17cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Tue, 16 Feb 2021 14:34:41 +0100 Subject: [PATCH 27/27] [TEMP][test][el2p] use new result for fuzzy comp --- test/multidomain/poromechanics/el2p/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/multidomain/poromechanics/el2p/CMakeLists.txt b/test/multidomain/poromechanics/el2p/CMakeLists.txt index 35eab6328f..dd21128e96 100644 --- a/test/multidomain/poromechanics/el2p/CMakeLists.txt +++ b/test/multidomain/poromechanics/el2p/CMakeLists.txt @@ -8,9 +8,9 @@ dumux_add_test(NAME test_md_poromechanics_el2p COMMAND ${CMAKE_SOURCE_DIR}/bin/testing/runtest.py CMD_ARGS --script fuzzy --files ${CMAKE_SOURCE_DIR}/test/references/test_md_poromechanics_el2p_2p-reference.vtu - ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_twop-00010.vtu + ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_twop-00009.vtu ${CMAKE_SOURCE_DIR}/test/references/test_md_poromechanics_el2p_poroelastic-reference.vtu - ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_poroelastic-00010.vtu + ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_poroelastic-00009.vtu --command "${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p params.input -Vtk.OutputName test_md_poromechanics_el2p" --zeroThreshold {"u":1e-14}) -- GitLab
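Note on the per-stage coupling contexts used in the assembler patches above: the assembler builds one coupling context per time-integration stage, hands them to the local assemblers together with the per-stage element variables, and activates the matching context right before the local operator of that stage is evaluated. The standalone C++ sketch below illustrates only this pattern; CouplingManager, Context, Variables and the stage weights are simplified placeholders, not the actual DuMux interfaces.

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Variables { double value; };   // stand-in for the grid variables of one stage
struct Context  { double snapshot; }; // stand-in for a coupling context

class CouplingManager
{
public:
    // build a context from the variables of one particular stage
    std::shared_ptr<Context> makeCouplingContext(const Variables& vars) const
    { return std::make_shared<Context>(Context{vars.value}); }

    // activate the given context before a stage is evaluated
    void setCouplingContext(std::shared_ptr<Context> c)
    { active_ = std::move(c); }

    double coupledValue() const
    { return active_->snapshot; }

private:
    std::shared_ptr<Context> active_;
};

int main()
{
    CouplingManager cm;
    std::vector<Variables> stageVars{{1.0}, {2.0}}; // previous stage(s) + current stage

    // one context per stage, analogous to prepareElemVariables_ in the assembler patch
    std::vector<std::shared_ptr<Context>> contexts;
    for (const auto& vars : stageVars)
        contexts.push_back(cm.makeCouplingContext(vars));

    // residual assembly: activate the matching context before each stage contributes
    const std::vector<double> stageWeights{0.5, 0.5}; // placeholder weights
    double residual = 0.0;
    for (std::size_t k = 0; k < stageVars.size(); ++k)
    {
        cm.setCouplingContext(contexts[k]);
        residual += stageWeights[k]*cm.coupledValue();
    }

    std::cout << "accumulated stage contributions: " << residual << "\n";
    return 0;
}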
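Note on the deep copy introduced in [PATCH 25/27]: prevStageVariables_.push_back(gridVars) only copied the tuple of shared pointers, so every stored "previous stage" state kept aliasing the very same grid-variables objects; gridVars.deepCopy() clones the pointed-to objects instead. The standalone C++17 sketch below demonstrates the difference; VariablesTuple is a placeholder class, not the DuMux MultiDomainFVGridVariables.

#include <iostream>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

template<class... Vars>
class VariablesTuple
{
public:
    explicit VariablesTuple(std::shared_ptr<Vars>... vars) : vars_(std::move(vars)...) {}

    // default copy = shallow copy: the pointed-to objects remain shared
    VariablesTuple(const VariablesTuple&) = default;

    // deep copy: clone every pointed-to object into a fresh shared_ptr
    VariablesTuple deepCopy() const
    {
        return std::apply([](const auto&... ptr) {
            return VariablesTuple(std::make_shared<std::decay_t<decltype(*ptr)>>(*ptr)...);
        }, vars_);
    }

    template<std::size_t i>
    auto& get() { return *std::get<i>(vars_); }

private:
    std::tuple<std::shared_ptr<Vars>...> vars_;
};

int main()
{
    VariablesTuple<int, double> current(std::make_shared<int>(1), std::make_shared<double>(2.0));

    auto shallow = current;             // aliases the same state
    auto snapshot = current.deepCopy(); // independent copy, as needed for previous stages

    current.get<0>() = 42;
    std::cout << shallow.get<0>() << "\n";  // prints 42: the shallow copy changed too
    std::cout << snapshot.get<0>() << "\n"; // prints 1: the snapshot is preserved
    return 0;
}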