diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh index 80fd526a1cf362e66d957e8b659005f841681003..bb424adcbc75209a8856bdaedf08e1659aeb1115 100644 --- a/dumux/assembly/fv/boxlocalassembler.hh +++ b/dumux/assembly/fv/boxlocalassembler.hh @@ -156,6 +156,7 @@ public: { res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + // TODO: Skip this for explicit schemes? auto& row = jac[scvI.dofIndex()]; for (auto col = row.begin(); col != row.end(); ++col) row[col.index()][eqIdx] = 0.0; @@ -333,6 +334,7 @@ protected: const auto globalI = scvI.dofIndex(); const auto localI = scvI.localDofIndex(); + // TODO: How to handle the case of caching? const auto origCurVolVars = curElemVolVars[scvI]; auto& curVolVars = curElemVolVars[scvI]; diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh index d4374b8df10903a07836aa652caf9a70921a7fcd..06c543461a2d61a6c3248492d3e6d4d0b17e6172 100644 --- a/dumux/assembly/fv/localoperator.hh +++ b/dumux/assembly/fv/localoperator.hh @@ -215,7 +215,10 @@ protected: { const auto flux = Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); r[fvGeometry_.scv(scvf.insideScvIdx()).localDofIndex()] += flux; - r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; + + // We expect that for scvf.numOutsideScvs() > 1 the outside faces are visited separately! + if (scvf.numOutsideScvs() == 1) + r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; } // boundary faces diff --git a/dumux/discretization/box/subcontrolvolumeface.hh b/dumux/discretization/box/subcontrolvolumeface.hh index d2d1e3c8b36222b5ab05d2687780a67892fb505d..b11a298f3ea83dcb8b3aaeaad7b323fc4e7155c9 100644 --- a/dumux/discretization/box/subcontrolvolumeface.hh +++ b/dumux/discretization/box/subcontrolvolumeface.hh @@ -189,14 +189,20 @@ public: return scvIndices_[0]; } - //! index of the outside sub control volume for spatial param evaluation - // This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { assert(!boundary()); return scvIndices_[1]; } + + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast<std::size_t>(!boundary()); + } + //! The local index of this sub control volume face GridIndexType index() const { diff --git a/dumux/geomechanics/fvoperators.hh b/dumux/geomechanics/fvoperators.hh new file mode 100644 index 0000000000000000000000000000000000000000..c43ee24006d892d6807912de26b33f725c348b11 --- /dev/null +++ b/dumux/geomechanics/fvoperators.hh @@ -0,0 +1,115 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Geomechanics + * \brief Sub-control entity-local evaluation of the operators + * within the systems of equations of geomechanical models. + */ +#ifndef DUMUX_FV_GEOMECHANICS_OPERATORS_HH +#define DUMUX_FV_GEOMECHANICS_OPERATORS_HH + +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Geomechanics + * \brief Sub-control entity-local evaluation of the operators + * within the systems of equations of geomechanical models. + * \tparam ModelTraits defines model-related types and variables (e.g. number of phases) + * \tparam ElementVariables the type of element-local view on the grid variables + * \tparam StressType the constitutive model for stress computation at scv faces. + */ +template +class FVGeomechanicsOperators +: public FVOperators +{ + using ParentType = FVOperators; + + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using Problem = std::decay_t().gridVolVars().problem())>; + using Indices = typename ModelTraits::Indices; + + static constexpr int numPhases = ModelTraits::numFluidPhases(); + static constexpr int conti0EqIdx = ModelTraits::Indices::conti0EqIdx; + +public: + //! export the type used to store scalar values for all equations + using typename ParentType::NumEqVector; + + // export the types of the flux/source/storage terms + using typename ParentType::FluxTerm; + using typename ParentType::SourceTerm; + using typename ParentType::StorageTerm; + + /*! + * \brief Compute the storage term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param scv The sub-control volume + * \param volVars The primary & secondary variables evaluated for the scv + * \note This must be overloaded by the implementation + */ + static StorageTerm storage(const Problem& problem, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { return StorageTerm(0.0); } + + // TODO: overload source here to add bulk gravity term! + + /*!
+ * \brief Compute the flux term of the equations for the given sub-control volume face + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param elemFluxVarsCache The element-local view on the grid flux variables cache + * \param scvf The sub-control volume face for which the flux term is to be computed + * \note This must be overloaded by the implementation + */ + static FluxTerm flux(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const ElementFluxVariablesCache& elemFluxVarsCache, + const SubControlVolumeFace& scvf) + { return StressType::force(problem, element, fvGeometry, elemVolVars, scvf, elemFluxVarsCache); } +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/geomechanics/poroelastic/couplingmanager.hh b/dumux/geomechanics/poroelastic/couplingmanager.hh index a49e3e79e4d48cbaac9f8c4bf0aa9af72f30bdb3..d43db707399752a047a55ac5468fadcffe2a308b 100644 --- a/dumux/geomechanics/poroelastic/couplingmanager.hh +++ b/dumux/geomechanics/poroelastic/couplingmanager.hh @@ -73,6 +73,11 @@ class PoroMechanicsCouplingManager : public virtual CouplingManager< MDTraits > template using GridIndexType = typename GridView::IndexSet::IndexType; template using Element = typename GridView::template Codim<0>::Entity; template using GlobalPosition = typename Element::Geometry::GlobalCoordinate; + template using SubSolutionVector = typename GridVariables::SolutionVector; + template + using ElementSolution = std::decay_t>(), + std::declval>(), + std::declval>()))>; //! we assume that the two sub-problem operate on the same grid static_assert(std::is_same< GridView, GridView >::value, @@ -103,14 +108,35 @@ class PoroMechanicsCouplingManager : public virtual CouplingManager< MDTraits > */ struct PoroMechanicsCouplingContext { - // We need unique ptrs because the local views have no default constructor + // We need optionals because the local views have no default constructor Element pmFlowElement; - std::unique_ptr< FVElementGeometry > pmFlowFvGeometry; - std::unique_ptr< ElementVolumeVariables > pmFlowElemVolVars; + std::optional< FVElementGeometry > pmFlowFvGeometry; + std::optional< ElementVolumeVariables > pmFlowElemVolVars; + }; + + /*! + * \brief Poromechanical domain data required for the residual calculation of an + * element of the porous medium flow domain. + */ + struct PorousMediumFlowCouplingContext + { + using Index = GridIndexType; + using ElemSol = ElementSolution; + using ElemDofs = std::vector; + + std::vector poroMechElemIndices; + std::vector poroMechElemDofs; + std::vector poroMechElemSols; }; public: + // export coupling context types + template + using CouplingContext = std::conditional_t; + // export the domain ids static constexpr auto pmFlowId = Dune::index_constant(); static constexpr auto poroMechId = Dune::index_constant(); @@ -170,33 +196,106 @@ public: return CouplingStencilType{ {eIdx} }; } - //! Pull up the base class' default implementation - using ParentType::bindCouplingContext; + //! 
Construct the coupling context for an element of the porous medium flow domain + template + std::shared_ptr + makeCouplingContext(Dune::index_constant pmFlowDomainId, + const Element& element, + const GridVariables& gridVars) const + { + const auto& poroMechGridVars = gridVars[poroMechId]; + const auto& poroMechGridGeom = poroMechGridVars.gridGeometry(); - /*! - * \brief For the assembly of the element residual of an element of the poro-mechanics domain, - * we have to prepare the element variables of the porous medium flow domain. - */ - template< class Assembler > - void bindCouplingContext(Dune::index_constant poroMechDomainId, - const Element& element, - const Assembler& assembler) + const auto globalI = gridVars[pmFlowId].gridGeometry().elementMapper().index(element); + const auto& connectivityMapI = gridVars[pmFlowId].gridGeometry().connectivityMap()[globalI]; + + std::vector< GridIndexType > elemIndices; + std::vector< ElementSolution > elemSols; + std::vector< std::vector> > elemDofs; + + elemIndices.reserve(connectivityMapI.size()+1); + elemSols.reserve(connectivityMapI.size()+1); + elemDofs.reserve(connectivityMapI.size()+1); + + static constexpr int dim = GridView::dimension; + { + const auto& elementI = poroMechGridGeom.element(globalI); + elemIndices.push_back(globalI); + elemSols.push_back(elementSolution(elementI, poroMechGridVars.dofs(), poroMechGridGeom)); + + elemDofs.push_back({}); + elemDofs.back().reserve(elementI.subEntities(dim)); + for (int i = 0; i < elementI.subEntities(dim); ++i) + elemDofs.back().push_back(poroMechGridGeom.vertexMapper().subIndex(elementI, i, dim)); + } + + for (const auto& dataJ : connectivityMapI) + { + const auto& elementJ = poroMechGridGeom.element(dataJ.globalJ); + elemIndices.push_back(dataJ.globalJ); + elemSols.push_back(elementSolution(elementJ, poroMechGridVars.dofs(), poroMechGridGeom)); + + elemDofs.push_back({}); + elemDofs.back().reserve(elementJ.subEntities(dim)); + for (int i = 0; i < elementJ.subEntities(dim); ++i) + elemDofs.back().push_back(poroMechGridGeom.vertexMapper().subIndex(elementJ, i, dim)); + } + + using C = PorousMediumFlowCouplingContext; + return std::make_shared(C{std::move(elemIndices), std::move(elemDofs), std::move(elemSols)}); + } + + //! 
Construct the coupling context for an element of the poromechanical domain + template + std::shared_ptr + makeCouplingContext(Dune::index_constant poroMechDomainId, + const Element& element, + const GridVariables& gridVars) const { - // first reset the context - poroMechCouplingContext_.pmFlowFvGeometry.reset(nullptr); - poroMechCouplingContext_.pmFlowElemVolVars.reset(nullptr); + PoroMechanicsCouplingContext result; - // prepare the fvGeometry and the element volume variables - // these quantities will be used later to obtain the effective pressure - auto fvGeometry = localView( this->problem(pmFlowId).gridGeometry() ); - auto elemVolVars = localView( assembler.gridVariables(Dune::index_constant()).curGridVolVars() ); + const auto& pmFlowGridVars = gridVars[Dune::index_constant()]; + const auto& pmFlowGridGeom = pmFlowGridVars.gridGeometry(); + const auto& pmFlowGridVolVars = pmFlowGridVars.gridVolVars(); + auto fvGeometry = localView( pmFlowGridGeom ); + auto elemVolVars = localView( pmFlowGridVolVars ); fvGeometry.bindElement(element); - elemVolVars.bindElement(element, fvGeometry, this->curSol()[Dune::index_constant()]); + elemVolVars.bindElement(element, fvGeometry, pmFlowGridVars.dofs()); - poroMechCouplingContext_.pmFlowElement = element; - poroMechCouplingContext_.pmFlowFvGeometry = std::make_unique< FVElementGeometry >(fvGeometry); - poroMechCouplingContext_.pmFlowElemVolVars = std::make_unique< ElementVolumeVariables >(elemVolVars); + result.pmFlowElement = element; + result.pmFlowFvGeometry.emplace(std::move(fvGeometry)); + result.pmFlowElemVolVars.emplace(std::move(elemVolVars)); + + return std::make_shared(result); + } + + void setCouplingContext(std::shared_ptr context) + { pmFlowCouplingContext_ = context; } + + void setCouplingContext(std::shared_ptr context) + { poroMechCouplingContext_ = context; } + + // TODO: This would be called when assembling both domains, thus, in the case + // of assembly of a poromech element, we don't have the correct context! + ElementSolution getPoroMechElemSol(const Element& element) const + { + const auto eIdx = this->problem(pmFlowId).gridGeometry().elementMapper().index(element); + + if (pmFlowCouplingContext_) + { + auto begin = pmFlowCouplingContext_->poroMechElemIndices.begin(); + auto end = pmFlowCouplingContext_->poroMechElemIndices.end(); + const auto it = std::find(begin, end, eIdx); + + if (it != end) + return pmFlowCouplingContext_->poroMechElemSols[std::distance(begin, it)]; + } + + // make elem sol from stored solution. + // TODO: How to resolve such circular dependencies? + const auto poroMechElement = this->problem(poroMechId).gridGeometry().element(eIdx); + return elementSolution(poroMechElement, this->curSol()[poroMechId], this->problem(poroMechId).gridGeometry()); } /*! @@ -213,12 +312,18 @@ public: unsigned int pvIdxJ) { // communicate the deflected pm flow domain primary variable + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(poroMechDomainId, poroMechLocalAssembler, pmFlowDomainId, dofIdxGlobalJ, priVarsJ, pvIdxJ); // now, update the coupling context (i.e. 
elemVolVars) - const auto& element = poroMechCouplingContext_.pmFlowElement; - const auto& fvGeometry = *poroMechCouplingContext_.pmFlowFvGeometry; - poroMechCouplingContext_.pmFlowElemVolVars->bindElement(element, fvGeometry, this->curSol()[pmFlowDomainId]); + const auto& element = poroMechCouplingContext_->pmFlowElement; + const auto& fvGeometry = *(poroMechCouplingContext_->pmFlowFvGeometry); + + // TODO: Here the cm-internal solution is still used. + // This still works because the deflected solution is always the one of the current + // stage, and this function is only called to deflect the variables of the current stage. + const auto& solution = this->curSol()[pmFlowId]; + poroMechCouplingContext_->pmFlowElemVolVars->bindElement(element, fvGeometry, solution); } /*! @@ -235,13 +340,18 @@ public: const PrimaryVariables& priVarsJ, unsigned int pvIdxJ) { - // communicate the deflected displacement + // communicate the deflected displacement + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(poroMechDomainIdI, poroMechLocalAssembler, poroMechDomainIdJ, dofIdxGlobalJ, priVarsJ, pvIdxJ); // now, update the coupling context (i.e. elemVolVars) - (*poroMechCouplingContext_.pmFlowElemVolVars).bindElement(poroMechCouplingContext_.pmFlowElement, - *poroMechCouplingContext_.pmFlowFvGeometry, - this->curSol()[Dune::index_constant()]); + // TODO: Here the cm-internal solution is still used. + // This still works because the deflected solution is always the one of the current + // stage, and this function is only called to deflect the variables of the current stage. + const auto& solution = this->curSol()[pmFlowId]; + poroMechCouplingContext_->pmFlowElemVolVars->bindElement(poroMechCouplingContext_->pmFlowElement, + *(poroMechCouplingContext_->pmFlowFvGeometry), + solution); } /*! @@ -257,7 +367,26 @@ public: unsigned int pvIdxJ) { // communicate the deflected displacement + // TODO: We still have to have and update the solution vector because it is used in updateCoupledVariables ParentType::updateCouplingContext(pmFlowDomainId, pmFlowLocalAssembler, domainIdJ, dofIdxGlobalJ, priVarsJ, pvIdxJ); + + // TODO: This is now much more expensive than the simple update before... + // Could an alternative be to store all stage solutions in the cm + // and not rely on a context here? + if constexpr (j == poroMechId) + { + for (int i = 0; i < pmFlowCouplingContext_->poroMechElemIndices.size(); ++i) + { + const auto& contextDofs = pmFlowCouplingContext_->poroMechElemDofs[i]; + const auto it = std::find(contextDofs.begin(), contextDofs.end(), dofIdxGlobalJ); + + if (it != contextDofs.end()) + { + const auto idxInContext = std::distance(contextDofs.begin(), it); + pmFlowCouplingContext_->poroMechElemSols[i][idxInContext][pvIdxJ] = priVarsJ[pvIdxJ]; + } + } + } } //! Pull up the base class' default implementation @@ -308,26 +437,14 @@ public: * and the storage term here.
*/ template< class PMFlowLocalAssembler > - typename LocalResidual::ElementResidualVector + auto evalCouplingResidual(Dune::index_constant pmFlowDomainId, const PMFlowLocalAssembler& pmFlowLocalAssembler, Dune::index_constant poroMechDomainId, GridIndexType dofIdxGlobalJ) { - auto res = pmFlowLocalAssembler.localResidual().evalFluxAndSource(pmFlowLocalAssembler.element(), - pmFlowLocalAssembler.fvGeometry(), - pmFlowLocalAssembler.curElemVolVars(), - pmFlowLocalAssembler.elemFluxVarsCache(), - pmFlowLocalAssembler.elemBcTypes()); - - // If the residual instationary, evaluate storage - if (!pmFlowLocalAssembler.localResidual().isStationary()) - res += pmFlowLocalAssembler.localResidual().evalStorage(pmFlowLocalAssembler.element(), - pmFlowLocalAssembler.fvGeometry(), - pmFlowLocalAssembler.prevElemVolVars(), - pmFlowLocalAssembler.curElemVolVars()); - - return res; + // TODO: efficiency! + return pmFlowLocalAssembler.evalLocalResidual(); } /*! @@ -337,17 +454,14 @@ public: * the fluxes as well as the source term here. */ template< class PoroMechLocalAssembler > - typename LocalResidual::ElementResidualVector + auto evalCouplingResidual(Dune::index_constant poroMechDomainId, const PoroMechLocalAssembler& poroMechLocalAssembler, Dune::index_constant pmFlowDomainId, GridIndexType dofIdxGlobalJ) { - return poroMechLocalAssembler.localResidual().evalFluxAndSource(poroMechLocalAssembler.element(), - poroMechLocalAssembler.fvGeometry(), - poroMechLocalAssembler.curElemVolVars(), - poroMechLocalAssembler.elemFluxVarsCache(), - poroMechLocalAssembler.elemBcTypes()); + // TODO: efficiency! + return poroMechLocalAssembler.evalLocalResidual(); } //! Return the porous medium flow variables an element/scv of the poromech domain couples to @@ -355,7 +469,7 @@ public: { //! If we do not yet have the queried object, build it first const auto eIdx = this->problem(poroMechId).gridGeometry().elementMapper().index(element); - return (*poroMechCouplingContext_.pmFlowElemVolVars)[eIdx]; + return (*poroMechCouplingContext_->pmFlowElemVolVars)[eIdx]; } /*! @@ -412,8 +526,9 @@ private: // Container for storing the coupling element stencils for the pm flow domain std::vector< CouplingStencilType > pmFlowCouplingMap_; - // the coupling context of the poromechanics domain - PoroMechanicsCouplingContext poroMechCouplingContext_; + // the coupling contexts + std::shared_ptr poroMechCouplingContext_ = nullptr; + std::shared_ptr pmFlowCouplingContext_ = nullptr; }; } //end namespace Dumux diff --git a/dumux/multidomain/assembler.hh b/dumux/multidomain/assembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..56c5c9f30c5ab73aac00bf4e5525bf4068eb8d6b --- /dev/null +++ b/dumux/multidomain/assembler.hh @@ -0,0 +1,536 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. 
* + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup MultiDomain + * \ingroup Assembly + * \brief A linear system assembler (residual and Jacobian) for finite volume schemes + * with multiple domains + */ +#ifndef DUMUX_MULTIDOMAIN_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "couplingjacobianpattern.hh" +// #include "subdomaincclocalassembler.hh" +// #include "subdomainboxlocalassembler.hh" +#include "subdomainstaggeredlocalassembler.hh" + +#include +#include "assembly/localassembler.hh" + +namespace Dumux { + +/*! + * \ingroup MultiDomain + * \ingroup Assembly + * \brief A linear system assembler (residual and Jacobian) for finite volume schemes (box, tpfa, mpfa, ...) + * with multiple domains + * \tparam MDTraits the multidomain traits + * \tparam diffMethod the differentiation method used to compute the residual derivatives + * \tparam useImplicitAssembly whether to use an implicit or explicit time discretization + */ +template +class MultiDomainAssembler +{ + template + using SubDomainTypeTag = typename MDTraits::template SubDomain::TypeTag; + +public: + using Traits = MDTraits; + using CouplingManager = CMType; + + // types related to linear algebra + using Scalar = typename MDTraits::Scalar; + using JacobianMatrix = typename MDTraits::JacobianMatrix; + using SolutionVector = typename MDTraits::SolutionVector; + using ResidualType = SolutionVector; + + // types underlying the subdomains + template using SubDomainLocalOperator = typename MDTraits::template SubDomain::LocalOperator; + template using SubDomainGridVariables = typename MDTraits::template SubDomain::GridVariables; + template using SubDomainGridGeometry = typename MDTraits::template SubDomain::GridGeometry; + template using SubDomainProblem = typename MDTraits::template SubDomain::Problem; + + // The variables that define an evaluation point + using Variables = MultiDomainFVGridVariables; + using GridVariables = Variables; + + // The grid geometry on which it is operated + using GridGeometry = MultiDomainFVGridGeometry; + + //! export the type for parameters of a stage in time integration + using StageParams = MultiStageParams; + +private: + template using SubDomainGridView = typename SubDomainGridGeometry::GridView; + template using SubDomainElemVars = typename SubDomainGridVariables::LocalView; + template using SubDomainElement = typename SubDomainGridView::template Codim<0>::Entity; + + using TimeLoop = TimeLoopBase; + using ThisType = MultiDomainAssembler; + using GridGeometryTuple = typename MDTraits::template TupleOfSharedPtrConst; + + template + using Context = typename CouplingManager::template CouplingContext; + +public: + + /*! + * \brief Constructor from a grid geometry tuple for stationary problems + * \note the grid variables might be temporarily changed during assembly (if caching is enabled) + * it is however guaranteed that the state after assembly will be the same as before + */ + MultiDomainAssembler(GridGeometryTuple&& gridGeometry, std::shared_ptr couplingManager) + : gridGeometryTuple_(gridGeometry) + , couplingManager_(couplingManager) + , isImplicit_(true) + { + // TODO: Store that instantiation happened for stationary problem for later error catch?
+ // Or maybe we need another mechanism to tell the assembler if implicit/explicit... + std::cout << "Instantiated MultiDomainAssembler for a stationary problem." << std::endl; + } + + /*! + * \brief The constructor for instationary problems + * \note the grid variables might be temporarily changed during assembly (if caching is enabled) + * it is however guaranteed that the state after assembly will be the same as before + */ + template + MultiDomainAssembler(GridGeometryTuple&& gridGeometry, + std::shared_ptr couplingManager, + const TimeIntegrationMethod& method) + : gridGeometryTuple_(gridGeometry) + , couplingManager_(couplingManager) + , isImplicit_(method.implicit()) + { + std::cout << "Instantiated MultiDomainAssembler for an instationary problem." << std::endl; + } + + /*! + * \brief Assembles the global Jacobian of the residual + * and the residual for the current solution. + */ + void assembleJacobianAndResidual(const GridVariables& gridVars) + { + ptr_ = &gridVars; + resetJacobian_(); + resetResidual_(); + + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto domainId) + { + auto& jacRow = (*jacobian_)[domainId]; + auto& subRes = (*residual_)[domainId]; + this->assembleJacobianAndResidual_(domainId, jacRow, subRes, gridVars); + }); + + using namespace Dune::Indices; + // std::cout << "SOL in GV: " << std::endl; + // Dune::printvector(std::cout, gridVars.dofs()[_0], "", "", 1, 10, 5); + // Dune::printvector(std::cout, gridVars.dofs()[_1], "", "", 1, 10, 5); + // + // std::cout << "Linear system: " << std::endl; + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_0], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_0][_1], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_0], "", "", 10, 2); + // Dune::printmatrix(std::cout, (*jacobian_)[_1][_1], "", "", 10, 2); + // Dune::printvector(std::cout, (*residual_)[_0], "", "", 1, 10, 2); + // Dune::printvector(std::cout, (*residual_)[_1], "", "", 1, 10, 2); + } + + //! compute the residuals using the internal residual + void assembleResidual(const GridVariables& gridVars) + { + ptr_ = &gridVars; + resetResidual_(); + assembleResidual(*residual_, gridVars); + } + + //! assemble a residual r + void assembleResidual(ResidualType& r, const GridVariables& gridVars) + { + ptr_ = &gridVars; + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(r)), [&](const auto domainId) + { + auto& subRes = r[domainId]; + this->assembleResidual_(domainId, subRes, gridVars); + }); + } + + //! compute the residual and return it's vector norm + //! TODO: this needs to be adapted in parallel + Scalar residualNorm(const GridVariables& gridVars) + { + ptr_ = &gridVars; + ResidualType residual; + setResidualSize(residual); + assembleResidual(residual, gridVars); + + // calculate the square norm of the residual + const Scalar result2 = residual.two_norm2(); + using std::sqrt; + return sqrt(result2); + } + + /*! + * \brief Tells the assembler which jacobian and residual to use. + * This also resizes the containers to the required sizes and sets the + * sparsity pattern of the jacobian matrix. + */ + void setLinearSystem(std::shared_ptr A, + std::shared_ptr r) + { + jacobian_ = A; + residual_ = r; + + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + setResidualSize(*residual_); + } + + /*! 
+ * \brief The version without arguments uses the default constructor to create + * the jacobian and residual objects in this assembler if you don't need them outside this class + */ + void setLinearSystem() + { + jacobian_ = std::make_shared(); + residual_ = std::make_shared(); + + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + setResidualSize(*residual_); + } + + /*! + * \brief Sets the jacobian build mode + */ + void setJacobianBuildMode(JacobianMatrix& jac) const + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto i) + { + forEach(jac[i], [&](auto& jacBlock) + { + using BlockType = std::decay_t; + if (jacBlock.buildMode() == BlockType::BuildMode::unknown) + jacBlock.setBuildMode(BlockType::BuildMode::random); + else if (jacBlock.buildMode() != BlockType::BuildMode::random) + DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment"); + }); + }); + } + + /*! + * \brief Sets the jacobian sparsity pattern. + */ + void setJacobianPattern(JacobianMatrix& jac) const + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence(), [&](const auto domainI) + { + forEach(integralRange(Dune::Hybrid::size(jac[domainI])), [&](const auto domainJ) + { + const auto pattern = this->getJacobianPattern_(domainI, domainJ); + pattern.exportIdx(jac[domainI][domainJ]); + }); + }); + } + + /*! + * \brief Resizes the residual + */ + void setResidualSize(SolutionVector& res) const + { + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(res)), [&](const auto domainId) + { res[domainId].resize(this->numDofs(domainId)); }); + } + + //! the number of dof locations of domain i + template + std::size_t numDofs(Dune::index_constant domainId) const + { return std::get(gridGeometryTuple_)->numDofs(); } + + //! the problem of domain i + // template + // const auto& problem(Dune::index_constant domainId) const + // { return *std::get(problemTuple_); } + + //! the finite volume grid geometry of domain i + template + const auto& gridGeometry(Dune::index_constant domainId) const + { return *std::get(gridGeometryTuple_); } + + //! the grid view of domain i + template + const auto& gridView(Dune::index_constant domainId) const + { return gridGeometry(domainId).gridView(); } + + //! the grid variables of domain i + template + auto& gridVariables(Dune::index_constant domainId) + { return (*ptr_)[domainId]; } + + //! the grid variables of domain i + template + const auto& gridVariables(Dune::index_constant domainId) const + { return (*ptr_)[domainId]; } + + //! the coupling manager + const CouplingManager& couplingManager() const + { return *couplingManager_; } + + //! the full Jacobian matrix + JacobianMatrix& jacobian() + { return *jacobian_; } + + //! the full residual vector + SolutionVector& residual() + { return *residual_; } + + /*! + * \brief Prepare for a new stage within a time integration step. + * This caches the given grid variables, which are then used as a + * representation of the previous stage. Moreover, the given grid + * variables are then updated to the time level of the upcoming stage. + * \param gridVars the grid variables representing the previous stage + * \param params the parameters with the weights to be used in the upcoming stage + * \todo TODO: This function does two things, namely caching and then updating. + * Should we split/delegate this, or is the current name descriptive enough? 
+ * When used from outside, one would expect the gridvars to be prepared maybe, + * and that is what's done. Caching might not be expected from the outside but + * it is also not important that that is known from there? + */ + void prepareStage(GridVariables& gridVars, + std::shared_ptr params) + { + stageParams_ = params; + const auto curStage = params->size() - 1; + + // we keep track of previous stages, they are needed for residual assembly + prevStageVariables_.push_back(gridVars.deepCopy()); + + // Now we update the time level of the given grid variables + const auto t = params->timeAtStage(curStage); + const auto prevT = params->timeAtStage(0); + const auto dtFraction = params->timeStepFraction(curStage); + TimeLevel timeLevel(t, prevT, dtFraction); + + gridVars.updateTime(timeLevel); + } + + /*! + * \brief Remove traces from stages within a time integration step. + */ + void clearStages() + { + prevStageVariables_.clear(); + stageParams_ = nullptr; + } + +private: + // reset the residual vector to 0.0 + void resetResidual_() + { + if(!residual_) + { + residual_ = std::make_shared(); + setResidualSize(*residual_); + } + + (*residual_) = 0.0; + } + + // reset the jacobian vector to 0.0 + void resetJacobian_() + { + if(!jacobian_) + { + jacobian_ = std::make_shared(); + setJacobianBuildMode(*jacobian_); + setJacobianPattern(*jacobian_); + } + + (*jacobian_) = 0.0; + } + + template + void assembleJacobianAndResidual_(Dune::index_constant domainId, + JacRow& jacRow, SubRes& subRes, + const GridVariables& gridVars) + { + assemble_(domainId, [&](const auto& element) + { + auto ggLocalView = localView(gridGeometry(domainId)); + ggLocalView.bind(element); + auto [elemVars, contexts] = this->prepareElemVariables_(gridVars, element, ggLocalView); + + using LocalAssembler = Dumux::MultiDomainLocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, contexts, elemVars, stageParams_, couplingManager_); + localAssembler.assembleJacobianAndResidual(jacRow, subRes); + }); + } + + template + void assembleResidual_(Dune::index_constant domainId, + SubRes& subRes, + const GridVariables& gridVars) + { + assemble_(domainId, [&](const auto& element) + { + auto ggLocalView = localView(gridGeometry(domainId)); + ggLocalView.bind(element); + auto [elemVars, contexts] = this->prepareElemVariables_(gridVars, element, ggLocalView); + + using LocalAssembler = Dumux::MultiDomainLocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, contexts, elemVars, stageParams_, couplingManager_); + localAssembler.assembleResidual(subRes); + }); + } + + /*! + * \brief A method assembling something per element + * \note Handles exceptions for parallel runs + * \throws NumericalProblem on all processes if something throwed during assembly + * TODO: assemble in parallel + */ + template + void assemble_(Dune::index_constant domainId, AssembleElementFunc&& assembleElement) const + { + // let the local assembler add the element contributions + for (const auto& element : elements(gridView(domainId))) + assembleElement(element); + } + + // get diagonal block pattern + template = 0> + Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant domainI, + Dune::index_constant domainJ) const + { + const auto& gg = gridGeometry(domainI); + auto pattern = isImplicit_ ? 
getJacobianPattern(gg) + : getJacobianPattern(gg); + couplingManager_->extendJacobianPattern(domainI, pattern); + return pattern; + } + + // get coupling block pattern + template = 0> + Dune::MatrixIndexSet getJacobianPattern_(Dune::index_constant domainI, + Dune::index_constant domainJ) const + { + return isImplicit_ ? getCouplingJacobianPattern(*couplingManager_, + domainI, gridGeometry(domainI), + domainJ, gridGeometry(domainJ)) + : getCouplingJacobianPattern(*couplingManager_, + domainI, gridGeometry(domainI), + domainJ, gridGeometry(domainJ)); + } + + //! prepares the local views on the grid variables for the given element + //! \todo: TODO: when stageparams.skipSpatial() == true, we don't need to bind flux vars caches! + template + std::pair< std::vector>, + std::vector>> > + prepareElemVariables_(const Variables& gridVars, + const SubDomainElement& element, + const typename SubDomainGridGeometry::LocalView& ggLocalView) const + { + static constexpr auto domainId = Dune::index_constant(); + + if (!stageParams_) + { + auto context = couplingManager_->makeCouplingContext(domainId, element, gridVars); + auto elemVars = localView(gridVars[domainId]); + elemVars.bind(element, ggLocalView); + return std::make_pair(std::vector>{{elemVars}}, + std::vector>>{{context}}); + } + else + { + std::vector> elemVars; + std::vector>> contexts; + + elemVars.reserve(stageParams_->size()); + contexts.reserve(stageParams_->size()); + + for (int i = 0; i < stageParams_->size()-1; ++i) + { + elemVars.emplace_back(prevStageVariables_[i][domainId]); + contexts.emplace_back(couplingManager_->makeCouplingContext(domainId, element, prevStageVariables_[i])); + } + + elemVars.emplace_back(gridVars[domainId]); + contexts.emplace_back(couplingManager_->makeCouplingContext(domainId, element, gridVars)); + + for (int i = 0; i < elemVars.size(); ++i) + { + couplingManager_->setCouplingContext(contexts[i]); + elemVars[i].bind(element, ggLocalView); + } + + return std::make_pair(std::vector>{elemVars}, + std::vector>>{contexts}); + } + } + + //! the grid geometries of all subdomains + GridGeometryTuple gridGeometryTuple_; + + //! pointer to the coupling manager + std::shared_ptr couplingManager_; + + //! states if an implicit of explicit scheme is used (affects jacobian pattern) + bool isImplicit_; + + //! shared pointers to the jacobian matrix and residual + std::shared_ptr jacobian_; + std::shared_ptr residual_; + + //! parameters containing information on the current stage of time integration + std::shared_ptr stageParams_ = nullptr; + + //! keep track of the states of previous stages within a time integration step + std::vector prevStageVariables_; + + const GridVariables* ptr_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/boxlocalassembler.hh b/dumux/multidomain/assembly/boxlocalassembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..71b3548c591cc93c00d653e247bd066c58a3677b --- /dev/null +++ b/dumux/multidomain/assembly/boxlocalassembler.hh @@ -0,0 +1,520 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. 
* + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution per element + * for the box scheme in the context of multidomain models. + */ +#ifndef DUMUX_MULTIDOMAIN_BOX_LOCAL_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_BOX_LOCAL_ASSEMBLER_HH + +#include +#include + +#include + +#include +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for the box scheme. + * \tparam Assembler The grid-wide assembler type + */ +template +class SubDomainBoxLocalAssembler +{ + using CouplingManager = typename Assembler::CouplingManager; + using GridVariables = typename Assembler::template SubDomainGridVariables; + using GridGeometry = typename Assembler::template SubDomainGridGeometry; + using LocalOperator = typename Assembler::template SubDomainLocalOperator; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + using Context = typename CouplingManager::template CouplingContext; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::SolutionVector; + + // TODO: remove this assert? Get numEq from somewhere else? + static constexpr int numEq = PrimaryVariables::size(); + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + // static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + static constexpr auto domainId = typename Dune::index_constant(); + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit SubDomainBoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector>& contexts, + std::vector& elemVars, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , contexts_(contexts) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + , cm_(cm) + { + assert(elemVars.size() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. 
+ */ + explicit SubDomainBoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector>& contexts, + std::vector& elemVars, + std::shared_ptr stageParams, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , contexts_(contexts) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + , cm_(cm) + {} + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacRow& jacRow, SubRes& res) + { + // TODO: treat ghost elements for parallelism + + // assemble the diagonal block + const auto residual = assembleJacobianAndResidual_(jacRow[domainId]); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + // assemble the coupling blocks + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(jacRow)), [&](auto&& j) + { + if constexpr (j != id) + { + static constexpr auto domainJ = Dune::index_constant(); + assembleJacobianCoupling_(j, jacRow[domainJ]); + } + }); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + + // TODO: Skip this for explicit schemes? + auto& jac = jacRow[domainId]; + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + + // TODO: Periodicity + // if (fvGeometry().gridGeometry().dofOnPeriodicBoundary(scvI.dofIndex())) {} + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobian(JacobianBlock& jac) + { + assembleJacobianAndResidual_(jac); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + // TODO: Skip this for explicit schemes? + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Assemble the residual only + */ + template + void assembleResidual(SubRes& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + //! Enforce Dirichlet constraints + template + void enforceDirichletConstraints(const ApplyFunction& applyDirichlet) + { + // enforce Dirichlet boundary conditions + evalDirichletBoundaries(applyDirichlet); + // TODO: take care of internal Dirichlet constraints (if enabled) + // enforceInternalDirichletConstraints(applyDirichlet); + } + + /*! 
+ * \brief Evaluates Dirichlet boundaries + */ + template< typename ApplyDirichletFunctionType > + void evalDirichletBoundaries(ApplyDirichletFunctionType applyDirichlet) + { + for (const auto& scvI : scvs(fvGeometry())) + { + if (!fvGeometry().gridGeometry().dofOnBoundary(scvI.dofIndex())) + continue; + + const auto bcTypes = problem().boundaryTypes(element(), scvI); + if (bcTypes.hasDirichlet()) + { + const auto dirichletValues = problem().dirichlet(element(), scvI); + + // set the Dirichlet conditions in residual and jacobian + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (bcTypes.isDirichlet(eqIdx)) + { + const auto pvIdx = bcTypes.eqToDirichletIndex(eqIdx); + assert(0 <= pvIdx && pvIdx < numEq); + applyDirichlet(scvI, dirichletValues, eqIdx, pvIdx); + } + } + } + } + } + + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + cm_->setCouplingContext(contexts_[k]); + LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elementVariables_.back(); } + + //! Still needed by some coupling managers + //! \todo TODO: Double check if still necessary after complete + //! integration of new time integration schemes. + const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } + const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } + + //! Returns whether a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + // TODO: This is currently required by some coupling managers. + // We should probably get rid of this coupling. + static constexpr bool isImplicit() { return /*this is buggy*/ true; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + ElementVariables& elemVariables_() + { return elementVariables_.back(); } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. + */ + template + ElementResidualVector assembleJacobianAndResidual_(JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution.
+ * \note This specialization is for the box scheme with numeric differentiation + */ + template + ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianBlock& A) + { + // get the variables of the current stage + auto& curVariables = elemVariables_(); + auto& curElemVolVars = curVariables.elemVolVars(); + const auto& x = curVariables.gridVariables().dofs(); + + const auto origResiduals = evalLocalResidual(); + const auto origElemSol = elementSolution(element(), x, fvGeometry().gridGeometry()); + auto elemSol = origElemSol; + + ////////////////////////////////////////////////////////////////////////////////////////////// + // Calculate derivatives of the residual of all dofs in element with respect to themselves. // + ////////////////////////////////////////////////////////////////////////////////////////////// + + ElementResidualVector partialDerivs(fvGeometry().numScv()); + for (const auto& scvI : scvs(fvGeometry())) + { + // dof index and corresponding actual pri vars + const auto globalI = scvI.dofIndex(); + const auto localI = scvI.localDofIndex(); + + const auto origCurVolVars = curElemVolVars[scvI]; + auto& curVolVars = curElemVolVars[scvI]; + + // calculate derivatives w.r.t to the privars at the dof at hand + for (int pvIdx = 0; pvIdx < numEq; pvIdx++) + { + partialDerivs = 0.0; + auto evalResiduals = [&](Scalar priVar) + { + // update the volume variables and compute element residual + elemSol[localI][pvIdx] = priVar; + cm_->updateCouplingContext(domainId, *this, domainId, scvI.dofIndex(), elemSol[localI], pvIdx); + curVolVars.update(elemSol, problem(), element(), scvI); + return evalLocalResidual(); + }; + + // derive the residuals numerically + static const NumericEpsilon eps_{problem().paramGroup()}; + static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod"); + NumericDifferentiation::partialDerivative(evalResiduals, elemSol[localI][pvIdx], partialDerivs, + origResiduals, eps_(elemSol[localI][pvIdx], pvIdx), + numDiffMethod); + + // update the global stiffness matrix with the current partial derivatives + for (const auto& scvJ : scvs(fvGeometry())) + { + const auto globalJ = scvJ.dofIndex(); + const auto localJ = scvJ.localDofIndex(); + + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // A[i][col][eqIdx][pvIdx] is the rate of change of the + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof 'col' + A[globalJ][globalI][eqIdx][pvIdx] += partialDerivs[localJ][eqIdx]; + } + } + + // restore the original element solution & volume variables + elemSol[localI][pvIdx] = origElemSol[localI][pvIdx]; + curVolVars = origCurVolVars; + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[localI], pvIdx); + + // TODO additional dof dependencies + } + } + + return origResiduals; + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobianCoupling_(Dune::index_constant domainJ, JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianCouplingNumeric_(domainJ, A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives with respect to the given element by + * means of numeric differentiation and adds them to the global matrix. 
+ */ + template + void assembleJacobianCouplingNumeric_(Dune::index_constant domainJ, JacobianBlock& A) + { + const auto& stencil = cm_->couplingStencil(domainId, element(), domainJ); + const auto& dofs = cm_->dofs(domainJ); + + auto& elemVolVars = elemVariables_().elemVolVars(); + auto& elemFluxVarsCache = elemVariables_().elemFluxVarsCache(); + + // TODO: How to handle the case of caching? + auto updateCoupledVariables = [&] () + { + // Update ourself after the context has been modified. Depending on the + // type of caching, other objects might have to be updated. All ifs can be optimized away. + cm_->updateCoupledVariables(domainId, *this, elemVolVars, elemFluxVarsCache); + }; + + for (const auto globalJ : stencil) + { + // undeflected privars and privars to be deflected + const auto origPriVarsJ = dofs[globalJ]; + auto priVarsJ = origPriVarsJ; + + // the undeflected coupling residual + const auto origResidual = cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + + for (int pvIdx = 0; pvIdx < JacobianBlock::block_type::cols; ++pvIdx) + { + auto evalCouplingResidual = [&](Scalar priVar) + { + priVarsJ[pvIdx] = priVar; + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + updateCoupledVariables(); + return cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + }; + + // derive the residuals numerically + decltype(evalCouplingResidual(0)) partialDerivs(element().subEntities(GridView::dimension)); + + const auto& paramGroup = cm_->problem(domainJ).paramGroup(); + static const int numDiffMethod = getParamFromGroup(paramGroup, "Assembly.NumericDifferenceMethod"); + static const auto epsCoupl = cm_->numericEpsilon(domainJ, paramGroup); + + NumericDifferentiation::partialDerivative(evalCouplingResidual, origPriVarsJ[pvIdx], partialDerivs, origResidual, + epsCoupl(origPriVarsJ[pvIdx], pvIdx), numDiffMethod); + + // update the global stiffness matrix with the current partial derivatives + for (auto&& scv : scvs(fvGeometry())) + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // A[i][col][eqIdx][pvIdx] is the rate of change of + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof + // 'col'. + + // If the dof is coupled by a Dirichlet condition, + // set the derived value only once (i.e. overwrite existing values). + // For other dofs, add the contribution of the partial derivative. + if (fvGeometry().gridGeometry().dofOnBoundary(scv.dofIndex())) + { + const auto bcTypes = problem().boundaryTypes(element(), scv); + if (bcTypes.isCouplingDirichlet(eqIdx)) + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] = partialDerivs[scv.localDofIndex()][eqIdx]; + else if (bcTypes.isDirichlet(eqIdx)) + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] = 0.0; + } + else + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] += partialDerivs[scv.localDofIndex()][eqIdx]; + } + } + + // restore the current element solution + priVarsJ[pvIdx] = origPriVarsJ[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + } + + // Restore original state of the flux vars cache and/or vol vars. + // This has to be done in case they depend on variables of domainJ before + // we continue with the numeric derivative w.r.t the next globalJ. Otherwise, + // the next "origResidual" will be incorrect. 
+ updateCoupledVariables(); + } + } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector>& contexts_; + std::vector& elementVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; + std::shared_ptr cm_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/cclocalassembler.hh b/dumux/multidomain/assembly/cclocalassembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..44da06244aff1fd9441d5eafa8c15e65bfe6be74 --- /dev/null +++ b/dumux/multidomain/assembly/cclocalassembler.hh @@ -0,0 +1,549 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution per element + * for cell-centered schemes in the context of multidomain models. + */ +#ifndef DUMUX_MULTIDOMAIN_CC_LOCAL_ASSEMBLER_HH +#define DUMUX_MULTIDOMAIN_CC_LOCAL_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. + * \tparam Assembler The grid-wide assembler type + */ +template +class SubDomainCCLocalAssembler +{ + using CouplingManager = typename Assembler::CouplingManager; + using GridVariables = typename Assembler::template SubDomainGridVariables; + using GridGeometry = typename Assembler::template SubDomainGridGeometry; + using LocalOperator = typename Assembler::template SubDomainLocalOperator; + + using Operators = typename LocalOperator::Operators; + using NumEqVector = typename Operators::NumEqVector; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + using Context = typename CouplingManager::template CouplingContext; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::SolutionVector; + + // TODO: remove this assert? Get numEq from somewhere else? 
+ static constexpr int numEq = PrimaryVariables::size(); + static constexpr int maxElementStencilSize = GridGeometry::maxElementStencilSize; + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + // static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + static constexpr auto domainId = typename Dune::index_constant(); + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit SubDomainCCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector>& contexts, + std::vector& elemVars, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , contexts_(contexts) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + , cm_(cm) + { + assert(elemVars.size() == 1); + assert(fvGeometry_.numScv() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit SubDomainCCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector>& contexts, + std::vector& elemVars, + std::shared_ptr stageParams, + std::shared_ptr cm) + : element_(element) + , fvGeometry_(fvGeometry) + , contexts_(contexts) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + , cm_(cm) + { assert(fvGeometry_.numScv() == 1); } + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacRow& jacRow, SubRes& res) + { + // TODO: treat ghost elements for parallelism + + // assemble the diagonal block + const auto residual = assembleJacobianAndResidual_(jacRow[domainId]); + res[scvs(fvGeometry()).begin()->dofIndex()] += residual; + + // assemble the coupling blocks + using namespace Dune::Hybrid; + forEach(integralRange(Dune::Hybrid::size(jacRow)), [&](auto&& j) + { + if constexpr (j != id) + { + static constexpr auto domainJ = Dune::index_constant(); + assembleJacobianCoupling_(j, jacRow[domainJ]); + } + }); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobian(JacobianBlock& jac) + { + assembleJacobianAndResidual_(jac); + } + + /*! + * \brief Assemble the residual only + */ + template + void assembleResidual(SubRes& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? 
localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + cm_->setCouplingContext(contexts_[k]); + LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elementVariables_.back(); } + + //! Still needed by some coupling managers + //! \todo TODO: Double check if still necessary after complete + //! integration of new time integration schemes. + const auto& curElemVolVars() const { return elemVariables().elemVolVars(); } + const auto& elemFluxVarsCache() const { return elemVariables().elemFluxVarsCache(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + // TODO: This is currently required by some coupling managers. + // We should get rid of this coupling probabaly. + static constexpr bool isImplicit() { return /*this is buggy*/ true; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + ElementVariables& elemVariables_() + { return elementVariables_.back(); } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. + */ + template + NumEqVector assembleJacobianAndResidual_(JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution. 
+ * \note This specialization is for the box scheme with numeric differentiation + */ + template + NumEqVector assembleJacobianAndResidualNumeric_(JacobianBlock& A) + { + using Problem = std::decay_t; + + // get the variables of the current stage + auto& curVariables = elemVariables_(); + auto& curElemVolVars = curVariables.elemVolVars(); + auto& curElemFluxVarsCache = curVariables.elemFluxVarsCache(); + const auto& x = curVariables.gridVariables().dofs(); + + // get stencil informations + const auto& gridGeometry = fvGeometry().gridGeometry(); + const auto& connectivityMap = gridGeometry.connectivityMap(); + const auto globalI = gridGeometry.elementMapper().index(element_); + const auto numNeighbors = connectivityMap[globalI].size(); + + // container to store the neighboring elements + Dune::ReservedVector neighborElements; + neighborElements.resize(numNeighbors); + + // assemble the undeflected residual + using Residuals = ReservedBlockVector; + Residuals origResiduals(numNeighbors + 1); origResiduals = 0.0; + origResiduals[0] = evalLocalResidual()[0]; + + // lambda for convenient evaluation of the fluxes across scvfs in the neighbors + // if the neighbor is a ghost we don't want to add anything to their residual + // so we return 0 and omit computing the flux + auto evalNeighborFlux = [&] (const auto& neighbor, const auto& scvf) + { + if (neighbor.partitionType() == Dune::GhostEntity) + return NumEqVector(0.0); + else + return Operators::flux(problem(), neighbor, fvGeometry(), + curElemVolVars, curElemFluxVarsCache, scvf); + }; + + // get the elements in which we need to evaluate the fluxes + // and calculate these in the undeflected state + unsigned int j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + { + neighborElements[j-1] = gridGeometry.element(dataJ.globalJ); + for (const auto scvfIdx : dataJ.scvfsJ) + origResiduals[j] += evalNeighborFlux(neighborElements[j-1], fvGeometry().scvf(scvfIdx)); + ++j; + } + + // reference to the element's scv (needed later) and corresponding vol vars + // TODO: Support the case of caching?! + const auto& scv = fvGeometry().scv(globalI); + auto& curVolVars = curElemVolVars[scv]; + + // save a copy of the original privars and vol vars in order + // to restore the original solution after deflection + const auto origPriVars = x[globalI]; + const auto origVolVars = curVolVars; + + // element solution container to be deflected + auto elemSol = elementSolution(element_, x, gridGeometry); + + // derivatives in the neighbors with repect to the current elements + // in index 0 we save the derivative of the element residual with respect to it's own dofs + Residuals partialDerivs(numNeighbors + 1); + for (int pvIdx = 0; pvIdx < numEq; ++pvIdx) + { + partialDerivs = 0.0; + + auto evalResiduals = [&](Scalar priVar) + { + Residuals partialDerivsTmp(numNeighbors + 1); + partialDerivsTmp = 0.0; + + // update the volume variables and the flux var cache + elemSol[0][pvIdx] = priVar; + curVolVars.update(elemSol, problem(), element_, scv); + curElemFluxVarsCache.update(element_, fvGeometry(), curElemVolVars); + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[0], pvIdx); + + // TODO: UPDATE GLOBAL FLUX VARS CACHE HERE? 
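+                // (with grid-wide flux variables caching, roughly the following would be
+                //  needed here; hypothetical sketch, the exact call depends on the caching policy:
+                //      curVariables.gridVariables().gridFluxVarsCache().updateElement(element_, fvGeometry(), curElemVolVars); )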
+ + // calculate the residual with the deflected primary variables + partialDerivsTmp[0] = evalLocalResidual()[0]; + + // calculate the fluxes in the neighbors with the deflected primary variables + for (std::size_t k = 0; k < numNeighbors; ++k) + for (auto scvfIdx : connectivityMap[globalI][k].scvfsJ) + partialDerivsTmp[k+1] += evalNeighborFlux(neighborElements[k], fvGeometry().scvf(scvfIdx)); + + return partialDerivsTmp; + }; + + // derive the residuals numerically + static const NumericEpsilon eps_{problem().paramGroup()}; + static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod"); + NumericDifferentiation::partialDerivative(evalResiduals, elemSol[0][pvIdx], partialDerivs, origResiduals, + eps_(elemSol[0][pvIdx], pvIdx), numDiffMethod); + + // Correct derivative for ghost elements, i.e. set a 1 for the derivative w.r.t. the + // current primary variable and a 0 elsewhere. As we always solve for a delta of the + // solution with repect to the initial one, this results in a delta of 0 for ghosts. + if (elementIsGhost_) + { + partialDerivs[0] = 0.0; + partialDerivs[0][pvIdx] = 1.0; + } + + // For instationary simulations, scale the coupling + // fluxes of the current stage correctly + if (stageParams_) + { + for (std::size_t k = 0; k < numNeighbors; ++k) + partialDerivs[k+1] *= stageParams_->spatialWeight(stageParams_->size()-1); + } + + // add the current partial derivatives to the global jacobian matrix + // no special treatment is needed if globalJ is a ghost because then derivatives have been assembled to 0 above + if constexpr (Problem::enableInternalDirichletConstraints()) + { + // check if own element has internal Dirichlet constraint + const auto internalDirichletConstraintsOwnElement = this->problem().hasInternalDirichletConstraint(element_, scv); + const auto dirichletValues = this->problem().internalDirichlet(element_, scv); + + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (internalDirichletConstraintsOwnElement[eqIdx]) + { + origResiduals[0][eqIdx] = origVolVars.priVars()[eqIdx] - dirichletValues[eqIdx]; + A[globalI][globalI][eqIdx][pvIdx] = (eqIdx == pvIdx) ? 
1.0 : 0.0; + } + else + A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx]; + } + + // off-diagonal entries + j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + { + const auto& neighborElement = neighborElements[j-1]; + const auto& neighborScv = fvGeometry().scv(dataJ.globalJ); + const auto internalDirichletConstraintsNeighbor = problem().hasInternalDirichletConstraint(neighborElement, neighborScv); + + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (internalDirichletConstraintsNeighbor[eqIdx]) + A[dataJ.globalJ][globalI][eqIdx][pvIdx] = 0.0; + else + A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j][eqIdx]; + } + + ++j; + } + } + else // no internal Dirichlet constraints specified + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // the diagonal entries + A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx]; + + // off-diagonal entries + j = 1; + for (const auto& dataJ : connectivityMap[globalI]) + A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j++][eqIdx]; + } + } + + // restore the original state of the scv's volume variables + curVolVars = origVolVars; + + // restore the current element solution + elemSol[0][pvIdx] = origPriVars[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainId, globalI, elemSol[0], pvIdx); + } + + // restore original state of the flux vars cache in case of global caching. + // This has to be done in order to guarantee that everything is in an undeflected + // state before the assembly of another element is called. In the case of local caching + // this is obsolete because the elemFluxVarsCache used here goes out of scope after this. + // We only have to do this for the last primary variable, for all others the flux var cache + // is updated with the correct element volume variables before residual evaluations + // TODO: RESET GLOBAL FLUX VARS CACHE HERE + + // return the original residual + return origResiduals[0]; + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + template + void assembleJacobianCoupling_(Dune::index_constant domainJ, JacobianBlock& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianCouplingNumeric_(domainJ, A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives with respect to the given element by + * means of numeric differentiation and adds them to the global matrix. + */ + template + void assembleJacobianCouplingNumeric_(Dune::index_constant domainJ, JacobianBlock& A) + { + using Problem = std::decay_t; + + const auto& stencil = cm_->couplingStencil(domainId, element(), domainJ); + const auto& dofs = cm_->dofs(domainJ); + + auto& elemVolVars = elemVariables_().elemVolVars(); + auto& elemFluxVarsCache = elemVariables_().elemFluxVarsCache(); + + // TODO: How to handle the case of caching? + auto updateCoupledVariables = [&] () + { + // Update ourself after the context has been modified. Depending on the + // type of caching, other objects might have to be updated. All ifs can be optimized away. 
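+            // (with grid-wide caching enabled, the grid vol vars / grid flux vars cache would
+            //  have to be passed here instead of the element-local views; the names in this
+            //  hypothetical variant are not part of this patch:
+            //      cm_->updateCoupledVariables(domainId, *this, gridVolVars, gridFluxVarsCache); )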
+ cm_->updateCoupledVariables(domainId, *this, elemVolVars, elemFluxVarsCache); + }; + + for (const auto globalJ : stencil) + { + // undeflected privars and privars to be deflected + const auto origPriVarsJ = dofs[globalJ]; + auto priVarsJ = origPriVarsJ; + + // the undeflected coupling residual + const auto origResidual = cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + + for (int pvIdx = 0; pvIdx < JacobianBlock::block_type::cols; ++pvIdx) + { + auto evalCouplingResidual = [&](Scalar priVar) + { + priVarsJ[pvIdx] = priVar; + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + updateCoupledVariables(); + return cm_->evalCouplingResidual(domainId, *this, domainJ, globalJ); + }; + + // derive the residuals numerically + decltype(evalCouplingResidual(0)) partialDerivs(element().subEntities(GridView::dimension)); + + const auto& paramGroup = cm_->problem(domainJ).paramGroup(); + static const int numDiffMethod = getParamFromGroup(paramGroup, "Assembly.NumericDifferenceMethod"); + static const auto epsCoupl = cm_->numericEpsilon(domainJ, paramGroup); + + NumericDifferentiation::partialDerivative(evalCouplingResidual, origPriVarsJ[pvIdx], partialDerivs, origResidual, + epsCoupl(origPriVarsJ[pvIdx], pvIdx), numDiffMethod); + + if constexpr (Problem::enableInternalDirichletConstraints()) + DUNE_THROW(Dune::NotImplemented, "Internal Dirichlet"); + + // update the global stiffness matrix with the current partial derivatives + for (auto&& scv : scvs(fvGeometry())) + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + + // A[i][col][eqIdx][pvIdx] is the rate of change of + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof + // 'col'. + A[scv.dofIndex()][globalJ][eqIdx][pvIdx] += partialDerivs[scv.localDofIndex()][eqIdx]; + } + } + + // restore the current element solution + priVarsJ[pvIdx] = origPriVarsJ[pvIdx]; + + // restore the undeflected state of the coupling context + cm_->updateCouplingContext(domainId, *this, domainJ, globalJ, priVarsJ, pvIdx); + } + + // Restore original state of the flux vars cache and/or vol vars. + // This has to be done in case they depend on variables of domainJ before + // we continue with the numeric derivative w.r.t the next globalJ. Otherwise, + // the next "origResidual" will be incorrect. + updateCoupledVariables(); + } + } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector>& contexts_; + std::vector& elementVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; + std::shared_ptr cm_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/multidomain/assembly/localassembler.hh b/dumux/multidomain/assembly/localassembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..7b0a644740d15b0fdb890b033f4c33986d18bdc6 --- /dev/null +++ b/dumux/multidomain/assembly/localassembler.hh @@ -0,0 +1,66 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. 
* + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Helper alias to select a local assembler based on the discretization scheme. + */ +#ifndef DUMUX_MD_LOCAL_ASSEMBLER_HH +#define DUMUX_MD_LOCAL_ASSEMBLER_HH + +#include +#include "boxlocalassembler.hh" +#include "cclocalassembler.hh" + +namespace Dumux { +namespace Impl { + + template + struct MDLocalAssemblerChooser; + + template + struct MDLocalAssemblerChooser + { using type = SubDomainBoxLocalAssembler; }; + + template + struct MDLocalAssemblerChooser + { using type = SubDomainCCLocalAssembler; }; + + template + struct MDLocalAssemblerChooser + { using type = SubDomainCCLocalAssembler; }; + + template + using MDLocalAssemblerType = typename MDLocalAssemblerChooser::discMethod, + diffMethod>::type; + +} // end namespace Immpl + +/*! + * \ingroup Assembly + * \brief Helper alias to select the local assembler type from an assembler. + */ +template +using MultiDomainLocalAssembler = Impl::template MDLocalAssemblerType; + +} // namespace Dumux + +#endif diff --git a/dumux/multidomain/couplingmanager.hh b/dumux/multidomain/couplingmanager.hh index b1fbbb629653c319ee4c3ab836b4586e279691d8..27b458e0d6681a3844ed14e58fdc52c2b19e52bc 100644 --- a/dumux/multidomain/couplingmanager.hh +++ b/dumux/multidomain/couplingmanager.hh @@ -45,6 +45,7 @@ template class CouplingManager { template using SubDomainTypeTag = typename Traits::template SubDomain::TypeTag; + template using SubDomainSolutionVector = GetPropType, Properties::SolutionVector>; template using PrimaryVariables = GetPropType, Properties::PrimaryVariables>; template using GridView = typename GetPropType, Properties::GridGeometry>::GridView; template using Element = typename GridView::template Codim<0>::Entity; @@ -269,6 +270,14 @@ public: DUNE_THROW(Dune::InvalidStateException, "The problem pointer was not set or has already expired. Use setSubProblems() before calling this function"); } + /*! + * \brief Return a reference to the current solution in a subdomain + * \param domainIdx The domain index + */ + template + const SubDomainSolutionVector& dofs(Dune::index_constant domainId) const + { return curSol_[domainId]; } + protected: /*! 
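The dofs() accessor added above is what the new subdomain local assemblers rely on to obtain the
undeflected degrees of freedom of a coupled domain, instead of keeping their own reference to the
solution vectors. A minimal usage sketch, mirroring the coupling-Jacobian loops of the assemblers
above (variable names as used there, otherwise illustrative only):

    // inside assembleJacobianCouplingNumeric_ of a subdomain local assembler
    const auto& dofsJ = cm_->dofs(domainJ);                // solution vector of the coupled domain J
    for (const auto globalJ : cm_->couplingStencil(domainId, element(), domainJ))
    {
        const auto origPriVarsJ = dofsJ[globalJ];          // undeflected primary variables at the coupled dof
        auto priVarsJ = origPriVarsJ;                      // copy that is deflected during differentiation
        // ... deflect priVarsJ[pvIdx], update the coupling context, differentiate, restore ...
    }
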
diff --git a/dumux/multidomain/facet/box/couplingmanager.hh b/dumux/multidomain/facet/box/couplingmanager.hh index 732705b5b77f5b8f5407c685d4b49c44a16d9dd3..5485fdf5c40e0e3377ab6048e8d98a090d19648a 100644 --- a/dumux/multidomain/facet/box/couplingmanager.hh +++ b/dumux/multidomain/facet/box/couplingmanager.hh @@ -359,6 +359,11 @@ public: typename LocalResidual::ElementResidualVector res(fvGeometry.numScv()); res = 0.0; + ElementBoundaryTypes elemBCTypes; + elemBCTypes.update(this->problem(bulkId), bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry()); + + LocalResidual localResidual(&(this->problem(bulkId))); + // compute fluxes across the coupling scvfs const auto& couplingScvfs = map.find(bulkContext_.elementIdx)->second.dofToCouplingScvfMap.at(dofIdxGlobalJ); for (auto scvfIdx : couplingScvfs) @@ -368,9 +373,9 @@ public: res[insideScv.localDofIndex()] += evalBulkFluxes_( bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry(), bulkLocalAssembler.curElemVolVars(), - bulkLocalAssembler.elemBcTypes(), + elemBCTypes, bulkLocalAssembler.elemFluxVarsCache(), - bulkLocalAssembler.localResidual(), + localResidual, std::array, 1>({scvfIdx}) ); } @@ -479,7 +484,7 @@ public: { const auto& ldGridGeometry = this->problem(lowDimId).gridGeometry(); - const auto& ldSol = Assembler::isImplicit() ? this->curSol()[lowDimId] : assembler.prevSol()[lowDimId]; + const auto& ldSol = this->curSol()[lowDimId]; const auto elemJ = ldGridGeometry.element(lowDimElemIdx); auto fvGeom = localView(ldGridGeometry); fvGeom.bindElement(elemJ); @@ -538,7 +543,7 @@ public: auto bulkElemVolVars = localView(assembler.gridVariables(bulkId).curGridVolVars()); auto bulkElemFluxVarsCache = localView(assembler.gridVariables(bulkId).gridFluxVarsCache()); - const auto& bulkSol = Assembler::isImplicit() ? this->curSol()[bulkId] : assembler.prevSol()[bulkId]; + const auto& bulkSol = this->curSol()[bulkId]; const auto curBulkElem = bulkGridGeom.element(embedments[i].first); bulkFvGeom.bind(curBulkElem); bulkElemVolVars.bind(curBulkElem, bulkFvGeom, bulkSol); @@ -552,7 +557,7 @@ public: } // finally, set the local residual - lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >(assembler.localResidual(bulkId)); + lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >( &(this->problem(bulkId)) ); } } diff --git a/dumux/multidomain/facet/box/subcontrolvolumeface.hh b/dumux/multidomain/facet/box/subcontrolvolumeface.hh index f848eeb2afaa29348309e07d10c797fec9e4c7a5..88aa660ff9ea2e27fb0a05d5e45f5092b5ab020e 100644 --- a/dumux/multidomain/facet/box/subcontrolvolumeface.hh +++ b/dumux/multidomain/facet/box/subcontrolvolumeface.hh @@ -131,11 +131,11 @@ public: Scalar area() const { return area_; } - //! returns bolean if the sub control volume face is on the boundary + //! returns true if the sub control volume face is on the boundary bool boundary() const { return boundary_; } - //! returns bolean if the sub control volume face is on an interior boundary + //! returns true if the sub control volume face is on an interior boundary bool interiorBoundary() const { return interiorBoundary_; } @@ -144,7 +144,7 @@ public: const GlobalPosition& unitOuterNormal() const { return unitOuterNormal_; } - //! index of the inside sub control volume for spatial param evaluation + //! index of the inside sub control volume LocalIndexType insideScvIdx() const { return scvIndices_[0]; } @@ -152,14 +152,20 @@ public: GridIndexType index() const { return scvfIndex_; } - //! 
index of the outside sub control volume for spatial param evaluation - //! This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { - assert(!boundary()); + assert(!boundary() && !interiorBoundary()); return scvIndices_[1]; } + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast(!(boundary() || interiorBoundary())); + } + //! returns the element-local index of the facet this scvf is embedded in. //! This is only valid to be called for scvfs on domain/interior boundaries. LocalIndexType facetIndexInElement() const diff --git a/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh b/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh index 19fca678a62f42656425f09dd2e7f29f1b994be1..b90cec7465d45c26a8cfcb637e0fc48e0fb06f63 100644 --- a/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh +++ b/dumux/multidomain/facet/cellcentered/tpfa/couplingmanager.hh @@ -314,13 +314,15 @@ public: assert(map.find(bulkContext_.elementIdx) != map.end()); assert(bulkContext_.elementIdx == this->problem(bulkId).gridGeometry().elementMapper().index(bulkLocalAssembler.element())); + LocalResidual localResidual(&(this->problem(bulkId))); + typename LocalResidual::ElementResidualVector res(1); res = 0.0; res[0] = evalBulkFluxes(bulkLocalAssembler.element(), bulkLocalAssembler.fvGeometry(), bulkLocalAssembler.curElemVolVars(), bulkLocalAssembler.elemFluxVarsCache(), - bulkLocalAssembler.localResidual(), + localResidual, map.find(bulkContext_.elementIdx)->second.dofToCouplingScvfMap.at(dofIdxGlobalJ)); return res; } @@ -433,7 +435,7 @@ public: for (const auto lowDimElemIdx : elementStencil) { - const auto& ldSol = Assembler::isImplicit() ? this->curSol()[lowDimId] : assembler.prevSol()[lowDimId]; + const auto& ldSol = this->curSol()[lowDimId]; const auto& ldProblem = this->problem(lowDimId); const auto& ldGridGeometry = this->problem(lowDimId).gridGeometry(); @@ -496,12 +498,11 @@ public: // then simply bind the local views of that first neighbor auto bulkFvGeom = localView(bulkGridGeom); - auto bulkElemVolVars = Assembler::isImplicit() ? localView(assembler.gridVariables(bulkId).curGridVolVars()) - : localView(assembler.gridVariables(bulkId).prevGridVolVars()); + auto bulkElemVolVars = localView(assembler.gridVariables(bulkId).curGridVolVars()); auto bulkElemFluxVarsCache = localView(assembler.gridVariables(bulkId).gridFluxVarsCache()); // evaluate variables on old/new time level depending on time disc scheme - const auto& bulkSol = Assembler::isImplicit() ? 
this->curSol()[bulkId] : assembler.prevSol()[bulkId]; + const auto& bulkSol = this->curSol()[bulkId]; bulkFvGeom.bind(bulkElem); bulkElemVolVars.bind(bulkElem, bulkFvGeom, bulkSol); bulkElemFluxVarsCache.bind(bulkElem, bulkFvGeom, bulkElemVolVars); @@ -510,7 +511,7 @@ public: lowDimContext_.bulkFvGeometry = std::make_unique< FVElementGeometry >( std::move(bulkFvGeom) ); lowDimContext_.bulkElemVolVars = std::make_unique< ElementVolumeVariables >( std::move(bulkElemVolVars) ); lowDimContext_.bulkElemFluxVarsCache = std::make_unique< ElementFluxVariablesCache >( std::move(bulkElemFluxVarsCache) ); - lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >(assembler.localResidual(bulkId)); + lowDimContext_.bulkLocalResidual = std::make_unique< LocalResidual >( &(this->problem(bulkId)) ); } } diff --git a/dumux/multidomain/fvgridvariables.hh b/dumux/multidomain/fvgridvariables.hh index 82f25a464b995dd7099234b3334b56caed8534df..ddaddb36d04441474de580d3e554d3b10a9588b0 100644 --- a/dumux/multidomain/fvgridvariables.hh +++ b/dumux/multidomain/fvgridvariables.hh @@ -41,7 +41,7 @@ namespace Dumux { template class MultiDomainFVGridVariables { - using SolutionVector = typename MDTraits::SolutionVector; + using ThisType = MultiDomainFVGridVariables; static constexpr std::size_t numSubDomains = MDTraits::numSubDomains; template @@ -53,6 +53,9 @@ class MultiDomainFVGridVariables using Problems = typename MDTraits::template TupleOfSharedPtrConst; public: + //! export the solution vector type required for update + using SolutionVector = typename MDTraits::SolutionVector; + //! export base types of the stored type template using Type = typename MDTraits::template SubDomain::GridVariables; @@ -64,11 +67,33 @@ public: //! export type of tuple of pointers using TupleType = typename MDTraits::template Tuple; + //! export underlying scalar type + using Scalar = typename MDTraits::Scalar; + + //! export the time representation + using TimeLevel = Dumux::TimeLevel; + /*! * \brief The default constructor */ MultiDomainFVGridVariables() = default; + /*! + * \brief Construction from an existing tuple + */ + MultiDomainFVGridVariables(const TupleType& tuple) + : gridVars_(tuple) + {} + + /*! + * \brief Construction from an existing tuple and solution + */ + MultiDomainFVGridVariables(const TupleType& tuple, + const SolutionVector& sol) + : gridVars_(tuple) + , dofs_(sol) + {} + /*! * \brief Contruct the grid variables * \param gridGeometries a tuple of grid geometry shared pointers @@ -84,9 +109,12 @@ public: }); } + //! TODO: Adjust to all interfaces of FVGridVariables + //! initialize all variables void init(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -94,9 +122,21 @@ public: }); } + //! update the time level only + void updateTime(const TimeLevel& timeLevel) + { + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto&& id) + { + elementAt(gridVars_, id)->updateTime(timeLevel); + }); + } + //! update all variables void update(const SolutionVector& sol, bool forceFluxCacheUpdate = false) { + // TODO: This should update the coupling manager? + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -107,6 +147,7 @@ public: //! update all variables after grid adaption void updateAfterGridAdaption(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -130,6 +171,7 @@ public: //! 
resets state to the one before time integration void resetTimeStep(const SolutionVector& sol) { + dofs_ = sol; using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { @@ -157,6 +199,9 @@ public: void set(PtrType p, Dune::index_constant id = Dune::index_constant{}) { Dune::Hybrid::elementAt(gridVars_, id) = p; } + const SolutionVector& dofs() const { return dofs_; } + SolutionVector& dofs() { return dofs_; } + /*! * \brief return the grid variables tuple we are wrapping * \note the copy is not expensive since it is a tuple of shared pointers @@ -164,10 +209,26 @@ public: TupleType getTuple() { return gridVars_; } -private: + //! make a deep copy of this class + ThisType deepCopy() const + { + TupleType tupleCopy; + + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto&& id) + { + constexpr auto i = std::decay_t::value; + elementAt(tupleCopy, id) = std::make_shared>( *(std::get(gridVars_)) ); + }); + return {tupleCopy, dofs_}; + } + +private: //! a tuple of pointes to all grid variables TupleType gridVars_; + + SolutionVector dofs_; }; } // end namespace Dumux diff --git a/dumux/multidomain/traits.hh b/dumux/multidomain/traits.hh index 22f2552456a96211ea0fb791a382275a7cd32b30..314ff3ab2f359398948c492d62ed6ba86a34ba0d 100644 --- a/dumux/multidomain/traits.hh +++ b/dumux/multidomain/traits.hh @@ -181,6 +181,7 @@ public: using GridVariables =GetPropType, Properties::GridVariables>; using IOFields = GetPropType, Properties::IOFields>; using SolutionVector = GetPropType, Properties::SolutionVector>; + using LocalOperator = GetPropType, Properties::LocalOperator>; }; //\} diff --git a/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh b/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh index 2be9c716cdd6fcd4da11a31bca8d73fea08ab715..259ea09672f55908e467b2b12979383f0d1499ef 100644 --- a/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh +++ b/dumux/porousmediumflow/boxdfm/subcontrolvolumeface.hh @@ -210,7 +210,7 @@ public: Scalar area() const { return area_; } - //! returns bolean if the sub control volume face is on the boundary + //! returns true if the sub control volume face is on the boundary bool boundary() const { return boundary_; } @@ -234,18 +234,25 @@ public: typename BoundaryFlag::value_type boundaryFlag() const { return boundaryFlag_.get(); } - //! Index of the inside sub control volume for spatial param evaluation + //! index of the inside sub control volume LocalIndexType insideScvIdx() const { return scvIndices_[0]; } - //! Index of the outside sub control volume for spatial param evaluation - // This results in undefined behaviour if boundary is true - LocalIndexType outsideScvIdx() const + //! Index of the i-th outside sub control volume or boundary scv index. + // Results in undefined behaviour if i >= numOutsideScvs() + LocalIndexType outsideScvIdx(int i = 0) const { assert(!boundary()); return scvIndices_[1]; } + //! The number of scvs on the outside of this face + std::size_t numOutsideScvs() const + { + return static_cast(!boundary()); + } + + //! 
Returns a corner of the sub control volume face const GlobalPosition& corner(unsigned int localIdx) const { assert(localIdx < corners_.size() && "provided index exceeds the number of corners"); diff --git a/test/multidomain/facet/1p_1p/analytical/main.cc b/test/multidomain/facet/1p_1p/analytical/main.cc index 4ac82416b1f7620b15e4aba2ccf1b4c4bc981076..6b1491afd21c534c3e78cd899d96c482ecbb0ff5 100644 --- a/test/multidomain/facet/1p_1p/analytical/main.cc +++ b/test/multidomain/facet/1p_1p/analytical/main.cc @@ -29,10 +29,22 @@ #include #include +#include + +namespace Dumux::Properties { + // property forward declaration + // TODO: We need this in MDTraits such that the MDAssembler + // can extract the local operator from the sub-typetags. + // Thus, we need to introduce this property for all models? + // Should this be circumvented somehow? (In standard models one can + // select the operator in the main file so far...) + template + struct LocalOperator { using type = UndefinedProperty; }; +} // end namespace Dumux::Properties + #include "problem_bulk.hh" #include "problem_lowdim.hh" -#include #include #include @@ -43,7 +55,7 @@ #include #include -#include +#include #include #include @@ -53,6 +65,10 @@ #include +#include +#include +#include + // obtain/define some types to be used below in the property definitions and in main template< class BulkTypeTag, class LowDimTypeTag > class TestTraits @@ -69,6 +85,29 @@ public: namespace Dumux { namespace Properties { +template +struct SDOperator +{ +private: + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using ImmiscibleOperators = FVImmiscibleOperators; + +public: + using Type = FVLocalOperator; +}; + +template +struct LocalOperator { using type = typename SDOperator::Type; }; +template +struct LocalOperator { using type = typename SDOperator::Type; }; + +template +struct LocalOperator { using type = typename SDOperator::Type; }; +template +struct LocalOperator { using type = typename SDOperator::Type; }; + // set cm property for the box test using BoxTraits = TestTraits; template @@ -326,10 +365,8 @@ int main(int argc, char** argv) } // the assembler - using Assembler = MultiDomainFVAssembler; - auto assembler = std::make_shared( std::make_tuple(bulkProblem, lowDimProblem), - std::make_tuple(bulkFvGridGeometry, lowDimFvGridGeometry), - std::make_tuple(bulkGridVariables, lowDimGridVariables), + using Assembler = MultiDomainAssembler; + auto assembler = std::make_shared( std::make_tuple(bulkFvGridGeometry, lowDimFvGridGeometry), couplingManager); // the linear solver @@ -341,7 +378,14 @@ int main(int argc, char** argv) auto newtonSolver = std::make_shared(assembler, linearSolver, couplingManager); // linearize & solve - newtonSolver->solve(x); + auto mdGridVars = MultiDomainFVGridVariables(std::make_tuple(bulkGridVariables, lowDimGridVariables)); + mdGridVars.update(x); + newtonSolver->solve(mdGridVars); + + // update solution vector used here in main + // with the new assembly we wouldn't need to care about a + // solution vector, we'd only need to have grid variables + x = mdGridVars.dofs(); // update grid variables for output bulkGridVariables->update(x[bulkId]); diff --git a/test/multidomain/poromechanics/el2p/CMakeLists.txt b/test/multidomain/poromechanics/el2p/CMakeLists.txt index 35eab6328f1d183ae495d057b85ebc0228032ac8..dd21128e96093dcf270d65e3d4c34e66d29e8a41 100644 --- a/test/multidomain/poromechanics/el2p/CMakeLists.txt +++ 
b/test/multidomain/poromechanics/el2p/CMakeLists.txt @@ -8,9 +8,9 @@ dumux_add_test(NAME test_md_poromechanics_el2p COMMAND ${CMAKE_SOURCE_DIR}/bin/testing/runtest.py CMD_ARGS --script fuzzy --files ${CMAKE_SOURCE_DIR}/test/references/test_md_poromechanics_el2p_2p-reference.vtu - ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_twop-00010.vtu + ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_twop-00009.vtu ${CMAKE_SOURCE_DIR}/test/references/test_md_poromechanics_el2p_poroelastic-reference.vtu - ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_poroelastic-00010.vtu + ${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p_poroelastic-00009.vtu --command "${CMAKE_CURRENT_BINARY_DIR}/test_md_poromechanics_el2p params.input -Vtk.OutputName test_md_poromechanics_el2p" --zeroThreshold {"u":1e-14}) diff --git a/test/multidomain/poromechanics/el2p/main.cc b/test/multidomain/poromechanics/el2p/main.cc index 7291bae011d2984e9b052469c13e4d3a4f592e2f..08852b7108047ae496e8342e20c814f2f8d2f29c 100644 --- a/test/multidomain/poromechanics/el2p/main.cc +++ b/test/multidomain/poromechanics/el2p/main.cc @@ -33,6 +33,18 @@ #include "problem_poroelastic.hh" #include + +namespace Dumux::Properties { + // property forward declaration + // TODO: We need this in MDTraits such that the MDAssembler + // can extract the local operator from the sub-typetags. + // Thus, we need to introduce this property for all models? + // Should this be circumvented somehow? (In standard models one can + // select the operator in the main file so far...) + template + struct LocalOperator { using type = UndefinedProperty; }; +} // end namespace Dumux::Properties + #include #include @@ -40,9 +52,15 @@ #include #include -#include +#include +#include #include +#include +#include +#include +#include + #include #include @@ -71,6 +89,33 @@ private: public: using type = PoroMechanicsCouplingManager< Traits >; }; + +template +struct LocalOperator +{ +private: + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using ImmiscibleOperators = FVImmiscibleOperators; + +public: + using type = FVLocalOperator; +}; + +template +struct LocalOperator +{ +private: + using ModelTraits = GetPropType; + using ElemVariables = typename GetPropType::LocalView; + using StressType = GetPropType; + using GeomechanicsOperators = FVGeomechanicsOperators; + +public: + using type = FVLocalOperator; +}; + } // end namespace Properties } // end namespace Dumux @@ -139,7 +184,6 @@ int main(int argc, char** argv) x[poroMechId].resize(poroMechFvGridGeometry->numDofs()); twoPProblem->applyInitialSolution(x[twoPId]); poroMechProblem->applyInitialSolution(x[poroMechId]); - SolutionVector xOld = x; // initialize the coupling manager couplingManager->init(twoPProblem, poroMechProblem, x); @@ -171,19 +215,23 @@ int main(int argc, char** argv) PoroMechOutputFields::initOutputModule(poroMechVtkWriter); // write initial solution - twoPVtkWriter.write(0.0); - poroMechVtkWriter.write(0.0); + // twoPVtkWriter.write(0.0); + // poroMechVtkWriter.write(0.0); //instantiate time loop auto timeLoop = std::make_shared>(0.0, dt, tEnd); timeLoop->setMaxTimeStepSize(maxDT); + // use implicit Euler for time integration + auto timeMethod = std::make_shared>(); + // the assembler - using Assembler = MultiDomainFVAssembler; - auto assembler = std::make_shared( std::make_tuple(twoPProblem, poroMechProblem), - std::make_tuple(twoPFvGridGeometry, poroMechFvGridGeometry), - 
std::make_tuple(twoPGridVariables, poroMechGridVariables), - couplingManager, timeLoop, xOld); + using Assembler = MultiDomainAssembler; + auto assembler = std::make_shared( std::make_tuple(twoPFvGridGeometry, poroMechFvGridGeometry), + couplingManager, *timeMethod); + + auto mdGridVars = MultiDomainFVGridVariables(std::make_tuple(twoPGridVariables, poroMechGridVariables)); + mdGridVars.update(x); // the linear solver using LinearSolver = UMFPackBackend; @@ -191,21 +239,24 @@ int main(int argc, char** argv) // the non-linear solver using NewtonSolver = Dumux::MultiDomainNewtonSolver; - NewtonSolver nonLinearSolver(assembler, linearSolver, couplingManager); + auto nonLinearSolver = std::make_shared(assembler, linearSolver, couplingManager); + + // the time stepper for time integration + using TimeStepper = MultiStageTimeStepper; + TimeStepper timeStepper(nonLinearSolver, timeMethod); // time loop timeLoop->start(); do { - // solve the non-linear system with time step control - nonLinearSolver.solve(x, *timeLoop); - - // make the new solution the old solution - xOld = x; + // do time integraiton + timeStepper.step(mdGridVars, timeLoop->time(), timeLoop->timeStepSize()); // advance to the time loop to the next step timeLoop->advanceTimeStep(); - twoPGridVariables->advanceTimeStep(); - poroMechGridVariables->advanceTimeStep(); + x[twoPId] = mdGridVars[twoPId].dofs(); + x[poroMechId] = mdGridVars[poroMechId].dofs(); + // twoPGridVariables->advanceTimeStep(); + // poroMechGridVariables->advanceTimeStep(); // write vtk output twoPVtkWriter.write(timeLoop->time()); @@ -214,23 +265,22 @@ int main(int argc, char** argv) // report statistics of this time step timeLoop->reportTimeStep(); - using TwoPPrimaryVariables = GetPropType; - TwoPPrimaryVariables storage(0); - const auto& twoPLocalResidual = assembler->localResidual(twoPId); - for (const auto& element : elements(leafGridView, Dune::Partitions::interior)) - { - auto storageVec = twoPLocalResidual.evalStorage(*twoPProblem, element, *twoPFvGridGeometry, *twoPGridVariables, x[twoPId]); - storage += storageVec[0]; - } - std::cout << "time, mass CO2 (kg), mass brine (kg):" << std::endl; - std::cout << timeLoop->time() << " , " << storage[1] << " , " << storage[0] << std::endl; - std::cout << "***************************************" << std::endl; + // using TwoPPrimaryVariables = GetPropType; + // TwoPPrimaryVariables storage(0); + // const auto& twoPLocalResidual = assembler->localResidual(twoPId); + // for (const auto& element : elements(leafGridView, Dune::Partitions::interior)) + // { + // auto storageVec = twoPLocalResidual.evalStorage(*twoPProblem, element, *twoPFvGridGeometry, *twoPGridVariables, x[twoPId]); + // storage += storageVec[0]; + // } + // std::cout << "time, mass CO2 (kg), mass brine (kg):" << std::endl; + // std::cout << timeLoop->time() << " , " << storage[1] << " , " << storage[0] << std::endl; + // std::cout << "***************************************" << std::endl; } while (!timeLoop->finished()); - - // output some Newton statistics - nonLinearSolver.report(); + // // output some Newton statistics + // nonLinearSolver.report(); timeLoop->finalize(leafGridView.comm()); diff --git a/test/multidomain/poromechanics/el2p/spatialparams_2p.hh b/test/multidomain/poromechanics/el2p/spatialparams_2p.hh index ec9573bb9ff957293cfede5d5b9cb339aa8b2cdc..f190094cb1390770953d814477005b4d4216eef8 100644 --- a/test/multidomain/poromechanics/el2p/spatialparams_2p.hh +++ b/test/multidomain/poromechanics/el2p/spatialparams_2p.hh @@ -86,7 +86,7 @@ 
public: static constexpr auto poroMechId = CouplingManager::poroMechId; const auto& poroMechGridGeom = couplingManagerPtr_->problem(poroMechId).gridGeometry(); - const auto poroMechElemSol = elementSolution(element, couplingManagerPtr_->curSol()[poroMechId], poroMechGridGeom); + const auto& poroMechElemSol = couplingManagerPtr_->getPoroMechElemSol(element); // evaluate the deformation-dependent porosity at the scv center return PorosityDeformation::evaluatePorosity(poroMechGridGeom, element, scv.center(), poroMechElemSol, initPorosity_);
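
The last hunk replaces the on-the-fly construction of the poro-mechanical element solution by an
accessor on the coupling manager. That accessor is not part of the hunks shown here; judging from
the removed line, a minimal sketch could look as follows (hypothetical, the actual implementation
may instead return a reference to an element solution cached in the coupling context):

    // hypothetical accessor on the poro-mechanics coupling manager
    template<class Element>
    auto getPoroMechElemSol(const Element& element) const
    {
        const auto& poroMechGridGeom = this->problem(poroMechId).gridGeometry();
        return elementSolution(element, this->curSol()[poroMechId], poroMechGridGeom);
    }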