From 88b52a86ee5d936a942d56bf6d70e977ff59b185 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 21:30:49 +0100 Subject: [PATCH 01/46] introduce time level class --- dumux/timestepping/CMakeLists.txt | 3 ++ dumux/timestepping/timelevel.hh | 78 +++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 dumux/timestepping/CMakeLists.txt create mode 100644 dumux/timestepping/timelevel.hh diff --git a/dumux/timestepping/CMakeLists.txt b/dumux/timestepping/CMakeLists.txt new file mode 100644 index 0000000000..3e7bc52b16 --- /dev/null +++ b/dumux/timestepping/CMakeLists.txt @@ -0,0 +1,3 @@ +install(FILES +timelevel.hh +DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/timestepping) diff --git a/dumux/timestepping/timelevel.hh b/dumux/timestepping/timelevel.hh new file mode 100644 index 0000000000..0e106539ae --- /dev/null +++ b/dumux/timestepping/timelevel.hh @@ -0,0 +1,78 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \brief Class that represents a time level during time integration. + */ +#ifndef DUMUX_TIME_LEVEL_HH +#define DUMUX_TIME_LEVEL_HH + +namespace Dumux { + +/*! + * \brief Class that represents a time level during time integration. + */ +template +class TimeLevel +{ +public: + + /*! + * \brief Construct a time level with a time. + * \note This can be used in contexts outside of time integration, + * where no information on a previous time or time step size is needed. + */ + explicit TimeLevel(Scalar curTime) + : curTime_(curTime) + , prevTime_(curTime) + , timeStepFraction_(1.0) + {} + + /*! + * \brief Construct a time level with information on an ongoing time step. + * \param curTime The current time level + * \param prevTime The previous time level + * \param dtFraction The fraction of a time step this level corresponds to. + * \note Within a time integration step, several time levels might occur + * when multi-stage methods are used. The argument dtFraction allows + * for determining the time that will be reached at the end of the + * time integration step. + */ + explicit TimeLevel(Scalar curTime, Scalar prevTime, Scalar dtFraction) + : curTime_(curTime) + , prevTime_(prevTime) + , timeStepFraction_(dtFraction) + {} + + //! Return the current time + Scalar current() const { return curTime_; } + //! Return the time at the beginning of time integration + Scalar previous() const { return prevTime_; } + //! 
Return the fraction of the time step this level corresponds to + Scalar timeStepFraction() const { return timeStepFraction_; } + +private: + Scalar curTime_; + Scalar prevTime_; + Scalar timeStepFraction_; +}; + +} // end namespace Dumux + +#endif -- GitLab From a53e3f89d424f730761ea7c80cb763e328907cda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 21:33:18 +0100 Subject: [PATCH 02/46] [common] add base clase for (grid)variables --- dumux/common/CMakeLists.txt | 1 + dumux/common/variables.hh | 133 ++++++++++++++++++++++++++++++++++++ 2 files changed, 134 insertions(+) create mode 100644 dumux/common/variables.hh diff --git a/dumux/common/CMakeLists.txt b/dumux/common/CMakeLists.txt index e26b4dfb2a..86e31cd63a 100644 --- a/dumux/common/CMakeLists.txt +++ b/dumux/common/CMakeLists.txt @@ -48,4 +48,5 @@ timeloop.hh timemanager.hh valgrind.hh variablelengthspline_.hh +variables.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/common) diff --git a/dumux/common/variables.hh b/dumux/common/variables.hh new file mode 100644 index 0000000000..f75731626b --- /dev/null +++ b/dumux/common/variables.hh @@ -0,0 +1,133 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Common + * \copydoc Dumux::Variables + */ +#ifndef DUMUX_VARIABLES_HH +#define DUMUX_VARIABLES_HH + +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Discretization + * \brief Class that represents the variables of a model. + * We assume that models are formulated on the basis of primary and + * possibly secondary variables, where the latter may non-linearly + * depend on the former. Variables in Dumux represent the state of + * a numerical solution of a model, consisting of all primary/secondary + * variables and, if the a transient problem is modeled, of time information. + * + * This class defines the interface that is expected of variable classes, + * and it provides the implementation for models that do not require storing + * any additional information besides the primary variables and (optionally) + * time. + * \tparam X The type used for solution vectors, i.e. all primary variables. + */ +template +class Variables +{ + template + struct ScalarExtractorHelper; + + template + struct ScalarExtractorHelper + { using Type = T; }; + + template + struct ScalarExtractorHelper + { + private: + using ValueType = std::decay_t()[0])>; + static constexpr bool indexable = IsIndexable::value; + public: + using Type = typename ScalarExtractorHelper::Type; + }; + +public: + //! 
export the type of solution vector + using SolutionVector = X; + + //! export the underlying scalar type + using Scalar = typename ScalarExtractorHelper::value>::Type; + + //! export the time representation + using TimeLevel = Dumux::TimeLevel; + + //! Default constructor + explicit Variables() : x_(), t_(0.0) {} + + //! Construction from a solution + explicit Variables(const SolutionVector& x, + const TimeLevel& t = TimeLevel{0.0}) + : x_(x), t_(t) + {} + + //! Construction from a solution + explicit Variables(SolutionVector&& x, + const TimeLevel& t = TimeLevel{0.0}) + : x_(std::move(x)), t_(t) + {} + + //! Construction from initializer lambda + template), int> = 0> + explicit Variables(const Initializer& initializeSolution, + const TimeLevel& timeLevel = TimeLevel{0.0}) + : t_(timeLevel) + { + initializeSolution(x_); + } + + //! Return reference to the solution + const SolutionVector& dofs() const { return x_; } + + //! Non-const access still required for privar switch (TODO: Remove dependency) + SolutionVector& dofs() { return x_; } + + //! Update the state to a new solution + void update(const SolutionVector& x) + { x_ = x; } + + //! Update the time level only + void updateTime(const TimeLevel& t) + { t_ = t; } + + //! Update the state to a new solution & time level + void update(const SolutionVector& x, + const TimeLevel& t) + { + x_ = x; + t_ = t; + } + +private: + SolutionVector x_; + TimeLevel t_; +}; + +} // end namespace Dumux + +#endif -- GitLab From 775f5f642af8561a56d9451e39a5b7e300566144 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Fri, 16 Oct 2020 15:04:07 +0200 Subject: [PATCH 03/46] [disc] introduce base grid variables --- dumux/discretization/CMakeLists.txt | 1 + dumux/discretization/gridvariables.hh | 70 +++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 dumux/discretization/gridvariables.hh diff --git a/dumux/discretization/CMakeLists.txt b/dumux/discretization/CMakeLists.txt index a43e90fb37..05fa5cb88a 100644 --- a/dumux/discretization/CMakeLists.txt +++ b/dumux/discretization/CMakeLists.txt @@ -18,6 +18,7 @@ fluxstencil.hh functionspacebasis.hh fvgridvariables.hh fvproperties.hh +gridvariables.hh localview.hh method.hh rotationpolicy.hh diff --git a/dumux/discretization/gridvariables.hh b/dumux/discretization/gridvariables.hh new file mode 100644 index 0000000000..35e239c834 --- /dev/null +++ b/dumux/discretization/gridvariables.hh @@ -0,0 +1,70 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! 
+ * \file + * \ingroup Discretization + * \brief Base class for grid variables + */ +#ifndef DUMUX_GRID_VARIABLES_HH +#define DUMUX_GRID_VARIABLES_HH + +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Discretization + * \brief Base class for grid variables. + * \tparam GG The grid geometry type + * \tparam X The type used for solution vectors + */ +template +class GridVariables +: public Variables +{ + using ParentType = Variables; + +public: + //! export the grid geometry type + using GridGeometry = GG; + + /*! + * \brief Constructor from a grid geometry. The remaining arguments must + * be valid arguments for the construction of the Variables class. + */ + template + explicit GridVariables(std::shared_ptr gridGeometry, + Args&&... args) + : ParentType(std::forward(args)...) + , gridGeometry_(gridGeometry) + {} + + //! Return a reference to the grid geometry + const GridGeometry& gridGeometry() const + { return *gridGeometry_; } + +private: + std::shared_ptr gridGeometry_; +}; + +} // end namespace Dumux + +#endif -- GitLab From e45263b7f8e91bcc9cc0dbe68d206eaa68910cde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Thu, 4 Feb 2021 21:38:14 +0100 Subject: [PATCH 04/46] [fvgridvars] inherit from new base and define view --- dumux/discretization/fvgridvariables.hh | 238 +++++++++++++++++++----- 1 file changed, 190 insertions(+), 48 deletions(-) diff --git a/dumux/discretization/fvgridvariables.hh b/dumux/discretization/fvgridvariables.hh index ce693bd283..aa42c3d015 100644 --- a/dumux/discretization/fvgridvariables.hh +++ b/dumux/discretization/fvgridvariables.hh @@ -19,32 +19,122 @@ /*! * \file * \ingroup Discretization - * \brief The grid variable class for finite volume schemes storing variables on scv and scvf (volume and flux variables) + * \brief The grid variable class for finite volume schemes, + * storing variables on scv and scvf (volume and flux variables) */ #ifndef DUMUX_FV_GRID_VARIABLES_HH #define DUMUX_FV_GRID_VARIABLES_HH #include #include -#include + +// TODO: Remove once default template argument is omitted, +// which is there solely to ensure backwards compatibility. +#include + +#include +#include "gridvariables.hh" namespace Dumux { /*! * \ingroup Discretization - * \brief The grid variable class for finite volume schemes storing variables on scv and scvf (volume and flux variables) - * \tparam the type of the grid geometry - * \tparam the type of the grid volume variables - * \tparam the type of the grid flux variables cache + * \brief Finite volume-specific local view on grid variables. + * \tparam GV The grid variables class + */ +template +class FVGridVariablesLocalView +{ + using GridGeometry = typename GV::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView; + using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView; + +public: + //! export corresponding grid-wide class + using GridVariables = GV; + + //! Constructor + FVGridVariablesLocalView(const GridVariables& gridVariables) + : gridVariables_(&gridVariables) + , elemVolVars_(gridVariables.gridVolVars()) + , elemFluxVarsCache_(gridVariables.gridFluxVarsCache()) + {} + + /*! + * \brief Bind this local view to a grid element. 
+ * \param element The grid element + * \param fvGeometry Local view on the grid geometry + */ + void bind(const Element& element, + const FVElementGeometry& fvGeometry) + { + const auto& x = gridVariables().dofs(); + elemVolVars_.bind(element, fvGeometry, x); + elemFluxVarsCache_.bind(element, fvGeometry, elemVolVars_); + } + + /*! + * \brief Bind only the volume variables local view to a grid element. + * \param element The grid element + * \param fvGeometry Local view on the grid geometry + */ + void bindElemVolVars(const Element& element, + const FVElementGeometry& fvGeometry) + { + elemVolVars_.bind(element, fvGeometry, gridVariables().dofs()); + + // unbind flux variables cache + elemFluxVarsCache_ = localView(gridVariables().gridFluxVarsCache()); + } + + //! return reference to the elem vol vars + const ElementVolumeVariables& elemVolVars() const { return elemVolVars_; } + ElementVolumeVariables& elemVolVars() { return elemVolVars_; } + + //! return reference to the flux variables cache + const ElementFluxVariablesCache& elemFluxVarsCache() const { return elemFluxVarsCache_; } + ElementFluxVariablesCache& elemFluxVarsCache() { return elemFluxVarsCache_; } + + //! Return reference to the grid variables + const GridVariables& gridVariables() const + { return *gridVariables_; } + +private: + const GridVariables* gridVariables_; + ElementVolumeVariables elemVolVars_; + ElementFluxVariablesCache elemFluxVarsCache_; +}; + +/*! + * \ingroup Discretization + * \brief The grid variable class for finite volume schemes, storing + * variables on scv and scvf (volume and flux variables). + * \tparam GG the type of the grid geometry + * \tparam GVV the type of the grid volume variables + * \tparam GFVC the type of the grid flux variables cache + * \tparam X the type used for solution vectors + * \todo TODO: GG is an obsolote template argument? */ -template +template> class FVGridVariables +: public GridVariables { + using ParentType = GridVariables; + using ThisType = FVGridVariables; + public: + using typename ParentType::SolutionVector; + //! export type of the finite volume grid geometry using GridGeometry = GG; - //! export type of the finite volume grid geometry + //! export type of the grid volume variables using GridVolumeVariables = GVV; //! export type of the volume variables @@ -53,59 +143,109 @@ public: //! export primary variable type using PrimaryVariables = typename VolumeVariables::PrimaryVariables; - //! export scalar type (TODO get it directly from the volvars) - using Scalar = std::decay_t()[0])>; - - //! export type of the finite volume grid geometry + //! export cache type for flux variables using GridFluxVariablesCache = GFVC; + //! export the local view on this class + using LocalView = FVGridVariablesLocalView; + + /*! + * \brief Constructor + * \param problem The problem to be solved + * \param gridGeometry The geometry of the computational grid + * \todo TODO: Here we could forward to the base class with + * [problem] (auto& x) { problem->applyInitialSolution(x); }, + * but we cannot be sure that user problems implement + * the initial() or initialAtPos() functions? + */ template FVGridVariables(std::shared_ptr problem, std::shared_ptr gridGeometry) - : gridGeometry_(gridGeometry) - , curGridVolVars_(*problem) + : ParentType(gridGeometry) + , gridVolVars_(*problem) , prevGridVolVars_(*problem) , gridFluxVarsCache_(*problem) {} + /*! + * \brief Constructor with custom initialization of the solution. 
+ * \param problem The problem to be solved + * \param gridGeometry The geometry of the computational grid + * \param solOrInitializer This can be either a reference to a + * solution vector, or an initializer + * lambda. See Dumux::Variables. + */ + template + FVGridVariables(std::shared_ptr problem, + std::shared_ptr gridGeometry, + SolOrInitializer&& solOrInitializer) + : ParentType(gridGeometry, std::forward(solOrInitializer)) + , gridVolVars_(*problem) + , prevGridVolVars_(*problem) + , gridFluxVarsCache_(*problem) + { + gridVolVars_.update(this->gridGeometry(), this->dofs()); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, this->dofs(), true); + prevGridVolVars_ = gridVolVars_; + } + //! initialize all variables (stationary case) - template + [[deprecated("Initialize grid variables upon construction instead.")]] void init(const SolutionVector& curSol) { + ParentType::update(curSol); + // resize and update the volVars with the initial solution - curGridVolVars_.update(*gridGeometry_, curSol); + gridVolVars_.update(this->gridGeometry(), curSol); // update the flux variables caches (always force flux cache update on initialization) - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, curSol, true); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol, true); // set the volvars of the previous time step in case we have an instationary problem // note that this means some memory overhead in the case of enabled caching, however // this it outweighted by the advantage of having a single grid variables object for // stationary and instationary problems - prevGridVolVars_ = curGridVolVars_; + // TODO: Remove this in the context of new time integration schemes + prevGridVolVars_ = gridVolVars_; } - //! update all variables - template - void update(const SolutionVector& curSol, bool forceFluxCacheUpdate = false) + //! Deprecated update interface. + [[deprecated("Use update(curSol) or forceUpdateAll(curSol) instead.")]] + void update(const SolutionVector& curSol, bool forceFluxCacheUpdate) { + if (forceFluxCacheUpdate) + forceUpdateAll(curSol); + else + update(curSol); + } + + //! update all variables after grid adaption + [[deprecated("use forceUpdateAll() instead.")]] + void updateAfterGridAdaption(const SolutionVector& curSol) + { forceUpdateAll(curSol); } + + //! Update all variables that may be affected by a change in solution + void update(const SolutionVector& curSol) + { + ParentType::update(curSol); + // resize and update the volVars with the initial solution - curGridVolVars_.update(*gridGeometry_, curSol); + gridVolVars_.update(this->gridGeometry(), curSol); // update the flux variables caches - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, curSol, forceFluxCacheUpdate); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol); } - //! update all variables after grid adaption - template - void updateAfterGridAdaption(const SolutionVector& curSol) + //! 
Force the update of all variables + void forceUpdateAll(const SolutionVector& curSol) { - // update (always force flux cache update as the grid changed) - update(curSol, true); + ParentType::update(curSol); - // for instationary problems also update the variables - // for the previous time step to the new grid - prevGridVolVars_ = curGridVolVars_; + // resize and update the volVars with the initial solution + gridVolVars_.update(this->gridGeometry(), curSol); + + // update the flux variables caches + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol, true); } /*! @@ -114,18 +254,19 @@ public: */ void advanceTimeStep() { - prevGridVolVars_ = curGridVolVars_; + prevGridVolVars_ = gridVolVars_; } //! resets state to the one before time integration - template void resetTimeStep(const SolutionVector& solution) { + ParentType::update(solution); + // set the new time step vol vars to old vol vars - curGridVolVars_ = prevGridVolVars_; + gridVolVars_ = prevGridVolVars_; // update the flux variables caches - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, solution); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, solution); } //! return the flux variables cache @@ -137,12 +278,22 @@ public: { return gridFluxVarsCache_; } //! return the current volume variables + const GridVolumeVariables& gridVolVars() const + { return gridVolVars_; } + + //! return the current volume variables + GridVolumeVariables& gridVolVars() + { return gridVolVars_; } + + //! return the current volume variables + [[deprecated("Use gridVolVars() instead")]] const GridVolumeVariables& curGridVolVars() const - { return curGridVolVars_; } + { return gridVolVars_; } //! return the current volume variables + [[deprecated("Use gridVolVars() instead")]] GridVolumeVariables& curGridVolVars() - { return curGridVolVars_; } + { return gridVolVars_; } //! return the volume variables of the previous time step (for instationary problems) const GridVolumeVariables& prevGridVolVars() const @@ -152,18 +303,9 @@ public: GridVolumeVariables& prevGridVolVars() { return prevGridVolVars_; } - //! 
return the finite volume grid geometry - const GridGeometry& gridGeometry() const - { return *gridGeometry_; } - -protected: - - std::shared_ptr gridGeometry_; //!< pointer to the constant grid geometry - private: - GridVolumeVariables curGridVolVars_; //!< the current volume variables (primary and secondary variables) - GridVolumeVariables prevGridVolVars_; //!< the previous time step's volume variables (primary and secondary variables) - + GridVolumeVariables gridVolVars_; //!< the current volume variables (primary and secondary variables) + GridVolumeVariables prevGridVolVars_; //!< the previous time step's volume variables (primary and secondary variables) GridFluxVariablesCache gridFluxVarsCache_; //!< the flux variables cache }; -- GitLab From 1e264011d9daf7868db477847ff1daa1632a982a Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Tue, 20 Oct 2020 09:23:44 +0200 Subject: [PATCH 05/46] [nonequil][gridvars] support new grid vars concept TODO: squash in first grid vars commit --- .../nonequilibrium/gridvariables.hh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/dumux/porousmediumflow/nonequilibrium/gridvariables.hh b/dumux/porousmediumflow/nonequilibrium/gridvariables.hh index e273129750..a871d2f703 100644 --- a/dumux/porousmediumflow/nonequilibrium/gridvariables.hh +++ b/dumux/porousmediumflow/nonequilibrium/gridvariables.hh @@ -46,12 +46,14 @@ template class NonEquilibriumGridVariables : public FVGridVariables, GetPropType, - GetPropType> + GetPropType, + GetPropType> { using ThisType = NonEquilibriumGridVariables; using ParentType = FVGridVariables, GetPropType, - GetPropType>; + GetPropType, + GetPropType>; using VelocityBackend = PorousMediumFlowVelocity>; @@ -88,15 +90,15 @@ public: for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx) { if(isBox && dim == 1) - velocity[phaseIdx].resize(this->gridGeometry_->gridView().size(0)); + velocity[phaseIdx].resize(this->gridGeometry().gridView().size(0)); else - velocity[phaseIdx].resize(this->gridGeometry_->numDofs()); + velocity[phaseIdx].resize(this->gridGeometry().numDofs()); } - for (const auto& element : elements(this->gridGeometry_->gridView(), Dune::Partitions::interior)) + for (const auto& element : elements(this->gridGeometry().gridView(), Dune::Partitions::interior)) { - const auto eIdxGlobal = this->gridGeometry_->elementMapper().index(element); + const auto eIdxGlobal = this->gridGeometry().elementMapper().index(element); - auto fvGeometry = localView(*this->gridGeometry_); + auto fvGeometry = localView(this->gridGeometry()); auto elemVolVars = localView(this->curGridVolVars()); auto elemFluxVarsCache = localView(this->gridFluxVarsCache()); -- GitLab From 86e1feee5b85cd12a37a0577e5dad93342a6e404 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Tue, 20 Oct 2020 09:24:10 +0200 Subject: [PATCH 06/46] [staggered][gridvars] support new grid vars concept TODO: incorporate new concepts also in staggered grid vars and squash commit into first grid vars commit --- dumux/discretization/staggered.hh | 3 ++- .../discretization/staggered/gridvariables.hh | 21 ++++++++++++------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/dumux/discretization/staggered.hh b/dumux/discretization/staggered.hh index f8c5bec7e4..8225554280 100644 --- a/dumux/discretization/staggered.hh +++ b/dumux/discretization/staggered.hh @@ -111,8 +111,9 @@ private: using GVV = GetPropType; using GFVC = GetPropType; using GFV = GetPropType; + using X = GetPropType; public: - using type = 
StaggeredGridVariables; + using type = StaggeredGridVariables; }; //! Use the cell center element boundary types per default diff --git a/dumux/discretization/staggered/gridvariables.hh b/dumux/discretization/staggered/gridvariables.hh index be35f83ac5..26e7b74f69 100644 --- a/dumux/discretization/staggered/gridvariables.hh +++ b/dumux/discretization/staggered/gridvariables.hh @@ -24,6 +24,8 @@ #ifndef DUMUX_STAGGERED_GRID_VARIABLES_HH #define DUMUX_STAGGERED_GRID_VARIABLES_HH +#include + #include namespace Dumux { @@ -93,7 +95,7 @@ public: //! return the fv grid geometry const GridGeometry& gridGeometry() const - { return (*gridVariables_->gridGeometry_); } + { return gridVariables_->gridGeometry(); } // return the actual grid variables const ActualGridVariables& gridVariables() const @@ -189,12 +191,15 @@ public: * \tparam GVV the type of the grid volume variables * \tparam GFVC the type of the grid flux variables cache * \tparam GFV the type of the grid face variables + * \tparam X the solution vector type */ -template -class StaggeredGridVariables : public FVGridVariables +template +class StaggeredGridVariables +: public FVGridVariables< GG, GVV, GFVC, std::decay_t()[GG::cellCenterIdx()])> > { - using ParentType = FVGridVariables; - using ThisType = StaggeredGridVariables; + using CellSolutionVector = std::decay_t()[GG::cellCenterIdx()])>; + using ParentType = FVGridVariables; + using ThisType = StaggeredGridVariables; friend class StaggeredGridVariablesView; static constexpr auto cellCenterIdx = GG::cellCenterIdx(); @@ -227,7 +232,7 @@ public: void update(const SolutionVector& curSol) { ParentType::update(curSol[cellCenterIdx]); - curGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); + curGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); } //! initialize all variables (stationary case) @@ -235,8 +240,8 @@ public: void init(const SolutionVector& curSol) { ParentType::init(curSol[cellCenterIdx]); - curGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); - prevGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); + curGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); + prevGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); } //! 
Sets the current state as the previous for next time step -- GitLab From d7c91ff6c6c990169d7c42591662f44c6e372e91 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Tue, 20 Oct 2020 09:05:55 +0200 Subject: [PATCH 07/46] [test] add test for grid variables construction --- test/discretization/CMakeLists.txt | 6 + test/discretization/test_fvgridvariables.cc | 146 ++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 test/discretization/test_fvgridvariables.cc diff --git a/test/discretization/CMakeLists.txt b/test/discretization/CMakeLists.txt index 25c1bdd297..c9e5b31c6e 100644 --- a/test/discretization/CMakeLists.txt +++ b/test/discretization/CMakeLists.txt @@ -3,3 +3,9 @@ add_subdirectory(staggered) add_subdirectory(box) add_subdirectory(projection) add_subdirectory(rotationsymmetry) + +dune_add_test(NAME test_fvgridvariables + LABELS unit + SOURCES test_fvgridvariables.cc + COMMAND ./test_fvgridvariables + CMD_ARGS -Problem.Name gridvarstest) diff --git a/test/discretization/test_fvgridvariables.cc b/test/discretization/test_fvgridvariables.cc new file mode 100644 index 0000000000..7d35a6b9f1 --- /dev/null +++ b/test/discretization/test_fvgridvariables.cc @@ -0,0 +1,146 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \brief Test for finite volume grid variables + */ +#include +#include + +#include +#include +#include +#include + +// we use the 1p type tag here in order not to be obliged +// to define grid flux vars cache & vol vars cache... 
+#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +namespace Dumux { + +template +class MockSpatialParams +: public FVSpatialParamsOneP> +{ +public: + using PermeabilityType = Scalar; +}; + +template +class MockProblem : public FVProblem +{ + using Parent = FVProblem; +public: + using Parent::Parent; +}; + +namespace Properties { + +// new type tags +namespace TTag { +struct GridVariablesTest { using InheritsFrom = std::tuple; }; +struct GridVariablesTestBox { using InheritsFrom = std::tuple; }; +} // end namespace TTag + +// property definitions +template +struct Grid +{ using type = Dune::YaspGrid<2>; }; + +template +struct Problem +{ using type = MockProblem; }; + +template +struct SpatialParams +{ +private: + using Scalar = GetPropType; + using GG = GetPropType; +public: + using type = MockSpatialParams; +}; + +template +struct FluidSystem +{ +private: + using Scalar = GetPropType; +public: + using type = FluidSystems::OnePLiquid>; +}; + +} // end namespace Properties +} // end namespace Dumux + +int main (int argc, char *argv[]) +{ + Dune::MPIHelper::instance(argc, argv); + + using namespace Dumux; + Dumux::Parameters::init(argc, argv); + + using TypeTag = Properties::TTag::GridVariablesTestBox; + using Grid = GetPropType; + using GridFactory = Dune::StructuredGridFactory; + auto grid = GridFactory::createCubeGrid({0.0, 0.0}, {1.0, 1.0}, {2, 2}); + + using GridGeometry = GetPropType; + auto gridGeometry = std::make_shared(grid->leafGridView()); + + using Problem = GetPropType; + auto problem = std::make_shared(gridGeometry); + + using GridVariables = GetPropType; + + // constructor leaving the solution uninitialized, not resized + auto gridVariables = std::make_shared(problem, gridGeometry); + if (gridVariables->dofs().size() != 0) + DUNE_THROW(Dune::Exception, "Expected uninitialized solution"); + + // Construction with a solution + using SolutionVector = GetPropType; + SolutionVector x; x.resize(gridGeometry->numDofs()); x = 0.0; + gridVariables = std::make_shared(problem, gridGeometry, x); + + // Construction from a moved solution (TODO: how to check if move succeeded?) + gridVariables = std::make_shared(problem, gridGeometry, std::move(x)); + + // Construction from initializer lambda + const auto init = [gridGeometry] (auto& x) { x.resize(gridGeometry->numDofs()); x = 2.25; }; + gridVariables = std::make_shared(problem, gridGeometry, init); + + const auto& dofs = gridVariables->dofs(); + if (std::any_of(dofs.begin(), dofs.end(), + [] (auto d) { return Dune::FloatCmp::ne(2.25, d[0]); })) + DUNE_THROW(Dune::Exception, "Unexpected dof value"); + + std::cout << "\nAll tests passed" << std::endl; + return 0; +} -- GitLab From 1da3818bf446a7468dec688c643a1815973fda58 Mon Sep 17 00:00:00 2001 From: Timo Koch Date: Tue, 20 Oct 2020 10:18:59 +0200 Subject: [PATCH 08/46] [newton] introduce variables backend The newton solver linearizes the system of equations around a given state, currently represented by the primary variables. In order to support the state being represented by grid variables, this backend can be used. 
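
As an illustration (a minimal sketch only; `vars` is a placeholder and
the actual call sites are only introduced when the Newton solver is
adapted in a later patch of this series), the solver side could use the
backend roughly like this, here for the scalar specialization that this
patch provides:

    using Backend = NewtonVariablesBackend<Variables>;

    // dof vectors for the last iterate and the update to be computed
    auto uLastIter = Backend::makeDofVector(vars);
    auto deltaU = Backend::makeZeroDofVector(Backend::size(uLastIter));

    // ... assemble and solve the linear system for deltaU ...

    // shift criterion evaluated on the dof representation
    const auto shift = Backend::maxRelativeShift(uLastIter, deltaU);
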
--- dumux/nonlinear/newtonvariablesbackend.hh | 86 +++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 dumux/nonlinear/newtonvariablesbackend.hh diff --git a/dumux/nonlinear/newtonvariablesbackend.hh b/dumux/nonlinear/newtonvariablesbackend.hh new file mode 100644 index 0000000000..a379eb8ec1 --- /dev/null +++ b/dumux/nonlinear/newtonvariablesbackend.hh @@ -0,0 +1,86 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/**************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Nonlinear + * \brief Backends for the Newton for different variable types + */ +#ifndef DUMUX_NEWTON_VARIABLES_BACKEND_HH +#define DUMUX_NEWTON_VARIABLES_BACKEND_HH + +#include +#include + +namespace Dumux { + +/*! + * \file + * \ingroup Nonlinear + * \brief Class providing operations with variables needed in the Newton solver + */ +template +class NewtonVariablesBackend; + +/*! + * \file + * \ingroup Nonlinear + * \brief Class providing Newton operations for scalar/number types + */ +template +class NewtonVariablesBackend::value, Scalar>> +{ +public: + using Variables = Scalar; //!< the type of the variables object + using DofVector = Scalar; //!< the type of the dofs parametrizing the variables object + + static std::size_t size(const DofVector& d) + { return 1; } + + static DofVector makeDofVector(const DofVector& d) + { return d; } + + static DofVector makeZeroDofVector(std::size_t size) + { return 0.0; } + + static Scalar makeDofVectorForSolver(const DofVector& d) + { return d; } + + static Scalar makeZeroDofVectorForSolver(std::size_t size) + { return 0.0; } + + static Scalar reconstructDofVectorFromSolver(const Scalar& d) + { return d; } + + static Scalar maxRelativeShift(const DofVector& previous, const DofVector& delta) + { + const auto current = previous - delta; + using std::abs; using std::max; + auto error = abs(previous - current); + error /= max(1.0, abs(previous + current)/2.0); + return error; + } + + // //! operations on variables + // static DofVector& getDofVector(Variables& v) + // { return v; } +}; + +} // end namespace Dumux + +#endif -- GitLab From ec19b20780233b365577a56e221b9851df91fe04 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 10:40:12 +0200 Subject: [PATCH 09/46] [newton][varsbackend] add backends for vectors & generic variables This commit also removes the interfaces related to the construction of vectors for the solver, as this is currently done in Detail::assign in newtonsolver.hh. 
The same holds for maxRelativeShift(), which is also currently implemented in the Detail namespace in newtonsolver.hh- In the future, we could think about outsourcing putting these two functions into the backend. --- dumux/nonlinear/newtonvariablesbackend.hh | 157 +++++++++++++++++++--- 1 file changed, 136 insertions(+), 21 deletions(-) diff --git a/dumux/nonlinear/newtonvariablesbackend.hh b/dumux/nonlinear/newtonvariablesbackend.hh index a379eb8ec1..9cb617140b 100644 --- a/dumux/nonlinear/newtonvariablesbackend.hh +++ b/dumux/nonlinear/newtonvariablesbackend.hh @@ -24,29 +24,41 @@ #ifndef DUMUX_NEWTON_VARIABLES_BACKEND_HH #define DUMUX_NEWTON_VARIABLES_BACKEND_HH +#include +#include #include + +#include #include +#include +#include +#include + +// forward declaration +namespace Dune { + +template +class MultiTypeBlockVector; + +} // end namespace Dune namespace Dumux { /*! - * \file * \ingroup Nonlinear - * \brief Class providing operations with variables needed in the Newton solver + * \brief Class providing operations with primary variable vectors */ -template -class NewtonVariablesBackend; +template +class NewtonDofBackend; /*! - * \file * \ingroup Nonlinear - * \brief Class providing Newton operations for scalar/number types + * \brief Specialization providing Newton operations for scalar/number types */ template -class NewtonVariablesBackend::value, Scalar>> +class NewtonDofBackend::value, Scalar>> { public: - using Variables = Scalar; //!< the type of the variables object using DofVector = Scalar; //!< the type of the dofs parametrizing the variables object static std::size_t size(const DofVector& d) @@ -57,30 +69,133 @@ public: static DofVector makeZeroDofVector(std::size_t size) { return 0.0; } +}; + +/*! + * \ingroup Nonlinear + * \brief Specialization providing Newton operations for block vectors + */ +template +class NewtonDofBackend> +{ + +public: + using DofVector = Dune::BlockVector; //!< the type of the dofs parametrizing the variables object - static Scalar makeDofVectorForSolver(const DofVector& d) + static std::size_t size(const DofVector& d) + { return d.size(); } + + static DofVector makeDofVector(const DofVector& d) { return d; } - static Scalar makeZeroDofVectorForSolver(std::size_t size) - { return 0.0; } + static DofVector makeZeroDofVector(std::size_t size) + { DofVector d; d.resize(size); return d; } +}; + +/*! 
+ * \ingroup Nonlinear + * \brief Specialization providing Newton operations for multitype block vectors + */ +template +class NewtonDofBackend> +{ + using DV = Dune::MultiTypeBlockVector; + static constexpr auto numBlocks = DV::size(); + + using VectorSizeInfo = std::array; + +public: + using DofVector = DV; //!< the type of the dofs parametrizing the variables object + + static VectorSizeInfo size(const DofVector& d) + { + VectorSizeInfo result; + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto i) { + result[i] = d[Dune::index_constant{}].size(); + }); + return result; + } - static Scalar reconstructDofVectorFromSolver(const Scalar& d) + static DofVector makeDofVector(const DofVector& d) { return d; } - static Scalar maxRelativeShift(const DofVector& previous, const DofVector& delta) + static DofVector makeZeroDofVector(const VectorSizeInfo& size) { - const auto current = previous - delta; - using std::abs; using std::max; - auto error = abs(previous - current); - error /= max(1.0, abs(previous + current)/2.0); - return error; + DofVector result; + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto i) { + result[Dune::index_constant{}].resize(size[i]); + }); + return result; } +}; + +namespace Impl { + +template +using SolutionVectorType = typename Vars::SolutionVector; + +template +class NewtonVariablesBackend; + +/*! + * \ingroup Nonlinear + * \brief Class providing Newton operations for scalar/number types + * \note We assume the variables being simply a dof vector if we + * do not find the variables class to export `SolutionVector`. + */ +template +class NewtonVariablesBackend +: public NewtonDofBackend +{ + using ParentType = NewtonDofBackend; + +public: + using Variables = Vars; + using typename ParentType::DofVector; - // //! operations on variables - // static DofVector& getDofVector(Variables& v) - // { return v; } + static void update(Variables& v, const DofVector& dofs) + { v = dofs; } + + //! operations on variables + static const DofVector& getDofVector(const Variables& v) + { return v; } }; +/*! + * \file + * \ingroup Nonlinear + * \brief Class providing Newton operations for generic variable + * types, possibly containing also secondary variables. + */ +template +class NewtonVariablesBackend +: public NewtonDofBackend +{ +public: + using DofVector = typename Vars::SolutionVector; + using Variables = Vars; //!< the type of the variables object + + static void update(Variables& v, const DofVector& dofs) + { v.update(dofs); } + + //! operations on variables + static const DofVector& getDofVector(const Variables& v) + { return v.dofs(); } +}; +} // end namespace Impl + +/*! + * \ingroup Nonlinear + * \brief Class providing Newton operations for generic variable classes + * that represent the state of a numerical solution, possibly + * consisting of primary/secondary variables and information on + * the time level. 
+ */ +template +using NewtonVariablesBackend = Impl::NewtonVariablesBackend>; + } // end namespace Dumux #endif -- GitLab From 19a7ef22fcaa77796b14994a37c686d69e95d564 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 11:02:30 +0200 Subject: [PATCH 10/46] [pdesolver] detect and export assembler variables type --- dumux/common/pdesolver.hh | 42 ++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/dumux/common/pdesolver.hh b/dumux/common/pdesolver.hh index 99f61bcaa6..d11285c7ab 100644 --- a/dumux/common/pdesolver.hh +++ b/dumux/common/pdesolver.hh @@ -28,6 +28,7 @@ #include #include +#include #include @@ -35,9 +36,25 @@ namespace Dune { template class MultiTypeBlockMatrix; -} +} // end namespace Dune namespace Dumux { +namespace Impl { + + template + using AssemblerVariablesType = typename Assembler::Variables; + + template + inline constexpr bool exportsVariables = Dune::Std::is_detected_v; + + template> struct VariablesChooser; + template struct VariablesChooser { using Type = AssemblerVariablesType; }; + template struct VariablesChooser { using Type = typename A::ResidualType; }; + + template + using AssemblerVariables = typename VariablesChooser::Type; + +} // end namespace Impl /*! * \ingroup Common @@ -53,11 +70,19 @@ namespace Dumux { template class PDESolver { - using SolutionVector = typename Assembler::ResidualType; using Scalar = typename Assembler::Scalar; using TimeLoop = TimeLoopBase; public: + + //! export the type of variables that represent a numerical solution + using Variables = Impl::AssemblerVariables; + + /*! + * \brief Constructor + * \param assembler pointer to the assembler of the linear system + * \param linearSolver pointer to the solver of the resulting linear system + */ PDESolver(std::shared_ptr assembler, std::shared_ptr linearSolver) : assembler_(assembler) @@ -68,24 +93,27 @@ public: /*! * \brief Solve the given PDE system (usually assemble + solve linear system + update) - * \param sol a solution vector possbilty containing an initial solution + * \param vars instance of the `Variables` class representing a numerical + * solution, defining primary and possibly secondary variables + * and information on the time level. */ - virtual void solve(SolutionVector& sol) = 0; + virtual void solve(Variables& vars) = 0; /*! * \brief Solve the given PDE system with time step control * \note This is used for solvers that are allowed to e.g. automatically reduce the * time step if the solve was not successful - * \param sol a solution vector possbilty containing an initial solution + * \param vars instance of the `Variables` class representing a numerical solution * \param timeLoop a reference to the current time loop */ - virtual void solve(SolutionVector& sol, TimeLoop& timeLoop) + virtual void solve(Variables& vars, TimeLoop& timeLoop) { // per default we just forward to the method without time step control - solve(sol); + solve(vars); } protected: + /*! 
* \brief Access the assembler */ -- GitLab From 180e473af835a6ba95a4ffdb527930fa77141097 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 11:34:57 +0200 Subject: [PATCH 11/46] [newton] use variables backend --- dumux/nonlinear/newtonsolver.hh | 249 +++++++++++++++++++++----------- 1 file changed, 167 insertions(+), 82 deletions(-) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index 3265ef8687..bf3b9d65cb 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -54,6 +54,7 @@ #include #include "newtonconvergencewriter.hh" +#include "newtonvariablesbackend.hh" namespace Dumux { namespace Detail { @@ -176,18 +177,21 @@ template { using ParentType = PDESolver; + using Backend = NewtonVariablesBackend; + using SolutionVector = typename Backend::DofVector; + using Scalar = typename Assembler::Scalar; using JacobianMatrix = typename Assembler::JacobianMatrix; - using SolutionVector = typename Assembler::ResidualType; using ConvergenceWriter = ConvergenceWriterInterface; using TimeLoop = TimeLoopBase; using PrimaryVariableSwitch = typename Detail::GetPVSwitch::type; using HasPriVarsSwitch = typename Detail::GetPVSwitch::value_t; // std::true_type or std::false_type static constexpr bool hasPriVarsSwitch() { return HasPriVarsSwitch::value; }; + static constexpr bool assemblerExportsVariables = Impl::exportsVariables; public: - + using typename ParentType::Variables; using Communication = Comm; /*! @@ -288,8 +292,10 @@ public: /*! * \brief Run the Newton method to solve a non-linear system. * Does time step control when the Newton fails to converge + * \param vars The variables object representing the current state of the + * numerical solution (primary and possibly secondary variables). */ - void solve(SolutionVector& uCurrentIter, TimeLoop& timeLoop) override + void solve(Variables& vars, TimeLoop& timeLoop) override { if (this->assembler().isStationaryProblem()) DUNE_THROW(Dune::InvalidStateException, "Using time step control with stationary problem makes no sense!"); @@ -298,7 +304,7 @@ public: for (std::size_t i = 0; i <= maxTimeStepDivisions_; ++i) { // linearize & solve - const bool converged = solve_(uCurrentIter); + const bool converged = solve_(vars); if (converged) return; @@ -306,10 +312,17 @@ public: else if (!converged && i < maxTimeStepDivisions_) { // set solution to previous solution - uCurrentIter = this->assembler().prevSol(); - - // reset the grid variables to the previous solution - this->assembler().resetTimeStep(uCurrentIter); + Backend::update(vars, this->assembler().prevSol()); + + // if this is true, we assume old-style assemblers/grid variables + // TODO: reset time step is more efficient as it simply copies + // prevVolVars into curVolVars. Above (in the new style) + // there is an update. We should think about how to achieve + // this more efficiently. + // TODO: Is there a test with time step reductions? It should + // probably be tested if this works properly. + if (!assemblerExportsVariables) + this->assembler().resetTimeStep(Backend::getDofVector(vars)); if (verbosity_ >= 1) { @@ -334,10 +347,12 @@ public: /*! * \brief Run the Newton method to solve a non-linear system. * The solver is responsible for all the strategic decisions. + * \param vars The variables object representing the current state of the + * numerical solution (primary and possibly secondary variables). 
*/ - void solve(SolutionVector& uCurrentIter) override + void solve(Variables& vars) override { - const bool converged = solve_(uCurrentIter); + const bool converged = solve_(vars); if (!converged) DUNE_THROW(NumericalProblem, Fmt::format("Newton solver didn't converge after {} iterations.\n", numSteps_)); @@ -347,36 +362,36 @@ public: * \brief Called before the Newton method is applied to an * non-linear system of equations. * - * \param u The initial solution + * \param initVars The variables representing the initial solution */ - virtual void newtonBegin(SolutionVector& u) + virtual void newtonBegin(Variables& initVars) { numSteps_ = 0; - initPriVarSwitch_(u, HasPriVarsSwitch{}); + initPriVarSwitch_(initVars, HasPriVarsSwitch{}); + const auto& initSol = Backend::getDofVector(initVars); // write the initial residual if a convergence writer was set if (convergenceWriter_) { - this->assembler().assembleResidual(u); - SolutionVector delta(u); - delta = 0; // dummy vector, there is no delta before solving the linear system - convergenceWriter_->write(u, delta, this->assembler().residual()); + this->assembler().assembleResidual(initVars); + auto delta = Backend::makeZeroDofVector(Backend::size(initSol)); + convergenceWriter_->write(initSol, delta, this->assembler().residual()); } if (enablePartialReassembly_) { partialReassembler_->resetColors(); - resizeDistanceFromLastLinearization_(u, distanceFromLastLinearization_); + resizeDistanceFromLastLinearization_(initSol, distanceFromLastLinearization_); } } /*! * \brief Returns true if another iteration should be done. * - * \param uCurrentIter The solution of the current Newton iteration + * \param varsCurrentIter The variables of the current Newton iteration * \param converged if the Newton method's convergence criterion was met in this step */ - virtual bool newtonProceed(const SolutionVector &uCurrentIter, bool converged) + virtual bool newtonProceed(const Variables &varsCurrentIter, bool converged) { if (numSteps_ < minSteps_) return true; @@ -399,7 +414,7 @@ public: /*! * \brief Indicates the beginning of a Newton iteration. */ - virtual void newtonBeginStep(const SolutionVector& u) + virtual void newtonBeginStep(const Variables& vars) { lastShift_ = shift_; if (numSteps_ == 0) @@ -415,11 +430,11 @@ public: /*! * \brief Assemble the linear system of equations \f$\mathbf{A}x - b = 0\f$. * - * \param uCurrentIter The current iteration's solution vector + * \param vars The current iteration's variables */ - virtual void assembleLinearSystem(const SolutionVector& uCurrentIter) + virtual void assembleLinearSystem(const Variables& vars) { - assembleLinearSystem_(this->assembler(), uCurrentIter); + assembleLinearSystem_(this->assembler(), vars); if (enablePartialReassembly_) partialReassembler_->report(comm_, endIterMsgStream_); @@ -499,15 +514,15 @@ public: * subtract deltaU from uLastIter, i.e. * \f[ u^{k+1} = u^k - \Delta u^k \f] * - * \param uCurrentIter The solution vector after the current iteration + * \param vars The variables after the current iteration * \param uLastIter The solution vector after the last iteration * \param deltaU The delta as calculated from solving the linear * system of equations. This parameter also stores * the updated solution. 
*/ - void newtonUpdate(SolutionVector &uCurrentIter, - const SolutionVector &uLastIter, - const SolutionVector &deltaU) + void newtonUpdate(Variables& vars, + const SolutionVector& uLastIter, + const SolutionVector& deltaU) { if (enableShiftCriterion_ || enablePartialReassembly_) newtonUpdateShift_(uLastIter, deltaU); @@ -548,32 +563,32 @@ public: } if (useLineSearch_) - lineSearchUpdate_(uCurrentIter, uLastIter, deltaU); + lineSearchUpdate_(vars, uLastIter, deltaU); else if (useChop_) - choppedUpdate_(uCurrentIter, uLastIter, deltaU); + choppedUpdate_(vars, uLastIter, deltaU); else { - uCurrentIter = uLastIter; + auto uCurrentIter = Backend::makeDofVector(uLastIter); uCurrentIter -= deltaU; - solutionChanged_(uCurrentIter); + solutionChanged_(vars, uCurrentIter); if (enableResidualCriterion_) - computeResidualReduction_(uCurrentIter); + computeResidualReduction_(vars); } } /*! * \brief Indicates that one Newton iteration was finished. * - * \param uCurrentIter The solution after the current Newton iteration + * \param vars The variables after the current Newton iteration * \param uLastIter The solution at the beginning of the current Newton iteration */ - virtual void newtonEndStep(SolutionVector &uCurrentIter, + virtual void newtonEndStep(Variables &vars, const SolutionVector &uLastIter) { - invokePriVarSwitch_(uCurrentIter, HasPriVarsSwitch{}); + invokePriVarSwitch_(vars, HasPriVarsSwitch{}); ++numSteps_; @@ -785,22 +800,37 @@ public: protected: /*! - * \brief Update solution-depended quantities like grid variables after the solution has changed. + * \brief Update solution-dependent quantities like grid variables after the solution has changed. + * \todo TODO: In case we stop support for old-style grid variables / assemblers at one point, + * this would become obsolete as only the update call to the backend would remain. */ - virtual void solutionChanged_(const SolutionVector &uCurrentIter) + virtual void solutionChanged_(Variables& vars, const SolutionVector& uCurrentIter) { - this->assembler().updateGridVariables(uCurrentIter); + Backend::update(vars, uCurrentIter); + + if constexpr (!assemblerExportsVariables) + this->assembler().updateGridVariables(Backend::getDofVector(vars)); } - void computeResidualReduction_(const SolutionVector &uCurrentIter) + void computeResidualReduction_(const Variables& vars) { + // we assume that the assembler works on solution vectors + // if it doesn't export the variables type if constexpr (Detail::hasNorm()) { - this->assembler().assembleResidual(uCurrentIter); + if constexpr (!assemblerExportsVariables) + this->assembler().assembleResidual(Backend::getDofVector(vars)); + else + this->assembler().assembleResidual(vars); residualNorm_ = this->linearSolver().norm(this->assembler().residual()); } else - residualNorm_ = this->assembler().residualNorm(uCurrentIter); + { + if constexpr (!assemblerExportsVariables) + residualNorm_ = this->assembler().residualNorm(Backend::getDofVector(vars)); + else + residualNorm_ = this->assembler().residualNorm(vars); + } reduction_ = residualNorm_; reduction_ /= initialResidual_; @@ -812,52 +842,102 @@ protected: /*! * \brief Initialize the privar switch, noop if there is no priVarSwitch */ - void initPriVarSwitch_(SolutionVector&, std::false_type) {} + void initPriVarSwitch_(Variables&, std::false_type) {} /*! 
* \brief Initialize the privar switch */ - void initPriVarSwitch_(SolutionVector& sol, std::true_type) + void initPriVarSwitch_(Variables& vars, std::true_type) { - priVarSwitch_->reset(sol.size()); + priVarSwitch_->reset(Backend::size(Backend::getDofVector(vars))); priVarsSwitchedInLastIteration_ = false; const auto& problem = this->assembler().problem(); const auto& gridGeometry = this->assembler().gridGeometry(); - auto& gridVariables = this->assembler().gridVariables(); - priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, gridVariables, sol); + + // if the assembler does not export the variables type + // we assume old-style assemblers which operate on solution + // vectors and return the grid variables + if constexpr (!assemblerExportsVariables) + priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, + this->assembler().gridVariables(), + vars); + else + { + // TODO: Non-const solution is expected, so here we introduce an additional copy!? + auto uCurrentIter = Backend::getDofVector(vars); + priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, uCurrentIter); + + // TODO: This is now an overhead. Both the variables and solution have been modified + // above, but the solution inside the variables might not be in sync with + // uCurrentIter. Therefore, we need access to non-const dofs from the variables, + // or modify the privarswitch functions, or have the possibility to update only + // the dofs of a variables object. + // We could also call privarswitch on a copy of variables and then copy back here + solutionChanged_(vars, uCurrentIter); + } } /*! * \brief Switch primary variables if necessary, noop if there is no priVarSwitch */ - void invokePriVarSwitch_(SolutionVector&, std::false_type) {} + void invokePriVarSwitch_(Variables&, std::false_type) {} /*! 
* \brief Switch primary variables if necessary */ - void invokePriVarSwitch_(SolutionVector& uCurrentIter, std::true_type) + void invokePriVarSwitch_(Variables& vars, std::true_type) { // update the variable switch (returns true if the pri vars at at least one dof were switched) // for disabled grid variable caching const auto& gridGeometry = this->assembler().gridGeometry(); const auto& problem = this->assembler().problem(); - auto& gridVariables = this->assembler().gridVariables(); - // invoke the primary variable switch - priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, gridVariables, - problem, gridGeometry); + // we assume an old-style assembler if it doesn't export the Variable type + if constexpr (!assemblerExportsVariables) + { + auto& gridVariables = this->assembler().gridVariables(); - if (priVarsSwitchedInLastIteration_) + // invoke the primary variable switch + priVarsSwitchedInLastIteration_ = priVarSwitch_->update(vars, gridVariables, problem, gridGeometry); + + if (priVarsSwitchedInLastIteration_) + { + for (const auto& element : elements(gridGeometry.gridView())) + { + // if the volume variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, Backend::getDofVector(vars)); + + // if the flux variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, Backend::getDofVector(vars)); + } + } + } + else { - for (const auto& element : elements(gridGeometry.gridView())) + // invoke the primary variable switch + auto uCurrentIter = Backend::getDofVector(vars); + priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, vars, problem, gridGeometry); + + if (priVarsSwitchedInLastIteration_) { - // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, uCurrentIter); + for (const auto& element : elements(gridGeometry.gridView())) + { + // if the volume variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, uCurrentIter); - // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, uCurrentIter); + // if the flux variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, uCurrentIter); + } } + + // TODO: This is now an overhead. Both the variables and solution have been modified + // above, but the solution inside the variables might not be in sync with + // uCurrentIter. Therefore, we need access to non-const dofs from the variables, + // or modify the privarswitch functions, or have the possibility to update only + // the dofs of a variables object. + // We could also call privarswitch on a copy of variables and then copy back here + solutionChanged_(vars, uCurrentIter); } } @@ -890,16 +970,16 @@ private: * \brief Run the Newton method to solve a non-linear system. * The solver is responsible for all the strategic decisions. 
*/ - bool solve_(SolutionVector& uCurrentIter) + bool solve_(Variables& vars) { try { // newtonBegin may manipulate the solution - newtonBegin(uCurrentIter); + newtonBegin(vars); // the given solution is the initial guess - SolutionVector uLastIter(uCurrentIter); - SolutionVector deltaU(uCurrentIter); + auto uLastIter = Backend::makeDofVector(Backend::getDofVector(vars)); + auto deltaU = Backend::makeDofVector(Backend::getDofVector(vars)); // setup timers Dune::Timer assembleTimer(false); @@ -909,15 +989,15 @@ private: // execute the method as long as the solver thinks // that we should do another iteration bool converged = false; - while (newtonProceed(uCurrentIter, converged)) + while (newtonProceed(vars, converged)) { // notify the solver that we're about to start // a new iteration - newtonBeginStep(uCurrentIter); + newtonBeginStep(vars); // make the current solution to the old one if (numSteps_ > 0) - uLastIter = uCurrentIter; + uLastIter = Backend::getDofVector(vars); if (verbosity_ >= 1 && enableDynamicOutput_) std::cout << "Assemble: r(x^k) = dS/dt + div F - q; M = grad r" @@ -929,7 +1009,7 @@ private: // linearize the problem at the current solution assembleTimer.start(); - assembleLinearSystem(uCurrentIter); + assembleLinearSystem(vars); assembleTimer.stop(); /////////////// @@ -964,17 +1044,17 @@ private: updateTimer.start(); // update the current solution (i.e. uOld) with the delta // (i.e. u). The result is stored in u - newtonUpdate(uCurrentIter, uLastIter, deltaU); + newtonUpdate(vars, uLastIter, deltaU); updateTimer.stop(); // tell the solver that we're done with this iteration - newtonEndStep(uCurrentIter, uLastIter); + newtonEndStep(vars, uLastIter); // if a convergence writer was specified compute residual and write output if (convergenceWriter_) { - this->assembler().assembleResidual(uCurrentIter); - convergenceWriter_->write(uCurrentIter, deltaU, this->assembler().residual()); + this->assembler().assembleResidual(vars); + convergenceWriter_->write(Backend::getDofVector(vars), deltaU, this->assembler().residual()); } // detect if the method has converged @@ -988,6 +1068,8 @@ private: if (!newtonConverged()) { totalWastedIter_ += numSteps_; + // TODO: what should NewtonFail receive as arg? + auto uCurrentIter = Backend::getDofVector(vars); newtonFail(uCurrentIter); return false; } @@ -1014,6 +1096,9 @@ private: std::cout << "Newton: Caught exception: \"" << e.what() << "\"\n"; totalWastedIter_ += numSteps_; + + // TODO: what should NewtonFail receive as arg? + auto uCurrentIter = Backend::getDofVector(vars); newtonFail(uCurrentIter); return false; } @@ -1021,18 +1106,18 @@ private: //! assembleLinearSystem_ for assemblers that support partial reassembly template - auto assembleLinearSystem_(const A& assembler, const SolutionVector& uCurrentIter) + auto assembleLinearSystem_(const A& assembler, const Variables& vars) -> typename std::enable_if_t { - this->assembler().assembleJacobianAndResidual(uCurrentIter, partialReassembler_.get()); + this->assembler().assembleJacobianAndResidual(vars, partialReassembler_.get()); } //! assembleLinearSystem_ for assemblers that don't support partial reassembly template - auto assembleLinearSystem_(const A& assembler, const SolutionVector& uCurrentIter) + auto assembleLinearSystem_(const A& assembler, const Variables& vars) -> typename std::enable_if_t { - this->assembler().assembleJacobianAndResidual(uCurrentIter); + this->assembler().assembleJacobianAndResidual(vars); } /*! 
@@ -1053,7 +1138,7 @@ private: shift_ = comm_.max(shift_); } - virtual void lineSearchUpdate_(SolutionVector &uCurrentIter, + virtual void lineSearchUpdate_(Variables &vars, const SolutionVector &uLastIter, const SolutionVector &deltaU) { @@ -1061,12 +1146,12 @@ private: while (true) { - uCurrentIter = deltaU; + auto uCurrentIter = Backend::makeDofVector(deltaU); uCurrentIter *= -lambda; uCurrentIter += uLastIter; - solutionChanged_(uCurrentIter); + solutionChanged_(vars, uCurrentIter); - computeResidualReduction_(uCurrentIter); + computeResidualReduction_(vars); if (reduction_ < lastReduction_ || lambda <= 0.125) { endIterMsgStream_ << Fmt::format(", residual reduction {:.4e}->{:.4e}@lambda={:.4f}", lastReduction_, reduction_, lambda); @@ -1079,9 +1164,9 @@ private: } //! \note method must update the gridVariables, too! - virtual void choppedUpdate_(SolutionVector &uCurrentIter, - const SolutionVector &uLastIter, - const SolutionVector &deltaU) + virtual void choppedUpdate_(Variables& vars, + const SolutionVector& uLastIter, + const SolutionVector& deltaU) { DUNE_THROW(Dune::NotImplemented, "Chopped Newton update strategy not implemented."); -- GitLab From a9bf593d621816ef519ac56050b2ddef4df66ef1 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 12:02:10 +0200 Subject: [PATCH 12/46] [newtonbackend] provide access to non-const dofvector --- dumux/nonlinear/newtonvariablesbackend.hh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/dumux/nonlinear/newtonvariablesbackend.hh b/dumux/nonlinear/newtonvariablesbackend.hh index 9cb617140b..48d6faab06 100644 --- a/dumux/nonlinear/newtonvariablesbackend.hh +++ b/dumux/nonlinear/newtonvariablesbackend.hh @@ -155,12 +155,17 @@ public: using Variables = Vars; using typename ParentType::DofVector; + //! update to new solution vector static void update(Variables& v, const DofVector& dofs) { v = dofs; } - //! operations on variables + //! return const reference to dof vector static const DofVector& getDofVector(const Variables& v) { return v; } + + //! return reference to dof vector + static DofVector& getDofVector(Variables& v) + { return v; } }; /*! @@ -177,12 +182,17 @@ public: using DofVector = typename Vars::SolutionVector; using Variables = Vars; //!< the type of the variables object + //! update to new solution vector static void update(Variables& v, const DofVector& dofs) { v.update(dofs); } - //! operations on variables + //! return const reference to dof vector static const DofVector& getDofVector(const Variables& v) { return v.dofs(); } + + //! return reference to dof vector + static DofVector& getDofVector(Variables& v) + { return v.dofs(); } }; } // end namespace Impl -- GitLab From 3d14721c1f6fa91dd22f9b5702ad6810b12decf0 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 12:04:59 +0200 Subject: [PATCH 13/46] [newton] use non-const dofvector for privar switch --- dumux/nonlinear/newtonsolver.hh | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index bf3b9d65cb..3777cdc778 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -863,19 +863,7 @@ protected: this->assembler().gridVariables(), vars); else - { - // TODO: Non-const solution is expected, so here we introduce an additional copy!? 
- auto uCurrentIter = Backend::getDofVector(vars); - priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, uCurrentIter); - - // TODO: This is now an overhead. Both the variables and solution have been modified - // above, but the solution inside the variables might not be in sync with - // uCurrentIter. Therefore, we need access to non-const dofs from the variables, - // or modify the privarswitch functions, or have the possibility to update only - // the dofs of a variables object. - // We could also call privarswitch on a copy of variables and then copy back here - solutionChanged_(vars, uCurrentIter); - } + priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, Backend::getDofVector(vars)); } /*! @@ -916,28 +904,19 @@ protected: else { // invoke the primary variable switch - auto uCurrentIter = Backend::getDofVector(vars); - priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, vars, problem, gridGeometry); + priVarsSwitchedInLastIteration_ = priVarSwitch_->update(Backend::getDofVector(vars), vars, problem, gridGeometry); if (priVarsSwitchedInLastIteration_) { for (const auto& element : elements(gridGeometry.gridView())) { // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, uCurrentIter); + priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, Backend::getDofVector(vars)); // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, uCurrentIter); + priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, Backend::getDofVector(vars)); } } - - // TODO: This is now an overhead. Both the variables and solution have been modified - // above, but the solution inside the variables might not be in sync with - // uCurrentIter. Therefore, we need access to non-const dofs from the variables, - // or modify the privarswitch functions, or have the possibility to update only - // the dofs of a variables object. 
- // We could also call privarswitch on a copy of variables and then copy back here - solutionChanged_(vars, uCurrentIter); } } -- GitLab From 65608011153c062a68d34e1d77cbe3ec77959b4c Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 13:29:33 +0200 Subject: [PATCH 14/46] [newton] allow access to Backend type in derived classes --- dumux/nonlinear/newtonsolver.hh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index 3777cdc778..04077aad08 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -177,9 +177,12 @@ template { using ParentType = PDESolver; + +protected: using Backend = NewtonVariablesBackend; using SolutionVector = typename Backend::DofVector; +private: using Scalar = typename Assembler::Scalar; using JacobianMatrix = typename Assembler::JacobianMatrix; using ConvergenceWriter = ConvergenceWriterInterface; -- GitLab From 322e413cdc44b39ffc25d98019a46ad6a2d7f300 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 13:29:58 +0200 Subject: [PATCH 15/46] [richards][newton] use new backend --- dumux/porousmediumflow/richards/newtonsolver.hh | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/dumux/porousmediumflow/richards/newtonsolver.hh b/dumux/porousmediumflow/richards/newtonsolver.hh index 007506df95..455d78e6b7 100644 --- a/dumux/porousmediumflow/richards/newtonsolver.hh +++ b/dumux/porousmediumflow/richards/newtonsolver.hh @@ -45,27 +45,31 @@ class RichardsNewtonSolver : public NewtonSolver { using Scalar = typename Assembler::Scalar; using ParentType = NewtonSolver; - using SolutionVector = typename Assembler::ResidualType; using Indices = typename Assembler::GridVariables::VolumeVariables::Indices; enum { pressureIdx = Indices::pressureIdx }; + using typename ParentType::Backend; + using typename ParentType::SolutionVector; + public: using ParentType::ParentType; + using typename ParentType::Variables; private: /*! 
* \brief Update the current solution of the Newton method * - * \param uCurrentIter The solution after the current Newton iteration \f$ u^{k+1} \f$ + * \param varsCurrentIter The variables after the current Newton iteration \f$ u^{k+1} \f$ * \param uLastIter The solution after the last Newton iteration \f$ u^k \f$ * \param deltaU The vector of differences between the last * iterative solution and the next one \f$ \Delta u^k \f$ */ - void choppedUpdate_(SolutionVector &uCurrentIter, + void choppedUpdate_(Variables &varsCurrentIter, const SolutionVector &uLastIter, const SolutionVector &deltaU) final { + auto uCurrentIter = Backend::getDofVector(varsCurrentIter); uCurrentIter = uLastIter; uCurrentIter -= deltaU; @@ -110,11 +114,11 @@ private: } } - // update the grid variables - this->solutionChanged_(uCurrentIter); + // update the variables + this->solutionChanged_(varsCurrentIter, uCurrentIter); if (this->enableResidualCriterion()) - this->computeResidualReduction_(uCurrentIter); + this->computeResidualReduction_(varsCurrentIter); } }; -- GitLab From c50617f43d6744e24949e84ad1707b9f7510ea1f Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 13:30:14 +0200 Subject: [PATCH 16/46] [nonequil][newton] use new backend --- .../nonequilibrium/newtonsolver.hh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh b/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh index 87b34a8e4b..e0c5c51e17 100644 --- a/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh +++ b/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh @@ -40,20 +40,27 @@ template class NonEquilibriumNewtonSolver : public NewtonSolver { using ParentType = NewtonSolver; - using SolutionVector = typename Assembler::ResidualType; + + using typename ParentType::Backend; + using typename ParentType::SolutionVector; + static constexpr bool assemblerExportsVariables = Impl::exportsVariables; public: using ParentType::ParentType; + using typename ParentType::Variables; - void newtonEndStep(SolutionVector &uCurrentIter, + void newtonEndStep(Variables &varsCurrentIter, const SolutionVector &uLastIter) final { - ParentType::newtonEndStep(uCurrentIter, uLastIter); + ParentType::newtonEndStep(varsCurrentIter, uLastIter); + const auto& uCurrentIter = Backend::getDofVector(varsCurrentIter); - auto& gridVariables = this->assembler().gridVariables(); // Averages the face velocities of a vertex. Implemented in the model. // The velocities are stored in the model. 
- gridVariables.calcVelocityAverage(uCurrentIter); + if constexpr(!assemblerExportsVariables) + this->assembler().gridVariables().calcVelocityAverage(uCurrentIter); + else + varsCurrentIter.calcVelocityAverage(uCurrentIter); } }; -- GitLab From 923f7968c769a8ef247b020eb6dcdb7d46654ade Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 13:30:32 +0200 Subject: [PATCH 17/46] [md][newton] use new backend --- dumux/multidomain/newtonsolver.hh | 62 ++++++++++++++++++------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/dumux/multidomain/newtonsolver.hh b/dumux/multidomain/newtonsolver.hh index dba8c58a41..898b263e89 100644 --- a/dumux/multidomain/newtonsolver.hh +++ b/dumux/multidomain/newtonsolver.hh @@ -50,7 +50,10 @@ template { using ParentType = NewtonSolver; - using SolutionVector = typename Assembler::ResidualType; + using typename ParentType::Backend; + using typename ParentType::SolutionVector; + + static constexpr bool assemblerExportsVariables = Impl::exportsVariables; template using PrimaryVariableSwitch = typename Detail::GetPVSwitchMultiDomain::type; @@ -63,6 +66,7 @@ class MultiDomainNewtonSolver: public NewtonSolver; public: + using typename ParentType::Variables; /*! * \brief The constructor @@ -89,10 +93,10 @@ public: /*! * \brief Indicates the beginning of a Newton iteration. */ - void newtonBeginStep(const SolutionVector& uCurrentIter) override + void newtonBeginStep(const Variables& varsCurrentIter) override { - ParentType::newtonBeginStep(uCurrentIter); - couplingManager_->updateSolution(uCurrentIter); + ParentType::newtonBeginStep(varsCurrentIter); + couplingManager_->updateSolution(Backend::getDofVector(varsCurrentIter)); } /*! @@ -102,14 +106,14 @@ public: * * \param u The initial solution */ - void newtonBegin(SolutionVector& u) override + void newtonBegin(Variables& vars) override { - ParentType::newtonBegin(u); + ParentType::newtonBegin(vars); using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { - this->initPriVarSwitch_(u, id, HasPriVarsSwitch::value>{}); + this->initPriVarSwitch_(vars, id, HasPriVarsSwitch::value>{}); }); } @@ -129,19 +133,24 @@ public: /*! * \brief Indicates that one Newton iteration was finished. 
* - * \param uCurrentIter The solution after the current Newton iteration + * \param varsCurrentIter The variables after the current Newton iteration * \param uLastIter The solution at the beginning of the current Newton iteration */ - void newtonEndStep(SolutionVector &uCurrentIter, const SolutionVector &uLastIter) override + void newtonEndStep(Variables& varsCurrentIter, const SolutionVector& uLastIter) override { using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { - this->invokePriVarSwitch_(uCurrentIter[id], id, HasPriVarsSwitch::value>{}); + auto& uCurrentIter = Backend::getDofVector(varsCurrentIter)[id]; + if constexpr (!assemblerExportsVariables) + this->invokePriVarSwitch_(this->assembler().gridVariables(id), + uCurrentIter, id, HasPriVarsSwitch::value>{}); + else + this->invokePriVarSwitch_(varsCurrentIter[id], uCurrentIter, id, HasPriVarsSwitch::value>{}); }); - ParentType::newtonEndStep(uCurrentIter, uLastIter); - couplingManager_->updateSolution(uCurrentIter); + ParentType::newtonEndStep(varsCurrentIter, uLastIter); + couplingManager_->updateSolution(Backend::getDofVector(varsCurrentIter)); } private: @@ -150,60 +159,61 @@ private: * \brief Reset the privar switch state, noop if there is no priVarSwitch */ template - void initPriVarSwitch_(SolutionVector&, Dune::index_constant id, std::false_type) {} + void initPriVarSwitch_(Variables&, Dune::index_constant id, std::false_type) {} /*! * \brief Switch primary variables if necessary */ template - void initPriVarSwitch_(SolutionVector& sol, Dune::index_constant id, std::true_type) + void initPriVarSwitch_(Variables& vars, Dune::index_constant id, std::true_type) { using namespace Dune::Hybrid; auto& priVarSwitch = *elementAt(priVarSwitches_, id); + auto& sol = Backend::getDofVector(vars)[id]; - priVarSwitch.reset(sol[id].size()); + priVarSwitch.reset(sol.size()); priVarsSwitchedInLastIteration_[i] = false; const auto& problem = this->assembler().problem(id); const auto& gridGeometry = this->assembler().gridGeometry(id); - auto& gridVariables = this->assembler().gridVariables(id); - priVarSwitch.updateDirichletConstraints(problem, gridGeometry, gridVariables, sol[id]); + if constexpr (!assemblerExportsVariables) + priVarSwitch.updateDirichletConstraints(problem, gridGeometry, this->assembler().gridVariables(id), sol); + else // This expects usage of MultiDomainGridVariables + priVarSwitch.updateDirichletConstraints(problem, gridGeometry, vars[id], sol[id]); } /*! * \brief Switch primary variables if necessary, noop if there is no priVarSwitch */ - template - void invokePriVarSwitch_(SubSol&, Dune::index_constant id, std::false_type) {} + template + void invokePriVarSwitch_(SubVars&, SubSol&, Dune::index_constant id, std::false_type) {} /*! 
* \brief Switch primary variables if necessary */ - template - void invokePriVarSwitch_(SubSol& uCurrentIter, Dune::index_constant id, std::true_type) + template + void invokePriVarSwitch_(SubVars& subVars, SubSol& uCurrentIter, Dune::index_constant id, std::true_type) { // update the variable switch (returns true if the pri vars at at least one dof were switched) // for disabled grid variable caching const auto& gridGeometry = this->assembler().gridGeometry(id); const auto& problem = this->assembler().problem(id); - auto& gridVariables = this->assembler().gridVariables(id); using namespace Dune::Hybrid; auto& priVarSwitch = *elementAt(priVarSwitches_, id); // invoke the primary variable switch - priVarsSwitchedInLastIteration_[i] = priVarSwitch.update(uCurrentIter, gridVariables, - problem, gridGeometry); + priVarsSwitchedInLastIteration_[i] = priVarSwitch.update(uCurrentIter, subVars, problem, gridGeometry); if (priVarsSwitchedInLastIteration_[i]) { for (const auto& element : elements(gridGeometry.gridView())) { // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch.updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, uCurrentIter); + priVarSwitch.updateSwitchedVolVars(problem, element, gridGeometry, subVars, uCurrentIter); // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch.updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, uCurrentIter); + priVarSwitch.updateSwitchedFluxVarsCache(problem, element, gridGeometry, subVars, uCurrentIter); } } } -- GitLab From 213bbafc9d53300b1ed393d2c1e1b0d6979f81e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Fri, 5 Feb 2021 12:40:55 +0100 Subject: [PATCH 18/46] [newton][varsbackend] remove makeDofVector() all implementation rely on the copy constructor, anyway. In case support for some solution vector type that is not copy constructible is required at one point, such logic can be reintroduced. 
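
As an illustration only (a minimal sketch, using the backend names from the solver
changes below), call sites now simply copy the dof vector via its copy constructor
instead of going through the removed helper:

    // previously:
    //   auto uLastIter = Backend::makeDofVector(Backend::getDofVector(vars));
    //   auto deltaU    = Backend::makeDofVector(Backend::getDofVector(vars));
    // now: rely on the copy constructor of the dof vector type
    auto uLastIter = Backend::getDofVector(vars);
    auto deltaU = Backend::getDofVector(vars);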
--- dumux/nonlinear/newtonsolver.hh | 8 ++++---- dumux/nonlinear/newtonvariablesbackend.hh | 9 --------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index 04077aad08..4db4b094f1 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -573,7 +573,7 @@ public: else { - auto uCurrentIter = Backend::makeDofVector(uLastIter); + auto uCurrentIter = uLastIter; uCurrentIter -= deltaU; solutionChanged_(vars, uCurrentIter); @@ -960,8 +960,8 @@ private: newtonBegin(vars); // the given solution is the initial guess - auto uLastIter = Backend::makeDofVector(Backend::getDofVector(vars)); - auto deltaU = Backend::makeDofVector(Backend::getDofVector(vars)); + auto uLastIter = Backend::getDofVector(vars); + auto deltaU = Backend::getDofVector(vars); // setup timers Dune::Timer assembleTimer(false); @@ -1128,7 +1128,7 @@ private: while (true) { - auto uCurrentIter = Backend::makeDofVector(deltaU); + auto uCurrentIter = deltaU; uCurrentIter *= -lambda; uCurrentIter += uLastIter; solutionChanged_(vars, uCurrentIter); diff --git a/dumux/nonlinear/newtonvariablesbackend.hh b/dumux/nonlinear/newtonvariablesbackend.hh index 48d6faab06..a154671f8c 100644 --- a/dumux/nonlinear/newtonvariablesbackend.hh +++ b/dumux/nonlinear/newtonvariablesbackend.hh @@ -64,9 +64,6 @@ public: static std::size_t size(const DofVector& d) { return 1; } - static DofVector makeDofVector(const DofVector& d) - { return d; } - static DofVector makeZeroDofVector(std::size_t size) { return 0.0; } }; @@ -85,9 +82,6 @@ public: static std::size_t size(const DofVector& d) { return d.size(); } - static DofVector makeDofVector(const DofVector& d) - { return d; } - static DofVector makeZeroDofVector(std::size_t size) { DofVector d; d.resize(size); return d; } }; @@ -117,9 +111,6 @@ public: return result; } - static DofVector makeDofVector(const DofVector& d) - { return d; } - static DofVector makeZeroDofVector(const VectorSizeInfo& size) { DofVector result; -- GitLab From 3b05389ab16a123c3e7a3dbd5b04504207855a9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Fri, 5 Feb 2021 12:52:59 +0100 Subject: [PATCH 19/46] [test][newton] avoid unnecessary mock interfaces --- test/nonlinear/newton/test_newton.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/nonlinear/newton/test_newton.cc b/test/nonlinear/newton/test_newton.cc index 3a0d48a0d7..5a08387777 100644 --- a/test/nonlinear/newton/test_newton.cc +++ b/test/nonlinear/newton/test_newton.cc @@ -32,8 +32,9 @@ class MockScalarAssembler { public: using ResidualType = Dune::BlockVector; - using Scalar = double; + using Variables = ResidualType; using JacobianMatrix = double; + using Scalar = double; void setLinearSystem() {} @@ -65,8 +66,6 @@ public: return res_[0]; } - void updateGridVariables(const ResidualType& sol) {} - private: JacobianMatrix jac_; ResidualType res_; -- GitLab From 6cf01ef61bdd7a85e94ba587065738148eae483a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Fri, 5 Feb 2021 13:35:06 +0100 Subject: [PATCH 20/46] [test][newton] use norm() of linear solver --- test/nonlinear/newton/test_newton.cc | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/test/nonlinear/newton/test_newton.cc b/test/nonlinear/newton/test_newton.cc index 5a08387777..248314baf7 100644 --- a/test/nonlinear/newton/test_newton.cc +++ b/test/nonlinear/newton/test_newton.cc @@ -2,6 +2,7 @@ #include #include 
+#include #include #include @@ -60,12 +61,6 @@ public: ResidualType& residual() { return res_; } - double residualNorm(const ResidualType& sol) - { - assembleResidual(sol); - return res_[0]; - } - private: JacobianMatrix jac_; ResidualType res_; @@ -77,11 +72,19 @@ public: void setResidualReduction(double residualReduction) {} template - bool solve (const double& A, Vector& x, const Vector& b) const + bool solve(const double& A, Vector& x, const Vector& b) const { x[0] = b[0]/A; return true; } + + double norm(const Dune::BlockVector& residual) const + { + assert(residual.size() == 1); + + using std::abs; + return abs(residual[0]); + } }; } // end namespace Dumux -- GitLab From af4a745af2d705f86910c83df96d5f51840d55b5 Mon Sep 17 00:00:00 2001 From: Timo Koch Date: Mon, 25 May 2020 17:32:28 +0200 Subject: [PATCH 21/46] [newton] Extract privarswitch implementation to separate class --- dumux/nonlinear/CMakeLists.txt | 1 + dumux/nonlinear/newtonsolver.hh | 125 +++------------- .../nonlinear/primaryvariableswitchadapter.hh | 137 ++++++++++++++++++ 3 files changed, 160 insertions(+), 103 deletions(-) create mode 100644 dumux/nonlinear/primaryvariableswitchadapter.hh diff --git a/dumux/nonlinear/CMakeLists.txt b/dumux/nonlinear/CMakeLists.txt index 2c5c2050a3..0cdeabcdf5 100644 --- a/dumux/nonlinear/CMakeLists.txt +++ b/dumux/nonlinear/CMakeLists.txt @@ -2,5 +2,6 @@ install(FILES findscalarroot.hh newtonconvergencewriter.hh newtonsolver.hh +primaryvariableswitchadapter.hh staggerednewtonconvergencewriter.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/nonlinear) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index 4db4b094f1..e12661ff3b 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -55,6 +55,7 @@ #include "newtonconvergencewriter.hh" #include "newtonvariablesbackend.hh" +#include "primaryvariableswitchadapter.hh" namespace Dumux { namespace Detail { @@ -69,13 +70,6 @@ struct supportsPartialReassembly {} }; -//! 
helper aliases to extract a primary variable switch from the VolumeVariables (if defined, yields int otherwise) -template -using DetectPVSwitch = typename Assembler::GridVariables::VolumeVariables::PrimaryVariableSwitch; - -template -using GetPVSwitch = Dune::Std::detected_or; - // helper struct and function detecting if the linear solver features a norm() function template using NormDetector = decltype(std::declval().norm(std::declval())); @@ -188,10 +182,13 @@ private: using ConvergenceWriter = ConvergenceWriterInterface; using TimeLoop = TimeLoopBase; - using PrimaryVariableSwitch = typename Detail::GetPVSwitch::type; - using HasPriVarsSwitch = typename Detail::GetPVSwitch::value_t; // std::true_type or std::false_type - static constexpr bool hasPriVarsSwitch() { return HasPriVarsSwitch::value; }; + // enable models with primary variable switch + // TODO: Always use ParentType::Variables once we require assemblers to export variables static constexpr bool assemblerExportsVariables = Impl::exportsVariables; + using Vars = std::conditional_t; + using PrimaryVariableSwitchAdapter = Dumux::PrimaryVariableSwitchAdapter; public: using typename ParentType::Variables; @@ -208,6 +205,7 @@ public: , endIterMsgStream_(std::ostringstream::out) , comm_(comm) , paramGroup_(paramGroup) + , priVarSwitchAdapter_(std::make_unique(paramGroup)) { initParams_(paramGroup); @@ -221,12 +219,6 @@ public: // initialize the partial reassembler if (enablePartialReassembly_) partialReassembler_ = std::make_unique(this->assembler()); - - if (hasPriVarsSwitch()) - { - const int priVarSwitchVerbosity = getParamFromGroup(paramGroup, "PrimaryVariableSwitch.Verbosity", 1); - priVarSwitch_ = std::make_unique(priVarSwitchVerbosity); - } } //! the communicator for parallel runs @@ -370,13 +362,20 @@ public: virtual void newtonBegin(Variables& initVars) { numSteps_ = 0; - initPriVarSwitch_(initVars, HasPriVarsSwitch{}); + + if constexpr (assemblerExportsVariables) + priVarSwitchAdapter_->initialize(Backend::getDofVector(initVars), initVars); + else // this assumes assembly with solution (i.e. Variables=SolutionVector) + priVarSwitchAdapter_->initialize(initVars, this->assembler().gridVariables()); + const auto& initSol = Backend::getDofVector(initVars); // write the initial residual if a convergence writer was set if (convergenceWriter_) { this->assembler().assembleResidual(initVars); + + // dummy vector, there is no delta before solving the linear system auto delta = Backend::makeZeroDofVector(Backend::size(initSol)); convergenceWriter_->write(initSol, delta, this->assembler().residual()); } @@ -591,7 +590,10 @@ public: virtual void newtonEndStep(Variables &vars, const SolutionVector &uLastIter) { - invokePriVarSwitch_(vars, HasPriVarsSwitch{}); + if constexpr (assemblerExportsVariables) + priVarSwitchAdapter_->invoke(Backend::getDofVector(vars), vars); + else // this assumes assembly with solution (i.e. Variables=SolutionVector) + priVarSwitchAdapter_->invoke(vars, this->assembler().gridVariables()); ++numSteps_; @@ -633,7 +635,7 @@ public: { // in case the model has a priVar switch and some some primary variables // actually switched their state in the last iteration, enforce another iteration - if (priVarsSwitchedInLastIteration_) + if (priVarSwitchAdapter_->switched()) return false; if (enableShiftCriterion_ && !enableResidualCriterion_) @@ -842,87 +844,6 @@ protected: bool enableResidualCriterion() const { return enableResidualCriterion_; } - /*! 
- * \brief Initialize the privar switch, noop if there is no priVarSwitch - */ - void initPriVarSwitch_(Variables&, std::false_type) {} - - /*! - * \brief Initialize the privar switch - */ - void initPriVarSwitch_(Variables& vars, std::true_type) - { - priVarSwitch_->reset(Backend::size(Backend::getDofVector(vars))); - priVarsSwitchedInLastIteration_ = false; - - const auto& problem = this->assembler().problem(); - const auto& gridGeometry = this->assembler().gridGeometry(); - - // if the assembler does not export the variables type - // we assume old-style assemblers which operate on solution - // vectors and return the grid variables - if constexpr (!assemblerExportsVariables) - priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, - this->assembler().gridVariables(), - vars); - else - priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, Backend::getDofVector(vars)); - } - - /*! - * \brief Switch primary variables if necessary, noop if there is no priVarSwitch - */ - void invokePriVarSwitch_(Variables&, std::false_type) {} - - /*! - * \brief Switch primary variables if necessary - */ - void invokePriVarSwitch_(Variables& vars, std::true_type) - { - // update the variable switch (returns true if the pri vars at at least one dof were switched) - // for disabled grid variable caching - const auto& gridGeometry = this->assembler().gridGeometry(); - const auto& problem = this->assembler().problem(); - - // we assume an old-style assembler if it doesn't export the Variable type - if constexpr (!assemblerExportsVariables) - { - auto& gridVariables = this->assembler().gridVariables(); - - // invoke the primary variable switch - priVarsSwitchedInLastIteration_ = priVarSwitch_->update(vars, gridVariables, problem, gridGeometry); - - if (priVarsSwitchedInLastIteration_) - { - for (const auto& element : elements(gridGeometry.gridView())) - { - // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, Backend::getDofVector(vars)); - - // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, Backend::getDofVector(vars)); - } - } - } - else - { - // invoke the primary variable switch - priVarsSwitchedInLastIteration_ = priVarSwitch_->update(Backend::getDofVector(vars), vars, problem, gridGeometry); - - if (priVarsSwitchedInLastIteration_) - { - for (const auto& element : elements(gridGeometry.gridView())) - { - // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, Backend::getDofVector(vars)); - - // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, Backend::getDofVector(vars)); - } - } - } - } - //! optimal number of iterations we want to achieve int targetSteps_; //! minimum number of iterations we do @@ -1383,9 +1304,7 @@ private: std::size_t numLinearSolverBreakdowns_ = 0; //! total number of linear solves that failed //! the class handling the primary variable switch - std::unique_ptr priVarSwitch_; - //! 
if we switched primary variables in the last iteration - bool priVarsSwitchedInLastIteration_ = false; + std::unique_ptr priVarSwitchAdapter_; //! convergence writer std::shared_ptr convergenceWriter_ = nullptr; diff --git a/dumux/nonlinear/primaryvariableswitchadapter.hh b/dumux/nonlinear/primaryvariableswitchadapter.hh new file mode 100644 index 0000000000..df202314c6 --- /dev/null +++ b/dumux/nonlinear/primaryvariableswitchadapter.hh @@ -0,0 +1,137 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/**************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Nonlinear + * \brief An adapter for the Newton to manage models with primary variable switch + */ +#ifndef DUMUX_NONLINEAR_PRIMARY_VARIABLE_SWITCH_ADAPTER_HH +#define DUMUX_NONLINEAR_PRIMARY_VARIABLE_SWITCH_ADAPTER_HH + +#include +#include + +namespace Dumux { +namespace Impl { + +//! helper aliases to extract a primary variable switch from the VolumeVariables (if defined, yields int otherwise) +template +using DetectPVSwitch = typename Variables::VolumeVariables::PrimaryVariableSwitch; + +template +using GetPVSwitch = Dune::Std::detected_or; + +template +using PrimaryVariableSwitch = typename GetPVSwitch::type; + +template +constexpr bool hasPriVarsSwitch() { return typename GetPVSwitch::value_t(); }; + +} // end namespace Impl + +/*! + * \ingroup Nonlinear + * \brief An adapter for the Newton to manage models with primary variable switch + */ +template ()> +class PrimaryVariableSwitchAdapter +{ + using PrimaryVariableSwitch = typename Impl::PrimaryVariableSwitch; + +public: + PrimaryVariableSwitchAdapter(const std::string& paramGroup = "") + { + const int priVarSwitchVerbosity = getParamFromGroup(paramGroup, "PrimaryVariableSwitch.Verbosity", 1); + priVarSwitch_ = std::make_unique(priVarSwitchVerbosity); + } + + /*! + * \brief Initialize the privar switch + */ + template + void initialize(SolutionVector& sol, Variables& vars) + { + priVarSwitch_->reset(sol.size()); + priVarsSwitchedInLastIteration_ = false; + const auto& problem = vars.curGridVolVars().problem(); + const auto& gridGeometry = problem.gridGeometry(); + priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, sol); + } + + /*! 
+ * \brief Switch primary variables if necessary + */ + template + void invoke(SolutionVector& uCurrentIter, Variables& vars) + { + // update the variable switch (returns true if the pri vars at at least one dof were switched) + // for disabled grid variable caching + const auto& problem = vars.curGridVolVars().problem(); + const auto& gridGeometry = problem.gridGeometry(); + + // invoke the primary variable switch + priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, vars, problem, gridGeometry); + if (priVarsSwitchedInLastIteration_) + { + for (const auto& element : elements(gridGeometry.gridView())) + { + // if the volume variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, uCurrentIter); + + // if the flux variables are cached globally, we need to update those where the primary variables have been switched + priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, uCurrentIter); + } + } + } + + /*! + * \brief Whether the primary variables have been switched in the last call to invoke + */ + bool switched() const + { return priVarsSwitchedInLastIteration_; } + +private: + //! the class handling the primary variable switch + std::unique_ptr priVarSwitch_; + //! if we switched primary variables in the last iteration + bool priVarsSwitchedInLastIteration_ = false; +}; + +/*! + * \ingroup Nonlinear + * \brief An empty adapter for the Newton for models without primary variable switch + */ +template +class PrimaryVariableSwitchAdapter +{ +public: + PrimaryVariableSwitchAdapter(const std::string& paramGroup = "") {} + + template + void initialize(SolutionVector&, Variables&) {} + + template + void invoke(SolutionVector&, Variables&) {} + + bool switched() const { return false; } +}; + +} // end namespace Dumux + +#endif -- GitLab From 671cf5d4982d971dc0a4c6c1c43e869bc8543a9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 12:45:10 +0100 Subject: [PATCH 22/46] [newton] more distinction between new&old assemblers --- dumux/nonlinear/newtonsolver.hh | 53 +++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index e12661ff3b..c57ca643ae 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -292,12 +292,16 @@ public: */ void solve(Variables& vars, TimeLoop& timeLoop) override { - if (this->assembler().isStationaryProblem()) - DUNE_THROW(Dune::InvalidStateException, "Using time step control with stationary problem makes no sense!"); + if constexpr (!assemblerExportsVariables) + { + if (this->assembler().isStationaryProblem()) + DUNE_THROW(Dune::InvalidStateException, "Using time step control with stationary problem makes no sense!"); + } // try solving the non-linear system for (std::size_t i = 0; i <= maxTimeStepDivisions_; ++i) { + // linearize & solve const bool converged = solve_(vars); @@ -306,28 +310,33 @@ public: else if (!converged && i < maxTimeStepDivisions_) { - // set solution to previous solution - Backend::update(vars, this->assembler().prevSol()); - - // if this is true, we assume old-style assemblers/grid variables - // TODO: reset time step is more efficient as it simply copies - // prevVolVars into curVolVars. Above (in the new style) - // there is an update. We should think about how to achieve - // this more efficiently. 
- // TODO: Is there a test with time step reductions? It should - // probably be tested if this works properly. - if (!assemblerExportsVariables) - this->assembler().resetTimeStep(Backend::getDofVector(vars)); - - if (verbosity_ >= 1) + if constexpr (assemblerExportsVariables) + DUNE_THROW(Dune::NotImplemented, "Time step reset for new assembly methods"); + else { - const auto dt = timeLoop.timeStepSize(); - std::cout << Fmt::format("Newton solver did not converge with dt = {} seconds. ", dt) - << Fmt::format("Retrying with time step of dt = {} seconds.\n", dt*retryTimeStepReductionFactor_); + // set solution to previous solution + Backend::update(vars, this->assembler().prevSol()); + + // if this is true, we assume old-style assemblers/grid variables + // TODO: reset time step is more efficient as it simply copies + // prevVolVars into curVolVars. Above (in the new style) + // there will probably be an update. We should think about + // how to achieve this efficiently. + // TODO: Is there a test with time step reductions? It should + // probably be tested if this works properly. + if (!assemblerExportsVariables) + this->assembler().resetTimeStep(Backend::getDofVector(vars)); + + if (verbosity_ >= 1) + { + const auto dt = timeLoop.timeStepSize(); + std::cout << Fmt::format("Newton solver did not converge with dt = {} seconds. ", dt) + << Fmt::format("Retrying with time step of dt = {} seconds.\n", dt*retryTimeStepReductionFactor_); + } + + // try again with dt = dt * retryTimeStepReductionFactor_ + timeLoop.setTimeStepSize(timeLoop.timeStepSize() * retryTimeStepReductionFactor_); } - - // try again with dt = dt * retryTimeStepReductionFactor_ - timeLoop.setTimeStepSize(timeLoop.timeStepSize() * retryTimeStepReductionFactor_); } else -- GitLab From b144cbe2fae1cf984728bb4bfc385cec6efcee2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 12:48:23 +0100 Subject: [PATCH 23/46] [test][newton] remove obsolete assembler interface --- test/nonlinear/newton/test_newton.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/nonlinear/newton/test_newton.cc b/test/nonlinear/newton/test_newton.cc index 248314baf7..c1d81048da 100644 --- a/test/nonlinear/newton/test_newton.cc +++ b/test/nonlinear/newton/test_newton.cc @@ -39,8 +39,6 @@ public: void setLinearSystem() {} - bool isStationaryProblem() { return false; } - ResidualType prevSol() { return ResidualType(0.0); } void resetTimeStep(const ResidualType& sol) {} -- GitLab From 5f7a94e99e52e9bb2c208db6e5ec17ce21b451e5 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 12:06:32 +0200 Subject: [PATCH 24/46] TEMP [test][1p] test assembly with grid variables --- .../1p/compressible/instationary/main.cc | 43 +++++++++++++++---- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/test/porousmediumflow/1p/compressible/instationary/main.cc b/test/porousmediumflow/1p/compressible/instationary/main.cc index 509e4eb9fa..2805afaa99 100644 --- a/test/porousmediumflow/1p/compressible/instationary/main.cc +++ b/test/porousmediumflow/1p/compressible/instationary/main.cc @@ -48,6 +48,33 @@ #include #include +//! 
Custom assembler to test assembly with grid variables +template +class GridVarsAssembler : public Assembler +{ +public: + using Assembler::Assembler; + using typename Assembler::GridVariables; + using typename Assembler::ResidualType; + + using Variables = GridVariables; + + void assembleJacobianAndResidual(const GridVariables& gridVars) + { + Assembler::assembleJacobianAndResidual(gridVars.dofs()); + } + + void assembleResidual(const GridVariables& gridVars) + { + Assembler::assembleResidual(gridVars.dofs()); + } + + auto residualNorm(const GridVariables& gridVars) + { + return Assembler::residualNorm(gridVars.dofs()); + } +}; + int main(int argc, char** argv) { using namespace Dumux; @@ -93,14 +120,12 @@ int main(int argc, char** argv) // the solution vector using SolutionVector = GetPropType; - SolutionVector x(gridGeometry->numDofs()); - problem->applyInitialSolution(x); - auto xOld = x; // the grid variables using GridVariables = GetPropType; - auto gridVariables = std::make_shared(problem, gridGeometry); - gridVariables->init(x); + const auto init = [problem] (auto& x) { problem->applyInitialSolution(x); }; + auto gridVariables = std::make_shared(problem, gridGeometry, init); + auto xOld = gridVariables->dofs(); // get some time loop parameters using Scalar = GetPropType; @@ -109,7 +134,7 @@ int main(int argc, char** argv) auto maxDt = getParam("TimeLoop.MaxTimeStepSize"); // intialize the vtk output module - VtkOutputModule vtkWriter(*gridVariables, x, problem->name()); + VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name()); using VelocityOutput = GetPropType; vtkWriter.addVelocityOutput(std::make_shared(*gridVariables)); using IOFields = GetPropType; @@ -121,7 +146,7 @@ int main(int argc, char** argv) timeLoop->setMaxTimeStepSize(maxDt); // the assembler with time loop for instationary problem - using Assembler = FVAssembler; + using Assembler = GridVarsAssembler>; auto assembler = std::make_shared(problem, gridGeometry, gridVariables, timeLoop, xOld); // the linear solver @@ -139,10 +164,10 @@ int main(int argc, char** argv) timeLoop->start(); do { // linearize & solve - nonLinearSolver.solve(x, *timeLoop); + nonLinearSolver.solve(*gridVariables, *timeLoop); // make the new solution the old solution - xOld = x; + xOld = gridVariables->dofs(); gridVariables->advanceTimeStep(); // advance to the time loop to the next step -- GitLab From 2c4dcacde158ec570c85f2600f5f48240db08961 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Wed, 21 Oct 2020 12:06:50 +0200 Subject: [PATCH 25/46] TEMP [test][waterair] test assembly with grid variables --- test/porousmediumflow/2p2c/waterair/main.cc | 43 ++++++++++++++++----- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/test/porousmediumflow/2p2c/waterair/main.cc b/test/porousmediumflow/2p2c/waterair/main.cc index e58832f747..6ac940426a 100644 --- a/test/porousmediumflow/2p2c/waterair/main.cc +++ b/test/porousmediumflow/2p2c/waterair/main.cc @@ -45,6 +45,33 @@ // the problem definitions #include "problem.hh" +//! 
Custom assembler to test assembly with grid variables +template +class GridVarsAssembler : public Assembler +{ +public: + using Assembler::Assembler; + using typename Assembler::GridVariables; + using typename Assembler::ResidualType; + + using Variables = GridVariables; + + void assembleJacobianAndResidual(const GridVariables& gridVars) + { + Assembler::assembleJacobianAndResidual(gridVars.dofs()); + } + + void assembleResidual(const GridVariables& gridVars) + { + Assembler::assembleResidual(gridVars.dofs()); + } + + auto residualNorm(const GridVariables& gridVars) + { + return Assembler::residualNorm(gridVars.dofs()); + } +}; + int main(int argc, char** argv) { using namespace Dumux; @@ -84,17 +111,15 @@ int main(int argc, char** argv) // the solution vector using SolutionVector = GetPropType; - SolutionVector x; - problem->applyInitialSolution(x); - auto xOld = x; // the grid variables using GridVariables = GetPropType; - auto gridVariables = std::make_shared(problem, gridGeometry); - gridVariables->init(x); + const auto init = [problem] (auto& x) { problem->applyInitialSolution(x); }; + auto gridVariables = std::make_shared(problem, gridGeometry, init); + auto xOld = gridVariables->dofs(); // intialize the vtk output module - VtkOutputModule vtkWriter(*gridVariables, x, problem->name()); + VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name()); using VelocityOutput = GetPropType; vtkWriter.addVelocityOutput(std::make_shared(*gridVariables)); // Add model specific output fields @@ -110,7 +135,7 @@ int main(int argc, char** argv) timeLoop->setMaxTimeStepSize(maxDt); // the assembler with time loop for instationary problem - using Assembler = FVAssembler; + using Assembler = GridVarsAssembler>; auto assembler = std::make_shared(problem, gridGeometry, gridVariables, timeLoop, xOld); // the linear solver @@ -125,10 +150,10 @@ int main(int argc, char** argv) timeLoop->start(); do { // solve the non-linear system with time step control - nonLinearSolver.solve(x, *timeLoop); + nonLinearSolver.solve(*gridVariables, *timeLoop); // make the new solution the old solution - xOld = x; + xOld = gridVariables->dofs(); gridVariables->advanceTimeStep(); // advance to the time loop to the next step -- GitLab From f4678bb0e67f66c20cfa610765a958e69f5611fd Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 16:48:14 +0200 Subject: [PATCH 26/46] [newton] make varsbackend newton-agnostic --- dumux/common/CMakeLists.txt | 1 + .../variablesbackend.hh} | 44 ++++++++++--------- dumux/nonlinear/newtonsolver.hh | 5 ++- 3 files changed, 27 insertions(+), 23 deletions(-) rename dumux/{nonlinear/newtonvariablesbackend.hh => common/variablesbackend.hh} (80%) diff --git a/dumux/common/CMakeLists.txt b/dumux/common/CMakeLists.txt index 86e31cd63a..5bd0405ba6 100644 --- a/dumux/common/CMakeLists.txt +++ b/dumux/common/CMakeLists.txt @@ -49,4 +49,5 @@ timemanager.hh valgrind.hh variablelengthspline_.hh variables.hh +variablesbackend.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/common) diff --git a/dumux/nonlinear/newtonvariablesbackend.hh b/dumux/common/variablesbackend.hh similarity index 80% rename from dumux/nonlinear/newtonvariablesbackend.hh rename to dumux/common/variablesbackend.hh index a154671f8c..52e506e4ae 100644 --- a/dumux/nonlinear/newtonvariablesbackend.hh +++ b/dumux/common/variablesbackend.hh @@ -19,10 +19,12 @@ /*! 
* \file * \ingroup Nonlinear - * \brief Backends for the Newton for different variable types + * \brief Backends for operations on different solution vector types + * or more generic variable classes to be used in places where + * several different types/layouts should be supported. */ -#ifndef DUMUX_NEWTON_VARIABLES_BACKEND_HH -#define DUMUX_NEWTON_VARIABLES_BACKEND_HH +#ifndef DUMUX_VARIABLES_BACKEND_HH +#define DUMUX_VARIABLES_BACKEND_HH #include #include @@ -49,14 +51,14 @@ namespace Dumux { * \brief Class providing operations with primary variable vectors */ template -class NewtonDofBackend; +class DofBackend; /*! * \ingroup Nonlinear - * \brief Specialization providing Newton operations for scalar/number types + * \brief Specialization providing operations for scalar/number types */ template -class NewtonDofBackend::value, Scalar>> +class DofBackend::value, Scalar>> { public: using DofVector = Scalar; //!< the type of the dofs parametrizing the variables object @@ -70,10 +72,10 @@ public: /*! * \ingroup Nonlinear - * \brief Specialization providing Newton operations for block vectors + * \brief Specialization providing operations for block vectors */ template -class NewtonDofBackend> +class DofBackend> { public: @@ -88,10 +90,10 @@ public: /*! * \ingroup Nonlinear - * \brief Specialization providing Newton operations for multitype block vectors + * \brief Specialization providing operations for multitype block vectors */ template -class NewtonDofBackend> +class DofBackend> { using DV = Dune::MultiTypeBlockVector; static constexpr auto numBlocks = DV::size(); @@ -128,19 +130,19 @@ template using SolutionVectorType = typename Vars::SolutionVector; template -class NewtonVariablesBackend; +class VariablesBackend; /*! * \ingroup Nonlinear - * \brief Class providing Newton operations for scalar/number types + * \brief Class providing operations for primary variable vector/scalar types * \note We assume the variables being simply a dof vector if we * do not find the variables class to export `SolutionVector`. */ template -class NewtonVariablesBackend -: public NewtonDofBackend +class VariablesBackend +: public DofBackend { - using ParentType = NewtonDofBackend; + using ParentType = DofBackend; public: using Variables = Vars; @@ -162,12 +164,12 @@ public: /*! * \file * \ingroup Nonlinear - * \brief Class providing Newton operations for generic variable - * types, possibly containing also secondary variables. + * \brief Class providing operations for generic variable classes, + * containing primary and possibly also secondary variables. */ template -class NewtonVariablesBackend -: public NewtonDofBackend +class VariablesBackend +: public DofBackend { public: using DofVector = typename Vars::SolutionVector; @@ -189,13 +191,13 @@ public: /*! * \ingroup Nonlinear - * \brief Class providing Newton operations for generic variable classes + * \brief Class providing operations for generic variable classes * that represent the state of a numerical solution, possibly * consisting of primary/secondary variables and information on * the time level. 
*/ template -using NewtonVariablesBackend = Impl::NewtonVariablesBackend>; +using VariablesBackend = Impl::VariablesBackend>; } // end namespace Dumux diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index c57ca643ae..57fb554f84 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -48,13 +48,14 @@ #include #include #include +#include + #include #include #include #include #include "newtonconvergencewriter.hh" -#include "newtonvariablesbackend.hh" #include "primaryvariableswitchadapter.hh" namespace Dumux { @@ -173,7 +174,7 @@ class NewtonSolver : public PDESolver using ParentType = PDESolver; protected: - using Backend = NewtonVariablesBackend; + using Backend = VariablesBackend; using SolutionVector = typename Backend::DofVector; private: -- GitLab From db12174e9227e5c48a8c0a8bb64892121269d751 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Mon, 8 Feb 2021 12:59:45 +0100 Subject: [PATCH 27/46] [disc][fvelemvars] export local views --- dumux/discretization/fvgridvariables.hh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dumux/discretization/fvgridvariables.hh b/dumux/discretization/fvgridvariables.hh index aa42c3d015..21c518847f 100644 --- a/dumux/discretization/fvgridvariables.hh +++ b/dumux/discretization/fvgridvariables.hh @@ -51,13 +51,14 @@ class FVGridVariablesLocalView using GridView = typename GridGeometry::GridView; using Element = typename GridView::template Codim<0>::Entity; - using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView; - using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView; - public: //! export corresponding grid-wide class using GridVariables = GV; + //! export underlying local views + using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView; + using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView; + //! Constructor FVGridVariablesLocalView(const GridVariables& gridVariables) : gridVariables_(&gridVariables) -- GitLab From 488c673397150db8020bc6a4459be5a855c4bebc Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 16:59:30 +0200 Subject: [PATCH 28/46] [linear][pdesolver] support variables-based assembly --- dumux/linear/pdesolver.hh | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/dumux/linear/pdesolver.hh b/dumux/linear/pdesolver.hh index ce204474e9..4bb0d55364 100644 --- a/dumux/linear/pdesolver.hh +++ b/dumux/linear/pdesolver.hh @@ -42,6 +42,8 @@ #include #include #include +#include + #include #include @@ -67,6 +69,8 @@ class LinearPDESolver : public PDESolver using TimeLoop = TimeLoopBase; public: + using typename ParentType::Variables; + /*! * \brief The Constructor */ @@ -85,7 +89,7 @@ public: /*! * \brief Solve a linear PDE system */ - void solve(SolutionVector& uCurrentIter) override + void solve(Variables& vars) override { Dune::Timer assembleTimer(false); Dune::Timer solveTimer(false); @@ -102,7 +106,7 @@ public: // linearize the problem at the current solution assembleTimer.start(); - this->assembler().assembleJacobianAndResidual(uCurrentIter); + this->assembler().assembleJacobianAndResidual(vars); assembleTimer.stop(); /////////////// @@ -124,7 +128,8 @@ public: solveTimer.start(); // set the delta vector to zero before solving the linear system! 
- SolutionVector deltaU(uCurrentIter); + using Backend = VariablesBackend; + auto deltaU = Backend::getDofVector(vars); deltaU = 0; // solve by calling the appropriate implementation depending on whether the linear solver @@ -146,8 +151,12 @@ public: // update the current solution and secondary variables updateTimer.start(); + + // TODO: This currently does one additional copy in case assembly from solution is used + auto uCurrentIter = Backend::getDofVector(vars); uCurrentIter -= deltaU; - this->assembler().updateGridVariables(uCurrentIter); + Backend::update(vars, uCurrentIter); + updateTimer.stop(); if (verbose_) { -- GitLab From 4eb12dc965954e084438234dec1f7e4498facf10 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:02:20 +0200 Subject: [PATCH 29/46] [assembly] introduce fv operators --- dumux/assembly/fv/CMakeLists.txt | 3 + dumux/assembly/fv/operators.hh | 148 +++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+) create mode 100644 dumux/assembly/fv/CMakeLists.txt create mode 100644 dumux/assembly/fv/operators.hh diff --git a/dumux/assembly/fv/CMakeLists.txt b/dumux/assembly/fv/CMakeLists.txt new file mode 100644 index 0000000000..72883ac6ae --- /dev/null +++ b/dumux/assembly/fv/CMakeLists.txt @@ -0,0 +1,3 @@ +install(FILES +operators.hh +DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/assembly/fv) diff --git a/dumux/assembly/fv/operators.hh b/dumux/assembly/fv/operators.hh new file mode 100644 index 0000000000..2c8352e7c9 --- /dev/null +++ b/dumux/assembly/fv/operators.hh @@ -0,0 +1,148 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief The base class for the sub-control entity-local evaluation of + * the terms of equations in the context of finite-volume schemes + */ +#ifndef DUMUX_FV_OPERATORS_HH +#define DUMUX_FV_OPERATORS_HH + +#include +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief The base class for the sub-control entity-local evaluation of + * the terms of equations in the context of finite-volume schemes + * \todo TODO: Should operators have a state? That is, be constructed and have non-static functions? 
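+ * \note As a rough sketch (names are illustrative only), models are expected to
+ *       derive from this class and overload the operators they need, as is done,
+ *       for instance, by the immiscible operators introduced further below:
+ * \code
+ * class MyModelOperators : public FVOperators<MyElementVariables>
+ * {
+ * public:
+ *     // overload storage()/flux() with the model-specific physics;
+ *     // source() defaults to the problem-defined volume and point sources
+ * };
+ * \endcode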
+ */ +template +class FVOperators +{ + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache; + using Scalar = typename GridVariables::Scalar; + + using PrimaryVariables = typename GridVariables::PrimaryVariables; + static constexpr int numEq = PrimaryVariables::size(); + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + // user-input defined in the problem + using Problem = std::decay_t().gridVolVars().problem())>; + +public: + //! export the type used to store scalar values for all equations + //! TODO: How to obtain user-defined NumEqVector (ProblemTraits?) + using NumEqVector = Dune::FieldVector; + + // export the types of the flux/source/storage terms + using FluxTerm = NumEqVector; + using SourceTerm = NumEqVector; + using StorageTerm = NumEqVector; + + /*! + * \name Model specific interfaces + * \note The following method are the model specific implementations of the + * operators appearing in the equation and should be overloaded by the + * implementations. + */ + // \{ + + /*! + * \brief Compute the storage term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param scv The sub-control volume + * \param volVars The primary & secondary variables evaluated for the scv + * \note This must be overloaded by the implementation + */ + static StorageTerm storage(const Problem& problem, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { DUNE_THROW(Dune::NotImplemented, "Storage operator not implemented!"); } + + /*! + * \brief Compute the flux term of the equations for the given sub-control volume face + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param elemFluxVarsCache The element-local view on the grid flux variables cache + * \param scvf The sub-control volume face for which the flux term is to be computed + * \note This must be overloaded by the implementation + */ + static FluxTerm flux(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const ElementFluxVariablesCache& elemFluxVarsCache, + const SubControlVolumeFace& scvf) + { DUNE_THROW(Dune::NotImplemented, "This model does not implement a flux method!"); } + + /*! 
+ * \brief Compute the source term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param scv The sub-control volume for which the source term is to be computed + * \note This is a default implementation forwarding to interfaces in the problem + */ + static SourceTerm source(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const SubControlVolume& scv) + { + SourceTerm source(0.0); + + // add contributions from volume flux sources + source += problem.source(element, fvGeometry, elemVolVars, scv); + + // add contribution from possible point sources + source += problem.scvPointSources(element, fvGeometry, elemVolVars, scv); + + // multiply with scv volume + source *= Extrusion::volume(scv)*elemVolVars[scv].extrusionFactor(); + + return source; + } +}; + +} // end namespace Dumux + +#endif -- GitLab From d9bb4b17e83eac9c36d929e831511b7575d3e859 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:07:19 +0200 Subject: [PATCH 30/46] [nonisothermal] add fv-type operators --- .../nonisothermal/CMakeLists.txt | 1 + .../nonisothermal/operators.hh | 149 ++++++++++++++++++ 2 files changed, 150 insertions(+) create mode 100644 dumux/porousmediumflow/nonisothermal/operators.hh diff --git a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt index 24dc71bd45..5db30722a0 100644 --- a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt +++ b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt @@ -3,5 +3,6 @@ indices.hh iofields.hh localresidual.hh model.hh +operators.hh volumevariables.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/nonisothermal) diff --git a/dumux/porousmediumflow/nonisothermal/operators.hh b/dumux/porousmediumflow/nonisothermal/operators.hh new file mode 100644 index 0000000000..1e12074f86 --- /dev/null +++ b/dumux/porousmediumflow/nonisothermal/operators.hh @@ -0,0 +1,149 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup NIModel + * \brief Sub-control entity-local evaluation of the operators + * involved in the system of equations non-isothermal models + * that assume thermal equilibrium between all phases. 
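+ * \note Under the assumption of local thermal equilibrium, the energy storage
+ *       assembled via these operators reads (see the member functions below)
+ *       \f$ \sum_\alpha \phi \varrho_\alpha u_\alpha S_\alpha
+ *           + (1 - \phi) \varrho_\mathrm{s} c_\mathrm{s} T \f$,
+ *       i.e. the sum over the fluid-phase contributions plus that of the solid matrix.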
+ */ +#ifndef DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH +#define DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH + +namespace Dumux { + +/*! + * \ingroup NIModel + * \brief Sub-control entity-local evaluation of the operators + * involved in the system of equations of non-isothermal models + * that assume thermal equilibrium between all phases. + * \tparam FluxVariables the type that is responsible for computing the individual + * flux contributions, i.e., advective, diffusive, conduvtive... + * \tparam ElementVariables the type of element-local view on the grid variables + */ +template +class FVNonIsothermalOperators +{ + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + static constexpr int energyEqIdx = ModelTraits::Indices::energyEqIdx; + +public: + + /*! + * \brief The energy storage in the fluid phase with index phaseIdx + * + * \param storage The mass of the component within the sub-control volume + * \param scv The sub-control volume + * \param volVars The volume variables + * \param phaseIdx The phase index + */ + template + static void addPhaseStorage(NumEqVector& storage, + const SubControlVolume& scv, + const VolumeVariables& volVars, + int phaseIdx) + { + storage[energyEqIdx] += volVars.porosity() + * volVars.density(phaseIdx) + * volVars.internalEnergy(phaseIdx) + * volVars.saturation(phaseIdx); + } + + /*! + * \brief The energy storage in the solid matrix + * + * \param storage The mass of the component within the sub-control volume + * \param scv The sub-control volume + * \param volVars The volume variables + */ + template + static void addSolidPhaseStorage(NumEqVector& storage, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { + storage[energyEqIdx] += volVars.temperature() + * volVars.solidHeatCapacity() + * volVars.solidDensity() + * (1.0 - volVars.porosity()); + } + + /*! + * \brief The advective phase energy fluxes + * + * \param flux The flux + * \param fluxVars The flux variables. + * \param phaseIdx The phase index + */ + template + static void addHeatConvectionFlux(NumEqVector& flux, + FluxVariables& fluxVars, + int phaseIdx) + { + auto upwindTerm = [phaseIdx](const auto& volVars) + { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx)*volVars.enthalpy(phaseIdx); }; + + flux[energyEqIdx] += fluxVars.advectiveFlux(phaseIdx, upwindTerm); + } + + /*! + * \brief The diffusive energy fluxes + * + * \param flux The flux + * \param fluxVars The flux variables. + */ + template + static void addHeatConductionFlux(NumEqVector& flux, + FluxVariables& fluxVars) + { + flux[energyEqIdx] += fluxVars.heatConductionFlux(); + } + + /*! 
+ * \brief heat transfer between the phases for nonequilibrium models + * + * \param source The source which ought to be simulated + * \param element An element which contains part of the control volume + * \param fvGeometry The finite-volume geometry + * \param elemVolVars The volume variables of the current element + * \param scv The sub-control volume over which we integrate the source term + */ + template + static void addSourceEnergy(NumEqVector& source, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const SubControlVolume &scv) + {} +}; + +} // end namespace Dumux + +#endif -- GitLab From 6b03f719008fd4cf0674dc36a8f7f39c58611e48 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:09:16 +0200 Subject: [PATCH 31/46] [immiscible] add fv-type operators --- .../immiscible/CMakeLists.txt | 1 + .../porousmediumflow/immiscible/operators.hh | 180 ++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 dumux/porousmediumflow/immiscible/operators.hh diff --git a/dumux/porousmediumflow/immiscible/CMakeLists.txt b/dumux/porousmediumflow/immiscible/CMakeLists.txt index 192e7955c6..ccb0268dc6 100644 --- a/dumux/porousmediumflow/immiscible/CMakeLists.txt +++ b/dumux/porousmediumflow/immiscible/CMakeLists.txt @@ -1,3 +1,4 @@ install(FILES localresidual.hh +operators.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/immiscible) diff --git a/dumux/porousmediumflow/immiscible/operators.hh b/dumux/porousmediumflow/immiscible/operators.hh new file mode 100644 index 0000000000..20eae5b700 --- /dev/null +++ b/dumux/porousmediumflow/immiscible/operators.hh @@ -0,0 +1,180 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup PorousmediumflowModels + * \brief Sub-control entity-local evaluation of the operators + * within in the systems of equations of n-phase immiscible models. + */ +#ifndef DUMUX_FV_IMMISCIBLE_OPERATORS_HH +#define DUMUX_FV_IMMISCIBLE_OPERATORS_HH + +#include + +#include +#include +#include + +namespace Dumux { +namespace Impl { + + template struct DefaultEnergyOperatorsChooser; + template + struct DefaultEnergyOperatorsChooser { using Type = void; }; + template + struct DefaultEnergyOperatorsChooser { using Type = FVNonIsothermalOperators; }; + + template + using DefaultEnergyOperators = typename DefaultEnergyOperatorsChooser::Type; + +} // end namespace Impl + +/*! 
+ * \ingroup PorousmediumflowModels + * \brief Sub-control entity-local evaluation of the operators + * within in the systems of equations of n-phase immiscible models. + * \tparam ModelTraits defines model-related types and variables (e.g. number of phases) + * \tparam FluxVariables the type that is responsible for computing the individual + * flux contributions, i.e., advective, diffusive, convective... + * \tparam ElementVariables the type of element-local view on the grid variables + * \tparam EnergyOperators optional template argument, specifying the class that + * handles the operators related to non-isothermal effects. + * These are assumed to be taken into account by the model + * if this template argument is other than void. + */ +template> +class FVImmiscibleOperators +: public FVOperators +{ + using ParentType = FVOperators; + + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using Problem = std::decay_t().gridVolVars().problem())>; + using Indices = typename ModelTraits::Indices; + + static constexpr int numPhases = ModelTraits::numFluidPhases(); + static constexpr int conti0EqIdx = ModelTraits::Indices::conti0EqIdx; + static constexpr bool isNonIsothermal = !std::is_same_v; + +public: + //! export the type used to store scalar values for all equations + using typename ParentType::NumEqVector; + + // export the types of the flux/source/storage terms + using typename ParentType::FluxTerm; + using typename ParentType::SourceTerm; + using typename ParentType::StorageTerm; + + /*! + * \brief Compute the storage term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param scv The sub-control volume + * \param volVars The primary & secondary variables evaluated for the scv + * \note This must be overloaded by the implementation + */ + static StorageTerm storage(const Problem& problem, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { + // partial time derivative of the phase mass + StorageTerm storage; + for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx) + { + auto eqIdx = conti0EqIdx + phaseIdx; + storage[eqIdx] = volVars.porosity() + * volVars.density(phaseIdx) + * volVars.saturation(phaseIdx); + + // The energy storage in the fluid phase with index phaseIdx + if constexpr (isNonIsothermal) + EnergyOperators::fluidPhaseStorage(storage, scv, volVars, phaseIdx); + } + + // The energy storage in the solid matrix + if constexpr (isNonIsothermal) + EnergyOperators::solidPhaseStorage(storage, scv, volVars); + + // multiply with volume + storage *= Extrusion::volume(scv)*volVars.extrusionFactor(); + + return storage; + } + + /*! 
+ * \brief Compute the flux term of the equations for the given sub-control volume face + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param elemFluxVarsCache The element-local view on the grid flux variables cache + * \param scvf The sub-control volume face for which the flux term is to be computed + * \note This must be overloaded by the implementation + */ + static FluxTerm flux(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const ElementFluxVariablesCache& elemFluxVarsCache, + const SubControlVolumeFace& scvf) + { + FluxVariables fluxVars; + fluxVars.init(problem, element, fvGeometry, elemVolVars, scvf, elemFluxVarsCache); + + NumEqVector flux; + for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx) + { + // the physical quantities for which we perform upwinding + auto upwindTerm = [phaseIdx](const auto& volVars) + { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx); }; + + auto eqIdx = conti0EqIdx + phaseIdx; + flux[eqIdx] = fluxVars.advectiveFlux(phaseIdx, upwindTerm); + + // Add advective phase energy fluxes. For isothermal model the contribution is zero. + if constexpr (isNonIsothermal) + EnergyOperators::heatConvectionFlux(flux, fluxVars, phaseIdx); + } + + // Add diffusive energy fluxes. For isothermal model the contribution is zero. + if constexpr (isNonIsothermal) + EnergyOperators::heatConductionFlux(flux, fluxVars); + + return flux; + } +}; + +} // end namespace Dumux + +#endif -- GitLab From 5ccd1a02e715ef643ecb3afe8212174c27907ade Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:11:54 +0200 Subject: [PATCH 32/46] [assembly] add fv local operator --- dumux/assembly/fv/CMakeLists.txt | 1 + dumux/assembly/fv/localoperator.hh | 248 +++++++++++++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 dumux/assembly/fv/localoperator.hh diff --git a/dumux/assembly/fv/CMakeLists.txt b/dumux/assembly/fv/CMakeLists.txt index 72883ac6ae..fb70764d7e 100644 --- a/dumux/assembly/fv/CMakeLists.txt +++ b/dumux/assembly/fv/CMakeLists.txt @@ -1,3 +1,4 @@ install(FILES +localoperator.hh operators.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/assembly/fv) diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh new file mode 100644 index 0000000000..7a9d0211bf --- /dev/null +++ b/dumux/assembly/fv/localoperator.hh @@ -0,0 +1,248 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. 
* + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An element-wise local operator for finite-volume schemes. + */ +#ifndef DUMUX_FV_LOCAL_OPERATOR_HH +#define DUMUX_FV_LOCAL_OPERATOR_HH + +#include + +#include +#include +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief The element-wise local operator for finite volume schemes. + * This allows for element-wise evaluation of individual terms + * of the equations to be solved. + * \tparam ElementVariables local view on the grid variables + * \tparam Operators The model-specific operators + */ +template +class FVLocalOperator +{ + // The variables required for the evaluation of the equation + using GridVars = typename ElementVariables::GridVariables; + using PrimaryVariables = typename GridVars::PrimaryVariables; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVars::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using NumEqVector = typename Operators::NumEqVector; + static constexpr int dim = GridView::dimension; + static constexpr int numEq = NumEqVector::size(); + static constexpr bool isBox = GridGeometry::discMethod == DiscretizationMethod::box; + +public: + //! export the grid variables type this operator requires a local view of + using GridVariables = GridVars; + + //! export a grid-independent alias for compatibility with non grid-based schemes + //! TODO: Necessary? + using Variables = GridVars; + + //! the container storing the operator values on all dofs of an element + using ElementResidualVector = Dune::BlockVector; + + //! export a grid-independent alias for compatibility with non grid-based schemes + //! TODO: Necessary? + using Residual = ElementResidualVector; + + /*! + * \brief The constructor + * \note The grid geometry/grid variables local views are expected to + * be bound to the same (the given) element + */ + explicit FVLocalOperator(const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVariables& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + {} + + /*! + * \name Main interface + * \note Methods used by the assembler to compute derivatives and residual + */ + // \{ + + /*! + * \brief Compute the terms of the local residual that do not appear in + * time derivatives. These are the sources and the fluxes. + */ + ElementResidualVector evalFluxesAndSources() const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + + // source term + auto result = getEmptyResidual(); + for (const auto& scv : scvs(fvGeometry_)) + result[scv.localDofIndex()] -= Operators::source(problem, element_, fvGeometry_, evv, scv); + + // flux term + for (const auto& scvf : scvfs(fvGeometry_)) + addFlux_(result, scvf); + + return result; + } + + /*! + * \brief Compute the storage term, i.e. the term appearing in the time derivative. 
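+ * \note A rough sketch of how callers (e.g. local assemblers) are expected to use
+ *       this operator; for stationary problems only the flux and source terms are
+ *       needed (template arguments omitted for brevity):
+ * \code
+ * FVLocalOperator localOperator(element, fvGeometry, elemVariables);
+ * auto residual = localOperator.evalFluxesAndSources();
+ * auto storage  = localOperator.evalStorage(); // enters the time discretization in transient runs
+ * \endcode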
+ */ + ElementResidualVector evalStorage() const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + + // TODO: Until now, FVLocalResidual defined storage as the entire + // time derivative. Now it means the term above the time derivative. + // We should think about the correct naming here... + // TODO: Should storage() NOT multiply with volume?? That was different until + // now but flux() also returns the flux multiplied with area so this should + // be more consistent + auto result = getEmptyResidual(); + for (const auto& scv : scvs(fvGeometry_)) + result[scv.localDofIndex()] += operators_.storage(problem, scv, evv[scv]); + + return result; + } + + ElementResidualVector getEmptyResidual() const + { + ElementResidualVector res(fvGeometry_.numScv()); + res = 0.0; + return res; + } + + // \} + + /*! + * \name Interfaces for analytic Jacobian computation + */ + // \{ + + //! \todo TODO: Add interfaces. Or, should this be here at all!? + + //\} + + // \} + +protected: + + //! compute and add the flux across the given face to the container (cc schemes) + template = 0> + void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const + { + const auto& insideScv = fvGeometry_.scv(scvf.insideScvIdx()); + const auto localDofIdx = insideScv.localDofIndex(); + + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + const auto& efvc = elemVariables_.elemFluxVarsCache(); + + if (!scvf.boundary()) + r[localDofIdx] += Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + else + { + const auto& bcTypes = problem.boundaryTypes(element_, scvf); + + // Dirichlet boundaries + if (bcTypes.hasDirichlet() && !bcTypes.hasNeumann()) + r[localDofIdx] += Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + + // Neumann and Robin ("solution dependent Neumann") boundary conditions + else if (bcTypes.hasNeumann() && !bcTypes.hasDirichlet()) + { + auto neumannFluxes = problem.neumann(element_, fvGeometry_, evv, efvc, scvf); + + // multiply neumann fluxes with the area and the extrusion factor + neumannFluxes *= Extrusion::area(scvf)*evv[insideScv].extrusionFactor(); + r[localDofIdx] += neumannFluxes; + } + + else + DUNE_THROW(Dune::NotImplemented, "Mixed boundary conditions for cell-centered schemes. " << + "Use pure boundary conditions by converting Dirichlet BCs to Robin BCs"); + } + } + + //! compute and add the flux across the given face to the container (box scheme) + template = 0> + void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + const auto& efvc = elemVariables_.elemFluxVarsCache(); + + // inner faces + if (!scvf.boundary()) + { + const auto flux = Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + r[fvGeometry_.scv(scvf.insideScvIdx()).localDofIndex()] += flux; + r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; + } + + // boundary faces + else + { + const auto& scv = fvGeometry_.scv(scvf.insideScvIdx()); + const auto& bcTypes = problem.boundaryTypes(element_, scv); + + // Treat Neumann and Robin ("solution dependent Neumann") boundary conditions. 
+ if (bcTypes.hasNeumann()) + { + const auto neumannFluxes = problem.neumann(element_, fvGeometry_, evv, efvc, scvf); + const auto area = Extrusion::area(scvf)*evv[scv].extrusionFactor(); + + // only add fluxes to equations for which Neumann is set + for (int eqIdx = 0; eqIdx < NumEqVector::size(); ++eqIdx) + if (bcTypes.isNeumann(eqIdx)) + r[scv.localDofIndex()][eqIdx] += neumannFluxes[eqIdx]*area; + } + } + } + +private: + + const Element& element_; //!< pointer to the element for which the residual is computed + const FVElementGeometry& fvGeometry_; //!< the local view on the finite element grid geometry + const ElementVariables& elemVariables_; //!< the local view on the grid variables + Operators operators_; //!< evaluates storage/flux operators of the actual equation +}; + +} // end namespace Dumux + +#endif -- GitLab From 58957e612690446f59337cef51714e06477f7e73 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:12:51 +0200 Subject: [PATCH 33/46] [assembly] add new box local assembler --- dumux/assembly/fv/CMakeLists.txt | 1 + dumux/assembly/fv/boxlocalassembler.hh | 373 +++++++++++++++++++++++++ 2 files changed, 374 insertions(+) create mode 100644 dumux/assembly/fv/boxlocalassembler.hh diff --git a/dumux/assembly/fv/CMakeLists.txt b/dumux/assembly/fv/CMakeLists.txt index fb70764d7e..40d6910f65 100644 --- a/dumux/assembly/fv/CMakeLists.txt +++ b/dumux/assembly/fv/CMakeLists.txt @@ -1,4 +1,5 @@ install(FILES +boxlocalassembler.hh localoperator.hh operators.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/assembly/fv) diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh new file mode 100644 index 0000000000..47d56d55b0 --- /dev/null +++ b/dumux/assembly/fv/boxlocalassembler.hh @@ -0,0 +1,373 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for the box scheme. + */ +#ifndef DUMUX_BOX_LOCAL_ASSEMBLER_HH +#define DUMUX_BOX_LOCAL_ASSEMBLER_HH + +#include +#include + +#include + +#include +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for the box scheme. 
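+ * \note A rough sketch of the expected element-wise usage by a grid-wide assembler
+ *       (template arguments omitted for brevity):
+ * \code
+ * auto fvGeometry = localView(gridGeometry);
+ * auto elemVars = localView(gridVariables);
+ * fvGeometry.bind(element);
+ * elemVars.bind(element, fvGeometry);
+ *
+ * BoxLocalAssembler localAssembler(element, fvGeometry, elemVars);
+ * localAssembler.assembleJacobianAndResidual(jacobian, residual);
+ * \endcode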
+ * \tparam Assembler The grid-wide assembler type + */ +template +class BoxLocalAssembler +{ + using GridVariables = typename Assembler::GridVariables; + using GridGeometry = typename GridVariables::GridGeometry; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using LocalOperator = typename Assembler::LocalOperator; + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::ResidualVector; + + static constexpr int numEq = PrimaryVariables::size(); + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + +public: + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit BoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + ElementVariables& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , isStationary_(true) + {} + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacobianMatrix& jac, + ResidualVector& res, + const PartialReassembler* partialReassembler = nullptr) + { + const auto eIdxGlobal = fvGeometry().gridGeometry().elementMapper().index(element()); + + if (partialReassembler && partialReassembler->elementColor(eIdxGlobal) == EntityColor::green) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + else if (!elementIsGhost_) + { + const auto residual = assembleJacobianAndResidual_(jac, partialReassembler); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + else + { + static constexpr int dim = GridView::dimension; + + for (const auto& scv : scvs(fvGeometry())) + { + const auto vIdxLocal = scv.indexInElement(); + const auto& v = element().template subEntity(vIdxLocal); + + // do not change the non-ghost vertices + if (v.partitionType() == Dune::InteriorEntity || + v.partitionType() == Dune::BorderEntity) + continue; + + const auto dofIdx = scv.dofIndex(); + res[dofIdx] = 0; + for (int pvIdx = 0; pvIdx < numEq; ++pvIdx) + jac[dofIdx][dofIdx][pvIdx][pvIdx] = 1.0; + } + } + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + + // if a periodic dof has Dirichlet values also apply the same Dirichlet values to the other dof + if (fvGeometry().gridGeometry().dofOnPeriodicBoundary(scvI.dofIndex())) + { + const auto periodicDof = 
fvGeometry().gridGeometry().periodicallyMappedDof(scvI.dofIndex()); + res[periodicDof][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + const auto end = jac[periodicDof].end(); + for (auto it = jac[periodicDof].begin(); it != end; ++it) + (*it) = periodicDof != it.index() ? 0.0 : 1.0; + } + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + void assembleJacobian(JacobianMatrix& jac) + { + assembleJacobianAndResidual_(jac); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Assemble the residual only + */ + void assembleResidual(ResidualVector& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + //! Enforce Dirichlet constraints + template + void enforceDirichletConstraints(const ApplyFunction& applyDirichlet) + { + // enforce Dirichlet boundary conditions + evalDirichletBoundaries(applyDirichlet); + // TODO: take care of internal Dirichlet constraints (if enabled) + // enforceInternalDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Evaluates Dirichlet boundaries + */ + template< typename ApplyDirichletFunctionType > + void evalDirichletBoundaries(ApplyDirichletFunctionType applyDirichlet) + { + for (const auto& scvI : scvs(fvGeometry())) + { + const auto bcTypes = problem().boundaryTypes(element(), scvI); + if (bcTypes.hasDirichlet()) + { + const auto dirichletValues = problem().dirichlet(element(), scvI); + + // set the Dirichlet conditions in residual and jacobian + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (bcTypes.isDirichlet(eqIdx)) + { + const auto pvIdx = bcTypes.eqToDirichletIndex(eqIdx); + assert(0 <= pvIdx && pvIdx < numEq); + applyDirichlet(scvI, dirichletValues, eqIdx, pvIdx); + } + } + } + } + } + +protected: + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + + // residual of ghost elements is zero + if (elementIsGhost_) + return localOperator.getEmptyResidual(); + + auto residual = localOperator.evalFluxesAndSources(); + if (!isStationary_) + DUNE_THROW(Dune::NotImplemented, "New assembly for instationary problems"); + + return residual; + } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. 
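+ * \note With numeric differentiation, each entry of the element-local Jacobian is
+ *       approximated by difference quotients of the element residual, e.g. with
+ *       forward differences (the difference scheme is selected via the runtime
+ *       parameter "Assembly.NumericDifferenceMethod"):
+ *       \f$ \frac{\partial r_i}{\partial u_j} \approx
+ *           \frac{r_i(u + \varepsilon e_j) - r_i(u)}{\varepsilon} \f$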
+ */ + template< class PartialReassembler = DefaultPartialReassembler > + ElementResidualVector assembleJacobianAndResidual_(JacobianMatrix& A, + const PartialReassembler* partialReassembler = nullptr) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A, partialReassembler); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution. + * \note This specialization is for the box scheme with numeric differentiation + */ + template< class PartialReassembler = DefaultPartialReassembler > + ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A, + const PartialReassembler* partialReassembler = nullptr) + { + // get the variables of the current stage + auto& curVariables = elemVariables(); + auto& curElemVolVars = curVariables.elemVolVars(); + const auto& x = curVariables.gridVariables().dofs(); + + const auto origResiduals = evalLocalResidual(); + const auto origElemSol = elementSolution(element(), x, fvGeometry().gridGeometry()); + auto elemSol = origElemSol; + + ////////////////////////////////////////////////////////////////////////////////////////////// + // Calculate derivatives of the residual of all dofs in element with respect to themselves. // + ////////////////////////////////////////////////////////////////////////////////////////////// + + ElementResidualVector partialDerivs(fvGeometry().numScv()); + for (const auto& scvI : scvs(fvGeometry())) + { + // dof index and corresponding actual pri vars + const auto globalI = scvI.dofIndex(); + const auto localI = scvI.localDofIndex(); + + const auto origCurVolVars = curElemVolVars[scvI]; + auto& curVolVars = curElemVolVars[scvI]; + + // calculate derivatives w.r.t to the privars at the dof at hand + for (int pvIdx = 0; pvIdx < numEq; pvIdx++) + { + partialDerivs = 0.0; + auto evalResiduals = [&](Scalar priVar) + { + // update the volume variables and compute element residual + elemSol[scvI.localDofIndex()][pvIdx] = priVar; + curVolVars.update(elemSol, problem(), element(), scvI); + return evalLocalResidual(); + }; + + // derive the residuals numerically + static const NumericEpsilon eps_{problem().paramGroup()}; + static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod"); + NumericDifferentiation::partialDerivative(evalResiduals, elemSol[localI][pvIdx], partialDerivs, + origResiduals, eps_(elemSol[localI][pvIdx], pvIdx), + numDiffMethod); + + // update the global stiffness matrix with the current partial derivatives + for (const auto& scvJ : scvs(fvGeometry())) + { + const auto globalJ = scvJ.dofIndex(); + const auto localJ = scvJ.localDofIndex(); + + // don't add derivatives for green entities + if (!partialReassembler || partialReassembler->dofColor(globalJ) != EntityColor::green) + { + for (int eqIdx = 0; eqIdx < numEq; eqIdx++) + { + // A[i][col][eqIdx][pvIdx] is the rate of change of the + // the residual of equation 'eqIdx' at dof 'i' + // depending on the primary variable 'pvIdx' at dof 'col' + A[globalJ][globalI][eqIdx][pvIdx] += partialDerivs[localJ][eqIdx]; + } + } + } + + // restore the original element solution & volume variables + elemSol[localI][pvIdx] = origElemSol[localI][pvIdx]; + curVolVars = origCurVolVars; + + // TODO additional dof dependencies + } + } + + return origResiduals; + } + + //! 
Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elemVariables_; } + ElementVariables& elemVariables() { return elemVariables_; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + ElementVariables& elemVariables_; + bool elementIsGhost_; + bool isStationary_; +}; + +} // end namespace Dumux + +#endif -- GitLab From 04c3eb26b4d28d2ffe68fede7bbe103d7b253fa2 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:14:08 +0200 Subject: [PATCH 34/46] [assembly] add scheme-agnostic assembler --- dumux/assembly/CMakeLists.txt | 2 + dumux/assembly/assembler.hh | 418 +++++++++++++++++++++++++++++++ dumux/assembly/localassembler.hh | 56 +++++ 3 files changed, 476 insertions(+) create mode 100644 dumux/assembly/assembler.hh create mode 100644 dumux/assembly/localassembler.hh diff --git a/dumux/assembly/CMakeLists.txt b/dumux/assembly/CMakeLists.txt index 2543a6631f..63a13d1e43 100644 --- a/dumux/assembly/CMakeLists.txt +++ b/dumux/assembly/CMakeLists.txt @@ -1,4 +1,5 @@ install(FILES +assembler.hh boxlocalassembler.hh boxlocalresidual.hh cclocalassembler.hh @@ -10,6 +11,7 @@ fvlocalassemblerbase.hh fvlocalresidual.hh initialsolution.hh jacobianpattern.hh +localassembler.hh numericepsilon.hh partialreassembler.hh staggeredfvassembler.hh diff --git a/dumux/assembly/assembler.hh b/dumux/assembly/assembler.hh new file mode 100644 index 0000000000..4702c043ee --- /dev/null +++ b/dumux/assembly/assembler.hh @@ -0,0 +1,418 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Assembler class for residuals and jacobian matrices + * for grid-based numerical schemes. + */ +#ifndef DUMUX_ASSEMBLER_HH +#define DUMUX_ASSEMBLER_HH + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "localassembler.hh" +#include "jacobianpattern.hh" + +namespace Dumux { + +//! Default types used for the linear system +template struct DefaultLinearSystemTraits; + +//! 
Default linear system types for Dune::BlockVector +template +struct DefaultLinearSystemTraits> +{ +private: + static constexpr int numEq = PrimaryVariables::size(); + using Scalar = typename PrimaryVariables::value_type; + using MatrixBlockType = Dune::FieldMatrix; + +public: + using ResidualVector = Dune::BlockVector; + using JacobianMatrix = Dune::BCRSMatrix; +}; + +/*! + * \ingroup Assembly + * \brief A linear system assembler (residual and Jacobian) for grid-based numerical schemes + * \tparam LO The local operator (evaluation of the terms of the equation) + * \tparam diffMethod The differentiation method to compute derivatives + * \tparam LST The linear system traits (types used for jacobians and residuals) + */ +template< class LO, DiffMethod diffMethod, + class LST = DefaultLinearSystemTraits > +class Assembler +{ + using ThisType = Assembler; + using GG = typename LO::GridVariables::GridGeometry; + using GridView = typename GG::GridView; + using Element = typename GridView::template Codim<0>::Entity; + +public: + //! export the types used for the linear system + using Scalar = typename LO::GridVariables::Scalar; + using JacobianMatrix = typename LST::JacobianMatrix; + using ResidualVector = typename LST::ResidualVector; + using ResidualType = ResidualVector; + + //! export the local operator type + using LocalOperator = LO; + + //! the local operator states the type of variables needed for evaluation + using GridVariables = typename LO::GridVariables; + + //! export the underlying grid geometry type + using GridGeometry = GG; + + //! export a grid-independent alias of the variables + using Variables = GridVariables; + + //! export type used for solution vectors + using SolutionVector = typename GridVariables::SolutionVector; + + /*! + * \brief The Constructor from a grid geometry. + * \param gridGeometry A grid geometry instance + * \note This assembler class is, after construction, defined for a specific equation + * (given by the template argument of the LocalOperator) and a specific grid + * geometry - which defines the connectivity of the degrees of freedom of the + * underlying discretization scheme on a particular grid. The evaluation point, + * consisting of a particular solution/variables/parameters may vary, and therefore, + * an instance of the grid variables is passed to the assembly functions. + */ + Assembler(std::shared_ptr gridGeometry) + : gridGeometry_(gridGeometry) + {} + + /*! + * \brief The Constructor from a grid geometry. + * \param gridGeometry A grid geometry instance + * \param isImplicit boolean to indicate whether an implicit or explicit pattern should be used + * \todo TODO replace bool with time stepping scheme once available + */ + Assembler(std::shared_ptr gridGeometry, + bool isImplicit) + : gridGeometry_(gridGeometry) + , isImplicit_(isImplicit) + {} + + /*! + * \brief Assembles the Jacobian matrix and the residual around the given evaluation point + * which is determined by the grid variables, containing all quantities required + * to evaluate the equations to be assembled. + * \param gridVariables The variables corresponding to the given solution state + * \note We assume the grid geometry on which the grid variables are defined + * to be the same as the one used to instantiate this class + */ + template + void assembleJacobianAndResidual(const GridVariables& gridVariables, + const PartialReassembler* partialReassembler = nullptr) + { + resetJacobian_(partialReassembler); + resetResidual_(); + + // TODO: Remove this assert? 
+ assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs()); + + using LocalAssembler = Dumux::LocalAssembler; + assemble_([&](const Element& element) + { + auto fvGeometry = localView(gridGeometry()); + auto elemVars = localView(gridVariables); + + fvGeometry.bind(element); + elemVars.bind(element, fvGeometry); + + LocalAssembler localAssembler(element, fvGeometry, elemVars); + localAssembler.assembleJacobianAndResidual(*jacobian_, *residual_, partialReassembler); + }); + + // TODO: Put these into discretization-specific helpers? + enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_); + enforceInternalConstraints_(gridVariables, *jacobian_, *residual_); + enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_); + } + + /*! + * \brief Assembles the Jacobian matrix of the discrete system of equations + * around a given state represented by the grid variables object. + */ + void assembleJacobian(const GridVariables& gridVariables) + { + resetJacobian_(); + + // TODO: Remove this assert? + assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs()); + + using LocalAssembler = Dumux::LocalAssembler; + assemble_([&](const Element& element) + { + auto fvGeometry = localView(gridGeometry()); + auto elemVars = localView(gridVariables); + + fvGeometry.bind(element); + elemVars.bind(element, fvGeometry); + + LocalAssembler localAssembler(element, fvGeometry, elemVars); + localAssembler.assembleJacobianAndResidual(*jacobian_); + }); + + // TODO: Put these into discretization-specific helpers? + enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_); + enforceInternalConstraints_(gridVariables, *jacobian_, *residual_); + enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_); + } + + /*! + * \brief Assembles the residual for a given state represented by the provided + * grid variables object, using the internal residual vector to store the result. + */ + void assembleResidual(const GridVariables& gridVariables) + { + resetResidual_(); + assembleResidual(*residual_, gridVariables); + } + + /*! + * \brief Assembles the residual for a given state represented by the provided + * grid variables object, using the provided residual vector to store the result. + */ + void assembleResidual(ResidualVector& r, const GridVariables& gridVariables) const + { + using LocalAssembler = Dumux::LocalAssembler; + assemble_([&](const Element& element) + { + auto fvGeometry = localView(gridGeometry()); + auto elemVars = localView(gridVariables); + + fvGeometry.bind(element); + elemVars.bind(element, fvGeometry); + + LocalAssembler localAssembler(element, fvGeometry, elemVars); + localAssembler.assembleResidual(r); + }); + } + + //! 
Will become obsolete once the new linear solvers are available + Scalar residualNorm(const GridVariables& gridVars) const + { + ResidualType residual(numDofs()); + assembleResidual(residual, gridVars); + + // for box communicate the residual with the neighboring processes + if (GridGeometry::discMethod == DiscretizationMethod::box && gridView().comm().size() > 1) + { + using VertexMapper = typename GridGeometry::VertexMapper; + VectorCommDataHandleSum + sumResidualHandle(gridGeometry_->vertexMapper(), residual); + gridView().communicate(sumResidualHandle, + Dune::InteriorBorder_InteriorBorder_Interface, + Dune::ForwardCommunication); + } + + // calculate the square norm of the residual + Scalar result2 = residual.two_norm2(); + if (gridView().comm().size() > 1) + result2 = gridView().comm().sum(result2); + + using std::sqrt; + return sqrt(result2); + } + + + /*! + * \brief Tells the assembler which jacobian and residual to use. + * This also resizes the containers to the required sizes and sets the + * sparsity pattern of the jacobian matrix. + */ + void setLinearSystem(std::shared_ptr A, + std::shared_ptr r) + { + jacobian_ = A; + residual_ = r; + + // check and/or set the BCRS matrix's build mode + if (jacobian_->buildMode() == JacobianMatrix::BuildMode::unknown) + jacobian_->setBuildMode(JacobianMatrix::random); + else if (jacobian_->buildMode() != JacobianMatrix::BuildMode::random) + DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment"); + + setJacobianPattern(); + setResidualSize(); + } + + /*! + * \brief The version without arguments uses the default constructor to create + * the jacobian and residual objects in this assembler if you don't need them outside this class + */ + void setLinearSystem() + { + jacobian_ = std::make_shared(); + jacobian_->setBuildMode(JacobianMatrix::random); + residual_ = std::make_shared(); + + setJacobianPattern(); + setResidualSize(); + } + + /*! + * \brief Resizes the jacobian and sets the jacobian' sparsity pattern. + */ + void setJacobianPattern() + { + // resize the jacobian and the residual + const auto numDofs = this->numDofs(); + jacobian_->setSize(numDofs, numDofs); + + // create occupation pattern of the jacobian + // TODO: Does the bool need to be at compile time in getJacobianPattern? + const auto occupationPattern = isImplicit_ ? getJacobianPattern(gridGeometry()) + : getJacobianPattern(gridGeometry()); + + // export pattern to jacobian + occupationPattern.exportIdx(*jacobian_); + } + + //! Resizes the residual + void setResidualSize() + { residual_->resize(numDofs()); } + + //! Returns the number of degrees of freedom + std::size_t numDofs() const + { return gridGeometry().numDofs(); } + + //! The global finite volume geometry + const GridGeometry& gridGeometry() const + { return *gridGeometry_; } + + //! The gridview + const GridView& gridView() const + { return gridGeometry().gridView(); } + + //! The jacobian matrix + JacobianMatrix& jacobian() + { return *jacobian_; } + + //! 
The residual vector (rhs) + ResidualVector& residual() + { return *residual_; } + +protected: + // reset the residual vector to 0.0 + void resetResidual_() + { + if (!residual_) + { + residual_ = std::make_shared(); + setResidualSize(); + } + + (*residual_) = 0.0; + } + + // reset the Jacobian matrix to 0.0 + template + void resetJacobian_(const PartialReassembler *partialReassembler = nullptr) + { + if (!jacobian_) + { + jacobian_ = std::make_shared(); + jacobian_->setBuildMode(JacobianMatrix::random); + setJacobianPattern(); + } + + if (partialReassembler) + partialReassembler->resetJacobian(*this); + else + *jacobian_ = 0.0; + } + + /*! + * \brief A method assembling something per element + * \note Handles exceptions for parallel runs + * \throws NumericalProblem on all processes if something throwed during assembly + */ + template + void assemble_(AssembleElementFunc&& assembleElement) const + { + // a state that will be checked on all processes + bool succeeded = false; + + // try assembling using the local assembly function + try + { + // let the local assembler add the element contributions + for (const auto& element : elements(gridView())) + assembleElement(element); + + // if we get here, everything worked well on this process + succeeded = true; + } + // throw exception if a problem ocurred + catch (NumericalProblem &e) + { + std::cout << "rank " << gridView().comm().rank() + << " caught an exception while assembling:" << e.what() + << "\n"; + succeeded = false; + } + + // make sure everything worked well on all processes + if (gridView().comm().size() > 1) + succeeded = gridView().comm().min(succeeded); + + // if not succeeded rethrow the error on all processes + if (!succeeded) + DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system"); + } + + void enforceDirichletConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res) + { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ } + + void enforceInternalConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res) + { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ } + + void enforcePeriodicConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res) + { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ } + +private: + //! the grid geometry on which it is assembled + std::shared_ptr gridGeometry_; + + //! states if an implicit of explicit scheme is used (affects jacobian pattern) + bool isImplicit_ = true; + + //! shared pointers to the jacobian matrix and residual + std::shared_ptr jacobian_; + std::shared_ptr residual_; +}; + +} // namespace Dumux + +#endif diff --git a/dumux/assembly/localassembler.hh b/dumux/assembly/localassembler.hh new file mode 100644 index 0000000000..b5fbe3da62 --- /dev/null +++ b/dumux/assembly/localassembler.hh @@ -0,0 +1,56 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. 
* + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Helper alias to select a local assembler based on the discretization scheme. + */ +#ifndef DUMUX_LOCAL_ASSEMBLER_HH +#define DUMUX_LOCAL_ASSEMBLER_HH + +#include +#include "fv/boxlocalassembler.hh" + +namespace Dumux { +namespace Impl { + + template + struct LocalAssemblerChooser; + + template + struct LocalAssemblerChooser + { using type = BoxLocalAssembler; }; + + template + using LocalAssemblerType = typename LocalAssemblerChooser::type; + +} // end namespace Immpl + +/*! + * \ingroup Assembly + * \brief Helper alias to select the local assembler type from an assembler. + */ +template +using LocalAssembler = Impl::LocalAssemblerType; + +} // namespace Dumux + +#endif -- GitLab From 91a811ef0d9218e548cf38cc8d3adea0c9e90ee3 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 17:17:22 +0200 Subject: [PATCH 35/46] TEMP [test][1p][box] use new assembler --- .../1p/incompressible/main.cc | 33 +++++++++++-------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/test/porousmediumflow/1p/incompressible/main.cc b/test/porousmediumflow/1p/incompressible/main.cc index 4f3818bcb7..869689a14f 100644 --- a/test/porousmediumflow/1p/incompressible/main.cc +++ b/test/porousmediumflow/1p/incompressible/main.cc @@ -46,7 +46,9 @@ #include #include -#include +#include +#include +#include #include "problem.hh" #include "../internaldirichlet/problem.hh" @@ -117,17 +119,14 @@ int main(int argc, char** argv) using Problem = GetPropType; auto problem = std::make_shared(gridGeometry); - // the solution vector - using SolutionVector = GetPropType; - SolutionVector x(gridGeometry->numDofs()); - // the grid variables using GridVariables = GetPropType; - auto gridVariables = std::make_shared(problem, gridGeometry); - gridVariables->init(x); + auto initX = [&] (auto& x) { x.resize(gridGeometry->numDofs()); x = 0.0; }; + auto gridVariables = std::make_shared(problem, gridGeometry, initX); // intialize the vtk output module - VtkOutputModule vtkWriter(*gridVariables, x, problem->name()); + using SolutionVector = GetPropType; + VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name()); using VelocityOutput = GetPropType; vtkWriter.addVelocityOutput(std::make_shared(*gridVariables)); using IOFields = GetPropType; @@ -135,15 +134,23 @@ int main(int argc, char** argv) vtkWriter.write(0.0); // create assembler & linear solver - using Assembler = FVAssembler; - auto assembler = std::make_shared(problem, gridGeometry, gridVariables); + // TODO: The operators or local operator could be a property, just as is LocalResidual currently + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GridVariables::LocalView; + + using ImmiscibleOperators = FVImmiscibleOperators; + using LocalOperator = FVLocalOperator; + + using Assembler = Assembler; + auto assembler = std::make_shared(gridGeometry); using LinearSolver = SSORCGBackend; auto linearSolver = std::make_shared(); // solver the linear problem LinearPDESolver 
solver(assembler, linearSolver); - solver.solve(x); + solver.solve(*gridVariables); // output result to vtk vtkWriter.write(1.0); @@ -177,7 +184,7 @@ int main(int argc, char** argv) auto elemFluxVarsCache = localView(gridVariables->gridFluxVarsCache()); fvGeometry.bind(element); - elemVolVars.bind(element, fvGeometry, x); + elemVolVars.bind(element, fvGeometry, gridVariables->dofs()); elemFluxVarsCache.bind(element, fvGeometry, elemVolVars); velocityOutput.calculateVelocity(velocity, element, fvGeometry, elemVolVars, elemFluxVarsCache, 0); @@ -200,7 +207,7 @@ int main(int argc, char** argv) // For the mpfa test, write out the gradients in the scv centers if (getParam("IO.WriteMpfaVelocities", false)) - writeMpfaVelocities(*gridGeometry, *gridVariables, x); + writeMpfaVelocities(*gridGeometry, *gridVariables, gridVariables->dofs()); if (mpiHelper.rank() == 0) Parameters::print(); -- GitLab From ae5c91a145a682d432f00b2a6b02a2ab74faf529 Mon Sep 17 00:00:00 2001 From: Timo Koch Date: Thu, 30 Apr 2020 22:16:33 +0200 Subject: [PATCH 36/46] [timeintegration] Add time stepper draft --- dumux/CMakeLists.txt | 1 + dumux/timestepping/CMakeLists.txt | 2 + dumux/timestepping/multistagemethods.hh | 204 +++++++++++++++++++ dumux/timestepping/multistagetimestepper.hh | 213 ++++++++++++++++++++ 4 files changed, 420 insertions(+) create mode 100644 dumux/timestepping/multistagemethods.hh create mode 100644 dumux/timestepping/multistagetimestepper.hh diff --git a/dumux/CMakeLists.txt b/dumux/CMakeLists.txt index 8b27a75173..291ce498c1 100644 --- a/dumux/CMakeLists.txt +++ b/dumux/CMakeLists.txt @@ -13,6 +13,7 @@ add_subdirectory(multidomain) add_subdirectory(nonlinear) add_subdirectory(parallel) add_subdirectory(porousmediumflow) +add_subdirectory(timestepping) # if Python bindings are enabled, include necessary sub directories. if(DUNE_ENABLE_PYTHONBINDINGS) diff --git a/dumux/timestepping/CMakeLists.txt b/dumux/timestepping/CMakeLists.txt index 3e7bc52b16..3042828afc 100644 --- a/dumux/timestepping/CMakeLists.txt +++ b/dumux/timestepping/CMakeLists.txt @@ -1,3 +1,5 @@ install(FILES timelevel.hh +multistagemethods.hh +multistagetimestepper.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/timestepping) diff --git a/dumux/timestepping/multistagemethods.hh b/dumux/timestepping/multistagemethods.hh new file mode 100644 index 0000000000..62f1e4bb2a --- /dev/null +++ b/dumux/timestepping/multistagemethods.hh @@ -0,0 +1,204 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \brief Parameters for different multistage time stepping methods + * \note See e.g. 
https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods
+ */
+#ifndef DUMUX_TIMESTEPPING_MULTISTAGE_METHODS_HH
+#define DUMUX_TIMESTEPPING_MULTISTAGE_METHODS_HH
+
+#include
+#include
+#include
+
+namespace Dumux {
+
+/*!
+ * \brief Abstract interface for one-step multi-stage method parameters in Shu/Osher form.
+ *
+ * This implementation is based on the Shu/Osher form from:
+ * Chi W. Shu and Stanley Osher. Efficient implementation of essentially
+ * non-oscillatory shock-capturing schemes. J. Comput. Phys., 77:439–471, 1988.
+ * https://doi.org/10.1016/0021-9991(88)90177-5.
+ * To this end Eq. (2.12) is extended for implicit schemes.
+ *
+ * We consider the general PDE form
+ *
+ * \f[
+ * \begin{equation}
+ * \frac{\partial M(x)}{\partial t} - R(x, t) = 0,
+ * \end{equation}
+ * \f]
+ *
+ * where \f$ M(x)\f$ is the temporal operator/residual in terms of the solution \f$ x \f$,
+ * and \f$ R(x)\f$ is the discrete spatial operator/residual.
+ * \f$ M(x)\f$ usually corresponds to the conserved quantity (e.g. mass), and \f$ R(x)\f$
+ * contains the rest of the residual. We can then construct \f$ m \f$-stage time discretization methods.
+ * Integrating from time \f$ t^n\f$ to \f$ t^{n+1}\f$ with time step size \f$ \Delta t^n\f$, we solve:
+ *
+ * \f[
+ * \begin{aligned}
+ * x^{(0)} &= x^n\\
+ * \sum_{k=0}^i \left[ \alpha_{ik} M\left(x^{(k)}, t^n + d_k\Delta t^n\right)
+ *   + \beta_{ik}\Delta t^n R \left(x^{(k)}, t^n+d_k\Delta t^n \right)\right] &= 0 & \forall i \in \{1,\ldots,m\} \\
+ * x^{n+1} &= x^{(m)}
+ * \end{aligned}
+ * \f]
+ * where \f$ x^{(k)} \f$ denotes the intermediate solution at stage \f$ k \f$.
+ * Depending on the number of stages \f$ m \f$, and the coefficients \f$ \alpha, \beta, d\f$,
+ * schemes with different properties and order of accuracy can be constructed.
+ */
+template
+class MultiStageMethod
+{
+public:
+    virtual bool implicit () const = 0;
+
+    virtual std::size_t numStages () const = 0;
+
+    //! weights of the temporal operator residual (\f$ \alpha_{ik} \f$)
+    virtual Scalar temporalWeight (std::size_t i, std::size_t k) const = 0;
+
+    //! weights of the spatial operator residual (\f$ \beta_{ik} \f$)
+    virtual Scalar spatialWeight (std::size_t i, std::size_t k) const = 0;
+
+    //! time step weights for each stage (\f$ d_k \f$)
+    virtual Scalar timeStepWeight (std::size_t k) const = 0;
+
+    virtual std::string name () const = 0;
+
+    virtual ~MultiStageMethod() = default;
+};
+
+//! Multi-stage time stepping scheme implementations
+namespace MultiStage {
+
+/*!
+ * \brief A theta time stepping scheme
+ * theta=1.0 is an implicit Euler scheme,
+ * theta=0.0 an explicit Euler scheme,
+ * theta=0.5 is a Crank-Nicolson scheme
+ */
+template
+class Theta : public MultiStageMethod
+{
+public:
+    explicit Theta(const Scalar theta)
+    : paramAlpha_{{-1.0, 1.0}}
+    , paramBeta_{{1.0-theta, theta}}
+    , paramD_{{0.0, 1.0}};
+    {}
+
+    bool implicit () const final
+    { return paramBeta_[1] > 0.0; }
+
+    std::size_t numStages () const final
+    { return 1; }
+
+    Scalar temporalWeight (std::size_t, std::size_t k) const final
+    { return paramAlpha_[k]; }
+
+    Scalar spatialWeight (std::size_t, std::size_t k) const final
+    { return paramBeta_[k]; }
+
+    Scalar timeStepWeight (std::size_t k) const final
+    { return paramD_[k]; }
+
+    std::string name () const override
+    { return "theta scheme"; }
+
+private:
+    std::array paramAlpha_;
+    std::array paramBeta_;
+    std::array paramD_;
+};
+
+/*!
+ * \brief An explicit Euler time stepping scheme + */ +template +class ExplicitEuler final : public Theta +{ +public: + ExplicitEuler() : Theta(0.0) {} + + std::string name () const final + { return "explicit Euler"; } +}; + +/*! + * \brief An implicit Euler time stepping scheme + */ +template +class ImplicitEuler final : public Theta +{ +public: + ImplicitEuler() : Theta(1.0) {} + + std::string name () const final + { return "implicit Euler"; } +}; + +/*! + * \brief Classical explicit fourth order Runge-Kutta scheme + */ +template +class RungeKuttaExplicitFourthOrder final : public MultiStageMethod +{ + RungeKuttaExplicitFourthOrder() + : paramAlpha_{{{-1.0, 1.0, 0.0, 0.0, 0.0}, + {-1.0, 0.0, 1.0, 0.0, 0.0}, + {-1.0, 0.0, 0.0, 1.0, 0.0}, + {-1.0, 0.0, 0.0, 0.0, 1.0}}} + , paramBeta_{{{0.5, 0.0, 0.0, 0.0, 0.0}, + {0.0, 0.5, 0.0, 0.0, 0.0}, + {0.0, 0.0, 1.0, 0.0, 0.0}, + {1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0, 0.0}}} + , paramD_{{0.0, 0.5, 0.5, 1.0, 1.0}}; + {} + + bool implicit () const final + { return false; } + + std::size_t numStages () const final + { return 4; } + + Scalar temporalWeight (std::size_t i, std::size_t k) const final + { return paramAlpha_[i-1][k]; } + + Scalar spatialWeight (std::size_t i, std::size_t k) const final + { return paramBeta_[i-1][k]; } + + Scalar timeStepWeight (std::size_t k) const final + { return paramD_[k]; } + + std::string name () const final + { return "explicit Runge-Kutta 4th order"; } + +private: + std::array, 4> paramAlpha_; + std::array, 4> paramBeta_; + std::array paramD_; +}; + +} // end namespace MultiStage +} // end namespace Dumux + +#endif diff --git a/dumux/timestepping/multistagetimestepper.hh b/dumux/timestepping/multistagetimestepper.hh new file mode 100644 index 0000000000..8362d6ad66 --- /dev/null +++ b/dumux/timestepping/multistagetimestepper.hh @@ -0,0 +1,213 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \brief A time stepper performing a single time step of a transient simulation + */ +#ifndef DUMUX_TIMESTEPPING_MULTISTAGE_TIMESTEPPER_HH +#define DUMUX_TIMESTEPPING_MULTISTAGE_TIMESTEPPER_HH + +#include +#include +#include +#include + +namespace Dumux { + +//! forward declaration +template +class MultiStageMethod; + +//! Data object for the parameters of a given stage +template +class MultiStageParams +{ + struct Params { + Scalar alpha, betaDt, timeAtStage; + bool skipTemporal, skipSpatial; + }; +public: + //! 
Extract params for stage i from method m + StageParams(std::size_t i, const MultiStageMethod& m, const Scalar t, const Scalar dt) + : size_(i+1) + { + params_.resize(size_); + for (std::size_t k = 0; k < size_; ++k) + { + auto p = params_[k]; + params_.alpha = m.temporalWeight(i, k); + params_.betaDt = m.spatialWeight(i, k)*dt; + params_.timeAtStage = t + m.timeStepWeight(k)*dt; + + using std::abs; + params_.skipTemporal = (abs(params_.alpha) < 1e-6); + params_.skipSpatial = (abs(params_.beta) < 1e-6); + } + } + + std::size_t size () const + { return size_; } + + //! weights of the temporal operator residual (\f$ \alpha_{ik} \f$) + Scalar temporalWeight (std::size_t k) const + { return params_[k].alpha; } + + //! weights of the spatial operator residual (\f$ \beta_{ik} \f$) + Scalar spatialWeight (std::size_t k) const + { return params_[k].betaDt; } + + //! the time at which we have to evaluate the operators + Scalar timeAtStage (std::size_t k) const + { return params_[k].timeAtStage; } + + //! If \f$ \alpha_{ik} = 0\f$ + Scalar skipTemporal (std::size_t k) const + { return params_[k].skipTemporal; } + + //! If \f$ \beta_{ik} = 0\f$ + Scalar skipSpatial (std::size_t k) const + { return params_[k].skipSpatial; } + +private: + std::size_t size_; + std::vector params_; +}; + +/*! + * \brief Solution vectors for the multi-stage time stepping scheme + */ +template +class MultiStageSolutions +{ +public: + MultiStageSolutions(std::size_t numStages) + { + // avoid resizing vectors after construction + sols_.reserve(numStages); + owned_.reserve(numStages); + } + + MultiStageSolutions(const MultiStageSolutions&) = delete; + MultiStageSolutions(MultiStageSolutions&&) = delete; + MultiStageSolutions& operator=(const MultiStageSolutions&) = delete; + MultiStageSolutions& operator=(MultiStageSolutions&&) = delete; + + ~MultiStageSolutions() + { + // destroy all owned solution resources + for (int i = 0; i < sols_.size(); ++i) + if (owned_[i]) + delete sols_[i]; + } + + //! push an existing solution + void pushSolution(SolutionVector& sol) + { + sols_.push_back(&sol); + sols_.push_back(false); + } + + //! create an intermediate/temporary solution + void createSolution(const SolutionVector& guess) + { + sols_.push_back(new SolutionVector(guess)); + sols_.push_back(true); + } + + auto size() const + { return sols_.size(); } + + SolutionVector& operator[] (std::size_t i) + { return *sol_[i]; } + + SolutionVector& back() + { return *sols_.back(); } + +private: + std::vector sols_; + std::vector owned_; +}; + +/*! + * \brief Time stepping with a multi-stage method + * \note We limit ourselves to "diagonally" implicit multi-stage methods where solving + * a stage can only depend on the values of the same stage and stages before + * but not future stages (which would require solving larger linear systems) + */ +template +class MultiStageTimeStepper +{ + using Scalar = typename PDESolver::Scalar; + using SolutionVector = typename PDESolver::Assembler::ResidualType; + using Solutions = MultiStageSolutions; + +public: + MultiStageTimeStepper(std::shared_ptr pdeSolver, + std::shared_ptr> msMethod) + : pdeSolver_(pdeSolver) + , msMethod_(msMethod) + {} + + /*! 
+ * \brief Advance one time step of the given time loop + * TODO: Add time step control if the pde solver doesn't converge + */ + void step(SolutionVector& sol, SolutionVector& solOld, const Scalar tOld, const Scalar dt) + { + const auto numStages = method_->numStages(); + Solutions solutions(numStages); + + // The solution of stage zero is oldOld (\f$ x^{n}\f$). + solutions.pushSolution(solOld); + + // For each intermediate stage we solve for the stage solution. + // In the last stage (stageIdx = numStages), we obtain the solution (\f$ x^{n+1}\f$) + for (auto stageIdx = 1UL; stageIdx <= numStages; ++stageIdx) + { + if (stageIdx == numStages) + solutions.pushSolution(sol); + else + solutions.createSolution(solutions[stageIdx-1]); + + // extract parameters for this stage from the time stepping method + auto stageParams = StageParams(*msMethod_, stageIdx, tOld, dt); + + // prepare the assembler + pdeSolver_->assembler().setMultiStageSolutions(solutions); + pdeSolver_->assembler().setStageParams(std::move(stageParams)); + + // assemble & solve + pdeSolver_->solve(solutions.back()); + } + } + + /*! + * \brief Set/change the time step method + */ + void setMethod(std::shared_ptr> msMethod) + { msMethod_ = msMethod; } + +private: + std::shared_ptr pdeSolver_; + std::shared_ptr> msMethod_; +}; + +} // end namespace Dumux + +#endif -- GitLab From 911febcf8d58572c4c5cd8ef63e7341f9a783ffc Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Tue, 19 May 2020 22:41:15 +0200 Subject: [PATCH 37/46] [timeintegration] progress implementation --- dumux/timestepping/multistagemethods.hh | 5 +- dumux/timestepping/multistagetimestepper.hh | 69 +++++++++++++++------ 2 files changed, 52 insertions(+), 22 deletions(-) diff --git a/dumux/timestepping/multistagemethods.hh b/dumux/timestepping/multistagemethods.hh index 62f1e4bb2a..a64d34d796 100644 --- a/dumux/timestepping/multistagemethods.hh +++ b/dumux/timestepping/multistagemethods.hh @@ -103,7 +103,7 @@ public: explicit Theta(const Scalar theta) : paramAlpha_{{-1.0, 1.0}} , paramBeta_{{1.0-theta, theta}} - , paramD_{{0.0, 1.0}}; + , paramD_{{0.0, 1.0}} {} bool implicit () const final @@ -162,6 +162,7 @@ public: template class RungeKuttaExplicitFourthOrder final : public MultiStageMethod { +public: RungeKuttaExplicitFourthOrder() : paramAlpha_{{{-1.0, 1.0, 0.0, 0.0, 0.0}, {-1.0, 0.0, 1.0, 0.0, 0.0}, @@ -171,7 +172,7 @@ class RungeKuttaExplicitFourthOrder final : public MultiStageMethod {0.0, 0.5, 0.0, 0.0, 0.0}, {0.0, 0.0, 1.0, 0.0, 0.0}, {1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0, 0.0}}} - , paramD_{{0.0, 0.5, 0.5, 1.0, 1.0}}; + , paramD_{{0.0, 0.5, 0.5, 1.0, 1.0}} {} bool implicit () const final diff --git a/dumux/timestepping/multistagetimestepper.hh b/dumux/timestepping/multistagetimestepper.hh index 8362d6ad66..f0eb51dc93 100644 --- a/dumux/timestepping/multistagetimestepper.hh +++ b/dumux/timestepping/multistagetimestepper.hh @@ -39,25 +39,26 @@ template class MultiStageParams { struct Params { - Scalar alpha, betaDt, timeAtStage; + Scalar alpha, betaDt, timeAtStage, dtFraction; bool skipTemporal, skipSpatial; }; public: //! 
Extract params for stage i from method m - StageParams(std::size_t i, const MultiStageMethod& m, const Scalar t, const Scalar dt) + MultiStageParams(const MultiStageMethod& m, std::size_t i, const Scalar t, const Scalar dt) : size_(i+1) { params_.resize(size_); for (std::size_t k = 0; k < size_; ++k) { - auto p = params_[k]; - params_.alpha = m.temporalWeight(i, k); - params_.betaDt = m.spatialWeight(i, k)*dt; - params_.timeAtStage = t + m.timeStepWeight(k)*dt; + auto& p = params_[k]; + p.alpha = m.temporalWeight(i, k); + p.betaDt = m.spatialWeight(i, k)*dt; + p.timeAtStage = t + m.timeStepWeight(k)*dt; + p.dtFraction = m.timeStepWeight(k); using std::abs; - params_.skipTemporal = (abs(params_.alpha) < 1e-6); - params_.skipSpatial = (abs(params_.beta) < 1e-6); + p.skipTemporal = (abs(p.alpha) < 1e-6); + p.skipSpatial = (abs(p.betaDt) < 1e-6); } } @@ -76,6 +77,10 @@ public: Scalar timeAtStage (std::size_t k) const { return params_[k].timeAtStage; } + //! the fraction of a time step corresponding to the k-th stage + Scalar timeStepFraction (std::size_t k) const + { return params_[k].dtFraction; } + //! If \f$ \alpha_{ik} = 0\f$ Scalar skipTemporal (std::size_t k) const { return params_[k].skipTemporal; } @@ -120,30 +125,37 @@ public: void pushSolution(SolutionVector& sol) { sols_.push_back(&sol); - sols_.push_back(false); + owned_.push_back(false); } //! create an intermediate/temporary solution void createSolution(const SolutionVector& guess) { sols_.push_back(new SolutionVector(guess)); - sols_.push_back(true); + owned_.push_back(true); } auto size() const { return sols_.size(); } SolutionVector& operator[] (std::size_t i) - { return *sol_[i]; } + { return *sols_[i]; } + + const SolutionVector& operator[] (std::size_t i) const + { return *sols_[i]; } SolutionVector& back() { return *sols_.back(); } + const SolutionVector& back() const + { return *sols_.back(); } + private: std::vector sols_; std::vector owned_; }; + /*! * \brief Time stepping with a multi-stage method * \note We limit ourselves to "diagonally" implicit multi-stage methods where solving @@ -153,11 +165,18 @@ private: template class MultiStageTimeStepper { - using Scalar = typename PDESolver::Scalar; + using Scalar = typename PDESolver::Assembler::Scalar; using SolutionVector = typename PDESolver::Assembler::ResidualType; using Solutions = MultiStageSolutions; public: + + /*! + * \brief The constructor + * \param pdeSolver Solver class for solving a PDE in each stage + * \param msMethod The multi-stage method which is to be used for time integration + * \todo TODO: Add time step control if the pde solver doesn't converge + */ MultiStageTimeStepper(std::shared_ptr pdeSolver, std::shared_ptr> msMethod) : pdeSolver_(pdeSolver) @@ -166,31 +185,41 @@ public: /*! * \brief Advance one time step of the given time loop + * \param x The container in which to write the solution of the next time step. + * This is also used as the initial guess. + * \param t The current time level + * \param dt The time step size to be used * TODO: Add time step control if the pde solver doesn't converge */ - void step(SolutionVector& sol, SolutionVector& solOld, const Scalar tOld, const Scalar dt) + void step(SolutionVector& x, const Scalar t, const Scalar dt) { - const auto numStages = method_->numStages(); + const auto numStages = msMethod_->numStages(); Solutions solutions(numStages); // The solution of stage zero is oldOld (\f$ x^{n}\f$). 
- solutions.pushSolution(solOld); + solutions.createSolution(x); + + // make sure the assembler gets rid of stuff from the last time integration + pdeSolver_->assembler().prepareTimeIntegration(numStages); // For each intermediate stage we solve for the stage solution. // In the last stage (stageIdx = numStages), we obtain the solution (\f$ x^{n+1}\f$) + // and write it into the provided container for (auto stageIdx = 1UL; stageIdx <= numStages; ++stageIdx) { if (stageIdx == numStages) - solutions.pushSolution(sol); + { + solutions.pushSolution(x); + solutions.back() = solutions[stageIdx-1]; + } else solutions.createSolution(solutions[stageIdx-1]); // extract parameters for this stage from the time stepping method - auto stageParams = StageParams(*msMethod_, stageIdx, tOld, dt); + auto stageParams = std::make_shared>(*msMethod_, stageIdx, t, dt); - // prepare the assembler - pdeSolver_->assembler().setMultiStageSolutions(solutions); - pdeSolver_->assembler().setStageParams(std::move(stageParams)); + // prepare the assembler for this stage + pdeSolver_->assembler().prepareStage(solutions, stageParams); // assemble & solve pdeSolver_->solve(solutions.back()); -- GitLab From f84b0c8ec9ceeb72bed6d6de04f4c5b2f57c4b62 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Thu, 22 Oct 2020 18:35:11 +0200 Subject: [PATCH 38/46] [mstimestepper] accept generic variables as argument --- dumux/timestepping/multistagetimestepper.hh | 29 +++++++++++++++------ 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/dumux/timestepping/multistagetimestepper.hh b/dumux/timestepping/multistagetimestepper.hh index f0eb51dc93..6052f38f81 100644 --- a/dumux/timestepping/multistagetimestepper.hh +++ b/dumux/timestepping/multistagetimestepper.hh @@ -167,8 +167,11 @@ class MultiStageTimeStepper { using Scalar = typename PDESolver::Assembler::Scalar; using SolutionVector = typename PDESolver::Assembler::ResidualType; + using Variables = typename PDESolver::Assembler::Variables; using Solutions = MultiStageSolutions; + using StageParams = MultiStageParams; + public: /*! @@ -185,13 +188,16 @@ public: /*! * \brief Advance one time step of the given time loop - * \param x The container in which to write the solution of the next time step. - * This is also used as the initial guess. + * \param x The solution at the current time level. This is used as an initial + * guess during time integration, and the result is stored in that vector. + * \params vars The variables object at the current time level. This is expected + * to correpond to the given solution vector, and after successful + * time integration this holds the state of the new time level. * \param t The current time level * \param dt The time step size to be used * TODO: Add time step control if the pde solver doesn't converge */ - void step(SolutionVector& x, const Scalar t, const Scalar dt) + void step(SolutionVector& x, Variables& vars, const Scalar t, const Scalar dt) { const auto numStages = msMethod_->numStages(); Solutions solutions(numStages); @@ -199,8 +205,12 @@ public: // The solution of stage zero is oldOld (\f$ x^{n}\f$). 
solutions.createSolution(x); - // make sure the assembler gets rid of stuff from the last time integration - pdeSolver_->assembler().prepareTimeIntegration(numStages); + // 0-th stage does not require solve, so we just call prepare and register + // This makes sure the assembler is prepared for time integration and the + // 0-th stage is registered + auto stageParams = std::make_shared(*msMethod_, 0, t, dt); + pdeSolver_->assembler().prepareStage(solutions.back(), vars, stageParams); + pdeSolver_->assembler().registerStage(solutions.back(), vars); // For each intermediate stage we solve for the stage solution. // In the last stage (stageIdx = numStages), we obtain the solution (\f$ x^{n+1}\f$) @@ -216,13 +226,16 @@ public: solutions.createSolution(solutions[stageIdx-1]); // extract parameters for this stage from the time stepping method - auto stageParams = std::make_shared>(*msMethod_, stageIdx, t, dt); + stageParams = std::make_shared(*msMethod_, stageIdx, t, dt); // prepare the assembler for this stage - pdeSolver_->assembler().prepareStage(solutions, stageParams); + pdeSolver_->assembler().prepareStage(solutions.back(), vars, stageParams); // assemble & solve - pdeSolver_->solve(solutions.back()); + pdeSolver_->solve(solutions.back(), vars); + + // register the result of this stage + pdeSolver_->assembler().registerStage(solutions.back(), vars); } } -- GitLab From d813e8a8fdfc66a036cc70dc884ab237c2541364 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Fri, 23 Oct 2020 14:27:55 +0200 Subject: [PATCH 39/46] [mstimestepper] remove obsolete code --- dumux/timestepping/multistagetimestepper.hh | 119 +++----------------- 1 file changed, 15 insertions(+), 104 deletions(-) diff --git a/dumux/timestepping/multistagetimestepper.hh b/dumux/timestepping/multistagetimestepper.hh index 6052f38f81..2ddac8d3bd 100644 --- a/dumux/timestepping/multistagetimestepper.hh +++ b/dumux/timestepping/multistagetimestepper.hh @@ -94,68 +94,6 @@ private: std::vector params_; }; -/*! - * \brief Solution vectors for the multi-stage time stepping scheme - */ -template -class MultiStageSolutions -{ -public: - MultiStageSolutions(std::size_t numStages) - { - // avoid resizing vectors after construction - sols_.reserve(numStages); - owned_.reserve(numStages); - } - - MultiStageSolutions(const MultiStageSolutions&) = delete; - MultiStageSolutions(MultiStageSolutions&&) = delete; - MultiStageSolutions& operator=(const MultiStageSolutions&) = delete; - MultiStageSolutions& operator=(MultiStageSolutions&&) = delete; - - ~MultiStageSolutions() - { - // destroy all owned solution resources - for (int i = 0; i < sols_.size(); ++i) - if (owned_[i]) - delete sols_[i]; - } - - //! push an existing solution - void pushSolution(SolutionVector& sol) - { - sols_.push_back(&sol); - owned_.push_back(false); - } - - //! create an intermediate/temporary solution - void createSolution(const SolutionVector& guess) - { - sols_.push_back(new SolutionVector(guess)); - owned_.push_back(true); - } - - auto size() const - { return sols_.size(); } - - SolutionVector& operator[] (std::size_t i) - { return *sols_[i]; } - - const SolutionVector& operator[] (std::size_t i) const - { return *sols_[i]; } - - SolutionVector& back() - { return *sols_.back(); } - - const SolutionVector& back() const - { return *sols_.back(); } - -private: - std::vector sols_; - std::vector owned_; -}; - - /*! 
* \brief Time stepping with a multi-stage method * \note We limit ourselves to "diagonally" implicit multi-stage methods where solving @@ -165,11 +103,8 @@ private: template class MultiStageTimeStepper { - using Scalar = typename PDESolver::Assembler::Scalar; - using SolutionVector = typename PDESolver::Assembler::ResidualType; using Variables = typename PDESolver::Assembler::Variables; - using Solutions = MultiStageSolutions; - + using Scalar = typename Variables::Scalar; using StageParams = MultiStageParams; public: @@ -188,55 +123,31 @@ public: /*! * \brief Advance one time step of the given time loop - * \param x The solution at the current time level. This is used as an initial - * guess during time integration, and the result is stored in that vector. - * \params vars The variables object at the current time level. This is expected - * to correpond to the given solution vector, and after successful - * time integration this holds the state of the new time level. + * \params vars The variables object at the current time level. * \param t The current time level * \param dt The time step size to be used - * TODO: Add time step control if the pde solver doesn't converge + * \note We expect the time level in vars to correspond to the given time `t` + * \todo: TODO: Add time step control if the pde solver doesn't converge */ - void step(SolutionVector& x, Variables& vars, const Scalar t, const Scalar dt) + void step(Variables& vars, const Scalar t, const Scalar dt) { - const auto numStages = msMethod_->numStages(); - Solutions solutions(numStages); - - // The solution of stage zero is oldOld (\f$ x^{n}\f$). - solutions.createSolution(x); - - // 0-th stage does not require solve, so we just call prepare and register - // This makes sure the assembler is prepared for time integration and the - // 0-th stage is registered - auto stageParams = std::make_shared(*msMethod_, 0, t, dt); - pdeSolver_->assembler().prepareStage(solutions.back(), vars, stageParams); - pdeSolver_->assembler().registerStage(solutions.back(), vars); - - // For each intermediate stage we solve for the stage solution. - // In the last stage (stageIdx = numStages), we obtain the solution (\f$ x^{n+1}\f$) - // and write it into the provided container - for (auto stageIdx = 1UL; stageIdx <= numStages; ++stageIdx) - { - if (stageIdx == numStages) - { - solutions.pushSolution(x); - solutions.back() = solutions[stageIdx-1]; - } - else - solutions.createSolution(solutions[stageIdx-1]); + // make sure there are no traces of previous stages + pdeSolver_->assembler().clearStages(); + for (auto stageIdx = 1UL; stageIdx <= msMethod_->numStages(); ++stageIdx) + { // extract parameters for this stage from the time stepping method - stageParams = std::make_shared(*msMethod_, stageIdx, t, dt); + auto stageParams = std::make_shared(*msMethod_, stageIdx, t, dt); // prepare the assembler for this stage - pdeSolver_->assembler().prepareStage(solutions.back(), vars, stageParams); + pdeSolver_->assembler().prepareStage(vars, stageParams); // assemble & solve - pdeSolver_->solve(solutions.back(), vars); - - // register the result of this stage - pdeSolver_->assembler().registerStage(solutions.back(), vars); + pdeSolver_->solve(vars); } + + // clear traces of previously registered stages + pdeSolver_->assembler().clearStages(); } /*! 
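As an illustration of how the Shu/Osher coefficients of a method end up in the per-stage parameters that the time stepper hands to the assembler, the following stand-alone sketch instantiates the implicit Euler scheme and prints the weights of its single stage. It is not part of the patches above; the `double` instantiation and the assumption that such a small test program is compiled against the installed headers are illustrative only.

```cpp
// Minimal sketch (assumption: built against the headers added in this series).
#include <iostream>
#include <dumux/timestepping/multistagemethods.hh>
#include <dumux/timestepping/multistagetimestepper.hh>

int main()
{
    Dumux::MultiStage::ImplicitEuler<double> method;   // Theta scheme with theta = 1
    const double t = 0.0;   // time at the beginning of the step
    const double dt = 0.1;  // time step size

    // parameters of the first (and only) stage, i = 1
    Dumux::MultiStageParams<double> params(method, /*i=*/1, t, dt);

    // expected: alpha = {-1, 1}, beta*dt = {0, 0.1}, stage times = {0, 0.1},
    // i.e. the stage equation -M(x^n) + M(x^{n+1}) + dt*R(x^{n+1}, t^n + dt) = 0
    for (std::size_t k = 0; k < params.size(); ++k)
        std::cout << "k = " << k
                  << ": temporal weight " << params.temporalWeight(k)
                  << ", spatial weight " << params.spatialWeight(k)
                  << ", stage time " << params.timeAtStage(k) << "\n";
}
```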
-- GitLab From 3c6ef4d47a63c5230a4b558ea28796e884229605 Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Fri, 23 Oct 2020 14:27:22 +0200 Subject: [PATCH 40/46] [assembler] support transient problems --- dumux/assembly/assembler.hh | 132 ++++++++++++++++++++----- dumux/assembly/fv/boxlocalassembler.hh | 68 ++++++++++--- 2 files changed, 160 insertions(+), 40 deletions(-) diff --git a/dumux/assembly/assembler.hh b/dumux/assembly/assembler.hh index 4702c043ee..93870f578d 100644 --- a/dumux/assembly/assembler.hh +++ b/dumux/assembly/assembler.hh @@ -36,6 +36,9 @@ #include #include +#include +#include + #include "localassembler.hh" #include "jacobianpattern.hh" @@ -70,9 +73,12 @@ template< class LO, DiffMethod diffMethod, class Assembler { using ThisType = Assembler; + using GG = typename LO::GridVariables::GridGeometry; + using GGLocalView = typename GG::LocalView; using GridView = typename GG::GridView; using Element = typename GridView::template Codim<0>::Entity; + using ElementVariables = typename LO::GridVariables::LocalView; public: //! export the types used for the linear system @@ -96,6 +102,9 @@ public: //! export type used for solution vectors using SolutionVector = typename GridVariables::SolutionVector; + //! export the type for parameters of a stage in time integration + using StageParams = MultiStageParams; + /*! * \brief The Constructor from a grid geometry. * \param gridGeometry A grid geometry instance @@ -105,21 +114,24 @@ public: * underlying discretization scheme on a particular grid. The evaluation point, * consisting of a particular solution/variables/parameters may vary, and therefore, * an instance of the grid variables is passed to the assembly functions. + * \note This constructor (without time integration method) assumes hhat a stationary problem + * is assembled, and thus, an implicit jacobian pattern is used. */ Assembler(std::shared_ptr gridGeometry) : gridGeometry_(gridGeometry) + , isImplicit_(true) {} /*! * \brief The Constructor from a grid geometry. * \param gridGeometry A grid geometry instance - * \param isImplicit boolean to indicate whether an implicit or explicit pattern should be used - * \todo TODO replace bool with time stepping scheme once available + * \param method the time integration method that will be used. */ + template Assembler(std::shared_ptr gridGeometry, - bool isImplicit) + const TimeIntegrationMethod& method) : gridGeometry_(gridGeometry) - , isImplicit_(isImplicit) + , isImplicit_(method.implicit()) {} /*! @@ -140,16 +152,14 @@ public: // TODO: Remove this assert? assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs()); - using LocalAssembler = Dumux::LocalAssembler; assemble_([&](const Element& element) { - auto fvGeometry = localView(gridGeometry()); - auto elemVars = localView(gridVariables); - - fvGeometry.bind(element); - elemVars.bind(element, fvGeometry); + auto ggLocalView = localView(gridGeometry()); + ggLocalView.bind(element); + auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); - LocalAssembler localAssembler(element, fvGeometry, elemVars); + using LocalAssembler = Dumux::LocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, elemVars); localAssembler.assembleJacobianAndResidual(*jacobian_, *residual_, partialReassembler); }); @@ -170,16 +180,14 @@ public: // TODO: Remove this assert? 
assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs()); - using LocalAssembler = Dumux::LocalAssembler; assemble_([&](const Element& element) { - auto fvGeometry = localView(gridGeometry()); - auto elemVars = localView(gridVariables); + auto ggLocalView = localView(gridGeometry()); + ggLocalView.bind(element); + auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); - fvGeometry.bind(element); - elemVars.bind(element, fvGeometry); - - LocalAssembler localAssembler(element, fvGeometry, elemVars); + using LocalAssembler = Dumux::LocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, elemVars); localAssembler.assembleJacobianAndResidual(*jacobian_); }); @@ -205,16 +213,14 @@ public: */ void assembleResidual(ResidualVector& r, const GridVariables& gridVariables) const { - using LocalAssembler = Dumux::LocalAssembler; assemble_([&](const Element& element) { - auto fvGeometry = localView(gridGeometry()); - auto elemVars = localView(gridVariables); - - fvGeometry.bind(element); - elemVars.bind(element, fvGeometry); + auto ggLocalView = localView(gridGeometry()); + ggLocalView.bind(element); + auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); - LocalAssembler localAssembler(element, fvGeometry, elemVars); + using LocalAssembler = Dumux::LocalAssembler; + LocalAssembler localAssembler(element, ggLocalView, elemVars); localAssembler.assembleResidual(r); }); } @@ -299,6 +305,46 @@ public: occupationPattern.exportIdx(*jacobian_); } + /*! + * \brief Prepare for a new stage within a time integration step. + * This caches the given grid variables, which are then used as a + * representation of the previous stage. Moreover, the given grid + * variables are then updated to the time level of the upcoming stage. + * \param gridVars the grid variables representing the previous stage + * \param params the parameters with the weights to be used in the upcoming stage + * \todo TODO: This function does two things, namely caching and then updating. + * Should we split/delegate this, or is the current name descriptive enough? + * When used from outside, one would expect the gridvars to be prepared maybe, + * and that is what's done. Caching might not be expected from the outside but + * it is also not important that that is known from there? + */ + void prepareStage(GridVariables& gridVars, + std::shared_ptr params) + { + stageParams_ = params; + const auto curStage = params->size() - 1; + + // we keep track of previous stages, they are needed for residual assembly + prevStageVariables_.push_back(gridVars); + + // Now we update the time level of the given grid variables + const auto t = params->timeAtStage(curStage); + const auto prevT = params->timeAtStage(0); + const auto dtFraction = params->timeStepFraction(curStage); + TimeLevel timeLevel(t, prevT, dtFraction); + + gridVars.updateTime(timeLevel); + } + + /*! + * \brief Remove traces from stages within a time integration step. + */ + void clearStages() + { + prevStageVariables_.clear(); + stageParams_ = nullptr; + } + //! Resizes the residual void setResidualSize() { residual_->resize(numDofs()); } @@ -401,16 +447,50 @@ protected: void enforcePeriodicConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res) { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ } + //! prepares the local views on the grid variables for the given element + //! 
\todo: TODO: when stageparams.skipSpatial() == true, we don't need to bind flux vars caches! + std::vector prepareElemVariables_(const GridVariables& gridVars, + const Element& element, + const GGLocalView& ggLocalView) const + { + if (!stageParams_) + { + auto elemVars = localView(gridVars); + elemVars.bind(element, ggLocalView); + return {elemVars}; + } + else + { + std::vector elemVars; + elemVars.reserve(stageParams_->size()); + + for (int i = 0; i < stageParams_->size()-1; ++i) + elemVars.emplace_back(prevStageVariables_[i]); + elemVars.emplace_back(gridVars); + + for (auto& evv : elemVars) + evv.bind(element, ggLocalView); + + return elemVars; + } + } + private: //! the grid geometry on which it is assembled std::shared_ptr gridGeometry_; //! states if an implicit of explicit scheme is used (affects jacobian pattern) - bool isImplicit_ = true; + bool isImplicit_; //! shared pointers to the jacobian matrix and residual std::shared_ptr jacobian_; std::shared_ptr residual_; + + //! parameters containing information on the current stage of time integration + std::shared_ptr stageParams_ = nullptr; + + //! keep track of the states of previous stages within a time integration step + std::vector prevStageVariables_; }; } // namespace Dumux diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh index 47d56d55b0..60bb72e8e8 100644 --- a/dumux/assembly/fv/boxlocalassembler.hh +++ b/dumux/assembly/fv/boxlocalassembler.hh @@ -32,7 +32,10 @@ #include #include + #include +#include + namespace Dumux { @@ -64,6 +67,9 @@ class BoxLocalAssembler static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + public: using ElementResidualVector = typename LocalOperator::ElementResidualVector; @@ -72,12 +78,30 @@ public: */ explicit BoxLocalAssembler(const Element& element, const FVElementGeometry& fvGeometry, - ElementVariables& elemVars) + std::vector& elemVars) : element_(element) , fvGeometry_(fvGeometry) , elemVariables_(elemVars) , elementIsGhost_(element.partitionType() == Dune::GhostEntity) - , isStationary_(true) + , stageParams_(nullptr) + { + assert(elemVars.size() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit BoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) {} /*! @@ -239,17 +263,29 @@ protected: */ ElementResidualVector evalLocalResidual() const { - LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? 
localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; - // residual of ghost elements is zero - if (elementIsGhost_) - return localOperator.getEmptyResidual(); + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables_[k]); - auto residual = localOperator.evalFluxesAndSources(); - if (!isStationary_) - DUNE_THROW(Dune::NotImplemented, "New assembly for instationary problems"); + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } - return residual; + return residual; + } } /*! @@ -352,8 +388,11 @@ protected: //! Return references to the local views const Element& element() const { return element_; } const FVElementGeometry& fvGeometry() const { return fvGeometry_; } - const ElementVariables& elemVariables() const { return elemVariables_; } - ElementVariables& elemVariables() { return elemVariables_; } + const ElementVariables& elemVariables() const { return elemVariables_.back(); } + ElementVariables& elemVariables() { return elemVariables_.back(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } //! Return a reference to the underlying problem //! TODO: Should grid vars return problem directly!? @@ -363,9 +402,10 @@ protected: private: const Element& element_; const FVElementGeometry& fvGeometry_; - ElementVariables& elemVariables_; + std::vector& elemVariables_; + bool elementIsGhost_; - bool isStationary_; + std::shared_ptr stageParams_; }; } // end namespace Dumux -- GitLab From 187418027b66742608431515a5c7d96cea191d5e Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Fri, 23 Oct 2020 14:28:22 +0200 Subject: [PATCH 41/46] [pdesolver] provide access to assembler/linear solver --- dumux/common/pdesolver.hh | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/dumux/common/pdesolver.hh b/dumux/common/pdesolver.hh index d11285c7ab..49ce5123e0 100644 --- a/dumux/common/pdesolver.hh +++ b/dumux/common/pdesolver.hh @@ -64,16 +64,19 @@ namespace Impl { * and has a method solve that linearizes (if not already linear), assembles, solves and updates * given an initial solution producing a new solution. * - * \tparam Assembler A PDE linearized system assembler - * \tparam LinearSolver A linear system solver + * \tparam A Assembler for the PDE linearized system + * \tparam LS Linear system solver */ -template +template class PDESolver { - using Scalar = typename Assembler::Scalar; + using Scalar = typename A::Scalar; using TimeLoop = TimeLoopBase; public: + //! export the assembler and linear solver types + using Assembler = A; + using LinearSolver = LS; //! export the type of variables that represent a numerical solution using Variables = Impl::AssemblerVariables; @@ -112,8 +115,6 @@ public: solve(vars); } -protected: - /*! * \brief Access the assembler */ @@ -138,6 +139,8 @@ protected: LinearSolver& linearSolver() { return *linearSolver_; } +protected: + /*! * \brief Helper function to assure the MultiTypeBlockMatrix's sub-blocks have the correct sizes. 
*/ -- GitLab From 28f6f1cb766cf0a4c1617f1d251d2e92b5fcf4da Mon Sep 17 00:00:00 2001 From: "Dennis.Glaeser" Date: Fri, 23 Oct 2020 14:28:46 +0200 Subject: [PATCH 42/46] [test][1p][box] use new time stepper --- .../1p/compressible/instationary/main.cc | 64 ++++++++----------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/test/porousmediumflow/1p/compressible/instationary/main.cc b/test/porousmediumflow/1p/compressible/instationary/main.cc index 2805afaa99..e914140ef7 100644 --- a/test/porousmediumflow/1p/compressible/instationary/main.cc +++ b/test/porousmediumflow/1p/compressible/instationary/main.cc @@ -43,37 +43,14 @@ #include #include -#include +#include +#include +#include +#include #include #include -//! Custom assembler to test assembly with grid variables -template -class GridVarsAssembler : public Assembler -{ -public: - using Assembler::Assembler; - using typename Assembler::GridVariables; - using typename Assembler::ResidualType; - - using Variables = GridVariables; - - void assembleJacobianAndResidual(const GridVariables& gridVars) - { - Assembler::assembleJacobianAndResidual(gridVars.dofs()); - } - - void assembleResidual(const GridVariables& gridVars) - { - Assembler::assembleResidual(gridVars.dofs()); - } - - auto residualNorm(const GridVariables& gridVars) - { - return Assembler::residualNorm(gridVars.dofs()); - } -}; int main(int argc, char** argv) { @@ -145,9 +122,20 @@ int main(int argc, char** argv) auto timeLoop = std::make_shared>(0.0, dt, tEnd); timeLoop->setMaxTimeStepSize(maxDt); - // the assembler with time loop for instationary problem - using Assembler = GridVarsAssembler>; - auto assembler = std::make_shared(problem, gridGeometry, gridVariables, timeLoop, xOld); + // use implicit Euler for time integration + auto timeMethod = std::make_shared>(); + + // create assembler & linear solver + // TODO: The operators or local operator could be a property, just as is LocalResidual currently + using ModelTraits = GetPropType; + using FluxVariables = GetPropType; + using ElemVariables = typename GridVariables::LocalView; + + using ImmiscibleOperators = FVImmiscibleOperators; + using LocalOperator = FVLocalOperator; + + using Assembler = Assembler; + auto assembler = std::make_shared(gridGeometry, *timeMethod); // the linear solver using LinearSolver = ILU0BiCGSTABBackend; @@ -155,7 +143,11 @@ int main(int argc, char** argv) // the non-linear solver using NewtonSolver = Dumux::NewtonSolver; - NewtonSolver nonLinearSolver(assembler, linearSolver); + auto nonLinearSolver = std::make_shared(assembler, linearSolver); + + // the time stepper for time integration + using TimeStepper = MultiStageTimeStepper; + TimeStepper timeStepper(nonLinearSolver, timeMethod); // set some check points for the time loop timeLoop->setPeriodicCheckPoint(tEnd/10.0); @@ -163,12 +155,8 @@ int main(int argc, char** argv) // time loop timeLoop->start(); do { - // linearize & solve - nonLinearSolver.solve(*gridVariables, *timeLoop); - - // make the new solution the old solution - xOld = gridVariables->dofs(); - gridVariables->advanceTimeStep(); + // do time integraiton + timeStepper.step(*gridVariables, timeLoop->time(), timeLoop->timeStepSize()); // advance to the time loop to the next step timeLoop->advanceTimeStep(); @@ -181,7 +169,7 @@ int main(int argc, char** argv) timeLoop->reportTimeStep(); // set new dt as suggested by the newton solver - timeLoop->setTimeStepSize(nonLinearSolver.suggestTimeStepSize(timeLoop->timeStepSize())); + 
timeLoop->setTimeStepSize(nonLinearSolver->suggestTimeStepSize(timeLoop->timeStepSize())); } while (!timeLoop->finished()); -- GitLab From 955ea43db0b80026f93dd1aae3b18a3be1cfb146 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 13 Jan 2021 14:43:38 +0100 Subject: [PATCH 43/46] [assembler] fix local assembler construction --- dumux/assembly/assembler.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dumux/assembly/assembler.hh b/dumux/assembly/assembler.hh index 93870f578d..40913644c3 100644 --- a/dumux/assembly/assembler.hh +++ b/dumux/assembly/assembler.hh @@ -159,7 +159,7 @@ public: auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); using LocalAssembler = Dumux::LocalAssembler; - LocalAssembler localAssembler(element, ggLocalView, elemVars); + LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_); localAssembler.assembleJacobianAndResidual(*jacobian_, *residual_, partialReassembler); }); @@ -187,7 +187,7 @@ public: auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); using LocalAssembler = Dumux::LocalAssembler; - LocalAssembler localAssembler(element, ggLocalView, elemVars); + LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_); localAssembler.assembleJacobianAndResidual(*jacobian_); }); @@ -220,7 +220,7 @@ public: auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView); using LocalAssembler = Dumux::LocalAssembler; - LocalAssembler localAssembler(element, ggLocalView, elemVars); + LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_); localAssembler.assembleResidual(r); }); } -- GitLab From a386427e8bd1dd08f9ac6a77b2183cd3b4fbcb76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 13 Jan 2021 15:05:41 +0100 Subject: [PATCH 44/46] [boxlocalassembler] add todo comment --- dumux/assembly/fv/boxlocalassembler.hh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh index 60bb72e8e8..80fd526a1c 100644 --- a/dumux/assembly/fv/boxlocalassembler.hh +++ b/dumux/assembly/fv/boxlocalassembler.hh @@ -355,6 +355,14 @@ protected: origResiduals, eps_(elemSol[localI][pvIdx], pvIdx), numDiffMethod); + // TODO: Distinguish between implicit/explicit here. For explicit schemes, + // no entries between different scvs of an element are reserved. Thus, + // we currently get an error when using explicit schemes. + // TODO: Doesn't this mean we only have to evaluate the residual for a single + // scv instead of calling evalLocalResidual()? That computes the residuals + // and derivs for all other scvs of the element, too, which are never used. + // Note: this is the same in the current implementation of master. + // Should we try to optimize this for explicit schemes? Or adjust the Jacobian pattern? 
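One conceivable resolution of the TODO above (a sketch only, not what this series implements) would be to guard the write-back loop quoted just below so that explicit stages only touch the diagonal block, since the explicit Jacobian pattern reserves no entries between different scvs of an element. The `stageIsImplicit` flag is hypothetical, e.g. something derived from the stage parameters:

```cpp
// Sketch: skip off-diagonal blocks for explicit stages. 'stageIsImplicit' is a
// hypothetical flag; 'localI' and 'partialDerivs' are the variables of the
// surrounding code.
for (const auto& scvJ : scvs(fvGeometry()))
{
    if (!stageIsImplicit && scvJ.localDofIndex() != localI)
        continue; // only the diagonal block exists in the explicit sparsity pattern

    // ... add partialDerivs[scvJ.localDofIndex()] to the Jacobian blocks
    //     exactly as in the unchanged loop quoted below ...
}
```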
// update the global stiffness matrix with the current partial derivatives for (const auto& scvJ : scvs(fvGeometry())) { -- GitLab From 13590874868be0162c5ce4e0c10d3a8878a9d5bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 10 Feb 2021 11:43:23 +0100 Subject: [PATCH 45/46] [fv][localop] export operators --- dumux/assembly/fv/localoperator.hh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh index 7a9d0211bf..d4374b8df1 100644 --- a/dumux/assembly/fv/localoperator.hh +++ b/dumux/assembly/fv/localoperator.hh @@ -41,9 +41,9 @@ namespace Dumux { * This allows for element-wise evaluation of individual terms * of the equations to be solved. * \tparam ElementVariables local view on the grid variables - * \tparam Operators The model-specific operators + * \tparam Op The model-specific operators */ -template +template class FVLocalOperator { // The variables required for the evaluation of the equation @@ -59,12 +59,15 @@ class FVLocalOperator using GridView = typename GridGeometry::GridView; using Element = typename GridView::template Codim<0>::Entity; - using NumEqVector = typename Operators::NumEqVector; + using NumEqVector = typename Op::NumEqVector; static constexpr int dim = GridView::dimension; static constexpr int numEq = NumEqVector::size(); static constexpr bool isBox = GridGeometry::discMethod == DiscretizationMethod::box; public: + //! export operators + using Operators = Op; + //! export the grid variables type this operator requires a local view of using GridVariables = GridVars; -- GitLab From 9de313691e6efff043ce487afc7e63b6bc479628 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dennis=20Gl=C3=A4ser?= Date: Wed, 10 Feb 2021 11:44:20 +0100 Subject: [PATCH 46/46] [assembly] add new cc local assembler --- dumux/assembly/fv/cclocalassembler.hh | 412 ++++++++++++++++++++++++++ dumux/assembly/localassembler.hh | 9 + 2 files changed, 421 insertions(+) create mode 100644 dumux/assembly/fv/cclocalassembler.hh diff --git a/dumux/assembly/fv/cclocalassembler.hh b/dumux/assembly/fv/cclocalassembler.hh new file mode 100644 index 0000000000..a1f92a3334 --- /dev/null +++ b/dumux/assembly/fv/cclocalassembler.hh @@ -0,0 +1,412 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. 
+ */ +#ifndef DUMUX_CC_LOCAL_ASSEMBLER_HH +#define DUMUX_CC_LOCAL_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. + * \tparam Assembler The grid-wide assembler type + */ +template +class CCLocalAssembler +{ + using GridVariables = typename Assembler::GridVariables; + using GridGeometry = typename GridVariables::GridGeometry; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::ResidualVector; + using LocalOperator = typename Assembler::LocalOperator; + using Operators = typename LocalOperator::Operators; + using NumEqVector = typename Operators::NumEqVector; + + static constexpr int numEq = PrimaryVariables::size(); + static constexpr int maxElementStencilSize = GridGeometry::maxElementStencilSize; + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit CCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + { + assert(elemVars.size() == 1); + assert(fvGeometry_.numScv() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit CCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + { assert(fvGeometry_.numScv() == 1); } + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacobianMatrix& jac, + ResidualVector& res, + const PartialReassembler* partialReassembler = nullptr) + { + const auto globalI = fvGeometry().gridGeometry().elementMapper().index(element()); + if (partialReassembler + && partialReassembler->elementColor(globalI) == EntityColor::green) + { + res[globalI] = evalLocalResidual()[0]; // forward to the internal implementation + } + else + { + res[globalI] = assembleJacobianAndResidual_(jac); // forward to the internal implementation + } + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. 
+     */
+    void assembleJacobian(JacobianMatrix& jac)
+    {
+        assembleJacobianAndResidual_(jac);
+    }
+
+    /*!
+     * \brief Assemble the residual only
+     */
+    void assembleResidual(ResidualVector& res)
+    {
+        const auto residual = evalLocalResidual();
+        for (const auto& scv : scvs(fvGeometry()))
+            res[scv.dofIndex()] += residual[scv.localDofIndex()];
+    }
+
+protected:
+
+    /*!
+     * \brief Evaluate the complete local residual for the current element.
+     */
+    ElementResidualVector evalLocalResidual() const
+    {
+        if (isStationary())
+        {
+            LocalOperator localOperator(element(), fvGeometry(), elemVariables());
+            return elementIsGhost_ ? localOperator.getEmptyResidual()
+                                   : localOperator.evalFluxesAndSources();
+        }
+        else
+        {
+            ElementResidualVector residual(fvGeometry().numScv());
+            residual = 0.0;
+
+            for (std::size_t k = 0; k < stageParams_->size(); ++k)
+            {
+                LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]);
+
+                if (!stageParams_->skipTemporal(k))
+                    residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage());
+                if (!stageParams_->skipSpatial(k))
+                    residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources());
+            }
+
+            return residual;
+        }
+    }
+
+    /*!
+     * \brief Computes the derivatives with respect to the dofs of the given
+     *        element and adds them to the global matrix.
+     * \return The element residual at the current solution.
+     */
+    NumEqVector assembleJacobianAndResidual_(JacobianMatrix& A)
+    {
+        if constexpr (diffMethod == DiffMethod::numeric)
+            return assembleJacobianAndResidualNumeric_(A);
+        else
+            DUNE_THROW(Dune::NotImplemented, "Analytic assembler for cc schemes");
+    }
+
+    /*!
+     * \brief Computes the derivatives by means of numeric differentiation
+     *        and adds them to the global matrix.
+     * \return The element residual at the current solution.
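+     *
+     * As a rough sketch of the idea: each primary variable of the element is
+     * deflected and the residuals are re-evaluated, so that, e.g. for forward
+     * differences, the Jacobian entries are approximated by
+     * dR_i/du_j ≈ (R_i(u + eps*e_j) - R_i(u)) / eps,
+     * with e_j the j-th unit vector. The actual difference scheme is selected
+     * at runtime via the "Assembly.NumericDifferenceMethod" parameter.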
+     * \note This specialization is for numeric differentiation
+     */
+    NumEqVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A)
+    {
+        using Problem = std::decay_t;
+
+        // get the variables of the current stage
+        auto& curVariables = elemVariables_();
+        auto& curElemVolVars = curVariables.elemVolVars();
+        auto& curElemFluxVarsCache = curVariables.elemFluxVarsCache();
+        const auto& x = curVariables.gridVariables().dofs();
+
+        // get stencil information
+        const auto& gridGeometry = fvGeometry().gridGeometry();
+        const auto& connectivityMap = gridGeometry.connectivityMap();
+        const auto globalI = gridGeometry.elementMapper().index(element_);
+        const auto numNeighbors = connectivityMap[globalI].size();
+
+        // container to store the neighboring elements
+        Dune::ReservedVector neighborElements;
+        neighborElements.resize(numNeighbors);
+
+        // assemble the undeflected residual
+        using Residuals = ReservedBlockVector;
+        Residuals origResiduals(numNeighbors + 1); origResiduals = 0.0;
+        origResiduals[0] = evalLocalResidual()[0];
+
+        // lambda for convenient evaluation of the fluxes across scvfs in the neighbors
+        // if the neighbor is a ghost we don't want to add anything to their residual
+        // so we return 0 and omit computing the flux
+        auto evalNeighborFlux = [&] (const auto& neighbor, const auto& scvf)
+        {
+            if (neighbor.partitionType() == Dune::GhostEntity)
+                return NumEqVector(0.0);
+            else
+                return Operators::flux(problem(), neighbor, fvGeometry(),
+                                       curElemVolVars, curElemFluxVarsCache, scvf);
+        };
+
+        // get the elements in which we need to evaluate the fluxes
+        // and calculate these in the undeflected state
+        unsigned int j = 1;
+        for (const auto& dataJ : connectivityMap[globalI])
+        {
+            neighborElements[j-1] = gridGeometry.element(dataJ.globalJ);
+            for (const auto scvfIdx : dataJ.scvfsJ)
+                origResiduals[j] += evalNeighborFlux(neighborElements[j-1], fvGeometry().scvf(scvfIdx));
+            ++j;
+        }
+
+        // reference to the element's scv (needed later) and corresponding vol vars
+        // TODO: Support the case of caching?!
+        const auto& scv = fvGeometry().scv(globalI);
+        auto& curVolVars = curElemVolVars[scv];
+
+        // save a copy of the original privars and vol vars in order
+        // to restore the original solution after deflection
+        const auto origPriVars = x[globalI];
+        const auto origVolVars = curVolVars;
+
+        // element solution container to be deflected
+        auto elemSol = elementSolution(element_, x, gridGeometry);
+
+        // derivatives in the neighbors with respect to the current element's dofs;
+        // in index 0 we save the derivative of the element residual with respect to its own dofs
+        Residuals partialDerivs(numNeighbors + 1);
+        for (int pvIdx = 0; pvIdx < numEq; ++pvIdx)
+        {
+            partialDerivs = 0.0;
+
+            auto evalResiduals = [&](Scalar priVar)
+            {
+                Residuals partialDerivsTmp(numNeighbors + 1);
+                partialDerivsTmp = 0.0;
+
+                // update the volume variables and the flux var cache
+                elemSol[0][pvIdx] = priVar;
+                curVolVars.update(elemSol, problem(), element_, scv);
+                curElemFluxVarsCache.update(element_, fvGeometry(), curElemVolVars);
+                // TODO: UPDATE GLOBAL FLUX VARS CACHE HERE?
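+
+                // Note: at this point, the element's volume variables and the flux
+                // variables cache correspond to the deflected value 'priVar' in
+                // component 'pvIdx', so the evaluations below yield the residual
+                // and neighbor fluxes at the deflected solution.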
+
+                // calculate the residual with the deflected primary variables
+                partialDerivsTmp[0] = evalLocalResidual()[0];
+
+                // calculate the fluxes in the neighbors with the deflected primary variables
+                for (std::size_t k = 0; k < numNeighbors; ++k)
+                    for (auto scvfIdx : connectivityMap[globalI][k].scvfsJ)
+                        partialDerivsTmp[k+1] += evalNeighborFlux(neighborElements[k], fvGeometry().scvf(scvfIdx));
+
+                return partialDerivsTmp;
+            };
+
+            // derive the residuals numerically
+            static const NumericEpsilon eps_{problem().paramGroup()};
+            static const int numDiffMethod = getParamFromGroup(problem().paramGroup(), "Assembly.NumericDifferenceMethod");
+            NumericDifferentiation::partialDerivative(evalResiduals, elemSol[0][pvIdx], partialDerivs, origResiduals,
+                                                      eps_(elemSol[0][pvIdx], pvIdx), numDiffMethod);
+
+            // Correct derivative for ghost elements, i.e. set a 1 for the derivative w.r.t. the
+            // current primary variable and a 0 elsewhere. As we always solve for a delta of the
+            // solution with respect to the initial one, this results in a delta of 0 for ghosts.
+            if (elementIsGhost_)
+            {
+                partialDerivs[0] = 0.0;
+                partialDerivs[0][pvIdx] = 1.0;
+            }
+
+            // For instationary simulations, scale the coupling
+            // fluxes of the current stage correctly
+            if (stageParams_)
+            {
+                for (std::size_t k = 0; k < numNeighbors; ++k)
+                    partialDerivs[k+1] *= stageParams_->spatialWeight(stageParams_->size()-1);
+            }
+
+            // add the current partial derivatives to the global jacobian matrix
+            // no special treatment is needed if globalJ is a ghost because then derivatives have been assembled to 0 above
+            if constexpr (Problem::enableInternalDirichletConstraints())
+            {
+                // check if own element has internal Dirichlet constraint
+                const auto internalDirichletConstraintsOwnElement = this->problem().hasInternalDirichletConstraint(element_, scv);
+                const auto dirichletValues = this->problem().internalDirichlet(element_, scv);
+
+                for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+                {
+                    if (internalDirichletConstraintsOwnElement[eqIdx])
+                    {
+                        origResiduals[0][eqIdx] = origVolVars.priVars()[eqIdx] - dirichletValues[eqIdx];
+                        A[globalI][globalI][eqIdx][pvIdx] = (eqIdx == pvIdx) ? 1.0 : 0.0;
+                    }
+                    else
+                        A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+                }
+
+                // off-diagonal entries
+                j = 1;
+                for (const auto& dataJ : connectivityMap[globalI])
+                {
+                    const auto& neighborElement = neighborElements[j-1];
+                    const auto& neighborScv = fvGeometry().scv(dataJ.globalJ);
+                    const auto internalDirichletConstraintsNeighbor = problem().hasInternalDirichletConstraint(neighborElement, neighborScv);
+
+                    for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+                    {
+                        if (internalDirichletConstraintsNeighbor[eqIdx])
+                            A[dataJ.globalJ][globalI][eqIdx][pvIdx] = 0.0;
+                        else
+                            A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j][eqIdx];
+                    }
+
+                    ++j;
+                }
+            }
+            else // no internal Dirichlet constraints specified
+            {
+                for (int eqIdx = 0; eqIdx < numEq; eqIdx++)
+                {
+                    // the diagonal entries
+                    A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+
+                    // off-diagonal entries
+                    j = 1;
+                    for (const auto& dataJ : connectivityMap[globalI])
+                        A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j++][eqIdx];
+                }
+            }
+
+            // restore the original state of the scv's volume variables
+            curVolVars = origVolVars;
+
+            // restore the current element solution
+            elemSol[0][pvIdx] = origPriVars[pvIdx];
+        }
+
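+        // Each pass of the loop above has filled one column (pvIdx) of the matrix
+        // blocks A[globalI][globalI] and A[dataJ.globalJ][globalI], i.e. the derivatives
+        // of the element residual and of the neighbor fluxes with respect to the
+        // primary variables of this element.
+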
+        // restore original state of the flux vars cache in case of global caching.
+        // This has to be done in order to guarantee that everything is in an undeflected
+        // state before the assembly of another element is called. In the case of local caching
+        // this is obsolete because the elemFluxVarsCache used here goes out of scope after this.
+        // We only have to do this for the last primary variable, for all others the flux var cache
+        // is updated with the correct element volume variables before residual evaluations
+        // TODO: RESET GLOBAL FLUX VARS CACHE HERE
+
+        // return the original residual
+        return origResiduals[0];
+    }
+
+    //! Return references to the local views
+    const Element& element() const { return element_; }
+    const FVElementGeometry& fvGeometry() const { return fvGeometry_; }
+    const ElementVariables& elemVariables() const { return elementVariables_.back(); }
+    ElementVariables& elemVariables_() { return elementVariables_.back(); }
+
+    //! Returns if a stationary problem is assembled
+    bool isStationary() const { return !stageParams_; }
+
+    //! Return a reference to the underlying problem
+    //! TODO: Should grid vars return problem directly!?
+    const auto& problem() const
+    { return elemVariables().gridVariables().gridVolVars().problem(); }
+
+private:
+    const Element& element_;
+    const FVElementGeometry& fvGeometry_;
+    std::vector& elementVariables_;
+
+    bool elementIsGhost_;
+    std::shared_ptr stageParams_;
+};
+
+} // end namespace Dumux
+
+#endif
diff --git a/dumux/assembly/localassembler.hh b/dumux/assembly/localassembler.hh
index b5fbe3da62..8fa6e006d7 100644
--- a/dumux/assembly/localassembler.hh
+++ b/dumux/assembly/localassembler.hh
@@ -26,6 +26,7 @@
 #include
 #include "fv/boxlocalassembler.hh"
+#include "fv/cclocalassembler.hh"
 namespace Dumux {
 namespace Impl {
@@ -37,6 +38,14 @@ struct LocalAssemblerChooser
 { using type = BoxLocalAssembler; };
+template
+struct LocalAssemblerChooser
+{ using type = CCLocalAssembler; };
+
+template
+struct LocalAssemblerChooser
+{ using type = CCLocalAssembler; };
+
 template
 using LocalAssemblerType = typename LocalAssemblerChooser