diff --git a/dumux/CMakeLists.txt b/dumux/CMakeLists.txt index 8b27a751738d8a46aa6be7c793cfafad433a5d6d..291ce498c1b32697a4fa06a7556b489f84246582 100644 --- a/dumux/CMakeLists.txt +++ b/dumux/CMakeLists.txt @@ -13,6 +13,7 @@ add_subdirectory(multidomain) add_subdirectory(nonlinear) add_subdirectory(parallel) add_subdirectory(porousmediumflow) +add_subdirectory(timestepping) # if Python bindings are enabled, include necessary sub directories. if(DUNE_ENABLE_PYTHONBINDINGS) diff --git a/dumux/assembly/CMakeLists.txt b/dumux/assembly/CMakeLists.txt index 2543a6631f3a61445e1bbb4aeeb8461b93115bd7..63a13d1e43643adf778ad09a48d437e10987d427 100644 --- a/dumux/assembly/CMakeLists.txt +++ b/dumux/assembly/CMakeLists.txt @@ -1,4 +1,5 @@ install(FILES +assembler.hh boxlocalassembler.hh boxlocalresidual.hh cclocalassembler.hh @@ -10,6 +11,7 @@ fvlocalassemblerbase.hh fvlocalresidual.hh initialsolution.hh jacobianpattern.hh +localassembler.hh numericepsilon.hh partialreassembler.hh staggeredfvassembler.hh diff --git a/dumux/assembly/assembler.hh b/dumux/assembly/assembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..40913644c388834cd6b6e8a37bc187acb9479b79 --- /dev/null +++ b/dumux/assembly/assembler.hh @@ -0,0 +1,498 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Assembler class for residuals and jacobian matrices + * for grid-based numerical schemes. + */ +#ifndef DUMUX_ASSEMBLER_HH +#define DUMUX_ASSEMBLER_HH + +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "localassembler.hh" +#include "jacobianpattern.hh" + +namespace Dumux { + +//! Default types used for the linear system +template struct DefaultLinearSystemTraits; + +//! Default linear system types for Dune::BlockVector +template +struct DefaultLinearSystemTraits> +{ +private: + static constexpr int numEq = PrimaryVariables::size(); + using Scalar = typename PrimaryVariables::value_type; + using MatrixBlockType = Dune::FieldMatrix; + +public: + using ResidualVector = Dune::BlockVector; + using JacobianMatrix = Dune::BCRSMatrix; +}; + +/*! 
+ * \ingroup Assembly
+ * \brief A linear system assembler (residual and Jacobian) for grid-based numerical schemes
+ * \tparam LO The local operator (evaluation of the terms of the equation)
+ * \tparam diffMethod The differentiation method to compute derivatives
+ * \tparam LST The linear system traits (types used for jacobians and residuals)
+ */
+template< class LO, DiffMethod diffMethod,
+          class LST = DefaultLinearSystemTraits<typename LO::GridVariables::SolutionVector> >
+class Assembler
+{
+    using ThisType = Assembler<LO, diffMethod, LST>;
+
+    using GG = typename LO::GridVariables::GridGeometry;
+    using GGLocalView = typename GG::LocalView;
+    using GridView = typename GG::GridView;
+    using Element = typename GridView::template Codim<0>::Entity;
+    using ElementVariables = typename LO::GridVariables::LocalView;
+
+public:
+    //! export the types used for the linear system
+    using Scalar = typename LO::GridVariables::Scalar;
+    using JacobianMatrix = typename LST::JacobianMatrix;
+    using ResidualVector = typename LST::ResidualVector;
+    using ResidualType = ResidualVector;
+
+    //! export the local operator type
+    using LocalOperator = LO;
+
+    //! the local operator states the type of variables needed for evaluation
+    using GridVariables = typename LO::GridVariables;
+
+    //! export the underlying grid geometry type
+    using GridGeometry = GG;
+
+    //! export a grid-independent alias of the variables
+    using Variables = GridVariables;
+
+    //! export type used for solution vectors
+    using SolutionVector = typename GridVariables::SolutionVector;
+
+    //! export the type for parameters of a stage in time integration
+    using StageParams = MultiStageParams<Scalar>;
+
+    /*!
+     * \brief The constructor from a grid geometry.
+     * \param gridGeometry A grid geometry instance
+     * \note This assembler class is, after construction, defined for a specific equation
+     *       (given by the template argument of the LocalOperator) and a specific grid
+     *       geometry - which defines the connectivity of the degrees of freedom of the
+     *       underlying discretization scheme on a particular grid. The evaluation point,
+     *       consisting of a particular solution/variables/parameters, may vary, and therefore
+     *       an instance of the grid variables is passed to the assembly functions.
+     * \note This constructor (without time integration method) assumes that a stationary
+     *       problem is assembled, and thus, an implicit jacobian pattern is used.
+     */
+    Assembler(std::shared_ptr<const GridGeometry> gridGeometry)
+    : gridGeometry_(gridGeometry)
+    , isImplicit_(true)
+    {}
+
+    /*!
+     * \brief The constructor from a grid geometry and a time integration method.
+     * \param gridGeometry A grid geometry instance
+     * \param method the time integration method that will be used.
+     */
+    template<class TimeIntegrationMethod>
+    Assembler(std::shared_ptr<const GridGeometry> gridGeometry,
+              const TimeIntegrationMethod& method)
+    : gridGeometry_(gridGeometry)
+    , isImplicit_(method.implicit())
+    {}
+
+    /*!
+     * \brief Assembles the Jacobian matrix and the residual around the given evaluation point,
+     *        which is determined by the grid variables, containing all quantities required
+     *        to evaluate the equations to be assembled.
+     * \param gridVariables The variables corresponding to the given solution state
+     * \note We assume the grid geometry on which the grid variables are defined
+     *       to be the same as the one used to instantiate this class
+     */
+    template<class PartialReassembler = DefaultPartialReassembler>
+    void assembleJacobianAndResidual(const GridVariables& gridVariables,
+                                     const PartialReassembler* partialReassembler = nullptr)
+    {
+        resetJacobian_(partialReassembler);
+        resetResidual_();
+
+        // TODO: Remove this assert?
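+        // (the assert below guards against passing grid variables that were
+        //  built on a different grid geometry than the one given at construction)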
+        assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs());
+
+        assemble_([&](const Element& element)
+        {
+            auto ggLocalView = localView(gridGeometry());
+            ggLocalView.bind(element);
+            auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView);
+
+            using LocalAssembler = Dumux::LocalAssembler<ThisType, diffMethod>;
+            LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_);
+            localAssembler.assembleJacobianAndResidual(*jacobian_, *residual_, partialReassembler);
+        });
+
+        // TODO: Put these into discretization-specific helpers?
+        enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_);
+        enforceInternalConstraints_(gridVariables, *jacobian_, *residual_);
+        enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_);
+    }
+
+    /*!
+     * \brief Assembles the Jacobian matrix of the discrete system of equations
+     *        around a given state represented by the grid variables object.
+     */
+    void assembleJacobian(const GridVariables& gridVariables)
+    {
+        resetJacobian_();
+
+        // TODO: Remove this assert?
+        assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs());
+
+        assemble_([&](const Element& element)
+        {
+            auto ggLocalView = localView(gridGeometry());
+            ggLocalView.bind(element);
+            auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView);
+
+            using LocalAssembler = Dumux::LocalAssembler<ThisType, diffMethod>;
+            LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_);
+            localAssembler.assembleJacobian(*jacobian_);
+        });
+
+        // TODO: Put these into discretization-specific helpers?
+        enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_);
+        enforceInternalConstraints_(gridVariables, *jacobian_, *residual_);
+        enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_);
+    }
+
+    /*!
+     * \brief Assembles the residual for a given state represented by the provided
+     *        grid variables object, using the internal residual vector to store the result.
+     */
+    void assembleResidual(const GridVariables& gridVariables)
+    {
+        resetResidual_();
+        assembleResidual(*residual_, gridVariables);
+    }
+
+    /*!
+     * \brief Assembles the residual for a given state represented by the provided
+     *        grid variables object, using the provided residual vector to store the result.
+     */
+    void assembleResidual(ResidualVector& r, const GridVariables& gridVariables) const
+    {
+        assemble_([&](const Element& element)
+        {
+            auto ggLocalView = localView(gridGeometry());
+            ggLocalView.bind(element);
+            auto elemVars = this->prepareElemVariables_(gridVariables, element, ggLocalView);
+
+            using LocalAssembler = Dumux::LocalAssembler<ThisType, diffMethod>;
+            LocalAssembler localAssembler(element, ggLocalView, elemVars, stageParams_);
+            localAssembler.assembleResidual(r);
+        });
+    }
+
+    //! Will become obsolete once the new linear solvers are available
+    Scalar residualNorm(const GridVariables& gridVars) const
+    {
+        ResidualType residual(numDofs());
+        assembleResidual(residual, gridVars);
+
+        // for box communicate the residual with the neighboring processes
+        if (GridGeometry::discMethod == DiscretizationMethod::box && gridView().comm().size() > 1)
+        {
+            using VertexMapper = typename GridGeometry::VertexMapper;
+            VectorCommDataHandleSum<VertexMapper, ResidualType, GridView::dimension>
+                sumResidualHandle(gridGeometry_->vertexMapper(), residual);
+            gridView().communicate(sumResidualHandle,
+                                   Dune::InteriorBorder_InteriorBorder_Interface,
+                                   Dune::ForwardCommunication);
+        }
+
+        // calculate the square norm of the residual
+        Scalar result2 = residual.two_norm2();
+        if (gridView().comm().size() > 1)
+            result2 = gridView().comm().sum(result2);
+
+        using std::sqrt;
+        return sqrt(result2);
+    }
+
+
+    /*!
+     * \brief Tells the assembler which jacobian and residual to use.
+     *        This also resizes the containers to the required sizes and sets the
+     *        sparsity pattern of the jacobian matrix.
+     */
+    void setLinearSystem(std::shared_ptr<JacobianMatrix> A,
+                         std::shared_ptr<ResidualVector> r)
+    {
+        jacobian_ = A;
+        residual_ = r;
+
+        // check and/or set the BCRS matrix's build mode
+        if (jacobian_->buildMode() == JacobianMatrix::BuildMode::unknown)
+            jacobian_->setBuildMode(JacobianMatrix::random);
+        else if (jacobian_->buildMode() != JacobianMatrix::BuildMode::random)
+            DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
+
+        setJacobianPattern();
+        setResidualSize();
+    }
+
+    /*!
+     * \brief The version without arguments uses the default constructor to create
+     *        the jacobian and residual objects in this assembler if you don't need
+     *        them outside this class
+     */
+    void setLinearSystem()
+    {
+        jacobian_ = std::make_shared<JacobianMatrix>();
+        jacobian_->setBuildMode(JacobianMatrix::random);
+        residual_ = std::make_shared<ResidualVector>();
+
+        setJacobianPattern();
+        setResidualSize();
+    }
+
+    /*!
+     * \brief Resizes the jacobian and sets the jacobian's sparsity pattern.
+     */
+    void setJacobianPattern()
+    {
+        // resize the jacobian and the residual
+        const auto numDofs = this->numDofs();
+        jacobian_->setSize(numDofs, numDofs);
+
+        // create occupation pattern of the jacobian
+        // TODO: Does the bool need to be known at compile time in getJacobianPattern?
+        const auto occupationPattern = isImplicit_ ? getJacobianPattern<true>(gridGeometry())
+                                                   : getJacobianPattern<false>(gridGeometry());
+
+        // export pattern to jacobian
+        occupationPattern.exportIdx(*jacobian_);
+    }
+
+    /*!
+     * \brief Prepare for a new stage within a time integration step.
+     *        This caches the given grid variables, which are then used as a
+     *        representation of the previous stage. Moreover, the given grid
+     *        variables are then updated to the time level of the upcoming stage.
+     * \param gridVars the grid variables representing the previous stage
+     * \param params the parameters with the weights to be used in the upcoming stage
+     * \todo TODO: This function does two things, namely caching and then updating.
+     *       Should we split/delegate this, or is the current name descriptive enough?
+     *       From the outside, one expects the grid variables to be prepared for the
+     *       upcoming stage, and that is what is done. The caching is an internal
+     *       detail that callers do not need to know about.
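+     * \note A sketch of how a multi-stage time integrator might drive this
+     *       interface (hypothetical caller code, not part of this patch):
+     * \code
+     * for (std::size_t stage = 0; stage < stageParams.size(); ++stage)
+     * {
+     *     assembler.prepareStage(gridVars, stageParams[stage]);
+     *     assembler.assembleJacobianAndResidual(gridVars);
+     *     // ... solve the linear system and update the stage solution ...
+     * }
+     * assembler.clearStages();
+     * \endcode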
+     */
+    void prepareStage(GridVariables& gridVars,
+                      std::shared_ptr<const StageParams> params)
+    {
+        stageParams_ = params;
+        const auto curStage = params->size() - 1;
+
+        // we keep track of previous stages, they are needed for residual assembly
+        prevStageVariables_.push_back(gridVars);
+
+        // Now we update the time level of the given grid variables
+        const auto t = params->timeAtStage(curStage);
+        const auto prevT = params->timeAtStage(0);
+        const auto dtFraction = params->timeStepFraction(curStage);
+        TimeLevel timeLevel(t, prevT, dtFraction);
+
+        gridVars.updateTime(timeLevel);
+    }
+
+    /*!
+     * \brief Remove traces from stages within a time integration step.
+     */
+    void clearStages()
+    {
+        prevStageVariables_.clear();
+        stageParams_ = nullptr;
+    }
+
+    //! Resizes the residual
+    void setResidualSize()
+    { residual_->resize(numDofs()); }
+
+    //! Returns the number of degrees of freedom
+    std::size_t numDofs() const
+    { return gridGeometry().numDofs(); }
+
+    //! The global finite volume geometry
+    const GridGeometry& gridGeometry() const
+    { return *gridGeometry_; }
+
+    //! The gridview
+    const GridView& gridView() const
+    { return gridGeometry().gridView(); }
+
+    //! The jacobian matrix
+    JacobianMatrix& jacobian()
+    { return *jacobian_; }
+
+    //! The residual vector (rhs)
+    ResidualVector& residual()
+    { return *residual_; }
+
+protected:
+    // reset the residual vector to 0.0
+    void resetResidual_()
+    {
+        if (!residual_)
+        {
+            residual_ = std::make_shared<ResidualVector>();
+            setResidualSize();
+        }
+
+        (*residual_) = 0.0;
+    }
+
+    // reset the Jacobian matrix to 0.0
+    template<class PartialReassembler = DefaultPartialReassembler>
+    void resetJacobian_(const PartialReassembler *partialReassembler = nullptr)
+    {
+        if (!jacobian_)
+        {
+            jacobian_ = std::make_shared<JacobianMatrix>();
+            jacobian_->setBuildMode(JacobianMatrix::random);
+            setJacobianPattern();
+        }
+
+        if (partialReassembler)
+            partialReassembler->resetJacobian(*this);
+        else
+            *jacobian_ = 0.0;
+    }
+
+    /*!
+     * \brief A method assembling something per element
+     * \note Handles exceptions for parallel runs
+     * \throws NumericalProblem on all processes if something threw during assembly
+     */
+    template<class AssembleElementFunc>
+    void assemble_(AssembleElementFunc&& assembleElement) const
+    {
+        // a state that will be checked on all processes
+        bool succeeded = false;
+
+        // try assembling using the local assembly function
+        try
+        {
+            // let the local assembler add the element contributions
+            for (const auto& element : elements(gridView()))
+                assembleElement(element);
+
+            // if we get here, everything worked well on this process
+            succeeded = true;
+        }
+        // throw exception if a problem occurred
+        catch (NumericalProblem &e)
+        {
+            std::cout << "rank " << gridView().comm().rank()
+                      << " caught an exception while assembling:" << e.what()
+                      << "\n";
+            succeeded = false;
+        }
+
+        // make sure everything worked well on all processes
+        if (gridView().comm().size() > 1)
+            succeeded = gridView().comm().min(succeeded);
+
+        // if not succeeded rethrow the error on all processes
+        if (!succeeded)
+            DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
+    }
+
+    void enforceDirichletConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, ResidualVector& res)
+    { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ }
+
+    void enforceInternalConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, ResidualVector& res)
+    { /*TODO: Implement / where to put this?
 Currently, local assemblers do this*/ }
+
+    void enforcePeriodicConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, ResidualVector& res)
+    { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ }
+
+    //! prepares the local views on the grid variables for the given element
+    //! \todo TODO: when stageparams.skipSpatial() == true, we don't need to bind flux vars caches!
+    std::vector<ElementVariables> prepareElemVariables_(const GridVariables& gridVars,
+                                                        const Element& element,
+                                                        const GGLocalView& ggLocalView) const
+    {
+        if (!stageParams_)
+        {
+            auto elemVars = localView(gridVars);
+            elemVars.bind(element, ggLocalView);
+            return {elemVars};
+        }
+        else
+        {
+            std::vector<ElementVariables> elemVars;
+            elemVars.reserve(stageParams_->size());
+
+            for (int i = 0; i < stageParams_->size()-1; ++i)
+                elemVars.emplace_back(prevStageVariables_[i]);
+            elemVars.emplace_back(gridVars);
+
+            for (auto& evv : elemVars)
+                evv.bind(element, ggLocalView);
+
+            return elemVars;
+        }
+    }
+
+private:
+    //! the grid geometry on which it is assembled
+    std::shared_ptr<const GridGeometry> gridGeometry_;
+
+    //! states if an implicit or explicit scheme is used (affects jacobian pattern)
+    bool isImplicit_;
+
+    //! shared pointers to the jacobian matrix and residual
+    std::shared_ptr<JacobianMatrix> jacobian_;
+    std::shared_ptr<ResidualVector> residual_;
+
+    //! parameters containing information on the current stage of time integration
+    std::shared_ptr<const StageParams> stageParams_ = nullptr;
+
+    //! keep track of the states of previous stages within a time integration step
+    std::vector<GridVariables> prevStageVariables_;
+};
+
+} // namespace Dumux
+
+#endif
diff --git a/dumux/assembly/fv/CMakeLists.txt b/dumux/assembly/fv/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..40d6910f65323ddab8e818e6c6739d5fdf9c9ace
--- /dev/null
+++ b/dumux/assembly/fv/CMakeLists.txt
@@ -0,0 +1,6 @@
+install(FILES
+boxlocalassembler.hh
+cclocalassembler.hh
+localoperator.hh
+operators.hh
+DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/assembly/fv)
diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh
new file mode 100644
index 0000000000000000000000000000000000000000..80fd526a1cf362e66d957e8b659005f841681003
--- /dev/null
+++ b/dumux/assembly/fv/boxlocalassembler.hh
@@ -0,0 +1,421 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ *   See the file COPYING for full copying permissions.                      *
+ *                                                                           *
+ *   This program is free software: you can redistribute it and/or modify    *
+ *   it under the terms of the GNU General Public License as published by    *
+ *   the Free Software Foundation, either version 3 of the License, or       *
+ *   (at your option) any later version.                                     *
+ *                                                                           *
+ *   This program is distributed in the hope that it will be useful,         *
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of          *
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the            *
+ *   GNU General Public License for more details.                            *
+ *                                                                           *
+ *   You should have received a copy of the GNU General Public License       *
+ *   along with this program. If not, see <http://www.gnu.org/licenses/>.    *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief An assembler for Jacobian and residual contribution
+ *        per element for the box scheme.
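+ * \note Instances are meant to be created per element by the grid-wide
+ *       Assembler, roughly as follows (a sketch; the template signature
+ *       BoxLocalAssembler<Assembler, diffMethod> is an assumption):
+ * \code
+ * BoxLocalAssembler<Assembler, DiffMethod::numeric> localAssembler(element, fvGeometry, elemVars);
+ * localAssembler.assembleJacobianAndResidual(jacobian, residual);
+ * \endcode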
+ */ +#ifndef DUMUX_BOX_LOCAL_ASSEMBLER_HH +#define DUMUX_BOX_LOCAL_ASSEMBLER_HH + +#include +#include + +#include + +#include +#include + +#include +#include + + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for the box scheme. + * \tparam Assembler The grid-wide assembler type + */ +template +class BoxLocalAssembler +{ + using GridVariables = typename Assembler::GridVariables; + using GridGeometry = typename GridVariables::GridGeometry; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using LocalOperator = typename Assembler::LocalOperator; + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::ResidualVector; + + static constexpr int numEq = PrimaryVariables::size(); + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. + */ + explicit BoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + { + assert(elemVars.size() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit BoxLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + {} + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. 
+ */ + template + void assembleJacobianAndResidual(JacobianMatrix& jac, + ResidualVector& res, + const PartialReassembler* partialReassembler = nullptr) + { + const auto eIdxGlobal = fvGeometry().gridGeometry().elementMapper().index(element()); + + if (partialReassembler && partialReassembler->elementColor(eIdxGlobal) == EntityColor::green) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + else if (!elementIsGhost_) + { + const auto residual = assembleJacobianAndResidual_(jac, partialReassembler); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + else + { + static constexpr int dim = GridView::dimension; + + for (const auto& scv : scvs(fvGeometry())) + { + const auto vIdxLocal = scv.indexInElement(); + const auto& v = element().template subEntity(vIdxLocal); + + // do not change the non-ghost vertices + if (v.partitionType() == Dune::InteriorEntity || + v.partitionType() == Dune::BorderEntity) + continue; + + const auto dofIdx = scv.dofIndex(); + res[dofIdx] = 0; + for (int pvIdx = 0; pvIdx < numEq; ++pvIdx) + jac[dofIdx][dofIdx][pvIdx][pvIdx] = 1.0; + } + } + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + + // if a periodic dof has Dirichlet values also apply the same Dirichlet values to the other dof + if (fvGeometry().gridGeometry().dofOnPeriodicBoundary(scvI.dofIndex())) + { + const auto periodicDof = fvGeometry().gridGeometry().periodicallyMappedDof(scvI.dofIndex()); + res[periodicDof][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + const auto end = jac[periodicDof].end(); + for (auto it = jac[periodicDof].begin(); it != end; ++it) + (*it) = periodicDof != it.index() ? 0.0 : 1.0; + } + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + void assembleJacobian(JacobianMatrix& jac) + { + assembleJacobianAndResidual_(jac); + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + auto& row = jac[scvI.dofIndex()]; + for (auto col = row.begin(); col != row.end(); ++col) + row[col.index()][eqIdx] = 0.0; + + jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Assemble the residual only + */ + void assembleResidual(ResidualVector& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + + auto applyDirichlet = [&] (const auto& scvI, + const auto& dirichletValues, + const auto eqIdx, + const auto pvIdx) + { + res[scvI.dofIndex()][eqIdx] = elemVariables().elemVolVars()[scvI].priVars()[pvIdx] - dirichletValues[pvIdx]; + }; + + enforceDirichletConstraints(applyDirichlet); + } + + //! 
Enforce Dirichlet constraints + template + void enforceDirichletConstraints(const ApplyFunction& applyDirichlet) + { + // enforce Dirichlet boundary conditions + evalDirichletBoundaries(applyDirichlet); + // TODO: take care of internal Dirichlet constraints (if enabled) + // enforceInternalDirichletConstraints(applyDirichlet); + } + + /*! + * \brief Evaluates Dirichlet boundaries + */ + template< typename ApplyDirichletFunctionType > + void evalDirichletBoundaries(ApplyDirichletFunctionType applyDirichlet) + { + for (const auto& scvI : scvs(fvGeometry())) + { + const auto bcTypes = problem().boundaryTypes(element(), scvI); + if (bcTypes.hasDirichlet()) + { + const auto dirichletValues = problem().dirichlet(element(), scvI); + + // set the Dirichlet conditions in residual and jacobian + for (int eqIdx = 0; eqIdx < numEq; ++eqIdx) + { + if (bcTypes.isDirichlet(eqIdx)) + { + const auto pvIdx = bcTypes.eqToDirichletIndex(eqIdx); + assert(0 <= pvIdx && pvIdx < numEq); + applyDirichlet(scvI, dirichletValues, eqIdx, pvIdx); + } + } + } + } + } + +protected: + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. + */ + template< class PartialReassembler = DefaultPartialReassembler > + ElementResidualVector assembleJacobianAndResidual_(JacobianMatrix& A, + const PartialReassembler* partialReassembler = nullptr) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A, partialReassembler); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. + * \return The element residual at the current solution. + * \note This specialization is for the box scheme with numeric differentiation + */ + template< class PartialReassembler = DefaultPartialReassembler > + ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A, + const PartialReassembler* partialReassembler = nullptr) + { + // get the variables of the current stage + auto& curVariables = elemVariables(); + auto& curElemVolVars = curVariables.elemVolVars(); + const auto& x = curVariables.gridVariables().dofs(); + + const auto origResiduals = evalLocalResidual(); + const auto origElemSol = elementSolution(element(), x, fvGeometry().gridGeometry()); + auto elemSol = origElemSol; + + ////////////////////////////////////////////////////////////////////////////////////////////// + // Calculate derivatives of the residual of all dofs in element with respect to themselves. 
//
+        //////////////////////////////////////////////////////////////////////////////////////////////
+
+        ElementResidualVector partialDerivs(fvGeometry().numScv());
+        for (const auto& scvI : scvs(fvGeometry()))
+        {
+            // dof index and corresponding actual pri vars
+            const auto globalI = scvI.dofIndex();
+            const auto localI = scvI.localDofIndex();
+
+            const auto origCurVolVars = curElemVolVars[scvI];
+            auto& curVolVars = curElemVolVars[scvI];
+
+            // calculate derivatives w.r.t. the privars at the dof at hand
+            for (int pvIdx = 0; pvIdx < numEq; pvIdx++)
+            {
+                partialDerivs = 0.0;
+                auto evalResiduals = [&](Scalar priVar)
+                {
+                    // update the volume variables and compute element residual
+                    elemSol[scvI.localDofIndex()][pvIdx] = priVar;
+                    curVolVars.update(elemSol, problem(), element(), scvI);
+                    return evalLocalResidual();
+                };
+
+                // derive the residuals numerically
+                static const NumericEpsilon<Scalar, numEq> eps_{problem().paramGroup()};
+                static const int numDiffMethod = getParamFromGroup<int>(problem().paramGroup(), "Assembly.NumericDifferenceMethod");
+                NumericDifferentiation::partialDerivative(evalResiduals, elemSol[localI][pvIdx], partialDerivs,
+                                                          origResiduals, eps_(elemSol[localI][pvIdx], pvIdx),
+                                                          numDiffMethod);
+
+                // TODO: Distinguish between implicit/explicit here. For explicit schemes,
+                //       no entries between different scvs of an element are reserved. Thus,
+                //       we currently get an error when using explicit schemes.
+                // TODO: Doesn't this mean we only have to evaluate the residual for a single
+                //       scv instead of calling evalLocalResidual()? That computes the residuals
+                //       and derivs for all other scvs of the element, too, which are never used.
+                //       Note: this is the same in the current implementation of master.
+                //       Should we try to optimize this for explicit schemes? Or adjust the Jacobian pattern?
+                // update the global stiffness matrix with the current partial derivatives
+                for (const auto& scvJ : scvs(fvGeometry()))
+                {
+                    const auto globalJ = scvJ.dofIndex();
+                    const auto localJ = scvJ.localDofIndex();
+
+                    // don't add derivatives for green entities
+                    if (!partialReassembler || partialReassembler->dofColor(globalJ) != EntityColor::green)
+                    {
+                        for (int eqIdx = 0; eqIdx < numEq; eqIdx++)
+                        {
+                            // A[i][col][eqIdx][pvIdx] is the rate of change of the
+                            // residual of equation 'eqIdx' at dof 'i'
+                            // depending on the primary variable 'pvIdx' at dof 'col'
+                            A[globalJ][globalI][eqIdx][pvIdx] += partialDerivs[localJ][eqIdx];
+                        }
+                    }
+                }
+
+                // restore the original element solution & volume variables
+                elemSol[localI][pvIdx] = origElemSol[localI][pvIdx];
+                curVolVars = origCurVolVars;
+
+                // TODO additional dof dependencies
+            }
+        }
+
+        return origResiduals;
+    }
+
+    //! Return references to the local views
+    const Element& element() const { return element_; }
+    const FVElementGeometry& fvGeometry() const { return fvGeometry_; }
+    const ElementVariables& elemVariables() const { return elemVariables_.back(); }
+    ElementVariables& elemVariables() { return elemVariables_.back(); }
+
+    //! Returns if a stationary problem is assembled
+    bool isStationary() const { return !stageParams_; }
+
+    //! Return a reference to the underlying problem
+    //! TODO: Should grid vars return problem directly!?
+ const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector& elemVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/assembly/fv/cclocalassembler.hh b/dumux/assembly/fv/cclocalassembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..a1f92a3334c7dab1ccb2f609af897a23c19ce2ce --- /dev/null +++ b/dumux/assembly/fv/cclocalassembler.hh @@ -0,0 +1,412 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. + */ +#ifndef DUMUX_CC_LOCAL_ASSEMBLER_HH +#define DUMUX_CC_LOCAL_ASSEMBLER_HH + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief An assembler for Jacobian and residual contribution + * per element for cell-centered schemes. + * \tparam Assembler The grid-wide assembler type + */ +template +class CCLocalAssembler +{ + using GridVariables = typename Assembler::GridVariables; + using GridGeometry = typename GridVariables::GridGeometry; + + using FVElementGeometry = typename GridGeometry::LocalView; + using ElementVariables = typename GridVariables::LocalView; + using PrimaryVariables = typename GridVariables::PrimaryVariables; + using Scalar = typename GridVariables::Scalar; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using JacobianMatrix = typename Assembler::JacobianMatrix; + using ResidualVector = typename Assembler::ResidualVector; + using LocalOperator = typename Assembler::LocalOperator; + using Operators = typename LocalOperator::Operators; + using NumEqVector = typename Operators::NumEqVector; + + static constexpr int numEq = PrimaryVariables::size(); + static constexpr int maxElementStencilSize = GridGeometry::maxElementStencilSize; + static_assert(diffMethod == DiffMethod::numeric, "Analytical assembly not implemented"); + static_assert(numEq == JacobianMatrix::block_type::cols, "Matrix block size doesn't match privars size"); + + //! the parameters of a stage in time integration + using StageParams = MultiStageParams; + +public: + using ElementResidualVector = typename LocalOperator::ElementResidualVector; + + /*! + * \brief Constructor for stationary problems. 
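+     * \note For cell-centered schemes each element has exactly one sub-control
+     *       volume; the constructors assert this via fvGeometry.numScv() == 1.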
+ */ + explicit CCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(nullptr) + { + assert(elemVars.size() == 1); + assert(fvGeometry_.numScv() == 1); + } + + /*! + * \brief Constructor for instationary problems. + * \note Using this constructor, we assemble one stage within + * a time integration step using multi-stage methods. + */ + explicit CCLocalAssembler(const Element& element, + const FVElementGeometry& fvGeometry, + std::vector& elemVars, + std::shared_ptr stageParams) + : element_(element) + , fvGeometry_(fvGeometry) + , elementVariables_(elemVars) + , elementIsGhost_(element.partitionType() == Dune::GhostEntity) + , stageParams_(stageParams) + { assert(fvGeometry_.numScv() == 1); } + + /*! + * \brief Computes the derivatives with respect to the given element and adds + * them to the global matrix. The element residual is written into the + * right hand side. + */ + template + void assembleJacobianAndResidual(JacobianMatrix& jac, + ResidualVector& res, + const PartialReassembler* partialReassembler = nullptr) + { + const auto globalI = fvGeometry().gridGeometry().elementMapper().index(element()); + if (partialReassembler + && partialReassembler->elementColor(globalI) == EntityColor::green) + { + res[globalI] = evalLocalResidual()[0]; // forward to the internal implementation + } + else + { + res[globalI] = assembleJacobianAndResidual_(jac); // forward to the internal implementation + } + } + + /*! + * \brief Computes the derivatives with respect to the given element and adds them + * to the global matrix. + */ + void assembleJacobian(JacobianMatrix& jac) + { + assembleJacobianAndResidual_(jac); + } + + /*! + * \brief Assemble the residual only + */ + void assembleResidual(ResidualVector& res) + { + const auto residual = evalLocalResidual(); + for (const auto& scv : scvs(fvGeometry())) + res[scv.dofIndex()] += residual[scv.localDofIndex()]; + } + +protected: + + /*! + * \brief Evaluate the complete local residual for the current element. + */ + ElementResidualVector evalLocalResidual() const + { + if (isStationary()) + { + LocalOperator localOperator(element(), fvGeometry(), elemVariables()); + return elementIsGhost_ ? localOperator.getEmptyResidual() + : localOperator.evalFluxesAndSources(); + } + else + { + ElementResidualVector residual(fvGeometry().numScv()); + residual = 0.0; + + for (std::size_t k = 0; k < stageParams_->size(); ++k) + { + LocalOperator localOperator(element(), fvGeometry(), elementVariables_[k]); + + if (!stageParams_->skipTemporal(k)) + residual.axpy(stageParams_->temporalWeight(k), localOperator.evalStorage()); + if (!stageParams_->skipSpatial(k)) + residual.axpy(stageParams_->spatialWeight(k), localOperator.evalFluxesAndSources()); + } + + return residual; + } + } + + /*! + * \brief Computes the derivatives with respect to the dofs of the given + * element and adds them to the global matrix. + * \return The element residual at the current solution. + */ + NumEqVector assembleJacobianAndResidual_(JacobianMatrix& A) + { + if constexpr (diffMethod == DiffMethod::numeric) + return assembleJacobianAndResidualNumeric_(A); + else + DUNE_THROW(Dune::NotImplemented, "Analytic assembler for cc schemes"); + } + + /*! + * \brief Computes the derivatives by means of numeric differentiation + * and adds them to the global matrix. 
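+     * \note The derivatives are approximated by deflecting each primary variable
+     *       of the element and re-evaluating the residuals, i.e. (up to the
+     *       difference scheme chosen at runtime)
+     *       \f$ \partial R_i / \partial u_j \approx (R_i(u + \varepsilon e_j) - R_i(u)) / \varepsilon \f$.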
+     * \return The element residual at the current solution.
+     * \note This specialization is for numeric differentiation
+     */
+    NumEqVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A)
+    {
+        using Problem = std::decay_t<decltype(problem())>;
+
+        // get the variables of the current stage
+        auto& curVariables = elemVariables_();
+        auto& curElemVolVars = curVariables.elemVolVars();
+        auto& curElemFluxVarsCache = curVariables.elemFluxVarsCache();
+        const auto& x = curVariables.gridVariables().dofs();
+
+        // get stencil information
+        const auto& gridGeometry = fvGeometry().gridGeometry();
+        const auto& connectivityMap = gridGeometry.connectivityMap();
+        const auto globalI = gridGeometry.elementMapper().index(element_);
+        const auto numNeighbors = connectivityMap[globalI].size();
+
+        // container to store the neighboring elements
+        Dune::ReservedVector<Element, maxElementStencilSize> neighborElements;
+        neighborElements.resize(numNeighbors);
+
+        // assemble the undeflected residual
+        using Residuals = ReservedBlockVector<NumEqVector, maxElementStencilSize>;
+        Residuals origResiduals(numNeighbors + 1); origResiduals = 0.0;
+        origResiduals[0] = evalLocalResidual()[0];
+
+        // lambda for convenient evaluation of the fluxes across scvfs in the neighbors
+        // if the neighbor is a ghost we don't want to add anything to their residual
+        // so we return 0 and omit computing the flux
+        auto evalNeighborFlux = [&] (const auto& neighbor, const auto& scvf)
+        {
+            if (neighbor.partitionType() == Dune::GhostEntity)
+                return NumEqVector(0.0);
+            else
+                return Operators::flux(problem(), neighbor, fvGeometry(),
+                                       curElemVolVars, curElemFluxVarsCache, scvf);
+        };
+
+        // get the elements in which we need to evaluate the fluxes
+        // and calculate these in the undeflected state
+        unsigned int j = 1;
+        for (const auto& dataJ : connectivityMap[globalI])
+        {
+            neighborElements[j-1] = gridGeometry.element(dataJ.globalJ);
+            for (const auto scvfIdx : dataJ.scvfsJ)
+                origResiduals[j] += evalNeighborFlux(neighborElements[j-1], fvGeometry().scvf(scvfIdx));
+            ++j;
+        }
+
+        // reference to the element's scv (needed later) and corresponding vol vars
+        // TODO: Support the case of caching?!
+        const auto& scv = fvGeometry().scv(globalI);
+        auto& curVolVars = curElemVolVars[scv];
+
+        // save a copy of the original privars and vol vars in order
+        // to restore the original solution after deflection
+        const auto origPriVars = x[globalI];
+        const auto origVolVars = curVolVars;
+
+        // element solution container to be deflected
+        auto elemSol = elementSolution(element_, x, gridGeometry);
+
+        // derivatives in the neighbors with respect to the current element's dofs;
+        // in index 0 we save the derivative of the element residual with respect to its own dofs
+        Residuals partialDerivs(numNeighbors + 1);
+        for (int pvIdx = 0; pvIdx < numEq; ++pvIdx)
+        {
+            partialDerivs = 0.0;
+
+            auto evalResiduals = [&](Scalar priVar)
+            {
+                Residuals partialDerivsTmp(numNeighbors + 1);
+                partialDerivsTmp = 0.0;
+
+                // update the volume variables and the flux var cache
+                elemSol[0][pvIdx] = priVar;
+                curVolVars.update(elemSol, problem(), element_, scv);
+                curElemFluxVarsCache.update(element_, fvGeometry(), curElemVolVars);
+                // TODO: UPDATE GLOBAL FLUX VARS CACHE HERE?
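+                // Note: deflecting a dof of element I also changes the fluxes across
+                // the faces of the neighbors J in the stencil; these contributions
+                // end up in the off-diagonal blocks A[J][I] below.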
+
+                // calculate the residual with the deflected primary variables
+                partialDerivsTmp[0] = evalLocalResidual()[0];
+
+                // calculate the fluxes in the neighbors with the deflected primary variables
+                for (std::size_t k = 0; k < numNeighbors; ++k)
+                    for (auto scvfIdx : connectivityMap[globalI][k].scvfsJ)
+                        partialDerivsTmp[k+1] += evalNeighborFlux(neighborElements[k], fvGeometry().scvf(scvfIdx));
+
+                return partialDerivsTmp;
+            };
+
+            // derive the residuals numerically
+            static const NumericEpsilon<Scalar, numEq> eps_{problem().paramGroup()};
+            static const int numDiffMethod = getParamFromGroup<int>(problem().paramGroup(), "Assembly.NumericDifferenceMethod");
+            NumericDifferentiation::partialDerivative(evalResiduals, elemSol[0][pvIdx], partialDerivs, origResiduals,
+                                                      eps_(elemSol[0][pvIdx], pvIdx), numDiffMethod);
+
+            // Correct derivative for ghost elements, i.e. set a 1 for the derivative w.r.t. the
+            // current primary variable and a 0 elsewhere. As we always solve for a delta of the
+            // solution with respect to the initial one, this results in a delta of 0 for ghosts.
+            if (elementIsGhost_)
+            {
+                partialDerivs[0] = 0.0;
+                partialDerivs[0][pvIdx] = 1.0;
+            }
+
+            // For instationary simulations, scale the coupling
+            // fluxes of the current stage correctly
+            if (stageParams_)
+            {
+                for (std::size_t k = 0; k < numNeighbors; ++k)
+                    partialDerivs[k+1] *= stageParams_->spatialWeight(stageParams_->size()-1);
+            }
+
+            // add the current partial derivatives to the global jacobian matrix
+            // no special treatment is needed if globalJ is a ghost because the derivatives have been assembled to 0 above
+            if constexpr (Problem::enableInternalDirichletConstraints())
+            {
+                // check if own element has internal Dirichlet constraint
+                const auto internalDirichletConstraintsOwnElement = this->problem().hasInternalDirichletConstraint(element_, scv);
+                const auto dirichletValues = this->problem().internalDirichlet(element_, scv);
+
+                for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+                {
+                    if (internalDirichletConstraintsOwnElement[eqIdx])
+                    {
+                        origResiduals[0][eqIdx] = origVolVars.priVars()[eqIdx] - dirichletValues[eqIdx];
+                        A[globalI][globalI][eqIdx][pvIdx] = (eqIdx == pvIdx) ? 1.0 : 0.0;
+                    }
+                    else
+                        A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+                }
+
+                // off-diagonal entries
+                j = 1;
+                for (const auto& dataJ : connectivityMap[globalI])
+                {
+                    const auto& neighborElement = neighborElements[j-1];
+                    const auto& neighborScv = fvGeometry().scv(dataJ.globalJ);
+                    const auto internalDirichletConstraintsNeighbor = problem().hasInternalDirichletConstraint(neighborElement, neighborScv);
+
+                    for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+                    {
+                        if (internalDirichletConstraintsNeighbor[eqIdx])
+                            A[dataJ.globalJ][globalI][eqIdx][pvIdx] = 0.0;
+                        else
+                            A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j][eqIdx];
+                    }
+
+                    ++j;
+                }
+            }
+            else // no internal Dirichlet constraints specified
+            {
+                for (int eqIdx = 0; eqIdx < numEq; eqIdx++)
+                {
+                    // the diagonal entries
+                    A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+
+                    // off-diagonal entries
+                    j = 1;
+                    for (const auto& dataJ : connectivityMap[globalI])
+                        A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j++][eqIdx];
+                }
+            }
+
+            // restore the original state of the scv's volume variables
+            curVolVars = origVolVars;
+
+            // restore the current element solution
+            elemSol[0][pvIdx] = origPriVars[pvIdx];
+        }
+
+        // restore original state of the flux vars cache in case of global caching.
+ // This has to be done in order to guarantee that everything is in an undeflected + // state before the assembly of another element is called. In the case of local caching + // this is obsolete because the elemFluxVarsCache used here goes out of scope after this. + // We only have to do this for the last primary variable, for all others the flux var cache + // is updated with the correct element volume variables before residual evaluations + // TODO: RESET GLOBAL FLUX VARS CACHE HERE + + // return the original residual + return origResiduals[0]; + } + + //! Return references to the local views + const Element& element() const { return element_; } + const FVElementGeometry& fvGeometry() const { return fvGeometry_; } + const ElementVariables& elemVariables() const { return elementVariables_.back(); } + ElementVariables& elemVariables_() { return elementVariables_.back(); } + + //! Returns if a stationary problem is assembled + bool isStationary() const { return !stageParams_; } + + //! Return a reference to the underlying problem + //! TODO: Should grid vars return problem directly!? + const auto& problem() const + { return elemVariables().gridVariables().gridVolVars().problem(); } + +private: + const Element& element_; + const FVElementGeometry& fvGeometry_; + std::vector& elementVariables_; + + bool elementIsGhost_; + std::shared_ptr stageParams_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh new file mode 100644 index 0000000000000000000000000000000000000000..d4374b8df10903a07836aa652caf9a70921a7fcd --- /dev/null +++ b/dumux/assembly/fv/localoperator.hh @@ -0,0 +1,251 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief An element-wise local operator for finite-volume schemes. + */ +#ifndef DUMUX_FV_LOCAL_OPERATOR_HH +#define DUMUX_FV_LOCAL_OPERATOR_HH + +#include + +#include +#include +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief The element-wise local operator for finite volume schemes. + * This allows for element-wise evaluation of individual terms + * of the equations to be solved. 
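+ * \note A minimal usage sketch (assuming the local views are already bound to
+ *       the element; the explicit template argument is an assumption):
+ * \code
+ * FVLocalOperator<ElementVariables> localOperator(element, fvGeometry, elemVars);
+ * auto residual = localOperator.evalFluxesAndSources();
+ * residual += localOperator.evalStorage(); // for instationary problems
+ * \endcode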
+ * \tparam ElementVariables local view on the grid variables + * \tparam Op The model-specific operators + */ +template +class FVLocalOperator +{ + // The variables required for the evaluation of the equation + using GridVars = typename ElementVariables::GridVariables; + using PrimaryVariables = typename GridVars::PrimaryVariables; + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVars::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + using NumEqVector = typename Op::NumEqVector; + static constexpr int dim = GridView::dimension; + static constexpr int numEq = NumEqVector::size(); + static constexpr bool isBox = GridGeometry::discMethod == DiscretizationMethod::box; + +public: + //! export operators + using Operators = Op; + + //! export the grid variables type this operator requires a local view of + using GridVariables = GridVars; + + //! export a grid-independent alias for compatibility with non grid-based schemes + //! TODO: Necessary? + using Variables = GridVars; + + //! the container storing the operator values on all dofs of an element + using ElementResidualVector = Dune::BlockVector; + + //! export a grid-independent alias for compatibility with non grid-based schemes + //! TODO: Necessary? + using Residual = ElementResidualVector; + + /*! + * \brief The constructor + * \note The grid geometry/grid variables local views are expected to + * be bound to the same (the given) element + */ + explicit FVLocalOperator(const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVariables& elemVars) + : element_(element) + , fvGeometry_(fvGeometry) + , elemVariables_(elemVars) + {} + + /*! + * \name Main interface + * \note Methods used by the assembler to compute derivatives and residual + */ + // \{ + + /*! + * \brief Compute the terms of the local residual that do not appear in + * time derivatives. These are the sources and the fluxes. + */ + ElementResidualVector evalFluxesAndSources() const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + + // source term + auto result = getEmptyResidual(); + for (const auto& scv : scvs(fvGeometry_)) + result[scv.localDofIndex()] -= Operators::source(problem, element_, fvGeometry_, evv, scv); + + // flux term + for (const auto& scvf : scvfs(fvGeometry_)) + addFlux_(result, scvf); + + return result; + } + + /*! + * \brief Compute the storage term, i.e. the term appearing in the time derivative. + */ + ElementResidualVector evalStorage() const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + + // TODO: Until now, FVLocalResidual defined storage as the entire + // time derivative. Now it means the term above the time derivative. + // We should think about the correct naming here... + // TODO: Should storage() NOT multiply with volume?? 
That was different until + // now but flux() also returns the flux multiplied with area so this should + // be more consistent + auto result = getEmptyResidual(); + for (const auto& scv : scvs(fvGeometry_)) + result[scv.localDofIndex()] += operators_.storage(problem, scv, evv[scv]); + + return result; + } + + ElementResidualVector getEmptyResidual() const + { + ElementResidualVector res(fvGeometry_.numScv()); + res = 0.0; + return res; + } + + // \} + + /*! + * \name Interfaces for analytic Jacobian computation + */ + // \{ + + //! \todo TODO: Add interfaces. Or, should this be here at all!? + + //\} + + // \} + +protected: + + //! compute and add the flux across the given face to the container (cc schemes) + template = 0> + void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const + { + const auto& insideScv = fvGeometry_.scv(scvf.insideScvIdx()); + const auto localDofIdx = insideScv.localDofIndex(); + + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + const auto& efvc = elemVariables_.elemFluxVarsCache(); + + if (!scvf.boundary()) + r[localDofIdx] += Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + else + { + const auto& bcTypes = problem.boundaryTypes(element_, scvf); + + // Dirichlet boundaries + if (bcTypes.hasDirichlet() && !bcTypes.hasNeumann()) + r[localDofIdx] += Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + + // Neumann and Robin ("solution dependent Neumann") boundary conditions + else if (bcTypes.hasNeumann() && !bcTypes.hasDirichlet()) + { + auto neumannFluxes = problem.neumann(element_, fvGeometry_, evv, efvc, scvf); + + // multiply neumann fluxes with the area and the extrusion factor + neumannFluxes *= Extrusion::area(scvf)*evv[insideScv].extrusionFactor(); + r[localDofIdx] += neumannFluxes; + } + + else + DUNE_THROW(Dune::NotImplemented, "Mixed boundary conditions for cell-centered schemes. " << + "Use pure boundary conditions by converting Dirichlet BCs to Robin BCs"); + } + } + + //! compute and add the flux across the given face to the container (box scheme) + template = 0> + void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const + { + const auto& problem = elemVariables_.gridVariables().gridVolVars().problem(); + const auto& evv = elemVariables_.elemVolVars(); + const auto& efvc = elemVariables_.elemFluxVarsCache(); + + // inner faces + if (!scvf.boundary()) + { + const auto flux = Operators::flux(problem, element_, fvGeometry_, evv, efvc, scvf); + r[fvGeometry_.scv(scvf.insideScvIdx()).localDofIndex()] += flux; + r[fvGeometry_.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux; + } + + // boundary faces + else + { + const auto& scv = fvGeometry_.scv(scvf.insideScvIdx()); + const auto& bcTypes = problem.boundaryTypes(element_, scv); + + // Treat Neumann and Robin ("solution dependent Neumann") boundary conditions. 
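+            // Dirichlet dofs are handled directly by the box local assembler,
+            // so only Neumann-type fluxes need to be added here.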
+ if (bcTypes.hasNeumann()) + { + const auto neumannFluxes = problem.neumann(element_, fvGeometry_, evv, efvc, scvf); + const auto area = Extrusion::area(scvf)*evv[scv].extrusionFactor(); + + // only add fluxes to equations for which Neumann is set + for (int eqIdx = 0; eqIdx < NumEqVector::size(); ++eqIdx) + if (bcTypes.isNeumann(eqIdx)) + r[scv.localDofIndex()][eqIdx] += neumannFluxes[eqIdx]*area; + } + } + } + +private: + + const Element& element_; //!< pointer to the element for which the residual is computed + const FVElementGeometry& fvGeometry_; //!< the local view on the finite element grid geometry + const ElementVariables& elemVariables_; //!< the local view on the grid variables + Operators operators_; //!< evaluates storage/flux operators of the actual equation +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/assembly/fv/operators.hh b/dumux/assembly/fv/operators.hh new file mode 100644 index 0000000000000000000000000000000000000000..2c8352e7c9e76174eb82d3f47403085c24eb5ab1 --- /dev/null +++ b/dumux/assembly/fv/operators.hh @@ -0,0 +1,148 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief The base class for the sub-control entity-local evaluation of + * the terms of equations in the context of finite-volume schemes + */ +#ifndef DUMUX_FV_OPERATORS_HH +#define DUMUX_FV_OPERATORS_HH + +#include +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Assembly + * \brief The base class for the sub-control entity-local evaluation of + * the terms of equations in the context of finite-volume schemes + * \todo TODO: Should operators have a state? That is, be constructed and have non-static functions? 
+ */ +template +class FVOperators +{ + // The variables required for the evaluation of the equation + using GridVariables = typename ElementVariables::GridVariables; + using VolumeVariables = typename GridVariables::VolumeVariables; + using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables; + using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache; + using Scalar = typename GridVariables::Scalar; + + using PrimaryVariables = typename GridVariables::PrimaryVariables; + static constexpr int numEq = PrimaryVariables::size(); + + // The grid geometry on which the scheme operates + using GridGeometry = typename GridVariables::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + using SubControlVolume = typename GridGeometry::SubControlVolume; + using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace; + using Extrusion = Extrusion_t; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + + // user-input defined in the problem + using Problem = std::decay_t().gridVolVars().problem())>; + +public: + //! export the type used to store scalar values for all equations + //! TODO: How to obtain user-defined NumEqVector (ProblemTraits?) + using NumEqVector = Dune::FieldVector; + + // export the types of the flux/source/storage terms + using FluxTerm = NumEqVector; + using SourceTerm = NumEqVector; + using StorageTerm = NumEqVector; + + /*! + * \name Model specific interfaces + * \note The following methods are the model-specific implementations of the + * operators appearing in the equation and should be overloaded by the + * implementations. + */ + // \{ + + /*! + * \brief Compute the storage term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param scv The sub-control volume + * \param volVars The primary & secondary variables evaluated for the scv + * \note This must be overloaded by the implementation + */ + static StorageTerm storage(const Problem& problem, + const SubControlVolume& scv, + const VolumeVariables& volVars) + { DUNE_THROW(Dune::NotImplemented, "Storage operator not implemented!"); } + + /*! + * \brief Compute the flux term of the equations for the given sub-control volume face + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param elemFluxVarsCache The element-local view on the grid flux variables cache + * \param scvf The sub-control volume face for which the flux term is to be computed + * \note This must be overloaded by the implementation + */ + static FluxTerm flux(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const ElementFluxVariablesCache& elemFluxVarsCache, + const SubControlVolumeFace& scvf) + { DUNE_THROW(Dune::NotImplemented, "This model does not implement a flux method!"); } + + /*!
+ * \brief Compute the source term of the equations for the given sub-control volume + * \param problem The problem to be solved (could store additionally required quantities) + * \param element The grid element + * \param fvGeometry The element-local view on the finite volume grid geometry + * \param elemVolVars The element-local view on the grid volume variables + * \param scv The sub-control volume for which the source term is to be computed + * \note This is a default implementation forwarding to interfaces in the problem + */ + static SourceTerm source(const Problem& problem, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const SubControlVolume& scv) + { + SourceTerm source(0.0); + + // add contributions from volume flux sources + source += problem.source(element, fvGeometry, elemVolVars, scv); + + // add contribution from possible point sources + source += problem.scvPointSources(element, fvGeometry, elemVolVars, scv); + + // multiply with scv volume + source *= Extrusion::volume(scv)*elemVolVars[scv].extrusionFactor(); + + return source; + } +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/assembly/localassembler.hh b/dumux/assembly/localassembler.hh new file mode 100644 index 0000000000000000000000000000000000000000..8fa6e006d7ff426be021da241975393f8e81b7b2 --- /dev/null +++ b/dumux/assembly/localassembler.hh @@ -0,0 +1,65 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Assembly + * \brief Helper alias to select a local assembler based on the discretization scheme. + */ +#ifndef DUMUX_LOCAL_ASSEMBLER_HH +#define DUMUX_LOCAL_ASSEMBLER_HH + +#include +#include "fv/boxlocalassembler.hh" +#include "fv/cclocalassembler.hh" + +namespace Dumux { +namespace Impl { + + template + struct LocalAssemblerChooser; + + template + struct LocalAssemblerChooser + { using type = BoxLocalAssembler; }; + + template + struct LocalAssemblerChooser + { using type = CCLocalAssembler; }; + + template + struct LocalAssemblerChooser + { using type = CCLocalAssembler; }; + + template + using LocalAssemblerType = typename LocalAssemblerChooser::type; + +} // end namespace Impl + +/*! + * \ingroup Assembly + * \brief Helper alias to select the local assembler type from an assembler.
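+ * + * The selection follows a tag-dispatch pattern on the discretization method; + * schematically (self-contained sketch, not the actual interface): + * \code + * enum class Method { box, cctpfa }; + * struct BoxAssembler {}; + * struct CCAssembler {}; + * + * template<Method m> struct Chooser; + * template<> struct Chooser<Method::box> { using type = BoxAssembler; }; + * template<> struct Chooser<Method::cctpfa> { using type = CCAssembler; }; + * + * template<Method m> using AssemblerType = typename Chooser<m>::type; + * \endcode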
+ */ +template +using LocalAssembler = Impl::LocalAssemblerType; + +} // namespace Dumux + +#endif diff --git a/dumux/common/CMakeLists.txt b/dumux/common/CMakeLists.txt index e26b4dfb2abc466eb260114c3435dfd9b36a0942..5bd0405ba61caccd299efa2042f3b044956fb825 100644 --- a/dumux/common/CMakeLists.txt +++ b/dumux/common/CMakeLists.txt @@ -48,4 +48,6 @@ timeloop.hh timemanager.hh valgrind.hh variablelengthspline_.hh +variables.hh +variablesbackend.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/common) diff --git a/dumux/common/pdesolver.hh b/dumux/common/pdesolver.hh index 99f61bcaa65deee6bba5fb40996c9f93f472744c..49ce5123e0975803ca970da9a509c38ceb7a9ac2 100644 --- a/dumux/common/pdesolver.hh +++ b/dumux/common/pdesolver.hh @@ -28,6 +28,7 @@ #include #include +#include #include @@ -35,9 +36,25 @@ namespace Dune { template class MultiTypeBlockMatrix; -} +} // end namespace Dune namespace Dumux { +namespace Impl { + + template + using AssemblerVariablesType = typename Assembler::Variables; + + template + inline constexpr bool exportsVariables = Dune::Std::is_detected_v; + + template> struct VariablesChooser; + template struct VariablesChooser { using Type = AssemblerVariablesType; }; + template struct VariablesChooser { using Type = typename A::ResidualType; }; + + template + using AssemblerVariables = typename VariablesChooser::Type; + +} // end namespace Impl /*! * \ingroup Common @@ -47,17 +64,28 @@ namespace Dumux { * and has a method solve that linearizes (if not already linear), assembles, solves and updates * given an initial solution producing a new solution. * - * \tparam Assembler A PDE linearized system assembler - * \tparam LinearSolver A linear system solver + * \tparam A Assembler for the PDE linearized system + * \tparam LS Linear system solver */ -template +template class PDESolver { - using SolutionVector = typename Assembler::ResidualType; - using Scalar = typename Assembler::Scalar; + using Scalar = typename A::Scalar; using TimeLoop = TimeLoopBase; public: + //! export the assembler and linear solver types + using Assembler = A; + using LinearSolver = LS; + + //! export the type of variables that represent a numerical solution + using Variables = Impl::AssemblerVariables; + + /*! + * \brief Constructor + * \param assembler pointer to the assembler of the linear system + * \param linearSolver pointer to the solver of the resulting linear system + */ PDESolver(std::shared_ptr assembler, std::shared_ptr linearSolver) : assembler_(assembler) @@ -68,24 +96,25 @@ public: /*! * \brief Solve the given PDE system (usually assemble + solve linear system + update) - * \param sol a solution vector possbilty containing an initial solution + * \param vars instance of the `Variables` class representing a numerical + * solution, defining primary and possibly secondary variables + * and information on the time level. */ - virtual void solve(SolutionVector& sol) = 0; + virtual void solve(Variables& vars) = 0; /*! * \brief Solve the given PDE system with time step control * \note This is used for solvers that are allowed to e.g. 
automatically reduce the * time step if the solve was not successful - * \param sol a solution vector possbilty containing an initial solution + * \param vars instance of the `Variables` class representing a numerical solution + * \param timeLoop a reference to the current time loop */ - virtual void solve(SolutionVector& sol, TimeLoop& timeLoop) + virtual void solve(Variables& vars, TimeLoop& timeLoop) { // per default we just forward to the method without time step control - solve(sol); + solve(vars); } -protected: /*! * \brief Access the assembler */ @@ -110,6 +139,8 @@ protected: LinearSolver& linearSolver() { return *linearSolver_; } +protected: + /*! * \brief Helper function to assure the MultiTypeBlockMatrix's sub-blocks have the correct sizes. */ diff --git a/dumux/common/variables.hh b/dumux/common/variables.hh new file mode 100644 index 0000000000000000000000000000000000000000..f75731626b1b1540d1060d72826bdeab6d2e053e --- /dev/null +++ b/dumux/common/variables.hh @@ -0,0 +1,133 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Common + * \copydoc Dumux::Variables + */ +#ifndef DUMUX_VARIABLES_HH +#define DUMUX_VARIABLES_HH + +#include + +#include +#include + +namespace Dumux { + +/*! + * \ingroup Discretization + * \brief Class that represents the variables of a model. + * We assume that models are formulated on the basis of primary and + * possibly secondary variables, where the latter may non-linearly + * depend on the former. Variables in Dumux represent the state of + * a numerical solution of a model, consisting of all primary/secondary + * variables and, if a transient problem is modeled, of time information. + * + * This class defines the interface that is expected of variable classes, + * and it provides the implementation for models that do not require storing + * any additional information besides the primary variables and (optionally) + * time. + * \tparam X The type used for solution vectors, i.e. all primary variables. + */ +template +class Variables +{ + template + struct ScalarExtractorHelper; + + template + struct ScalarExtractorHelper + { using Type = T; }; + + template + struct ScalarExtractorHelper + { + private: + using ValueType = std::decay_t()[0])>; + static constexpr bool indexable = IsIndexable::value; + public: + using Type = typename ScalarExtractorHelper::Type; + }; + +public: + //! export the type of solution vector + using SolutionVector = X; + + //! export the underlying scalar type + using Scalar = typename ScalarExtractorHelper::value>::Type; + + //!
export the time representation + using TimeLevel = Dumux::TimeLevel; + + //! Default constructor + explicit Variables() : x_(), t_(0.0) {} + + //! Construction from a solution + explicit Variables(const SolutionVector& x, + const TimeLevel& t = TimeLevel{0.0}) + : x_(x), t_(t) + {} + + //! Construction from a solution + explicit Variables(SolutionVector&& x, + const TimeLevel& t = TimeLevel{0.0}) + : x_(std::move(x)), t_(t) + {} + + //! Construction from initializer lambda + template), int> = 0> + explicit Variables(const Initializer& initializeSolution, + const TimeLevel& timeLevel = TimeLevel{0.0}) + : t_(timeLevel) + { + initializeSolution(x_); + } + + //! Return reference to the solution + const SolutionVector& dofs() const { return x_; } + + //! Non-const access still required for privar switch (TODO: Remove dependency) + SolutionVector& dofs() { return x_; } + + //! Update the state to a new solution + void update(const SolutionVector& x) + { x_ = x; } + + //! Update the time level only + void updateTime(const TimeLevel& t) + { t_ = t; } + + //! Update the state to a new solution & time level + void update(const SolutionVector& x, + const TimeLevel& t) + { + x_ = x; + t_ = t; + } + +private: + SolutionVector x_; + TimeLevel t_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/common/variablesbackend.hh b/dumux/common/variablesbackend.hh new file mode 100644 index 0000000000000000000000000000000000000000..52e506e4aeb9a7a76bfbd90d7424206330e9bcb5 --- /dev/null +++ b/dumux/common/variablesbackend.hh @@ -0,0 +1,204 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/**************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Nonlinear + * \brief Backends for operations on different solution vector types + * or more generic variable classes to be used in places where + * several different types/layouts should be supported. + */ +#ifndef DUMUX_VARIABLES_BACKEND_HH +#define DUMUX_VARIABLES_BACKEND_HH + +#include +#include +#include + +#include +#include +#include +#include +#include + +// forward declaration +namespace Dune { + +template +class MultiTypeBlockVector; + +} // end namespace Dune + +namespace Dumux { + +/*! + * \ingroup Nonlinear + * \brief Class providing operations with primary variable vectors + */ +template +class DofBackend; + +/*! 
+ * \ingroup Nonlinear + * \brief Specialization providing operations for scalar/number types + */ +template +class DofBackend::value, Scalar>> +{ +public: + using DofVector = Scalar; //!< the type of the dofs parametrizing the variables object + + static std::size_t size(const DofVector& d) + { return 1; } + + static DofVector makeZeroDofVector(std::size_t size) + { return 0.0; } +}; + +/*! + * \ingroup Nonlinear + * \brief Specialization providing operations for block vectors + */ +template +class DofBackend> +{ + +public: + using DofVector = Dune::BlockVector; //!< the type of the dofs parametrizing the variables object + + static std::size_t size(const DofVector& d) + { return d.size(); } + + static DofVector makeZeroDofVector(std::size_t size) + { DofVector d; d.resize(size); return d; } +}; + +/*! + * \ingroup Nonlinear + * \brief Specialization providing operations for multitype block vectors + */ +template +class DofBackend> +{ + using DV = Dune::MultiTypeBlockVector; + static constexpr auto numBlocks = DV::size(); + + using VectorSizeInfo = std::array; + +public: + using DofVector = DV; //!< the type of the dofs parametrizing the variables object + + static VectorSizeInfo size(const DofVector& d) + { + VectorSizeInfo result; + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto i) { + result[i] = d[Dune::index_constant{}].size(); + }); + return result; + } + + static DofVector makeZeroDofVector(const VectorSizeInfo& size) + { + DofVector result; + using namespace Dune::Hybrid; + forEach(std::make_index_sequence{}, [&](auto i) { + result[Dune::index_constant{}].resize(size[i]); + }); + return result; + } +}; + +namespace Impl { + +template +using SolutionVectorType = typename Vars::SolutionVector; + +template +class VariablesBackend; + +/*! + * \ingroup Nonlinear + * \brief Class providing operations for primary variable vector/scalar types + * \note We assume the variables to be simply a dof vector if the + * variables class does not export `SolutionVector`. + */ +template +class VariablesBackend +: public DofBackend +{ + using ParentType = DofBackend; + +public: + using Variables = Vars; + using typename ParentType::DofVector; + + //! update to new solution vector + static void update(Variables& v, const DofVector& dofs) + { v = dofs; } + + //! return const reference to dof vector + static const DofVector& getDofVector(const Variables& v) + { return v; } + + //! return reference to dof vector + static DofVector& getDofVector(Variables& v) + { return v; } +}; + +/*! + * \ingroup Nonlinear + * \brief Class providing operations for generic variable classes, + * containing primary and possibly also secondary variables. + */ +template +class VariablesBackend +: public DofBackend +{ +public: + using DofVector = typename Vars::SolutionVector; + using Variables = Vars; //!< the type of the variables object + + //! update to new solution vector + static void update(Variables& v, const DofVector& dofs) + { v.update(dofs); } + + //! return const reference to dof vector + static const DofVector& getDofVector(const Variables& v) + { return v.dofs(); } + + //! return reference to dof vector + static DofVector& getDofVector(Variables& v) + { return v.dofs(); } +}; +} // end namespace Impl + +/*! + * \ingroup Nonlinear + * \brief Class providing operations for generic variable classes + * that represent the state of a numerical solution, possibly + * consisting of primary/secondary variables and information on + * the time level.
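+ * + * A typical use in a solver is to extract the dof vector, modify it, and + * write it back; for instance (sketch; `vars` and `deltaU` are assumed to + * be given): + * \code + * using Backend = VariablesBackend<Variables>; + * auto uNew = Backend::getDofVector(vars); // copy of the current dofs + * uNew -= deltaU; // apply an update + * Backend::update(vars, uNew); // write back, possibly updating secondary variables + * \endcode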
+ */ +template +using VariablesBackend = Impl::VariablesBackend>; + +} // end namespace Dumux + +#endif diff --git a/dumux/discretization/CMakeLists.txt b/dumux/discretization/CMakeLists.txt index a43e90fb378a589d1ad6eb942091d1764ad5af9a..05fa5cb88a09f2a76a8a4dfd9d34c9a109aed8e3 100644 --- a/dumux/discretization/CMakeLists.txt +++ b/dumux/discretization/CMakeLists.txt @@ -18,6 +18,7 @@ fluxstencil.hh functionspacebasis.hh fvgridvariables.hh fvproperties.hh +gridvariables.hh localview.hh method.hh rotationpolicy.hh diff --git a/dumux/discretization/fvgridvariables.hh b/dumux/discretization/fvgridvariables.hh index ce693bd28360c4e92c0f8276d6485fde9fc2b2b0..21c518847f18e87a13ef99d07a6f8b78e3f7ffc2 100644 --- a/dumux/discretization/fvgridvariables.hh +++ b/dumux/discretization/fvgridvariables.hh @@ -19,32 +19,123 @@ /*! * \file * \ingroup Discretization - * \brief The grid variable class for finite volume schemes storing variables on scv and scvf (volume and flux variables) + * \brief The grid variable class for finite volume schemes, + * storing variables on scv and scvf (volume and flux variables) */ #ifndef DUMUX_FV_GRID_VARIABLES_HH #define DUMUX_FV_GRID_VARIABLES_HH #include #include -#include + +// TODO: Remove once default template argument is omitted, +// which is there solely to ensure backwards compatibility. +#include + +#include +#include "gridvariables.hh" namespace Dumux { /*! * \ingroup Discretization - * \brief The grid variable class for finite volume schemes storing variables on scv and scvf (volume and flux variables) - * \tparam the type of the grid geometry - * \tparam the type of the grid volume variables - * \tparam the type of the grid flux variables cache + * \brief Finite volume-specific local view on grid variables. + * \tparam GV The grid variables class */ -template +template +class FVGridVariablesLocalView +{ + using GridGeometry = typename GV::GridGeometry; + using FVElementGeometry = typename GridGeometry::LocalView; + + using GridView = typename GridGeometry::GridView; + using Element = typename GridView::template Codim<0>::Entity; + +public: + //! export corresponding grid-wide class + using GridVariables = GV; + + //! export underlying local views + using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView; + using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView; + + //! Constructor + FVGridVariablesLocalView(const GridVariables& gridVariables) + : gridVariables_(&gridVariables) + , elemVolVars_(gridVariables.gridVolVars()) + , elemFluxVarsCache_(gridVariables.gridFluxVarsCache()) + {} + + /*! + * \brief Bind this local view to a grid element. + * \param element The grid element + * \param fvGeometry Local view on the grid geometry + */ + void bind(const Element& element, + const FVElementGeometry& fvGeometry) + { + const auto& x = gridVariables().dofs(); + elemVolVars_.bind(element, fvGeometry, x); + elemFluxVarsCache_.bind(element, fvGeometry, elemVolVars_); + } + + /*! + * \brief Bind only the volume variables local view to a grid element. + * \param element The grid element + * \param fvGeometry Local view on the grid geometry + */ + void bindElemVolVars(const Element& element, + const FVElementGeometry& fvGeometry) + { + elemVolVars_.bind(element, fvGeometry, gridVariables().dofs()); + + // unbind flux variables cache + elemFluxVarsCache_ = localView(gridVariables().gridFluxVarsCache()); + } + + //! 
return reference to the elem vol vars + const ElementVolumeVariables& elemVolVars() const { return elemVolVars_; } + ElementVolumeVariables& elemVolVars() { return elemVolVars_; } + + //! return reference to the flux variables cache + const ElementFluxVariablesCache& elemFluxVarsCache() const { return elemFluxVarsCache_; } + ElementFluxVariablesCache& elemFluxVarsCache() { return elemFluxVarsCache_; } + + //! Return reference to the grid variables + const GridVariables& gridVariables() const + { return *gridVariables_; } + +private: + const GridVariables* gridVariables_; + ElementVolumeVariables elemVolVars_; + ElementFluxVariablesCache elemFluxVarsCache_; +}; + +/*! + * \ingroup Discretization + * \brief The grid variable class for finite volume schemes, storing + * variables on scv and scvf (volume and flux variables). + * \tparam GG the type of the grid geometry + * \tparam GVV the type of the grid volume variables + * \tparam GFVC the type of the grid flux variables cache + * \tparam X the type used for solution vectors + * \todo TODO: GG is an obsolete template argument? + */ +template> class FVGridVariables +: public GridVariables +{ + using ParentType = GridVariables; + using ThisType = FVGridVariables; + +public: + using typename ParentType::SolutionVector; + //! export type of the finite volume grid geometry using GridGeometry = GG; - //! export type of the finite volume grid geometry + //! export type of the grid volume variables using GridVolumeVariables = GVV; //! export type of the volume variables @@ -53,59 +144,109 @@ public: //! export primary variable type using PrimaryVariables = typename VolumeVariables::PrimaryVariables; - //! export scalar type (TODO get it directly from the volvars) - using Scalar = std::decay_t()[0])>; - - //! export type of the finite volume grid geometry + //! export cache type for flux variables using GridFluxVariablesCache = GFVC; + //! export the local view on this class + using LocalView = FVGridVariablesLocalView; + + /*! + * \brief Constructor + * \param problem The problem to be solved + * \param gridGeometry The geometry of the computational grid + * \todo TODO: Here we could forward to the base class with + * [problem] (auto& x) { problem->applyInitialSolution(x); }, + * but we cannot be sure that user problems implement + * the initial() or initialAtPos() functions? + */ template FVGridVariables(std::shared_ptr problem, std::shared_ptr gridGeometry) - : gridGeometry_(gridGeometry) - , curGridVolVars_(*problem) + : ParentType(gridGeometry) + , gridVolVars_(*problem) , prevGridVolVars_(*problem) , gridFluxVarsCache_(*problem) {} + + /*! + * \brief Constructor with custom initialization of the solution. + * \param problem The problem to be solved + * \param gridGeometry The geometry of the computational grid + * \param solOrInitializer This can be either a reference to a + * solution vector, or an initializer + * lambda. See Dumux::Variables. + */ + template + FVGridVariables(std::shared_ptr problem, + std::shared_ptr gridGeometry, + SolOrInitializer&& solOrInitializer) + : ParentType(gridGeometry, std::forward(solOrInitializer)) + , gridVolVars_(*problem) + , prevGridVolVars_(*problem) + , gridFluxVarsCache_(*problem) + { + gridVolVars_.update(this->gridGeometry(), this->dofs()); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, this->dofs(), true); + prevGridVolVars_ = gridVolVars_; + } + + //!
initialize all variables (stationary case) - template + [[deprecated("Initialize grid variables upon construction instead.")]] void init(const SolutionVector& curSol) { + ParentType::update(curSol); + // resize and update the volVars with the initial solution - curGridVolVars_.update(*gridGeometry_, curSol); + gridVolVars_.update(this->gridGeometry(), curSol); // update the flux variables caches (always force flux cache update on initialization) - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, curSol, true); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol, true); // set the volvars of the previous time step in case we have an instationary problem // note that this means some memory overhead in the case of enabled caching, however // this it outweighted by the advantage of having a single grid variables object for // stationary and instationary problems - prevGridVolVars_ = curGridVolVars_; + // TODO: Remove this in the context of new time integration schemes + prevGridVolVars_ = gridVolVars_; } - //! update all variables - template - void update(const SolutionVector& curSol, bool forceFluxCacheUpdate = false) + //! Deprecated update interface. + [[deprecated("Use update(curSol) or forceUpdateAll(curSol) instead.")]] + void update(const SolutionVector& curSol, bool forceFluxCacheUpdate) { + if (forceFluxCacheUpdate) + forceUpdateAll(curSol); + else + update(curSol); + } + + //! update all variables after grid adaption + [[deprecated("use forceUpdateAll() instead.")]] + void updateAfterGridAdaption(const SolutionVector& curSol) + { forceUpdateAll(curSol); } + + //! Update all variables that may be affected by a change in solution + void update(const SolutionVector& curSol) + { + ParentType::update(curSol); + // resize and update the volVars with the initial solution - curGridVolVars_.update(*gridGeometry_, curSol); + gridVolVars_.update(this->gridGeometry(), curSol); // update the flux variables caches - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, curSol, forceFluxCacheUpdate); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol); } - //! update all variables after grid adaption - template - void updateAfterGridAdaption(const SolutionVector& curSol) + //! Force the update of all variables + void forceUpdateAll(const SolutionVector& curSol) { - // update (always force flux cache update as the grid changed) - update(curSol, true); + ParentType::update(curSol); - // for instationary problems also update the variables - // for the previous time step to the new grid - prevGridVolVars_ = curGridVolVars_; + // resize and update the volVars with the initial solution + gridVolVars_.update(this->gridGeometry(), curSol); + + // update the flux variables caches + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, curSol, true); } /*! @@ -114,18 +255,19 @@ public: */ void advanceTimeStep() { - prevGridVolVars_ = curGridVolVars_; + prevGridVolVars_ = gridVolVars_; } //! resets state to the one before time integration - template void resetTimeStep(const SolutionVector& solution) { + ParentType::update(solution); + // set the new time step vol vars to old vol vars - curGridVolVars_ = prevGridVolVars_; + gridVolVars_ = prevGridVolVars_; // update the flux variables caches - gridFluxVarsCache_.update(*gridGeometry_, curGridVolVars_, solution); + gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, solution); } //! return the flux variables cache @@ -137,12 +279,22 @@ public: { return gridFluxVarsCache_; } //! 
return the current volume variables + const GridVolumeVariables& gridVolVars() const + { return gridVolVars_; } + + //! return the current volume variables + GridVolumeVariables& gridVolVars() + { return gridVolVars_; } + + //! return the current volume variables + [[deprecated("Use gridVolVars() instead")]] const GridVolumeVariables& curGridVolVars() const - { return curGridVolVars_; } + { return gridVolVars_; } //! return the current volume variables + [[deprecated("Use gridVolVars() instead")]] GridVolumeVariables& curGridVolVars() - { return curGridVolVars_; } + { return gridVolVars_; } //! return the volume variables of the previous time step (for instationary problems) const GridVolumeVariables& prevGridVolVars() const @@ -152,18 +304,9 @@ public: GridVolumeVariables& prevGridVolVars() { return prevGridVolVars_; } - //! return the finite volume grid geometry - const GridGeometry& gridGeometry() const - { return *gridGeometry_; } - -protected: - - std::shared_ptr gridGeometry_; //!< pointer to the constant grid geometry - private: - GridVolumeVariables curGridVolVars_; //!< the current volume variables (primary and secondary variables) - GridVolumeVariables prevGridVolVars_; //!< the previous time step's volume variables (primary and secondary variables) - + GridVolumeVariables gridVolVars_; //!< the current volume variables (primary and secondary variables) + GridVolumeVariables prevGridVolVars_; //!< the previous time step's volume variables (primary and secondary variables) GridFluxVariablesCache gridFluxVarsCache_; //!< the flux variables cache }; diff --git a/dumux/discretization/gridvariables.hh b/dumux/discretization/gridvariables.hh new file mode 100644 index 0000000000000000000000000000000000000000..35e239c8342e2907c01383dac35ed550020cf593 --- /dev/null +++ b/dumux/discretization/gridvariables.hh @@ -0,0 +1,70 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \ingroup Discretization + * \brief Base class for grid variables + */ +#ifndef DUMUX_GRID_VARIABLES_HH +#define DUMUX_GRID_VARIABLES_HH + +#include + +#include + +namespace Dumux { + +/*! + * \ingroup Discretization + * \brief Base class for grid variables. + * \tparam GG The grid geometry type + * \tparam X The type used for solution vectors + */ +template +class GridVariables +: public Variables +{ + using ParentType = Variables; + +public: + //! export the grid geometry type + using GridGeometry = GG; + + /*! + * \brief Constructor from a grid geometry. The remaining arguments must + * be valid arguments for the construction of the Variables class. 
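+ * + * For instance, a derived class such as FVGridVariables can be constructed + * from a problem, a geometry and an initializer lambda for the solution + * (sketch; the concrete grid variables type is an assumption): + * \code + * auto gridVars = std::make_shared<TheGridVariables>( + * problem, gridGeometry, + * [&](auto& x) { problem->applyInitialSolution(x); }); + * \endcode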
+ */ + template + explicit GridVariables(std::shared_ptr gridGeometry, + Args&&... args) + : ParentType(std::forward(args)...) + , gridGeometry_(gridGeometry) + {} + + //! Return a reference to the grid geometry + const GridGeometry& gridGeometry() const + { return *gridGeometry_; } + +private: + std::shared_ptr gridGeometry_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/discretization/staggered.hh b/dumux/discretization/staggered.hh index f8c5bec7e4b9447a16d0d5df59d859c818114f77..82255542803a088b818908fd48e1b8734bdfb46f 100644 --- a/dumux/discretization/staggered.hh +++ b/dumux/discretization/staggered.hh @@ -111,8 +111,9 @@ private: using GVV = GetPropType; using GFVC = GetPropType; using GFV = GetPropType; + using X = GetPropType; public: - using type = StaggeredGridVariables; + using type = StaggeredGridVariables; }; //! Use the cell center element boundary types per default diff --git a/dumux/discretization/staggered/gridvariables.hh b/dumux/discretization/staggered/gridvariables.hh index be35f83ac582c677e4e144c0f35849e74dc48bb3..26e7b74f6967bb45aca111cc437e0527f829c8b2 100644 --- a/dumux/discretization/staggered/gridvariables.hh +++ b/dumux/discretization/staggered/gridvariables.hh @@ -24,6 +24,8 @@ #ifndef DUMUX_STAGGERED_GRID_VARIABLES_HH #define DUMUX_STAGGERED_GRID_VARIABLES_HH +#include + #include namespace Dumux { @@ -93,7 +95,7 @@ public: //! return the fv grid geometry const GridGeometry& gridGeometry() const - { return (*gridVariables_->gridGeometry_); } + { return gridVariables_->gridGeometry(); } // return the actual grid variables const ActualGridVariables& gridVariables() const @@ -189,12 +191,15 @@ public: * \tparam GVV the type of the grid volume variables * \tparam GFVC the type of the grid flux variables cache * \tparam GFV the type of the grid face variables + * \tparam X the solution vector type */ -template -class StaggeredGridVariables : public FVGridVariables +template +class StaggeredGridVariables +: public FVGridVariables< GG, GVV, GFVC, std::decay_t()[GG::cellCenterIdx()])> > { - using ParentType = FVGridVariables; - using ThisType = StaggeredGridVariables; + using CellSolutionVector = std::decay_t()[GG::cellCenterIdx()])>; + using ParentType = FVGridVariables; + using ThisType = StaggeredGridVariables; friend class StaggeredGridVariablesView; static constexpr auto cellCenterIdx = GG::cellCenterIdx(); @@ -227,7 +232,7 @@ public: void update(const SolutionVector& curSol) { ParentType::update(curSol[cellCenterIdx]); - curGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); + curGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); } //! initialize all variables (stationary case) @@ -235,8 +240,8 @@ public: void init(const SolutionVector& curSol) { ParentType::init(curSol[cellCenterIdx]); - curGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); - prevGridFaceVariables_.update(*this->gridGeometry_, curSol[faceIdx]); + curGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); + prevGridFaceVariables_.update(this->gridGeometry(), curSol[faceIdx]); } //! 
Sets the current state as the previous for next time step diff --git a/dumux/linear/pdesolver.hh b/dumux/linear/pdesolver.hh index ce204474e949b65155ccd7906161384a6c048fc5..4bb0d5536436b75152232c8453ad7a13856139f5 100644 --- a/dumux/linear/pdesolver.hh +++ b/dumux/linear/pdesolver.hh @@ -42,6 +42,8 @@ #include #include #include +#include + #include #include @@ -67,6 +69,8 @@ class LinearPDESolver : public PDESolver using TimeLoop = TimeLoopBase; public: + using typename ParentType::Variables; + /*! * \brief The Constructor */ @@ -85,7 +89,7 @@ public: /*! * \brief Solve a linear PDE system */ - void solve(SolutionVector& uCurrentIter) override + void solve(Variables& vars) override { Dune::Timer assembleTimer(false); Dune::Timer solveTimer(false); @@ -102,7 +106,7 @@ public: // linearize the problem at the current solution assembleTimer.start(); - this->assembler().assembleJacobianAndResidual(uCurrentIter); + this->assembler().assembleJacobianAndResidual(vars); assembleTimer.stop(); /////////////// @@ -124,7 +128,8 @@ public: solveTimer.start(); // set the delta vector to zero before solving the linear system! - SolutionVector deltaU(uCurrentIter); + using Backend = VariablesBackend; + auto deltaU = Backend::getDofVector(vars); deltaU = 0; // solve by calling the appropriate implementation depending on whether the linear solver @@ -146,8 +151,12 @@ public: // update the current solution and secondary variables updateTimer.start(); + + // TODO: This currently does one additional copy in case assembly from solution is used + auto uCurrentIter = Backend::getDofVector(vars); uCurrentIter -= deltaU; - this->assembler().updateGridVariables(uCurrentIter); + Backend::update(vars, uCurrentIter); + updateTimer.stop(); if (verbose_) { diff --git a/dumux/multidomain/newtonsolver.hh b/dumux/multidomain/newtonsolver.hh index dba8c58a41b89b18c546275b1fb0fe29dda0235e..898b263e89ad5f7b3ddfca440340418ae6bfec47 100644 --- a/dumux/multidomain/newtonsolver.hh +++ b/dumux/multidomain/newtonsolver.hh @@ -50,7 +50,10 @@ template { using ParentType = NewtonSolver; - using SolutionVector = typename Assembler::ResidualType; + using typename ParentType::Backend; + using typename ParentType::SolutionVector; + + static constexpr bool assemblerExportsVariables = Impl::exportsVariables; template using PrimaryVariableSwitch = typename Detail::GetPVSwitchMultiDomain::type; @@ -63,6 +66,7 @@ class MultiDomainNewtonSolver: public NewtonSolver; public: + using typename ParentType::Variables; /*! * \brief The constructor @@ -89,10 +93,10 @@ public: /*! * \brief Indicates the beginning of a Newton iteration. */ - void newtonBeginStep(const SolutionVector& uCurrentIter) override + void newtonBeginStep(const Variables& varsCurrentIter) override { - ParentType::newtonBeginStep(uCurrentIter); - couplingManager_->updateSolution(uCurrentIter); + ParentType::newtonBeginStep(varsCurrentIter); + couplingManager_->updateSolution(Backend::getDofVector(varsCurrentIter)); } /*! @@ -102,14 +106,14 @@ public: * * \param u The initial solution */ - void newtonBegin(SolutionVector& u) override + void newtonBegin(Variables& vars) override { - ParentType::newtonBegin(u); + ParentType::newtonBegin(vars); using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { - this->initPriVarSwitch_(u, id, HasPriVarsSwitch::value>{}); + this->initPriVarSwitch_(vars, id, HasPriVarsSwitch::value>{}); }); } @@ -129,19 +133,24 @@ public: /*! * \brief Indicates that one Newton iteration was finished. 
* - * \param uCurrentIter The solution after the current Newton iteration + * \param varsCurrentIter The variables after the current Newton iteration * \param uLastIter The solution at the beginning of the current Newton iteration */ - void newtonEndStep(SolutionVector &uCurrentIter, const SolutionVector &uLastIter) override + void newtonEndStep(Variables& varsCurrentIter, const SolutionVector& uLastIter) override { using namespace Dune::Hybrid; forEach(std::make_index_sequence{}, [&](auto&& id) { - this->invokePriVarSwitch_(uCurrentIter[id], id, HasPriVarsSwitch::value>{}); + auto& uCurrentIter = Backend::getDofVector(varsCurrentIter)[id]; + if constexpr (!assemblerExportsVariables) + this->invokePriVarSwitch_(this->assembler().gridVariables(id), + uCurrentIter, id, HasPriVarsSwitch::value>{}); + else + this->invokePriVarSwitch_(varsCurrentIter[id], uCurrentIter, id, HasPriVarsSwitch::value>{}); }); - ParentType::newtonEndStep(uCurrentIter, uLastIter); - couplingManager_->updateSolution(uCurrentIter); + ParentType::newtonEndStep(varsCurrentIter, uLastIter); + couplingManager_->updateSolution(Backend::getDofVector(varsCurrentIter)); } private: @@ -150,60 +159,61 @@ private: /*! * \brief Reset the privar switch state, noop if there is no priVarSwitch */ template - void initPriVarSwitch_(SolutionVector&, Dune::index_constant id, std::false_type) {} + void initPriVarSwitch_(Variables&, Dune::index_constant id, std::false_type) {} /*! * \brief Switch primary variables if necessary */ template - void initPriVarSwitch_(SolutionVector& sol, Dune::index_constant id, std::true_type) + void initPriVarSwitch_(Variables& vars, Dune::index_constant id, std::true_type) { using namespace Dune::Hybrid; auto& priVarSwitch = *elementAt(priVarSwitches_, id); + auto& sol = Backend::getDofVector(vars)[id]; - priVarSwitch.reset(sol[id].size()); + priVarSwitch.reset(sol.size()); priVarsSwitchedInLastIteration_[i] = false; const auto& problem = this->assembler().problem(id); const auto& gridGeometry = this->assembler().gridGeometry(id); - auto& gridVariables = this->assembler().gridVariables(id); - priVarSwitch.updateDirichletConstraints(problem, gridGeometry, gridVariables, sol[id]); + if constexpr (!assemblerExportsVariables) + priVarSwitch.updateDirichletConstraints(problem, gridGeometry, this->assembler().gridVariables(id), sol); + else // This expects usage of MultiDomainGridVariables + priVarSwitch.updateDirichletConstraints(problem, gridGeometry, vars[id], sol); } /*! * \brief Switch primary variables if necessary, noop if there is no priVarSwitch */ - template - void invokePriVarSwitch_(SubSol&, Dune::index_constant id, std::false_type) {} + template + void invokePriVarSwitch_(SubVars&, SubSol&, Dune::index_constant id, std::false_type) {} /*!
* \brief Switch primary variables if necessary */ - template - void invokePriVarSwitch_(SubSol& uCurrentIter, Dune::index_constant id, std::true_type) + template + void invokePriVarSwitch_(SubVars& subVars, SubSol& uCurrentIter, Dune::index_constant id, std::true_type) { // update the variable switch (returns true if the pri vars at at least one dof were switched) // for disabled grid variable caching const auto& gridGeometry = this->assembler().gridGeometry(id); const auto& problem = this->assembler().problem(id); - auto& gridVariables = this->assembler().gridVariables(id); using namespace Dune::Hybrid; auto& priVarSwitch = *elementAt(priVarSwitches_, id); // invoke the primary variable switch - priVarsSwitchedInLastIteration_[i] = priVarSwitch.update(uCurrentIter, gridVariables, - problem, gridGeometry); + priVarsSwitchedInLastIteration_[i] = priVarSwitch.update(uCurrentIter, subVars, problem, gridGeometry); if (priVarsSwitchedInLastIteration_[i]) { for (const auto& element : elements(gridGeometry.gridView())) { // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch.updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, uCurrentIter); + priVarSwitch.updateSwitchedVolVars(problem, element, gridGeometry, subVars, uCurrentIter); // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch.updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, uCurrentIter); + priVarSwitch.updateSwitchedFluxVarsCache(problem, element, gridGeometry, subVars, uCurrentIter); } } } diff --git a/dumux/nonlinear/CMakeLists.txt b/dumux/nonlinear/CMakeLists.txt index 2c5c2050a30e68e34df4e6f3a9f6f50fcf28b3db..0cdeabcdf59f518eaaa23d65c3e0b756550de98c 100644 --- a/dumux/nonlinear/CMakeLists.txt +++ b/dumux/nonlinear/CMakeLists.txt @@ -2,5 +2,6 @@ install(FILES findscalarroot.hh newtonconvergencewriter.hh newtonsolver.hh +primaryvariableswitchadapter.hh staggerednewtonconvergencewriter.hh DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/nonlinear) diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh index 3265ef86877f21a21ffdb7124012261f6924eb8b..57fb554f84d7bdf03830d08ad9cd88bddcade567 100644 --- a/dumux/nonlinear/newtonsolver.hh +++ b/dumux/nonlinear/newtonsolver.hh @@ -48,12 +48,15 @@ #include #include #include +#include + #include #include #include #include #include "newtonconvergencewriter.hh" +#include "primaryvariableswitchadapter.hh" namespace Dumux { namespace Detail { @@ -68,13 +71,6 @@ struct supportsPartialReassembly {} }; -//! 
helper aliases to extract a primary variable switch from the VolumeVariables (if defined, yields int otherwise) -template -using DetectPVSwitch = typename Assembler::GridVariables::VolumeVariables::PrimaryVariableSwitch; - -template -using GetPVSwitch = Dune::Std::detected_or; - // helper struct and function detecting if the linear solver features a norm() function template using NormDetector = decltype(std::declval().norm(std::declval())); @@ -176,18 +172,27 @@ template { using ParentType = PDESolver; + +protected: + using Backend = VariablesBackend; + using SolutionVector = typename Backend::DofVector; + +private: using Scalar = typename Assembler::Scalar; using JacobianMatrix = typename Assembler::JacobianMatrix; - using SolutionVector = typename Assembler::ResidualType; using ConvergenceWriter = ConvergenceWriterInterface; using TimeLoop = TimeLoopBase; - using PrimaryVariableSwitch = typename Detail::GetPVSwitch::type; - using HasPriVarsSwitch = typename Detail::GetPVSwitch::value_t; // std::true_type or std::false_type - static constexpr bool hasPriVarsSwitch() { return HasPriVarsSwitch::value; }; + // enable models with primary variable switch + // TODO: Always use ParentType::Variables once we require assemblers to export variables + static constexpr bool assemblerExportsVariables = Impl::exportsVariables; + using Vars = std::conditional_t; + using PrimaryVariableSwitchAdapter = Dumux::PrimaryVariableSwitchAdapter; public: - + using typename ParentType::Variables; using Communication = Comm; /*! @@ -201,6 +206,7 @@ public: , endIterMsgStream_(std::ostringstream::out) , comm_(comm) , paramGroup_(paramGroup) + , priVarSwitchAdapter_(std::make_unique(paramGroup)) { initParams_(paramGroup); @@ -214,12 +220,6 @@ public: // initialize the partial reassembler if (enablePartialReassembly_) partialReassembler_ = std::make_unique(this->assembler()); - - if (hasPriVarsSwitch()) - { - const int priVarSwitchVerbosity = getParamFromGroup(paramGroup, "PrimaryVariableSwitch.Verbosity", 1); - priVarSwitch_ = std::make_unique(priVarSwitchVerbosity); - } } //! the communicator for parallel runs @@ -288,38 +288,56 @@ public: /*! * \brief Run the Newton method to solve a non-linear system. * Does time step control when the Newton fails to converge + * \param vars The variables object representing the current state of the + * numerical solution (primary and possibly secondary variables). 
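+ * + * Schematically, a transient simulation drives this method as follows + * (sketch; assuming the interface of Dumux::TimeLoop): + * \code + * timeLoop->start(); + * while (!timeLoop->finished()) + * { + * nonLinearSolver.solve(vars, *timeLoop); // may adapt the time step size + * gridVariables->advanceTimeStep(); + * timeLoop->advanceTimeStep(); + * timeLoop->reportTimeStep(); + * } + * timeLoop->finalize(); + * \endcode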
*/ - void solve(SolutionVector& uCurrentIter, TimeLoop& timeLoop) override + void solve(Variables& vars, TimeLoop& timeLoop) override { - if (this->assembler().isStationaryProblem()) - DUNE_THROW(Dune::InvalidStateException, "Using time step control with stationary problem makes no sense!"); + if constexpr (!assemblerExportsVariables) + { + if (this->assembler().isStationaryProblem()) + DUNE_THROW(Dune::InvalidStateException, "Using time step control with stationary problem makes no sense!"); + } // try solving the non-linear system for (std::size_t i = 0; i <= maxTimeStepDivisions_; ++i) { + // linearize & solve - const bool converged = solve_(uCurrentIter); + const bool converged = solve_(vars); if (converged) return; else if (!converged && i < maxTimeStepDivisions_) { - // set solution to previous solution - uCurrentIter = this->assembler().prevSol(); - - // reset the grid variables to the previous solution - this->assembler().resetTimeStep(uCurrentIter); - - if (verbosity_ >= 1) + if constexpr (assemblerExportsVariables) + DUNE_THROW(Dune::NotImplemented, "Time step reset for new assembly methods"); + else { - const auto dt = timeLoop.timeStepSize(); - std::cout << Fmt::format("Newton solver did not converge with dt = {} seconds. ", dt) - << Fmt::format("Retrying with time step of dt = {} seconds.\n", dt*retryTimeStepReductionFactor_); + // set solution to previous solution + Backend::update(vars, this->assembler().prevSol()); + + // if this is true, we assume old-style assemblers/grid variables + // TODO: reset time step is more efficient as it simply copies + // prevVolVars into curVolVars. Above (in the new style) + // there will probably be an update. We should think about + // how to achieve this efficiently. + // TODO: Is there a test with time step reductions? It should + // probably be tested if this works properly. + if (!assemblerExportsVariables) + this->assembler().resetTimeStep(Backend::getDofVector(vars)); + + if (verbosity_ >= 1) + { + const auto dt = timeLoop.timeStepSize(); + std::cout << Fmt::format("Newton solver did not converge with dt = {} seconds. ", dt) + << Fmt::format("Retrying with time step of dt = {} seconds.\n", dt*retryTimeStepReductionFactor_); + } + + // try again with dt = dt * retryTimeStepReductionFactor_ + timeLoop.setTimeStepSize(timeLoop.timeStepSize() * retryTimeStepReductionFactor_); } - - // try again with dt = dt * retryTimeStepReductionFactor_ - timeLoop.setTimeStepSize(timeLoop.timeStepSize() * retryTimeStepReductionFactor_); } else @@ -334,10 +352,12 @@ public: /*! * \brief Run the Newton method to solve a non-linear system. * The solver is responsible for all the strategic decisions. + * \param vars The variables object representing the current state of the + * numerical solution (primary and possibly secondary variables). */ - void solve(SolutionVector& uCurrentIter) override + void solve(Variables& vars) override { - const bool converged = solve_(uCurrentIter); + const bool converged = solve_(vars); if (!converged) DUNE_THROW(NumericalProblem, Fmt::format("Newton solver didn't converge after {} iterations.\n", numSteps_)); @@ -347,36 +367,43 @@ public: * \brief Called before the Newton method is applied to an * non-linear system of equations. 
* - * \param u The initial solution + * \param initVars The variables representing the initial solution */ - virtual void newtonBegin(SolutionVector& u) + virtual void newtonBegin(Variables& initVars) { numSteps_ = 0; - initPriVarSwitch_(u, HasPriVarsSwitch{}); + + if constexpr (assemblerExportsVariables) + priVarSwitchAdapter_->initialize(Backend::getDofVector(initVars), initVars); + else // this assumes assembly with solution (i.e. Variables=SolutionVector) + priVarSwitchAdapter_->initialize(initVars, this->assembler().gridVariables()); + + const auto& initSol = Backend::getDofVector(initVars); // write the initial residual if a convergence writer was set if (convergenceWriter_) { - this->assembler().assembleResidual(u); - SolutionVector delta(u); - delta = 0; // dummy vector, there is no delta before solving the linear system - convergenceWriter_->write(u, delta, this->assembler().residual()); + this->assembler().assembleResidual(initVars); + + // dummy vector, there is no delta before solving the linear system + auto delta = Backend::makeZeroDofVector(Backend::size(initSol)); + convergenceWriter_->write(initSol, delta, this->assembler().residual()); } if (enablePartialReassembly_) { partialReassembler_->resetColors(); - resizeDistanceFromLastLinearization_(u, distanceFromLastLinearization_); + resizeDistanceFromLastLinearization_(initSol, distanceFromLastLinearization_); } } /*! * \brief Returns true if another iteration should be done. * - * \param uCurrentIter The solution of the current Newton iteration + * \param varsCurrentIter The variables of the current Newton iteration * \param converged if the Newton method's convergence criterion was met in this step */ - virtual bool newtonProceed(const SolutionVector &uCurrentIter, bool converged) + virtual bool newtonProceed(const Variables &varsCurrentIter, bool converged) { if (numSteps_ < minSteps_) return true; @@ -399,7 +426,7 @@ public: /*! * \brief Indicates the beginning of a Newton iteration. */ - virtual void newtonBeginStep(const SolutionVector& u) + virtual void newtonBeginStep(const Variables& vars) { lastShift_ = shift_; if (numSteps_ == 0) @@ -415,11 +442,11 @@ public: /*! * \brief Assemble the linear system of equations \f$\mathbf{A}x - b = 0\f$. * - * \param uCurrentIter The current iteration's solution vector + * \param vars The current iteration's variables */ - virtual void assembleLinearSystem(const SolutionVector& uCurrentIter) + virtual void assembleLinearSystem(const Variables& vars) { - assembleLinearSystem_(this->assembler(), uCurrentIter); + assembleLinearSystem_(this->assembler(), vars); if (enablePartialReassembly_) partialReassembler_->report(comm_, endIterMsgStream_); @@ -499,15 +526,15 @@ public: * subtract deltaU from uLastIter, i.e. * \f[ u^{k+1} = u^k - \Delta u^k \f] * - * \param uCurrentIter The solution vector after the current iteration + * \param vars The variables after the current iteration * \param uLastIter The solution vector after the last iteration * \param deltaU The delta as calculated from solving the linear * system of equations. This parameter also stores * the updated solution. 
*/ - void newtonUpdate(SolutionVector &uCurrentIter, - const SolutionVector &uLastIter, - const SolutionVector &deltaU) + void newtonUpdate(Variables& vars, + const SolutionVector& uLastIter, + const SolutionVector& deltaU) { if (enableShiftCriterion_ || enablePartialReassembly_) newtonUpdateShift_(uLastIter, deltaU); @@ -548,32 +575,35 @@ public: } if (useLineSearch_) - lineSearchUpdate_(uCurrentIter, uLastIter, deltaU); + lineSearchUpdate_(vars, uLastIter, deltaU); else if (useChop_) - choppedUpdate_(uCurrentIter, uLastIter, deltaU); + choppedUpdate_(vars, uLastIter, deltaU); else { - uCurrentIter = uLastIter; + auto uCurrentIter = uLastIter; uCurrentIter -= deltaU; - solutionChanged_(uCurrentIter); + solutionChanged_(vars, uCurrentIter); if (enableResidualCriterion_) - computeResidualReduction_(uCurrentIter); + computeResidualReduction_(vars); } } /*! * \brief Indicates that one Newton iteration was finished. * - * \param uCurrentIter The solution after the current Newton iteration + * \param vars The variables after the current Newton iteration * \param uLastIter The solution at the beginning of the current Newton iteration */ - virtual void newtonEndStep(SolutionVector &uCurrentIter, + virtual void newtonEndStep(Variables &vars, const SolutionVector &uLastIter) { - invokePriVarSwitch_(uCurrentIter, HasPriVarsSwitch{}); + if constexpr (assemblerExportsVariables) + priVarSwitchAdapter_->invoke(Backend::getDofVector(vars), vars); + else // this assumes assembly with solution (i.e. Variables=SolutionVector) + priVarSwitchAdapter_->invoke(vars, this->assembler().gridVariables()); ++numSteps_; @@ -615,7 +645,7 @@ public: { // in case the model has a priVar switch and some some primary variables // actually switched their state in the last iteration, enforce another iteration - if (priVarsSwitchedInLastIteration_) + if (priVarSwitchAdapter_->switched()) return false; if (enableShiftCriterion_ && !enableResidualCriterion_) @@ -785,22 +815,37 @@ public: protected: /*! - * \brief Update solution-depended quantities like grid variables after the solution has changed. + * \brief Update solution-dependent quantities like grid variables after the solution has changed. + * \todo TODO: In case we stop support for old-style grid variables / assemblers at one point, + * this would become obsolete as only the update call to the backend would remain. 
*/ - virtual void solutionChanged_(const SolutionVector &uCurrentIter) + virtual void solutionChanged_(Variables& vars, const SolutionVector& uCurrentIter) { - this->assembler().updateGridVariables(uCurrentIter); + Backend::update(vars, uCurrentIter); + + if constexpr (!assemblerExportsVariables) + this->assembler().updateGridVariables(Backend::getDofVector(vars)); } - void computeResidualReduction_(const SolutionVector &uCurrentIter) + void computeResidualReduction_(const Variables& vars) { + // we assume that the assembler works on solution vectors + // if it doesn't export the variables type if constexpr (Detail::hasNorm()) { - this->assembler().assembleResidual(uCurrentIter); + if constexpr (!assemblerExportsVariables) + this->assembler().assembleResidual(Backend::getDofVector(vars)); + else + this->assembler().assembleResidual(vars); residualNorm_ = this->linearSolver().norm(this->assembler().residual()); } else - residualNorm_ = this->assembler().residualNorm(uCurrentIter); + { + if constexpr (!assemblerExportsVariables) + residualNorm_ = this->assembler().residualNorm(Backend::getDofVector(vars)); + else + residualNorm_ = this->assembler().residualNorm(vars); + } reduction_ = residualNorm_; reduction_ /= initialResidual_; @@ -809,58 +854,6 @@ protected: bool enableResidualCriterion() const { return enableResidualCriterion_; } - /*! - * \brief Initialize the privar switch, noop if there is no priVarSwitch - */ - void initPriVarSwitch_(SolutionVector&, std::false_type) {} - - /*! - * \brief Initialize the privar switch - */ - void initPriVarSwitch_(SolutionVector& sol, std::true_type) - { - priVarSwitch_->reset(sol.size()); - priVarsSwitchedInLastIteration_ = false; - - const auto& problem = this->assembler().problem(); - const auto& gridGeometry = this->assembler().gridGeometry(); - auto& gridVariables = this->assembler().gridVariables(); - priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, gridVariables, sol); - } - - /*! - * \brief Switch primary variables if necessary, noop if there is no priVarSwitch - */ - void invokePriVarSwitch_(SolutionVector&, std::false_type) {} - - /*! - * \brief Switch primary variables if necessary - */ - void invokePriVarSwitch_(SolutionVector& uCurrentIter, std::true_type) - { - // update the variable switch (returns true if the pri vars at at least one dof were switched) - // for disabled grid variable caching - const auto& gridGeometry = this->assembler().gridGeometry(); - const auto& problem = this->assembler().problem(); - auto& gridVariables = this->assembler().gridVariables(); - - // invoke the primary variable switch - priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, gridVariables, - problem, gridGeometry); - - if (priVarsSwitchedInLastIteration_) - { - for (const auto& element : elements(gridGeometry.gridView())) - { - // if the volume variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, gridVariables, uCurrentIter); - - // if the flux variables are cached globally, we need to update those where the primary variables have been switched - priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, gridVariables, uCurrentIter); - } - } - } - //! optimal number of iterations we want to achieve int targetSteps_; //! minimum number of iterations we do @@ -890,16 +883,16 @@ private: * \brief Run the Newton method to solve a non-linear system. 
* The solver is responsible for all the strategic decisions.
      */
-    bool solve_(SolutionVector& uCurrentIter)
+    bool solve_(Variables& vars)
     {
         try
         {
             // newtonBegin may manipulate the solution
-            newtonBegin(uCurrentIter);
+            newtonBegin(vars);

             // the given solution is the initial guess
-            SolutionVector uLastIter(uCurrentIter);
-            SolutionVector deltaU(uCurrentIter);
+            auto uLastIter = Backend::getDofVector(vars);
+            auto deltaU = Backend::getDofVector(vars);

             // setup timers
             Dune::Timer assembleTimer(false);
@@ -909,15 +902,15 @@ private:
             // execute the method as long as the solver thinks
             // that we should do another iteration
             bool converged = false;
-            while (newtonProceed(uCurrentIter, converged))
+            while (newtonProceed(vars, converged))
             {
                 // notify the solver that we're about to start
                 // a new iteration
-                newtonBeginStep(uCurrentIter);
+                newtonBeginStep(vars);

                 // make the current solution the old one
                 if (numSteps_ > 0)
-                    uLastIter = uCurrentIter;
+                    uLastIter = Backend::getDofVector(vars);

                 if (verbosity_ >= 1 && enableDynamicOutput_)
                     std::cout << "Assemble: r(x^k) = dS/dt + div F - q;   M = grad r"
@@ -929,7 +922,7 @@ private:

                 // linearize the problem at the current solution
                 assembleTimer.start();
-                assembleLinearSystem(uCurrentIter);
+                assembleLinearSystem(vars);
                 assembleTimer.stop();

                 ///////////////
@@ -964,17 +957,17 @@ private:
                 updateTimer.start();
                 // update the last iteration's solution with the computed delta;
                 // the result is stored in the variables object
-                newtonUpdate(uCurrentIter, uLastIter, deltaU);
+                newtonUpdate(vars, uLastIter, deltaU);
                 updateTimer.stop();

                 // tell the solver that we're done with this iteration
-                newtonEndStep(uCurrentIter, uLastIter);
+                newtonEndStep(vars, uLastIter);

                 // if a convergence writer was specified compute residual and write output
                 if (convergenceWriter_)
                 {
-                    this->assembler().assembleResidual(uCurrentIter);
-                    convergenceWriter_->write(uCurrentIter, deltaU, this->assembler().residual());
+                    this->assembler().assembleResidual(vars);
+                    convergenceWriter_->write(Backend::getDofVector(vars), deltaU, this->assembler().residual());
                 }

                 // detect if the method has converged
@@ -988,6 +981,8 @@ private:
             if (!newtonConverged())
             {
                 totalWastedIter_ += numSteps_;
+                // TODO: what should NewtonFail receive as arg?
+                auto uCurrentIter = Backend::getDofVector(vars);
                 newtonFail(uCurrentIter);
                 return false;
             }
@@ -1014,6 +1009,9 @@ private:
                 std::cout << "Newton: Caught exception: \"" << e.what() << "\"\n";
             totalWastedIter_ += numSteps_;
+
+            // TODO: what should NewtonFail receive as arg?
+            auto uCurrentIter = Backend::getDofVector(vars);
             newtonFail(uCurrentIter);
             return false;
         }
@@ -1021,18 +1019,18 @@ private:

     //! assembleLinearSystem_ for assemblers that support partial reassembly
     template
-    auto assembleLinearSystem_(const A& assembler, const SolutionVector& uCurrentIter)
+    auto assembleLinearSystem_(const A& assembler, const Variables& vars)
     -> typename std::enable_if_t
     {
-        this->assembler().assembleJacobianAndResidual(uCurrentIter, partialReassembler_.get());
+        this->assembler().assembleJacobianAndResidual(vars, partialReassembler_.get());
     }

     //! assembleLinearSystem_ for assemblers that don't support partial reassembly
     template
-    auto assembleLinearSystem_(const A& assembler, const SolutionVector& uCurrentIter)
+    auto assembleLinearSystem_(const A& assembler, const Variables& vars)
     -> typename std::enable_if_t
     {
-        this->assembler().assembleJacobianAndResidual(uCurrentIter);
+        this->assembler().assembleJacobianAndResidual(vars);
     }

     /*!
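Throughout solve_ the solver touches the degrees of freedom only through the Backend alias (getDofVector, update, makeZeroDofVector, size). The call names are taken from this file, but the following self-contained sketch of such a backend is an editorial illustration; the signatures and the dofs()/update() members are assumptions, the actual backend is defined elsewhere in the patch:

    #include <cstddef>

    // Hypothetical sketch of a variables backend as used by the solver above.
    template<class Vars>
    struct ExampleVariablesBackend
    {
        using DofVector = typename Vars::SolutionVector;

        // access the dof vector underlying a variables object
        static DofVector& getDofVector(Vars& v)
        { return v.dofs(); }

        // re-evaluate all solution-dependent quantities for new dofs
        static void update(Vars& v, const DofVector& u)
        { v.update(u); }

        // helpers to create a zero dof vector of matching size (e.g. for deltaU)
        static std::size_t size(const DofVector& u)
        { return u.size(); }

        static DofVector makeZeroDofVector(const std::size_t n)
        { DofVector d; d.resize(n); d = 0.0; return d; }
    };

When Variables is the solution vector itself (the "old-style" assembly the comments above mention), getDofVector simply returns its argument.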
@@ -1053,7 +1051,7 @@ private:
         shift_ = comm_.max(shift_);
     }

-    virtual void lineSearchUpdate_(SolutionVector &uCurrentIter,
+    virtual void lineSearchUpdate_(Variables &vars,
                                    const SolutionVector &uLastIter,
                                    const SolutionVector &deltaU)
     {
@@ -1061,12 +1059,12 @@ private:

         while (true)
         {
-            uCurrentIter = deltaU;
+            auto uCurrentIter = deltaU;
             uCurrentIter *= -lambda;
             uCurrentIter += uLastIter;
-            solutionChanged_(uCurrentIter);
+            solutionChanged_(vars, uCurrentIter);

-            computeResidualReduction_(uCurrentIter);
+            computeResidualReduction_(vars);

             if (reduction_ < lastReduction_ || lambda <= 0.125)
             {
                 endIterMsgStream_ << Fmt::format(", residual reduction {:.4e}->{:.4e}@lambda={:.4f}", lastReduction_, reduction_, lambda);
@@ -1079,9 +1077,9 @@ private:
     }

     //! \note method must update the gridVariables, too!
-    virtual void choppedUpdate_(SolutionVector &uCurrentIter,
-                                const SolutionVector &uLastIter,
-                                const SolutionVector &deltaU)
+    virtual void choppedUpdate_(Variables& vars,
+                                const SolutionVector& uLastIter,
+                                const SolutionVector& deltaU)
     {
         DUNE_THROW(Dune::NotImplemented,
                    "Chopped Newton update strategy not implemented.");
@@ -1316,9 +1314,7 @@ private:
     std::size_t numLinearSolverBreakdowns_ = 0; //! total number of linear solves that failed

     //! the class handling the primary variable switch
-    std::unique_ptr priVarSwitch_;
-    //! if we switched primary variables in the last iteration
-    bool priVarsSwitchedInLastIteration_ = false;
+    std::unique_ptr priVarSwitchAdapter_;

     //! convergence writer
     std::shared_ptr convergenceWriter_ = nullptr;
diff --git a/dumux/nonlinear/primaryvariableswitchadapter.hh b/dumux/nonlinear/primaryvariableswitchadapter.hh
new file mode 100644
index 0000000000000000000000000000000000000000..df202314c64b186827b7b2953aa325f302aac23c
--- /dev/null
+++ b/dumux/nonlinear/primaryvariableswitchadapter.hh
@@ -0,0 +1,137 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Nonlinear
+ * \brief An adapter for the Newton solver to manage models with primary variable switch
+ */
+#ifndef DUMUX_NONLINEAR_PRIMARY_VARIABLE_SWITCH_ADAPTER_HH
+#define DUMUX_NONLINEAR_PRIMARY_VARIABLE_SWITCH_ADAPTER_HH
+
+#include
+#include
+
+namespace Dumux {
+namespace Impl {
+
+//! helper aliases to extract a primary variable switch from the VolumeVariables (if defined, yields int otherwise)
+template
+using DetectPVSwitch = typename Variables::VolumeVariables::PrimaryVariableSwitch;
+
+template
+using GetPVSwitch = Dune::Std::detected_or;
+
+template
+using PrimaryVariableSwitch = typename GetPVSwitch::type;
+
+template
+constexpr bool hasPriVarsSwitch() { return typename GetPVSwitch::value_t(); }
+
+} // end namespace Impl
+
+/*!
+ * \ingroup Nonlinear
+ * \brief An adapter for the Newton solver to manage models with primary variable switch
+ */
+template ()>
+class PrimaryVariableSwitchAdapter
+{
+    using PrimaryVariableSwitch = typename Impl::PrimaryVariableSwitch;
+
+public:
+    PrimaryVariableSwitchAdapter(const std::string& paramGroup = "")
+    {
+        const int priVarSwitchVerbosity = getParamFromGroup(paramGroup, "PrimaryVariableSwitch.Verbosity", 1);
+        priVarSwitch_ = std::make_unique(priVarSwitchVerbosity);
+    }
+
+    /*!
+     * \brief Initialize the privar switch
+     */
+    template
+    void initialize(SolutionVector& sol, Variables& vars)
+    {
+        priVarSwitch_->reset(sol.size());
+        priVarsSwitchedInLastIteration_ = false;
+        const auto& problem = vars.curGridVolVars().problem();
+        const auto& gridGeometry = problem.gridGeometry();
+        priVarSwitch_->updateDirichletConstraints(problem, gridGeometry, vars, sol);
+    }
+
+    /*!
+     * \brief Switch primary variables if necessary
+     */
+    template
+    void invoke(SolutionVector& uCurrentIter, Variables& vars)
+    {
+        // update the variable switch (returns true if the primary variables at one or more dofs were switched)
+        // for disabled grid variable caching
+        const auto& problem = vars.curGridVolVars().problem();
+        const auto& gridGeometry = problem.gridGeometry();
+
+        // invoke the primary variable switch
+        priVarsSwitchedInLastIteration_ = priVarSwitch_->update(uCurrentIter, vars, problem, gridGeometry);
+        if (priVarsSwitchedInLastIteration_)
+        {
+            for (const auto& element : elements(gridGeometry.gridView()))
+            {
+                // if the volume variables are cached globally, we need to update those where the primary variables have been switched
+                priVarSwitch_->updateSwitchedVolVars(problem, element, gridGeometry, vars, uCurrentIter);
+
+                // if the flux variables are cached globally, we need to update those where the primary variables have been switched
+                priVarSwitch_->updateSwitchedFluxVarsCache(problem, element, gridGeometry, vars, uCurrentIter);
+            }
+        }
+    }
+
+    /*!
+     * \brief Whether the primary variables have been switched in the last call to invoke
+     */
+    bool switched() const
+    { return priVarsSwitchedInLastIteration_; }
+
+private:
+    //! the class handling the primary variable switch
+    std::unique_ptr priVarSwitch_;
+    //! if we switched primary variables in the last iteration
+    bool priVarsSwitchedInLastIteration_ = false;
+};
+
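Together with the empty specialization below, this adapter gives a Newton implementation one uniform call sequence for models with and without a primary variable switch. A minimal sketch of that sequence; the free function and its name are purely illustrative and not part of the patch:

    // Illustrative driver: for models without a switch, the empty
    // specialization below turns every adapter call into a no-op.
    template<class Variables, class SolutionVector>
    void newtonWithPriVarSwitchSketch(PrimaryVariableSwitchAdapter<Variables>& adapter,
                                      SolutionVector& sol, Variables& vars)
    {
        // once, before the first iteration: reset the switch state and
        // adapt Dirichlet-constrained dofs to admissible states
        adapter.initialize(sol, vars);

        // after each Newton update: let the model switch the primary
        // variables wherever the current state requires it
        adapter.invoke(sol, vars);

        // a switch in the last iteration must veto convergence,
        // cf. the use of priVarSwitchAdapter_->switched() above
        if (adapter.switched())
        { /* enforce another Newton iteration */ }
    }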
+/*!
+ * \ingroup Nonlinear
+ * \brief An empty adapter for the Newton solver for models without primary variable switch
+ */
+template
+class PrimaryVariableSwitchAdapter
+{
+public:
+    PrimaryVariableSwitchAdapter(const std::string& paramGroup = "") {}
+
+    template
+    void initialize(SolutionVector&, Variables&) {}
+
+    template
+    void invoke(SolutionVector&, Variables&) {}
+
+    bool switched() const
+    { return false; }
+};
+
+} // end namespace Dumux
+
+#endif
diff --git a/dumux/porousmediumflow/immiscible/CMakeLists.txt b/dumux/porousmediumflow/immiscible/CMakeLists.txt
index 192e7955c6e66f742d5ccdee1718cbc1d9979069..ccb0268dc6d3332a4cd90ce934e6b51643b31c25 100644
--- a/dumux/porousmediumflow/immiscible/CMakeLists.txt
+++ b/dumux/porousmediumflow/immiscible/CMakeLists.txt
@@ -1,3 +1,4 @@
 install(FILES
 localresidual.hh
+operators.hh
 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/immiscible)
diff --git a/dumux/porousmediumflow/immiscible/operators.hh b/dumux/porousmediumflow/immiscible/operators.hh
new file mode 100644
index 0000000000000000000000000000000000000000..20eae5b7009bc4307c5ab6bda60186fcbdba0ab6
--- /dev/null
+++ b/dumux/porousmediumflow/immiscible/operators.hh
@@ -0,0 +1,180 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup PorousmediumflowModels
+ * \brief Sub-control entity-local evaluation of the operators
+ *        within the systems of equations of n-phase immiscible models.
+ */
+#ifndef DUMUX_FV_IMMISCIBLE_OPERATORS_HH
+#define DUMUX_FV_IMMISCIBLE_OPERATORS_HH
+
+#include
+
+#include
+#include
+#include
+
+namespace Dumux {
+namespace Impl {
+
+    template struct DefaultEnergyOperatorsChooser;
+    template
+    struct DefaultEnergyOperatorsChooser { using Type = void; };
+    template
+    struct DefaultEnergyOperatorsChooser { using Type = FVNonIsothermalOperators; };
+
+    template
+    using DefaultEnergyOperators = typename DefaultEnergyOperatorsChooser::Type;
+
+} // end namespace Impl
+
+/*!
+ * \ingroup PorousmediumflowModels
+ * \brief Sub-control entity-local evaluation of the operators
+ *        within the systems of equations of n-phase immiscible models.
+ * \tparam ModelTraits defines model-related types and variables (e.g. number of phases)
+ * \tparam FluxVariables the type that is responsible for computing the individual
+ *         flux contributions, i.e., advective, diffusive, convective...
+ * \tparam ElementVariables the type of element-local view on the grid variables
+ * \tparam EnergyOperators optional template argument, specifying the class that
+ *         handles the operators related to non-isothermal effects.
+ *         These are assumed to be taken into account by the model
+ *         if this template argument is other than void.
+ */
+template>
+class FVImmiscibleOperators
+: public FVOperators
+{
+    using ParentType = FVOperators;
+
+    // The variables required for the evaluation of the equation
+    using GridVariables = typename ElementVariables::GridVariables;
+    using VolumeVariables = typename GridVariables::VolumeVariables;
+    using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables;
+    using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache;
+
+    // The grid geometry on which the scheme operates
+    using GridGeometry = typename GridVariables::GridGeometry;
+    using FVElementGeometry = typename GridGeometry::LocalView;
+    using SubControlVolume = typename GridGeometry::SubControlVolume;
+    using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace;
+    using Extrusion = Extrusion_t;
+
+    using GridView = typename GridGeometry::GridView;
+    using Element = typename GridView::template Codim<0>::Entity;
+
+    using Problem = std::decay_t().gridVolVars().problem())>;
+    using Indices = typename ModelTraits::Indices;
+
+    static constexpr int numPhases = ModelTraits::numFluidPhases();
+    static constexpr int conti0EqIdx = ModelTraits::Indices::conti0EqIdx;
+    static constexpr bool isNonIsothermal = !std::is_same_v;
+
+public:
+    //! export the type used to store scalar values for all equations
+    using typename ParentType::NumEqVector;
+
+    // export the types of the flux/source/storage terms
+    using typename ParentType::FluxTerm;
+    using typename ParentType::SourceTerm;
+    using typename ParentType::StorageTerm;
+
+    /*!
+     * \brief Compute the storage term of the equations for the given sub-control volume
+     * \param problem The problem to be solved (could store additionally required quantities)
+     * \param scv The sub-control volume
+     * \param volVars The primary & secondary variables evaluated for the scv
+     */
+    static StorageTerm storage(const Problem& problem,
+                               const SubControlVolume& scv,
+                               const VolumeVariables& volVars)
+    {
+        // partial time derivative of the phase mass
+        StorageTerm storage(0.0);
+        for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx)
+        {
+            auto eqIdx = conti0EqIdx + phaseIdx;
+            storage[eqIdx] = volVars.porosity()
+                             * volVars.density(phaseIdx)
+                             * volVars.saturation(phaseIdx);
+
+            // The energy storage in the fluid phase with index phaseIdx
+            if constexpr (isNonIsothermal)
+                EnergyOperators::addPhaseStorage(storage, scv, volVars, phaseIdx);
+        }
+
+        // The energy storage in the solid matrix
+        if constexpr (isNonIsothermal)
+            EnergyOperators::addSolidPhaseStorage(storage, scv, volVars);
+
+        // multiply with volume
+        storage *= Extrusion::volume(scv)*volVars.extrusionFactor();
+
+        return storage;
+    }
+
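For orientation, a brief editorial summary (standard immiscible-model form, not text from the new header): storage() above and flux() below assemble the sub-control-volume contributions of the phase mass balances

\[
\frac{\partial\, (\phi \rho_\alpha S_\alpha)}{\partial t}
  + \nabla \cdot \left( \rho_\alpha \mathbf{v}_\alpha \right) - q_\alpha = 0,
\qquad
\mathbf{v}_\alpha = -\frac{k_{r\alpha}}{\mu_\alpha} \mathbf{K} \left( \nabla p_\alpha - \rho_\alpha \mathbf{g} \right),
\]

one equation per fluid phase \(\alpha\): storage() supplies \(\phi \rho_\alpha S_\alpha\) integrated over the (extruded) sub-control volume, while flux() upwinds the density-times-mobility factor \(\rho_\alpha k_{r\alpha}/\mu_\alpha\) that multiplies the advective flux across a sub-control volume face.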
+    /*!
+     * \brief Compute the flux term of the equations for the given sub-control volume face
+     * \param problem The problem to be solved (could store additionally required quantities)
+     * \param element The grid element
+     * \param fvGeometry The element-local view on the finite volume grid geometry
+     * \param elemVolVars The element-local view on the grid volume variables
+     * \param elemFluxVarsCache The element-local view on the grid flux variables cache
+     * \param scvf The sub-control volume face for which the flux term is to be computed
+     */
+    static FluxTerm flux(const Problem& problem,
+                         const Element& element,
+                         const FVElementGeometry& fvGeometry,
+                         const ElementVolumeVariables& elemVolVars,
+                         const ElementFluxVariablesCache& elemFluxVarsCache,
+                         const SubControlVolumeFace& scvf)
+    {
+        FluxVariables fluxVars;
+        fluxVars.init(problem, element, fvGeometry, elemVolVars, scvf, elemFluxVarsCache);
+
+        NumEqVector flux(0.0);
+        for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx)
+        {
+            // the physical quantities for which we perform upwinding
+            auto upwindTerm = [phaseIdx](const auto& volVars)
+                              { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx); };
+
+            auto eqIdx = conti0EqIdx + phaseIdx;
+            flux[eqIdx] = fluxVars.advectiveFlux(phaseIdx, upwindTerm);
+
+            // Add advective phase energy fluxes. For the isothermal model the contribution is zero.
+            if constexpr (isNonIsothermal)
+                EnergyOperators::addHeatConvectionFlux(flux, fluxVars, phaseIdx);
+        }
+
+        // Add diffusive energy fluxes. For the isothermal model the contribution is zero.
+        if constexpr (isNonIsothermal)
+            EnergyOperators::addHeatConductionFlux(flux, fluxVars);
+
+        return flux;
+    }
+};
+
+} // end namespace Dumux
+
+#endif
diff --git a/dumux/porousmediumflow/nonequilibrium/gridvariables.hh b/dumux/porousmediumflow/nonequilibrium/gridvariables.hh
index e27312975058a80f78c4fd025b044fcee6a19f9d..a871d2f703993f00ca336e27726ed2351cafa799 100644
--- a/dumux/porousmediumflow/nonequilibrium/gridvariables.hh
+++ b/dumux/porousmediumflow/nonequilibrium/gridvariables.hh
@@ -46,12 +46,14 @@ template
 class NonEquilibriumGridVariables
 : public FVGridVariables,
                          GetPropType,
-                         GetPropType>
+                         GetPropType,
+                         GetPropType>
 {
     using ThisType = NonEquilibriumGridVariables;
     using ParentType = FVGridVariables,
                                        GetPropType,
-                                       GetPropType>;
+                                       GetPropType,
+                                       GetPropType>;

     using VelocityBackend = PorousMediumFlowVelocity>;

@@ -88,15 +90,15 @@ public:
         for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx)
         {
             if(isBox && dim == 1)
-                velocity[phaseIdx].resize(this->gridGeometry_->gridView().size(0));
+                velocity[phaseIdx].resize(this->gridGeometry().gridView().size(0));
             else
-                velocity[phaseIdx].resize(this->gridGeometry_->numDofs());
+                velocity[phaseIdx].resize(this->gridGeometry().numDofs());
         }
-        for (const auto& element : elements(this->gridGeometry_->gridView(), Dune::Partitions::interior))
+        for (const auto& element : elements(this->gridGeometry().gridView(), Dune::Partitions::interior))
         {
-            const auto eIdxGlobal = this->gridGeometry_->elementMapper().index(element);
+            const auto eIdxGlobal = this->gridGeometry().elementMapper().index(element);

-            auto fvGeometry = localView(*this->gridGeometry_);
+            auto fvGeometry = localView(this->gridGeometry());
             auto elemVolVars = localView(this->curGridVolVars());
             auto elemFluxVarsCache = localView(this->gridFluxVarsCache());
diff --git a/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh b/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh
index 87b34a8e4b0355ec0cabe96e4e380dc946467c9f..e0c5c51e17a1075d6de4af0341aacc8a6973d42a 100644
--- a/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh
+++ b/dumux/porousmediumflow/nonequilibrium/newtonsolver.hh
@@ -40,20 +40,27 @@ template
 class NonEquilibriumNewtonSolver : public NewtonSolver
 {
     using ParentType = NewtonSolver;
-    using SolutionVector = typename Assembler::ResidualType;
+
+    using typename ParentType::Backend;
+    using typename ParentType::SolutionVector;
+    static constexpr bool assemblerExportsVariables = Impl::exportsVariables;

 public:
     using ParentType::ParentType;
+    using typename ParentType::Variables;

-    void newtonEndStep(SolutionVector &uCurrentIter,
+    void newtonEndStep(Variables &varsCurrentIter,
                        const SolutionVector &uLastIter) final
     {
-        ParentType::newtonEndStep(uCurrentIter, uLastIter);
+        ParentType::newtonEndStep(varsCurrentIter, uLastIter);
+        const auto& uCurrentIter = Backend::getDofVector(varsCurrentIter);

-        auto& gridVariables = this->assembler().gridVariables();
         // Averages the face velocities of a vertex. Implemented in the model.
         // The velocities are stored in the model.
-        gridVariables.calcVelocityAverage(uCurrentIter);
+        if constexpr(!assemblerExportsVariables)
+            this->assembler().gridVariables().calcVelocityAverage(uCurrentIter);
+        else
+            varsCurrentIter.calcVelocityAverage(uCurrentIter);
     }
 };
diff --git a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
index 24dc71bd458be782c29a4934952dbb8da06dcd4d..5db30722a0709c13579cd8e1c1fdd152a46856cb 100644
--- a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
+++ b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
@@ -3,5 +3,6 @@ indices.hh
 iofields.hh
 localresidual.hh
 model.hh
+operators.hh
 volumevariables.hh
 DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/nonisothermal)
diff --git a/dumux/porousmediumflow/nonisothermal/operators.hh b/dumux/porousmediumflow/nonisothermal/operators.hh
new file mode 100644
index 0000000000000000000000000000000000000000..1e12074f865365d6be7c18a91477d5303c0f612b
--- /dev/null
+++ b/dumux/porousmediumflow/nonisothermal/operators.hh
@@ -0,0 +1,149 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup NIModel
+ * \brief Sub-control entity-local evaluation of the operators
+ *        involved in the system of equations of non-isothermal models
+ *        that assume thermal equilibrium between all phases.
+ */
+#ifndef DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH
+#define DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH
+
+namespace Dumux {
+
+/*!
+ * \ingroup NIModel
+ * \brief Sub-control entity-local evaluation of the operators
+ *        involved in the system of equations of non-isothermal models
+ *        that assume thermal equilibrium between all phases.
+ * \tparam FluxVariables the type that is responsible for computing the individual
+ *         flux contributions, i.e., advective, diffusive, conductive...
+ * \tparam ElementVariables the type of element-local view on the grid variables
+ */
+template
+class FVNonIsothermalOperators
+{
+    // The variables required for the evaluation of the equation
+    using GridVariables = typename ElementVariables::GridVariables;
+    using VolumeVariables = typename GridVariables::VolumeVariables;
+    using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables;
+
+    // The grid geometry on which the scheme operates
+    using GridGeometry = typename GridVariables::GridGeometry;
+    using FVElementGeometry = typename GridGeometry::LocalView;
+    using SubControlVolume = typename GridGeometry::SubControlVolume;
+
+    using GridView = typename GridGeometry::GridView;
+    using Element = typename GridView::template Codim<0>::Entity;
+
+    static constexpr int energyEqIdx = ModelTraits::Indices::energyEqIdx;
+
+public:
+
+    /*!
+     * \brief The energy storage in the fluid phase with index phaseIdx
+     *
+     * \param storage The storage term the contribution is added to
+     * \param scv The sub-control volume
+     * \param volVars The volume variables
+     * \param phaseIdx The phase index
+     */
+    template
+    static void addPhaseStorage(NumEqVector& storage,
+                                const SubControlVolume& scv,
+                                const VolumeVariables& volVars,
+                                int phaseIdx)
+    {
+        storage[energyEqIdx] += volVars.porosity()
+                                * volVars.density(phaseIdx)
+                                * volVars.internalEnergy(phaseIdx)
+                                * volVars.saturation(phaseIdx);
+    }
+
+    /*!
+     * \brief The energy storage in the solid matrix
+     *
+     * \param storage The storage term the contribution is added to
+     * \param scv The sub-control volume
+     * \param volVars The volume variables
+     */
+    template
+    static void addSolidPhaseStorage(NumEqVector& storage,
+                                     const SubControlVolume& scv,
+                                     const VolumeVariables& volVars)
+    {
+        storage[energyEqIdx] += volVars.temperature()
+                                * volVars.solidHeatCapacity()
+                                * volVars.solidDensity()
+                                * (1.0 - volVars.porosity());
+    }
+
+    /*!
+     * \brief The advective phase energy fluxes
+     *
+     * \param flux The flux
+     * \param fluxVars The flux variables.
+     * \param phaseIdx The phase index
+     */
+    template
+    static void addHeatConvectionFlux(NumEqVector& flux,
+                                      FluxVariables& fluxVars,
+                                      int phaseIdx)
+    {
+        auto upwindTerm = [phaseIdx](const auto& volVars)
+                          { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx)*volVars.enthalpy(phaseIdx); };
+
+        flux[energyEqIdx] += fluxVars.advectiveFlux(phaseIdx, upwindTerm);
+    }
+
+    /*!
+     * \brief The diffusive energy fluxes
+     *
+     * \param flux The flux
+     * \param fluxVars The flux variables.
+     */
+    template
+    static void addHeatConductionFlux(NumEqVector& flux,
+                                      FluxVariables& fluxVars)
+    {
+        flux[energyEqIdx] += fluxVars.heatConductionFlux();
+    }
+
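Again as editorial background (the standard thermal-equilibrium form, not text from the header): the four helpers above contribute the storage and flux terms of the energy balance

\[
\frac{\partial}{\partial t} \left[ \sum_\alpha \phi \rho_\alpha u_\alpha S_\alpha
  + (1-\phi)\, \rho_s c_s T \right]
  + \nabla \cdot \left[ \sum_\alpha \rho_\alpha h_\alpha \mathbf{v}_\alpha
  - \lambda_{pm} \nabla T \right] - q_e = 0,
\]

with phase internal energies \(u_\alpha\) and enthalpies \(h_\alpha\), solid density \(\rho_s\) and heat capacity \(c_s\), and an effective heat conductivity \(\lambda_{pm}\) of the porous medium.

+    /*!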
+ * \brief heat transfer between the phases for nonequilibrium models + * + * \param source The source which ought to be simulated + * \param element An element which contains part of the control volume + * \param fvGeometry The finite-volume geometry + * \param elemVolVars The volume variables of the current element + * \param scv The sub-control volume over which we integrate the source term + */ + template + static void addSourceEnergy(NumEqVector& source, + const Element& element, + const FVElementGeometry& fvGeometry, + const ElementVolumeVariables& elemVolVars, + const SubControlVolume &scv) + {} +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/porousmediumflow/richards/newtonsolver.hh b/dumux/porousmediumflow/richards/newtonsolver.hh index 007506df957afe1b472f6137fd1e92b0c36cf828..455d78e6b7bb3fdfc639f6a62de2160b09313529 100644 --- a/dumux/porousmediumflow/richards/newtonsolver.hh +++ b/dumux/porousmediumflow/richards/newtonsolver.hh @@ -45,27 +45,31 @@ class RichardsNewtonSolver : public NewtonSolver { using Scalar = typename Assembler::Scalar; using ParentType = NewtonSolver; - using SolutionVector = typename Assembler::ResidualType; using Indices = typename Assembler::GridVariables::VolumeVariables::Indices; enum { pressureIdx = Indices::pressureIdx }; + using typename ParentType::Backend; + using typename ParentType::SolutionVector; + public: using ParentType::ParentType; + using typename ParentType::Variables; private: /*! * \brief Update the current solution of the Newton method * - * \param uCurrentIter The solution after the current Newton iteration \f$ u^{k+1} \f$ + * \param varsCurrentIter The variables after the current Newton iteration \f$ u^{k+1} \f$ * \param uLastIter The solution after the last Newton iteration \f$ u^k \f$ * \param deltaU The vector of differences between the last * iterative solution and the next one \f$ \Delta u^k \f$ */ - void choppedUpdate_(SolutionVector &uCurrentIter, + void choppedUpdate_(Variables &varsCurrentIter, const SolutionVector &uLastIter, const SolutionVector &deltaU) final { + auto uCurrentIter = Backend::getDofVector(varsCurrentIter); uCurrentIter = uLastIter; uCurrentIter -= deltaU; @@ -110,11 +114,11 @@ private: } } - // update the grid variables - this->solutionChanged_(uCurrentIter); + // update the variables + this->solutionChanged_(varsCurrentIter, uCurrentIter); if (this->enableResidualCriterion()) - this->computeResidualReduction_(uCurrentIter); + this->computeResidualReduction_(varsCurrentIter); } }; diff --git a/dumux/timestepping/CMakeLists.txt b/dumux/timestepping/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3042828afc156e42db77dfeb89782c058eae6070 --- /dev/null +++ b/dumux/timestepping/CMakeLists.txt @@ -0,0 +1,5 @@ +install(FILES +timelevel.hh +multistagemethods.hh +multistagetimestepper.hh +DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/timestepping) diff --git a/dumux/timestepping/multistagemethods.hh b/dumux/timestepping/multistagemethods.hh new file mode 100644 index 0000000000000000000000000000000000000000..a64d34d796a94f1ca99e06a1a75e6e60c0526896 --- /dev/null +++ b/dumux/timestepping/multistagemethods.hh @@ -0,0 +1,205 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. 
*
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \brief Parameters for different multistage time stepping methods
+ * \note See e.g. https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods
+ */
+#ifndef DUMUX_TIMESTEPPING_MULTISTAGE_METHODS_HH
+#define DUMUX_TIMESTEPPING_MULTISTAGE_METHODS_HH
+
+#include
+#include
+#include
+
+namespace Dumux {
+
+/*!
+ * \brief Abstract interface for one-step multi-stage method parameters in Shu/Osher form.
+ *
+ * This implementation is based on the Shu/Osher form from:
+ * Chi W. Shu and Stanley Osher. Efficient implementation of essentially
+ * non-oscillatory shock-capturing schemes. J. Comput. Phys., 77:439–471, 1988.
+ * https://doi.org/10.1016/0021-9991(88)90177-5.
+ * To this end Eq. (2.12) is extended for implicit schemes.
+ *
+ * We consider the general PDE form
+ *
+ * \f[
+ * \begin{equation}
+ * \frac{\partial M(x)}{\partial t} - R(x, t) = 0,
+ * \end{equation}
+ * \f]
+ *
+ * where \f$ M(x)\f$ is the temporal operator/residual in terms of the solution \f$ x \f$,
+ * and \f$ R(x)\f$ is the discrete spatial operator/residual.
+ * \f$ M(x)\f$ usually corresponds to the conserved quantity (e.g. mass), and \f$ R(x)\f$
+ * contains the rest of the residual. We can then construct \f$ m \f$-stage time discretization methods.
+ * Integrating from time \f$ t^n\f$ to \f$ t^{n+1}\f$ with time step size \f$ \Delta t^n\f$, we solve:
+ *
+ * \f[
+ * \begin{aligned}
+ * x^{(0)} &= x^n\\
+ * \sum_{k=0}^i \left[ \alpha_{ik} M\left(x^{(k)}, t^n + d_k\Delta t^n\right)
+ *   + \beta_{ik}\Delta t^n R \left(x^{(k)}, t^n+d_k\Delta t^n \right)\right]
+ *   &= 0 & \forall i \in \{1,\ldots,m\} \\
+ * x^{n+1} &= x^{(m)}
+ * \end{aligned}
+ * \f]
+ * where \f$ x^{(k)} \f$ denotes the intermediate solution at stage \f$ k \f$.
+ * Depending on the number of stages \f$ m \f$, and the coefficients \f$ \alpha, \beta, d\f$,
+ * schemes with different properties and order of accuracy can be constructed.
+ */
+template
+class MultiStageMethod
+{
+public:
+    virtual bool implicit () const = 0;
+
+    virtual std::size_t numStages () const = 0;
+
+    //! weights of the temporal operator residual (\f$ \alpha_{ik} \f$)
+    virtual Scalar temporalWeight (std::size_t i, std::size_t k) const = 0;
+
+    //! weights of the spatial operator residual (\f$ \beta_{ik} \f$)
+    virtual Scalar spatialWeight (std::size_t i, std::size_t k) const = 0;
+
+    //! time step weights for each stage (\f$ d_k \f$)
+    virtual Scalar timeStepWeight (std::size_t k) const = 0;
+
+    virtual std::string name () const = 0;
+
+    virtual ~MultiStageMethod() = default;
+};
+
+//! Multi-stage time stepping scheme implementations
+namespace MultiStage {
+
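As a worked instance (an editorial illustration obtained by inserting the coefficients of the Theta class below, \(\alpha = (-1, 1)\), \(\beta = (1-\theta, \theta)\), \(d = (0, 1)\), \(m = 1\), into the general stage equation above), the single stage \(i = 1\) reads

\[
M\left(x^{(1)}, t^{n+1}\right) - M\left(x^{(0)}, t^{n}\right)
 + \Delta t^n \left[ (1-\theta)\, R\left(x^{(0)}, t^{n}\right)
 + \theta\, R\left(x^{(1)}, t^{n+1}\right) \right] = 0,
\qquad x^{(0)} = x^n, \quad x^{n+1} = x^{(1)},
\]

so \(\theta = 1\) retains only the new-time spatial residual (implicit Euler) and \(\theta = 0\) only the old-time one (explicit Euler).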
+/*!
+ * \brief A theta time stepping scheme:
+ *        theta=1.0 is an implicit Euler scheme,
+ *        theta=0.0 an explicit Euler scheme, and
+ *        theta=0.5 a Crank-Nicolson scheme
+ */
+template
+class Theta : public MultiStageMethod
+{
+public:
+    explicit Theta(const Scalar theta)
+    : paramAlpha_{{-1.0, 1.0}}
+    , paramBeta_{{1.0-theta, theta}}
+    , paramD_{{0.0, 1.0}}
+    {}
+
+    bool implicit () const final
+    { return paramBeta_[1] > 0.0; }
+
+    std::size_t numStages () const final
+    { return 1; }
+
+    Scalar temporalWeight (std::size_t, std::size_t k) const final
+    { return paramAlpha_[k]; }
+
+    Scalar spatialWeight (std::size_t, std::size_t k) const final
+    { return paramBeta_[k]; }
+
+    Scalar timeStepWeight (std::size_t k) const final
+    { return paramD_[k]; }
+
+    std::string name () const override
+    { return "theta scheme"; }
+
+private:
+    std::array paramAlpha_;
+    std::array paramBeta_;
+    std::array paramD_;
+};
+
+/*!
+ * \brief An explicit Euler time stepping scheme
+ */
+template
+class ExplicitEuler final : public Theta
+{
+public:
+    ExplicitEuler() : Theta(0.0) {}
+
+    std::string name () const final
+    { return "explicit Euler"; }
+};
+
+/*!
+ * \brief An implicit Euler time stepping scheme
+ */
+template
+class ImplicitEuler final : public Theta
+{
+public:
+    ImplicitEuler() : Theta(1.0) {}
+
+    std::string name () const final
+    { return "implicit Euler"; }
+};
+
+/*!
+ * \brief Classical explicit fourth order Runge-Kutta scheme
+ */
+template
+class RungeKuttaExplicitFourthOrder final : public MultiStageMethod
+{
+public:
+    RungeKuttaExplicitFourthOrder()
+    : paramAlpha_{{{-1.0, 1.0, 0.0, 0.0, 0.0},
+                   {-1.0, 0.0, 1.0, 0.0, 0.0},
+                   {-1.0, 0.0, 0.0, 1.0, 0.0},
+                   {-1.0, 0.0, 0.0, 0.0, 1.0}}}
+    , paramBeta_{{{0.5, 0.0, 0.0, 0.0, 0.0},
+                  {0.0, 0.5, 0.0, 0.0, 0.0},
+                  {0.0, 0.0, 1.0, 0.0, 0.0},
+                  {1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0, 0.0}}}
+    , paramD_{{0.0, 0.5, 0.5, 1.0, 1.0}}
+    {}
+
+    bool implicit () const final
+    { return false; }
+
+    std::size_t numStages () const final
+    { return 4; }
+
+    Scalar temporalWeight (std::size_t i, std::size_t k) const final
+    { return paramAlpha_[i-1][k]; }
+
+    Scalar spatialWeight (std::size_t i, std::size_t k) const final
+    { return paramBeta_[i-1][k]; }
+
+    Scalar timeStepWeight (std::size_t k) const final
+    { return paramD_[k]; }
+
+    std::string name () const final
+    { return "explicit Runge-Kutta 4th order"; }
+
+private:
+    std::array, 4> paramAlpha_;
+    std::array, 4> paramBeta_;
+    std::array paramD_;
+};
+
+} // end namespace MultiStage
+} // end namespace Dumux
+
+#endif
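A short, self-contained usage sketch for these scheme classes (an illustrative snippet, mirroring how the 1p test further below constructs its time integration method):

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <dumux/timestepping/multistagemethods.hh>

    int main()
    {
        // inspect the Crank-Nicolson variant of the theta scheme
        // through the abstract MultiStageMethod interface
        std::shared_ptr<Dumux::MultiStageMethod<double>> method
            = std::make_shared<Dumux::MultiStage::Theta<double>>(0.5);

        std::cout << method->name() << ", implicit: " << method->implicit() << "\n";
        for (std::size_t i = 1; i <= method->numStages(); ++i)
            for (std::size_t k = 0; k <= i; ++k)
                std::cout << "alpha(" << i << "," << k << ") = " << method->temporalWeight(i, k)
                          << ", beta("  << i << "," << k << ") = " << method->spatialWeight(i, k) << "\n";
        return 0;
    }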
diff --git a/dumux/timestepping/multistagetimestepper.hh b/dumux/timestepping/multistagetimestepper.hh
new file mode 100644
index 0000000000000000000000000000000000000000..2ddac8d3bd5b9d4290133759d573a72f033adc56
--- /dev/null
+++ b/dumux/timestepping/multistagetimestepper.hh
@@ -0,0 +1,166 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \brief A time stepper performing a single time step of a transient simulation
+ */
+#ifndef DUMUX_TIMESTEPPING_MULTISTAGE_TIMESTEPPER_HH
+#define DUMUX_TIMESTEPPING_MULTISTAGE_TIMESTEPPER_HH
+
+#include
+#include
+#include
+#include
+
+namespace Dumux {
+
+//! forward declaration
+template
+class MultiStageMethod;
+
+//! Data object for the parameters of a given stage
+template
+class MultiStageParams
+{
+    struct Params {
+        Scalar alpha, betaDt, timeAtStage, dtFraction;
+        bool skipTemporal, skipSpatial;
+    };
+public:
+    //! Extract params for stage i from method m
+    MultiStageParams(const MultiStageMethod& m, std::size_t i, const Scalar t, const Scalar dt)
+    : size_(i+1)
+    {
+        params_.resize(size_);
+        for (std::size_t k = 0; k < size_; ++k)
+        {
+            auto& p = params_[k];
+            p.alpha = m.temporalWeight(i, k);
+            p.betaDt = m.spatialWeight(i, k)*dt;
+            p.timeAtStage = t + m.timeStepWeight(k)*dt;
+            p.dtFraction = m.timeStepWeight(k);
+
+            using std::abs;
+            p.skipTemporal = (abs(p.alpha) < 1e-6);
+            p.skipSpatial = (abs(p.betaDt) < 1e-6);
+        }
+    }
+
+    std::size_t size () const
+    { return size_; }
+
+    //! weights of the temporal operator residual (\f$ \alpha_{ik} \f$)
+    Scalar temporalWeight (std::size_t k) const
+    { return params_[k].alpha; }
+
+    //! weights of the spatial operator residual (\f$ \beta_{ik} \f$)
+    Scalar spatialWeight (std::size_t k) const
+    { return params_[k].betaDt; }
+
+    //! the time at which we have to evaluate the operators
+    Scalar timeAtStage (std::size_t k) const
+    { return params_[k].timeAtStage; }
+
+    //! the fraction of a time step corresponding to the k-th stage
+    Scalar timeStepFraction (std::size_t k) const
+    { return params_[k].dtFraction; }
+
+    //! returns true if \f$ \alpha_{ik} = 0\f$
+    bool skipTemporal (std::size_t k) const
+    { return params_[k].skipTemporal; }
+
+    //! returns true if \f$ \beta_{ik} = 0\f$
+    bool skipSpatial (std::size_t k) const
+    { return params_[k].skipSpatial; }
+
+private:
+    std::size_t size_;
+    std::vector params_;
+};
+
+/*!
+ * \brief Time stepping with a multi-stage method
+ * \note We limit ourselves to "diagonally" implicit multi-stage methods where solving
+ *       a stage can only depend on the values of the same stage and stages before
+ *       but not future stages (which would require solving larger linear systems)
+ */
+template
+class MultiStageTimeStepper
+{
+    using Variables = typename PDESolver::Assembler::Variables;
+    using Scalar = typename Variables::Scalar;
+    using StageParams = MultiStageParams;
+
+public:
+
+    /*!
+     * \brief The constructor
+     * \param pdeSolver Solver class for solving a PDE in each stage
+     * \param msMethod The multi-stage method which is to be used for time integration
+     * \todo TODO: Add time step control if the pde solver doesn't converge
+     */
+    MultiStageTimeStepper(std::shared_ptr pdeSolver,
+                          std::shared_ptr> msMethod)
+    : pdeSolver_(pdeSolver)
+    , msMethod_(msMethod)
+    {}
+
+    /*!
+     * \brief Advance one time step of the given time loop
+     * \param vars The variables object at the current time level.
+ * \param t The current time level + * \param dt The time step size to be used + * \note We expect the time level in vars to correspond to the given time `t` + * \todo: TODO: Add time step control if the pde solver doesn't converge + */ + void step(Variables& vars, const Scalar t, const Scalar dt) + { + // make sure there are no traces of previous stages + pdeSolver_->assembler().clearStages(); + + for (auto stageIdx = 1UL; stageIdx <= msMethod_->numStages(); ++stageIdx) + { + // extract parameters for this stage from the time stepping method + auto stageParams = std::make_shared(*msMethod_, stageIdx, t, dt); + + // prepare the assembler for this stage + pdeSolver_->assembler().prepareStage(vars, stageParams); + + // assemble & solve + pdeSolver_->solve(vars); + } + + // clear traces of previously registered stages + pdeSolver_->assembler().clearStages(); + } + + /*! + * \brief Set/change the time step method + */ + void setMethod(std::shared_ptr> msMethod) + { msMethod_ = msMethod; } + +private: + std::shared_ptr pdeSolver_; + std::shared_ptr> msMethod_; +}; + +} // end namespace Dumux + +#endif diff --git a/dumux/timestepping/timelevel.hh b/dumux/timestepping/timelevel.hh new file mode 100644 index 0000000000000000000000000000000000000000..0e106539aee9244c08c67952f459fec6810bec32 --- /dev/null +++ b/dumux/timestepping/timelevel.hh @@ -0,0 +1,78 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/***************************************************************************** + * See the file COPYING for full copying permissions. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, either version 3 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program. If not, see . * + *****************************************************************************/ +/*! + * \file + * \brief Class that represents a time level during time integration. + */ +#ifndef DUMUX_TIME_LEVEL_HH +#define DUMUX_TIME_LEVEL_HH + +namespace Dumux { + +/*! + * \brief Class that represents a time level during time integration. + */ +template +class TimeLevel +{ +public: + + /*! + * \brief Construct a time level with a time. + * \note This can be used in contexts outside of time integration, + * where no information on a previous time or time step size is needed. + */ + explicit TimeLevel(Scalar curTime) + : curTime_(curTime) + , prevTime_(curTime) + , timeStepFraction_(1.0) + {} + + /*! + * \brief Construct a time level with information on an ongoing time step. + * \param curTime The current time level + * \param prevTime The previous time level + * \param dtFraction The fraction of a time step this level corresponds to. + * \note Within a time integration step, several time levels might occur + * when multi-stage methods are used. The argument dtFraction allows + * for determining the time that will be reached at the end of the + * time integration step. 
+     */
+    explicit TimeLevel(Scalar curTime, Scalar prevTime, Scalar dtFraction)
+    : curTime_(curTime)
+    , prevTime_(prevTime)
+    , timeStepFraction_(dtFraction)
+    {}
+
+    //! Return the current time
+    Scalar current() const { return curTime_; }
+    //! Return the time at the beginning of time integration
+    Scalar previous() const { return prevTime_; }
+    //! Return the fraction of the time step this level corresponds to
+    Scalar timeStepFraction() const { return timeStepFraction_; }
+
+private:
+    Scalar curTime_;
+    Scalar prevTime_;
+    Scalar timeStepFraction_;
+};
+
+} // end namespace Dumux
+
+#endif
diff --git a/test/discretization/CMakeLists.txt b/test/discretization/CMakeLists.txt
index 25c1bdd297e74a772cd7d985038aa7cde682c038..c9e5b31c6e8d4a2cc81eb1d8e34be7223a93060b 100644
--- a/test/discretization/CMakeLists.txt
+++ b/test/discretization/CMakeLists.txt
@@ -3,3 +3,9 @@ add_subdirectory(staggered)
 add_subdirectory(box)
 add_subdirectory(projection)
 add_subdirectory(rotationsymmetry)
+
+dune_add_test(NAME test_fvgridvariables
+              LABELS unit
+              SOURCES test_fvgridvariables.cc
+              COMMAND ./test_fvgridvariables
+              CMD_ARGS -Problem.Name gridvarstest)
diff --git a/test/discretization/test_fvgridvariables.cc b/test/discretization/test_fvgridvariables.cc
new file mode 100644
index 0000000000000000000000000000000000000000..7d35a6b9f141a80469df0101420ec66020c09c71
--- /dev/null
+++ b/test/discretization/test_fvgridvariables.cc
@@ -0,0 +1,146 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \brief Test for finite volume grid variables
+ */
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+// we use the 1p type tag here so that we don't have to
+// define grid flux vars cache & vol vars cache...
+#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +namespace Dumux { + +template +class MockSpatialParams +: public FVSpatialParamsOneP> +{ +public: + using PermeabilityType = Scalar; +}; + +template +class MockProblem : public FVProblem +{ + using Parent = FVProblem; +public: + using Parent::Parent; +}; + +namespace Properties { + +// new type tags +namespace TTag { +struct GridVariablesTest { using InheritsFrom = std::tuple; }; +struct GridVariablesTestBox { using InheritsFrom = std::tuple; }; +} // end namespace TTag + +// property definitions +template +struct Grid +{ using type = Dune::YaspGrid<2>; }; + +template +struct Problem +{ using type = MockProblem; }; + +template +struct SpatialParams +{ +private: + using Scalar = GetPropType; + using GG = GetPropType; +public: + using type = MockSpatialParams; +}; + +template +struct FluidSystem +{ +private: + using Scalar = GetPropType; +public: + using type = FluidSystems::OnePLiquid>; +}; + +} // end namespace Properties +} // end namespace Dumux + +int main (int argc, char *argv[]) +{ + Dune::MPIHelper::instance(argc, argv); + + using namespace Dumux; + Dumux::Parameters::init(argc, argv); + + using TypeTag = Properties::TTag::GridVariablesTestBox; + using Grid = GetPropType; + using GridFactory = Dune::StructuredGridFactory; + auto grid = GridFactory::createCubeGrid({0.0, 0.0}, {1.0, 1.0}, {2, 2}); + + using GridGeometry = GetPropType; + auto gridGeometry = std::make_shared(grid->leafGridView()); + + using Problem = GetPropType; + auto problem = std::make_shared(gridGeometry); + + using GridVariables = GetPropType; + + // constructor leaving the solution uninitialized, not resized + auto gridVariables = std::make_shared(problem, gridGeometry); + if (gridVariables->dofs().size() != 0) + DUNE_THROW(Dune::Exception, "Expected uninitialized solution"); + + // Construction with a solution + using SolutionVector = GetPropType; + SolutionVector x; x.resize(gridGeometry->numDofs()); x = 0.0; + gridVariables = std::make_shared(problem, gridGeometry, x); + + // Construction from a moved solution (TODO: how to check if move succeeded?) 
+    gridVariables = std::make_shared(problem, gridGeometry, std::move(x));
+
+    // Construction from initializer lambda
+    const auto init = [gridGeometry] (auto& x) { x.resize(gridGeometry->numDofs()); x = 2.25; };
+    gridVariables = std::make_shared(problem, gridGeometry, init);
+
+    const auto& dofs = gridVariables->dofs();
+    if (std::any_of(dofs.begin(), dofs.end(),
+                    [] (auto d) { return Dune::FloatCmp::ne(2.25, d[0]); }))
+        DUNE_THROW(Dune::Exception, "Unexpected dof value");
+
+    std::cout << "\nAll tests passed" << std::endl;
+    return 0;
+}
diff --git a/test/nonlinear/newton/test_newton.cc b/test/nonlinear/newton/test_newton.cc
index 3a0d48a0d718424e2dc0ce5af7e5adfac46eea84..c1d81048da9c3f7377dddfb69188be91f8b17b94 100644
--- a/test/nonlinear/newton/test_newton.cc
+++ b/test/nonlinear/newton/test_newton.cc
@@ -2,6 +2,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -32,13 +33,12 @@ class MockScalarAssembler
 {
 public:
     using ResidualType = Dune::BlockVector;
-    using Scalar = double;
+    using Variables = ResidualType;
     using JacobianMatrix = double;
+    using Scalar = double;

     void setLinearSystem() {}

-    bool isStationaryProblem() { return false; }
-
     ResidualType prevSol() { return ResidualType(0.0); }

     void resetTimeStep(const ResidualType& sol) {}
@@ -59,14 +59,6 @@ public:

     ResidualType& residual() { return res_; }

-    double residualNorm(const ResidualType& sol)
-    {
-        assembleResidual(sol);
-        return res_[0];
-    }
-
-    void updateGridVariables(const ResidualType& sol) {}
-
 private:
     JacobianMatrix jac_;
     ResidualType res_;
@@ -78,11 +70,19 @@ public:
     void setResidualReduction(double residualReduction) {}

     template
-    bool solve (const double& A, Vector& x, const Vector& b) const
+    bool solve(const double& A, Vector& x, const Vector& b) const
     {
         x[0] = b[0]/A;
         return true;
     }
+
+    double norm(const Dune::BlockVector& residual) const
+    {
+        assert(residual.size() == 1);
+
+        using std::abs;
+        return abs(residual[0]);
+    }
 };
 } // end namespace Dumux
diff --git a/test/porousmediumflow/1p/compressible/instationary/main.cc b/test/porousmediumflow/1p/compressible/instationary/main.cc
index 509e4eb9faecfa3478c430bd9d07fae74caaf02d..e914140ef72925c2a110e1880035fce6f017bf0f 100644
--- a/test/porousmediumflow/1p/compressible/instationary/main.cc
+++ b/test/porousmediumflow/1p/compressible/instationary/main.cc
@@ -43,11 +43,15 @@
 #include
 #include

-#include
+#include
+#include
+#include
+#include

 #include
 #include

+
 int main(int argc, char** argv)
 {
     using namespace Dumux;
@@ -93,14 +97,12 @@ int main(int argc, char** argv)

     // the solution vector
     using SolutionVector = GetPropType;
-    SolutionVector x(gridGeometry->numDofs());
-    problem->applyInitialSolution(x);
-    auto xOld = x;

     // the grid variables
     using GridVariables = GetPropType;
-    auto gridVariables = std::make_shared(problem, gridGeometry);
-    gridVariables->init(x);
+    const auto init = [problem] (auto& x) { problem->applyInitialSolution(x); };
+    auto gridVariables = std::make_shared(problem, gridGeometry, init);
+    auto xOld = gridVariables->dofs();

     // get some time loop parameters
     using Scalar = GetPropType;
@@ -109,7 +111,7 @@ int main(int argc, char** argv)
     auto maxDt = getParam("TimeLoop.MaxTimeStepSize");

     // initialize the vtk output module
-    VtkOutputModule vtkWriter(*gridVariables, x, problem->name());
+    VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name());
     using VelocityOutput = GetPropType;
     vtkWriter.addVelocityOutput(std::make_shared(*gridVariables));
     using IOFields = GetPropType;
@@ -120,9 +122,20 @@ int main(int argc, char** argv)
     auto timeLoop = std::make_shared>(0.0, dt, tEnd);
     timeLoop->setMaxTimeStepSize(maxDt);

-    // the assembler with time loop for instationary problem
-    using Assembler = FVAssembler;
-    auto assembler = std::make_shared(problem, gridGeometry, gridVariables, timeLoop, xOld);
+    // use implicit Euler for time integration
+    auto timeMethod = std::make_shared>();
+
+    // create assembler & linear solver
+    // TODO: The operators or local operator could be a property, just as is LocalResidual currently
+    using ModelTraits = GetPropType;
+    using FluxVariables = GetPropType;
+    using ElemVariables = typename GridVariables::LocalView;
+
+    using ImmiscibleOperators = FVImmiscibleOperators;
+    using LocalOperator = FVLocalOperator;
+
+    using Assembler = Assembler;
+    auto assembler = std::make_shared(gridGeometry, *timeMethod);

     // the linear solver
     using LinearSolver = ILU0BiCGSTABBackend;
@@ -130,7 +143,11 @@ int main(int argc, char** argv)

     // the non-linear solver
     using NewtonSolver = Dumux::NewtonSolver;
-    NewtonSolver nonLinearSolver(assembler, linearSolver);
+    auto nonLinearSolver = std::make_shared(assembler, linearSolver);
+
+    // the time stepper for time integration
+    using TimeStepper = MultiStageTimeStepper;
+    TimeStepper timeStepper(nonLinearSolver, timeMethod);

     // set some check points for the time loop
     timeLoop->setPeriodicCheckPoint(tEnd/10.0);
@@ -138,12 +155,8 @@ int main(int argc, char** argv)
     // time loop
     timeLoop->start(); do
     {
-        // linearize & solve
-        nonLinearSolver.solve(x, *timeLoop);
-
-        // make the new solution the old solution
-        xOld = x;
-        gridVariables->advanceTimeStep();
+        // do time integration
+        timeStepper.step(*gridVariables, timeLoop->time(), timeLoop->timeStepSize());

         // advance the time loop to the next step
         timeLoop->advanceTimeStep();
@@ -156,7 +169,7 @@ int main(int argc, char** argv)
         timeLoop->reportTimeStep();

         // set new dt as suggested by the newton solver
-        timeLoop->setTimeStepSize(nonLinearSolver.suggestTimeStepSize(timeLoop->timeStepSize()));
+        timeLoop->setTimeStepSize(nonLinearSolver->suggestTimeStepSize(timeLoop->timeStepSize()));

     } while (!timeLoop->finished());
diff --git a/test/porousmediumflow/1p/incompressible/main.cc b/test/porousmediumflow/1p/incompressible/main.cc
index 4f3818bcb715445259643d91050e2c4f3c725297..869689a14fceaeb88103d67d745e90640897c203 100644
--- a/test/porousmediumflow/1p/incompressible/main.cc
+++ b/test/porousmediumflow/1p/incompressible/main.cc
@@ -46,7 +46,9 @@
 #include
 #include

-#include
+#include
+#include
+#include

 #include "problem.hh"
 #include "../internaldirichlet/problem.hh"
@@ -117,17 +119,14 @@ int main(int argc, char** argv)
     using Problem = GetPropType;
     auto problem = std::make_shared(gridGeometry);

-    // the solution vector
-    using SolutionVector = GetPropType;
-    SolutionVector x(gridGeometry->numDofs());
-
     // the grid variables
     using GridVariables = GetPropType;
-    auto gridVariables = std::make_shared(problem, gridGeometry);
-    gridVariables->init(x);
+    auto initX = [&] (auto& x) { x.resize(gridGeometry->numDofs()); x = 0.0; };
+    auto gridVariables = std::make_shared(problem, gridGeometry, initX);

     // initialize the vtk output module
-    VtkOutputModule vtkWriter(*gridVariables, x, problem->name());
+    using SolutionVector = GetPropType;
+    VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name());
     using VelocityOutput = GetPropType;
     vtkWriter.addVelocityOutput(std::make_shared(*gridVariables));
     using IOFields = GetPropType;
@@ -135,15 +134,23 @@ int main(int argc, char** argv)
diff --git a/test/porousmediumflow/1p/incompressible/main.cc b/test/porousmediumflow/1p/incompressible/main.cc
index 4f3818bcb715445259643d91050e2c4f3c725297..869689a14fceaeb88103d67d745e90640897c203 100644
--- a/test/porousmediumflow/1p/incompressible/main.cc
+++ b/test/porousmediumflow/1p/incompressible/main.cc
@@ -46,7 +46,9 @@
 #include
 #include
 
-#include
+#include
+#include
+#include
 
 #include "problem.hh"
 #include "../internaldirichlet/problem.hh"
@@ -117,17 +119,14 @@ int main(int argc, char** argv)
     using Problem = GetPropType;
     auto problem = std::make_shared(gridGeometry);
 
-    // the solution vector
-    using SolutionVector = GetPropType;
-    SolutionVector x(gridGeometry->numDofs());
-
     // the grid variables
     using GridVariables = GetPropType;
-    auto gridVariables = std::make_shared(problem, gridGeometry);
-    gridVariables->init(x);
+    auto initX = [&] (auto& x) { x.resize(gridGeometry->numDofs()); x = 0.0; };
+    auto gridVariables = std::make_shared(problem, gridGeometry, initX);
 
     // intialize the vtk output module
-    VtkOutputModule vtkWriter(*gridVariables, x, problem->name());
+    using SolutionVector = GetPropType;
+    VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name());
     using VelocityOutput = GetPropType;
     vtkWriter.addVelocityOutput(std::make_shared(*gridVariables));
     using IOFields = GetPropType;
@@ -135,15 +134,23 @@ int main(int argc, char** argv)
     vtkWriter.write(0.0);
 
     // create assembler & linear solver
-    using Assembler = FVAssembler;
-    auto assembler = std::make_shared(problem, gridGeometry, gridVariables);
+    // TODO: The operators or local operator could be a property, just as is LocalResidual currently
+    using ModelTraits = GetPropType;
+    using FluxVariables = GetPropType;
+    using ElemVariables = typename GridVariables::LocalView;
+
+    using ImmiscibleOperators = FVImmiscibleOperators;
+    using LocalOperator = FVLocalOperator;
+
+    using Assembler = Assembler;
+    auto assembler = std::make_shared(gridGeometry);
 
     using LinearSolver = SSORCGBackend;
     auto linearSolver = std::make_shared();
 
     // solver the linear problem
     LinearPDESolver solver(assembler, linearSolver);
-    solver.solve(x);
+    solver.solve(*gridVariables);
 
     // output result to vtk
     vtkWriter.write(1.0);
@@ -177,7 +184,7 @@ int main(int argc, char** argv)
         auto elemFluxVarsCache = localView(gridVariables->gridFluxVarsCache());
 
         fvGeometry.bind(element);
-        elemVolVars.bind(element, fvGeometry, x);
+        elemVolVars.bind(element, fvGeometry, gridVariables->dofs());
         elemFluxVarsCache.bind(element, fvGeometry, elemVolVars);
 
         velocityOutput.calculateVelocity(velocity, element, fvGeometry, elemVolVars, elemFluxVarsCache, 0);
@@ -200,7 +207,7 @@ int main(int argc, char** argv)
 
     // For the mpfa test, write out the gradients in the scv centers
     if (getParam("IO.WriteMpfaVelocities", false))
-        writeMpfaVelocities(*gridGeometry, *gridVariables, x);
+        writeMpfaVelocities(*gridGeometry, *gridVariables, gridVariables->dofs());
 
     if (mpiHelper.rank() == 0)
         Parameters::print();
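Note the changed solver contract in this stationary test: LinearPDESolver::solve now receives the grid variables instead of a plain solution vector and updates the dofs stored inside them, which is presumably why the vtk writer and the post-processing loops can keep reading gridVariables->dofs() after the solve. Condensed, the new wiring is roughly the following (a sketch; the template arguments follow the usings above, and the single-argument assembler constructor assumes a stationary problem with an implicit pattern):

    // stationary case: the assembler only needs the grid geometry
    auto assembler = std::make_shared<Assembler>(gridGeometry);
    auto linearSolver = std::make_shared<LinearSolver>();

    // the unknown is the variables object; its dofs are updated in place
    LinearPDESolver<Assembler, LinearSolver> solver(assembler, linearSolver);
    solver.solve(*gridVariables);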
diff --git a/test/porousmediumflow/2p2c/waterair/main.cc b/test/porousmediumflow/2p2c/waterair/main.cc
index e58832f747be9d5dffc2618444e6a8aa79819d25..6ac940426adf4030fb9cdfb989635b774d71a1eb 100644
--- a/test/porousmediumflow/2p2c/waterair/main.cc
+++ b/test/porousmediumflow/2p2c/waterair/main.cc
@@ -45,6 +45,33 @@
 // the problem definitions
 #include "problem.hh"
 
+//! Custom assembler to test assembly with grid variables
+template
+class GridVarsAssembler : public Assembler
+{
+public:
+    using Assembler::Assembler;
+    using typename Assembler::GridVariables;
+    using typename Assembler::ResidualType;
+
+    using Variables = GridVariables;
+
+    void assembleJacobianAndResidual(const GridVariables& gridVars)
+    {
+        Assembler::assembleJacobianAndResidual(gridVars.dofs());
+    }
+
+    void assembleResidual(const GridVariables& gridVars)
+    {
+        Assembler::assembleResidual(gridVars.dofs());
+    }
+
+    auto residualNorm(const GridVariables& gridVars)
+    {
+        return Assembler::residualNorm(gridVars.dofs());
+    }
+};
+
 int main(int argc, char** argv)
 {
     using namespace Dumux;
@@ -84,17 +111,15 @@ int main(int argc, char** argv)
 
     // the solution vector
     using SolutionVector = GetPropType;
-    SolutionVector x;
-    problem->applyInitialSolution(x);
-    auto xOld = x;
 
     // the grid variables
     using GridVariables = GetPropType;
-    auto gridVariables = std::make_shared(problem, gridGeometry);
-    gridVariables->init(x);
+    const auto init = [problem] (auto& x) { problem->applyInitialSolution(x); };
+    auto gridVariables = std::make_shared(problem, gridGeometry, init);
+    auto xOld = gridVariables->dofs();
 
     // intialize the vtk output module
-    VtkOutputModule vtkWriter(*gridVariables, x, problem->name());
+    VtkOutputModule vtkWriter(*gridVariables, gridVariables->dofs(), problem->name());
     using VelocityOutput = GetPropType;
     vtkWriter.addVelocityOutput(std::make_shared(*gridVariables));
     // Add model specific output fields
@@ -110,7 +135,7 @@ int main(int argc, char** argv)
     timeLoop->setMaxTimeStepSize(maxDt);
 
     // the assembler with time loop for instationary problem
-    using Assembler = FVAssembler;
+    using Assembler = GridVarsAssembler>;
     auto assembler = std::make_shared(problem, gridGeometry, gridVariables, timeLoop, xOld);
 
     // the linear solver
@@ -125,10 +150,10 @@ int main(int argc, char** argv)
     timeLoop->start(); do
     {
         // solve the non-linear system with time step control
-        nonLinearSolver.solve(x, *timeLoop);
+        nonLinearSolver.solve(*gridVariables, *timeLoop);
 
         // make the new solution the old solution
-        xOld = x;
+        xOld = gridVariables->dofs();
         gridVariables->advanceTimeStep();
 
         // advance to the time loop to the next step