diff --git a/dumux/assembly/CMakeLists.txt b/dumux/assembly/CMakeLists.txt
index 2543a6631f3a61445e1bbb4aeeb8461b93115bd7..63a13d1e43643adf778ad09a48d437e10987d427 100644
--- a/dumux/assembly/CMakeLists.txt
+++ b/dumux/assembly/CMakeLists.txt
@@ -1,4 +1,5 @@
install(FILES
+assembler.hh
boxlocalassembler.hh
boxlocalresidual.hh
cclocalassembler.hh
@@ -10,6 +11,7 @@ fvlocalassemblerbase.hh
fvlocalresidual.hh
initialsolution.hh
jacobianpattern.hh
+localassembler.hh
numericepsilon.hh
partialreassembler.hh
staggeredfvassembler.hh
diff --git a/dumux/assembly/assembler.hh b/dumux/assembly/assembler.hh
new file mode 100644
index 0000000000000000000000000000000000000000..fa79c7d9fa5c52321eb4cb574c711b0e23defe4c
--- /dev/null
+++ b/dumux/assembly/assembler.hh
@@ -0,0 +1,463 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief Assembler class for residuals and jacobian matrices
+ * for grid-based numerical schemes.
+ */
+#ifndef DUMUX_ASSEMBLER_HH
+#define DUMUX_ASSEMBLER_HH
+
+#include <memory>
+#include <vector>
+
+#include <dune/common/exceptions.hh>
+
+#include <dumux/common/exceptions.hh>
+#include <dumux/common/numeqvector.hh>
+#include <dumux/discretization/method.hh>
+#include <dumux/parallel/vectorcommdatahandle.hh>
+
+#include <dumux/assembly/diffmethod.hh>
+#include <dumux/assembly/partialreassembler.hh>
+#include <dumux/timestepping/multistagetimestepper.hh>
+#include <dumux/timestepping/timelevel.hh>
+
+#include "jacobianpattern.hh"
+
+// TODO: Remove when linear algebra traits are available
+#include <dune/common/fmatrix.hh>
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/istl/bvector.hh>
+
+namespace Dumux::Experimental {
+
+//! Default types used for the linear system
+template<class SolutionVector>
+struct DefaultLinearSystemTraits;
+
+//! Default linear system types for Dune::BlockVector
+template<class PrimaryVariables>
+struct DefaultLinearSystemTraits<Dune::BlockVector<PrimaryVariables>>
+{
+private:
+ static constexpr int numEq = NumEqVectorTraits<PrimaryVariables>::numEq;
+ using Scalar = typename PrimaryVariables::value_type;
+ using MatrixBlockType = Dune::FieldMatrix<Scalar, numEq, numEq>;
+
+public:
+ using ResidualVector = Dune::BlockVector<PrimaryVariables>;
+ using JacobianMatrix = Dune::BCRSMatrix<MatrixBlockType>;
+};
+
+/*!
+ * \ingroup Assembly
+ * \brief A linear system assembler (residual and Jacobian) for grid-based numerical schemes
+ * \tparam LA The local assembler (element-local assembly)
+ * \tparam LST The linear system traits (types used for jacobians and residuals)
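+ *
+ * A minimal usage sketch (the local assembler type \c MyLocalAssembler is exemplary,
+ * \c gridGeometry is a shared pointer to the grid geometry):
+ * \code
+ * Assembler<MyLocalAssembler> assembler(gridGeometry);
+ * assembler.setLinearSystem();
+ * assembler.assembleJacobianAndResidual(gridVariables);
+ * \endcode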
+ */
+template<class LA, class LST = DefaultLinearSystemTraits<typename LA::GridVariables::SolutionVector>>
+class Assembler
+{
+ using GV = typename LA::GridVariables;
+ using GG = typename GV::GridGeometry;
+
+ using GridView = typename GG::GridView;
+ using Element = typename GridView::template Codim<0>::Entity;
+
+public:
+ //! export the types used for the linear system
+ using JacobianMatrix = typename LST::JacobianMatrix;
+ using ResidualVector = typename LST::ResidualVector;
+ using ResidualType = ResidualVector;
+
+ //! export the types underlying discrete solutions
+ using Scalar = typename GV::Scalar;
+ using SolutionVector = typename GV::SolutionVector;
+
+ //! export the types involved in element-local assembly
+ using LocalAssembler = LA;
+ using LocalOperator = typename LA::LocalOperator;
+
+ //! the variables representing an evaluation point
+ using GridVariables = GV;
+ using Variables = GridVariables;
+
+ //! export the underlying grid geometry type
+ using GridGeometry = GG;
+
+ //! export the type for parameters of a stage in time integration
+ using StageParams = MultiStageParams<Scalar>;
+
+ /*!
+ * \brief The Constructor from a grid geometry.
+ * \param gridGeometry A grid geometry instance
+ * \param diffMethod The method used for computing partial derivatives
+ * \note This assembler class is, after construction, defined for a specific equation
+ * (defined by the LocalOperator inside the LocalAssembler) and a specific grid
+ * geometry - which defines the connectivity of the degrees of freedom of the
+ * underlying discretization scheme on a particular grid. The evaluation point,
+ * consisting of a particular solution/variables/parameters may vary, and therefore,
+ * an instance of the grid variables is passed to the assembly functions.
+ * \note This constructor (without time integration method) assumes that a stationary problem
+ * is assembled, and thus, an implicit jacobian pattern is used.
+ */
+ Assembler(std::shared_ptr<const GridGeometry> gridGeometry,
+ DiffMethod diffMethod = DiffMethod::numeric)
+ : gridGeometry_(gridGeometry)
+ , isImplicit_(true)
+ , diffMethod_(diffMethod)
+ {}
+
+ /*!
+ * \brief The Constructor from a grid geometry.
+ * \param gridGeometry A grid geometry instance
+ * \param method the time integration method that will be used.
+ */
+ template<class TimeIntegrationMethod>
+ Assembler(std::shared_ptr<const GridGeometry> gridGeometry,
+ const TimeIntegrationMethod& method,
+ DiffMethod diffMethod = DiffMethod::numeric)
+ : gridGeometry_(gridGeometry)
+ , isImplicit_(method.implicit())
+ , diffMethod_(diffMethod)
+ {}
+
+ /*!
+ * \brief Assembles the Jacobian matrix and the residual around the given evaluation point
+ * which is determined by the grid variables, containing all quantities required
+ * to evaluate the equations to be assembled.
+ * \param gridVariables The variables corresponding to the given solution state
+ * \note We assume the grid geometry on which the grid variables are defined
+ * to be the same as the one used to instantiate this class.
+ * \todo TODO: The grid variables have to be non-const in order to compute
+ * the derivatives in the case of global caching. Could this be
+ * circumvented somehow?
+ */
+ template<class PartialReassembler = DefaultPartialReassembler>
+ void assembleJacobianAndResidual(GridVariables& gridVariables,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs());
+
+ resetJacobian_(partialReassembler);
+ resetResidual_();
+
+ assemble_([&](const Element& element)
+ {
+ auto localAssembler = this->makeLocalAssembler_(element, gridVariables);
+ localAssembler.assembleJacobianAndResidual(*jacobian_, *residual_, partialReassembler);
+ });
+
+ // TODO: Put these into discretization-specific helpers?
+ enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_);
+ enforceInternalConstraints_(gridVariables, *jacobian_, *residual_);
+ enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_);
+ }
+
+ /*!
+ * \brief Assembles the Jacobian matrix of the discrete system of equations
+ * around a given state represented by the grid variables object.
+ */
+ void assembleJacobian(GridVariables& gridVariables)
+ {
+ assert(gridVariables.gridGeometry().numDofs() == gridGeometry().numDofs());
+
+ resetJacobian_();
+
+ assemble_([&](const Element& element)
+ {
+ auto localAssembler = this->makeLocalAssembler_(element, gridVariables);
+ localAssembler.assembleJacobian(*jacobian_);
+ });
+
+ // TODO: Put these into discretization-specific helpers?
+ enforceDirichletConstraints_(gridVariables, *jacobian_, *residual_);
+ enforceInternalConstraints_(gridVariables, *jacobian_, *residual_);
+ enforcePeriodicConstraints_(gridVariables, *jacobian_, *residual_);
+ }
+
+ /*!
+ * \brief Assembles the residual for a given state represented by the provided
+ * grid variables object, using the internal residual vector to store the result.
+ */
+ void assembleResidual(GridVariables& gridVariables)
+ {
+ resetResidual_();
+ assembleResidual(*residual_, gridVariables);
+ }
+
+ /*!
+ * \brief Assembles the residual for a given state represented by the provided
+ * grid variables object, using the provided residual vector to store the result.
+ */
+ void assembleResidual(ResidualVector& r, GridVariables& gridVariables) const
+ {
+ assemble_([&](const Element& element)
+ { this->makeLocalAssembler_(element, gridVariables).assembleResidual(r); });
+ }
+
+ //! Will become obsolete once the new linear solvers are available
+ Scalar residualNorm(GridVariables& gridVars) const
+ {
+ ResidualType residual(numDofs());
+ assembleResidual(residual, gridVars);
+
+ // for box communicate the residual with the neighboring processes
+ if (GridGeometry::discMethod == DiscretizationMethod::box && gridView().comm().size() > 1)
+ {
+ using VertexMapper = typename GridGeometry::VertexMapper;
+ VectorCommDataHandleSum<VertexMapper, ResidualType, GridGeometry::GridView::dimension>
+ sumResidualHandle(gridGeometry_->vertexMapper(), residual);
+ gridView().communicate(sumResidualHandle,
+ Dune::InteriorBorder_InteriorBorder_Interface,
+ Dune::ForwardCommunication);
+ }
+
+ // calculate the square norm of the residual
+ Scalar result2 = residual.two_norm2();
+ if (gridView().comm().size() > 1)
+ result2 = gridView().comm().sum(result2);
+
+ using std::sqrt;
+ return sqrt(result2);
+ }
+
+
+ /*!
+ * \brief Tells the assembler which jacobian and residual to use.
+ * This also resizes the containers to the required sizes and sets the
+ * sparsity pattern of the jacobian matrix.
+ */
+ void setLinearSystem(std::shared_ptr<JacobianMatrix> A,
+ std::shared_ptr<ResidualVector> r)
+ {
+ jacobian_ = A;
+ residual_ = r;
+
+ // check and/or set the BCRS matrix's build mode
+ if (jacobian_->buildMode() == JacobianMatrix::BuildMode::unknown)
+ jacobian_->setBuildMode(JacobianMatrix::random);
+ else if (jacobian_->buildMode() != JacobianMatrix::BuildMode::random)
+ DUNE_THROW(Dune::NotImplemented, "Only BCRS matrices with random build mode are supported at the moment");
+
+ setJacobianPattern();
+ setResidualSize();
+ }
+
+ /*!
+ * \brief The version without arguments uses the default constructor to create
+ * the jacobian and residual objects in this assembler, to be used if
+ * they are not needed outside of this class
+ */
+ void setLinearSystem()
+ {
+ jacobian_ = std::make_shared<JacobianMatrix>();
+ jacobian_->setBuildMode(JacobianMatrix::random);
+ residual_ = std::make_shared<ResidualVector>();
+
+ setJacobianPattern();
+ setResidualSize();
+ }
+
+ /*!
+ * \brief Resizes the jacobian and sets the jacobian's sparsity pattern.
+ */
+ void setJacobianPattern()
+ {
+ // resize the jacobian and the residual
+ const auto numDofs = this->numDofs();
+ jacobian_->setSize(numDofs, numDofs);
+
+ // create occupation pattern of the jacobian
+ // TODO: Does the bool need to be at compile time in getJacobianPattern?
+ const auto occupationPattern = isImplicit_ ? getJacobianPattern<true>(gridGeometry())
+ : getJacobianPattern<false>(gridGeometry());
+
+ // export pattern to jacobian
+ occupationPattern.exportIdx(*jacobian_);
+ }
+
+ /*!
+ * \brief Prepare for a new stage within a time integration step.
+ * This caches the given grid variables, which are then used as a
+ * representation of the previous stage. Moreover, the given grid
+ * variables are then updated to the time level of the upcoming stage.
+ * \param gridVars the grid variables representing the previous stage
+ * \param params the parameters with the weights to be used in the upcoming stage
+ */
+ void prepareStage(GridVariables& gridVars,
+ std::shared_ptr<const StageParams> params)
+ {
+ stageParams_ = params;
+ const auto curStage = params->size() - 1;
+
+ // we keep track of previous stages, they are needed for residual assembly
+ prevStageVariables_.push_back(gridVars);
+
+ // Now we update the time level of the given grid variables
+ const auto t = params->timeAtStage(curStage);
+ const auto prevT = params->timeAtStage(0);
+ const auto dtFraction = params->timeStepFraction(curStage);
+ gridVars.updateTime(TimeLevel{t, prevT, dtFraction});
+ }
+
+ /*!
+ * \brief Remove traces from stages within a time integration step.
+ */
+ void clearStages()
+ {
+ prevStageVariables_.clear();
+ stageParams_ = nullptr;
+ }
+
+ //! Resizes the residual
+ void setResidualSize()
+ { residual_->resize(numDofs()); }
+
+ //! Returns the number of degrees of freedom
+ std::size_t numDofs() const
+ { return gridGeometry().numDofs(); }
+
+ //! The global finite volume geometry
+ const GridGeometry& gridGeometry() const
+ { return *gridGeometry_; }
+
+ //! The grid view
+ const GridView& gridView() const
+ { return gridGeometry().gridView(); }
+
+ //! The jacobian matrix
+ JacobianMatrix& jacobian()
+ { return *jacobian_; }
+
+ //! The residual vector (rhs)
+ ResidualVector& residual()
+ { return *residual_; }
+
+protected:
+ // make a local assembler instance
+ LocalAssembler makeLocalAssembler_(const Element& element, GridVariables& gridVars) const
+ {
+ // stationary assembly (no stage parameters set)
+ if (!stageParams_)
+ return LocalAssembler{element, gridVars, diffMethod_};
+
+ // instationary assembly within a stage of time integration
+ return LocalAssembler{element, prevStageVariables_, gridVars, stageParams_, diffMethod_};
+ }
+
+ // reset the residual vector to 0.0
+ void resetResidual_()
+ {
+ if (!residual_)
+ {
+ residual_ = std::make_shared<ResidualVector>();
+ setResidualSize();
+ }
+
+ (*residual_) = 0.0;
+ }
+
+ // reset the Jacobian matrix to 0.0
+ template<class PartialReassembler = DefaultPartialReassembler>
+ void resetJacobian_(const PartialReassembler *partialReassembler = nullptr)
+ {
+ if (!jacobian_)
+ {
+ jacobian_ = std::make_shared<JacobianMatrix>();
+ jacobian_->setBuildMode(JacobianMatrix::random);
+ setJacobianPattern();
+ }
+
+ if (partialReassembler)
+ partialReassembler->resetJacobian(*this);
+ else
+ *jacobian_ = 0.0;
+ }
+
+ /*!
+ * \brief Executes the given element-local assembly function for all elements of the grid
+ * \note Handles exceptions for parallel runs
+ * \throws NumericalProblem on all processes if an exception is thrown during assembly
+ */
+ template<typename AssembleElementFunc>
+ void assemble_(AssembleElementFunc&& assembleElement) const
+ {
+ // a state that will be checked on all processes
+ bool succeeded = false;
+
+ // try assembling using the local assembly function
+ try
+ {
+ // let the local assembler add the element contributions
+ for (const auto& element : elements(gridView()))
+ assembleElement(element);
+
+ // if we get here, everything worked well on this process
+ succeeded = true;
+ }
+ // on exception, mark this process as failed
+ catch (NumericalProblem &e)
+ {
+ std::cout << "rank " << gridView().comm().rank()
+ << " caught an exception while assembling:" << e.what()
+ << "\n";
+ succeeded = false;
+ }
+
+ // make sure everything worked well on all processes
+ if (gridView().comm().size() > 1)
+ succeeded = gridView().comm().min(succeeded);
+
+ // if not succeeded rethrow the error on all processes
+ if (!succeeded)
+ DUNE_THROW(NumericalProblem, "A process did not succeed in linearizing the system");
+ }
+
+ void enforceDirichletConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res)
+ { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ }
+
+ void enforceInternalConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res)
+ { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ }
+
+ void enforcePeriodicConstraints_(const GridVariables& gridVars, JacobianMatrix& jac, SolutionVector& res)
+ { /*TODO: Implement / where to put this? Currently, local assemblers do this*/ }
+
+private:
+ //! the grid geometry on which it is assembled
+ std::shared_ptr<const GridGeometry> gridGeometry_;
+
+ //! states if an implicit or explicit scheme is used (affects the jacobian pattern)
+ bool isImplicit_;
+
+ //! the method to compute the derivatives
+ DiffMethod diffMethod_;
+
+ //! shared pointers to the jacobian matrix and residual
+ std::shared_ptr<JacobianMatrix> jacobian_;
+ std::shared_ptr<ResidualVector> residual_;
+
+ //! parameters containing information on the current stage of time integration
+ std::shared_ptr<const StageParams> stageParams_ = nullptr;
+
+ //! keep track of the states of previous stages within a time integration step
+ std::vector<GridVariables> prevStageVariables_;
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
diff --git a/dumux/assembly/fv/CMakeLists.txt b/dumux/assembly/fv/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..890727a9263f546be0e2cc0938586ff7981885e1
--- /dev/null
+++ b/dumux/assembly/fv/CMakeLists.txt
@@ -0,0 +1,6 @@
+install(FILES
+boxlocalassembler.hh
+cclocalassembler.hh
+localoperator.hh
+operators.hh
+DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/assembly/fv)
diff --git a/dumux/assembly/fv/boxlocalassembler.hh b/dumux/assembly/fv/boxlocalassembler.hh
new file mode 100644
index 0000000000000000000000000000000000000000..9e49482ca12fff4b6dc69f959e179fa6682855ca
--- /dev/null
+++ b/dumux/assembly/fv/boxlocalassembler.hh
@@ -0,0 +1,479 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief An assembler for Jacobian and residual contribution
+ * per element for the box scheme.
+ */
+#ifndef DUMUX_BOX_LOCAL_ASSEMBLER_HH
+#define DUMUX_BOX_LOCAL_ASSEMBLER_HH
+
+#include <memory>
+#include <vector>
+#include <type_traits>
+
+#include <dune/common/exceptions.hh>
+#include <dune/grid/common/gridenums.hh>
+
+#include <dumux/common/parameters.hh>
+#include <dumux/common/numeqvector.hh>
+#include <dumux/common/numericdifferentiation.hh>
+#include <dumux/discretization/elementsolution.hh>
+
+#include <dumux/assembly/numericepsilon.hh>
+#include <dumux/assembly/diffmethod.hh>
+#include <dumux/assembly/entitycolor.hh>
+#include <dumux/assembly/partialreassembler.hh>
+#include <dumux/timestepping/multistagetimestepper.hh>
+
+
+namespace Dumux::Experimental {
+
+/*!
+ * \ingroup Assembly
+ * \brief An assembler for Jacobian and residual contribution
+ * per element for the box scheme.
+ * \tparam LO The element-local operator
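+ *
+ * A sketch of how the global assembler uses this class per element:
+ * \code
+ * BoxLocalAssembler<LO> localAssembler(element, gridVariables, DiffMethod::numeric);
+ * localAssembler.assembleJacobianAndResidual(jacobian, residual);
+ * \endcode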
+ */
+template<class LO>
+class BoxLocalAssembler
+{
+ using LocalContext = typename LO::LocalContext;
+ using ElementVariables = typename LocalContext::ElementVariables;
+ using ElementGridGeometry = typename LocalContext::ElementGridGeometry;
+
+ using GG = typename ElementGridGeometry::GridGeometry;
+ using GV = typename ElementVariables::GridVariables;
+
+ using FVElementGeometry = typename GG::LocalView;
+ using SubControlVolume = typename GG::SubControlVolume;
+ using Element = typename GG::GridView::template Codim<0>::Entity;
+
+ using PrimaryVariables = typename GV::PrimaryVariables;
+ using Scalar = typename GV::Scalar;
+
+ static constexpr int numEq = NumEqVectorTraits<PrimaryVariables>::numEq;
+
+public:
+
+ //! the parameters of a stage in time integration
+ using StageParams = MultiStageParams<Scalar>;
+
+ //! export the grid variables type on which to operate
+ using GridVariables = GV;
+
+ //! export the underlying local operator
+ using LocalOperator = LO;
+
+ //! export the vector storing the residuals of all dofs of the element
+ using ElementResidualVector = typename LO::ElementResidualVector;
+
+ /*!
+ * \brief Constructor for stationary problems.
+ */
+ explicit BoxLocalAssembler(const Element& element,
+ GridVariables& gridVariables,
+ DiffMethod dm = DiffMethod::numeric)
+ : diffMethod_(dm)
+ , gridVariables_(gridVariables)
+ , fvGeometry_(localView(gridVariables.gridGeometry()))
+ , elemVariables_(localView(gridVariables))
+ , prevElemVariables_()
+ , elementIsGhost_(element.partitionType() == Dune::GhostEntity)
+ , stageParams_(nullptr)
+ {
+ if (diffMethod_ != DiffMethod::numeric)
+ DUNE_THROW(Dune::NotImplemented, "Provided differentiation method");
+
+ fvGeometry_.bind(element);
+ elemVariables_.bind(element, fvGeometry_);
+ }
+
+ /*!
+ * \brief Constructor for instationary problems.
+ * \note Using this constructor, we assemble one stage within
+ * a time integration step using multi-stage methods.
+ */
+ explicit BoxLocalAssembler(const Element& element,
+ const std::vector<GridVariables>& prevGridVars,
+ GridVariables& gridVariables,
+ std::shared_ptr<const StageParams> stageParams,
+ DiffMethod dm = DiffMethod::numeric)
+ : diffMethod_(dm)
+ , gridVariables_(gridVariables)
+ , fvGeometry_(localView(gridVariables.gridGeometry()))
+ , elemVariables_(localView(gridVariables))
+ , prevElemVariables_()
+ , elementIsGhost_(element.partitionType() == Dune::GhostEntity)
+ , stageParams_(stageParams)
+ {
+ if (diffMethod_ != DiffMethod::numeric)
+ DUNE_THROW(Dune::NotImplemented, "Provided differentiation method");
+
+ if (prevGridVars.size() != stageParams->size() - 1)
+ DUNE_THROW(Dune::InvalidStateException, "Grid Variables for all stages needed");
+
+ fvGeometry_.bind(element);
+ for (const auto& gridVars : prevGridVars)
+ {
+ prevElemVariables_.emplace_back(localView(gridVars));
+ prevElemVariables_.back().bind(element, fvGeometry_);
+ }
+
+ elemVariables_.bind(element, fvGeometry_);
+ }
+
+ /*!
+ * \brief Computes the derivatives with respect to the given element and adds
+ * them to the global matrix. The element residual is written into the
+ * right hand side.
+ */
+ template<class JacobianMatrix, class ResidualVector, class PartialReassembler = DefaultPartialReassembler>
+ void assembleJacobianAndResidual(JacobianMatrix& jac,
+ ResidualVector& res,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ const auto& element = fvGeometry_.element();
+ const auto eIdxGlobal = fvGeometry_.gridGeometry().elementMapper().index(element);
+
+ if (partialReassembler && partialReassembler->elementColor(eIdxGlobal) == EntityColor::green)
+ {
+ const auto residual = evalLocalResidual();
+ for (const auto& scv : scvs(fvGeometry_))
+ res[scv.dofIndex()] += residual[scv.localDofIndex()];
+ }
+ else if (!elementIsGhost_)
+ {
+ const auto residual = assembleJacobianAndResidual_(jac, partialReassembler);
+ for (const auto& scv : scvs(fvGeometry_))
+ res[scv.dofIndex()] += residual[scv.localDofIndex()];
+ }
+ else
+ {
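+ // ghost element: do not linearize; fix all dofs that are not attached to
+ // interior or border vertices by a zero residual and a unit Jacobian diagonal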
+ static constexpr int dim = GG::GridView::dimension;
+
+ for (const auto& scv : scvs(fvGeometry_))
+ {
+ const auto vIdxLocal = scv.indexInElement();
+ const auto& v = fvGeometry_.element().template subEntity<dim>(vIdxLocal);
+
+ // do not change the non-ghost vertices
+ if (v.partitionType() == Dune::InteriorEntity ||
+ v.partitionType() == Dune::BorderEntity)
+ continue;
+
+ const auto dofIdx = scv.dofIndex();
+ res[dofIdx] = 0;
+ for (int pvIdx = 0; pvIdx < numEq; ++pvIdx)
+ jac[dofIdx][dofIdx][pvIdx][pvIdx] = 1.0;
+ }
+ }
+
+ auto applyDirichlet = [&] (const auto& scvI,
+ const auto& dirichletValues,
+ const auto eqIdx,
+ const auto pvIdx)
+ {
+ const auto& priVars = elemVariables_.elemVolVars()[scvI].priVars();
+ res[scvI.dofIndex()][eqIdx] = priVars[pvIdx] - dirichletValues[pvIdx];
+
+ auto& row = jac[scvI.dofIndex()];
+ for (auto col = row.begin(); col != row.end(); ++col)
+ row[col.index()][eqIdx] = 0.0;
+
+ jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0;
+
+ // if a periodic dof has Dirichlet values also apply the same Dirichlet values to the other dof
+ if (fvGeometry_.gridGeometry().dofOnPeriodicBoundary(scvI.dofIndex()))
+ {
+ const auto periodicDof = fvGeometry_.gridGeometry().periodicallyMappedDof(scvI.dofIndex());
+ res[periodicDof][eqIdx] = priVars[pvIdx] - dirichletValues[pvIdx];
+ const auto end = jac[periodicDof].end();
+ for (auto it = jac[periodicDof].begin(); it != end; ++it)
+ (*it) = periodicDof != it.index() ? 0.0 : 1.0;
+ }
+ };
+
+ enforceDirichletConstraints(applyDirichlet);
+ }
+
+ /*!
+ * \brief Computes the derivatives with respect to the dofs of the given
+ * element and adds them to the given jacobian matrix.
+ */
+ template<class JacobianMatrix>
+ void assembleJacobian(JacobianMatrix& jac)
+ {
+ assembleJacobianAndResidual_(jac);
+
+ auto applyDirichlet = [&] (const auto& scvI,
+ const auto& dirichletValues,
+ const auto eqIdx,
+ const auto pvIdx)
+ {
+ auto& row = jac[scvI.dofIndex()];
+ for (auto col = row.begin(); col != row.end(); ++col)
+ row[col.index()][eqIdx] = 0.0;
+
+ jac[scvI.dofIndex()][scvI.dofIndex()][eqIdx][pvIdx] = 1.0;
+ };
+
+ enforceDirichletConstraints(applyDirichlet);
+ }
+
+ //! Assemble the residual only
+ template<class ResidualVector>
+ void assembleResidual(ResidualVector& res)
+ {
+ const auto residual = evalLocalResidual();
+ for (const auto& scv : scvs(fvGeometry_))
+ res[scv.dofIndex()] += residual[scv.localDofIndex()];
+
+ auto applyDirichlet = [&] (const auto& scvI,
+ const auto& dirichletValues,
+ const auto eqIdx,
+ const auto pvIdx)
+ {
+ const auto& priVars = elemVariables_.elemVolVars()[scvI].priVars();
+ res[scvI.dofIndex()][eqIdx] = priVars[pvIdx] - dirichletValues[pvIdx];
+ };
+
+ enforceDirichletConstraints(applyDirichlet);
+ }
+
+ //! Enforce Dirichlet constraints
+ template<typename ApplyFunction>
+ void enforceDirichletConstraints(const ApplyFunction& applyDirichlet)
+ {
+ // enforce Dirichlet boundary conditions
+ evalDirichletBoundaries(applyDirichlet);
+ // TODO: take care of internal Dirichlet constraints (if enabled)
+ // enforceInternalDirichletConstraints(applyDirichlet);
+ }
+
+ //! Evaluates Dirichlet boundaries
+ template< typename ApplyDirichletFunctionType >
+ void evalDirichletBoundaries(ApplyDirichletFunctionType applyDirichlet)
+ {
+ const auto& problem = gridVariables_.gridVolVars().problem();
+
+ for (const auto& scvI : scvs(fvGeometry_))
+ {
+ const auto bcTypes = problem.boundaryTypes(fvGeometry_.element(), scvI);
+ if (bcTypes.hasDirichlet())
+ {
+ const auto dirichletValues = problem.dirichlet(fvGeometry_.element(), scvI);
+
+ // set the Dirichlet conditions in residual and jacobian
+ for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+ {
+ if (bcTypes.isDirichlet(eqIdx))
+ {
+ const auto pvIdx = bcTypes.eqToDirichletIndex(eqIdx);
+ assert(0 <= pvIdx && pvIdx < numEq);
+ applyDirichlet(scvI, dirichletValues, eqIdx, pvIdx);
+ }
+ }
+ }
+ }
+ }
+
+ //! Evaluate the complete local residual for the current element.
+ ElementResidualVector evalLocalResidual() const
+ {
+ if (isStationary())
+ {
+ const auto context = makeLocalContext(fvGeometry_, elemVariables_);
+ LocalOperator localOperator(context);
+ return elementIsGhost_ ? localOperator.getEmptyResidual()
+ : localOperator.evalFluxesAndSources();
+ }
+ else
+ {
+ ElementResidualVector residual(fvGeometry_.numScv());
+ residual = 0.0;
+
+ if (!elementIsGhost_)
+ {
+ // add the terms associated with previous stages
+ for (std::size_t k = 0; k < stageParams_->size()-1; ++k)
+ addStageTerms_(residual, k,
+ makeLocalContext(fvGeometry_, prevElemVariables_[k]));
+
+ // add the terms of the current stage
+ addStageTerms_(residual, stageParams_->size()-1,
+ makeLocalContext(fvGeometry_, elemVariables_));
+ }
+
+ return residual;
+ }
+ }
+
+ //! Return true if a stationary problem is assembled
+ bool isStationary() const
+ { return !stageParams_; }
+
+protected:
+
+ //! add the terms of a stage to the current element residual
+ void addStageTerms_(ElementResidualVector& r,
+ std::size_t stageIdx,
+ const LocalContext& context) const
+ {
+ LocalOperator localOperator(context);
+ if (!stageParams_->skipTemporal(stageIdx))
+ r.axpy(stageParams_->temporalWeight(stageIdx),
+ localOperator.evalStorage());
+ if (!stageParams_->skipSpatial(stageIdx))
+ r.axpy(stageParams_->spatialWeight(stageIdx),
+ localOperator.evalFluxesAndSources());
+ }
+
+ /*!
+ * \brief Computes the derivatives with respect to the dofs of the given
+ * element and adds them to the global matrix.
+ * \return The element residual at the current solution.
+ */
+ template<class JacobianMatrix, class PartialReassembler = DefaultPartialReassembler>
+ ElementResidualVector assembleJacobianAndResidual_(JacobianMatrix& A,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ // TODO: Do we need this to be constexpr?
+ if (diffMethod_ == DiffMethod::numeric)
+ return assembleJacobianAndResidualNumeric_(A, partialReassembler);
+ else
+ DUNE_THROW(Dune::NotImplemented, "Analytic assembler for box");
+ }
+
+ /*!
+ * \brief Computes the derivatives by means of numeric differentiation
+ * and adds them to the global matrix.
+ * \return The element residual at the current solution.
+ * \note This specialization is for the box scheme with numeric differentiation
+ */
+ template<class JacobianMatrix, class PartialReassembler = DefaultPartialReassembler>
+ ElementResidualVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ // alias for the variables of the current stage
+ auto& curVariables = elemVariables_;
+ auto& curElemVolVars = curVariables.elemVolVars();
+ const auto& x = curVariables.gridVariables().dofs();
+ const auto& element = fvGeometry_.element();
+ const auto& problem = curElemVolVars.gridVolVars().problem();
+
+ const auto origResiduals = evalLocalResidual();
+ const auto origElemSol = elementSolution(element, x, fvGeometry_.gridGeometry());
+ auto elemSol = origElemSol;
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // Calculate derivatives of the residual of all dofs in element with respect to themselves. //
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
+ ElementResidualVector partialDerivs(fvGeometry_.numScv());
+ for (const auto& scvI : scvs(fvGeometry_))
+ {
+ // dof index and corresponding actual pri vars
+ const auto globalI = scvI.dofIndex();
+ const auto localI = scvI.localDofIndex();
+
+ const auto origCurVolVars = curElemVolVars[scvI];
+ auto& curVolVars = getVolVarAcess_(curElemVolVars, gridVariables_.gridVolVars(), scvI);
+
+ // calculate derivatives w.r.t. the primary variables at the dof at hand
+ for (int pvIdx = 0; pvIdx < numEq; pvIdx++)
+ {
+ partialDerivs = 0.0;
+ auto evalResiduals = [&](Scalar priVar)
+ {
+ // update the volume variables and compute element residual
+ elemSol[scvI.localDofIndex()][pvIdx] = priVar;
+ curVolVars.update(elemSol, problem, element, scvI);
+ return evalLocalResidual();
+ };
+
+ static const NumericEpsilon<Scalar, numEq> numEps{problem.paramGroup()};
+ static const int numDiffMethod = getParamFromGroup<int>(problem.paramGroup(),
+ "Assembly.NumericDifferenceMethod");
+
+ // epsilon used for numeric differentiation
+ const auto eps = numEps(elemSol[localI][pvIdx], pvIdx);
+
+ // derive the residuals numerically
+ NumericDifferentiation::partialDerivative(evalResiduals, elemSol[localI][pvIdx], partialDerivs,
+ origResiduals, eps, numDiffMethod);
+
+ // TODO: Distinguish between implicit/explicit here. For explicit schemes,
+ // no entries between different scvs of an element are reserved. Thus,
+ // we currently get an error when using explicit schemes.
+ // TODO: Doesn't this mean we only have to evaluate the residual for a single
+ // scv instead of calling evalLocalResidual()? That computes the residuals
+ // and derivs for all other scvs of the element, too, which are never used.
+ // Note: this is the same in the current implementation of master.
+ // Should we try to optimize this for explicit schemes? Or adjust the Jacobian pattern?
+ // update the global stiffness matrix with the current partial derivatives
+ for (const auto& scvJ : scvs(fvGeometry_))
+ {
+ const auto globalJ = scvJ.dofIndex();
+ const auto localJ = scvJ.localDofIndex();
+
+ // don't add derivatives for green entities
+ if (!partialReassembler || partialReassembler->dofColor(globalJ) != EntityColor::green)
+ {
+ for (int eqIdx = 0; eqIdx < numEq; eqIdx++)
+ {
+ // A[i][col][eqIdx][pvIdx] is the rate of change of
+ // the residual of equation 'eqIdx' at dof 'i'
+ // depending on the primary variable 'pvIdx' at dof 'col'
+ A[globalJ][globalI][eqIdx][pvIdx] += partialDerivs[localJ][eqIdx];
+ }
+ }
+ }
+
+ // restore the original element solution & volume variables
+ elemSol[localI][pvIdx] = origElemSol[localI][pvIdx];
+ curVolVars = origCurVolVars;
+
+ // TODO additional dof dependencies
+ }
+ }
+
+ return origResiduals;
+ }
+
+ //! Returns the volume variables of the local view (case of no caching)
+ template<class ElemVolVars, class GridVolVars, std::enable_if_t<!GridVolVars::cachingEnabled, int> = 0>
+ auto& getVolVarAcess_(ElemVolVars& elemVolVars, GridVolVars& gridVolVars, const SubControlVolume& scv)
+ { return elemVolVars[scv]; }
+
+ //! Returns the volume variables of the grid vol vars (case of global caching)
+ template<class ElemVolVars, class GridVolVars, std::enable_if_t<GridVolVars::cachingEnabled, int> = 0>
+ auto& getVolVarAcess_(ElemVolVars& elemVolVars, GridVolVars& gridVolVars, const SubControlVolume& scv)
+ { return gridVolVars.volVars(scv); }
+
+ DiffMethod diffMethod_; //!< the type of differentiation method
+ GridVariables& gridVariables_; //!< reference to the grid variables
+ FVElementGeometry fvGeometry_; //!< element-local finite volume geometry
+ ElementVariables elemVariables_; //!< element variables of the current stage
+ std::vector<ElementVariables> prevElemVariables_; //!< element variables of prior stages
+
+ bool elementIsGhost_;
+ std::shared_ptr<const StageParams> stageParams_;
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
diff --git a/dumux/assembly/fv/cclocalassembler.hh b/dumux/assembly/fv/cclocalassembler.hh
new file mode 100644
index 0000000000000000000000000000000000000000..6210660368a3447a079c9faaf70ecb59b3cdbca8
--- /dev/null
+++ b/dumux/assembly/fv/cclocalassembler.hh
@@ -0,0 +1,477 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief An assembler for Jacobian and residual contribution
+ * per element for cell-centered finite volume schemes.
+ */
+#ifndef DUMUX_CC_LOCAL_ASSEMBLER_HH
+#define DUMUX_CC_LOCAL_ASSEMBLER_HH
+
+#include <memory>
+#include <vector>
+#include <type_traits>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/reservedvector.hh>
+#include <dune/grid/common/gridenums.hh>
+
+#include <dumux/common/parameters.hh>
+#include <dumux/common/numeqvector.hh>
+#include <dumux/common/reservedblockvector.hh>
+#include <dumux/common/numericdifferentiation.hh>
+#include <dumux/discretization/elementsolution.hh>
+
+#include <dumux/assembly/numericepsilon.hh>
+#include <dumux/assembly/diffmethod.hh>
+#include <dumux/assembly/entitycolor.hh>
+#include <dumux/assembly/partialreassembler.hh>
+#include <dumux/timestepping/multistagetimestepper.hh>
+
+
+namespace Dumux::Experimental {
+
+/*!
+ * \ingroup Assembly
+ * \brief An assembler for Jacobian and residual contribution
+ * per element for cell-centered finite volume schemes.
+ * \tparam LO The element-local operator
+ */
+template<class LO>
+class CCLocalAssembler
+{
+ using Operators = typename LO::Operators;
+ using LocalContext = typename LO::LocalContext;
+ using ElementVariables = typename LocalContext::ElementVariables;
+ using ElementGridGeometry = typename LocalContext::ElementGridGeometry;
+
+ using GG = typename ElementGridGeometry::GridGeometry;
+ using GV = typename ElementVariables::GridVariables;
+
+ using FVElementGeometry = typename GG::LocalView;
+ using SubControlVolume = typename GG::SubControlVolume;
+ using Element = typename GG::GridView::template Codim<0>::Entity;
+
+ using PrimaryVariables = typename GV::PrimaryVariables;
+ using NumEqVector = Dumux::NumEqVector<PrimaryVariables>;
+ using Scalar = typename GV::Scalar;
+
+ static constexpr int numEq = NumEqVectorTraits<PrimaryVariables>::numEq;
+ static constexpr int maxElementStencilSize = GG::maxElementStencilSize;
+ static constexpr bool enableGridFluxVarsCache = GV::GridFluxVariablesCache::cachingEnabled;
+
+public:
+
+ //! the parameters of a stage in time integration
+ using StageParams = MultiStageParams<Scalar>;
+
+ //! export the grid variables type on which to operate
+ using GridVariables = GV;
+
+ //! export the underlying local operator
+ using LocalOperator = LO;
+
+ //! export the vector storing the residuals of all dofs of the element
+ using ElementResidualVector = typename LO::ElementResidualVector;
+
+ /*!
+ * \brief Constructor for stationary problems.
+ */
+ explicit CCLocalAssembler(const Element& element,
+ GridVariables& gridVariables,
+ DiffMethod dm = DiffMethod::numeric)
+ : diffMethod_(dm)
+ , gridVariables_(gridVariables)
+ , fvGeometry_(localView(gridVariables.gridGeometry()))
+ , elemVariables_(localView(gridVariables))
+ , prevElemVariables_()
+ , elementIsGhost_(element.partitionType() == Dune::GhostEntity)
+ , stageParams_(nullptr)
+ {
+ if (diffMethod_ != DiffMethod::numeric)
+ DUNE_THROW(Dune::NotImplemented, "Provided differentiation method");
+
+ fvGeometry_.bind(element);
+ elemVariables_.bind(element, fvGeometry_);
+ }
+
+ /*!
+ * \brief Constructor for instationary problems.
+ * \note Using this constructor, we assemble one stage within
+ * a time integration step using multi-stage methods.
+ */
+ explicit CCLocalAssembler(const Element& element,
+ const std::vector<GridVariables>& prevGridVars,
+ GridVariables& gridVariables,
+ std::shared_ptr<const StageParams> stageParams,
+ DiffMethod dm = DiffMethod::numeric)
+ : diffMethod_(dm)
+ , gridVariables_(gridVariables)
+ , fvGeometry_(localView(gridVariables.gridGeometry()))
+ , elemVariables_(localView(gridVariables))
+ , prevElemVariables_()
+ , elementIsGhost_(element.partitionType() == Dune::GhostEntity)
+ , stageParams_(stageParams)
+ {
+ if (diffMethod_ != DiffMethod::numeric)
+ DUNE_THROW(Dune::NotImplemented, "Provided differentiation method");
+
+ if (prevGridVars.size() != stageParams->size() - 1)
+ DUNE_THROW(Dune::InvalidStateException, "Grid Variables for all stages needed");
+
+ fvGeometry_.bind(element);
+ for (const auto& gridVars : prevGridVars)
+ {
+ prevElemVariables_.emplace_back(localView(gridVars));
+ prevElemVariables_.back().bind(element, fvGeometry_);
+ }
+
+ elemVariables_.bind(element, fvGeometry_);
+ }
+
+ /*!
+ * \brief Computes the derivatives with respect to the given element and adds
+ * them to the global matrix. The element residual is written into the
+ * right hand side.
+ */
+ template<class JacobianMatrix, class ResidualVector, class PartialReassembler = DefaultPartialReassembler>
+ void assembleJacobianAndResidual(JacobianMatrix& jac,
+ ResidualVector& res,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ const auto& element = fvGeometry_.element();
+ const auto globalI = fvGeometry_.gridGeometry().elementMapper().index(element);
+ if (partialReassembler
+ && partialReassembler->elementColor(globalI) == EntityColor::green)
+ {
+ res[globalI] = evalLocalResidual()[0]; // green elements are not re-linearized, only the residual is updated
+ }
+ else
+ {
+ res[globalI] = assembleJacobianAndResidual_(jac); // forward to the internal implementation
+ }
+ }
+
+
+ /*!
+ * \brief Computes the derivatives with respect to the dofs of the given
+ * element and adds them to the given jacobian matrix.
+ */
+ template<class JacobianMatrix>
+ void assembleJacobian(JacobianMatrix& jac)
+ {
+ assembleJacobianAndResidual_(jac);
+ }
+
+ //! Assemble the residual only
+ template<class ResidualVector>
+ void assembleResidual(ResidualVector& res)
+ {
+ const auto residual = evalLocalResidual();
+ for (const auto& scv : scvs(fvGeometry_))
+ res[scv.dofIndex()] += residual[scv.localDofIndex()];
+ }
+
+ //! Evaluate the complete local residual for the current element.
+ ElementResidualVector evalLocalResidual() const
+ {
+ if (isStationary())
+ {
+ const auto context = makeLocalContext(fvGeometry_, elemVariables_);
+ LocalOperator localOperator(context);
+ return elementIsGhost_ ? localOperator.getEmptyResidual()
+ : localOperator.evalFluxesAndSources();
+ }
+ else
+ {
+ ElementResidualVector residual(fvGeometry_.numScv());
+ residual = 0.0;
+
+ if (!elementIsGhost_)
+ {
+ // add the terms associated with previous stages
+ for (std::size_t k = 0; k < stageParams_->size()-1; ++k)
+ addStageTerms_(residual, k,
+ makeLocalContext(fvGeometry_, prevElemVariables_[k]));
+
+ // add the terms of the current stage
+ addStageTerms_(residual, stageParams_->size()-1,
+ makeLocalContext(fvGeometry_, elemVariables_));
+ }
+
+ return residual;
+ }
+ }
+
+ //! Return true if a stationary problem is assembled
+ bool isStationary() const
+ { return !stageParams_; }
+
+protected:
+
+ //! add the terms of a stage to the current element residual
+ void addStageTerms_(ElementResidualVector& r,
+ std::size_t stageIdx,
+ const LocalContext& context) const
+ {
+ LocalOperator localOperator(context);
+ if (!stageParams_->skipTemporal(stageIdx))
+ r.axpy(stageParams_->temporalWeight(stageIdx),
+ localOperator.evalStorage());
+ if (!stageParams_->skipSpatial(stageIdx))
+ r.axpy(stageParams_->spatialWeight(stageIdx),
+ localOperator.evalFluxesAndSources());
+ }
+
+ /*!
+ * \brief Computes the derivatives with respect to the dofs of the given
+ * element and adds them to the global matrix.
+ * \return The element residual at the current solution.
+ */
+ template<class JacobianMatrix, class PartialReassembler = DefaultPartialReassembler>
+ NumEqVector assembleJacobianAndResidual_(JacobianMatrix& A,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ // TODO: Do we need this to be constexpr?
+ if (diffMethod_ == DiffMethod::numeric)
+ return assembleJacobianAndResidualNumeric_(A, partialReassembler);
+ else
+ DUNE_THROW(Dune::NotImplemented, "Analytic assembler for cell-centered schemes");
+ }
+
+ /*!
+ * \brief Computes the derivatives by means of numeric differentiation
+ * and adds them to the global matrix.
+ * \return The element residual at the current solution.
+ * \note This specialization is for cell-centered schemes with numeric differentiation
+ */
+ template<class JacobianMatrix, class PartialReassembler = DefaultPartialReassembler>
+ NumEqVector assembleJacobianAndResidualNumeric_(JacobianMatrix& A,
+ const PartialReassembler* partialReassembler = nullptr)
+ {
+ // alias for the variables of the current stage
+ auto& curVariables = elemVariables_;
+ auto& curElemVolVars = curVariables.elemVolVars();
+ auto& curElemFluxVarsCache = curVariables.elemFluxVarsCache();
+ const auto& x = curVariables.gridVariables().dofs();
+
+ // get stencil information
+ const auto& element = fvGeometry_.element();
+ const auto& gridGeometry = fvGeometry_.gridGeometry();
+ const auto& connectivityMap = gridGeometry.connectivityMap();
+ const auto globalI = gridGeometry.elementMapper().index(element);
+ const auto numNeighbors = connectivityMap[globalI].size();
+
+ // deduce the problem type
+ const auto& problem = curElemVolVars.gridVolVars().problem();
+ using Problem = std::decay_t<decltype(problem)>;
+
+ // assemble the undeflected residual
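+ // (entry 0: residual of this element; entries 1..numNeighbors: the neighbors'
+ // flux contributions that depend on the dofs of this element)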
+ using Residuals = ReservedBlockVector<NumEqVector, maxElementStencilSize>;
+ Residuals origResiduals(numNeighbors + 1); origResiduals = 0.0;
+ origResiduals[0] = evalLocalResidual()[0];
+
+ // lambda for convenient evaluation of the fluxes across scvfs in the neighbors
+ auto evalNeighborFlux = [&] (const auto& neighbor, const auto& scvf)
+ {
+ const auto context = makeLocalContext(fvGeometry_, elemVariables_);
+ return LocalOperator(context).computeFlux(neighbor, scvf);
+ };
+
+ // get the elements in which we need to evaluate the fluxes
+ // and calculate these in the undeflected state
+ Dune::ReservedVector<Element, maxElementStencilSize> neighborElements;
+ neighborElements.resize(numNeighbors);
+
+ unsigned int j = 1;
+ for (const auto& dataJ : connectivityMap[globalI])
+ {
+ neighborElements[j-1] = gridGeometry.element(dataJ.globalJ);
+
+ if (neighborElements[j-1].partitionType() != Dune::GhostEntity)
+ for (const auto scvfIdx : dataJ.scvfsJ)
+ origResiduals[j] += evalNeighborFlux(neighborElements[j-1],
+ fvGeometry_.scvf(scvfIdx));
+
+ ++j;
+ }
+
+ // reference to the element's scv (needed later) and corresponding vol vars
+ const auto& scv = fvGeometry_.scv(globalI);
+ auto& curVolVars = getVolVarAcess_(curElemVolVars, gridVariables_.gridVolVars(), scv);
+
+ // save a copy of the original privars and vol vars in order
+ // to restore the original solution after deflection
+ const auto origPriVars = x[globalI];
+ const auto origVolVars = curVolVars;
+
+ // element solution container to be deflected
+ auto elemSol = elementSolution(element, x, gridGeometry);
+
+ // derivatives of the residuals in the neighbors with respect to the dofs of the current element;
+ // in index 0 we save the derivative of the element residual with respect to its own dofs
+ Residuals partialDerivs(numNeighbors + 1);
+ for (int pvIdx = 0; pvIdx < numEq; ++pvIdx)
+ {
+ partialDerivs = 0.0;
+
+ auto evalResiduals = [&](Scalar priVar)
+ {
+ Residuals partialDerivsTmp(numNeighbors + 1);
+ partialDerivsTmp = 0.0;
+
+ // update the volume variables and the flux var cache
+ elemSol[0][pvIdx] = priVar;
+ curVolVars.update(elemSol, problem, element, scv);
+ curElemFluxVarsCache.update(element, fvGeometry_, curElemVolVars);
+ if (enableGridFluxVarsCache)
+ gridVariables_.gridFluxVarsCache().updateElement(element, fvGeometry_, curElemVolVars);
+
+ // calculate the residual with the deflected primary variables
+ partialDerivsTmp[0] = evalLocalResidual()[0];
+
+ // calculate the fluxes in the neighbors with the deflected primary variables
+ for (std::size_t k = 0; k < numNeighbors; ++k)
+ if (neighborElements[k].partitionType() != Dune::GhostEntity)
+ for (auto scvfIdx : connectivityMap[globalI][k].scvfsJ)
+ partialDerivsTmp[k+1] += evalNeighborFlux(neighborElements[k],
+ fvGeometry_.scvf(scvfIdx));
+
+ return partialDerivsTmp;
+ };
+
+ // derive the residuals numerically
+ static const NumericEpsilon<Scalar, numEq> numEps{problem.paramGroup()};
+ static const int numDiffMethod = getParamFromGroup<int>(problem.paramGroup(), "Assembly.NumericDifferenceMethod");
+
+ const auto eps = numEps(elemSol[0][pvIdx], pvIdx);
+ NumericDifferentiation::partialDerivative(evalResiduals, elemSol[0][pvIdx],
+ partialDerivs, origResiduals,
+ eps, numDiffMethod);
+
+ // Correct derivative for ghost elements, i.e. set a 1 for the derivative w.r.t. the
+ // current primary variable and a 0 elsewhere. As we always solve for a delta of the
+ // solution with respect to the initial one, this results in a delta of 0 for ghosts.
+ if (elementIsGhost_)
+ {
+ partialDerivs[0] = 0.0;
+ partialDerivs[0][pvIdx] = 1.0;
+ }
+
+ // For instationary simulations, scale the coupling
+ // fluxes of the current stage correctly
+ if (stageParams_)
+ {
+ for (std::size_t k = 0; k < numNeighbors; ++k)
+ partialDerivs[k+1] *= stageParams_->spatialWeight(stageParams_->size()-1);
+ }
+
+ // add the current partial derivatives to the global jacobian matrix
+ // no special treatment is needed if globalJ is a ghost because then derivatives have been assembled to 0 above
+ if constexpr (Problem::enableInternalDirichletConstraints())
+ {
+ // check if own element has internal Dirichlet constraint
+ const auto internalDirichletConstraintsOwnElement = problem.hasInternalDirichletConstraint(element, scv);
+ const auto dirichletValues = problem.internalDirichlet(element, scv);
+
+ for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+ {
+ if (internalDirichletConstraintsOwnElement[eqIdx])
+ {
+ origResiduals[0][eqIdx] = origVolVars.priVars()[eqIdx] - dirichletValues[eqIdx];
+ A[globalI][globalI][eqIdx][pvIdx] = (eqIdx == pvIdx) ? 1.0 : 0.0;
+ }
+ else
+ A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+ }
+
+ // off-diagonal entries
+ j = 1;
+ for (const auto& dataJ : connectivityMap[globalI])
+ {
+ const auto& neighborElement = gridGeometry.element(dataJ.globalJ);
+ const auto& neighborScv = fvGeometry_.scv(dataJ.globalJ);
+ const auto internalDirichletConstraintsNeighbor = problem.hasInternalDirichletConstraint(neighborElement, neighborScv);
+
+ for (int eqIdx = 0; eqIdx < numEq; ++eqIdx)
+ {
+ if (internalDirichletConstraintsNeighbor[eqIdx])
+ A[dataJ.globalJ][globalI][eqIdx][pvIdx] = 0.0;
+ else
+ A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j][eqIdx];
+ }
+
+ ++j;
+ }
+ }
+ else // no internal Dirichlet constraints specified
+ {
+ for (int eqIdx = 0; eqIdx < numEq; eqIdx++)
+ {
+ // the diagonal entries
+ A[globalI][globalI][eqIdx][pvIdx] += partialDerivs[0][eqIdx];
+
+ // off-diagonal entries
+ j = 1;
+ for (const auto& dataJ : connectivityMap[globalI])
+ A[dataJ.globalJ][globalI][eqIdx][pvIdx] += partialDerivs[j++][eqIdx];
+ }
+ }
+
+ // restore the original state of the scv's volume variables
+ curVolVars = origVolVars;
+
+ // restore the current element solution
+ elemSol[0][pvIdx] = origPriVars[pvIdx];
+ }
+
+ // restore original state of the flux vars cache in case of global caching.
+ // This has to be done in order to guarantee that everything is in an undeflected
+ // state before the assembly of another element is called. In the case of local caching
+ // this is obsolete because the elemFluxVarsCache used here goes out of scope after this.
+ // We only have to do this for the last primary variable, for all others the flux var cache
+ // is updated with the correct element volume variables before residual evaluations
+ curElemFluxVarsCache.update(element, fvGeometry_, curElemVolVars);
+ if (enableGridFluxVarsCache)
+ gridVariables_.gridFluxVarsCache().updateElement(element, fvGeometry_, curElemVolVars);
+
+ // return the original residual
+ return origResiduals[0];
+ }
+
+ //! Returns the volume variables of the local view (case of no caching)
+ template<class ElemVolVars, class GridVolVars, std::enable_if_t<!GridVolVars::cachingEnabled, int> = 0>
+ auto& getVolVarAcess_(ElemVolVars& elemVolVars, GridVolVars& gridVolVars, const SubControlVolume& scv)
+ { return elemVolVars[scv]; }
+
+ //! Returns the volume variables of the grid vol vars (case of global caching)
+ template<class ElemVolVars, class GridVolVars, std::enable_if_t<GridVolVars::cachingEnabled, int> = 0>
+ auto& getVolVarAcess_(ElemVolVars& elemVolVars, GridVolVars& gridVolVars, const SubControlVolume& scv)
+ { return gridVolVars.volVars(scv); }
+
+ DiffMethod diffMethod_; //!< the type of differentiation method
+ GridVariables& gridVariables_; //!< reference to the grid variables
+ FVElementGeometry fvGeometry_; //!< element-local finite volume geometry
+ ElementVariables elemVariables_; //!< element variables of the current stage
+ std::vector<ElementVariables> prevElemVariables_; //!< element variables of prior stages
+
+ bool elementIsGhost_;
+ std::shared_ptr<const StageParams> stageParams_;
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
diff --git a/dumux/assembly/fv/localoperator.hh b/dumux/assembly/fv/localoperator.hh
new file mode 100644
index 0000000000000000000000000000000000000000..55483f5fe31903e91ac7e04a6e8b1f45a7012f96
--- /dev/null
+++ b/dumux/assembly/fv/localoperator.hh
@@ -0,0 +1,283 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief An element-wise local operator for finite-volume schemes.
+ */
+#ifndef DUMUX_FV_LOCAL_OPERATOR_HH
+#define DUMUX_FV_LOCAL_OPERATOR_HH
+
+#include <type_traits>
+
+#include <dune/common/exceptions.hh>
+#include <dune/istl/bvector.hh>
+
+#include <dumux/common/numeqvector.hh>
+#include <dumux/discretization/method.hh>
+#include <dumux/discretization/extrusion.hh>
+
+namespace Dumux::Experimental {
+
+/*!
+ * \ingroup Assembly
+ * \brief The element-wise local operator for finite volume schemes.
+ * This allows for element-wise evaluation of individual terms
+ * of the equations to be solved.
+ * \tparam OP The model-specific operators
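+ *
+ * Typical use within a local assembler (sketch; the time-integration weights
+ * applied by the assemblers are omitted):
+ * \code
+ * const auto context = makeLocalContext(fvGeometry, elemVariables);
+ * FVLocalOperator<OP> localOperator(context);
+ * const auto fluxAndSourceTerms = localOperator.evalFluxesAndSources();
+ * const auto storageTerms = localOperator.evalStorage();
+ * \endcode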
+ */
+template<class OP>
+class FVLocalOperator
+{
+ using LC = typename OP::LocalContext;
+ using ElementVariables = typename LC::ElementVariables;
+ using GridVariables = typename ElementVariables::GridVariables;
+ using PrimaryVariables = typename GridVariables::PrimaryVariables;
+ using NumEqVector = Dumux::NumEqVector<PrimaryVariables>;
+
+ using FVElementGeometry = typename LC::ElementGridGeometry;
+ using GridGeometry = typename FVElementGeometry::GridGeometry;
+ using Element = typename GridGeometry::GridView::template Codim<0>::Entity;
+ using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace;
+ using Extrusion = Extrusion_t<GridGeometry>;
+
+ static constexpr int numEq = NumEqVectorTraits<PrimaryVariables>::numEq();
+ static constexpr bool isBox = GridGeometry::discMethod == DiscretizationMethod::box;
+
+public:
+ //! export the expected local context type
+ using LocalContext = LC;
+
+ //! export the underlying operators
+ using Operators = OP;
+
+ //! vector type storing the operator values on all dofs of an element
+ //! TODO: Use ReservedBlockVector
+ using ElementResidualVector = Dune::BlockVector<NumEqVector>;
+
+ //! Constructor from a local context
+ explicit FVLocalOperator(const LocalContext& context)
+ : context_(context)
+ {}
+
+ /*!
+ * \name Main interface
+ * \note Methods used by the assembler to compute derivatives and residual
+ */
+ // \{
+
+ /*!
+ * \brief Compute the terms of the local residual that do not appear in
+ * time derivatives. These are the sources and the fluxes.
+ */
+ ElementResidualVector evalFluxesAndSources() const
+ {
+ auto result = getEmptyResidual();
+ const auto& problem = context_.elementVariables().gridVariables().gridVolVars().problem();
+ const auto& fvGeometry = context_.elementGridGeometry();
+
+ // source term
+ for (const auto& scv : scvs(fvGeometry))
+ result[scv.localDofIndex()] -= Operators::source(problem, context_, scv);
+
+ // flux term
+ for (const auto& scvf : scvfs(fvGeometry))
+ addFlux_(result, scvf);
+
+ return result;
+ }
+
+ /*!
+ * \brief Compute the storage term, i.e. the term appearing in the time derivative.
+ */
+ ElementResidualVector evalStorage() const
+ {
+ const auto& problem = context_.elementVariables().gridVariables().gridVolVars().problem();
+ const auto& fvGeometry = context_.elementGridGeometry();
+
+ // TODO: Until now, FVLocalResidual defined storage as the entire
+ // time derivative. Now it means the term above the time derivative.
+ // We should think about the correct naming here...
+ // TODO: Should storage() NOT multiply with volume?? That was different until
+ // now but flux() also returns the flux multiplied with area so this should
+ // be more consistent
+ auto result = getEmptyResidual();
+ for (const auto& scv : scvs(fvGeometry))
+ result[scv.localDofIndex()] += Operators::storage(problem, context_, scv);
+
+ return result;
+ }
+
+ ElementResidualVector getEmptyResidual() const
+ {
+ ElementResidualVector res(context_.elementGridGeometry().numScv());
+ res = 0.0;
+ return res;
+ }
+
+ // \}
+
+ /*!
+ * \name Interfaces for analytic Jacobian computation
+ */
+ // \{
+
+ //! \todo TODO: Add interfaces. Or, should this be here at all!?
+
+ //\}
+
+ // \}
+
+ /*!
+ * \brief Compute the flux across a single face embedded in the given element.
+ * \note This function behaves differently from the flux function in the
+ * operators, as it checks if the face is on the boundary. If so,
+ * it returns the flux resulting from the user-specified conditions.
+ * \note This assumes that the given element and face are contained within
+ * the context that was used to instantiate this class.
+ */
+ NumEqVector computeFlux(const Element& element,
+ const SubControlVolumeFace& scvf) const
+ {
+ const auto& evv = context_.elementVariables().elemVolVars();
+ const auto& problem = evv.gridVolVars().problem();
+
+ // interior faces
+ if (!scvf.boundary())
+ return Operators::flux(problem, context_, scvf);
+
+ const auto& insideScv = context_.elementGridGeometry().scv(scvf.insideScvIdx());
+
+ // for cell-centered schemes, evaluate fluxes also across Dirichlet boundaries
+ if constexpr (!isBox)
+ {
+ const auto& bcTypes = problem.boundaryTypes(element, scvf);
+ if (bcTypes.hasDirichlet() && !bcTypes.hasNeumann())
+ return Operators::flux(problem, context_, scvf);
+ else if (bcTypes.hasNeumann() && bcTypes.hasDirichlet())
+ DUNE_THROW(Dune::NotImplemented, "Mixed boundary conditions for cell-centered schemes. " <<
+ "Use pure boundary conditions by converting Dirichlet BCs to Robin BCs.");
+ }
+ // for the box scheme, only pure Neumann boundaries are supported
+ else
+ {
+ if (!problem.boundaryTypes(element, insideScv).hasOnlyNeumann())
+ DUNE_THROW(Dune::NotImplemented, "For the box scheme only Neumann boundary fluxes can be computed.");
+ }
+
+ // compute and return the Neumann boundary fluxes
+ auto neumannFluxes = getUnscaledNeumannFluxes_(element, scvf);
+ neumannFluxes *= Extrusion::area(scvf)*evv[insideScv].extrusionFactor();
+ return neumannFluxes;
+ }
+
+protected:
+
+ //! compute and add the flux across the given face to the container (cc schemes)
+ template<bool b = isBox, std::enable_if_t<!b, int> = 0>
+ void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const
+ {
+ const auto& fvGeometry = context_.elementGridGeometry();
+ const auto& evv = context_.elementVariables().elemVolVars();
+ const auto& problem = evv.gridVolVars().problem();
+
+ const auto& insideScv = fvGeometry.scv(scvf.insideScvIdx());
+ const auto localDofIdx = insideScv.localDofIndex();
+
+ if (!scvf.boundary())
+ r[localDofIdx] += Operators::flux(problem, context_, scvf);
+ else
+ {
+ const auto& bcTypes = problem.boundaryTypes(fvGeometry.element(), scvf);
+
+ // Dirichlet boundaries
+ if (bcTypes.hasDirichlet() && !bcTypes.hasNeumann())
+ r[localDofIdx] += Operators::flux(problem, context_, scvf);
+
+ // Neumann and Robin ("solution dependent Neumann") boundary conditions
+ else if (bcTypes.hasNeumann() && !bcTypes.hasDirichlet())
+ {
+ auto neumannFluxes = getUnscaledNeumannFluxes_(fvGeometry.element(), scvf);
+ neumannFluxes *= Extrusion::area(scvf)*evv[insideScv].extrusionFactor();
+ r[localDofIdx] += neumannFluxes;
+ }
+
+ else
+ DUNE_THROW(Dune::NotImplemented, "Mixed boundary conditions for cell-centered schemes. " <<
+ "Use pure boundary conditions by converting Dirichlet BCs to Robin BCs");
+ }
+ }
+
+ //! compute and add the flux across the given face to the container (box scheme)
+ template<bool b = isBox, std::enable_if_t<b, int> = 0>
+ void addFlux_(ElementResidualVector& r, const SubControlVolumeFace& scvf) const
+ {
+ const auto& fvGeometry = context_.elementGridGeometry();
+ const auto& evv = context_.elementVariables().elemVolVars();
+ const auto& problem = evv.gridVolVars().problem();
+
+ // inner faces
+ if (!scvf.boundary())
+ {
+ const auto flux = Operators::flux(problem, context_, scvf);
+ r[fvGeometry.scv(scvf.insideScvIdx()).localDofIndex()] += flux;
+
+ if (scvf.numOutsideScvs() > 0)
+ r[fvGeometry.scv(scvf.outsideScvIdx()).localDofIndex()] -= flux;
+ }
+
+ // boundary faces
+ else
+ {
+ const auto& scv = fvGeometry.scv(scvf.insideScvIdx());
+ const auto& bcTypes = problem.boundaryTypes(fvGeometry.element(), scv);
+
+ // Treat Neumann and Robin ("solution dependent Neumann") boundary conditions.
+ if (bcTypes.hasNeumann())
+ {
+ const auto neumannFluxes = getUnscaledNeumannFluxes_(fvGeometry.element(), scvf);
+ const auto area = Extrusion::area(scvf)*evv[scv].extrusionFactor();
+
+ // only add fluxes to equations for which Neumann is set
+ for (int eqIdx = 0; eqIdx < NumEqVector::size(); ++eqIdx)
+ if (bcTypes.isNeumann(eqIdx))
+ r[scv.localDofIndex()][eqIdx] += neumannFluxes[eqIdx]*area;
+ }
+ }
+ }
+
+ //! get the user-specified Neumann boundary flux
+ NumEqVector getUnscaledNeumannFluxes_(const Element& element,
+ const SubControlVolumeFace& scvf) const
+ {
+ // TODO: Modify problem interfaces to receive context
+ const auto& fvGeometry = context_.elementGridGeometry();
+ const auto& evv = context_.elementVariables().elemVolVars();
+ const auto& efvc = context_.elementVariables().elemFluxVarsCache();
+ const auto& problem = evv.gridVolVars().problem();
+ return problem.neumann(element, fvGeometry, evv, efvc, scvf);
+ }
+
+private:
+ const LocalContext& context_;
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
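A minimal usage sketch (not part of the patch) of the element-local interfaces defined above. The class name `LocalOperator` is a placeholder for the class implemented in this file, and the way the local views, element and face are obtained is an assumption for illustration:

    // evaluate element-local terms via the interfaces shown above
    const auto context = Dumux::Experimental::makeLocalContext(fvGeometry, elemVariables);
    LocalOperator localOperator(context);                 // placeholder name (assumption)

    auto storage = localOperator.evalStorage();           // storage term per sub-control volume

    // flux across a single face, e.g. for boundary post-processing
    const auto boundaryFlux = localOperator.computeFlux(element, scvf);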
diff --git a/dumux/assembly/fv/operators.hh b/dumux/assembly/fv/operators.hh
new file mode 100644
index 0000000000000000000000000000000000000000..7e07999ee373dd20cf9d29e645c59fec392b7458
--- /dev/null
+++ b/dumux/assembly/fv/operators.hh
@@ -0,0 +1,145 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Assembly
+ * \brief The base class for the sub-control entity-local evaluation of
+ * the terms of equations in the context of finite-volume schemes
+ */
+#ifndef DUMUX_FV_OPERATORS_HH
+#define DUMUX_FV_OPERATORS_HH
+
+#include
+#include
+#include
+
+namespace Dumux::Experimental {
+
+/*!
+ * \ingroup Assembly
+ * \brief Convenience alias to define the context finite-volume operators work on.
+ * \tparam EV The element-(stencil)-local variables
+ */
+template<class EV>
+using FVOperatorsContext = DefaultLocalContext<EV>;
+
+/*!
+ * \ingroup Assembly
+ * \brief The base class for the sub-control entity-local evaluation of
+ * the terms of equations in the context of finite-volume schemes
+ * \tparam EV The element-(stencil)-local variables
+ */
+template<class EV>
+class FVOperators
+{
+ // context type on which to operate
+ using LC = FVOperatorsContext<EV>;
+
+ // The grid geometry on which the scheme operates
+ using FVElementGeometry = typename LC::ElementGridGeometry;
+ using GridGeometry = typename FVElementGeometry::GridGeometry;
+ using SubControlVolume = typename GridGeometry::SubControlVolume;
+ using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace;
+ using Extrusion = Extrusion_t<GridGeometry>;
+
+ // The variables (primary/secondary) on the grid
+ using GridVariables = typename LC::ElementVariables::GridVariables;
+ using PrimaryVariables = typename GridVariables::PrimaryVariables;
+
+public:
+ //! export the local context on which this operates
+ using LocalContext = LC;
+
+ //! export the type used to store scalar values for all equations
+ using NumEqVector = Dumux::NumEqVector<PrimaryVariables>;
+
+ // export the types of the flux/source/storage terms
+ using FluxTerm = NumEqVector;
+ using SourceTerm = NumEqVector;
+ using StorageTerm = NumEqVector;
+
+ /*!
+ * \name Model specific interfaces
+ * \note The following methods are the model-specific implementations of the
+ * operators appearing in the equations and should be overloaded by the
+ * implementations.
+ */
+ // \{
+
+ /*!
+ * \brief Compute the storage term of the equations for the given sub-control volume
+ * \param problem The problem to be solved (could store additionally required quantities)
+ * \param context The element-stencil-local required data
+ * \param scv The sub-control volume
+ * \note This must be overloaded by the implementation
+ */
+ template<class Problem>
+ static StorageTerm storage(const Problem& problem,
+ const LocalContext& context,
+ const SubControlVolume& scv)
+ { DUNE_THROW(Dune::NotImplemented, "Storage operator not implemented!"); }
+
+ /*!
+ * \brief Compute the flux term of the equations for the given sub-control volume face
+ * \param problem The problem to be solved (could store additionally required quantities)
+ * \param context The element-stencil-local required data
+ * \param scvf The sub-control volume face for which the flux term is to be computed
+ * \note This must be overloaded by the implementation
+ */
+ template<class Problem>
+ static FluxTerm flux(const Problem& problem,
+ const LocalContext& context,
+ const SubControlVolumeFace& scvf)
+ { DUNE_THROW(Dune::NotImplemented, "This model does not implement a flux method!"); }
+
+ /*!
+ * \brief Compute the source term of the equations for the given sub-control volume
+ * \param problem The problem to be solved (could store additionally required quantities)
+ * \param context The element-stencil-local required data
+ * \param scv The sub-control volume for which the source term is to be computed
+ * \note This is a default implementation forwarding to interfaces in the problem
+ */
+ template<class Problem>
+ static SourceTerm source(const Problem& problem,
+ const LocalContext& context,
+ const SubControlVolume& scv)
+ {
+ SourceTerm source(0.0);
+
+ // TODO: The problem-interfaces should be adapted to receive context?
+ const auto& fvGeometry = context.elementGridGeometry();
+ const auto& elemVolVars = context.elementVariables().elemVolVars();
+ const auto& element = fvGeometry.element();
+
+ // add contributions from volume flux sources
+ source += problem.source(element, fvGeometry, elemVolVars, scv);
+
+ // add contribution from possible point sources
+ source += problem.scvPointSources(element, fvGeometry, elemVolVars, scv);
+
+ // multiply with scv volume
+ source *= Extrusion::volume(scv)*elemVolVars[scv].extrusionFactor();
+
+ return source;
+ }
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
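A brief sketch (not part of the patch) of how assembly code can call this static interface. `Operators` stands for a model-specific specialization (such as the immiscible operators added further below); the sign conventions and the simplified boundary/box handling are assumptions for illustration.

    // element-local accumulation of source and flux terms via the operator interface
    template<class Operators, class Problem, class LocalContext, class ElementResidualVector>
    void addSourcesAndFluxes(ElementResidualVector& residual,
                             const Problem& problem,
                             const LocalContext& context)
    {
        const auto& fvGeometry = context.elementGridGeometry();

        // sources are evaluated per sub-control volume
        for (const auto& scv : scvs(fvGeometry))
            residual[scv.localDofIndex()] -= Operators::source(problem, context, scv);

        // fluxes are evaluated per sub-control volume face (boundary faces and the
        // box scheme's outside contribution are omitted here, see the local operator above)
        for (const auto& scvf : scvfs(fvGeometry))
            residual[fvGeometry.scv(scvf.insideScvIdx()).localDofIndex()]
                += Operators::flux(problem, context, scvf);
    }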
diff --git a/test/porousmediumflow/1p/compressible/instationary/assembler.hh b/dumux/assembly/localassembler.hh
similarity index 55%
rename from test/porousmediumflow/1p/compressible/instationary/assembler.hh
rename to dumux/assembly/localassembler.hh
index 115b25570c655bbae65f89bde25120ca7db1dc7e..75b39f51f8a97cb3246f9614f79198a285776070 100644
--- a/test/porousmediumflow/1p/compressible/instationary/assembler.hh
+++ b/dumux/assembly/localassembler.hh
@@ -18,37 +18,44 @@
*****************************************************************************/
/*!
* \file
- * \ingroup OnePTests
- * \brief Dummy assembler that fulfills the new experimental assembly interface.
+ * \ingroup Assembly
+ * \brief Convenience alias to select a local assembler based on the discretization scheme.
*/
-#ifndef DUMUX_COMPRESSIBLE_ONEP_TEST_ASSEMBLER_HH
-#define DUMUX_COMPRESSIBLE_ONEP_TEST_ASSEMBLER_HH
+#ifndef DUMUX_LOCAL_ASSEMBLER_HH
+#define DUMUX_LOCAL_ASSEMBLER_HH
-namespace Dumux::OnePCompressibleTest {
+#include
+#include "fv/boxlocalassembler.hh"
+#include "fv/cclocalassembler.hh"
-// Custom assembler to test assembly with grid variables,
-// fulfilling the foreseen required interface
-template
-class GridVarsAssembler : public Assembler
-{
-public:
- using Assembler::Assembler;
- using typename Assembler::GridVariables;
- using typename Assembler::ResidualType;
+namespace Dumux::Experimental {
+namespace Detail {
- using Variables = GridVariables;
+template
+struct LocalAssemblerChooser;
- void assembleJacobianAndResidual(const GridVariables& gridVars)
- { Assembler::assembleJacobianAndResidual(gridVars.dofs()); }
+template
+struct LocalAssemblerChooser
+{ using type = BoxLocalAssembler; };
- void assembleResidual(const GridVariables& gridVars)
- { Assembler::assembleResidual(gridVars.dofs()); }
+template
+struct LocalAssemblerChooser
+{ using type = CCLocalAssembler; };
- //! Remove residualNorm once this is fixed in the solvers !2113
- auto residualNorm(const GridVariables& gridVars)
- { return Assembler::residualNorm(gridVars.dofs()); }
-};
+template
+struct LocalAssemblerChooser
+{ using type = CCLocalAssembler; };
-} // end namespace Dumux::OnePCompressibleTest
+} // end namespace Detail
+
+/*!
+ * \ingroup Assembly
+ * \brief Helper alias to select the local assembler type from a local operator.
+ */
+template
+using LocalAssembler = typename Detail::LocalAssemblerChooser::type;
+
+} // end namespace Dumux::Experimental
#endif
diff --git a/dumux/discretization/CMakeLists.txt b/dumux/discretization/CMakeLists.txt
index 3b145a92fb493e50c73f4afb2f7dfafc068b10b5..7660bb1139512817b5cd9ec9c917c8f79bbe630b 100644
--- a/dumux/discretization/CMakeLists.txt
+++ b/dumux/discretization/CMakeLists.txt
@@ -11,6 +11,7 @@ box.hh
ccmpfa.hh
cctpfa.hh
checkoverlapsize.hh
+concepts.hh
elementsolution.hh
evalgradients.hh
evalsolution.hh
@@ -20,6 +21,7 @@ functionspacebasis.hh
fvgridvariables.hh
fvproperties.hh
gridvariables.hh
+localcontext.hh
localview.hh
method.hh
rotationpolicy.hh
diff --git a/dumux/discretization/concepts.hh b/dumux/discretization/concepts.hh
new file mode 100644
index 0000000000000000000000000000000000000000..2aaf72731b22a66476f1efdb7f816851aa7129fb
--- /dev/null
+++ b/dumux/discretization/concepts.hh
@@ -0,0 +1,81 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Discretization
+ * \brief Discretization-related concepts.
+ */
+#ifndef DUMUX_DISCRETIZATION_CONCEPTS_HH
+#define DUMUX_DISCRETIZATION_CONCEPTS_HH
+
+#include
+#include
+
+namespace Dumux::Experimental::Concept {
+
+/*!
+ * \ingroup Discretization
+ * \brief Concept for (still) experimental variables of a numerical model.
+ */
+ struct Variables
+ {
+ template
+ auto require(const T& t) -> decltype(
+ Dune::Concept::requireType(),
+ Dune::Concept::requireType(),
+ Dune::Concept::requireConvertible(t.dofs()),
+ Dune::Concept::requireConvertible(t.timeLevel()),
+ const_cast(t).updateTime(std::declval()),
+ const_cast(t).update(std::declval(),
+ std::declval()),
+ const_cast(t).update(std::declval())
+ );
+ };
+
+/*!
+ * \ingroup Discretization
+ * \brief Concept for (still) experimental grid variables.
+ */
+ struct GridVariables : Dune::Concept::Refines
+ {
+ template
+ auto require(const T& t) -> decltype(
+ Dune::Concept::requireType(),
+ Dune::Concept::requireConvertible(t.gridGeometry())
+ );
+ };
+
+ /*!
+ * \ingroup Discretization
+ * \brief Concept for (still) experimental finite volume grid variables.
+ */
+ struct FVGridVariables : Dune::Concept::Refines
+ {
+ template
+ auto require(const T& t) -> decltype(
+ Dune::Concept::requireType(),
+ Dune::Concept::requireType(),
+ Dune::Concept::requireConvertible(t.gridVolVars()),
+ Dune::Concept::requireConvertible(t.gridFluxVarsCache())
+ );
+ };
+
+} // end namespace Dumux::Experimental::Concept
+
+#endif
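A short sketch (not part of the patch), assuming dune-common's `Dune::models()` helper, of how these concepts can be used to dispatch between old- and new-style grid variables; this mirrors the pattern used in the vtk output module and velocity output changes further below:

    #include <dune/common/concept.hh>
    #include <dumux/discretization/concepts.hh>

    template<class GridVariables>
    decltype(auto) gridVolVarsOf(const GridVariables& gridVars)
    {
        // new-style (experimental) grid variables expose gridVolVars()
        if constexpr (Dune::models<Dumux::Experimental::Concept::FVGridVariables, GridVariables>())
            return gridVars.gridVolVars();
        else
            return gridVars.curGridVolVars();
    }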
diff --git a/dumux/discretization/fvgridvariables.hh b/dumux/discretization/fvgridvariables.hh
index 9c50679c1ab872d9b067dd70070392b8d6318752..1e01db0ebb0754333f0e9fd43d0c169ab83e07db 100644
--- a/dumux/discretization/fvgridvariables.hh
+++ b/dumux/discretization/fvgridvariables.hh
@@ -173,7 +173,10 @@ private:
// Experimental implementation of new grid variables layout //
//////////////////////////////////////////////////////////////
+#include
+#include
#include
+
#include
#include
@@ -193,13 +196,16 @@ class FVGridVariablesLocalView
using GridView = typename GridGeometry::GridView;
using Element = typename GridView::template Codim<0>::Entity;
- using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView;
- using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView;
-
public:
//! export corresponding grid-wide class
using GridVariables = GV;
+ //! export underlying volume variables cache
+ using ElementVolumeVariables = typename GV::GridVolumeVariables::LocalView;
+
+ //! export underlying flux variables cache
+ using ElementFluxVariablesCache = typename GV::GridFluxVariablesCache::LocalView;
+
//! Constructor
FVGridVariablesLocalView(const GridVariables& gridVariables)
: gridVariables_(&gridVariables)
@@ -326,6 +332,14 @@ public:
gridFluxVarsCache_.update(this->gridGeometry(), gridVolVars_, this->dofs(), true);
}
+ //! Update all variables that may be affected by a change in solution or time
+ void update(const SolutionVector& curSol,
+ const typename ParentType::TimeLevel& timeLevel)
+ {
+ ParentType::update(curSol, timeLevel);
+ update(curSol);
+ }
+
//! Update all variables that may be affected by a change in solution
void update(const SolutionVector& curSol)
{
diff --git a/dumux/discretization/localcontext.hh b/dumux/discretization/localcontext.hh
new file mode 100644
index 0000000000000000000000000000000000000000..49edf921651f784f55f3bafc293364fff3fc18a2
--- /dev/null
+++ b/dumux/discretization/localcontext.hh
@@ -0,0 +1,82 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup Discretization
+ * \brief Class that contains the element-local (or element stencil-local) data
+ * required to evaluate the terms of discrete equations.
+ */
+#ifndef DUMUX_LOCAL_CONTEXT_HH
+#define DUMUX_LOCAL_CONTEXT_HH
+
+namespace Dumux {
+namespace Experimental {
+
+/*!
+ * \ingroup Discretization
+ * \brief Implementation of element-stencil-local contexts, which solely store
+ * the local geometry and primary/secondary variables. This implementation
+ * defines the minimum interface required for contexts in single-domain
+ * applications to work with the default assembly mechanism.
+ * \tparam EV The element-local view on the grid variables
+ */
+template<class EV>
+class DefaultLocalContext
+{
+ using GridVariables = typename EV::GridVariables;
+ using GridGeometry = typename GridVariables::GridGeometry;
+
+public:
+ using ElementGridGeometry = typename GridGeometry::LocalView;
+ using ElementVariables = EV;
+
+ //! Constructor
+ DefaultLocalContext(const ElementGridGeometry& eg,
+ const ElementVariables& ev)
+ : elemGeom_(eg)
+ , elemVars_(ev)
+ {}
+
+ //! Return the element-local view on the grid geometry
+ const ElementGridGeometry& elementGridGeometry() const
+ { return elemGeom_; }
+
+ //! Return the element-local view on the grid variables
+ const ElementVariables& elementVariables() const
+ { return elemVars_; }
+
+private:
+ const ElementGridGeometry& elemGeom_;
+ const ElementVariables& elemVars_;
+};
+
+/*!
+ * \ingroup Discretization
+ * \brief Factory function to create a context from local views.
+ */
+template<class EV>
+DefaultLocalContext<EV>
+makeLocalContext(const typename EV::GridVariables::GridGeometry::LocalView& gglocalView,
+ const EV& elemVariables)
+{ return {gglocalView, elemVariables}; }
+
+} // end namespace Experimental
+} // end namespace Dumux
+
+#endif
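A minimal usage sketch (not part of the patch): the context simply bundles the two element-local views. The exact bind signatures of the local views are assumptions for illustration:

    // obtain element-local views and bind them to an element
    auto fvGeometry = localView(gridGeometry);
    auto elemVariables = localView(gridVariables);
    fvGeometry.bind(element);
    elemVariables.bind(element, fvGeometry);   // bind signature is an assumption

    // create the context and access the bundled data
    const auto context = Dumux::Experimental::makeLocalContext(fvGeometry, elemVariables);
    const auto& localGeometry = context.elementGridGeometry();
    const auto& elemVolVars = context.elementVariables().elemVolVars();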
diff --git a/dumux/flux/fluxvariablesbase.hh b/dumux/flux/fluxvariablesbase.hh
index 1ec5e876572aabc884f8e2e1b4c7d906d268027b..31d06a9f8fea4a18fd595e02ff234e67484299c0 100644
--- a/dumux/flux/fluxvariablesbase.hh
+++ b/dumux/flux/fluxvariablesbase.hh
@@ -25,6 +25,9 @@
#define DUMUX_DISCRETIZATION_FLUXVARIABLESBASE_HH
#include
+#include
+
+#include
namespace Dumux {
@@ -93,6 +96,82 @@ private:
const ElementFluxVariablesCache* elemFluxVarsCachePtr_; //!< Pointer to the current element flux variables cache
};
+namespace Experimental {
+
+/*!
+ * \ingroup Flux
+ * \brief Base class for the flux variables living on a sub control volume face
+ * \tparam LocalContext the element stencil-local context, consisting of
+ * the local geometry and primary & secondary variables
+ */
+template<class LocalContext>
+class FluxVariablesBase
+{
+ using FVElementGeometry = typename LocalContext::ElementGridGeometry;
+ using SubControlVolumeFace = typename FVElementGeometry::SubControlVolumeFace;
+ using GridGeometry = typename FVElementGeometry::GridGeometry;
+
+ using GridView = typename GridGeometry::GridView;
+ using Element = typename GridView::template Codim<0>::Entity;
+ using Stencil = std::vector;
+
+ static constexpr bool isBox = GridGeometry::discMethod == DiscretizationMethod::box;
+
+public:
+
+ //! Initialize the flux variables storing some temporary pointers
+ void init(const LocalContext& context,
+ const SubControlVolumeFace& scvf)
+ {
+ contextPtr_ = &context;
+ scvFacePtr_ = &scvf;
+
+ // for cell-centered methods, the element inside of the scvf may
+ // not be the one the FVElementGeometry is bound to.
+ if constexpr (!isBox)
+ {
+ const auto& fvGeometry = context.elementGridGeometry();
+ const auto& gridGeometry = fvGeometry.gridGeometry();
+ const auto& boundElement = fvGeometry.element();
+ const auto& insideScv = fvGeometry.scv(scvf.insideScvIdx());
+
+ const auto boundElementIdx = gridGeometry.elementMapper().index(boundElement);
+ if (insideScv.elementIndex() != boundElementIdx)
+ element_ = gridGeometry.element(boundElementIdx);
+ }
+ }
+
+ decltype(auto) problem() const
+ { return elemVolVars().gridVolVars().problem(); }
+
+ decltype(auto) elemVolVars() const
+ { return contextPtr_->elementVariables().elemVolVars(); }
+
+ decltype(auto) elemFluxVarsCache() const
+ { return contextPtr_->elementVariables().elemFluxVarsCache(); }
+
+ const SubControlVolumeFace& scvFace() const
+ { return *scvFacePtr_; }
+
+ const FVElementGeometry& fvGeometry() const
+ { return contextPtr_->elementGridGeometry(); }
+
+ const Element& element() const
+ {
+ if constexpr (isBox)
+ return contextPtr_->elementGridGeometry().element();
+ else
+ return element_ ? *element_
+ : contextPtr_->elementGridGeometry().element();
+ }
+
+private:
+ const LocalContext* contextPtr_; //!< Pointer to the local context
+ const SubControlVolumeFace* scvFacePtr_; //!< Pointer to the sub control volume face for which the flux variables are created
+ std::optional<Element> element_; //!< The element on the inside of the sub-control volume face
+};
+
+} // end namespace Experimental
} // end namespace Dumux
#endif
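A short usage sketch (not part of the patch): within a flux computation, a flux variables object deriving from this base class is initialized with the local context and then provides access to the data it bundles. This mirrors the call in the immiscible operators added further below; `FluxVariables` is a placeholder for such a derived class:

    FluxVariables fluxVars;            // some class deriving from Experimental::FluxVariablesBase
    fluxVars.init(context, scvf);      // stores pointers to the context and the face

    const auto& problem = fluxVars.problem();
    const auto& elemVolVars = fluxVars.elemVolVars();
    const auto& insideElement = fluxVars.element();   // element on the inside of the face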
diff --git a/dumux/io/vtkoutputmodule.hh b/dumux/io/vtkoutputmodule.hh
index 0805552aae8c2928811ba04002e7517cbbe7fedb..0fcad23d12679e1a6bef3f4da838cd57c35cc170 100644
--- a/dumux/io/vtkoutputmodule.hh
+++ b/dumux/io/vtkoutputmodule.hh
@@ -43,7 +43,9 @@
#include
#include
+
#include
+#include
#include "vtkfunction.hh"
#include "velocityoutput.hh"
@@ -378,8 +380,17 @@ public:
}
protected:
+ // obtain the grid volume variables from the grid variables
+ decltype(auto) gridVolVars() const
+ {
+ if constexpr (Dune::models())
+ return gridVariables_.gridVolVars();
+ else
+ return gridVariables_.curGridVolVars();
+ }
+
// some return functions for differing implementations to use
- const auto& problem() const { return gridVariables_.curGridVolVars().problem(); }
+ const auto& problem() const { return gridVolVars().problem(); }
const GridVariables& gridVariables() const { return gridVariables_; }
const GridGeometry& gridGeometry() const { return gridVariables_.gridGeometry(); }
const SolutionVector& sol() const { return sol_; }
@@ -447,7 +458,7 @@ private:
const auto eIdxGlobal = gridGeometry().elementMapper().index(element);
auto fvGeometry = localView(gridGeometry());
- auto elemVolVars = localView(gridVariables_.curGridVolVars());
+ auto elemVolVars = localView(gridVolVars());
// If velocity output is enabled we need to bind to the whole stencil
// otherwise element-local data is sufficient
@@ -634,7 +645,7 @@ private:
const auto numCorners = element.subEntities(dim);
auto fvGeometry = localView(gridGeometry());
- auto elemVolVars = localView(gridVariables_.curGridVolVars());
+ auto elemVolVars = localView(gridVolVars());
// resize element-local data containers
for (std::size_t i = 0; i < volVarScalarDataInfo_.size(); ++i)
diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh
index 121e68cac2d120852343761c78e397354e9d46f0..e1fd60ef3753147f6a4d21f91effd3f69b9770f6 100644
--- a/dumux/nonlinear/newtonsolver.hh
+++ b/dumux/nonlinear/newtonsolver.hh
@@ -481,7 +481,7 @@ public:
*
* \param vars The current iteration's variables
*/
- virtual void assembleLinearSystem(const Variables& vars)
+ virtual void assembleLinearSystem(Variables& vars)
{
assembleLinearSystem_(this->assembler(), vars);
@@ -879,7 +879,7 @@ protected:
this->assembler().updateGridVariables(Backend::dofs(vars));
}
- void computeResidualReduction_(const Variables& vars)
+ void computeResidualReduction_(Variables& vars)
{
// we assume that the assembler works on solution vectors
// if it doesn't export the variables type
@@ -1067,7 +1067,7 @@ private:
//! assembleLinearSystem_ for assemblers that support partial reassembly
template
- auto assembleLinearSystem_(const A& assembler, const Variables& vars)
+ auto assembleLinearSystem_(const A& assembler, Variables& vars)
-> typename std::enable_if_t
{
this->assembler().assembleJacobianAndResidual(vars, partialReassembler_.get());
@@ -1075,7 +1075,7 @@ private:
//! assembleLinearSystem_ for assemblers that don't support partial reassembly
template
- auto assembleLinearSystem_(const A& assembler, const Variables& vars)
+ auto assembleLinearSystem_(const A& assembler, Variables& vars)
-> typename std::enable_if_t
{
this->assembler().assembleJacobianAndResidual(vars);
diff --git a/dumux/porousmediumflow/fluxvariables.hh b/dumux/porousmediumflow/fluxvariables.hh
index 3a3478f3138e39948cb1886702ab77332ffc8c2c..91308c6cbd52c6b6b81db8018414c50b7f8f351f 100644
--- a/dumux/porousmediumflow/fluxvariables.hh
+++ b/dumux/porousmediumflow/fluxvariables.hh
@@ -29,27 +29,17 @@
#include
#include
+#include
#include
#include
namespace Dumux {
+namespace Detail {
-/*!
- * \ingroup PorousmediumflowModels
- * \brief The porous medium flux variables class that computes advective / convective,
- * molecular diffusive and heat conduction fluxes.
- *
- * \param TypeTag The type tag for access to type traits
- * \param UpScheme The upwind scheme to be applied to advective fluxes
- * \note Not all specializations are currently implemented
- */
-template> >
-class PorousMediumFluxVariables
-: public FluxVariablesBase,
- typename GetPropType::LocalView,
- typename GetPropType::LocalView,
- typename GetPropType::LocalView>
+//! Implementation of the flux variables to achieve compatibility-layer with experimental assembly
+template
+class PorousMediumFluxVariablesImpl
+: public BaseFluxVariables
{
using Scalar = GetPropType;
using ModelTraits = GetPropType;
@@ -72,7 +62,7 @@ public:
static constexpr bool enableThermalNonEquilibrium = getPropValue();
//! The constructor
- PorousMediumFluxVariables()
+ PorousMediumFluxVariablesImpl()
{
advFluxIsCached_.reset();
advFluxBeforeUpwinding_.fill(0.0);
@@ -165,6 +155,47 @@ private:
mutable std::array advFluxBeforeUpwinding_;
};
+} // end namespace Detail
+
+/*!
+ * \ingroup PorousmediumflowModels
+ * \brief The porous medium flux variables class that computes advective / convective,
+ * molecular diffusive and heat conduction fluxes.
+ *
+ * \param TypeTag The type tag for access to type traits
+ * \param UpScheme The upwind scheme to be applied to advective fluxes
+ * \note Not all specializations are currently implemented
+ */
+template> >
+using PorousMediumFluxVariables
+ = Detail::PorousMediumFluxVariablesImpl,
+ typename GetPropType::LocalView,
+ typename GetPropType::LocalView,
+ typename GetPropType::LocalView>,
+ UpScheme>;
+
+namespace Experimental {
+
+/*!
+ * \ingroup PorousmediumflowModels
+ * \brief The porous medium flux variables class that computes advective / convective,
+ * molecular diffusive and heat conduction fluxes.
+ *
+ * \param TypeTag The type tag for access to type traits
+ * \param UpScheme The upwind scheme to be applied to advective fluxes
+ * \note Not all specializations are currently implemented
+ * \todo Get rid of type tag as template arg somehow!
+ */
+template> >
+using PorousMediumFluxVariables
+ = Dumux::Detail::PorousMediumFluxVariablesImpl::LocalView>>,
+ UpScheme>;
+
+} // end namespace Experimental
} // end namespace Dumux
#endif
diff --git a/dumux/porousmediumflow/immiscible/CMakeLists.txt b/dumux/porousmediumflow/immiscible/CMakeLists.txt
index 192e7955c6e66f742d5ccdee1718cbc1d9979069..ccb0268dc6d3332a4cd90ce934e6b51643b31c25 100644
--- a/dumux/porousmediumflow/immiscible/CMakeLists.txt
+++ b/dumux/porousmediumflow/immiscible/CMakeLists.txt
@@ -1,3 +1,4 @@
install(FILES
localresidual.hh
+operators.hh
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/immiscible)
diff --git a/dumux/porousmediumflow/immiscible/operators.hh b/dumux/porousmediumflow/immiscible/operators.hh
new file mode 100644
index 0000000000000000000000000000000000000000..2c64633f0a20e40817a4abac54a2a9eaf3dfbb64
--- /dev/null
+++ b/dumux/porousmediumflow/immiscible/operators.hh
@@ -0,0 +1,182 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup PorousmediumflowModels
+ * \brief Sub-control entity-local evaluation of the operators
+ * within the systems of equations of n-phase immiscible models.
+ */
+#ifndef DUMUX_FV_IMMISCIBLE_OPERATORS_HH
+#define DUMUX_FV_IMMISCIBLE_OPERATORS_HH
+
+#include
+#include
+#include
+
+namespace Dumux::Experimental {
+namespace Detail {
+
+template
+struct DefaultEnergyOperatorsChooser;
+
+template
+struct DefaultEnergyOperatorsChooser
+{ using type = FVNonIsothermalOperators>; };
+
+template
+struct DefaultEnergyOperatorsChooser
+{ using type = void; };
+
+} // end namespace Detail
+
+/*!
+ * \ingroup PorousmediumflowModels
+ * \brief Sub-control entity-local evaluation of the operators
+ * within the systems of equations of n-phase immiscible models.
+ * \tparam ModelTraits defines model-related types and variables (e.g. number of phases)
+ * \tparam FluxVariables the type that is responsible for computing the individual
+ * flux contributions, e.g., advective, diffusive, convective...
+ * \tparam ElemVars The element-(stencil)-local variables
+ * \tparam EnergyOperators optional template argument, specifying the class that
+ * handles the operators related to non-isothermal effects.
+ */
+template::type>
+class FVImmiscibleOperators
+: public FVOperators
+{
+ using ParentType = FVOperators;
+ using LC = typename ParentType::LocalContext;
+
+ // The variables required for the evaluation of the equation
+ using ElementVariables = typename LC::ElementVariables;
+ using GridVariables = typename ElementVariables::GridVariables;
+ using VolumeVariables = typename GridVariables::VolumeVariables;
+ using ElementVolumeVariables = typename ElementVariables::ElementVolumeVariables;
+ using ElementFluxVariablesCache = typename ElementVariables::ElementFluxVariablesCache;
+
+ // The grid geometry on which the scheme operates
+ using GridGeometry = typename GridVariables::GridGeometry;
+ using FVElementGeometry = typename GridGeometry::LocalView;
+ using SubControlVolume = typename GridGeometry::SubControlVolume;
+ using SubControlVolumeFace = typename GridGeometry::SubControlVolumeFace;
+ using Extrusion = Extrusion_t;
+
+ using GridView = typename GridGeometry::GridView;
+ using Element = typename GridView::template Codim<0>::Entity;
+
+ using Problem = std::decay_t<decltype(std::declval<GridVariables>().gridVolVars().problem())>;
+ using Indices = typename ModelTraits::Indices;
+
+ static constexpr int numPhases = ModelTraits::numFluidPhases();
+ static constexpr int conti0EqIdx = ModelTraits::Indices::conti0EqIdx;
+ static constexpr bool enableEnergyBalance = ModelTraits::enableEnergyBalance();
+
+public:
+ //! export the type of context on which this class operates
+ using LocalContext = LC;
+
+ //! export the type used to store scalar values for all equations
+ using typename ParentType::NumEqVector;
+
+ // export the types of the flux/source/storage terms
+ using typename ParentType::FluxTerm;
+ using typename ParentType::SourceTerm;
+ using typename ParentType::StorageTerm;
+
+ /*!
+ * \brief Compute the storage term of the equations for the given sub-control volume
+ * \param problem The problem to be solved (could store additionally required quantities)
+ * \param context The element-stencil-local required data
+ * \param scv The sub-control volume
+ */
+ template
+ static StorageTerm storage(const Problem& problem,
+ const LocalContext& context,
+ const SubControlVolume& scv)
+ {
+ const auto& volVars = context.elementVariables().elemVolVars()[scv];
+
+ StorageTerm storage(0.0);
+ for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx)
+ {
+ auto eqIdx = conti0EqIdx + phaseIdx;
+ storage[eqIdx] = volVars.porosity()
+ * volVars.density(phaseIdx)
+ * volVars.saturation(phaseIdx);
+
+ // The energy storage in the fluid phase with index phaseIdx
+ if constexpr (enableEnergyBalance)
+ EnergyOperators::addFluidPhaseStorage(storage, scv, context, phaseIdx);
+ }
+
+ // The energy storage in the solid matrix
+ if constexpr (enableEnergyBalance)
+ EnergyOperators::addSolidPhaseStorage(storage, scv, context);
+
+ // multiply with volume
+ storage *= Extrusion::volume(scv)*volVars.extrusionFactor();
+
+ return storage;
+ }
+
+ /*!
+ * \brief Compute the flux term of the equations for the given sub-control volume face
+ * \param problem The problem to be solved (could store additionally required quantities)
+ * \param context The element-stencil-local required data
+ * \param scvf The sub-control volume face for which the flux term is to be computed
+ * \note This overloads the corresponding interface of the base operators class
+ */
+ template
+ static FluxTerm flux(const Problem& problem,
+ const LocalContext& context,
+ const SubControlVolumeFace& scvf)
+ {
+ FluxVariables fluxVars;
+ fluxVars.init(context, scvf);
+
+ NumEqVector flux(0.0);
+ for (int phaseIdx = 0; phaseIdx < numPhases; ++phaseIdx)
+ {
+ // the physical quantities for which we perform upwinding
+ auto upwindTerm = [phaseIdx](const auto& volVars)
+ { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx); };
+
+ auto eqIdx = conti0EqIdx + phaseIdx;
+ flux[eqIdx] = fluxVars.advectiveFlux(phaseIdx, upwindTerm);
+
+ // Add advective phase energy fluxes. For isothermal model the contribution is zero.
+ if constexpr (enableEnergyBalance)
+ EnergyOperators::addHeatConvectionFlux(flux, fluxVars, phaseIdx);
+ }
+
+ // Add diffusive energy fluxes. For isothermal model the contribution is zero.
+ if constexpr (enableEnergyBalance)
+ EnergyOperators::addHeatConductionFlux(flux, fluxVars);
+
+ return flux;
+ }
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
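A brief sketch (not part of the patch) of how a model is expected to wire up these operators. The template argument order follows the \tparam documentation above, since the actual class declaration is not fully visible in this rendering; the surrounding type names are assumptions:

    // hypothetical model-specific type selection
    using ElementVariables = typename GridVariables::LocalView;
    using FluxVariables = Dumux::Experimental::PorousMediumFluxVariables<TypeTag>;
    using Operators = Dumux::Experimental::FVImmiscibleOperators<ModelTraits, FluxVariables, ElementVariables>;

    // the assembly machinery then evaluates, e.g., the storage term per sub-control volume
    const auto storage = Operators::storage(problem, context, scv);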
diff --git a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
index 24dc71bd458be782c29a4934952dbb8da06dcd4d..5db30722a0709c13579cd8e1c1fdd152a46856cb 100644
--- a/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
+++ b/dumux/porousmediumflow/nonisothermal/CMakeLists.txt
@@ -3,5 +3,6 @@ indices.hh
iofields.hh
localresidual.hh
model.hh
+operators.hh
volumevariables.hh
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dumux/porousmediumflow/nonisothermal)
diff --git a/dumux/porousmediumflow/nonisothermal/operators.hh b/dumux/porousmediumflow/nonisothermal/operators.hh
new file mode 100644
index 0000000000000000000000000000000000000000..0bff501a2c1cc9fd84d453af72b17938a1024a59
--- /dev/null
+++ b/dumux/porousmediumflow/nonisothermal/operators.hh
@@ -0,0 +1,143 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*****************************************************************************
+ * See the file COPYING for full copying permissions. *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, either version 3 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see . *
+ *****************************************************************************/
+/*!
+ * \file
+ * \ingroup NIModel
+ * \brief Sub-control entity-local evaluation of the operators
+ * involved in the system of equations of non-isothermal models
+ * that assume thermal equilibrium between all phases.
+ */
+#ifndef DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH
+#define DUMUX_FV_NON_ISOTHERMAL_OPERATORS_HH
+
+namespace Dumux::Experimental {
+
+/*!
+ * \ingroup NIModel
+ * \brief Sub-control entity-local evaluation of the operators
+ * involved in the system of equations of non-isothermal models
+ * that assume thermal equilibrium between all phases.
+ * \tparam ModelTraits Model-specific traits.
+ * \tparam LC Element-local context (geometry & primary/secondary variables)
+ */
+template<class ModelTraits, class LC>
+class FVNonIsothermalOperators
+{
+ // The variables required for the evaluation of the equation
+ using ElementVariables = typename LC::ElementVariables;
+
+ // The grid geometry on which the scheme operates
+ using GridGeometry = typename LC::ElementGridGeometry::GridGeometry;
+ using SubControlVolume = typename GridGeometry::SubControlVolume;
+
+public:
+
+ //! export the type of context on which this class operates
+ using LocalContext = LC;
+
+ /*!
+ * \brief The energy storage in the fluid phase with index phaseIdx
+ *
+ * \param storage The storage term to which the energy contribution is added
+ * \param scv The sub-control volume
+ * \param context The element-local context (primary/secondary variables)
+ * \param phaseIdx The phase index
+ */
+ template<class NumEqVector>
+ static void addFluidPhaseStorage(NumEqVector& storage,
+ const SubControlVolume& scv,
+ const LocalContext& context,
+ int phaseIdx)
+ {
+ const auto& volVars = context.elementVariables().elemVolVars()[scv];
+ storage[ModelTraits::Indices::energyEqIdx] += volVars.porosity()
+ *volVars.density(phaseIdx)
+ *volVars.internalEnergy(phaseIdx)
+ *volVars.saturation(phaseIdx);
+ }
+
+ /*!
+ * \brief The energy storage in the solid matrix
+ *
+ * \param storage The storage term to which the energy contribution is added
+ * \param scv The sub-control volume
+ * \param context The element-local context (primary/secondary variables)
+ */
+ template<class NumEqVector>
+ static void addSolidPhaseStorage(NumEqVector& storage,
+ const SubControlVolume& scv,
+ const LocalContext& context)
+ {
+ const auto& volVars = context.elementVariables().elemVolVars()[scv];
+ storage[ModelTraits::Indices::energyEqIdx] += volVars.temperature()
+ *volVars.solidHeatCapacity()
+ *volVars.solidDensity()
+ *(1.0 - volVars.porosity());
+ }
+
+ /*!
+ * \brief The advective phase energy fluxes
+ *
+ * \param flux The flux
+ * \param fluxVars The flux variables.
+ * \param phaseIdx The phase index
+ */
+ template<class NumEqVector, class FluxVariables>
+ static void addHeatConvectionFlux(NumEqVector& flux,
+ FluxVariables& fluxVars,
+ int phaseIdx)
+ {
+ auto upwindTerm = [phaseIdx](const auto& volVars)
+ { return volVars.density(phaseIdx)*volVars.mobility(phaseIdx)*volVars.enthalpy(phaseIdx); };
+
+ flux[ModelTraits::Indices::energyEqIdx] += fluxVars.advectiveFlux(phaseIdx, upwindTerm);
+ }
+
+ /*!
+ * \brief The diffusive energy fluxes
+ *
+ * \param flux The flux
+ * \param fluxVars The flux variables.
+ */
+ template<class NumEqVector, class FluxVariables>
+ static void addHeatConductionFlux(NumEqVector& flux,
+ FluxVariables& fluxVars)
+ {
+ flux[ModelTraits::Indices::energyEqIdx] += fluxVars.heatConductionFlux();
+ }
+
+ /*!
+ * \brief Heat transfer between the phases for nonequilibrium models
+ *
+ * \param source The source term to which the energy contribution is added
+ * \param context The element-local context (primary/secondary variables)
+ * \param scv The sub-control volume over which we integrate the source term
+ */
+ template<class NumEqVector>
+ static void addEnergySource(NumEqVector& source,
+ const LocalContext& context,
+ const SubControlVolume &scv)
+ {}
+};
+
+} // end namespace Dumux::Experimental
+
+#endif
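A sketch (not part of the patch, all names are assumptions) of how a nonequilibrium model could build on this class by providing an actual interphase energy source, while inheriting the remaining operators unchanged:

    // hypothetical specialization point for nonequilibrium models (illustration only)
    template<class ModelTraits, class LC>
    struct MyNonEquilibriumEnergyOperators
    : public Dumux::Experimental::FVNonIsothermalOperators<ModelTraits, LC>
    {
        template<class NumEqVector, class SubControlVolume>
        static void addEnergySource(NumEqVector& source,
                                    const LC& context,
                                    const SubControlVolume& scv)
        {
            // add the model-specific interphase heat exchange term to
            // source[ModelTraits::Indices::energyEqIdx] here (omitted in this sketch)
        }
    };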
diff --git a/dumux/porousmediumflow/velocity.hh b/dumux/porousmediumflow/velocity.hh
index e34bf588da6cabc767895a637264fcb61363f456..2f7fa122fd8fc554cb7c9af35c13a5ab3e6c5e8f 100644
--- a/dumux/porousmediumflow/velocity.hh
+++ b/dumux/porousmediumflow/velocity.hh
@@ -36,6 +36,7 @@
#include
#include
#include
+#include
#include
namespace Dumux {
@@ -108,7 +109,7 @@ public:
* \param gridVariables The grid variables
*/
PorousMediumFlowVelocity(const GridVariables& gridVariables)
- : problem_(gridVariables.curGridVolVars().problem())
+ : problem_(getProblem_(gridVariables))
, gridGeometry_(gridVariables.gridGeometry())
, gridVariables_(gridVariables)
{
@@ -460,6 +461,16 @@ private:
}
private:
+
+ // extract problem from grid variables
+ const Problem& getProblem_(const GridVariables& gv)
+ {
+ if constexpr (Dune::models())
+ return gv.gridVolVars().problem();
+ else
+ return gv.curGridVolVars().problem();
+ }
+
const Problem& problem_;
const GridGeometry& gridGeometry_;
const GridVariables& gridVariables_;
diff --git a/dumux/porousmediumflow/velocityoutput.hh b/dumux/porousmediumflow/velocityoutput.hh
index a3319b14e86f0fb8e25b5c0f1c9ac086487f39d3..78090eefd0fe70d1b8da46786f29c46e8ce38334 100644
--- a/dumux/porousmediumflow/velocityoutput.hh
+++ b/dumux/porousmediumflow/velocityoutput.hh
@@ -29,9 +29,11 @@
#include
#include
+
#include
#include
#include
+#include
#include
namespace Dumux {
@@ -67,6 +69,15 @@ class PorousMediumFlowVelocityOutput : public VelocityOutput
using Problem = typename GridVolumeVariables::Problem;
using VelocityBackend = PorousMediumFlowVelocity;
+ struct hasCurGridVolVars
+ {
+ template<class GV>
+ auto operator()(const GV& gv) -> decltype(gv.curGridVolVars()) {}
+ };
+
+ static constexpr auto isOldGVInterface =
+ decltype(isValid(hasCurGridVolVars())(std::declval<GridVariables>()))::value;
+
public:
using VelocityVector = typename ParentType::VelocityVector;
@@ -78,7 +89,11 @@ public:
PorousMediumFlowVelocityOutput(const GridVariables& gridVariables)
{
// check, if velocity output can be used (works only for cubes so far)
- enableOutput_ = getParamFromGroup(gridVariables.curGridVolVars().problem().paramGroup(), "Vtk.AddVelocity");
+ if constexpr (Dune::models())
+ enableOutput_ = getParamFromGroup(gridVariables.gridVolVars().problem().paramGroup(), "Vtk.AddVelocity");
+ else
+ enableOutput_ = getParamFromGroup(gridVariables.curGridVolVars().problem().paramGroup(), "Vtk.AddVelocity");
+
if (enableOutput_)
velocityBackend = std::make_unique(gridVariables);
}
diff --git a/test/porousmediumflow/1p/compressible/instationary/CMakeLists.txt b/test/porousmediumflow/1p/compressible/instationary/CMakeLists.txt
index f824ba4a8f4d20d4fc5007ec33d98aa55a3f53d5..ed0822004deaaa2d9804681718565acf4e4189c1 100644
--- a/test/porousmediumflow/1p/compressible/instationary/CMakeLists.txt
+++ b/test/porousmediumflow/1p/compressible/instationary/CMakeLists.txt
@@ -31,6 +31,17 @@ dumux_add_test(NAME test_1p_compressible_instationary_box
${CMAKE_CURRENT_BINARY_DIR}/test_1p_compressible_instationary_box-00010.vtu
--command "${CMAKE_CURRENT_BINARY_DIR}/test_1p_compressible_instationary_box params.input -Problem.Name test_1p_compressible_instationary_box")
+dumux_add_test(NAME test_1p_compressible_instationary_box_experimental
+ LABELS porousmediumflow 1p experimental
+ SOURCES main_experimental.cc
+ COMPILE_DEFINITIONS TYPETAG=OnePCompressibleBox
+ COMPILE_DEFINITIONS EXPERIMENTAL=1
+ COMMAND ${CMAKE_SOURCE_DIR}/bin/testing/runtest.py
+ CMD_ARGS --script fuzzy
+ --files ${CMAKE_SOURCE_DIR}/test/references/test_1p_box-reference.vtu
+ ${CMAKE_CURRENT_BINARY_DIR}/test_1p_compressible_instationary_box_experimental-00010.vtu
+ --command "${CMAKE_CURRENT_BINARY_DIR}/test_1p_compressible_instationary_box_experimental params.input -Problem.Name test_1p_compressible_instationary_box_experimental")
+
dumux_add_test(NAME test_1p_compressible_instationary_tpfa_experimental
LABELS porousmediumflow 1p experimental
SOURCES main_experimental.cc
diff --git a/test/porousmediumflow/1p/compressible/instationary/gridvariables.hh b/test/porousmediumflow/1p/compressible/instationary/gridvariables.hh
deleted file mode 100644
index c9234e9c05802d15f5e37828f6c9d6323010e6ac..0000000000000000000000000000000000000000
--- a/test/porousmediumflow/1p/compressible/instationary/gridvariables.hh
+++ /dev/null
@@ -1,68 +0,0 @@
-// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
-// vi: set et ts=4 sw=4 sts=4:
-/*****************************************************************************
- * See the file COPYING for full copying permissions. *
- * *
- * This program is free software: you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation, either version 3 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program. If not, see . *
- *****************************************************************************/
-/*!
- * \file
- * \ingroup OnePTests
- * \brief Wrapper around the current FVGridVariables to fulfill the layout
- * of the new grid variables to test grid variables-based assembly.
- */
-#ifndef DUMUX_COMPRESSIBLE_ONEP_TEST_GRID_VARIABLES_HH
-#define DUMUX_COMPRESSIBLE_ONEP_TEST_GRID_VARIABLES_HH
-
-#include
-
-namespace Dumux::OnePCompressibleTest {
-
-template
-class TestGridVariables
-: public BaseGridVariables
-, public Dumux::Experimental::GridVariables
-{
- using ExperimentalBase = Dumux::Experimental::GridVariables;
-
-public:
- // export some types to avoid ambiguity
- using GridGeometry = GG;
- using Scalar = typename BaseGridVariables::Scalar;
-
- template
- TestGridVariables(std::shared_ptr