From 5e8fae771af2c6275585a373bb418ef7b470c1ec Mon Sep 17 00:00:00 2001
From: Christoph Grueninger <christoph.grueninger@iws.uni-stuttgart.de>
Date: Mon, 13 Jul 2015 09:35:32 +0000
Subject: [PATCH] [cleanup] Use shared_ptr from std:: instead of Dune::

Same for make_shared and some includes.
(reviewed by fetzer)

git-svn-id: svn://svn.iws.uni-stuttgart.de/DUMUX/dumux/trunk@15050 2fb0f335-1f38-0410-981e-8018bf24f1b0
---
 .../1p/diffusion/diffusionproblem1p.hh        |  6 +-
 .../2p/diffusion/diffusionproblem2p.hh        | 16 ++--
 dumux/decoupled/2p/impes/impesproblem2p.hh    |  4 +-
 .../2p/transport/fv/fvsaturation2p.hh         | 12 +--
 .../2p/transport/transportproblem2p.hh        |  4 +-
 dumux/decoupled/2p2c/celldata2p2c.hh          |  4 +-
 .../2p2c/celldata2p2cmultiphysics.hh          |  6 +-
 dumux/decoupled/common/fv/fvtransport.hh      |  4 +-
 dumux/decoupled/common/impetproblem.hh        | 24 +++---
 dumux/decoupled/common/onemodelproblem.hh     | 18 ++---
 dumux/geomechanics/el2p/el2pamgbackend.hh     |  6 +-
 dumux/geomechanics/el2p/el2passembler.hh      | 52 ++++++-------
 dumux/geomechanics/el2p/el2pbasemodel.hh      | 14 ++--
 dumux/implicit/box/boxassembler.hh            |  2 +-
 dumux/implicit/cellcentered/ccassembler.hh    |  2 +-
 dumux/implicit/common/implicitassembler.hh    |  2 +-
 dumux/implicit/common/implicitmodel.hh        |  6 +-
 .../common/implicitporousmediaproblem.hh      |  4 +-
 dumux/implicit/common/implicitproblem.hh      |  8 +-
 dumux/implicit/mpnc/mpncmodel.hh              |  4 +-
 dumux/io/cubegridcreator.hh                   |  2 +-
 dumux/io/simplexgridcreator.hh                |  2 +-
 dumux/io/vtkmultiwriter.hh                    | 10 +--
 dumux/linear/boxlinearsolver.hh               | 12 +--
 dumux/linear/domesticoverlapfrombcrsmatrix.hh |  8 +-
 dumux/linear/impetbicgstabilu0solver.hh       | 12 +--
 dumux/linear/overlappingbcrsmatrix.hh         | 60 +++++++--------
 dumux/linear/overlappingblockvector.hh        | 30 ++++----
 .../common/multidomainassembler.hh            | 76 +++++++++----------
 dumux/multidomain/common/multidomainmodel.hh  |  8 +-
 dumux/nonlinear/newtonconvergencewriter.hh    |  4 +-
 test/decoupled/1p/test_diffusion.cc           |  2 +-
 .../2cnistokes2p2cni/test_2cnistokes2p2cni.cc |  2 +-
 .../2cnizeroeq2p2cni/test_2cnizeroeq2p2cni.cc |  2 +-
 .../2cstokes2p2c/test_2cstokes2p2c.cc         |  2 +-
 .../2czeroeq2p2c/test_2czeroeq2p2c.cc         |  2 +-
 36 files changed, 215 insertions(+), 217 deletions(-)

diff --git a/dumux/decoupled/1p/diffusion/diffusionproblem1p.hh b/dumux/decoupled/1p/diffusion/diffusionproblem1p.hh
index b22856e5f9..d5e801a695 100644
--- a/dumux/decoupled/1p/diffusion/diffusionproblem1p.hh
+++ b/dumux/decoupled/1p/diffusion/diffusionproblem1p.hh
@@ -76,7 +76,7 @@ public:
     DiffusionProblem1P(TimeManager &timeManager, const GridView &gridView)
     : ParentType(timeManager, gridView), gravity_(0)
     {
-        spatialParams_ = Dune::make_shared<SpatialParams>(gridView);
+        spatialParams_ = std::make_shared<SpatialParams>(gridView);
         gravity_ = 0;
         if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity))
             gravity_[dim - 1] = -9.81;
@@ -104,7 +104,7 @@ public:
     DiffusionProblem1P(const GridView &gridView)
     : ParentType(gridView, false), gravity_(0)
     {
-        spatialParams_ = Dune::make_shared<SpatialParams>(gridView);
+        spatialParams_ = std::make_shared<SpatialParams>(gridView);
         gravity_ = 0;
         if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity))
             gravity_[dim - 1] = -9.81;
@@ -240,7 +240,7 @@ private:
     GlobalPosition gravity_;

     // fluids and material properties
-    Dune::shared_ptr<SpatialParams> spatialParams_;
+    std::shared_ptr<SpatialParams> spatialParams_;
 };

 }
diff --git a/dumux/decoupled/2p/diffusion/diffusionproblem2p.hh b/dumux/decoupled/2p/diffusion/diffusionproblem2p.hh
index 8a5ad4f244..4c9eb05726 100644
---
a/dumux/decoupled/2p/diffusion/diffusionproblem2p.hh +++ b/dumux/decoupled/2p/diffusion/diffusionproblem2p.hh @@ -73,12 +73,12 @@ public: DiffusionProblem2P(TimeManager &timeManager, const GridView &gridView) : ParentType(timeManager, gridView), gravity_(0) { - spatialParams_ = Dune::make_shared<SpatialParams>(gridView); + spatialParams_ = std::make_shared<SpatialParams>(gridView); gravity_ = 0; if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) gravity_[dim - 1] = -9.81; - pressModel_ = Dune::make_shared<PressureModel>(asImp_()); + pressModel_ = std::make_shared<PressureModel>(asImp_()); } /*! * \brief Constructs a DiffusionProblem2P object @@ -95,7 +95,7 @@ public: if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) gravity_[dim - 1] = -9.81; - pressModel_ = Dune::make_shared<PressureModel>(asImp_()); + pressModel_ = std::make_shared<PressureModel>(asImp_()); } /*! @@ -106,12 +106,12 @@ public: DiffusionProblem2P(const GridView &gridView) : ParentType(gridView, false), gravity_(0) { - spatialParams_ = Dune::make_shared<SpatialParams>(gridView); + spatialParams_ = std::make_shared<SpatialParams>(gridView); gravity_ = 0; if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) gravity_[dim - 1] = -9.81; - pressModel_ = Dune::make_shared<PressureModel>(asImp_()); + pressModel_ = std::make_shared<PressureModel>(asImp_()); } /*! * \brief Constructs a DiffusionProblem2P object @@ -127,7 +127,7 @@ public: if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) gravity_[dim - 1] = -9.81; - pressModel_ = Dune::make_shared<PressureModel>(asImp_()); + pressModel_ = std::make_shared<PressureModel>(asImp_()); } /*! @@ -248,9 +248,9 @@ private: GlobalPosition gravity_; // fluids and material properties - Dune::shared_ptr<SpatialParams> spatialParams_; + std::shared_ptr<SpatialParams> spatialParams_; bool newSpatialParams_; - Dune::shared_ptr<PressureModel> pressModel_; + std::shared_ptr<PressureModel> pressModel_; }; } diff --git a/dumux/decoupled/2p/impes/impesproblem2p.hh b/dumux/decoupled/2p/impes/impesproblem2p.hh index f5fb7c5cb4..3f06589d8e 100644 --- a/dumux/decoupled/2p/impes/impesproblem2p.hh +++ b/dumux/decoupled/2p/impes/impesproblem2p.hh @@ -79,7 +79,7 @@ public: : ParentType(timeManager, gridView), gravity_(0) { - spatialParams_ = Dune::make_shared<SpatialParams>(gridView); + spatialParams_ = std::make_shared<SpatialParams>(gridView); gravity_ = 0; if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) @@ -192,7 +192,7 @@ private: GlobalPosition gravity_; // fluids and material properties - Dune::shared_ptr<SpatialParams> spatialParams_; + std::shared_ptr<SpatialParams> spatialParams_; }; } diff --git a/dumux/decoupled/2p/transport/fv/fvsaturation2p.hh b/dumux/decoupled/2p/transport/fv/fvsaturation2p.hh index 6c05265d20..b8ad02192f 100644 --- a/dumux/decoupled/2p/transport/fv/fvsaturation2p.hh +++ b/dumux/decoupled/2p/transport/fv/fvsaturation2p.hh @@ -482,9 +482,9 @@ public: DUNE_THROW(Dune::NotImplemented, "Velocity type not supported!"); } - capillaryFlux_ = Dune::make_shared<CapillaryFlux>(problem); - gravityFlux_ = Dune::make_shared<GravityFlux>(problem); - velocity_ = Dune::make_shared<Velocity>(problem); + capillaryFlux_ = std::make_shared<CapillaryFlux>(problem); + gravityFlux_ = std::make_shared<GravityFlux>(problem); + velocity_ = std::make_shared<Velocity>(problem); vtkOutputLevel_ = GET_PARAM_FROM_GROUP(TypeTag, int, Vtk, OutputLevel); porosityThreshold_ = GET_PARAM_FROM_GROUP(TypeTag, Scalar, Impet, PorosityThreshold); @@ 
-500,9 +500,9 @@ private: { return *static_cast<const Implementation *>(this); } Problem& problem_; - Dune::shared_ptr<Velocity> velocity_; - Dune::shared_ptr<CapillaryFlux> capillaryFlux_; - Dune::shared_ptr<GravityFlux> gravityFlux_; + std::shared_ptr<Velocity> velocity_; + std::shared_ptr<CapillaryFlux> capillaryFlux_; + std::shared_ptr<GravityFlux> gravityFlux_; int vtkOutputLevel_; Scalar porosityThreshold_; diff --git a/dumux/decoupled/2p/transport/transportproblem2p.hh b/dumux/decoupled/2p/transport/transportproblem2p.hh index 7ecb113c85..1a39cf4bc9 100644 --- a/dumux/decoupled/2p/transport/transportproblem2p.hh +++ b/dumux/decoupled/2p/transport/transportproblem2p.hh @@ -98,7 +98,7 @@ public: { cFLFactor_ = GET_PARAM_FROM_GROUP(TypeTag, Scalar, Impet, CFLFactor); - spatialParams_ = Dune::make_shared<SpatialParams>(gridView); + spatialParams_ = std::make_shared<SpatialParams>(gridView); gravity_ = 0; if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) @@ -241,7 +241,7 @@ private: GlobalPosition gravity_; // material properties - Dune::shared_ptr<SpatialParams> spatialParams_; + std::shared_ptr<SpatialParams> spatialParams_; Scalar cFLFactor_; }; diff --git a/dumux/decoupled/2p2c/celldata2p2c.hh b/dumux/decoupled/2p2c/celldata2p2c.hh index e0e263e348..0d47bdcfa3 100644 --- a/dumux/decoupled/2p2c/celldata2p2c.hh +++ b/dumux/decoupled/2p2c/celldata2p2c.hh @@ -78,7 +78,7 @@ protected: int globalIdx_; Scalar perimeter_; - Dune::shared_ptr<FluidState> fluidState_; + std::shared_ptr<FluidState> fluidState_; FluxData fluxData_; public: @@ -353,7 +353,7 @@ public: FluidState& manipulateFluidState() { if(!fluidState_) - fluidState_ = Dune::make_shared<FluidState>(); + fluidState_ = std::make_shared<FluidState>(); return *fluidState_; } diff --git a/dumux/decoupled/2p2c/celldata2p2cmultiphysics.hh b/dumux/decoupled/2p2c/celldata2p2cmultiphysics.hh index db661fe9b1..60797378a3 100644 --- a/dumux/decoupled/2p2c/celldata2p2cmultiphysics.hh +++ b/dumux/decoupled/2p2c/celldata2p2cmultiphysics.hh @@ -62,7 +62,7 @@ private: private: int subdomain_; int fluidStateType_; - Dune::shared_ptr<SimpleFluidState> simpleFluidState_; + std::shared_ptr<SimpleFluidState> simpleFluidState_; // FluxData fluxData_; public: @@ -283,7 +283,7 @@ public: } if(!simpleFluidState_) - simpleFluidState_ = Dune::make_shared<SimpleFluidState>(); + simpleFluidState_ = std::make_shared<SimpleFluidState>(); return *simpleFluidState_; } //! 
Allows manipulation of the complex fluid state @@ -301,7 +301,7 @@ public: if(!this->fluidState_) { - this->fluidState_ = Dune::make_shared<FluidState>(); + this->fluidState_ = std::make_shared<FluidState>(); // properly initialize pressure, since it is evaluated later: this->fluidState_->setPressure(wPhaseIdx, 1e5); this->fluidState_->setPressure(nPhaseIdx, 1e5); diff --git a/dumux/decoupled/common/fv/fvtransport.hh b/dumux/decoupled/common/fv/fvtransport.hh index a6e46aabd5..3084411894 100644 --- a/dumux/decoupled/common/fv/fvtransport.hh +++ b/dumux/decoupled/common/fv/fvtransport.hh @@ -223,7 +223,7 @@ public: problem_(problem), switchNormals_(GET_PARAM_FROM_GROUP(TypeTag, bool, Impet, SwitchNormals)), subCFLFactor_(1.0), accumulatedDt_(0), dtThreshold_(1e-6) { - evalCflFluxFunction_ = Dune::make_shared<EvalCflFluxFunction>(problem); + evalCflFluxFunction_ = std::make_shared<EvalCflFluxFunction>(problem); Scalar cFLFactor = GET_PARAM_FROM_GROUP(TypeTag, Scalar, Impet, CFLFactor); subCFLFactor_ = std::min(GET_PARAM_FROM_GROUP(TypeTag, Scalar, Impet, SubCFLFactor), cFLFactor); @@ -255,7 +255,7 @@ private: Problem& problem_; bool switchNormals_; - Dune::shared_ptr<EvalCflFluxFunction> evalCflFluxFunction_; + std::shared_ptr<EvalCflFluxFunction> evalCflFluxFunction_; std::vector<LocalTimesteppingData> timeStepData_; bool localTimeStepping_; Scalar subCFLFactor_; diff --git a/dumux/decoupled/common/impetproblem.hh b/dumux/decoupled/common/impetproblem.hh index 73496e3f47..e09466ca20 100644 --- a/dumux/decoupled/common/impetproblem.hh +++ b/dumux/decoupled/common/impetproblem.hh @@ -124,14 +124,14 @@ public: bBoxMax_[i] = gridView.comm().max(bBoxMax_[i]); } - pressModel_ = Dune::make_shared<PressureModel>(asImp_()); + pressModel_ = std::make_shared<PressureModel>(asImp_()); - transportModel_ = Dune::make_shared<TransportModel>(asImp_()); - model_ = Dune::make_shared<IMPETModel>(asImp_()) ; + transportModel_ = std::make_shared<TransportModel>(asImp_()); + model_ = std::make_shared<IMPETModel>(asImp_()) ; // create an Object to handle adaptive grids if (adaptiveGrid) - gridAdapt_ = Dune::make_shared<GridAdaptModel>(asImp_()); + gridAdapt_ = std::make_shared<GridAdaptModel>(asImp_()); vtkOutputLevel_ = GET_PARAM_FROM_GROUP(TypeTag, int, Vtk, OutputLevel); dtVariationRestrictionFactor_ = GET_PARAM_FROM_GROUP(TypeTag, Scalar, Impet, DtVariationRestrictionFactor); @@ -806,7 +806,7 @@ public: std::cout << "Writing result file for current time step\n"; if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); if (adaptiveGrid) resultWriter_->gridChanged(); resultWriter_->beginWrite(timeManager().time() + timeManager().timeStepSize()); @@ -822,14 +822,14 @@ protected: VtkMultiWriter& resultWriter() { if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); return *resultWriter_; } //! 
\copydoc Dumux::IMPETProblem::resultWriter() VtkMultiWriter& resultWriter() const { if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); return *resultWriter_; } @@ -856,15 +856,15 @@ private: Variables variables_; - Dune::shared_ptr<PressureModel> pressModel_;//!< object including the pressure model - Dune::shared_ptr<TransportModel> transportModel_;//!< object including the saturation model - Dune::shared_ptr<IMPETModel> model_; + std::shared_ptr<PressureModel> pressModel_;//!< object including the pressure model + std::shared_ptr<TransportModel> transportModel_;//!< object including the saturation model + std::shared_ptr<IMPETModel> model_; - Dune::shared_ptr<VtkMultiWriter> resultWriter_; + std::shared_ptr<VtkMultiWriter> resultWriter_; int outputInterval_; Scalar outputTimeInterval_; int vtkOutputLevel_; - Dune::shared_ptr<GridAdaptModel> gridAdapt_; + std::shared_ptr<GridAdaptModel> gridAdapt_; Scalar dtVariationRestrictionFactor_; }; diff --git a/dumux/decoupled/common/onemodelproblem.hh b/dumux/decoupled/common/onemodelproblem.hh index 79d355bd97..81081ad382 100644 --- a/dumux/decoupled/common/onemodelproblem.hh +++ b/dumux/decoupled/common/onemodelproblem.hh @@ -107,9 +107,9 @@ public: } } - timeManager_ = Dune::make_shared<TimeManager>(verbose); + timeManager_ = std::make_shared<TimeManager>(verbose); - model_ = Dune::make_shared<Model>(asImp_()) ; + model_ = std::make_shared<Model>(asImp_()) ; } //! Constructs an object of type OneModelProblemProblem @@ -137,7 +137,7 @@ public: timeManager_ = Dune::stackobject_to_shared_ptr<TimeManager>(timeManager); - model_ = Dune::make_shared<Model>(asImp_()) ; + model_ = std::make_shared<Model>(asImp_()) ; } /*! 
@@ -451,7 +451,7 @@ public: if (verbose && gridView().comm().rank() == 0) std::cout << "Writing result file for current time step\n"; if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView(), asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView(), asImp_().name()); resultWriter_->beginWrite(timeManager().time() + timeManager().timeStepSize()); model().addOutputVtkFields(*resultWriter_); asImp_().addOutputVtkFields(); @@ -627,14 +627,14 @@ protected: VtkMultiWriter& resultWriter() { if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); return *resultWriter_; } VtkMultiWriter& resultWriter() const { if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); return *resultWriter_; } @@ -655,13 +655,13 @@ private: GlobalPosition bBoxMin_; GlobalPosition bBoxMax_; - Dune::shared_ptr<TimeManager> timeManager_; + std::shared_ptr<TimeManager> timeManager_; Variables variables_; - Dune::shared_ptr<Model> model_; + std::shared_ptr<Model> model_; - Dune::shared_ptr<VtkMultiWriter> resultWriter_; + std::shared_ptr<VtkMultiWriter> resultWriter_; int outputInterval_; Scalar outputTimeInterval_; }; diff --git a/dumux/geomechanics/el2p/el2pamgbackend.hh b/dumux/geomechanics/el2p/el2pamgbackend.hh index aaef7f612d..f5c4e7461f 100644 --- a/dumux/geomechanics/el2p/el2pamgbackend.hh +++ b/dumux/geomechanics/el2p/el2pamgbackend.hh @@ -79,9 +79,9 @@ private: { int numVertices = this->problem().gridView().size(dim); - aBlocked_ = Dune::make_shared<BlockMatrix>(numVertices, numVertices, BlockMatrix::random); - xBlocked_ = Dune::make_shared<BlockVector>(numVertices); - bBlocked_ = Dune::make_shared<BlockVector>(numVertices); + aBlocked_ = std::make_shared<BlockMatrix>(numVertices, numVertices, BlockMatrix::random); + xBlocked_ = std::make_shared<BlockVector>(numVertices); + bBlocked_ = std::make_shared<BlockVector>(numVertices); // find out the global indices of the neighboring vertices of // each vertex diff --git a/dumux/geomechanics/el2p/el2passembler.hh b/dumux/geomechanics/el2p/el2passembler.hh index 52767c4c50..6c92bd0f4c 100644 --- a/dumux/geomechanics/el2p/el2passembler.hh +++ b/dumux/geomechanics/el2p/el2passembler.hh @@ -143,35 +143,35 @@ public: { problemPtr_ = &problem; - constraints_ = Dune::make_shared<Constraints>(); + constraints_ = std::make_shared<Constraints>(); - pressureFEM_ = Dune::make_shared<PressureFEM>(problemPtr_->gridView()); - pressureScalarGFS_ = Dune::make_shared<PressureScalarGFS>(problemPtr_->gridView(), *pressureFEM_, *constraints_); - pressureGFS_ = Dune::make_shared<PressureGFS>(*pressureScalarGFS_); + pressureFEM_ = std::make_shared<PressureFEM>(problemPtr_->gridView()); + pressureScalarGFS_ = std::make_shared<PressureScalarGFS>(problemPtr_->gridView(), *pressureFEM_, *constraints_); + pressureGFS_ = std::make_shared<PressureGFS>(*pressureScalarGFS_); - displacementFEM_ = Dune::make_shared<DisplacementFEM>(problemPtr_->gridView()); - displacementScalarGFS_ = Dune::make_shared<DisplacementScalarGFS>(problemPtr_->gridView(), *displacementFEM_, *constraints_); - displacementGFS_ = Dune::make_shared<DisplacementGFS>(*displacementScalarGFS_); + displacementFEM_ = std::make_shared<DisplacementFEM>(problemPtr_->gridView()); + displacementScalarGFS_ = 
std::make_shared<DisplacementScalarGFS>(problemPtr_->gridView(), *displacementFEM_, *constraints_); + displacementGFS_ = std::make_shared<DisplacementGFS>(*displacementScalarGFS_); - gridFunctionSpace_ = Dune::make_shared<GridFunctionSpace>(*pressureGFS_, *displacementGFS_); + gridFunctionSpace_ = std::make_shared<GridFunctionSpace>(*pressureGFS_, *displacementGFS_); - constraintsTrafo_ = Dune::make_shared<ConstraintsTrafo>(); + constraintsTrafo_ = std::make_shared<ConstraintsTrafo>(); // initialize the grid operator spaces - localOperator_ = Dune::make_shared<LocalOperator>(problemPtr_->model()); + localOperator_ = std::make_shared<LocalOperator>(problemPtr_->model()); gridOperator_ = - Dune::make_shared<GridOperator>(*gridFunctionSpace_, *constraintsTrafo_, + std::make_shared<GridOperator>(*gridFunctionSpace_, *constraintsTrafo_, *gridFunctionSpace_, *constraintsTrafo_, *localOperator_); // allocate raw matrix - matrix_ = Dune::make_shared<JacobianMatrix>(*gridOperator_); + matrix_ = std::make_shared<JacobianMatrix>(*gridOperator_); // initialize the jacobian matrix and the right hand side // vector *matrix_ = 0; reuseMatrix_ = false; - residual_ = Dune::make_shared<SolutionVector>(*gridFunctionSpace_); + residual_ = std::make_shared<SolutionVector>(*gridFunctionSpace_); int numVertices = gridView_().size(dim); int numElements = gridView_().size(0); @@ -575,9 +575,9 @@ private: Problem *problemPtr_; // the jacobian matrix - Dune::shared_ptr<JacobianMatrix> matrix_; + std::shared_ptr<JacobianMatrix> matrix_; // the right-hand side - Dune::shared_ptr<SolutionVector> residual_; + std::shared_ptr<SolutionVector> residual_; // attributes required for jacobian matrix recycling bool reuseMatrix_; @@ -594,17 +594,17 @@ private: Scalar reassembleTolerance_; - Dune::shared_ptr<Constraints> constraints_; - Dune::shared_ptr<PressureFEM> pressureFEM_; - Dune::shared_ptr<DisplacementFEM> displacementFEM_; - Dune::shared_ptr<PressureScalarGFS> pressureScalarGFS_; - Dune::shared_ptr<DisplacementScalarGFS> displacementScalarGFS_; - Dune::shared_ptr<PressureGFS> pressureGFS_; - Dune::shared_ptr<DisplacementGFS> displacementGFS_; - Dune::shared_ptr<GridFunctionSpace> gridFunctionSpace_; - Dune::shared_ptr<ConstraintsTrafo> constraintsTrafo_; - Dune::shared_ptr<LocalOperator> localOperator_; - Dune::shared_ptr<GridOperator> gridOperator_; + std::shared_ptr<Constraints> constraints_; + std::shared_ptr<PressureFEM> pressureFEM_; + std::shared_ptr<DisplacementFEM> displacementFEM_; + std::shared_ptr<PressureScalarGFS> pressureScalarGFS_; + std::shared_ptr<DisplacementScalarGFS> displacementScalarGFS_; + std::shared_ptr<PressureGFS> pressureGFS_; + std::shared_ptr<DisplacementGFS> displacementGFS_; + std::shared_ptr<GridFunctionSpace> gridFunctionSpace_; + std::shared_ptr<ConstraintsTrafo> constraintsTrafo_; + std::shared_ptr<LocalOperator> localOperator_; + std::shared_ptr<GridOperator> gridOperator_; }; } // namespace PDELab diff --git a/dumux/geomechanics/el2p/el2pbasemodel.hh b/dumux/geomechanics/el2p/el2pbasemodel.hh index e3cf522ce8..b0f7d7be85 100644 --- a/dumux/geomechanics/el2p/el2pbasemodel.hh +++ b/dumux/geomechanics/el2p/el2pbasemodel.hh @@ -105,11 +105,11 @@ public: boxVolume_.resize(numDofs); localJacobian_.init(problem_()); - jacAsm_ = Dune::make_shared<JacobianAssembler>(); + jacAsm_ = std::make_shared<JacobianAssembler>(); jacAsm_->init(problem_()); - uCur_ = Dune::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); - uPrev_ = 
Dune::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); + uCur_ = std::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); + uPrev_ = std::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); asImp_().applyInitialSolution_(); @@ -627,7 +627,7 @@ public: void resetJacobianAssembler () { jacAsm_.template reset<JacobianAssembler>(0); - jacAsm_ = Dune::make_shared<JacobianAssembler>(); + jacAsm_ = std::make_shared<JacobianAssembler>(); jacAsm_->init(problem_()); } @@ -1000,15 +1000,15 @@ protected: LocalJacobian localJacobian_; // Linearizes the problem at the current time step using the // local jacobian - Dune::shared_ptr<JacobianAssembler> jacAsm_; + std::shared_ptr<JacobianAssembler> jacAsm_; // the set of all indices of vertices on the boundary std::vector<bool> boundaryIndices_; // cur is the current iterative solution, prev the converged // solution of the previous time step - Dune::shared_ptr<SolutionVector> uCur_; - Dune::shared_ptr<SolutionVector> uPrev_; + std::shared_ptr<SolutionVector> uCur_; + std::shared_ptr<SolutionVector> uPrev_; Dune::BlockVector<Dune::FieldVector<Scalar, 1> > boxVolume_; diff --git a/dumux/implicit/box/boxassembler.hh b/dumux/implicit/box/boxassembler.hh index e4ecf0d16f..00be6f5d9d 100644 --- a/dumux/implicit/box/boxassembler.hh +++ b/dumux/implicit/box/boxassembler.hh @@ -271,7 +271,7 @@ private: int numVerticesGlobal = this->gridView_().size(dim); // allocate raw matrix - this->matrix_ = Dune::make_shared<JacobianMatrix>(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random); + this->matrix_ = std::make_shared<JacobianMatrix>(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random); // find out the global indices of the neighboring vertices of // each vertex diff --git a/dumux/implicit/cellcentered/ccassembler.hh b/dumux/implicit/cellcentered/ccassembler.hh index a5938280c1..bea9c33ee5 100644 --- a/dumux/implicit/cellcentered/ccassembler.hh +++ b/dumux/implicit/cellcentered/ccassembler.hh @@ -142,7 +142,7 @@ private: int numElements = this->gridView_().size(0); // allocate raw matrix - this->matrix_ = Dune::make_shared<JacobianMatrix>(numElements, numElements, JacobianMatrix::random); + this->matrix_ = std::make_shared<JacobianMatrix>(numElements, numElements, JacobianMatrix::random); // find out the global indices of the neighboring elements of // each element diff --git a/dumux/implicit/common/implicitassembler.hh b/dumux/implicit/common/implicitassembler.hh index 0b136696a9..04353e32b3 100644 --- a/dumux/implicit/common/implicitassembler.hh +++ b/dumux/implicit/common/implicitassembler.hh @@ -528,7 +528,7 @@ protected: Problem *problemPtr_; // the jacobian matrix - Dune::shared_ptr<JacobianMatrix> matrix_; + std::shared_ptr<JacobianMatrix> matrix_; // the right-hand side SolutionVector residual_; diff --git a/dumux/implicit/common/implicitmodel.hh b/dumux/implicit/common/implicitmodel.hh index 1f46a4ffbc..f6e80d97b8 100644 --- a/dumux/implicit/common/implicitmodel.hh +++ b/dumux/implicit/common/implicitmodel.hh @@ -109,7 +109,7 @@ public: boxVolume_.resize(numDofs); localJacobian_.init(problem_()); - jacAsm_ = Dune::make_shared<JacobianAssembler>(); + jacAsm_ = std::make_shared<JacobianAssembler>(); jacAsm_->init(problem_()); asImp_().applyInitialSolution_(); @@ -711,7 +711,7 @@ public: void resetJacobianAssembler () { jacAsm_.template reset<JacobianAssembler>(0); - jacAsm_ = Dune::make_shared<JacobianAssembler>(); + jacAsm_ = std::make_shared<JacobianAssembler>(); jacAsm_->init(problem_()); } @@ -1093,7 +1093,7 @@ 
protected: LocalJacobian localJacobian_; // Linearizes the problem at the current time step using the // local jacobian - Dune::shared_ptr<JacobianAssembler> jacAsm_; + std::shared_ptr<JacobianAssembler> jacAsm_; // the set of all indices of vertices on the boundary std::vector<bool> boundaryIndices_; diff --git a/dumux/implicit/common/implicitporousmediaproblem.hh b/dumux/implicit/common/implicitporousmediaproblem.hh index c89c66b1e3..42df2864d2 100644 --- a/dumux/implicit/common/implicitporousmediaproblem.hh +++ b/dumux/implicit/common/implicitporousmediaproblem.hh @@ -77,7 +77,7 @@ public: : ParentType(timeManager, gridView), gravity_(0) { - spatialParams_ = Dune::make_shared<SpatialParams>(gridView); + spatialParams_ = std::make_shared<SpatialParams>(gridView); if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity)) gravity_[dim-1] = -9.81; @@ -155,7 +155,7 @@ protected: GlobalPosition gravity_; // fluids and material properties - Dune::shared_ptr<SpatialParams> spatialParams_; + std::shared_ptr<SpatialParams> spatialParams_; }; } diff --git a/dumux/implicit/common/implicitproblem.hh b/dumux/implicit/common/implicitproblem.hh index 1fcc9051ba..dd1da455ec 100644 --- a/dumux/implicit/common/implicitproblem.hh +++ b/dumux/implicit/common/implicitproblem.hh @@ -127,7 +127,7 @@ public: // if we are calculating on an adaptive grid get the grid adapt model if (adaptiveGrid) - gridAdapt_ = Dune::make_shared<GridAdaptModel>(asImp_()); + gridAdapt_ = std::make_shared<GridAdaptModel>(asImp_()); } /*! @@ -920,7 +920,7 @@ private: void createResultWriter_() { if (!resultWriter_) - resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); + resultWriter_ = std::make_shared<VtkMultiWriter>(gridView_, asImp_().name()); // Tell the result writer that the grid changes if we are adaptive if (adaptiveGrid) @@ -945,9 +945,9 @@ private: NewtonMethod newtonMethod_; NewtonController newtonCtl_; - Dune::shared_ptr<VtkMultiWriter> resultWriter_; + std::shared_ptr<VtkMultiWriter> resultWriter_; - Dune::shared_ptr<GridAdaptModel> gridAdapt_; + std::shared_ptr<GridAdaptModel> gridAdapt_; }; } // namespace Dumux diff --git a/dumux/implicit/mpnc/mpncmodel.hh b/dumux/implicit/mpnc/mpncmodel.hh index cd2956f0b5..3297e6a414 100644 --- a/dumux/implicit/mpnc/mpncmodel.hh +++ b/dumux/implicit/mpnc/mpncmodel.hh @@ -151,7 +151,7 @@ public: void init(Problem &problem) { ParentType::init(problem); - vtkWriter_ = Dune::make_shared<MPNCVtkWriter>(problem); + vtkWriter_ = std::make_shared<MPNCVtkWriter>(problem); if (this->gridView_().comm().rank() == 0) std::cout @@ -200,7 +200,7 @@ public: vtkWriter_->addCurrentSolution(writer); } - Dune::shared_ptr<MPNCVtkWriter> vtkWriter_; + std::shared_ptr<MPNCVtkWriter> vtkWriter_; private: bool enableSmoothUpwinding_; diff --git a/dumux/io/cubegridcreator.hh b/dumux/io/cubegridcreator.hh index dbf9733262..97d547c95d 100644 --- a/dumux/io/cubegridcreator.hh +++ b/dumux/io/cubegridcreator.hh @@ -50,7 +50,7 @@ class CubeGridCreator { typedef typename GET_PROP_TYPE(TypeTag, Scalar) Scalar; typedef typename GET_PROP_TYPE(TypeTag, Grid) Grid; - typedef Dune::shared_ptr<Grid> GridPointer; + typedef std::shared_ptr<Grid> GridPointer; enum { dim = Grid::dimension }; diff --git a/dumux/io/simplexgridcreator.hh b/dumux/io/simplexgridcreator.hh index b48781d7ef..092cfc6f91 100644 --- a/dumux/io/simplexgridcreator.hh +++ b/dumux/io/simplexgridcreator.hh @@ -48,7 +48,7 @@ class SimplexGridCreator { typedef typename GET_PROP_TYPE(TypeTag, Scalar) Scalar; typedef typename 
GET_PROP_TYPE(TypeTag, Grid) Grid; - typedef Dune::shared_ptr<Grid> GridPointer; + typedef std::shared_ptr<Grid> GridPointer; enum { dim = Grid::dimension }; diff --git a/dumux/io/vtkmultiwriter.hh b/dumux/io/vtkmultiwriter.hh index 6a9e7a1e54..4fa01da0d5 100644 --- a/dumux/io/vtkmultiwriter.hh +++ b/dumux/io/vtkmultiwriter.hh @@ -122,7 +122,7 @@ public: } - curWriter_ = Dune::make_shared<VtkWriter>(gridView_, Dune::VTK::conforming); + curWriter_ = std::make_shared<VtkWriter>(gridView_, Dune::VTK::conforming); ++curWriterNum_; curTime_ = t; @@ -140,8 +140,8 @@ public: { typedef Dune::BlockVector<Dune::FieldVector<Scalar, nComp> > VectorField; - Dune::shared_ptr<ManagedVectorField_<VectorField> > vfs = - Dune::make_shared<ManagedVectorField_<VectorField> >(nEntities); + std::shared_ptr<ManagedVectorField_<VectorField> > vfs = + std::make_shared<ManagedVectorField_<VectorField> >(nEntities); managedObjects_.push_back(vfs); return &(vfs->vf); } @@ -492,12 +492,12 @@ private: int commSize_; // number of processes in the communicator int commRank_; // rank of the current process in the communicator - Dune::shared_ptr<VtkWriter> curWriter_; + std::shared_ptr<VtkWriter> curWriter_; double curTime_; std::string curOutFileName_; int curWriterNum_; - std::list<Dune::shared_ptr<ManagedObject_> > managedObjects_; + std::list<std::shared_ptr<ManagedObject_> > managedObjects_; }; } diff --git a/dumux/linear/boxlinearsolver.hh b/dumux/linear/boxlinearsolver.hh index 870068aefa..fbfba0c39f 100644 --- a/dumux/linear/boxlinearsolver.hh +++ b/dumux/linear/boxlinearsolver.hh @@ -152,15 +152,15 @@ private: borderListCreator(problem_.gridView(), problem_.vertexMapper()); // create the overlapping Jacobian matrix - overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M, + overlapMatrix_ = std::make_shared<OverlappingMatrix> (M, borderListCreator.foreignBorderList(), borderListCreator.domesticBorderList(), overlapSize_); // create the overlapping vectors for the residual and the // solution - overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap()); - overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_); + overlapb_ = std::make_shared<OverlappingVector>(overlapMatrix_->overlap()); + overlapx_ = std::make_shared<OverlappingVector>(*overlapb_); } void cleanup_() @@ -173,9 +173,9 @@ private: const Problem &problem_; int overlapSize_; - Dune::shared_ptr<OverlappingMatrix> overlapMatrix_; - Dune::shared_ptr<OverlappingVector> overlapb_; - Dune::shared_ptr<OverlappingVector> overlapx_; + std::shared_ptr<OverlappingMatrix> overlapMatrix_; + std::shared_ptr<OverlappingVector> overlapb_; + std::shared_ptr<OverlappingVector> overlapx_; }; template <class TypeTag, class Imp> diff --git a/dumux/linear/domesticoverlapfrombcrsmatrix.hh b/dumux/linear/domesticoverlapfrombcrsmatrix.hh index afa78986e7..f3d9b61cfc 100644 --- a/dumux/linear/domesticoverlapfrombcrsmatrix.hh +++ b/dumux/linear/domesticoverlapfrombcrsmatrix.hh @@ -34,8 +34,6 @@ #include <memory> #include <tuple> -#include <dune/common/shared_ptr.hh> - #include <dumux/parallel/mpibuffer.hh> #include "foreignoverlapfrombcrsmatrix.hh" @@ -374,12 +372,12 @@ protected: // indices stemming from the overlap (i.e. 
without the border // indices) int numIndices = foreignOverlap.size(); - numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1); + numIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<int> >(1); (*numIndicesSendBuff_[peerRank])[0] = numIndices; numIndicesSendBuff_[peerRank]->send(peerRank); // create MPI buffers - indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<IndexDistanceNpeers> >(numIndices); + indicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<IndexDistanceNpeers> >(numIndices); // then send the additional indices themselfs ForeignOverlapWithPeer::const_iterator overlapIt = foreignOverlap.begin(); @@ -399,7 +397,7 @@ protected: numPeers); // send all peer ranks which see the given index - peersSendBuff_[peerRank].push_back(Dune::make_shared<MpiBuffer<int> >(2*numPeers)); + peersSendBuff_[peerRank].push_back(std::make_shared<MpiBuffer<int> >(2*numPeers)); typename std::map<ProcessRank, BorderDistance>::const_iterator it = foreignIndexOverlap.begin(); typename std::map<ProcessRank, BorderDistance>::const_iterator endIt = foreignIndexOverlap.end(); for (int j = 0; it != endIt; ++it, ++j) diff --git a/dumux/linear/impetbicgstabilu0solver.hh b/dumux/linear/impetbicgstabilu0solver.hh index f4498cabef..f0e79781ee 100644 --- a/dumux/linear/impetbicgstabilu0solver.hh +++ b/dumux/linear/impetbicgstabilu0solver.hh @@ -171,15 +171,15 @@ private: borderListCreator(problem_.gridView(), problem_.elementMapper()); // create the overlapping Jacobian matrix - overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M, + overlapMatrix_ = std::make_shared<OverlappingMatrix> (M, borderListCreator.foreignBorderList(), borderListCreator.domesticBorderList(), overlapSize_); // create the overlapping vectors for the residual and the // solution - overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap()); - overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_); + overlapb_ = std::make_shared<OverlappingVector>(overlapMatrix_->overlap()); + overlapx_ = std::make_shared<OverlappingVector>(*overlapb_); } void cleanup_() @@ -192,9 +192,9 @@ private: const Problem &problem_; int overlapSize_; - Dune::shared_ptr<OverlappingMatrix> overlapMatrix_; - Dune::shared_ptr<OverlappingVector> overlapb_; - Dune::shared_ptr<OverlappingVector> overlapx_; + std::shared_ptr<OverlappingMatrix> overlapMatrix_; + std::shared_ptr<OverlappingVector> overlapb_; + std::shared_ptr<OverlappingVector> overlapx_; }; } // namespace Dumux diff --git a/dumux/linear/overlappingbcrsmatrix.hh b/dumux/linear/overlappingbcrsmatrix.hh index 6af7d6722f..1aa8dee459 100644 --- a/dumux/linear/overlappingbcrsmatrix.hh +++ b/dumux/linear/overlappingbcrsmatrix.hh @@ -25,18 +25,18 @@ #ifndef DUMUX_OVERLAPPING_BCRS_MATRIX_HH #define DUMUX_OVERLAPPING_BCRS_MATRIX_HH -#include <dumux/linear/domesticoverlapfrombcrsmatrix.hh> -#include <dumux/linear/globalindices.hh> -#include <dumux/parallel/mpibuffer.hh> - -#include <dune/common/shared_ptr.hh> -#include <dune/istl/scalarproducts.hh> -#include <dune/istl/io.hh> - #include <algorithm> #include <list> #include <set> #include <map> +#include <memory> + +#include <dune/istl/scalarproducts.hh> +#include <dune/istl/io.hh> + +#include <dumux/linear/domesticoverlapfrombcrsmatrix.hh> +#include <dumux/linear/globalindices.hh> +#include <dumux/parallel/mpibuffer.hh> namespace Dumux { @@ -75,7 +75,7 @@ public: const BorderList &domesticBorderList, int overlapSize) { - overlap_ = Dune::make_shared<Overlap>(M, foreignBorderList, domesticBorderList, overlapSize); + 
overlap_ = std::make_shared<Overlap>(M, foreignBorderList, domesticBorderList, overlapSize); myRank_ = 0; #if HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &myRank_); @@ -301,12 +301,12 @@ private: // send size of foreign overlap to peer int numOverlapRows = peerOverlap.size(); - numRowsSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1); + numRowsSendBuff_[peerRank] = std::make_shared<MpiBuffer<int> >(1); (*numRowsSendBuff_[peerRank])[0] = numOverlapRows; numRowsSendBuff_[peerRank]->send(peerRank); - rowSizesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows); - rowIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows); + rowSizesSendBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numOverlapRows); + rowIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numOverlapRows); // create the row size MPI buffer int numEntries = 0; @@ -338,7 +338,7 @@ private: // create and fill the MPI buffer for the indices of the // matrix entries - entryIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numEntries); + entryIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numEntries); i = 0; it = peerOverlap.begin(); for (; it != endIt; ++it) { @@ -359,7 +359,7 @@ private: // create the send buffers for the values of the matrix // entries - entryValuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(numEntries); + entryValuesSendBuff_[peerRank] = std::make_shared<MpiBuffer<block_type> >(numEntries); #endif // HAVE_MPI } @@ -375,8 +375,8 @@ private: // create receive buffer for the row sizes and receive them // from the peer - rowIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows); - rowSizesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(numOverlapRows); + rowIndicesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(numOverlapRows); + rowSizesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<int> >(numOverlapRows); rowIndicesRecvBuff_[peerRank]->receive(peerRank); rowSizesRecvBuff_[peerRank]->receive(peerRank); @@ -388,8 +388,8 @@ private: } // create the buffer to store the column indices of the matrix entries - entryIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(totalIndices); - entryValuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(totalIndices); + entryIndicesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<Index> >(totalIndices); + entryValuesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<block_type> >(totalIndices); // communicate with the peer entryIndicesRecvBuff_[peerRank]->receive(peerRank); @@ -568,18 +568,18 @@ private: int myRank_; Entries entries_; - Dune::shared_ptr<Overlap> overlap_; - - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesRecvBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesRecvBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesRecvBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesRecvBuff_; - - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numRowsSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesSendBuff_; + std::shared_ptr<Overlap> overlap_; + + std::map<ProcessRank, 
std::shared_ptr<MpiBuffer<int> > > rowSizesRecvBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > rowIndicesRecvBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > entryIndicesRecvBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<block_type> > > entryValuesRecvBuff_; + + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > numRowsSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > rowSizesSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > rowIndicesSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<int> > > entryIndicesSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<block_type> > > entryValuesSendBuff_; }; } // namespace Dumux diff --git a/dumux/linear/overlappingblockvector.hh b/dumux/linear/overlappingblockvector.hh index ab77bfb786..a0691ab1c4 100644 --- a/dumux/linear/overlappingblockvector.hh +++ b/dumux/linear/overlappingblockvector.hh @@ -25,11 +25,11 @@ #ifndef DUMUX_OVERLAPPING_BLOCK_VECTOR_HH #define DUMUX_OVERLAPPING_BLOCK_VECTOR_HH -#include <vector> -#include <map> #include <iostream> +#include <map> +#include <memory> +#include <vector> -#include <dune/common/shared_ptr.hh> #include <dune/istl/bvector.hh> #include <dumux/parallel/mpibuffer.hh> @@ -285,7 +285,7 @@ private: #if HAVE_MPI // create array for the front indices int numDomestic = overlap_->numDomestic(); - frontMaster_ = Dune::make_shared<std::vector<ProcessRank> >(numDomestic, -1); + frontMaster_ = std::make_shared<std::vector<ProcessRank> >(numDomestic, -1); typename PeerSet::const_iterator peerIt; typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end(); @@ -297,9 +297,9 @@ private: const DomesticOverlapWithPeer &domesticOverlap = overlap_->domesticOverlapWithPeer(peerRank); int numEntries = domesticOverlap.size(); - numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1); - indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numEntries); - valuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numEntries); + numIndicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<int> >(1); + indicesSendBuff_[peerRank] = std::make_shared<MpiBuffer<RowIndex> >(numEntries); + valuesSendBuff_[peerRank] = std::make_shared<MpiBuffer<FieldVector> >(numEntries); // fill the indices buffer with global indices MpiBuffer<RowIndex> &indicesSendBuff = *indicesSendBuff_[peerRank]; @@ -330,8 +330,8 @@ private: numRows = numRowsRecvBuff[0]; // then, create the MPI buffers - indicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numRows); - valuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numRows); + indicesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<RowIndex> >(numRows); + valuesRecvBuff_[peerRank] = std::make_shared<MpiBuffer<FieldVector> >(numRows); MpiBuffer<RowIndex> &indicesRecvBuff = *indicesRecvBuff_[peerRank]; // next, receive the actual indices @@ -439,14 +439,14 @@ private: } } - Dune::shared_ptr<std::vector<ProcessRank> > frontMaster_; + std::shared_ptr<std::vector<ProcessRank> > frontMaster_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<RowIndex> > > numIndicesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<RowIndex> > > indicesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<RowIndex> > > indicesRecvBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<RowIndex> > > numIndicesSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<RowIndex> > > indicesSendBuff_; + std::map<ProcessRank, 
std::shared_ptr<MpiBuffer<RowIndex> > > indicesRecvBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<FieldVector> > > valuesSendBuff_; - std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<FieldVector> > > valuesRecvBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesSendBuff_; + std::map<ProcessRank, std::shared_ptr<MpiBuffer<FieldVector> > > valuesRecvBuff_; const Overlap *overlap_; }; diff --git a/dumux/multidomain/common/multidomainassembler.hh b/dumux/multidomain/common/multidomainassembler.hh index 2a3ff3fc65..bb914442ac 100644 --- a/dumux/multidomain/common/multidomainassembler.hh +++ b/dumux/multidomain/common/multidomainassembler.hh @@ -95,43 +95,43 @@ public: sdProblem1_ = &globalProblem_->sdProblem1(); sdProblem2_ = &globalProblem_->sdProblem2(); - fem1_ = Dune::make_shared<FEM1>(globalProblem_->sdGridView1()); - fem2_ = Dune::make_shared<FEM2>(globalProblem_->sdGridView2()); + fem1_ = std::make_shared<FEM1>(globalProblem_->sdGridView1()); + fem2_ = std::make_shared<FEM2>(globalProblem_->sdGridView2()); - scalarGridFunctionSpace1_ = Dune::make_shared<ScalarGridFunctionSpace1>(globalProblem_->sdGridView1(), + scalarGridFunctionSpace1_ = std::make_shared<ScalarGridFunctionSpace1>(globalProblem_->sdGridView1(), *fem1_); - scalarGridFunctionSpace2_ = Dune::make_shared<ScalarGridFunctionSpace2>(globalProblem_->sdGridView2(), + scalarGridFunctionSpace2_ = std::make_shared<ScalarGridFunctionSpace2>(globalProblem_->sdGridView2(), *fem2_); - gridFunctionSpace1_ = Dune::make_shared<GridFunctionSpace1>(*scalarGridFunctionSpace1_); - gridFunctionSpace2_ = Dune::make_shared<GridFunctionSpace2>(*scalarGridFunctionSpace2_); + gridFunctionSpace1_ = std::make_shared<GridFunctionSpace1>(*scalarGridFunctionSpace1_); + gridFunctionSpace2_ = std::make_shared<GridFunctionSpace2>(*scalarGridFunctionSpace2_); - mdGridFunctionSpace_ = Dune::make_shared<MultiDomainGridFunctionSpace>(globalProblem_->mdGrid(), + mdGridFunctionSpace_ = std::make_shared<MultiDomainGridFunctionSpace>(globalProblem_->mdGrid(), *gridFunctionSpace1_, *gridFunctionSpace2_); - localOperator1_ = Dune::make_shared<LocalOperator1>(sdProblem1_->model()); - localOperator2_ = Dune::make_shared<LocalOperator2>(sdProblem2_->model()); + localOperator1_ = std::make_shared<LocalOperator1>(sdProblem1_->model()); + localOperator2_ = std::make_shared<LocalOperator2>(sdProblem2_->model()); - condition1_ = Dune::make_shared<MultiDomainCondition>(0); - condition2_ = Dune::make_shared<MultiDomainCondition>(1); + condition1_ = std::make_shared<MultiDomainCondition>(0); + condition2_ = std::make_shared<MultiDomainCondition>(1); - mdSubProblem1_ = Dune::make_shared<MultiDomainSubProblem1>(*localOperator1_, *condition1_); - mdSubProblem2_ = Dune::make_shared<MultiDomainSubProblem2>(*localOperator2_, *condition2_); + mdSubProblem1_ = std::make_shared<MultiDomainSubProblem1>(*localOperator1_, *condition1_); + mdSubProblem2_ = std::make_shared<MultiDomainSubProblem2>(*localOperator2_, *condition2_); - couplingLocalOperator_ = Dune::make_shared<MultiDomainCouplingLocalOperator>(*globalProblem_); - mdCoupling_ = Dune::make_shared<MultiDomainCoupling>(*mdSubProblem1_, *mdSubProblem2_, *couplingLocalOperator_); + couplingLocalOperator_ = std::make_shared<MultiDomainCouplingLocalOperator>(*globalProblem_); + mdCoupling_ = std::make_shared<MultiDomainCoupling>(*mdSubProblem1_, *mdSubProblem2_, *couplingLocalOperator_); - constraintsTrafo_ = Dune::make_shared<MultiDomainConstraintsTrafo>(); + constraintsTrafo_ = 
std::make_shared<MultiDomainConstraintsTrafo>(); - mdGridOperator_ = Dune::make_shared<MultiDomainGridOperator>(*mdGridFunctionSpace_, *mdGridFunctionSpace_, + mdGridOperator_ = std::make_shared<MultiDomainGridOperator>(*mdGridFunctionSpace_, *mdGridFunctionSpace_, *constraintsTrafo_, *constraintsTrafo_, *mdSubProblem1_, *mdSubProblem2_, *mdCoupling_); - matrix_ = Dune::make_shared<JacobianMatrix>(*mdGridOperator_); + matrix_ = std::make_shared<JacobianMatrix>(*mdGridOperator_); *matrix_ = 0; - residual_ = Dune::make_shared<SolutionVector>(*mdGridFunctionSpace_); + residual_ = std::make_shared<SolutionVector>(*mdGridFunctionSpace_); } //! \copydoc ImplicitAssembler::assemble() @@ -192,34 +192,34 @@ private: SubDomainProblem1 *sdProblem1_; SubDomainProblem2 *sdProblem2_; - Dune::shared_ptr<FEM1> fem1_; - Dune::shared_ptr<FEM2> fem2_; + std::shared_ptr<FEM1> fem1_; + std::shared_ptr<FEM2> fem2_; - Dune::shared_ptr<ScalarGridFunctionSpace1> scalarGridFunctionSpace1_; - Dune::shared_ptr<ScalarGridFunctionSpace2> scalarGridFunctionSpace2_; + std::shared_ptr<ScalarGridFunctionSpace1> scalarGridFunctionSpace1_; + std::shared_ptr<ScalarGridFunctionSpace2> scalarGridFunctionSpace2_; - Dune::shared_ptr<GridFunctionSpace1> gridFunctionSpace1_; - Dune::shared_ptr<GridFunctionSpace2> gridFunctionSpace2_; - Dune::shared_ptr<MultiDomainGridFunctionSpace> mdGridFunctionSpace_; + std::shared_ptr<GridFunctionSpace1> gridFunctionSpace1_; + std::shared_ptr<GridFunctionSpace2> gridFunctionSpace2_; + std::shared_ptr<MultiDomainGridFunctionSpace> mdGridFunctionSpace_; - Dune::shared_ptr<LocalOperator1> localOperator1_; - Dune::shared_ptr<LocalOperator2> localOperator2_; + std::shared_ptr<LocalOperator1> localOperator1_; + std::shared_ptr<LocalOperator2> localOperator2_; - Dune::shared_ptr<MultiDomainCondition> condition1_; - Dune::shared_ptr<MultiDomainCondition> condition2_; + std::shared_ptr<MultiDomainCondition> condition1_; + std::shared_ptr<MultiDomainCondition> condition2_; - Dune::shared_ptr<MultiDomainSubProblem1> mdSubProblem1_; - Dune::shared_ptr<MultiDomainSubProblem2> mdSubProblem2_; + std::shared_ptr<MultiDomainSubProblem1> mdSubProblem1_; + std::shared_ptr<MultiDomainSubProblem2> mdSubProblem2_; - Dune::shared_ptr<MultiDomainCouplingLocalOperator> couplingLocalOperator_; - Dune::shared_ptr<MultiDomainCoupling> mdCoupling_; + std::shared_ptr<MultiDomainCouplingLocalOperator> couplingLocalOperator_; + std::shared_ptr<MultiDomainCoupling> mdCoupling_; - Dune::shared_ptr<MultiDomainConstraintsTrafo> constraintsTrafo_; - Dune::shared_ptr<MultiDomainGridOperator> mdGridOperator_; + std::shared_ptr<MultiDomainConstraintsTrafo> constraintsTrafo_; + std::shared_ptr<MultiDomainGridOperator> mdGridOperator_; - Dune::shared_ptr<JacobianMatrix> matrix_; + std::shared_ptr<JacobianMatrix> matrix_; - Dune::shared_ptr<SolutionVector> residual_; + std::shared_ptr<SolutionVector> residual_; }; } // namespace Dumux diff --git a/dumux/multidomain/common/multidomainmodel.hh b/dumux/multidomain/common/multidomainmodel.hh index 4d22a4a665..1cf14a78b3 100644 --- a/dumux/multidomain/common/multidomainmodel.hh +++ b/dumux/multidomain/common/multidomainmodel.hh @@ -80,8 +80,8 @@ public: jacAsm_ = new JacobianAssembler(); jacAsm_->init(problem); - uCur_ = Dune::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); - uPrev_ = Dune::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); + uCur_ = std::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); + uPrev_ = 
std::make_shared<SolutionVector>(jacAsm_->gridFunctionSpace()); *uCur_= 0; *uPrev_= 0; @@ -357,8 +357,8 @@ protected: // cur is the current solution, prev the solution of the previous // time step - Dune::shared_ptr<SolutionVector> uCur_; - Dune::shared_ptr<SolutionVector> uPrev_; + std::shared_ptr<SolutionVector> uCur_; + std::shared_ptr<SolutionVector> uPrev_; bool wasRestarted_; }; diff --git a/dumux/nonlinear/newtonconvergencewriter.hh b/dumux/nonlinear/newtonconvergencewriter.hh index 5755e97a7f..224539f2c8 100644 --- a/dumux/nonlinear/newtonconvergencewriter.hh +++ b/dumux/nonlinear/newtonconvergencewriter.hh @@ -62,7 +62,7 @@ public: { ++ iteration_; if (!vtkMultiWriter_) - vtkMultiWriter_ = Dune::make_shared<VtkMultiWriter>(gridView, "convergence"); + vtkMultiWriter_ = std::make_shared<VtkMultiWriter>(gridView, "convergence"); vtkMultiWriter_->beginWrite(timeStepIndex_ + iteration_ / 100.0); } @@ -83,7 +83,7 @@ public: private: int timeStepIndex_; int iteration_; - Dune::shared_ptr<VtkMultiWriter> vtkMultiWriter_; + std::shared_ptr<VtkMultiWriter> vtkMultiWriter_; NewtonController &ctl_; }; diff --git a/test/decoupled/1p/test_diffusion.cc b/test/decoupled/1p/test_diffusion.cc index eba6c66471..19d3b2f841 100644 --- a/test/decoupled/1p/test_diffusion.cc +++ b/test/decoupled/1p/test_diffusion.cc @@ -73,7 +73,7 @@ int main(int argc, char** argv) cellRes.fill(1); GlobalPosition lowerLeft(0.0); GlobalPosition upperRight(1.0); - static Dune::shared_ptr<Grid> grid + static std::shared_ptr<Grid> grid = Dune::StructuredGridFactory<Grid>::createCubeGrid(lowerLeft, upperRight, cellRes); diff --git a/test/multidomain/2cnistokes2p2cni/test_2cnistokes2p2cni.cc b/test/multidomain/2cnistokes2p2cni/test_2cnistokes2p2cni.cc index 0f71f0e612..0293316a25 100644 --- a/test/multidomain/2cnistokes2p2cni/test_2cnistokes2p2cni.cc +++ b/test/multidomain/2cnistokes2p2cni/test_2cnistokes2p2cni.cc @@ -181,7 +181,7 @@ int startLocal_(int argc, char **argv, TimeManager timeManager; // instantiate coupled problem - Dune::shared_ptr<MDGrid> mdGrid_ = Dune::make_shared<MDGrid> (GridCreator::grid()); + std::shared_ptr<MDGrid> mdGrid_ = std::make_shared<MDGrid> (GridCreator::grid()); // instantiate coupled problem Problem problem(*mdGrid_, diff --git a/test/multidomain/2cnizeroeq2p2cni/test_2cnizeroeq2p2cni.cc b/test/multidomain/2cnizeroeq2p2cni/test_2cnizeroeq2p2cni.cc index db3457f449..979e3adde4 100644 --- a/test/multidomain/2cnizeroeq2p2cni/test_2cnizeroeq2p2cni.cc +++ b/test/multidomain/2cnizeroeq2p2cni/test_2cnizeroeq2p2cni.cc @@ -253,7 +253,7 @@ int startLocal_(int argc, char **argv, TimeManager timeManager; // instantiate grid - Dune::shared_ptr<MDGrid> mdGrid_ = Dune::make_shared<MDGrid> (GridCreator::grid()); + std::shared_ptr<MDGrid> mdGrid_ = std::make_shared<MDGrid> (GridCreator::grid()); // instantiate coupled problem Problem problem(*mdGrid_, diff --git a/test/multidomain/2cstokes2p2c/test_2cstokes2p2c.cc b/test/multidomain/2cstokes2p2c/test_2cstokes2p2c.cc index d4902436f9..6524192e69 100644 --- a/test/multidomain/2cstokes2p2c/test_2cstokes2p2c.cc +++ b/test/multidomain/2cstokes2p2c/test_2cstokes2p2c.cc @@ -176,7 +176,7 @@ int startLocal_(int argc, char **argv, TimeManager timeManager; // instantiate coupled problem - Dune::shared_ptr<MDGrid> mdGrid_ = Dune::make_shared<MDGrid> (GridCreator::grid()); + std::shared_ptr<MDGrid> mdGrid_ = std::make_shared<MDGrid> (GridCreator::grid()); // instantiate coupled problem Problem problem(*mdGrid_, diff --git a/test/multidomain/2czeroeq2p2c/test_2czeroeq2p2c.cc 
b/test/multidomain/2czeroeq2p2c/test_2czeroeq2p2c.cc
index ae12d128d4..f472e36582 100644
--- a/test/multidomain/2czeroeq2p2c/test_2czeroeq2p2c.cc
+++ b/test/multidomain/2czeroeq2p2c/test_2czeroeq2p2c.cc
@@ -242,7 +242,7 @@ int startLocal_(int argc, char **argv,
     TimeManager timeManager;

     // instantiate grid
-    Dune::shared_ptr<MDGrid> mdGrid_ = Dune::make_shared<MDGrid> (GridCreator::grid());
+    std::shared_ptr<MDGrid> mdGrid_ = std::make_shared<MDGrid> (GridCreator::grid());

     // instantiate coupled problem
     Problem problem(*mdGrid_, timeManager);
--
GitLab
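
For reference, every hunk in this patch applies the same substitution, sketched below in a minimal, self-contained example. The Foo class and its value are hypothetical placeholders, not DuMuX code: the <dune/common/shared_ptr.hh> include is dropped in favour of <memory>, and Dune::shared_ptr / Dune::make_shared are replaced by their std:: counterparts, which this patch treats as drop-in replacements.

    // Before: #include <dune/common/shared_ptr.hh>
    #include <memory>    // std::shared_ptr and std::make_shared
    #include <iostream>

    // Hypothetical stand-in for a class held by shared pointer (e.g. spatial parameters)
    struct Foo
    {
        explicit Foo(double value) : value_(value) {}
        double value_;
    };

    int main()
    {
        // Before: Dune::shared_ptr<Foo> foo = Dune::make_shared<Foo>(0.4);
        std::shared_ptr<Foo> foo = std::make_shared<Foo>(0.4);
        std::cout << foo->value_ << std::endl;
        return 0;
    }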