From ca0f5c40062f8e20e67b03bc1f89a2ff9b5405f6 Mon Sep 17 00:00:00 2001
From: Bernd Flemisch <bernd@iws.uni-stuttgart.de>
Date: Tue, 5 Feb 2013 06:44:48 +0000
Subject: [PATCH] Replace raw pointers with smart pointers for common and implicit.
 This partially implements FS#182. Reviewed by Christoph.

git-svn-id: svn://svn.iws.uni-stuttgart.de/DUMUX/dumux/trunk@10176 2fb0f335-1f38-0410-981e-8018bf24f1b0
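
The whole change follows one pattern: class members that used to be raw
pointers paired with an explicit delete in the destructor become
Dune::shared_ptr members created via Dune::make_shared, so the hand-written
destructors can be dropped. A minimal sketch of that pattern (the Holder class
and its vector member are made up for illustration; Dune::shared_ptr and
Dune::make_shared are assumed to be provided by <dune/common/shared_ptr.hh>,
which picks the std::, TR1 or Boost implementation depending on availability):

    #include <dune/common/shared_ptr.hh>
    #include <vector>

    // Before: manual ownership, easy to leak or double-delete.
    //
    //     class Holder
    //     {
    //     public:
    //         Holder() : data_(new std::vector<double>(10, 0.0)) {}
    //         ~Holder() { delete data_; }
    //     private:
    //         std::vector<double> *data_;
    //     };

    // After: the shared pointer releases the object automatically,
    // so no destructor is needed.
    class Holder
    {
    public:
        Holder()
        : data_(Dune::make_shared<std::vector<double> >(10, 0.0))
        {}

    private:
        Dune::shared_ptr<std::vector<double> > data_;
    };

Where a held object has to be dropped or rebuilt later, a plain data_.reset()
or a fresh Dune::make_shared assignment replaces the former delete/new pair.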
---
 dumux/implicit/box/boxassembler.hh            |  2 +-
 dumux/implicit/cellcentered/ccassembler.hh    |  2 +-
 dumux/implicit/common/implicitassembler.hh    | 11 +---
 dumux/implicit/common/implicitmodel.hh        | 12 ++--
 .../common/implicitporousmediaproblem.hh      | 10 +--
 dumux/implicit/common/implicitproblem.hh      | 15 ++---
 dumux/implicit/mpnc/mpncmodel.hh              | 14 +----
 dumux/io/vtkmultiwriter.hh                    | 14 ++---
 dumux/linear/amgbackend.hh                    | 59 ++++++++---------
 dumux/linear/boxlinearsolver.hh               | 29 +++------
 dumux/linear/domesticoverlapfrombcrsmatrix.hh | 18 +++---
 dumux/linear/impetbicgstabilu0solver.hh       | 37 +++++------
 dumux/linear/overlappingbcrsmatrix.hh         | 63 ++++++-------------
 dumux/linear/overlappingblockvector.hh        | 12 ++--
 dumux/nonlinear/newtonconvergencewriter.hh    | 10 +--
 15 files changed, 110 insertions(+), 198 deletions(-)

diff --git a/dumux/implicit/box/boxassembler.hh b/dumux/implicit/box/boxassembler.hh
index 6d6ca06651..bd2a9544fa 100644
--- a/dumux/implicit/box/boxassembler.hh
+++ b/dumux/implicit/box/boxassembler.hh
@@ -220,7 +220,7 @@ private:
         int numVerticesGlobal = this->gridView_().size(dim);
 
         // allocate raw matrix
-        this->matrix_ = new JacobianMatrix(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
+        this->matrix_ = Dune::make_shared<JacobianMatrix>(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
 
         // find out the global indices of the neighboring vertices of
         // each vertex
diff --git a/dumux/implicit/cellcentered/ccassembler.hh b/dumux/implicit/cellcentered/ccassembler.hh
index f4285df634..0c23a386a8 100644
--- a/dumux/implicit/cellcentered/ccassembler.hh
+++ b/dumux/implicit/cellcentered/ccassembler.hh
@@ -134,7 +134,7 @@ private:
         int nElems = this->gridView_().size(0);
 
         // allocate raw matrix
-        this->matrix_ = new JacobianMatrix(nElems, nElems, JacobianMatrix::random);
+        this->matrix_ = Dune::make_shared<JacobianMatrix>(nElems, nElems, JacobianMatrix::random);
 
         // find out the global indices of the neighboring elements of
         // each element
diff --git a/dumux/implicit/common/implicitassembler.hh b/dumux/implicit/common/implicitassembler.hh
index 218cae454c..1fa2c44a25 100644
--- a/dumux/implicit/common/implicitassembler.hh
+++ b/dumux/implicit/common/implicitassembler.hh
@@ -95,21 +95,14 @@ public:
     };
 
     ImplicitAssembler()
+    : problemPtr_(0)
     {
-        problemPtr_ = 0;
-        matrix_ = 0;
-
         // set reassemble accuracy to 0, so that if partial reassembly
         // of the jacobian matrix is disabled, the reassemble accuracy
         // is always smaller than the current relative tolerance
         reassembleAccuracy_ = 0.0;
     }
 
-    ~ImplicitAssembler()
-    {
-        delete matrix_;
-    }
-
     /*!
      * \brief Initialize the jacobian assembler.
      *
@@ -543,7 +536,7 @@ protected:
     Problem *problemPtr_;
 
     // the jacobian matrix
-    JacobianMatrix *matrix_;
+    Dune::shared_ptr<JacobianMatrix> matrix_;
     // the right-hand side
     SolutionVector residual_;
 
diff --git a/dumux/implicit/common/implicitmodel.hh b/dumux/implicit/common/implicitmodel.hh
index 417783256b..61f5780998 100644
--- a/dumux/implicit/common/implicitmodel.hh
+++ b/dumux/implicit/common/implicitmodel.hh
@@ -88,13 +88,11 @@ public:
      * \brief The constructor.
      */
     ImplicitModel()
+    : problemPtr_(0)
     {
         enableHints_ = GET_PARAM_FROM_GROUP(TypeTag, bool, Implicit, EnableHints);
     }
 
-    ~ImplicitModel()
-    { delete jacAsm_;  }
-
     /*!
      * \brief Apply the initial conditions to the model.
      *
@@ -114,7 +112,7 @@ public:
             boxVolume_.resize(nDofs);
 
         localJacobian_.init(problem_());
-        jacAsm_ = new JacobianAssembler();
+        jacAsm_ = Dune::make_shared<JacobianAssembler>();
         jacAsm_->init(problem_());
 
         asImp_().applyInitialSolution_();
@@ -637,8 +635,8 @@ public:
      */
     void resetJacobianAssembler ()
     {
-        delete jacAsm_;
-        jacAsm_ = new JacobianAssembler;
+        jacAsm_.reset();
+        jacAsm_ = Dune::make_shared<JacobianAssembler>();
         jacAsm_->init(problem_());
     }
 
@@ -999,7 +997,7 @@ protected:
     LocalJacobian localJacobian_;
     // Linearizes the problem at the current time step using the
     // local jacobian
-    JacobianAssembler *jacAsm_;
+    Dune::shared_ptr<JacobianAssembler> jacAsm_;
 
     // the set of all indices of vertices on the boundary
     std::vector<bool> boundaryIndices_;
diff --git a/dumux/implicit/common/implicitporousmediaproblem.hh b/dumux/implicit/common/implicitporousmediaproblem.hh
index fab231a7b6..fd1529ecae 100644
--- a/dumux/implicit/common/implicitporousmediaproblem.hh
+++ b/dumux/implicit/common/implicitporousmediaproblem.hh
@@ -78,18 +78,12 @@ public:
           gravity_(0)
     {
         newSpatialParams_ = true;
-        spatialParams_ = new SpatialParams(gridView);
+        spatialParams_ = Dune::make_shared<SpatialParams>(gridView);
 
         if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity))
             gravity_[dim-1]  = -9.81;
     }
 
-    ~ImplicitPorousMediaProblem()
-    {
-        if (newSpatialParams_)
-            delete spatialParams_;
-    }
-
     /*!
      * \name Problem parameters
      */
@@ -199,7 +193,7 @@ protected:
     DimVector gravity_;
 
     // fluids and material properties
-    SpatialParams*  spatialParams_;
+    Dune::shared_ptr<SpatialParams> spatialParams_;
     bool newSpatialParams_;
 };
 
diff --git a/dumux/implicit/common/implicitproblem.hh b/dumux/implicit/common/implicitproblem.hh
index caab75353f..6a4f88a759 100644
--- a/dumux/implicit/common/implicitproblem.hh
+++ b/dumux/implicit/common/implicitproblem.hh
@@ -99,7 +99,6 @@ public:
         , timeManager_(&timeManager)
         , newtonMethod_(asImp_())
         , newtonCtl_(asImp_())
-        , resultWriter_(0)
     {
         // calculate the bounding box of the local partition of the grid view
         VertexIterator vIt = gridView.template begin<dim>();
@@ -122,13 +121,6 @@ public:
         simName_ = "sim";
     }
 
-    ~ImplicitProblem()
-    {
-        if (resultWriter_)
-            delete resultWriter_;
-    }
-
-
     /*!
      * \brief Called by the Dumux::TimeManager in order to
      *        initialize the problem.
@@ -857,7 +849,10 @@ protected:
 private:
     // makes sure that the result writer exists
     void createResultWriter_()
-    { if (!resultWriter_) resultWriter_ = new VtkMultiWriter(gridView_, asImp_().name()); }
+    {
+        if (!resultWriter_)
+            resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name());
+    }
 
     std::string simName_;
     const GridView gridView_;
@@ -875,7 +870,7 @@ private:
     NewtonMethod newtonMethod_;
     NewtonController newtonCtl_;
 
-    VtkMultiWriter *resultWriter_;
+    Dune::shared_ptr<VtkMultiWriter> resultWriter_;
 };
 
 }
diff --git a/dumux/implicit/mpnc/mpncmodel.hh b/dumux/implicit/mpnc/mpncmodel.hh
index f61d9d8e14..7488dc1d13 100644
--- a/dumux/implicit/mpnc/mpncmodel.hh
+++ b/dumux/implicit/mpnc/mpncmodel.hh
@@ -126,20 +126,10 @@ class MPNCModel : public GET_PROP_TYPE(TypeTag, BaseModel)
     enum {numEq = GET_PROP_VALUE(TypeTag, NumEq)};
 
 public:
-    MPNCModel()
-    : vtkWriter_(0) 
-    {}
-    
-    ~MPNCModel()
-    { 
-        if (vtkWriter_)
-            delete vtkWriter_; 
-    }
-
     void init(Problem &problem)
     {
         ParentType::init(problem);
-        vtkWriter_ = new MPNCVtkWriter(problem);
+        vtkWriter_ = Dune::make_shared<MPNCVtkWriter>(problem);
 
         if (this->gridView_().comm().rank() == 0)
             std::cout
@@ -185,7 +175,7 @@ public:
         vtkWriter_->addCurrentSolution(writer);
     }
 
-    MPNCVtkWriter *vtkWriter_;
+    Dune::shared_ptr<MPNCVtkWriter> vtkWriter_;
 };
 
 }
diff --git a/dumux/io/vtkmultiwriter.hh b/dumux/io/vtkmultiwriter.hh
index e0276a20f8..c4158e5e62 100644
--- a/dumux/io/vtkmultiwriter.hh
+++ b/dumux/io/vtkmultiwriter.hh
@@ -122,7 +122,7 @@ public:
         }
 
 
-        curWriter_ = new VtkWriter(gridView_, Dune::VTK::conforming);
+        curWriter_ = Dune::make_shared<VtkWriter>(gridView_, Dune::VTK::conforming);
         ++curWriterNum_;
 
         curTime_ = t;
@@ -140,8 +140,8 @@ public:
     {
         typedef Dune::BlockVector<Dune::FieldVector<Scalar, nComp> > VectorField;
 
-        ManagedVectorField_<VectorField> *vfs =
-            new ManagedVectorField_<VectorField>(nEntities);
+        Dune::shared_ptr<ManagedVectorField_<VectorField> > vfs =
+            Dune::make_shared<ManagedVectorField_<VectorField> >(nEntities);
         managedObjects_.push_back(vfs);
         return &(vfs->vf);
     }
@@ -275,10 +275,8 @@ public:
         else
             -- curWriterNum_;
 
-        // discard managed objects and the current VTK writer
-        delete curWriter_;
+        // discard managed objects
         while (managedObjects_.begin() != managedObjects_.end()) {
-            delete managedObjects_.front();
             managedObjects_.pop_front();
         }
 
@@ -493,12 +491,12 @@ private:
     int commSize_; // number of processes in the communicator
     int commRank_; // rank of the current process in the communicator
 
-    VtkWriter *curWriter_;
+    Dune::shared_ptr<VtkWriter> curWriter_;
     double curTime_;
     std::string curOutFileName_;
     int curWriterNum_;
 
-    std::list<ManagedObject_*> managedObjects_;
+    std::list<Dune::shared_ptr<ManagedObject_> > managedObjects_;
 };
 }
 
diff --git a/dumux/linear/amgbackend.hh b/dumux/linear/amgbackend.hh
index 8871cfe3d8..a8992acbb4 100644
--- a/dumux/linear/amgbackend.hh
+++ b/dumux/linear/amgbackend.hh
@@ -224,13 +224,13 @@ public:
     AMGBackend(const Problem& problem)
     : problem_(problem)
     {
-        fem_ = new LocalFemMap();
-        constraints_ = new Constraints();
-        scalarGridFunctionSpace_ = new ScalarGridFunctionSpace(problem.gridView(), *fem_, *constraints_);
-        gridFunctionSpace_ = new GridFunctionSpace(*scalarGridFunctionSpace_);
-        imp_ = new PDELabBackend(*gridFunctionSpace_,
-                GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
-                GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
+        fem_ = Dune::make_shared<LocalFemMap>();
+        constraints_ = Dune::make_shared<Constraints>();
+        scalarGridFunctionSpace_ = Dune::make_shared<ScalarGridFunctionSpace>(problem.gridView(), *fem_, *constraints_);
+        gridFunctionSpace_ = Dune::make_shared<GridFunctionSpace>(*scalarGridFunctionSpace_);
+        int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+        int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+        imp_ = Dune::make_shared<PDELabBackend>(*gridFunctionSpace_, maxIt, verbosity);
     }
 
     /*!
@@ -262,23 +262,14 @@ public:
     {
         return result_;
     }
-
-    ~AMGBackend()
-    {
-        delete imp_;
-        delete gridFunctionSpace_;
-        delete scalarGridFunctionSpace_;
-        delete constraints_;
-        delete fem_;
-    }
     
 private:
     const Problem& problem_;
-    LocalFemMap *fem_;
-    Constraints *constraints_;
-    ScalarGridFunctionSpace *scalarGridFunctionSpace_;
-    GridFunctionSpace *gridFunctionSpace_;
-    PDELabBackend *imp_;
+    Dune::shared_ptr<LocalFemMap> fem_;
+    Dune::shared_ptr<Constraints> constraints_;
+    Dune::shared_ptr<ScalarGridFunctionSpace> scalarGridFunctionSpace_;
+    Dune::shared_ptr<GridFunctionSpace> gridFunctionSpace_;
+    Dune::shared_ptr<PDELabBackend> imp_;
     Dune::InverseOperatorResult result_;
 };
 
@@ -312,9 +303,9 @@ public:
     template<class Matrix, class Vector>
     bool solve(Matrix& A, Vector& x, Vector& b)
     {
-        imp_ = new PDELabBackend(
-                GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
-                GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
+        int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+        int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+        imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
 
         static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
         imp_->apply(A, x, b, residReduction);
@@ -325,8 +316,8 @@ public:
         result_.reduction  = imp_->result().reduction;
         result_.conv_rate  = imp_->result().conv_rate;
 
-        delete imp_;
-
+        imp_.reset();
+
         return result_.converged;
     }
 
@@ -340,7 +331,7 @@ public:
 
 private:
     const Problem& problem_;
-    PDELabBackend *imp_;
+    Dune::shared_ptr<PDELabBackend> imp_;
     Dune::InverseOperatorResult result_;
 };
 
@@ -379,10 +370,10 @@ public:
     {
         scaleLinearSystem(A, b);
 
-        imp_ = new PDELabBackend(
-                GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
-                GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
-
+        int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+        int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+        imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
+
         static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
         imp_->apply(A, x, b, residReduction);
 
@@ -392,8 +383,8 @@ public:
         result_.reduction  = imp_->result().reduction;
         result_.conv_rate  = imp_->result().conv_rate;
 
-        delete imp_;
-
+        imp_.reset();
+
         return result_.converged;
     }
 
@@ -407,7 +398,7 @@ public:
 
 private:
     const Problem& problem_;
-    PDELabBackend *imp_;
+    Dune::shared_ptr<PDELabBackend> imp_;
     Dune::InverseOperatorResult result_;
 };
 
diff --git a/dumux/linear/boxlinearsolver.hh b/dumux/linear/boxlinearsolver.hh
index c8cba18c91..5bb58aa35a 100644
--- a/dumux/linear/boxlinearsolver.hh
+++ b/dumux/linear/boxlinearsolver.hh
@@ -68,11 +68,7 @@ public:
     BoxLinearSolver(const Problem &problem, int overlapSize)
     : problem_(problem)
     , overlapSize_(overlapSize)
-    {
-        overlapMatrix_ = 0;
-        overlapb_ = 0;
-        overlapx_ = 0;
-    }
+    {}
 
     ~BoxLinearSolver()
     { cleanup_(); }
@@ -156,35 +152,30 @@ private:
         borderListCreator(problem_.gridView(), problem_.vertexMapper());
 
         // create the overlapping Jacobian matrix
-        overlapMatrix_ = new OverlappingMatrix (M,
+        overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
                 borderListCreator.foreignBorderList(),
                 borderListCreator.domesticBorderList(),
                 overlapSize_);
 
         // create the overlapping vectors for the residual and the
         // solution
-        overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
-        overlapx_ = new OverlappingVector(*overlapb_);
+        overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
+        overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
     }
 
     void cleanup_()
     {
-        // create the overlapping Jacobian matrix and vectors
-        delete overlapMatrix_;
-        delete overlapb_;
-        delete overlapx_;
-
-        overlapMatrix_ = 0;
-        overlapb_ = 0;
-        overlapx_ = 0;
+        overlapMatrix_.reset();
+        overlapb_.reset();
+        overlapx_.reset();
     }
 
     const Problem &problem_;
 
     int overlapSize_;
-    OverlappingMatrix *overlapMatrix_;
-    OverlappingVector *overlapb_;
-    OverlappingVector *overlapx_;
+    Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
+    Dune::shared_ptr<OverlappingVector> overlapb_;
+    Dune::shared_ptr<OverlappingVector> overlapx_;
 };
 
 template <class TypeTag, class Imp>
diff --git a/dumux/linear/domesticoverlapfrombcrsmatrix.hh b/dumux/linear/domesticoverlapfrombcrsmatrix.hh
index 38fe79b31c..6cb0533a56 100644
--- a/dumux/linear/domesticoverlapfrombcrsmatrix.hh
+++ b/dumux/linear/domesticoverlapfrombcrsmatrix.hh
@@ -372,12 +372,12 @@ protected:
         // indices stemming from the overlap (i.e. without the border
         // indices)
         int numIndices = foreignOverlap.size();
-        numIndicesSendBuff_[peerRank] = new MpiBuffer<int>(1);
+        numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
         (*numIndicesSendBuff_[peerRank])[0] = numIndices;
         numIndicesSendBuff_[peerRank]->send(peerRank);
 
         // create MPI buffers
-        indicesSendBuff_[peerRank] = new MpiBuffer<IndexDistanceNpeers>(numIndices);
+        indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<IndexDistanceNpeers> >(numIndices);
 
         // then send the additional indices themselfs
         ForeignOverlapWithPeer::const_iterator overlapIt = foreignOverlap.begin();
@@ -397,7 +397,7 @@ protected:
                                     numPeers);
 
             // send all peer ranks which see the given index
-            peersSendBuff_[peerRank].push_back(new MpiBuffer<int>(2*numPeers));
+            peersSendBuff_[peerRank].push_back(Dune::make_shared<MpiBuffer<int> >(2*numPeers));
             typename std::map<ProcessRank, BorderDistance>::const_iterator it = foreignIndexOverlap.begin();
             typename std::map<ProcessRank, BorderDistance>::const_iterator endIt = foreignIndexOverlap.end();
             for (int j = 0; it != endIt; ++it, ++j)
@@ -419,10 +419,10 @@ protected:
     void waitSendIndices_(int peerRank)
     {
         numIndicesSendBuff_[peerRank]->wait();
-        delete numIndicesSendBuff_[peerRank];
+        numIndicesSendBuff_[peerRank].reset();
 
         indicesSendBuff_[peerRank]->wait();
-        delete indicesSendBuff_[peerRank];
+        indicesSendBuff_[peerRank].reset();
 
         const ForeignOverlapWithPeer &foreignPeerOverlap
             = foreignOverlap_.foreignOverlapWithPeer(peerRank);
@@ -430,7 +430,7 @@ protected:
         ForeignOverlapWithPeer::const_iterator overlapEndIt = foreignPeerOverlap.end();
         for (int i = 0; overlapIt != overlapEndIt; ++overlapIt, ++i) {
             peersSendBuff_[peerRank][i]->wait();
-            delete peersSendBuff_[peerRank][i];
+            peersSendBuff_[peerRank][i].reset();
         }
     }
 
@@ -498,9 +498,9 @@ protected:
     DomesticOverlapByIndex domesticOverlapByIndex_;
     std::vector<int> borderDistance_;
 
-    std::map<ProcessRank, MpiBuffer<int>* > numIndicesSendBuff_;
-    std::map<ProcessRank, MpiBuffer<IndexDistanceNpeers>* > indicesSendBuff_;
-    std::map<ProcessRank, std::vector<MpiBuffer<int>*> > peersSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numIndicesSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<IndexDistanceNpeers> > > indicesSendBuff_;
+    std::map<ProcessRank, std::vector<Dune::shared_ptr<MpiBuffer<int> > > > peersSendBuff_;
     GlobalIndices globalIndices_;
     PeerSet peerSet_;
 };
diff --git a/dumux/linear/impetbicgstabilu0solver.hh b/dumux/linear/impetbicgstabilu0solver.hh
index 79672445a7..f4498cabef 100644
--- a/dumux/linear/impetbicgstabilu0solver.hh
+++ b/dumux/linear/impetbicgstabilu0solver.hh
@@ -82,14 +82,10 @@ class IMPETBiCGStabILU0Solver
 
 public:
     IMPETBiCGStabILU0Solver(const Problem &problem, int overlapSize=1)
-        : problem_(problem)
-        , overlapSize_(overlapSize)
-    {
-        overlapMatrix_ = 0;
-        overlapb_ = 0;
-        overlapx_ = 0;
-    }
-
+    : problem_(problem)
+    , overlapSize_(overlapSize)
+    {}
+
     ~IMPETBiCGStabILU0Solver()
     { cleanup_(); }
 
@@ -175,35 +171,30 @@ private:
             borderListCreator(problem_.gridView(), problem_.elementMapper());
 
         // create the overlapping Jacobian matrix
-        overlapMatrix_ = new OverlappingMatrix (M,
+        overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
                                                 borderListCreator.foreignBorderList(),
                                                 borderListCreator.domesticBorderList(),
                                                 overlapSize_);
 
         // create the overlapping vectors for the residual and the
         // solution
-        overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
-        overlapx_ = new OverlappingVector(*overlapb_);
+        overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
+        overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
     }
 
     void cleanup_()
     {
-        // create the overlapping Jacobian matrix and vectors
-        delete overlapMatrix_;
-        delete overlapb_;
-        delete overlapx_;
-
-        overlapMatrix_ = 0;
-        overlapb_ = 0;
-        overlapx_ = 0;
+        overlapMatrix_.reset();
+        overlapb_.reset();
+        overlapx_.reset();
     }
-
+
     const Problem &problem_;
 
     int overlapSize_;
-    OverlappingMatrix *overlapMatrix_;
-    OverlappingVector *overlapb_;
-    OverlappingVector *overlapx_;
+    Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
+    Dune::shared_ptr<OverlappingVector> overlapb_;
+    Dune::shared_ptr<OverlappingVector> overlapx_;
 };
 
 } // namespace Dumux
diff --git a/dumux/linear/overlappingbcrsmatrix.hh b/dumux/linear/overlappingbcrsmatrix.hh
index 5569ceb681..6af7d6722f 100644
--- a/dumux/linear/overlappingbcrsmatrix.hh
+++ b/dumux/linear/overlappingbcrsmatrix.hh
@@ -75,7 +75,7 @@ public:
                           const BorderList &domesticBorderList,
                           int overlapSize)
     {
-        overlap_ = Dune::shared_ptr<Overlap>(new Overlap(M, foreignBorderList, domesticBorderList, overlapSize));
+        overlap_ = Dune::make_shared<Overlap>(M, foreignBorderList, domesticBorderList, overlapSize);
         myRank_ = 0;
 #if HAVE_MPI
         MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
@@ -86,31 +86,6 @@ public:
         build_(M);
     }
 
-    ~OverlappingBCRSMatrix()
-    {
-        if (overlap_.use_count() == 0)
-            return;
-
-        // delete all MPI buffers
-        const PeerSet &peerSet = overlap_->foreignOverlap().peerSet();
-        typename PeerSet::const_iterator peerIt = peerSet.begin();
-        typename PeerSet::const_iterator peerEndIt = peerSet.end();
-        for (; peerIt != peerEndIt; ++peerIt) {
-            int peerRank = *peerIt;
-
-            delete rowSizesRecvBuff_[peerRank];
-            delete rowIndicesRecvBuff_[peerRank];
-            delete entryIndicesRecvBuff_[peerRank];
-            delete entryValuesRecvBuff_[peerRank];
-
-            delete numRowsSendBuff_[peerRank];
-            delete rowSizesSendBuff_[peerRank];
-            delete rowIndicesSendBuff_[peerRank];
-            delete entryIndicesSendBuff_[peerRank];
-            delete entryValuesSendBuff_[peerRank];
-        }
-    }
-
     /*!
      * \brief Returns the domestic overlap for the process.
      */
@@ -326,12 +301,12 @@ private:
 
         // send size of foreign overlap to peer
         int numOverlapRows = peerOverlap.size();
-        numRowsSendBuff_[peerRank] = new MpiBuffer<int>(1);
+        numRowsSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
         (*numRowsSendBuff_[peerRank])[0] = numOverlapRows;
         numRowsSendBuff_[peerRank]->send(peerRank);
 
-        rowSizesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
-        rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
+        rowSizesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
+        rowIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
 
         // create the row size MPI buffer
         int numEntries = 0;
@@ -363,7 +338,7 @@ private:
 
         // create and fill the MPI buffer for the indices of the
         // matrix entries
-        entryIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numEntries);
+        entryIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numEntries);
         i = 0;
         it = peerOverlap.begin();
         for (; it != endIt; ++it) {
@@ -384,7 +359,7 @@ private:
 
         // create the send buffers for the values of the matrix
         // entries
-        entryValuesSendBuff_[peerRank] = new MpiBuffer<block_type>(numEntries);
+        entryValuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(numEntries);
 #endif // HAVE_MPI
     }
 
@@ -400,8 +375,8 @@ private:
 
         // create receive buffer for the row sizes and receive them
         // from the peer
-        rowIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
-        rowSizesRecvBuff_[peerRank] = new MpiBuffer<int>(numOverlapRows);
+        rowIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
+        rowSizesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(numOverlapRows);
         rowIndicesRecvBuff_[peerRank]->receive(peerRank);
         rowSizesRecvBuff_[peerRank]->receive(peerRank);
 
@@ -413,8 +388,8 @@ private:
         }
 
         // create the buffer to store the column indices of the matrix entries
-        entryIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(totalIndices);
-        entryValuesRecvBuff_[peerRank] = new MpiBuffer<block_type>(totalIndices);
+        entryIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(totalIndices);
+        entryValuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(totalIndices);
 
         // communicate with the peer
         entryIndicesRecvBuff_[peerRank]->receive(peerRank);
@@ -595,16 +570,16 @@ private:
     Entries entries_;
     Dune::shared_ptr<Overlap> overlap_;
 
-    std::map<ProcessRank, MpiBuffer<int>* > rowSizesRecvBuff_;
-    std::map<ProcessRank, MpiBuffer<int>* > rowIndicesRecvBuff_;
-    std::map<ProcessRank, MpiBuffer<int>* > entryIndicesRecvBuff_;
-    std::map<ProcessRank, MpiBuffer<block_type>* > entryValuesRecvBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesRecvBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesRecvBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesRecvBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesRecvBuff_;
 
-    std::map<ProcessRank, MpiBuffer<int>* > numRowsSendBuff_;
-    std::map<ProcessRank, MpiBuffer<int>* > rowSizesSendBuff_;
-    std::map<ProcessRank, MpiBuffer<int>* > rowIndicesSendBuff_;
-    std::map<ProcessRank, MpiBuffer<int>* > entryIndicesSendBuff_;
-    std::map<ProcessRank, MpiBuffer<block_type>* > entryValuesSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numRowsSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesSendBuff_;
+    std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesSendBuff_;
 };
 
 } // namespace Dumux
diff --git a/dumux/linear/overlappingblockvector.hh b/dumux/linear/overlappingblockvector.hh
index 2fc83ea415..ab77bfb786 100644
--- a/dumux/linear/overlappingblockvector.hh
+++ b/dumux/linear/overlappingblockvector.hh
@@ -285,7 +285,7 @@ private:
 #if HAVE_MPI
         // create array for the front indices
         int numDomestic = overlap_->numDomestic();
-        frontMaster_ = Dune::shared_ptr<std::vector<ProcessRank> >(new std::vector<ProcessRank>(numDomestic, -1));
+        frontMaster_ = Dune::make_shared<std::vector<ProcessRank> >(numDomestic, -1);
 
         typename PeerSet::const_iterator peerIt;
         typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();
@@ -297,9 +297,9 @@ private:
 
             const DomesticOverlapWithPeer &domesticOverlap = overlap_->domesticOverlapWithPeer(peerRank);
             int numEntries = domesticOverlap.size();
-            numIndicesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<int> >(new MpiBuffer<int>(1));
-            indicesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<RowIndex> >(new MpiBuffer<RowIndex>(numEntries));
-            valuesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<FieldVector> >(new MpiBuffer<FieldVector>(numEntries));
+            numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
+            indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numEntries);
+            valuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numEntries);
 
             // fill the indices buffer with global indices
             MpiBuffer<RowIndex> &indicesSendBuff = *indicesSendBuff_[peerRank];
@@ -330,8 +330,8 @@ private:
             numRows = numRowsRecvBuff[0];
 
             // then, create the MPI buffers
-            indicesRecvBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<RowIndex> >(new MpiBuffer<RowIndex>(numRows));
-            valuesRecvBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<FieldVector> >(new MpiBuffer<FieldVector>(numRows));
+            indicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numRows);
+            valuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numRows);
             MpiBuffer<RowIndex> &indicesRecvBuff = *indicesRecvBuff_[peerRank];
 
             // next, receive the actual indices
diff --git a/dumux/nonlinear/newtonconvergencewriter.hh b/dumux/nonlinear/newtonconvergencewriter.hh
index c6889f6ff3..543ff90c34 100644
--- a/dumux/nonlinear/newtonconvergencewriter.hh
+++ b/dumux/nonlinear/newtonconvergencewriter.hh
@@ -46,16 +46,12 @@ public:
     typedef Dumux::VtkMultiWriter<GridView>  VtkMultiWriter;
 
     NewtonConvergenceWriter(NewtonController &ctl)
-        : ctl_(ctl)
+    : ctl_(ctl)
     {
         timeStepIndex_ = 0;
         iteration_ = 0;
-        vtkMultiWriter_ = 0;
     }
 
-    ~NewtonConvergenceWriter()
-    { delete vtkMultiWriter_; }
-
     void beginTimestep()
     {
         ++timeStepIndex_;
@@ -66,7 +62,7 @@ public:
     {
         ++ iteration_;
         if (!vtkMultiWriter_)
-            vtkMultiWriter_ = new VtkMultiWriter(gv, "convergence");
+            vtkMultiWriter_ = Dune::make_shared<VtkMultiWriter>(gv, "convergence");
         vtkMultiWriter_->beginWrite(timeStepIndex_ + iteration_ / 100.0);
     }
 
@@ -87,7 +83,7 @@ public:
 private:
     int timeStepIndex_;
     int iteration_;
-    VtkMultiWriter *vtkMultiWriter_;
+    Dune::shared_ptr<VtkMultiWriter> vtkMultiWriter_;
     NewtonController &ctl_;
 };
 
-- 
GitLab