The patch listed below replaces most of the raw pointers used in the common and fully implicit parts of Dumux with Dune::shared_ptr. This dramatically increases the safety of our memory management: some applications investigated by Philipp used to segfault after throwing an exception; now they terminate properly.
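To see why, here is a minimal stand-alone sketch of the failure mode (made-up class names, not Dumux code; it uses plain std::shared_ptr so it compiles without Dune, and on recent compilers Dune::shared_ptr is typically just an alias for it):

#include <memory>
#include <stdexcept>

// The raw pointer is only assigned in init(). If an exception unwinds
// the stack before init() has run, the destructor deletes an
// indeterminate pointer -- undefined behavior, typically a segfault.
struct RawHolder
{
    int *data;                              // uninitialized until init()
    void init() { data = new int(42); }
    ~RawHolder() { delete data; }           // crashes if init() never ran
};

// With a smart pointer, the member is default-initialized to null, and
// destroying a null shared_ptr is safe, so stack unwinding just works.
struct SafeHolder
{
    std::shared_ptr<int> data;              // null by default
    void init() { data = std::make_shared<int>(42); }
    // no destructor necessary
};

int main()
{
    try {
        SafeHolder s;                       // with RawHolder instead, the
        throw std::runtime_error("bail");   // unwinding would likely segfault
    }
    catch (const std::exception &) {}
    return 0;
}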
Wherever a raw pointer has been replaced by a Dune::shared_ptr, the following changes are usually made, each bringing its own advantage:
- Calls like
Type* pointer = new Type(...);
are replaced by
Dune::shared_ptr<Type> pointer = Dune::make_shared<Type>(...);
- No explicit initializations like pointer(0) are necessary to make
comparisons safe, and none can be forgotten, since a smart pointer is
always default-initialized to null.
- No explicit delete statements are necessary, and none can be forgotten,
since the smart pointers release their memory themselves. This renders
several destructors obsolete; a compact before/after sketch follows the list.
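The sketch below illustrates the idiom with made-up class and member names (it assumes dune-common's <dune/common/shared_ptr.hh>, which provides Dune::shared_ptr and Dune::make_shared):

#include <dune/common/shared_ptr.hh>

// Made-up "before" class: raw owning pointer, manual bookkeeping.
class AssemblerBefore
{
public:
    AssemblerBefore() : matrix_(0) {}      // null-init must not be forgotten
    void init() { matrix_ = new int(0); }  // plain new
    ~AssemblerBefore() { delete matrix_; } // delete must not be forgotten
private:
    int *matrix_;
};

// Made-up "after" class: the shared_ptr owns the object.
class AssemblerAfter
{
public:
    // no explicit initialization: a shared_ptr is null by default
    void init() { matrix_ = Dune::make_shared<int>(0); }
    // no destructor: the shared_ptr releases the memory itself
private:
    Dune::shared_ptr<int> matrix_;
};

int main()
{
    AssemblerAfter a;
    a.init();   // released automatically when 'a' goes out of scope
    return 0;
}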
Please have a look at the patch, which I would like to commit before 2.3. Thank you.
Index: dumux/implicit/box/boxassembler.hh
===================================================================
--- dumux/implicit/box/boxassembler.hh (revision 10135)
+++ dumux/implicit/box/boxassembler.hh (working copy)
@@ -220,7 +220,7 @@
int numVerticesGlobal = this->gridView_().size(dim);
// allocate raw matrix
- this->matrix_ = new JacobianMatrix(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
+ this->matrix_ = Dune::make_shared<JacobianMatrix>(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
// find out the global indices of the neighboring vertices of
// each vertex
Index: dumux/implicit/cellcentered/ccassembler.hh
===================================================================
--- dumux/implicit/cellcentered/ccassembler.hh (revision 10135)
+++ dumux/implicit/cellcentered/ccassembler.hh (working copy)
@@ -134,7 +134,7 @@
int nElems = this->gridView_().size(0);
// allocate raw matrix
- this->matrix_ = new JacobianMatrix(nElems, nElems, JacobianMatrix::random);
+ this->matrix_ = Dune::make_shared<JacobianMatrix>(nElems, nElems, JacobianMatrix::random);
// find out the global indices of the neighboring elements of
// each element
Index: dumux/implicit/common/implicitassembler.hh
===================================================================
--- dumux/implicit/common/implicitassembler.hh (revision 10135)
+++ dumux/implicit/common/implicitassembler.hh (working copy)
@@ -95,21 +95,14 @@
};
ImplicitAssembler()
+ : problemPtr_(0)
{
- problemPtr_ = 0;
- matrix_ = 0;
-
// set reassemble accuracy to 0, so that if partial reassembly
// of the jacobian matrix is disabled, the reassemble accuracy
// is always smaller than the current relative tolerance
reassembleAccuracy_ = 0.0;
}
- ~ImplicitAssembler()
- {
- delete matrix_;
- }
-
/*!
* \brief Initialize the jacobian assembler.
*
@@ -543,7 +536,7 @@
Problem *problemPtr_;
// the jacobian matrix
- JacobianMatrix *matrix_;
+ Dune::shared_ptr<JacobianMatrix> matrix_;
// the right-hand side
SolutionVector residual_;
Index: dumux/implicit/common/implicitmodel.hh
===================================================================
--- dumux/implicit/common/implicitmodel.hh (revision 10135)
+++ dumux/implicit/common/implicitmodel.hh (working copy)
@@ -88,13 +88,11 @@
* \brief The constructor.
*/
ImplicitModel()
+ : problemPtr_(0)
{
enableHints_ = GET_PARAM_FROM_GROUP(TypeTag, bool, Implicit, EnableHints);
}
- ~ImplicitModel()
- { delete jacAsm_; }
-
/*!
* \brief Apply the initial conditions to the model.
*
@@ -114,7 +112,7 @@
boxVolume_.resize(nDofs);
localJacobian_.init(problem_());
- jacAsm_ = new JacobianAssembler();
+ jacAsm_ = Dune::make_shared<JacobianAssembler>();
jacAsm_->init(problem_());
asImp_().applyInitialSolution_();
@@ -637,8 +635,8 @@
*/
void resetJacobianAssembler ()
{
- delete jacAsm_;
- jacAsm_ = new JacobianAssembler;
+ jacAsm_.template reset<JacobianAssembler>(0);
+ jacAsm_ = Dune::make_shared<JacobianAssembler>();
jacAsm_->init(problem_());
}
@@ -999,7 +997,7 @@
LocalJacobian localJacobian_;
// Linearizes the problem at the current time step using the
// local jacobian
- JacobianAssembler *jacAsm_;
+ Dune::shared_ptr<JacobianAssembler> jacAsm_;
// the set of all indices of vertices on the boundary
std::vector<bool> boundaryIndices_;
Index: dumux/implicit/common/implicitporousmediaproblem.hh
===================================================================
--- dumux/implicit/common/implicitporousmediaproblem.hh (revision 10135)
+++ dumux/implicit/common/implicitporousmediaproblem.hh (working copy)
@@ -78,18 +78,12 @@
gravity_(0)
{
newSpatialParams_ = true;
- spatialParams_ = new SpatialParams(gridView);
+ spatialParams_ = Dune::make_shared<SpatialParams>(gridView);
if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity))
gravity_[dim-1] = -9.81;
}
- ~ImplicitPorousMediaProblem()
- {
- if (newSpatialParams_)
- delete spatialParams_;
- }
-
/*!
* \name Problem parameters
*/
@@ -199,7 +193,7 @@
DimVector gravity_;
// fluids and material properties
- SpatialParams* spatialParams_;
+ Dune::shared_ptr<SpatialParams> spatialParams_;
bool newSpatialParams_;
};
Index: dumux/implicit/common/implicitproblem.hh
===================================================================
--- dumux/implicit/common/implicitproblem.hh (revision 10135)
+++ dumux/implicit/common/implicitproblem.hh (working copy)
@@ -99,7 +99,6 @@
, timeManager_(&timeManager)
, newtonMethod_(asImp_())
, newtonCtl_(asImp_())
- , resultWriter_(0)
{
// calculate the bounding box of the local partition of the grid view
VertexIterator vIt = gridView.template begin<dim>();
@@ -122,13 +121,6 @@
simName_ = \"sim\";
}
- ~ImplicitProblem()
- {
- if (resultWriter_)
- delete resultWriter_;
- }
-
-
/*!
* \brief Called by the Dumux::TimeManager in order to
* initialize the problem.
@@ -857,7 +849,10 @@
private:
// makes sure that the result writer exists
void createResultWriter_()
- { if (!resultWriter_) resultWriter_ = new VtkMultiWriter(gridView_, asImp_().name()); }
+ {
+ if (!resultWriter_)
+ resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name());
+ }
std::string simName_;
const GridView gridView_;
@@ -875,7 +870,7 @@
NewtonMethod newtonMethod_;
NewtonController newtonCtl_;
- VtkMultiWriter *resultWriter_;
+ Dune::shared_ptr<VtkMultiWriter> resultWriter_;
};
}
Index: dumux/implicit/mpnc/mpncmodel.hh
===================================================================
--- dumux/implicit/mpnc/mpncmodel.hh (revision 10135)
+++ dumux/implicit/mpnc/mpncmodel.hh (working copy)
@@ -126,20 +126,10 @@
enum {numEq = GET_PROP_VALUE(TypeTag, NumEq)};
public:
- MPNCModel()
- : vtkWriter_(0)
- {}
-
- ~MPNCModel()
- {
- if (vtkWriter_)
- delete vtkWriter_;
- }
-
void init(Problem &problem)
{
ParentType::init(problem);
- vtkWriter_ = new MPNCVtkWriter(problem);
+ vtkWriter_ = Dune::make_shared<MPNCVtkWriter>(problem);
if (this->gridView_().comm().rank() == 0)
std::cout
@@ -185,7 +175,7 @@
vtkWriter_->addCurrentSolution(writer);
}
- MPNCVtkWriter *vtkWriter_;
+ Dune::shared_ptr<MPNCVtkWriter> vtkWriter_;
};
}
Index: dumux/io/vtkmultiwriter.hh
===================================================================
--- dumux/io/vtkmultiwriter.hh (revision 10135)
+++ dumux/io/vtkmultiwriter.hh (working copy)
@@ -122,7 +122,7 @@
}
- curWriter_ = new VtkWriter(gridView_, Dune::VTK::conforming);
+ curWriter_ = Dune::make_shared<VtkWriter>(gridView_, Dune::VTK::conforming);
++curWriterNum_;
curTime_ = t;
@@ -140,8 +140,8 @@
{
typedef Dune::BlockVector<Dune::FieldVector<Scalar, nComp> > VectorField;
- ManagedVectorField_<VectorField> *vfs =
- new ManagedVectorField_<VectorField>(nEntities);
+ Dune::shared_ptr<ManagedVectorField_<VectorField> > vfs =
+ Dune::make_shared<ManagedVectorField_<VectorField> >(nEntities);
managedObjects_.push_back(vfs);
return &(vfs->vf);
}
@@ -275,10 +275,8 @@
else
-- curWriterNum_;
- // discard managed objects and the current VTK writer
- delete curWriter_;
+ // discard managed objects
while (managedObjects_.begin() != managedObjects_.end()) {
- delete managedObjects_.front();
managedObjects_.pop_front();
}
@@ -493,12 +491,12 @@
int commSize_; // number of processes in the communicator
int commRank_; // rank of the current process in the communicator
- VtkWriter *curWriter_;
+ Dune::shared_ptr<VtkWriter> curWriter_;
double curTime_;
std::string curOutFileName_;
int curWriterNum_;
- std::list<ManagedObject_*> managedObjects_;
+ std::list<Dune::shared_ptr<ManagedObject_> > managedObjects_;
};
}
Index: dumux/linear/amgbackend.hh
===================================================================
--- dumux/linear/amgbackend.hh (revision 10135)
+++ dumux/linear/amgbackend.hh (working copy)
@@ -178,13 +178,13 @@
AMGBackend(const Problem& problem)
: problem_(problem)
{
- fem_ = new LocalFemMap();
- constraints_ = new Constraints();
- scalarGridFunctionSpace_ = new ScalarGridFunctionSpace(problem.gridView(), *fem_, *constraints_);
- gridFunctionSpace_ = new GridFunctionSpace(*scalarGridFunctionSpace_);
- imp_ = new PDELabBackend(*gridFunctionSpace_,
- GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
- GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
+ fem_ = Dune::make_shared<LocalFemMap>();
+ constraints_ = Dune::make_shared<Constraints>();
+ scalarGridFunctionSpace_ = Dune::make_shared<ScalarGridFunctionSpace>(problem.gridView(), *fem_, *constraints_);
+ gridFunctionSpace_ = Dune::make_shared<GridFunctionSpace>(*scalarGridFunctionSpace_);
+ int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+ int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+ imp_ = Dune::make_shared<PDELabBackend>(*gridFunctionSpace_, maxIt, verbosity);
}
/*!
@@ -216,23 +216,14 @@
{
return result_;
}
-
- ~AMGBackend()
- {
- delete imp_;
- delete gridFunctionSpace_;
- delete scalarGridFunctionSpace_;
- delete constraints_;
- delete fem_;
- }
private:
const Problem& problem_;
- LocalFemMap *fem_;
- Constraints *constraints_;
- ScalarGridFunctionSpace *scalarGridFunctionSpace_;
- GridFunctionSpace *gridFunctionSpace_;
- PDELabBackend *imp_;
+ Dune::shared_ptr<LocalFemMap> fem_;
+ Dune::shared_ptr<Constraints> constraints_;
+ Dune::shared_ptr<ScalarGridFunctionSpace> scalarGridFunctionSpace_;
+ Dune::shared_ptr<GridFunctionSpace> gridFunctionSpace_;
+ Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
@@ -266,9 +257,9 @@
template<class Matrix, class Vector>
bool solve(Matrix& A, Vector& x, Vector& b)
{
- imp_ = new PDELabBackend(
- GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
- GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
+ int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+ int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+ imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
imp_->apply(A, x, b, residReduction);
@@ -279,8 +270,8 @@
result_.reduction = imp_->result().reduction;
result_.conv_rate = imp_->result().conv_rate;
- delete imp_;
-
+ imp_.template reset<PDELabBackend>(0);
+
return result_.converged;
}
@@ -294,7 +285,7 @@
private:
const Problem& problem_;
- PDELabBackend *imp_;
+ Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
@@ -333,10 +324,10 @@
{
scaleLinearSystem(A, b);
- imp_ = new PDELabBackend(
- GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
- GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
-
+ int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
+ int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
+ imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
+
static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
imp_->apply(A, x, b, residReduction);
@@ -346,8 +337,8 @@
result_.reduction = imp_->result().reduction;
result_.conv_rate = imp_->result().conv_rate;
- delete imp_;
-
+ imp_.template reset<PDELabBackend>(0);
+
return result_.converged;
}
@@ -361,7 +352,7 @@
private:
const Problem& problem_;
- PDELabBackend *imp_;
+ Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
Index: dumux/linear/boxlinearsolver.hh
===================================================================
--- dumux/linear/boxlinearsolver.hh (revision 10135)
+++ dumux/linear/boxlinearsolver.hh (working copy)
@@ -68,11 +68,7 @@
BoxLinearSolver(const Problem &problem, int overlapSize)
: problem_(problem)
, overlapSize_(overlapSize)
- {
- overlapMatrix_ = 0;
- overlapb_ = 0;
- overlapx_ = 0;
- }
+ {}
~BoxLinearSolver()
{ cleanup_(); }
@@ -156,35 +152,30 @@
borderListCreator(problem_.gridView(), problem_.vertexMapper());
// create the overlapping Jacobian matrix
- overlapMatrix_ = new OverlappingMatrix (M,
+ overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
borderListCreator.foreignBorderList(),
borderListCreator.domesticBorderList(),
overlapSize_);
// create the overlapping vectors for the residual and the
// solution
- overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
- overlapx_ = new OverlappingVector(*overlapb_);
+ overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
+ overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
}
void cleanup_()
{
- // create the overlapping Jacobian matrix and vectors
- delete overlapMatrix_;
- delete overlapb_;
- delete overlapx_;
-
- overlapMatrix_ = 0;
- overlapb_ = 0;
- overlapx_ = 0;
+ overlapMatrix_.template reset<OverlappingMatrix>(0);
+ overlapb_.template reset<OverlappingVector>(0);
+ overlapx_.template reset<OverlappingVector>(0);
}
const Problem &problem_;
int overlapSize_;
- OverlappingMatrix *overlapMatrix_;
- OverlappingVector *overlapb_;
- OverlappingVector *overlapx_;
+ Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
+ Dune::shared_ptr<OverlappingVector> overlapb_;
+ Dune::shared_ptr<OverlappingVector> overlapx_;
};
template <class TypeTag, class Imp>
Index: dumux/linear/domesticoverlapfrombcrsmatrix.hh
===================================================================
--- dumux/linear/domesticoverlapfrombcrsmatrix.hh (revision 10135)
+++ dumux/linear/domesticoverlapfrombcrsmatrix.hh (working copy)
@@ -372,12 +372,12 @@
// indices stemming from the overlap (i.e. without the border
// indices)
int numIndices = foreignOverlap.size();
- numIndicesSendBuff_[peerRank] = new MpiBuffer<int>(1);
+ numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
(*numIndicesSendBuff_[peerRank])[0] = numIndices;
numIndicesSendBuff_[peerRank]->send(peerRank);
// create MPI buffers
- indicesSendBuff_[peerRank] = new MpiBuffer<IndexDistanceNpeers>(numIndices);
+ indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<IndexDistanceNpeers> >(numIndices);
// then send the additional indices themselfs
ForeignOverlapWithPeer::const_iterator overlapIt = foreignOverlap.begin();
@@ -397,7 +397,7 @@
numPeers);
// send all peer ranks which see the given index
- peersSendBuff_[peerRank].push_back(new MpiBuffer<int>(2*numPeers));
+ peersSendBuff_[peerRank].push_back(Dune::make_shared<MpiBuffer<int> >(2*numPeers));
typename std::map<ProcessRank, BorderDistance>::const_iterator it = foreignIndexOverlap.begin();
typename std::map<ProcessRank, BorderDistance>::const_iterator endIt = foreignIndexOverlap.end();
for (int j = 0; it != endIt; ++it, ++j)
@@ -419,10 +419,10 @@
void waitSendIndices_(int peerRank)
{
numIndicesSendBuff_[peerRank]->wait();
- delete numIndicesSendBuff_[peerRank];
+ numIndicesSendBuff_[peerRank].template reset<MpiBuffer<int> >(0);
indicesSendBuff_[peerRank]->wait();
- delete indicesSendBuff_[peerRank];
+ indicesSendBuff_[peerRank].template reset<MpiBuffer<IndexDistanceNpeers> >(0);
const ForeignOverlapWithPeer &foreignPeerOverlap
= foreignOverlap_.foreignOverlapWithPeer(peerRank);
@@ -430,7 +430,7 @@
ForeignOverlapWithPeer::const_iterator overlapEndIt = foreignPeerOverlap.end();
for (int i = 0; overlapIt != overlapEndIt; ++overlapIt, ++i) {
peersSendBuff_[peerRank][i]->wait();
- delete peersSendBuff_[peerRank][i];
+ peersSendBuff_[peerRank][i].template reset<MpiBuffer<int> >(0);
}
}
@@ -498,9 +498,9 @@
DomesticOverlapByIndex domesticOverlapByIndex_;
std::vector<int> borderDistance_;
- std::map<ProcessRank, MpiBuffer<int>* > numIndicesSendBuff_;
- std::map<ProcessRank, MpiBuffer<IndexDistanceNpeers>* > indicesSendBuff_;
- std::map<ProcessRank, std::vector<MpiBuffer<int>*> > peersSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numIndicesSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<IndexDistanceNpeers> > > indicesSendBuff_;
+ std::map<ProcessRank, std::vector<Dune::shared_ptr<MpiBuffer<int> > > > peersSendBuff_;
GlobalIndices globalIndices_;
PeerSet peerSet_;
};
Index: dumux/linear/impetbicgstabilu0solver.hh
===================================================================
--- dumux/linear/impetbicgstabilu0solver.hh (revision 10135)
+++ dumux/linear/impetbicgstabilu0solver.hh (working copy)
@@ -82,14 +82,10 @@
public:
IMPETBiCGStabILU0Solver(const Problem &problem, int overlapSize=1)
- : problem_(problem)
- , overlapSize_(overlapSize)
- {
- overlapMatrix_ = 0;
- overlapb_ = 0;
- overlapx_ = 0;
- }
-
+ : problem_(problem)
+ , overlapSize_(overlapSize)
+ {}
+
~IMPETBiCGStabILU0Solver()
{ cleanup_(); }
@@ -175,35 +171,30 @@
borderListCreator(problem_.gridView(), problem_.elementMapper());
// create the overlapping Jacobian matrix
- overlapMatrix_ = new OverlappingMatrix (M,
+ overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
borderListCreator.foreignBorderList(),
borderListCreator.domesticBorderList(),
overlapSize_);
// create the overlapping vectors for the residual and the
// solution
- overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
- overlapx_ = new OverlappingVector(*overlapb_);
+ overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
+ overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
}
void cleanup_()
{
- // create the overlapping Jacobian matrix and vectors
- delete overlapMatrix_;
- delete overlapb_;
- delete overlapx_;
-
- overlapMatrix_ = 0;
- overlapb_ = 0;
- overlapx_ = 0;
+ overlapMatrix_.template reset<OverlappingMatrix>(0);
+ overlapb_.template reset<OverlappingVector>(0);
+ overlapx_.template reset<OverlappingVector>(0);
}
-
+
const Problem &problem_;
int overlapSize_;
- OverlappingMatrix *overlapMatrix_;
- OverlappingVector *overlapb_;
- OverlappingVector *overlapx_;
+ Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
+ Dune::shared_ptr<OverlappingVector> overlapb_;
+ Dune::shared_ptr<OverlappingVector> overlapx_;
};
} // namespace Dumux
Index: dumux/linear/overlappingbcrsmatrix.hh
===================================================================
--- dumux/linear/overlappingbcrsmatrix.hh (revision 10135)
+++ dumux/linear/overlappingbcrsmatrix.hh (working copy)
@@ -75,7 +75,7 @@
const BorderList &domesticBorderList,
int overlapSize)
{
- overlap_ = Dune::shared_ptr<Overlap>(new Overlap(M, foreignBorderList, domesticBorderList, overlapSize));
+ overlap_ = Dune::make_shared<Overlap>(M, foreignBorderList, domesticBorderList, overlapSize);
myRank_ = 0;
#if HAVE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
@@ -86,31 +86,6 @@
build_(M);
}
- ~OverlappingBCRSMatrix()
- {
- if (overlap_.use_count() == 0)
- return;
-
- // delete all MPI buffers
- const PeerSet &peerSet = overlap_->foreignOverlap().peerSet();
- typename PeerSet::const_iterator peerIt = peerSet.begin();
- typename PeerSet::const_iterator peerEndIt = peerSet.end();
- for (; peerIt != peerEndIt; ++peerIt) {
- int peerRank = *peerIt;
-
- delete rowSizesRecvBuff_[peerRank];
- delete rowIndicesRecvBuff_[peerRank];
- delete entryIndicesRecvBuff_[peerRank];
- delete entryValuesRecvBuff_[peerRank];
-
- delete numRowsSendBuff_[peerRank];
- delete rowSizesSendBuff_[peerRank];
- delete rowIndicesSendBuff_[peerRank];
- delete entryIndicesSendBuff_[peerRank];
- delete entryValuesSendBuff_[peerRank];
- }
- }
-
/*!
* \brief Returns the domestic overlap for the process.
*/
@@ -326,12 +301,12 @@
// send size of foreign overlap to peer
int numOverlapRows = peerOverlap.size();
- numRowsSendBuff_[peerRank] = new MpiBuffer<int>(1);
+ numRowsSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
(*numRowsSendBuff_[peerRank])[0] = numOverlapRows;
numRowsSendBuff_[peerRank]->send(peerRank);
- rowSizesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
- rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
+ rowSizesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
+ rowIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
// create the row size MPI buffer
int numEntries = 0;
@@ -363,7 +338,7 @@
// create and fill the MPI buffer for the indices of the
// matrix entries
- entryIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numEntries);
+ entryIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numEntries);
i = 0;
it = peerOverlap.begin();
for (; it != endIt; ++it) {
@@ -384,7 +359,7 @@
// create the send buffers for the values of the matrix
// entries
- entryValuesSendBuff_[peerRank] = new MpiBuffer<block_type>(numEntries);
+ entryValuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(numEntries);
#endif // HAVE_MPI
}
@@ -400,8 +375,8 @@
// create receive buffer for the row sizes and receive them
// from the peer
- rowIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
- rowSizesRecvBuff_[peerRank] = new MpiBuffer<int>(numOverlapRows);
+ rowIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
+ rowSizesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(numOverlapRows);
rowIndicesRecvBuff_[peerRank]->receive(peerRank);
rowSizesRecvBuff_[peerRank]->receive(peerRank);
@@ -413,8 +388,8 @@
}
// create the buffer to store the column indices of the matrix entries
- entryIndicesRecvBuff_[peerRank] = new MpiBuffer<Index>(totalIndices);
- entryValuesRecvBuff_[peerRank] = new MpiBuffer<block_type>(totalIndices);
+ entryIndicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(totalIndices);
+ entryValuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<block_type> >(totalIndices);
// communicate with the peer
entryIndicesRecvBuff_[peerRank]->receive(peerRank);
@@ -595,16 +570,16 @@
Entries entries_;
Dune::shared_ptr<Overlap> overlap_;
- std::map<ProcessRank, MpiBuffer<int>* > rowSizesRecvBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > rowIndicesRecvBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > entryIndicesRecvBuff_;
- std::map<ProcessRank, MpiBuffer<block_type>* > entryValuesRecvBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesRecvBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesRecvBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesRecvBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesRecvBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > numRowsSendBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > rowSizesSendBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > rowIndicesSendBuff_;
- std::map<ProcessRank, MpiBuffer<int>* > entryIndicesSendBuff_;
- std::map<ProcessRank, MpiBuffer<block_type>* > entryValuesSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numRowsSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowSizesSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > rowIndicesSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > entryIndicesSendBuff_;
+ std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<block_type> > > entryValuesSendBuff_;
};
} // namespace Dumux
Index: dumux/linear/overlappingblockvector.hh
===================================================================
--- dumux/linear/overlappingblockvector.hh (revision 10135)
+++ dumux/linear/overlappingblockvector.hh (working copy)
@@ -285,7 +285,7 @@
#if HAVE_MPI
// create array for the front indices
int numDomestic = overlap_->numDomestic();
- frontMaster_ = Dune::shared_ptr<std::vector<ProcessRank> >(new std::vector<ProcessRank>(numDomestic, -1));
+ frontMaster_ = Dune::make_shared<std::vector<ProcessRank> >(numDomestic, -1);
typename PeerSet::const_iterator peerIt;
typename PeerSet::const_iterator peerEndIt = overlap_->peerSet().end();
@@ -297,9 +297,9 @@
const DomesticOverlapWithPeer &domesticOverlap = overlap_->domesticOverlapWithPeer(peerRank);
int numEntries = domesticOverlap.size();
- numIndicesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<int> >(new MpiBuffer<int>(1));
- indicesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<RowIndex> >(new MpiBuffer<RowIndex>(numEntries));
- valuesSendBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<FieldVector> >(new MpiBuffer<FieldVector>(numEntries));
+ numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
+ indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numEntries);
+ valuesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numEntries);
// fill the indices buffer with global indices
MpiBuffer<RowIndex> &indicesSendBuff = *indicesSendBuff_[peerRank];
@@ -330,8 +330,8 @@
numRows = numRowsRecvBuff[0];
// then, create the MPI buffers
- indicesRecvBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<RowIndex> >(new MpiBuffer<RowIndex>(numRows));
- valuesRecvBuff_[peerRank] = Dune::shared_ptr<MpiBuffer<FieldVector> >(new MpiBuffer<FieldVector>(numRows));
+ indicesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<RowIndex> >(numRows);
+ valuesRecvBuff_[peerRank] = Dune::make_shared<MpiBuffer<FieldVector> >(numRows);
MpiBuffer<RowIndex> &indicesRecvBuff = *indicesRecvBuff_[peerRank];
// next, receive the actual indices
Index: dumux/nonlinear/newtonconvergencewriter.hh
===================================================================
--- dumux/nonlinear/newtonconvergencewriter.hh (revision 10135)
+++ dumux/nonlinear/newtonconvergencewriter.hh (working copy)
@@ -46,16 +46,12 @@
typedef Dumux::VtkMultiWriter<GridView> VtkMultiWriter;
NewtonConvergenceWriter(NewtonController &ctl)
- : ctl_(ctl)
+ : ctl_(ctl)
{
timeStepIndex_ = 0;
iteration_ = 0;
- vtkMultiWriter_ = 0;
}
- ~NewtonConvergenceWriter()
- { delete vtkMultiWriter_; }
-
void beginTimestep()
{
++timeStepIndex_;
@@ -66,7 +62,7 @@
{
++ iteration_;
if (!vtkMultiWriter_)
- vtkMultiWriter_ = new VtkMultiWriter(gv, "convergence");
+ vtkMultiWriter_ = Dune::make_shared<VtkMultiWriter>(gv, "convergence");
vtkMultiWriter_->beginWrite(timeStepIndex_ + iteration_ / 100.0);
}
@@ -87,7 +83,7 @@
private:
int timeStepIndex_;
int iteration_;
- VtkMultiWriter *vtkMultiWriter_;
+ Dune::shared_ptr<VtkMultiWriter> vtkMultiWriter_;
NewtonController &ctl_;
};