Commit ca0f5c40 authored by Bernd Flemisch

Replace standard pointers by smart pointers for common and implicit. This
partially implements FS#182.
Reviewed by Christoph.


git-svn-id: svn://svn.iws.uni-stuttgart.de/DUMUX/dumux/trunk@10176 2fb0f335-1f38-0410-981e-8018bf24f1b0
parent ebdafbe0
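
The pattern is the same in every file touched below: an owning raw pointer with manual new/delete becomes a Dune::shared_ptr created via Dune::make_shared, and destructors that only freed memory disappear. A minimal before/after sketch of this idiom, assuming the std-style shared_ptr semantics that <dune/common/shared_ptr.hh> provides (Assembler and the Model classes are hypothetical stand-ins, not Dumux types):

    #include <dune/common/shared_ptr.hh>

    struct Assembler {}; // hypothetical stand-in for JacobianAssembler etc.

    // before: manual ownership; leak-prone if the destructor is missed
    // and unsafe under compiler-generated copies
    class ModelBefore
    {
    public:
        ModelBefore() : jacAsm_(new Assembler) {}
        ~ModelBefore() { delete jacAsm_; }
    private:
        Assembler *jacAsm_;
    };

    // after: the shared pointer owns the object, so no destructor is needed
    class ModelAfter
    {
    public:
        ModelAfter() : jacAsm_(Dune::make_shared<Assembler>()) {}
    private:
        Dune::shared_ptr<Assembler> jacAsm_;
    };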
@@ -220,7 +220,7 @@ private:
int numVerticesGlobal = this->gridView_().size(dim);
// allocate raw matrix
this->matrix_ = new JacobianMatrix(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
this->matrix_ = Dune::make_shared<JacobianMatrix>(numVerticesGlobal, numVerticesGlobal, JacobianMatrix::random);
// find out the global indices of the neighboring vertices of
// each vertex
@@ -134,7 +134,7 @@ private:
int nElems = this->gridView_().size(0);
// allocate raw matrix
this->matrix_ = new JacobianMatrix(nElems, nElems, JacobianMatrix::random);
this->matrix_ = Dune::make_shared<JacobianMatrix>(nElems, nElems, JacobianMatrix::random);
// find out the global indices of the neighboring elements of
// each element
@@ -95,21 +95,14 @@ public:
};
ImplicitAssembler()
: problemPtr_(0)
{
problemPtr_ = 0;
matrix_ = 0;
// set reassemble accuracy to 0, so that if partial reassembly
// of the jacobian matrix is disabled, the reassemble accuracy
// is always smaller than the current relative tolerance
reassembleAccuracy_ = 0.0;
}
~ImplicitAssembler()
{
delete matrix_;
}
/*!
* \brief Initialize the jacobian assembler.
*
@@ -543,7 +536,7 @@ protected:
Problem *problemPtr_;
// the jacobian matrix
JacobianMatrix *matrix_;
Dune::shared_ptr<JacobianMatrix> matrix_;
// the right-hand side
SolutionVector residual_;
@@ -88,13 +88,11 @@ public:
* \brief The constructor.
*/
ImplicitModel()
: problemPtr_(0)
{
enableHints_ = GET_PARAM_FROM_GROUP(TypeTag, bool, Implicit, EnableHints);
}
~ImplicitModel()
{ delete jacAsm_; }
/*!
* \brief Apply the initial conditions to the model.
*
@@ -114,7 +112,7 @@ public:
boxVolume_.resize(nDofs);
localJacobian_.init(problem_());
jacAsm_ = new JacobianAssembler();
jacAsm_ = Dune::make_shared<JacobianAssembler>();
jacAsm_->init(problem_());
asImp_().applyInitialSolution_();
@@ -637,8 +635,8 @@
*/
void resetJacobianAssembler ()
{
delete jacAsm_;
jacAsm_ = new JacobianAssembler;
jacAsm_.template reset<JacobianAssembler>(0);
jacAsm_ = Dune::make_shared<JacobianAssembler>();
jacAsm_->init(problem_());
}
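
Where the old code paired a delete with a reassignment, the new code first clears the shared pointer by resetting it with a null raw pointer, as in jacAsm_.template reset<JacobianAssembler>(0) above. A short sketch of that spelling, assuming std-style shared_ptr semantics (Backend is a hypothetical placeholder):

    #include <dune/common/shared_ptr.hh>

    template <class Backend>
    void release(Dune::shared_ptr<Backend>& p)
    {
        // The literal 0 is an int, so the element type has to be named
        // explicitly for 0 to convert to Backend*; the 'template' keyword
        // is required because Backend is a dependent type. (nullptr was
        // not yet usable on all compilers Dune supported at the time.)
        p.template reset<Backend>(0);

        // The argument-less overload releases the managed object just the
        // same and is the more common spelling:
        p.reset();
    }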
@@ -999,7 +997,7 @@ protected:
LocalJacobian localJacobian_;
// Linearizes the problem at the current time step using the
// local jacobian
JacobianAssembler *jacAsm_;
Dune::shared_ptr<JacobianAssembler> jacAsm_;
// the set of all indices of vertices on the boundary
std::vector<bool> boundaryIndices_;
@@ -78,18 +78,12 @@ public:
gravity_(0)
{
newSpatialParams_ = true;
spatialParams_ = new SpatialParams(gridView);
spatialParams_ = Dune::make_shared<SpatialParams>(gridView);
if (GET_PARAM_FROM_GROUP(TypeTag, bool, Problem, EnableGravity))
gravity_[dim-1] = -9.81;
}
~ImplicitPorousMediaProblem()
{
if (newSpatialParams_)
delete spatialParams_;
}
/*!
* \name Problem parameters
*/
@@ -199,7 +193,7 @@ protected:
DimVector gravity_;
// fluids and material properties
SpatialParams* spatialParams_;
Dune::shared_ptr<SpatialParams> spatialParams_;
bool newSpatialParams_;
};
@@ -99,7 +99,6 @@ public:
, timeManager_(&timeManager)
, newtonMethod_(asImp_())
, newtonCtl_(asImp_())
, resultWriter_(0)
{
// calculate the bounding box of the local partition of the grid view
VertexIterator vIt = gridView.template begin<dim>();
@@ -122,13 +121,6 @@ public:
simName_ = "sim";
}
~ImplicitProblem()
{
if (resultWriter_)
delete resultWriter_;
}
/*!
* \brief Called by the Dumux::TimeManager in order to
* initialize the problem.
@@ -857,7 +849,10 @@ protected:
private:
// makes sure that the result writer exists
void createResultWriter_()
{ if (!resultWriter_) resultWriter_ = new VtkMultiWriter(gridView_, asImp_().name()); }
{
if (!resultWriter_)
resultWriter_ = Dune::make_shared<VtkMultiWriter>(gridView_, asImp_().name());
}
std::string simName_;
const GridView gridView_;
@@ -875,7 +870,7 @@ private:
NewtonMethod newtonMethod_;
NewtonController newtonCtl_;
VtkMultiWriter *resultWriter_;
Dune::shared_ptr<VtkMultiWriter> resultWriter_;
};
}
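
resultWriter_ is created lazily in createResultWriter_() above; a shared pointer keeps that idiom intact, because an empty shared_ptr tests false and a default-constructed one is empty, which is also why the resultWriter_(0) initializer could simply be dropped. A minimal sketch under those std-style assumptions (Writer and LazyHolder are hypothetical):

    #include <dune/common/shared_ptr.hh>

    struct Writer {}; // hypothetical stand-in for VtkMultiWriter

    struct LazyHolder
    {
        Dune::shared_ptr<Writer> writer_; // default-constructed: empty, tests false

        Writer& get()
        {
            if (!writer_) // same test as with the raw pointer
                writer_ = Dune::make_shared<Writer>();
            return *writer_;
        }
    };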
@@ -126,20 +126,10 @@ class MPNCModel : public GET_PROP_TYPE(TypeTag, BaseModel)
enum {numEq = GET_PROP_VALUE(TypeTag, NumEq)};
public:
MPNCModel()
: vtkWriter_(0)
{}
~MPNCModel()
{
if (vtkWriter_)
delete vtkWriter_;
}
void init(Problem &problem)
{
ParentType::init(problem);
vtkWriter_ = new MPNCVtkWriter(problem);
vtkWriter_ = Dune::make_shared<MPNCVtkWriter>(problem);
if (this->gridView_().comm().rank() == 0)
std::cout
@@ -185,7 +175,7 @@ public:
vtkWriter_->addCurrentSolution(writer);
}
MPNCVtkWriter *vtkWriter_;
Dune::shared_ptr<MPNCVtkWriter> vtkWriter_;
};
}
@@ -122,7 +122,7 @@ public:
}
curWriter_ = new VtkWriter(gridView_, Dune::VTK::conforming);
curWriter_ = Dune::make_shared<VtkWriter>(gridView_, Dune::VTK::conforming);
++curWriterNum_;
curTime_ = t;
@@ -140,8 +140,8 @@
{
typedef Dune::BlockVector<Dune::FieldVector<Scalar, nComp> > VectorField;
ManagedVectorField_<VectorField> *vfs =
new ManagedVectorField_<VectorField>(nEntities);
Dune::shared_ptr<ManagedVectorField_<VectorField> > vfs =
Dune::make_shared<ManagedVectorField_<VectorField> >(nEntities);
managedObjects_.push_back(vfs);
return &(vfs->vf);
}
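
Note the ownership split in the hunk above: the shared pointer goes into managedObjects_, which keeps the vector field alive, while the caller receives a plain non-owning pointer into it, exactly as in the raw-pointer version. A sketch of the idea (Field and Manager are hypothetical stand-ins):

    #include <list>
    #include <dune/common/shared_ptr.hh>

    struct Field { double vf; }; // hypothetical stand-in for ManagedVectorField_

    struct Manager
    {
        std::list<Dune::shared_ptr<Field> > managedObjects_;

        // the list keeps the Field alive; the caller gets a non-owning
        // pointer into it
        double *allocate()
        {
            Dune::shared_ptr<Field> f = Dune::make_shared<Field>();
            managedObjects_.push_back(f);
            return &(f->vf);
        }
    };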
@@ -275,10 +275,8 @@ public:
else
-- curWriterNum_;
// discard managed objects and the current VTK writer
delete curWriter_;
// discard managed objects
while (managedObjects_.begin() != managedObjects_.end()) {
delete managedObjects_.front();
managedObjects_.pop_front();
}
@@ -493,12 +491,12 @@ private:
int commSize_; // number of processes in the communicator
int commRank_; // rank of the current process in the communicator
VtkWriter *curWriter_;
Dune::shared_ptr<VtkWriter> curWriter_;
double curTime_;
std::string curOutFileName_;
int curWriterNum_;
std::list<ManagedObject_*> managedObjects_;
std::list<Dune::shared_ptr<ManagedObject_> > managedObjects_;
};
}
@@ -224,13 +224,13 @@ public:
AMGBackend(const Problem& problem)
: problem_(problem)
{
fem_ = new LocalFemMap();
constraints_ = new Constraints();
scalarGridFunctionSpace_ = new ScalarGridFunctionSpace(problem.gridView(), *fem_, *constraints_);
gridFunctionSpace_ = new GridFunctionSpace(*scalarGridFunctionSpace_);
imp_ = new PDELabBackend(*gridFunctionSpace_,
GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
fem_ = Dune::make_shared<LocalFemMap>();
constraints_ = Dune::make_shared<Constraints>();
scalarGridFunctionSpace_ = Dune::make_shared<ScalarGridFunctionSpace>(problem.gridView(), *fem_, *constraints_);
gridFunctionSpace_ = Dune::make_shared<GridFunctionSpace>(*scalarGridFunctionSpace_);
int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
imp_ = Dune::make_shared<PDELabBackend>(*gridFunctionSpace_, maxIt, verbosity);
}
/*!
@@ -262,23 +262,14 @@ public:
{
return result_;
}
~AMGBackend()
{
delete imp_;
delete gridFunctionSpace_;
delete scalarGridFunctionSpace_;
delete constraints_;
delete fem_;
}
private:
const Problem& problem_;
LocalFemMap *fem_;
Constraints *constraints_;
ScalarGridFunctionSpace *scalarGridFunctionSpace_;
GridFunctionSpace *gridFunctionSpace_;
PDELabBackend *imp_;
Dune::shared_ptr<LocalFemMap> fem_;
Dune::shared_ptr<Constraints> constraints_;
Dune::shared_ptr<ScalarGridFunctionSpace> scalarGridFunctionSpace_;
Dune::shared_ptr<GridFunctionSpace> gridFunctionSpace_;
Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
@@ -312,9 +303,9 @@ public:
template<class Matrix, class Vector>
bool solve(Matrix& A, Vector& x, Vector& b)
{
imp_ = new PDELabBackend(
GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
imp_->apply(A, x, b, residReduction);
@@ -325,8 +316,8 @@ public:
result_.reduction = imp_->result().reduction;
result_.conv_rate = imp_->result().conv_rate;
delete imp_;
imp_.template reset<PDELabBackend>(0);
return result_.converged;
}
@@ -340,7 +331,7 @@ public:
private:
const Problem& problem_;
PDELabBackend *imp_;
Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
@@ -379,10 +370,10 @@ public:
{
scaleLinearSystem(A, b);
imp_ = new PDELabBackend(
GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations),
GET_PROP_VALUE(TypeTag, LinearSolverVerbosity));
int maxIt = GET_PROP_VALUE(TypeTag, LinearSolverMaxIterations);
int verbosity = GET_PROP_VALUE(TypeTag, LinearSolverVerbosity);
imp_ = Dune::make_shared<PDELabBackend>(maxIt, verbosity);
static const double residReduction = GET_PROP_VALUE(TypeTag, LinearSolverResidualReduction);
imp_->apply(A, x, b, residReduction);
@@ -392,8 +383,8 @@ public:
result_.reduction = imp_->result().reduction;
result_.conv_rate = imp_->result().conv_rate;
delete imp_;
imp_.template reset<PDELabBackend>(0);
return result_.converged;
}
@@ -407,7 +398,7 @@ public:
private:
const Problem& problem_;
PDELabBackend *imp_;
Dune::shared_ptr<PDELabBackend> imp_;
Dune::InverseOperatorResult result_;
};
@@ -68,11 +68,7 @@ public:
BoxLinearSolver(const Problem &problem, int overlapSize)
: problem_(problem)
, overlapSize_(overlapSize)
{
overlapMatrix_ = 0;
overlapb_ = 0;
overlapx_ = 0;
}
{}
~BoxLinearSolver()
{ cleanup_(); }
@@ -156,35 +152,30 @@ private:
borderListCreator(problem_.gridView(), problem_.vertexMapper());
// create the overlapping Jacobian matrix
overlapMatrix_ = new OverlappingMatrix (M,
overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
borderListCreator.foreignBorderList(),
borderListCreator.domesticBorderList(),
overlapSize_);
// create the overlapping vectors for the residual and the
// solution
overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
overlapx_ = new OverlappingVector(*overlapb_);
overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
}
void cleanup_()
{
// release the overlapping Jacobian matrix and vectors
delete overlapMatrix_;
delete overlapb_;
delete overlapx_;
overlapMatrix_ = 0;
overlapb_ = 0;
overlapx_ = 0;
overlapMatrix_.template reset<OverlappingMatrix>(0);
overlapb_.template reset<OverlappingVector>(0);
overlapx_.template reset<OverlappingVector>(0);
}
const Problem &problem_;
int overlapSize_;
OverlappingMatrix *overlapMatrix_;
OverlappingVector *overlapb_;
OverlappingVector *overlapx_;
Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
Dune::shared_ptr<OverlappingVector> overlapb_;
Dune::shared_ptr<OverlappingVector> overlapx_;
};
template <class TypeTag, class Imp>
......
@@ -372,12 +372,12 @@ protected:
// indices stemming from the overlap (i.e. without the border
// indices)
int numIndices = foreignOverlap.size();
numIndicesSendBuff_[peerRank] = new MpiBuffer<int>(1);
numIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
(*numIndicesSendBuff_[peerRank])[0] = numIndices;
numIndicesSendBuff_[peerRank]->send(peerRank);
// create MPI buffers
indicesSendBuff_[peerRank] = new MpiBuffer<IndexDistanceNpeers>(numIndices);
indicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<IndexDistanceNpeers> >(numIndices);
// then send the additional indices themselves
ForeignOverlapWithPeer::const_iterator overlapIt = foreignOverlap.begin();
@@ -397,7 +397,7 @@ protected:
numPeers);
// send all peer ranks which see the given index
peersSendBuff_[peerRank].push_back(new MpiBuffer<int>(2*numPeers));
peersSendBuff_[peerRank].push_back(Dune::make_shared<MpiBuffer<int> >(2*numPeers));
typename std::map<ProcessRank, BorderDistance>::const_iterator it = foreignIndexOverlap.begin();
typename std::map<ProcessRank, BorderDistance>::const_iterator endIt = foreignIndexOverlap.end();
for (int j = 0; it != endIt; ++it, ++j)
@@ -419,10 +419,10 @@ protected:
void waitSendIndices_(int peerRank)
{
numIndicesSendBuff_[peerRank]->wait();
delete numIndicesSendBuff_[peerRank];
numIndicesSendBuff_[peerRank].template reset<MpiBuffer<int> >(0);
indicesSendBuff_[peerRank]->wait();
delete indicesSendBuff_[peerRank];
indicesSendBuff_[peerRank].template reset<MpiBuffer<IndexDistanceNpeers> >(0);
const ForeignOverlapWithPeer &foreignPeerOverlap
= foreignOverlap_.foreignOverlapWithPeer(peerRank);
@@ -430,7 +430,7 @@ protected:
ForeignOverlapWithPeer::const_iterator overlapEndIt = foreignPeerOverlap.end();
for (int i = 0; overlapIt != overlapEndIt; ++overlapIt, ++i) {
peersSendBuff_[peerRank][i]->wait();
delete peersSendBuff_[peerRank][i];
peersSendBuff_[peerRank][i].template reset<MpiBuffer<int> >(0);
}
}
@@ -498,9 +498,9 @@ protected:
DomesticOverlapByIndex domesticOverlapByIndex_;
std::vector<int> borderDistance_;
std::map<ProcessRank, MpiBuffer<int>* > numIndicesSendBuff_;
std::map<ProcessRank, MpiBuffer<IndexDistanceNpeers>* > indicesSendBuff_;
std::map<ProcessRank, std::vector<MpiBuffer<int>*> > peersSendBuff_;
std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<int> > > numIndicesSendBuff_;
std::map<ProcessRank, Dune::shared_ptr<MpiBuffer<IndexDistanceNpeers> > > indicesSendBuff_;
std::map<ProcessRank, std::vector<Dune::shared_ptr<MpiBuffer<int> > > > peersSendBuff_;
GlobalIndices globalIndices_;
PeerSet peerSet_;
};
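
The maps of raw MpiBuffer pointers above were the reason for the hand-written delete loops this commit removes; once the mapped type is a shared pointer, destroying the container releases every buffer. A sketch of the idea, with Buffer as a hypothetical stand-in for MpiBuffer:

    #include <map>
    #include <vector>
    #include <dune/common/shared_ptr.hh>

    template <class T>
    struct Buffer // hypothetical stand-in for Dumux::MpiBuffer<T>
    {
        explicit Buffer(int n) : data(n) {}
        std::vector<T> data;
    };

    struct PeerBuffers
    {
        std::map<int, Dune::shared_ptr<Buffer<int> > > sendBuff_;

        void create(int peerRank, int numIndices)
        { sendBuff_[peerRank] = Dune::make_shared<Buffer<int> >(numIndices); }

        // no destructor: destroying the map drops the last reference to
        // each buffer, which is then freed automatically
    };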
@@ -82,14 +82,10 @@ class IMPETBiCGStabILU0Solver
public:
IMPETBiCGStabILU0Solver(const Problem &problem, int overlapSize=1)
: problem_(problem)
, overlapSize_(overlapSize)
{
overlapMatrix_ = 0;
overlapb_ = 0;
overlapx_ = 0;
}
: problem_(problem)
, overlapSize_(overlapSize)
{}
~IMPETBiCGStabILU0Solver()
{ cleanup_(); }
@@ -175,35 +171,30 @@ private:
borderListCreator(problem_.gridView(), problem_.elementMapper());
// create the overlapping Jacobian matrix
overlapMatrix_ = new OverlappingMatrix (M,
overlapMatrix_ = Dune::make_shared<OverlappingMatrix> (M,
borderListCreator.foreignBorderList(),
borderListCreator.domesticBorderList(),
overlapSize_);
// create the overlapping vectors for the residual and the
// solution
overlapb_ = new OverlappingVector(overlapMatrix_->overlap());
overlapx_ = new OverlappingVector(*overlapb_);
overlapb_ = Dune::make_shared<OverlappingVector>(overlapMatrix_->overlap());
overlapx_ = Dune::make_shared<OverlappingVector>(*overlapb_);
}
void cleanup_()
{
// release the overlapping Jacobian matrix and vectors
delete overlapMatrix_;
delete overlapb_;
delete overlapx_;
overlapMatrix_ = 0;
overlapb_ = 0;
overlapx_ = 0;
overlapMatrix_.template reset<OverlappingMatrix>(0);
overlapb_.template reset<OverlappingVector>(0);
overlapx_.template reset<OverlappingVector>(0);
}
const Problem &problem_;
int overlapSize_;
OverlappingMatrix *overlapMatrix_;
OverlappingVector *overlapb_;
OverlappingVector *overlapx_;
Dune::shared_ptr<OverlappingMatrix> overlapMatrix_;
Dune::shared_ptr<OverlappingVector> overlapb_;
Dune::shared_ptr<OverlappingVector> overlapx_;
};
} // namespace Dumux
@@ -75,7 +75,7 @@ public:
const BorderList &domesticBorderList,
int overlapSize)
{
overlap_ = Dune::shared_ptr<Overlap>(new Overlap(M, foreignBorderList, domesticBorderList, overlapSize));
overlap_ = Dune::make_shared<Overlap>(M, foreignBorderList, domesticBorderList, overlapSize);
myRank_ = 0;
#if HAVE_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
@@ -86,31 +86,6 @@ public:
build_(M);
}
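
The hunk above also switches from wrapping a naked new in a Dune::shared_ptr to calling Dune::make_shared. Both yield an owning pointer; the difference is sketched below (Overlap is reduced to a hypothetical one-argument constructor):

    #include <dune/common/shared_ptr.hh>

    struct Overlap { explicit Overlap(int /*overlapSize*/) {} };

    void example()
    {
        // two allocations: one for the Overlap, one for the reference count
        Dune::shared_ptr<Overlap> a(new Overlap(1));

        // typically a single combined allocation, and no naked new in the
        // source
        Dune::shared_ptr<Overlap> b = Dune::make_shared<Overlap>(1);
    }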
~OverlappingBCRSMatrix()
{
if (overlap_.use_count() == 0)
return;
// delete all MPI buffers
const PeerSet &peerSet = overlap_->foreignOverlap().peerSet();
typename PeerSet::const_iterator peerIt = peerSet.begin();
typename PeerSet::const_iterator peerEndIt = peerSet.end();
for (; peerIt != peerEndIt; ++peerIt) {
int peerRank = *peerIt;
delete rowSizesRecvBuff_[peerRank];
delete rowIndicesRecvBuff_[peerRank];
delete entryIndicesRecvBuff_[peerRank];
delete entryValuesRecvBuff_[peerRank];
delete numRowsSendBuff_[peerRank];
delete rowSizesSendBuff_[peerRank];
delete rowIndicesSendBuff_[peerRank];
delete entryIndicesSendBuff_[peerRank];
delete entryValuesSendBuff_[peerRank];
}
}
/*!
* \brief Returns the domestic overlap for the process.
*/
@@ -326,12 +301,12 @@ private:
// send size of foreign overlap to peer
int numOverlapRows = peerOverlap.size();
numRowsSendBuff_[peerRank] = new MpiBuffer<int>(1);
numRowsSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<int> >(1);
(*numRowsSendBuff_[peerRank])[0] = numOverlapRows;
numRowsSendBuff_[peerRank]->send(peerRank);
rowSizesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
rowIndicesSendBuff_[peerRank] = new MpiBuffer<Index>(numOverlapRows);
rowSizesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);
rowIndicesSendBuff_[peerRank] = Dune::make_shared<MpiBuffer<Index> >(numOverlapRows);