From 17c9d2eb992f93202b28a8220d8843fcd829729b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20Gr=C3=BCninger?=
Date: Sat, 25 Dec 2021 23:44:27 +0100
Subject: [PATCH 1/2] Use Communication instead of deprecated
 CollectiveCommunication

---
 dumux/common/timeloop.hh                                   | 6 +++---
 dumux/discretization/cellcentered/mpfa/helper.hh           | 2 +-
 dumux/io/grid/cpgridmanager.hh                             | 2 +-
 dumux/io/grid/gmshgriddatahandle.hh                        | 6 +++---
 dumux/io/grid/gridmanager_alu.hh                           | 4 ++--
 dumux/io/grid/gridmanager_base.hh                          | 4 ++--
 dumux/io/grid/gridmanager_sub.hh                           | 2 +-
 dumux/io/grid/gridmanager_ug.hh                            | 2 +-
 dumux/io/grid/porenetwork/gridmanager.hh                   | 2 +-
 dumux/io/grid/porenetwork/structuredlatticegridcreator.hh  | 2 +-
 dumux/io/vtk/intersectionwriter.hh                         | 4 ++--
 dumux/io/vtk/vtkreader.hh                                  | 4 ++--
 dumux/linear/amgbackend.hh                                 | 4 ++--
 dumux/linear/istlsolverfactorybackend.hh                   | 4 ++--
 dumux/linear/pdesolver.hh                                  | 2 +-
 dumux/multidomain/newtonsolver.hh                          | 4 ++--
 dumux/nonlinear/newtonsolver.hh                            | 4 ++--
 examples/1ptracer/doc/main.md                              | 2 +-
 examples/1ptracer/main.cc                                  | 2 +-
 test/freeflow/navierstokes/channel/1d/main.cc              | 2 +-
 test/freeflow/navierstokes/channel/3d/main.cc              | 2 +-
 test/freeflow/navierstokes/donea/main.cc                   | 2 +-
 test/freeflow/navierstokes/kovasznay/main.cc               | 2 +-
 test/freeflow/navierstokes/periodic/main.cc                | 2 +-
 test/freeflow/navierstokes/sincos/main.cc                  | 2 +-
 test/geomechanics/elastic/main.cc                          | 2 +-
 test/geomechanics/poroelastic/main.cc                      | 2 +-
 test/multidomain/boundary/darcydarcy/1p_1p/main.cc         | 2 +-
 test/multidomain/boundary/darcydarcy/1p_2p/main.cc         | 2 +-
 test/multidomain/embedded/1d3d/1p2c_richards2c/main.cc     | 2 +-
 test/multidomain/embedded/1d3d/1p_richards/main.cc         | 2 +-
 test/porenetwork/1p/main.cc                                | 2 +-
 test/porousmediumflow/1p/compressible/stationary/main.cc   | 2 +-
 test/porousmediumflow/1p/incompressible/main.cc            | 2 +-
 test/porousmediumflow/1p/periodicbc/main.cc                | 2 +-
 35 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/dumux/common/timeloop.hh b/dumux/common/timeloop.hh
index e354c70430..5e531b2673 100644
--- a/dumux/common/timeloop.hh
+++ b/dumux/common/timeloop.hh
@@ -156,7 +156,7 @@ public:
     {
         verbose_ =
             verbose &&
-            Dune::MPIHelper::getCollectiveCommunication().rank() == 0;
+            Dune::MPIHelper::getCommunication().rank() == 0;

         time_ = startTime;
         endTime_ = tEnd;
@@ -359,8 +359,8 @@ public:
     /*!
      * \brief Print final status and stops tracking the time.
      */
-    template< class Communicator = Dune::CollectiveCommunication >
-    void finalize(const Communicator& comm = Dune::MPIHelper::getCollectiveCommunication())
+    template< class Communicator = Dune::Communication >
+    void finalize(const Communicator& comm = Dune::MPIHelper::getCommunication())
     {
         auto cpuTime = timer_.stop();

diff --git a/dumux/discretization/cellcentered/mpfa/helper.hh b/dumux/discretization/cellcentered/mpfa/helper.hh
index f1b78cd982..1d772bc190 100644
--- a/dumux/discretization/cellcentered/mpfa/helper.hh
+++ b/dumux/discretization/cellcentered/mpfa/helper.hh
@@ -550,7 +550,7 @@ public:
         std::vector ghostVertices(gridView.size(dim), false);

         // if not run in parallel, skip the rest
-        if (Dune::MPIHelper::getCollectiveCommunication().size() == 1)
+        if (Dune::MPIHelper::getCommunication().size() == 1)
             return ghostVertices;

         // mpfa methods cannot yet handle ghost cells

diff --git a/dumux/io/grid/cpgridmanager.hh b/dumux/io/grid/cpgridmanager.hh
index dbaf27a9f1..1fba301fbe 100644
--- a/dumux/io/grid/cpgridmanager.hh
+++ b/dumux/io/grid/cpgridmanager.hh
@@ -82,7 +82,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
             grid_->loadBalance();
     }

diff --git a/dumux/io/grid/gmshgriddatahandle.hh b/dumux/io/grid/gmshgriddatahandle.hh
index b8af221b7c..bef8ef4cfa 100644
--- a/dumux/io/grid/gmshgriddatahandle.hh
+++ b/dumux/io/grid/gmshgriddatahandle.hh
@@ -152,13 +152,13 @@ struct GmshGridDataHandle, GridFactory, Data>
         // all processes of the boundary markers vector is zero. If yes, assume
         // that the root process contains all markers and broadcast them.
         auto bmSizeMin = boundaryMarkers_.size();
-        Dune::MPIHelper::getCollectiveCommunication().min(&bmSizeMin, 1);
+        Dune::MPIHelper::getCommunication().min(&bmSizeMin, 1);
         if (bmSizeMin == 0)
         {
             auto bmSize = boundaryMarkers_.size();
-            Dune::MPIHelper::getCollectiveCommunication().broadcast(&bmSize, 1, 0);
+            Dune::MPIHelper::getCommunication().broadcast(&bmSize, 1, 0);
             boundaryMarkers_.resize(bmSize);
-            Dune::MPIHelper::getCollectiveCommunication().broadcast(&boundaryMarkers_.front(), bmSize, 0);
+            Dune::MPIHelper::getCommunication().broadcast(&boundaryMarkers_.front(), bmSize, 0);
         }
     }

diff --git a/dumux/io/grid/gridmanager_alu.hh b/dumux/io/grid/gridmanager_alu.hh
index 3d0d16a036..95db21e6d0 100644
--- a/dumux/io/grid/gridmanager_alu.hh
+++ b/dumux/io/grid/gridmanager_alu.hh
@@ -82,7 +82,7 @@ public:
                 std::cerr << "Warning: You are using a deprecated restart mechanism. The usage will change in the future.\n";
             }

-            const int rank = Dune::MPIHelper::getCollectiveCommunication().rank();
+            const int rank = Dune::MPIHelper::getCommunication().rank();
             const std::string name = getParamFromGroup(modelParamGroup, "Problem.Name");
             std::ostringstream oss;
             oss << name << "_time=" << restartTime << "_rank=" << rank << ".grs";
@@ -187,7 +187,7 @@ public:
         {
             auto gridFactory = std::make_unique>();

-            if (Dune::MPIHelper::getCollectiveCommunication().rank() == 0)
+            if (Dune::MPIHelper::getCommunication().rank() == 0)
                 Dune::GmshReader::read(*gridFactory, fileName, verbose, boundarySegments);

             ParentType::gridPtr() = std::shared_ptr(gridFactory->createGrid());

diff --git a/dumux/io/grid/gridmanager_base.hh b/dumux/io/grid/gridmanager_base.hh
index 2e79542caa..9d90cbc6e1 100644
--- a/dumux/io/grid/gridmanager_base.hh
+++ b/dumux/io/grid/gridmanager_base.hh
@@ -95,7 +95,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
        {
            // if we may have dgf parameters use load balancing of the dgf pointer
            if(enableDgfGridPointer_)
@@ -230,7 +230,7 @@ protected:
        // VTK file formats for unstructured grids
        else if (extension == "vtu" || extension == "vtp")
        {
-            if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+            if (Dune::MPIHelper::getCommunication().size() > 1)
                DUNE_THROW(Dune::NotImplemented, "Reading grids in parallel from VTK file formats is currently not supported!");

            VTKReader vtkReader(fileName);

diff --git a/dumux/io/grid/gridmanager_sub.hh b/dumux/io/grid/gridmanager_sub.hh
index fadeff1335..b0d2ef6ccc 100644
--- a/dumux/io/grid/gridmanager_sub.hh
+++ b/dumux/io/grid/gridmanager_sub.hh
@@ -107,7 +107,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
             this->grid().loadBalance();
     }

diff --git a/dumux/io/grid/gridmanager_ug.hh b/dumux/io/grid/gridmanager_ug.hh
index ab9b043c5b..d624f1499e 100644
--- a/dumux/io/grid/gridmanager_ug.hh
+++ b/dumux/io/grid/gridmanager_ug.hh
@@ -114,7 +114,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
        {
            // if we may have dgf parameters use load balancing of the dgf pointer
            if(ParentType::enableDgfGridPointer_)

diff --git a/dumux/io/grid/porenetwork/gridmanager.hh b/dumux/io/grid/porenetwork/gridmanager.hh
index 83aaf35a90..2b0d1fe348 100644
--- a/dumux/io/grid/porenetwork/gridmanager.hh
+++ b/dumux/io/grid/porenetwork/gridmanager.hh
@@ -168,7 +168,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
        {
            // if we may have dgf parameters use load balancing of the dgf pointer
            if (enableDgfGridPointer_)

diff --git a/dumux/io/grid/porenetwork/structuredlatticegridcreator.hh b/dumux/io/grid/porenetwork/structuredlatticegridcreator.hh
index 4e1024fd67..a6e81a102b 100644
--- a/dumux/io/grid/porenetwork/structuredlatticegridcreator.hh
+++ b/dumux/io/grid/porenetwork/structuredlatticegridcreator.hh
@@ -127,7 +127,7 @@ public:
      */
     void loadBalance()
     {
-        if (Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+        if (Dune::MPIHelper::getCommunication().size() > 1)
             gridPtr_->loadBalance();
     }

diff --git a/dumux/io/vtk/intersectionwriter.hh b/dumux/io/vtk/intersectionwriter.hh
index d87a63899f..b944226578 100644
--- a/dumux/io/vtk/intersectionwriter.hh
+++ b/dumux/io/vtk/intersectionwriter.hh
@@ -130,7 +130,7 @@ public:
     using Point = Corner;
     using PointIterator = CornerIterator;
     using ConnectivityWriter = Dune::VTK::NonConformingConnectivityWriter;
-    using CollectiveCommunication = typename GridView::CollectiveCommunication;
+    using Communication = typename GridView::CollectiveCommunication;

     explicit NonConformingIntersectionIteratorFactory(const GridView& gv)
     : gridView_(gv) {}
@@ -156,7 +156,7 @@ public:
     ConnectivityWriter makeConnectivity() const
     { return ConnectivityWriter(); }

-    const CollectiveCommunication& comm() const
+    const Communication& comm() const
     { return gridView_.comm(); }

 private:

diff --git a/dumux/io/vtk/vtkreader.hh b/dumux/io/vtk/vtkreader.hh
index ea65e39f3e..db797d60ba 100644
--- a/dumux/io/vtk/vtkreader.hh
+++ b/dumux/io/vtk/vtkreader.hh
@@ -64,7 +64,7 @@ public:
     explicit VTKReader(const std::string& fileName)
     {
         using namespace tinyxml2;
-        fileName_ = Dune::MPIHelper::getCollectiveCommunication().size() > 1 ?
+        fileName_ = Dune::MPIHelper::getCommunication().size() > 1 ?
                         getProcessFileName_(fileName) : fileName;

         const auto eResult = doc_.LoadFile(fileName_.c_str());
@@ -193,7 +193,7 @@ private:
         // get the first piece node
         const XMLElement* pieceNode = getPieceNode_(pDoc, pvtkFileName);

-        const auto myrank = Dune::MPIHelper::getCollectiveCommunication().rank();
+        const auto myrank = Dune::MPIHelper::getCommunication().rank();
         for (int rank = 0; rank < myrank; ++rank)
         {
             pieceNode = pieceNode->NextSiblingElement("Piece");

diff --git a/dumux/linear/amgbackend.hh b/dumux/linear/amgbackend.hh
index d83b7533cb..468d4f7dd9 100644
--- a/dumux/linear/amgbackend.hh
+++ b/dumux/linear/amgbackend.hh
@@ -57,7 +57,7 @@ public:
      */
     AMGBiCGSTABBackend(const std::string& paramGroup = "")
     : LinearSolver(paramGroup)
-    , isParallel_(Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+    , isParallel_(Dune::MPIHelper::getCommunication().size() > 1)
     {
         if (isParallel_)
             DUNE_THROW(Dune::InvalidStateException, "Using sequential constructor for parallel run. Use signature with gridView and dofMapper!");
@@ -77,7 +77,7 @@ public:
                        const std::string& paramGroup = "")
     : LinearSolver(paramGroup)
 #if HAVE_MPI
-    , isParallel_(Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+    , isParallel_(Dune::MPIHelper::getCommunication().size() > 1)
 #endif
     {
 #if HAVE_MPI

diff --git a/dumux/linear/istlsolverfactorybackend.hh b/dumux/linear/istlsolverfactorybackend.hh
index baff4827d0..9c90f0f83e 100644
--- a/dumux/linear/istlsolverfactorybackend.hh
+++ b/dumux/linear/istlsolverfactorybackend.hh
@@ -126,7 +126,7 @@ public:
      */
     IstlSolverFactoryBackend(const std::string& paramGroup = "")
     : paramGroup_(paramGroup)
-    , isParallel_(Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+    , isParallel_(Dune::MPIHelper::getCommunication().size() > 1)
     {
         if (isParallel_)
             DUNE_THROW(Dune::InvalidStateException, "Using sequential constructor for parallel run. Use signature with gridView and dofMapper!");
@@ -147,7 +147,7 @@ public:
                             const std::string& paramGroup = "")
     : paramGroup_(paramGroup)
 #if HAVE_MPI
-    , isParallel_(Dune::MPIHelper::getCollectiveCommunication().size() > 1)
+    , isParallel_(Dune::MPIHelper::getCommunication().size() > 1)
 #endif
     {
         firstCall_ = true;

diff --git a/dumux/linear/pdesolver.hh b/dumux/linear/pdesolver.hh
index 2303b8cbc5..b4cdc6294a 100644
--- a/dumux/linear/pdesolver.hh
+++ b/dumux/linear/pdesolver.hh
@@ -327,7 +327,7 @@ private:
     //! initialize the parameters by reading from the parameter tree
     void initParams_(const std::string& group = "")
     {
-        verbose_ = (Dune::MPIHelper::getCollectiveCommunication().rank() == 0);
+        verbose_ = (Dune::MPIHelper::getCommunication().rank() == 0);
     }

     //! switches on/off verbosity

diff --git a/dumux/multidomain/newtonsolver.hh b/dumux/multidomain/newtonsolver.hh
index 360413d737..7f2f93da0b 100644
--- a/dumux/multidomain/newtonsolver.hh
+++ b/dumux/multidomain/newtonsolver.hh
@@ -46,7 +46,7 @@ using GetPVSwitchMultiDomain = Dune::Std::detected_or >
+         class Comm = Dune::Communication >
 class MultiDomainNewtonSolver: public NewtonSolver
 {
     using ParentType = NewtonSolver;
@@ -74,7 +74,7 @@ public:
     MultiDomainNewtonSolver(std::shared_ptr assembler,
                             std::shared_ptr linearSolver,
                             std::shared_ptr couplingManager,
-                            const Comm& comm = Dune::MPIHelper::getCollectiveCommunication(),
+                            const Comm& comm = Dune::MPIHelper::getCommunication(),
                             const std::string& paramGroup = "")
     : ParentType(assembler, linearSolver, comm, paramGroup)
     , couplingManager_(couplingManager)

diff --git a/dumux/nonlinear/newtonsolver.hh b/dumux/nonlinear/newtonsolver.hh
index 0b00ce33dc..0701f314f9 100644
--- a/dumux/nonlinear/newtonsolver.hh
+++ b/dumux/nonlinear/newtonsolver.hh
@@ -211,7 +211,7 @@ using BlockType = typename BlockTypeHelper,
-         class Comm = Dune::CollectiveCommunication >
+         class Comm = Dune::Communication >
 class NewtonSolver : public PDESolver
 {
     using ParentType = PDESolver;
@@ -244,7 +244,7 @@ public:
      */
     NewtonSolver(std::shared_ptr assembler,
                  std::shared_ptr linearSolver,
-                 const Communication& comm = Dune::MPIHelper::getCollectiveCommunication(),
+                 const Communication& comm = Dune::MPIHelper::getCommunication(),
                  const std::string& paramGroup = "")
     : ParentType(assembler, linearSolver)
     , endIterMsgStream_(std::ostringstream::out)

diff --git a/examples/1ptracer/doc/main.md b/examples/1ptracer/doc/main.md
index 045bfc3e38..3d9741c64f 100644
--- a/examples/1ptracer/doc/main.md
+++ b/examples/1ptracer/doc/main.md
@@ -199,7 +199,7 @@ problem defined in `problem_1p.hh`. Let us now write this solution to a VTK file
     // print overall CPU time required for assembling and solving the 1p problem.
     timer.stop();
-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/examples/1ptracer/main.cc b/examples/1ptracer/main.cc
index 21569bc52d..4faeafcdbf 100644
--- a/examples/1ptracer/main.cc
+++ b/examples/1ptracer/main.cc
@@ -161,7 +161,7 @@ int main(int argc, char** argv) try
     // print overall CPU time required for assembling and solving the 1p problem.
     timer.stop();
-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/channel/1d/main.cc b/test/freeflow/navierstokes/channel/1d/main.cc
index 6a30b5fa83..a6c9d6bd91 100644
--- a/test/freeflow/navierstokes/channel/1d/main.cc
+++ b/test/freeflow/navierstokes/channel/1d/main.cc
@@ -163,7 +163,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/channel/3d/main.cc b/test/freeflow/navierstokes/channel/3d/main.cc
index 40df458c2b..2ba127d34d 100644
--- a/test/freeflow/navierstokes/channel/3d/main.cc
+++ b/test/freeflow/navierstokes/channel/3d/main.cc
@@ -238,7 +238,7 @@ flux.addSurface("middle", p0middle, p1middle);
     std::cout << "analyticalFlux: " << problem->analyticalFlux() << std::endl;

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/donea/main.cc b/test/freeflow/navierstokes/donea/main.cc
index 9909f86cf7..7986ac5af9 100644
--- a/test/freeflow/navierstokes/donea/main.cc
+++ b/test/freeflow/navierstokes/donea/main.cc
@@ -167,7 +167,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/kovasznay/main.cc b/test/freeflow/navierstokes/kovasznay/main.cc
index 544559710b..c8b054a9b2 100644
--- a/test/freeflow/navierstokes/kovasznay/main.cc
+++ b/test/freeflow/navierstokes/kovasznay/main.cc
@@ -172,7 +172,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/periodic/main.cc b/test/freeflow/navierstokes/periodic/main.cc
index a3b0f416bd..aa562e9e9b 100644
--- a/test/freeflow/navierstokes/periodic/main.cc
+++ b/test/freeflow/navierstokes/periodic/main.cc
@@ -165,7 +165,7 @@ int main(int argc, char** argv)
     vtkWriter.write(1.0);

     timer.stop();
-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/freeflow/navierstokes/sincos/main.cc b/test/freeflow/navierstokes/sincos/main.cc
index 510cba6fac..563a6536b7 100644
--- a/test/freeflow/navierstokes/sincos/main.cc
+++ b/test/freeflow/navierstokes/sincos/main.cc
@@ -226,7 +226,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/geomechanics/elastic/main.cc b/test/geomechanics/elastic/main.cc
index a6b49e382f..68e5b12b23 100644
--- a/test/geomechanics/elastic/main.cc
+++ b/test/geomechanics/elastic/main.cc
@@ -132,7 +132,7 @@ int main(int argc, char** argv)
     vtkWriter.write(1.0);

     // print time and say goodbye
-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     if (mpiHelper.rank() == 0)
         std::cout << "Simulation took " << timer.elapsed() << " seconds on "
                   << comm.size() << " processes.\n"

diff --git a/test/geomechanics/poroelastic/main.cc b/test/geomechanics/poroelastic/main.cc
index 02e81a968a..8f5587c2ac 100644
--- a/test/geomechanics/poroelastic/main.cc
+++ b/test/geomechanics/poroelastic/main.cc
@@ -201,7 +201,7 @@ int main(int argc, char** argv)
     vtkWriter.write(1.0);

     // print time and say goodbye
-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     if (mpiHelper.rank() == 0)
         std::cout << "Simulation took " << timer.elapsed() << " seconds on "
                   << comm.size() << " processes.\n"

diff --git a/test/multidomain/boundary/darcydarcy/1p_1p/main.cc b/test/multidomain/boundary/darcydarcy/1p_1p/main.cc
index 1cbab4d60e..1005c4dcca 100644
--- a/test/multidomain/boundary/darcydarcy/1p_1p/main.cc
+++ b/test/multidomain/boundary/darcydarcy/1p_1p/main.cc
@@ -236,7 +236,7 @@ int main(int argc, char** argv)
         timeLoop->setTimeStepSize(nonLinearSolver.suggestTimeStepSize(timeLoop->timeStepSize()));
     }

-    timeLoop->finalize(mpiHelper.getCollectiveCommunication());
+    timeLoop->finalize(mpiHelper.getCommunication());

     //////////////////////////////////////////////////////////////////////////
     // write out a combined vtu for vtu comparison in the testing framework

diff --git a/test/multidomain/boundary/darcydarcy/1p_2p/main.cc b/test/multidomain/boundary/darcydarcy/1p_2p/main.cc
index 385d63c448..525c22f537 100644
--- a/test/multidomain/boundary/darcydarcy/1p_2p/main.cc
+++ b/test/multidomain/boundary/darcydarcy/1p_2p/main.cc
@@ -205,7 +205,7 @@ int main(int argc, char** argv)
         timeLoop->setTimeStepSize(nonLinearSolver.suggestTimeStepSize(timeLoop->timeStepSize()));
     }

-    timeLoop->finalize(mpiHelper.getCollectiveCommunication());
+    timeLoop->finalize(mpiHelper.getCommunication());

     ////////////////////////////////////////////////////////////
     // finalize, print dumux message to say goodbye

diff --git a/test/multidomain/embedded/1d3d/1p2c_richards2c/main.cc b/test/multidomain/embedded/1d3d/1p2c_richards2c/main.cc
index a6f440007a..2bbddbda7f 100644
--- a/test/multidomain/embedded/1d3d/1p2c_richards2c/main.cc
+++ b/test/multidomain/embedded/1d3d/1p2c_richards2c/main.cc
@@ -408,7 +408,7 @@ int main(int argc, char** argv)
     outFile.close();

-    timeLoop->finalize(mpiHelper.getCollectiveCommunication());
+    timeLoop->finalize(mpiHelper.getCommunication());

     ////////////////////////////////////////////////////////////
     // finalize, print dumux message to say goodbye

diff --git a/test/multidomain/embedded/1d3d/1p_richards/main.cc b/test/multidomain/embedded/1d3d/1p_richards/main.cc
index 79357b586c..941471f3a4 100644
--- a/test/multidomain/embedded/1d3d/1p_richards/main.cc
+++ b/test/multidomain/embedded/1d3d/1p_richards/main.cc
@@ -210,7 +210,7 @@ int main(int argc, char** argv)
         timeLoop->setTimeStepSize(nonLinearSolver.suggestTimeStepSize(timeLoop->timeStepSize()));
     }

-    timeLoop->finalize(mpiHelper.getCollectiveCommunication());
+    timeLoop->finalize(mpiHelper.getCommunication());

     ////////////////////////////////////////////////////////////
     // finalize, print dumux message to say goodbye

diff --git a/test/porenetwork/1p/main.cc b/test/porenetwork/1p/main.cc
index c0004b776b..41155ab8cb 100644
--- a/test/porenetwork/1p/main.cc
+++ b/test/porenetwork/1p/main.cc
@@ -145,7 +145,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     if (mpiHelper.rank() == 0)
         std::cout << "Simulation took " << timer.elapsed() << " seconds on "
                   << comm.size() << " processes.\n"

diff --git a/test/porousmediumflow/1p/compressible/stationary/main.cc b/test/porousmediumflow/1p/compressible/stationary/main.cc
index 9141d23374..e587eab9b8 100644
--- a/test/porousmediumflow/1p/compressible/stationary/main.cc
+++ b/test/porousmediumflow/1p/compressible/stationary/main.cc
@@ -126,7 +126,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     std::cout << "Simulation took " << timer.elapsed() << " seconds on "
               << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";

diff --git a/test/porousmediumflow/1p/incompressible/main.cc b/test/porousmediumflow/1p/incompressible/main.cc
index 6c7ab96175..2bf4beeae8 100644
--- a/test/porousmediumflow/1p/incompressible/main.cc
+++ b/test/porousmediumflow/1p/incompressible/main.cc
@@ -197,7 +197,7 @@ int main(int argc, char** argv)
         }
     }

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     if (mpiHelper.rank() == 0)
         std::cout << "Simulation took " << timer.elapsed() << " seconds on "
                   << comm.size() << " processes.\n"

diff --git a/test/porousmediumflow/1p/periodicbc/main.cc b/test/porousmediumflow/1p/periodicbc/main.cc
index cad884638e..2ee30d141b 100644
--- a/test/porousmediumflow/1p/periodicbc/main.cc
+++ b/test/porousmediumflow/1p/periodicbc/main.cc
@@ -143,7 +143,7 @@ int main(int argc, char** argv)
     timer.stop();

-    const auto& comm = Dune::MPIHelper::getCollectiveCommunication();
+    const auto& comm = Dune::MPIHelper::getCommunication();
     if (mpiHelper.rank() == 0)
         std::cout << "Simulation took " << timer.elapsed() << " seconds on "
                   << comm.size() << " processes.\n"
-- 
GitLab


From 0249ec0e3167ce4edfd772d1cc2a865e9278b8b9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20Gr=C3=BCninger?=
Date: Fri, 7 Jan 2022 14:57:41 +0100
Subject: [PATCH 2/2] [freeflowporenetwork] Guard test_md_boundary_ff1p_pnm1p
 to have FOAM grid

---
 .../boundary/freeflowporenetwork/1p_1p/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/multidomain/boundary/freeflowporenetwork/1p_1p/CMakeLists.txt b/test/multidomain/boundary/freeflowporenetwork/1p_1p/CMakeLists.txt
index afa6e0e818..417fa71f90 100644
--- a/test/multidomain/boundary/freeflowporenetwork/1p_1p/CMakeLists.txt
+++ b/test/multidomain/boundary/freeflowporenetwork/1p_1p/CMakeLists.txt
@@ -3,7 +3,7 @@ add_input_file_links()
 dumux_add_test(NAME test_md_boundary_ff1p_pnm1p
                LABELS multidomain multidomain_boundary freeflowpnm 1p navierstokes
                SOURCES main.cc
-               CMAKE_GUARD HAVE_UMFPACK
+               CMAKE_GUARD "( HAVE_UMFPACK AND dune-foamgrid_FOUND )"
               COMMAND ${CMAKE_SOURCE_DIR}/bin/testing/runtest.py
               CMD_ARGS --script fuzzy
                        --files ${CMAKE_SOURCE_DIR}/test/references/test_md_boundary_ff1p_pnm1p_ff-reference.vtu
-- 
GitLab