// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*****************************************************************************
 *   See the file COPYING for full copying permissions.                      *
 *                                                                           *
 *   This program is free software: you can redistribute it and/or modify    *
 *   it under the terms of the GNU General Public License as published by    *
 *   the Free Software Foundation, either version 3 of the License, or       *
 *   (at your option) any later version.                                     *
 *                                                                           *
 *   This program is distributed in the hope that it will be useful,         *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of          *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the            *
 *   GNU General Public License for more details.                            *
 *                                                                           *
 *   You should have received a copy of the GNU General Public License       *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.   *
 *****************************************************************************/
/*!
 * \file
 * \ingroup NavierStokesTests
 * \brief Test for the staggered grid Navier-Stokes model (Donea 2003, \cite Donea2003).
 */
#include <config.h>

#include <ctime>
#include <iostream>

#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/timer.hh>

#include <dumux/common/dumuxmessage.hh>
#include <dumux/common/parameters.hh>
#include <dumux/common/properties.hh>
#include <dumux/io/grid/gridmanager.hh>
#include <dumux/io/vtkoutputmodule.hh>
#include <dumux/linear/seqsolverbackend.hh>
#include <dumux/linear/istlsolvers.hh>

#include <dumux/multidomain/fvassembler.hh>
#include <dumux/multidomain/traits.hh>
#include <dumux/multidomain/staggeredfreeflow/couplingmanager.hh>
#include <dumux/multidomain/newtonsolver.hh>

#include <dumux/freeflow/navierstokes/velocityoutput.hh>
#include <test/freeflow/navierstokes/analyticalsolutionvectors.hh>
#include <test/freeflow/navierstokes/errors.hh>

#include "properties.hh"

int main(int argc, char** argv)
53
{
54
55
    using namespace Dumux;

56
57
58
    // define the type tags for this problem
    using MomentumTypeTag = Properties::TTag::DoneaTestMomentum;
    using MassTypeTag = Properties::TTag::DoneaTestMass;
59
60
61
62
63
64
65
66
67

    // initialize MPI, finalize is done automatically on exit
    const auto& mpiHelper = Dune::MPIHelper::instance(argc, argv);

    // print dumux start message
    if (mpiHelper.rank() == 0)
        DumuxMessage::print(/*firstCall=*/true);

    // parse command line arguments and input file
68
    Parameters::init(argc, argv);
69
70

    // try to create a grid (from the given grid file or the input file)
71
    using GridManager = Dumux::GridManager<GetPropType<MassTypeTag, Properties::Grid>>;
72
73
    GridManager gridManager;
    gridManager.init();
74

75
76
    // start timer
    Dune::Timer timer;
77
78

    // we compute on the leaf grid view
79
    const auto& leafGridView = gridManager.grid().leafGridView();
80

81
82
83
84
85
86
87
88
89
90
91
    // create the finite volume grid geometries
    using MomentumGridGeometry = GetPropType<MomentumTypeTag, Properties::GridGeometry>;
    auto momentumGridGeometry = std::make_shared<MomentumGridGeometry>(leafGridView);

    using MassGridGeometry = GetPropType<MassTypeTag, Properties::GridGeometry>;
    auto massGridGeometry = std::make_shared<MassGridGeometry>(leafGridView);

    // mass-momentum coupling manager
    using Traits = MultiDomainTraits<MomentumTypeTag, MassTypeTag>;
    using CouplingManager = StaggeredFreeFlowCouplingManager<Traits>;
    auto couplingManager = std::make_shared<CouplingManager>();
92

93
94
95
96
97
98
    // the problems (boundary conditions)
    using MomentumProblem = GetPropType<MomentumTypeTag, Properties::Problem>;
    auto momentumProblem = std::make_shared<MomentumProblem>(momentumGridGeometry, couplingManager);

    using MassProblem = GetPropType<MassTypeTag, Properties::Problem>;
    auto massProblem = std::make_shared<MassProblem>(massGridGeometry, couplingManager);
99
100

    // the solution vector
101
102
103
    constexpr auto momentumIdx = CouplingManager::freeFlowMomentumIndex;
    constexpr auto massIdx = CouplingManager::freeFlowMassIndex;
    using SolutionVector = typename Traits::SolutionVector;
104
    SolutionVector x;
105
106
    x[momentumIdx].resize(momentumGridGeometry->numDofs());
    x[massIdx].resize(massGridGeometry->numDofs());
107
108

    // the grid variables
109
110
111
112
113
114
115
116
117
118
119
    using MomentumGridVariables = GetPropType<MomentumTypeTag, Properties::GridVariables>;
    auto momentumGridVariables = std::make_shared<MomentumGridVariables>(momentumProblem, momentumGridGeometry);

    using MassGridVariables = GetPropType<MassTypeTag, Properties::GridVariables>;
    auto massGridVariables = std::make_shared<MassGridVariables>(massProblem, massGridGeometry);

    // initialize the coupling stencils
    couplingManager->init(momentumProblem, massProblem, std::make_tuple(momentumGridVariables, massGridVariables), x);
    // intializing the gridvariables requires the coupling manager to be set up
    momentumGridVariables->init(x[momentumIdx]);
    massGridVariables->init(x[massIdx]);
120

121
122
123
    // initialize the vtk output module
    using IOFields = GetPropType<MassTypeTag, Properties::IOFields>;
    VtkOutputModule vtkWriter(*massGridVariables, x[massIdx], massProblem->name());
124
    IOFields::initOutputModule(vtkWriter); // Add model specific output fields
125
    vtkWriter.addVelocityOutput(std::make_shared<NavierStokesVelocityOutput<MassGridVariables>>());
126

127
128
129
130
    NavierStokesTest::AnalyticalSolutionVectors analyticalSolVectors(momentumProblem, massProblem);
    vtkWriter.addField(analyticalSolVectors.analyticalPressureSolution(), "pressureExact");
    vtkWriter.addField(analyticalSolVectors.analyticalVelocitySolution(), "velocityExact");
    //vtkWriter.addFaceField(analyticalSolVectors.analyticalVelocitySolutionOnFace(), "faceVelocityExact");
131
132
    vtkWriter.write(0.0);

133
134
135
136
137
138
    // use the multidomain FV assembler
    using Assembler = MultiDomainFVAssembler<Traits, CouplingManager, DiffMethod::numeric>;
    auto assembler = std::make_shared<Assembler>(std::make_tuple(momentumProblem, massProblem),
                                                 std::make_tuple(momentumGridGeometry, massGridGeometry),
                                                 std::make_tuple(momentumGridVariables, massGridVariables),
                                                 couplingManager);
139
140

    // the linear solver
141
    using LinearSolver = UMFPackIstlSolver;
142
143
144
    auto linearSolver = std::make_shared<LinearSolver>();

    // the non-linear solver
145
146
    using NewtonSolver = MultiDomainNewtonSolver<Assembler, LinearSolver, CouplingManager>;
    NewtonSolver nonLinearSolver(assembler, linearSolver, couplingManager);
147

148
149
    // linearize & solve
    nonLinearSolver.solve(x);
150

151
    // print discrete L2 and Linfity errors
152
153
154
155
    const bool printErrors = getParam<bool>("Problem.PrintErrors", false);
    const bool printConvergenceTestFile = getParam<bool>("Problem.PrintConvergenceTestFile", false);

    if (printErrors || printConvergenceTestFile)
156
    {
157
158
159
        NavierStokesTest::Errors errors(momentumProblem, massProblem, x);
        NavierStokesTest::ErrorCSVWriter(
            momentumProblem, massProblem, std::to_string(x[massIdx].size())
160
        ).printErrors(errors);
161
162

        if (printConvergenceTestFile)
163
            NavierStokesTest::convergenceTestAppendErrors(momentumProblem, massProblem, errors);
164
165
    }

166
167
    // write vtk output
    vtkWriter.write(1.0);
168

169
    timer.stop();
170

171
    const auto& comm = Dune::MPIHelper::getCommunication();
172
173
174
    std::cout << "Simulation took " << timer.elapsed() << " seconds on "
              << comm.size() << " processes.\n"
              << "The cumulative CPU time was " << timer.elapsed()*comm.size() << " seconds.\n";
175
176
177
178
179
180
181
182
183
184
185
186
187

    ////////////////////////////////////////////////////////////
    // finalize, print dumux message to say goodbye
    ////////////////////////////////////////////////////////////

    // print dumux end message
    if (mpiHelper.rank() == 0)
    {
        Parameters::print();
        DumuxMessage::print(/*firstCall=*/false);
    }

    return 0;
188
} // end main