diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99e923fcc8a3ac36f10ab0457502edf2d72febba..5ff4248ec0eada7583426645a7d56fadbf39bd9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Differences Between DuMu<sup>x</sup> 3.10 and DuMu<sup>x</sup> 3.9
 
 - __Grid I/O__: The vtu/vtp reader now allows reading unstructured grids from (ASCII) vtu/vtp files with e.g. dune-alugrid and dune-uggrid. Grid data can also be handled in parallel. As with the GmshReader, the grid and data are read on rank 0 and then broadcast for now.
 - __Multidomain boundary__: An init function was added to the coupling managers of free-flow/pore-network as well as free-flow/porous-medium to allow for transient problems.
+- __Nonlinear least squares__: Added a nonlinear least squares solver (in `nonlinear/leastsquares.hh`) that can be used, for example, to fit a curve to data points. The residual function is generic and may, for instance, evaluate an entire PDE solver. The solver is based on the Levenberg-Marquardt algorithm.
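+  A minimal usage sketch (`computeResiduals`, `x0` and the residual size `m` are placeholders):
+  ```cpp
+  // residual functional f: R^n -> R^m whose norm is minimized
+  auto f = [&](const Dune::BlockVector<double>& p) { return computeResiduals(p); };
+  auto solver = Dumux::Optimization::makeNonlinearLeastSquaresSolver(f, x0, m);
+  auto params = x0; // x0 is the initial guess for the n parameters
+  const bool converged = solver->apply(params);
+  ```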
 
 ### Immediate interface changes not allowing/requiring a deprecation period:
 
diff --git a/doc/doxygen/dumux.bib b/doc/doxygen/dumux.bib
index 987c42191e7181995eb5a1975fed6b6f163baeb0..38e4463199ccb481152e68de01901ef2c80deb96 100644
--- a/doc/doxygen/dumux.bib
+++ b/doc/doxygen/dumux.bib
@@ -2114,3 +2114,15 @@ author = {F. Fichot and F. Duval and N. Trégourès and C. Béchaud and M. Quint
   year = {1940},
   pages = {529–558}
 }
+
+@book{NocedalWright2006,
+  title     = {{Numerical Optimization}},
+  author    = {Nocedal, Jorge and Wright, Stephen J.},
+  isbn      = {9780387303031},
+  doi       = {10.1007/978-0-387-40065-5},
+  publisher = {Springer},
+  series    = {Springer Series in Operations Research and Financial Engineering},
+  edition   = {2},
+  year      = {2006},
+  address   = {New York, NY}
+}
diff --git a/dumux/common/variablesbackend.hh b/dumux/common/variablesbackend.hh
index 2f2141b8d76f5c4364766d9d261f308b43ae5184..7654f0aa77318371b389d5a103775d49d5f06d7e 100644
--- a/dumux/common/variablesbackend.hh
+++ b/dumux/common/variablesbackend.hh
@@ -25,6 +25,8 @@
 #include <dune/common/typetraits.hh>
 #include <dune/istl/bvector.hh>
 
+#include <utility>
+
+#include <dumux/common/typetraits/isvalid.hh>
+
 // forward declaration
 namespace Dune {
 
@@ -33,6 +35,21 @@ class MultiTypeBlockVector;
 
 } // end namespace Dune
 
+namespace Dumux::Detail::DofBackend {
+
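+// compile-time detection of whether a vector type provides a resize() member
+// (e.g. Dune::BlockVector does, while statically-sized types like Dune::FieldVector do not)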
+struct HasResize
+{
+    template<class V>
+    auto operator()(const V& v) -> decltype(std::declval<V>().resize(0))
+    {}
+};
+
+template<class Vector>
+static constexpr auto hasResize()
+{ return decltype( isValid(HasResize())(std::declval<Vector>()) )::value; }
+
+} // end namespace Dumux::Detail::DofBackend
+
 namespace Dumux {
 
 /*!
@@ -89,7 +106,12 @@ public:
 
     //! Make a zero-initialized dof vector instance
     static DofVector zeros(SizeType size)
-    { DofVector d; d.resize(size); return d; }
+    {
+        DofVector d;
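+        // only dynamically-sized vectors (e.g. Dune::BlockVector) provide resize();
+        // statically-sized ones are assumed to be zero-initialized on construction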
+        if constexpr (Detail::DofBackend::hasResize<Vector>())
+            d.resize(size);
+        return d;
+    }
 
     //! Perform axpy operation (y += a * x)
     template<class OtherDofVector>
@@ -172,9 +194,9 @@ class VariablesBackend;
  */
 template<class Vars>
 class VariablesBackend<Vars, false>
-: public DofBackend<Vars>
+: public Dumux::DofBackend<Vars>
 {
-    using ParentType = DofBackend<Vars>;
+    using ParentType = Dumux::DofBackend<Vars>;
 
 public:
     using Variables = Vars;
@@ -200,7 +222,7 @@ public:
  */
 template<class Vars>
 class VariablesBackend<Vars, true>
-: public DofBackend<typename Vars::SolutionVector>
+: public Dumux::DofBackend<typename Vars::SolutionVector>
 {
 public:
     using DofVector = typename Vars::SolutionVector;
diff --git a/dumux/nonlinear/leastsquares.hh b/dumux/nonlinear/leastsquares.hh
new file mode 100644
index 0000000000000000000000000000000000000000..af5fdccc5924fa10f6b3f0b56ab3a76b32b03663
--- /dev/null
+++ b/dumux/nonlinear/leastsquares.hh
@@ -0,0 +1,425 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+//
+// SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+/*!
+ * \file
+ * \ingroup Nonlinear
+ * \brief Levenberg-Marquardt algorithm for solving nonlinear least squares problems
+ */
+#ifndef DUMUX_NONLINEAR_LEASTSQUARES_HH
+#define DUMUX_NONLINEAR_LEASTSQUARES_HH
+
+#include <iostream>
+#include <cmath>
+#include <limits>
+#include <memory>
+
+#include <dune/common/exceptions.hh>
+#include <dune/istl/bvector.hh>
+#include <dune/istl/matrix.hh>
+#if HAVE_SUITESPARSE_CHOLMOD
+#include <dune/istl/cholmod.hh>
+#endif // HAVE_SUITESPARSE_CHOLMOD
+#include <dune/istl/io.hh>
+
+#include <dumux/common/parameters.hh>
+#include <dumux/common/exceptions.hh>
+#include <dumux/io/format.hh>
+#include <dumux/nonlinear/newtonsolver.hh>
+
+namespace Dumux::Optimization {
+
+//! a solver base class
+template<class Variables>
+class Solver
+{
+public:
+    virtual ~Solver() = default;
+    virtual bool apply(Variables& vars) = 0;
+};
+
+} // end namespace Dumux::Optimization
+
+#ifndef DOXYGEN // exclude from doxygen
+// helper code for curve fitting
+namespace Dumux::Optimization::Detail {
+
+template<class T, class F>
+class Assembler
+{
+public:
+    using Scalar = T;
+    using JacobianMatrix = Dune::Matrix<T>;
+    using ResidualType = Dune::BlockVector<T>;
+    using SolutionVector = Dune::BlockVector<T>;
+    using Variables = Dune::BlockVector<T>;
+
+    /*!
+     * \brief Assembler for the Levenberg-Marquardt linear system: [J^T J + λ diag(J^T J)] x = J^T r
+     * \param f The residual function \f$ r = f(x) \f$ whose norm is to be minimized (\f$ f: \mathbb{R}^n \to \mathbb{R}^m \f$)
+     * \param x0 Initial guess for the parameters (vector of size \f$n\f$)
+     * \param residualSize The number of residuals (\f$m\f$)
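+     * \note In each iteration, the regularized normal equations
+     *       \f$ [J^T J + \lambda\,\text{diag}(J^T J)]\,\Delta x = J^T r \f$ are solved and the
+     *       parameters are updated as \f$ x \leftarrow x - \alpha\,\Delta x \f$, where
+     *       \f$ \alpha \in (0, 1] \f$ is determined by a backtracking line search.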
+     */
+    Assembler(const F& f, const SolutionVector& x0, std::size_t residualSize)
+    : prevSol_(x0), f_(f), solSize_(x0.size()), residualSize_(residualSize)
+    , JT_(solSize_, residualSize_), regularizedJTJ_(solSize_, solSize_)
+    , residual_(residualSize_), projectedResidual_(solSize_)
+    {
+        printMatrix_ = getParamFromGroup<bool>("LevenbergMarquardt", "PrintMatrix", false);
+        baseEpsilon_ = getParamFromGroup<Scalar>("LevenbergMarquardt", "BaseEpsilon", 1e-1);
+    }
+
+    //! initialize the linear system variables
+    void setLinearSystem()
+    {
+        std::cout << "Setting up linear system with " << solSize_ << " variables and "
+                  << residualSize_ << " residuals." << std::endl;
+
+        JT_ = 0.0;
+        regularizedJTJ_ = 0.0;
+        residual_ = 0.0;
+        projectedResidual_ = 0.0;
+    }
+
+    //! interface for the Newton scheme: this is J^T J + λ diag(J^T J)
+    JacobianMatrix& jacobian() { return regularizedJTJ_; }
+
+    //! interface for the Newton scheme: this is J^T r
+    ResidualType& residual() { return projectedResidual_; }
+
+    //! regularization parameter λ
+    void setLambda(const T lambda) { lambda_ = lambda; }
+    T lambda() { return lambda_; }
+
+    //! assemble the right hand side of the linear system
+    void assembleResidual(const SolutionVector& x)
+    {
+        projectedResidual_ = 0.0;
+        JT_ = 0.0;
+
+        // residual at the current parameter values, r = f(x); loop-invariant, evaluate once
+        const auto residual = f_(x);
+
+        // assemble J^T (row by row via forward differences) and the projected residual J^T r
+        for (auto rowIt = JT_.begin(); rowIt != JT_.end(); ++rowIt)
+        {
+            const auto paramIdx = rowIt.index();
+            auto p = x;
+            const auto eps = baseEpsilon_;
+            p[paramIdx] = x[paramIdx] + eps;
+            auto deflectedResidual = f_(p);
+            deflectedResidual -= residual;
+            deflectedResidual /= eps;
+            for (auto colIt = rowIt->begin(); colIt != rowIt->end(); ++colIt)
+                *colIt += deflectedResidual[colIt.index()];
+
+            projectedResidual_[paramIdx] = residual * deflectedResidual;
+        }
+
+        if (printMatrix_)
+        {
+            std::cout << std::endl << "J^T = " << std::endl;
+            Dune::printmatrix(std::cout, JT_, "", "");
+        }
+    }
+
+    //! assemble the left hand side of the linear system
+    void assembleJacobianAndResidual(const SolutionVector& x)
+    {
+        assembleResidual(x);
+
+        regularizedJTJ_ = 0.0;
+        for (auto rowIt = regularizedJTJ_.begin(); rowIt != regularizedJTJ_.end(); ++rowIt)
+        {
+            for (auto colIt = rowIt->begin(); colIt != rowIt->end(); ++colIt)
+            {
+                for (std::size_t j = 0; j < residualSize_; ++j)
+                    *colIt += JT_[rowIt.index()][j]*JT_[colIt.index()][j];
+
+                if (rowIt.index() == colIt.index())
+                {
+                    *colIt += lambda_*(*colIt);
+
+                    if (*colIt == 0.0)
+                        *colIt = 1e-6*lambda_;
+                }
+            }
+        }
+
+        if (printMatrix_)
+        {
+            std::cout << std::endl << "J^T J + λ diag(J^T J) = " << std::endl;
+            Dune::printmatrix(std::cout, regularizedJTJ_, "", "");
+        }
+    }
+
+    //! the actual cost function we are minimizing: the squared residual norm MSE = ||f_(x)||^2
+    T computeMeanSquaredError(const SolutionVector& x) const
+    { return f_(x).two_norm2(); }
+
+    //! initial guess to be able to reset the solver
+    const SolutionVector& prevSol() const
+    { return prevSol_; }
+
+private:
+    SolutionVector prevSol_; // initial guess
+    const F& f_; //!< the residual function to minimize the norm of
+    std::size_t solSize_, residualSize_;
+
+    JacobianMatrix JT_; // J^T
+    JacobianMatrix regularizedJTJ_; // J^T J + λ diag(J^T J)
+    ResidualType residual_; // r = f_(x)
+    ResidualType projectedResidual_; // J^T r
+
+    Scalar lambda_ = 0.0; // regularization parameter
+    Scalar baseEpsilon_; // for numerical differentiation
+
+    bool printMatrix_ = false;
+};
+
+#if HAVE_SUITESPARSE_CHOLMOD
+template<class Matrix, class Vector>
+class CholmodLinearSolver
+{
+public:
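+    // no-op: present only to satisfy the linear solver interface expected by the Newton scheme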
+    void setResidualReduction(double residualReduction) {}
+
+    bool solve(const Matrix& A, Vector& x, const Vector& b) const
+    {
+        Dune::Cholmod<Vector> solver; // matrix is symmetric
+        solver.setMatrix(A);
+        Dune::InverseOperatorResult r;
+        auto bCopy = b;
+        auto xCopy = x;
+        solver.apply(xCopy, bCopy, r);
+        checkResult_(xCopy, r);
+        if (!r.converged)
+            DUNE_THROW(NumericalProblem, "Linear solver did not converge.");
+        x = xCopy;
+        return r.converged;
+    }
+
+    auto norm(const Vector& residual) const
+    { return residual.two_norm(); }
+
+private:
+    void checkResult_(Vector& x, Dune::InverseOperatorResult& result) const
+    {
+        flatVectorForEach(x, [&](auto&& entry, std::size_t){
+            using std::isnan, std::isinf;
+            if (isnan(entry) || isinf(entry))
+                result.converged = false;
+        });
+    }
+};
+#endif // HAVE_SUITESPARSE_CHOLMOD
+
+} // end namespace Dumux::Optimization::Detail
+#endif // DOXYGEN
+
+namespace Dumux::Optimization::Detail {
+
+/*!
+ * \ingroup Nonlinear
+ * \brief A nonlinear least squares solver with \f$n\f$ model parameters and \f$m\f$ observations
+ * \tparam Assembler a class that assembles the linear system of equations in each iteration
+ * \tparam LinearSolver a class that solves the linear system of equations
+ * \note The assembler has to assemble the actual least squares problem (i.e. the normal equations)
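+ * \note The regularization parameter \f$ \lambda \f$ is adapted heuristically between iterations:
+ *       it is increased (by a factor of 9, while \f$ \lambda < 10^5 \f$) if the error reduction
+ *       deteriorated, and decreased (by a factor of 11, while \f$ \lambda > 10^{-5} \f$) if the
+ *       error was reduced well.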
+ */
+template<class Assembler, class LinearSolver>
+class LevenbergMarquardt : private Dumux::NewtonSolver<Assembler, LinearSolver, DefaultPartialReassembler, Dune::Communication<Dune::No_Comm>>
+{
+    using ParentType = Dumux::NewtonSolver<Assembler, LinearSolver, DefaultPartialReassembler, Dune::Communication<Dune::No_Comm>>;
+    using Scalar = typename Assembler::Scalar;
+    using Variables = typename Assembler::Variables;
+    using SolutionVector = typename Assembler::SolutionVector;
+    using ResidualVector = typename Assembler::ResidualType;
+    using Backend = typename ParentType::Backend;
+public:
+    using ParentType::ParentType;
+
+    LevenbergMarquardt(std::shared_ptr<Assembler> assembler,
+                       std::shared_ptr<LinearSolver> linearSolver,
+                       int verbosity = 0)
+    : ParentType(assembler, linearSolver, {}, "", "LevenbergMarquardt", verbosity)
+    {
+        assembler->setLambda(getParamFromGroup<Scalar>(ParentType::paramGroup(), "LevenbergMarquardt.LambdaInitial", 0.001));
+        maxLambdaDivisions_ = getParamFromGroup<std::size_t>(ParentType::paramGroup(), "LevenbergMarquardt.MaxLambdaDivisions", 10);
+
+        this->setUseLineSearch();
+        this->setMaxRelativeShift(1e-3);
+    }
+
+    /*!
+     * \brief Solve the nonlinear least squares problem
+     * \param vars instance of the `Variables` class representing a numerical
+     *             solution, defining primary and possibly secondary variables
+     *             and information on the time level.
+     * \return bool true if the solver converged
+     * \post If converged, the given `Variables` will represent the solution. If the solver
+     *       does not converge, the `Variables` will represent the best solution so far (lowest value of the objective function)
+     */
+    bool apply(Variables& vars) override
+    {
+        // try solving the non-linear system
+        for (std::size_t i = 0; i <= maxLambdaDivisions_; ++i)
+        {
+            // linearize & solve
+            const bool converged = ParentType::apply(vars);
+
+            if (converged)
+                return true;
+
+            else if (i < maxLambdaDivisions_)
+            {
+                if (this->verbosity() >= 1)
+                    std::cout << Fmt::format("LevenbergMarquardt solver did not converge with λ = {:.2e}. ", ParentType::assembler().lambda())
+                                << Fmt::format("Retrying with λ = {:.2e}.\n", ParentType::assembler().lambda() * 9);
+
+                ParentType::assembler().setLambda(ParentType::assembler().lambda() * 9);
+            }
+
+            // convergence criterion not fulfilled
+            // return best solution so far
+            else
+            {
+                this->solutionChanged_(vars, bestSol_);
+                if (this->verbosity() >= 1)
+                    std::cout << Fmt::format("Choose best solution so far with a MSE of {:.4e}", minResidual_) << std::endl;
+
+                return false;
+            }
+        }
+
+        return false;
+    }
+
+private:
+    void newtonEndStep(Variables &vars, const SolutionVector &uLastIter) override
+    {
+        ParentType::newtonEndStep(vars, uLastIter);
+
+        if (ParentType::reduction_ > ParentType::lastReduction_)
+        {
+            if (ParentType::assembler().lambda() < 1e5)
+            {
+                ParentType::assembler().setLambda(ParentType::assembler().lambda() * 9);
+                if (this->verbosity() > 0)
+                    std::cout << "-- Increasing λ to " << ParentType::assembler().lambda();
+            }
+            else
+            {
+                if (this->verbosity() > 0)
+                    std::cout << "-- Keeping λ at " << ParentType::assembler().lambda();
+            }
+        }
+        else
+        {
+            if (ParentType::reduction_ < 0.1 && ParentType::assembler().lambda() > 1e-5)
+            {
+                ParentType::assembler().setLambda(ParentType::assembler().lambda() / 11.0);
+                if (this->verbosity() > 0)
+                    std::cout << "-- Decreasing λ to " << ParentType::assembler().lambda();
+            }
+            else
+            {
+                if (this->verbosity() > 0)
+                    std::cout << "-- Keeping λ at " << ParentType::assembler().lambda();
+            }
+        }
+
+        if (this->verbosity() > 0)
+            std::cout << ", error reduction: " << ParentType::reduction_
+                      << " and mean squared error: " <<  ParentType::residualNorm_ << std::endl;
+
+        // store best solution
+        if (ParentType::residualNorm_ < minResidual_)
+        {
+            minResidual_ = ParentType::residualNorm_;
+            bestSol_ = uLastIter;
+        }
+    }
+
+    void lineSearchUpdate_(Variables& vars,
+                           const SolutionVector& uLastIter,
+                           const ResidualVector& deltaU) override
+    {
+        alpha_ = 1.0;
+        auto uCurrentIter = uLastIter;
+
+        while (true)
+        {
+            Backend::axpy(-alpha_, deltaU, uCurrentIter);
+            this->solutionChanged_(vars, uCurrentIter);
+
+            ParentType::residualNorm_ = ParentType::assembler().computeMeanSquaredError(Backend::dofs(vars));
+            if (ParentType::numSteps_ == 0)
+                ParentType::initialResidual_ = ParentType::assembler().computeMeanSquaredError(ParentType::assembler().prevSol());
+            ParentType::reduction_ = ParentType::residualNorm_ / ParentType::initialResidual_;
+
+            if (ParentType::reduction_ < ParentType::lastReduction_ || alpha_ <= 0.001)
+            {
+                ParentType::endIterMsgStream_ << Fmt::format(", residual reduction {:.4e}->{:.4e}@α={:.4f}", ParentType::lastReduction_, ParentType::reduction_, alpha_);
+                return;
+            }
+
+            // try with a smaller update and reset solution
+            alpha_ *= 0.5;
+            uCurrentIter = uLastIter;
+        }
+    }
+
+    Scalar minResidual_ = std::numeric_limits<Scalar>::max();
+    SolutionVector bestSol_;
+    std::size_t maxLambdaDivisions_ = 10;
+    Scalar alpha_ = 1.0;
+};
+
+#if HAVE_SUITESPARSE_CHOLMOD
+template<class T, class F>
+class NonlinearLeastSquaresSolver : public Solver<typename Optimization::Detail::Assembler<T, F>::Variables>
+{
+    using Assembler = Optimization::Detail::Assembler<T, F>;
+    using LinearSolver = Optimization::Detail::CholmodLinearSolver<typename Assembler::JacobianMatrix, typename Assembler::ResidualType>;
+    using Optimizer = Optimization::Detail::LevenbergMarquardt<Assembler, LinearSolver>;
+public:
+    using Variables = typename Assembler::Variables;
+
+    NonlinearLeastSquaresSolver(const F& f, const Dune::BlockVector<T>& x0, std::size_t size)
+    : solver_(std::make_unique<Optimizer>(std::make_shared<Assembler>(f, x0, size), std::make_shared<LinearSolver>(), 2))
+    {}
+
+    bool apply(Variables& vars) override
+    { return solver_->apply(vars); }
+
+private:
+    std::unique_ptr<Optimizer> solver_;
+};
+#endif // HAVE_SUITESPARSE_CHOLMOD
+
+} // end namespace Dumux::Optimization::Detail
+
+namespace Dumux::Optimization {
+
+#if HAVE_SUITESPARSE_CHOLMOD
+
+/*!
+ * \ingroup Nonlinear
+ * \brief Creates a nonlinear least squares solver with \f$n\f$ model parameters and \f$m\f$ observations
+ * For a detailed description of the mathematical background see \cite NocedalWright2006
+ * \param f The residual functional whose norm is to be minimized (\f$ f: \mathbb{R}^n \to \mathbb{R}^m \f$)
+ * \param x0 Initial guess for the parameters (vector of size \f$n\f$)
+ * \param size The number of observations (\f$m\f$)
+ * \return a unique pointer to the nonlinear least squares solver with a method `apply(Variables& vars)`
+ * \note The solver can be configured through the parameter group `LevenbergMarquardt`
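+ *
+ * A minimal usage sketch, fitting a line \f$ y = a x + b \f$ to given data
+ * (here, `xData` and `yData` are assumed to be `std::vector<double>` observations):
+ * \code{.cpp}
+ * auto f = [&](const Dune::BlockVector<double>& p) {
+ *     Dune::BlockVector<double> r(xData.size());
+ *     for (std::size_t i = 0; i < xData.size(); ++i)
+ *         r[i] = yData[i] - (p[0]*xData[i] + p[1]); // residual of observation i
+ *     return r;
+ * };
+ * Dune::BlockVector<double> x0(2); x0 = 1.0; // initial parameter guess
+ * auto solver = Optimization::makeNonlinearLeastSquaresSolver(f, x0, xData.size());
+ * auto params = x0;
+ * const bool converged = solver->apply(params); // params now holds the fitted (a, b)
+ * \endcode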
+ */
+template<class T, class F>
+auto makeNonlinearLeastSquaresSolver(const F& f, const Dune::BlockVector<T>& x0, std::size_t size)
+-> std::unique_ptr<Solver<Dune::BlockVector<T>>>
+{ return std::make_unique<Optimization::Detail::NonlinearLeastSquaresSolver<T, F>>(f, x0, size); }
+
+#endif // HAVE_SUITESPARSE_CHOLMOD
+
+} // end namespace Dumux::Optimization
+
+#endif
diff --git a/test/nonlinear/CMakeLists.txt b/test/nonlinear/CMakeLists.txt
index ae2c987a477f5d51c09265ed3fc9bf42b90c1d98..380a307e3555dbbd3db2fcefd89d3de60c46825d 100644
--- a/test/nonlinear/CMakeLists.txt
+++ b/test/nonlinear/CMakeLists.txt
@@ -2,4 +2,5 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 add_subdirectory(findscalarroot)
+add_subdirectory(leastsquares)
 add_subdirectory(newton)
diff --git a/test/nonlinear/leastsquares/CMakeLists.txt b/test/nonlinear/leastsquares/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e8d16bb87eb19b09d23b58731ec826218636df21
--- /dev/null
+++ b/test/nonlinear/leastsquares/CMakeLists.txt
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dumux_add_test(SOURCES test_nonlinear_leastsquares.cc
+               LABELS unit nonlinear
+               CMAKE_GUARD SuiteSparse_CHOLMOD_FOUND)
diff --git a/test/nonlinear/leastsquares/test_nonlinear_leastsquares.cc b/test/nonlinear/leastsquares/test_nonlinear_leastsquares.cc
new file mode 100644
index 0000000000000000000000000000000000000000..87524e7301f6a44f1cf783ececd68df6a5eb2e17
--- /dev/null
+++ b/test/nonlinear/leastsquares/test_nonlinear_leastsquares.cc
@@ -0,0 +1,132 @@
+//
+// SPDX-FileCopyrightText: Copyright © DuMux Project contributors, see AUTHORS.md in root folder
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+#include <config.h>
+
+#include <iostream>
+#include <cmath>
+#include <algorithm>
+#include <random>
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/float_cmp.hh>
+
+#include <dumux/common/initialize.hh>
+#include <dumux/common/parameters.hh>
+#include <dumux/common/math.hh>
+#include <dumux/common/random.hh>
+
+#include <dumux/nonlinear/leastsquares.hh>
+
+namespace Dumux {
+
+template<class F>
+void testCurveFit(const Dune::BlockVector<double>& trueParameters, std::size_t numObservations, F&& function, const double noiseStddev = 1e-5, const double tol = 1e-3)
+{
+    // generate some observations
+    const auto observationsX = Dumux::linspace(-1.0, 1.0, numObservations);
+
+    // y = f(x) + noise
+    // noise is drawn from a normal distribution with mean 0 and standard deviation noiseStddev
+    std::mt19937 gen(0); // fixed seed for reproducibility
+    SimpleNormalDistribution<double> noise(0.0, noiseStddev);
+
+    auto observationsY = observationsX;
+    std::transform(observationsX.begin(), observationsX.end(), observationsY.begin(),
+                   [&](const double x) { return function(trueParameters, x) + noise(gen); });
+
+    // residual function
+    Dune::BlockVector<double> r(numObservations);
+    const auto residual = [&](const auto& p)
+    {
+        for (std::size_t i = 0; i < numObservations; ++i)
+            r[i] = observationsY[i] - function(p, observationsX[i]);
+        return r;
+    };
+
+    // initial guess
+    auto initialGuess(trueParameters);
+    initialGuess = 1.0;
+
+    auto optimizer = Optimization::makeNonlinearLeastSquaresSolver(residual, initialGuess, numObservations);
+    auto params = initialGuess;
+    const bool converged = optimizer->apply(params);
+    if (!converged)
+        DUNE_THROW(Dune::Exception, "Nonlinear least squares solver did not converge.");
+
+    std::cout << "True parameters:\n" << trueParameters << std::endl;
+    std::cout << "Estimated parameters:\n" << params << std::endl;
+
+    // check if the estimated parameters are close to the true parameters
+    for (std::size_t i = 0; i < trueParameters.size(); ++i)
+        if (!Dune::FloatCmp::eq(params[i], trueParameters[i], tol))
+            DUNE_THROW(Dune::Exception, "Estimated parameters are not close to the true parameters."
+                << " Parameter: " << i << ", true: " << trueParameters[i] << ", estimated: " << params[i]);
+}
+
+} // end namespace Dumux
+
+int main(int argc, char* argv[])
+{
+    // maybe initialize MPI and/or multithreading backend
+    Dumux::initialize(argc, argv);
+
+    // initialize parameters
+    Dumux::Parameters::init(argc, argv);
+
+    // simple curve fitting problems
+
+    // f(x) = a*x + b * exp(c*x)
+    {
+        const auto trueParameters = Dune::BlockVector<double>({1.0, 2.0, 0.5});
+        std::cout << "Test function f(x) = a*x + b * exp(c*x)" << std::endl;
+        Dumux::testCurveFit(
+            trueParameters, 50,
+            [&](const auto& p, const double x)
+            { return p[0]*x + p[1] * std::exp(p[2]*x); }
+        );
+
+        std::cout << "Test function f(x) = a*x + b * exp(c*x) with more noise" << std::endl;
+        Dumux::testCurveFit(
+            trueParameters, 50,
+            [&](const auto& p, const double x)
+            { return p[0]*x + p[1] * std::exp(p[2]*x); },
+            0.01, 0.3
+        );
+    }
+
+    // f(x) = a*x**3 + b*x**2 + c*x + d
+    {
+        std::cout << "Test function f(x) = a*x**3 + b*x**2 + c*x + d" << std::endl;
+        const auto trueParameters = Dune::BlockVector<double>({1.0, -2.0, 3.0, 4.0});
+        Dumux::testCurveFit(
+            trueParameters, 50,
+            [&](const auto& p, const double x)
+            { return p[0]*std::pow(x, 3) + p[1]*std::pow(x, 2) + p[2]*x + p[3]; }
+        );
+    }
+
+    // f(x) = a*sin(b*x + c)
+    {
+        std::cout << "Test function f(x) = a*sin(b*x + c)" << std::endl;
+        const auto trueParameters = Dune::BlockVector<double>({1.0, 1.5, 0.7});
+        Dumux::testCurveFit(
+            trueParameters, 50,
+            [&](const auto& p, const double x)
+            { return p[0]*std::sin(p[1]*x + p[2]); }
+        );
+    }
+
+    // quartic with large coeffs
+    {
+        std::cout << "Test function f(x) = a*x**4 + b*x**3 + c*x**2 + d*x + e" << std::endl;
+        const auto trueParameters = Dune::BlockVector<double>({1e5, -2e4, 3e3, 4e2, 5e1});
+        Dumux::testCurveFit(
+            trueParameters, 50,
+            [&](const auto& p, const double x)
+            { return p[0]*std::pow(x, 4) + p[1]*std::pow(x, 3) + p[2]*std::pow(x, 2) + p[3]*x + p[4]; }
+        );
+    }
+
+    return 0;
+}