From 0e9c33b35b9b3f8b4b811aa245628830cdf63a83 Mon Sep 17 00:00:00 2001
From: Timo Koch <timo.koch@iws.uni-stuttgart.de>
Date: Fri, 30 Jul 2021 22:29:05 +0200
Subject: [PATCH] [bin][python] Format all Python scripts with black and enable
 black CI

---
 .gitlab-ci/default.yml                        |   1 +
 bin/create_cmakelists.py                      |  28 +-
 bin/create_dockerimage.py                     | 138 ++++-----
 bin/doc/getparameterlist.py                   | 192 +++++++-----
 bin/extract_as_new_module.py                  |  53 +---
 bin/installdumux.py                           |  94 +++---
 bin/installexternal.py                        | 249 +++++++++-------
 bin/make_installscript.py                     |  10 +-
 bin/postprocessing/exportscreenshot2d.py      | 274 +++++++++++++-----
 bin/postprocessing/extractlinedata.py         |  87 ++++--
 .../extractpointdataovertime.py               |  87 +++---
 bin/postprocessing/l2error.py                 | 200 ++++++++-----
 bin/remove_clutter_after_last_endif.py        |   7 +-
 bin/testing/findtests.py                      |  84 +++---
 bin/testing/fuzzycomparedata.py               |  89 ++++--
 bin/testing/fuzzycomparevtu.py                | 214 ++++++++++----
 bin/testing/getchangedfiles.py                |  62 ++--
 bin/testing/runselectedtests.py               | 144 ++++-----
 bin/testing/runtest.py                        | 115 ++++++--
 bin/util/common.py                            | 186 ++++++------
 bin/util/installscript.py                     | 145 +++++----
 bin/util/installscript_writer.py              |  76 +++--
 bin/util/moduleinfo.py                        |  38 ++-
 23 files changed, 1579 insertions(+), 994 deletions(-)

diff --git a/.gitlab-ci/default.yml b/.gitlab-ci/default.yml
index a01fd7dc4f..93b3c5c5f0 100644
--- a/.gitlab-ci/default.yml
+++ b/.gitlab-ci/default.yml
@@ -40,6 +40,7 @@ black (python):
   # TODO: maybe extend this to the utility scripts?
     - black --check --verbose -- python
     - black --check --verbose -- test/python
+    - black --check --verbose -- bin
 
 
 pylint-flake8 (python):
diff --git a/bin/create_cmakelists.py b/bin/create_cmakelists.py
index 3a8d4d0fe3..e045490832 100755
--- a/bin/create_cmakelists.py
+++ b/bin/create_cmakelists.py
@@ -13,15 +13,20 @@ import argparse
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('folder', type=str, nargs='?', help='the folder to create CMakeLists.txt\'s for', default=None)
+    parser.add_argument(
+        "folder",
+        type=str,
+        nargs="?",
+        help="the folder to create CMakeLists.txt's for",
+        default=None,
+    )
     args = vars(parser.parse_args())
 
     # default to the dumux folder (relative path to the location of this script)
-    if args['folder'] is None:
+    if args["folder"] is None:
         rootDir = os.path.dirname(os.path.abspath(__file__)) + "/../../dumux"
     else:
-        rootDir = args['folder']
-
+        rootDir = args["folder"]
 
     ignore_folders = ["", "io/format/fmt", "io/xml"]
     extensions = [".hh", ".inc"]
@@ -30,7 +35,7 @@ if __name__ == "__main__":
         subFolders = sorted(subFolders)
         files = sorted(files)
         # get folder name relative to dumux
-        folderName = fullFolderName.replace(rootDir + '/', '').replace(rootDir, '')
+        folderName = fullFolderName.replace(rootDir + "/", "").replace(rootDir, "")
         if folderName not in ignore_folders:
             with open(fullFolderName + "/CMakeLists.txt", "w") as cmakelists:
                 # add subfolders
@@ -45,9 +50,16 @@ if __name__ == "__main__":
                         break
 
                 if headersExist:
-                    if subFolders: cmakelists.write("\n")
+                    if subFolders:
+                        cmakelists.write("\n")
                     # collect all files to be installed in a CMake variable
                     headers_variable = "DUMUX_" + folderName.upper().replace("/", "_") + "_HEADERS"
-                    cmakelists.write("file(GLOB {}{})\n".format(headers_variable, " *".join([''] + extensions)))
+                    cmakelists.write(
+                        "file(GLOB {}{})\n".format(headers_variable, " *".join([""] + extensions))
+                    )
                     cmakelists.write("install(FILES ${{{}}}\n".format(headers_variable))
-                    cmakelists.write("        DESTINATION ${{CMAKE_INSTALL_INCLUDEDIR}}/dumux/{})\n".format(folderName))
+                    cmakelists.write(
+                        "        DESTINATION ${{CMAKE_INSTALL_INCLUDEDIR}}/dumux/{})\n".format(
+                            folderName
+                        )
+                    )
diff --git a/bin/create_dockerimage.py b/bin/create_dockerimage.py
index ed3d002631..8658f48152 100644
--- a/bin/create_dockerimage.py
+++ b/bin/create_dockerimage.py
@@ -19,40 +19,38 @@ if __name__ == "__main__":
         description="Create a docker image for a given module and install script."
     )
 
-    parser.add_argument('-m', '--modulepath',
-                        required=True,
-                        help='the path to the your module')
-    parser.add_argument('-i', '--installScript',
-                        required=True,
-                        help="Specify the installation script")
-    parser.add_argument('-t', '--templateFolder',
-                        required=False,
-                        help="Specify the folder with the template files")
+    parser.add_argument("-m", "--modulepath", required=True, help="the path to your module")
+    parser.add_argument(
+        "-i", "--installScript", required=True, help="Specify the installation script"
+    )
+    parser.add_argument(
+        "-t", "--templateFolder", required=False, help="Specify the folder with the template files"
+    )
 
     args = vars(parser.parse_args())
 
     # get information on the module
-    modulePath = os.path.abspath(args['modulepath'])
-    modInfo = extractModuleInfos(getModuleFile(modulePath),
-                                ['Module', 'Maintainer'])
-    moduleName = modInfo['Module']
-    moduleMaintainer = modInfo['Maintainer']
+    modulePath = os.path.abspath(args["modulepath"])
+    modInfo = extractModuleInfos(getModuleFile(modulePath), ["Module", "Maintainer"])
+    moduleName = modInfo["Module"]
+    moduleMaintainer = modInfo["Maintainer"]
     dockerTag = moduleName.lower()  # docker only supports lower case
 
     # get folder with the template files
-    templateFolder = args['templateFolder']
+    templateFolder = args["templateFolder"]
     if not templateFolder:
-        templateFolder = os.path.join(modulePath, '../dumux/docker')
+        templateFolder = os.path.join(modulePath, "../dumux/docker")
     if not os.path.exists(templateFolder):
         sys.exit("Template folder {} could not be found".format(templateFolder))
 
-    print("*"*54)
+    print("*" * 54)
     print("\n-- Creating a Docker image for module " + moduleName + " --\n")
-    print("*"*54)
+    print("*" * 54)
 
     if os.path.exists("docker"):
-        print("\nA docker folder already exists. "
-            "Continue anyway? - will be overwritten - [y/N]\n")
+        print(
+            "\nA docker folder already exists. " "Continue anyway? - will be overwritten - [y/N]\n"
+        )
         delete = input()
         if delete == "y" or delete == "Y":
             shutil.rmtree("docker")
@@ -64,62 +62,63 @@ if __name__ == "__main__":
     print("--> Created the folder 'docker'.")
 
     # copy install script into docker folder and make it executable
-    installScriptPath = args['installScript']
+    installScriptPath = args["installScript"]
     installScriptName = os.path.split(installScriptPath)[1]
-    installScript = os.path.join(os.path.join(os.getcwd(), 'docker'),
-                                installScriptName)
+    installScript = os.path.join(os.path.join(os.getcwd(), "docker"), installScriptName)
     shutil.copy(installScriptPath, installScript)
     os.system("chmod +x {}".format(installScript))
-    print("--> Using install script: {} to install dependencies for module {}."
-        .format(installScript, moduleName))
-
+    print(
+        "--> Using install script: {} to install dependencies for module {}.".format(
+            installScript, moduleName
+        )
+    )
 
     # substitute content from template and write to target
     def substituteAndWrite(template, target, mapping):
         if not os.path.exists(template):
             sys.exit("Template file '" + template + "' could not be found")
-        with open(target, 'w') as targetFile:
+        with open(target, "w") as targetFile:
             raw = string.Template(open(template).read())
             targetFile.write(raw.substitute(**mapping))
 
-
     # write setpermissions helper script
-    template = os.path.join(templateFolder, 'setpermissions.sh.template')
-    target = os.path.join(os.getcwd(), 'docker/setpermissions.sh')
+    template = os.path.join(templateFolder, "setpermissions.sh.template")
+    target = os.path.join(os.getcwd(), "docker/setpermissions.sh")
     substituteAndWrite(template, target, {})
     print("--> Created permission helper script for easier container setup.")
 
     # write welcome message file
-    template = os.path.join(templateFolder, 'WELCOME.template')
-    target = os.path.join(os.getcwd(), 'docker/WELCOME')
-    substituteAndWrite(template, target,
-                    {'modName': moduleName, 'modFolder': moduleName})
+    template = os.path.join(templateFolder, "WELCOME.template")
+    target = os.path.join(os.getcwd(), "docker/WELCOME")
+    substituteAndWrite(template, target, {"modName": moduleName, "modFolder": moduleName})
     print("--> Created welcome message displayed on Docker container startup.")
 
     # write readme file
-    template = os.path.join(templateFolder, 'README.md.template')
-    target = os.path.join(os.getcwd(), 'docker/README.md')
-    substituteAndWrite(template, target,
-                    {'modName': moduleName, 'dockerTag': dockerTag})
+    template = os.path.join(templateFolder, "README.md.template")
+    target = os.path.join(os.getcwd(), "docker/README.md")
+    substituteAndWrite(template, target, {"modName": moduleName, "dockerTag": dockerTag})
     print("--> Created README.md on how to use the docker image.")
 
     # write helper file for container spin-up (make it executable after creation)
-    template = os.path.join(templateFolder, 'docker.sh.template')
-    target = os.path.join(os.getcwd(), 'docker/docker_{}.sh'.format(dockerTag))
-    substituteAndWrite(template, target, {'dockerTag': dockerTag})
+    template = os.path.join(templateFolder, "docker.sh.template")
+    target = os.path.join(os.getcwd(), "docker/docker_{}.sh".format(dockerTag))
+    substituteAndWrite(template, target, {"dockerTag": dockerTag})
     os.system("chmod +x " + target)
     print("--> Created helper script to spin up the docker container.")
 
     # write the docker file
-    template = os.path.join(templateFolder, 'Dockerfile.template')
-    target = os.path.join(os.getcwd(), 'docker/Dockerfile')
-    substituteAndWrite(template, target,
-                    {
-                        'modName': moduleName,
-                        'modMaintainer': moduleMaintainer,
-                        'dockerTag': dockerTag,
-                        'instScript': installScriptName
-                    })
+    template = os.path.join(templateFolder, "Dockerfile.template")
+    target = os.path.join(os.getcwd(), "docker/Dockerfile")
+    substituteAndWrite(
+        template,
+        target,
+        {
+            "modName": moduleName,
+            "modMaintainer": moduleMaintainer,
+            "dockerTag": dockerTag,
+            "instScript": installScriptName,
+        },
+    )
     print("--> Created Dockerfile. You can adapt it to your needs.")
     print()
     print("Do you want to directly build the Docker image? [y/N]")
@@ -128,25 +127,32 @@ if __name__ == "__main__":
     if build == "y" or build == "Y":
         print("Building Docker image... this may take several minutes.")
         try:
-            os.chdir('docker')
-            subprocess.run(['docker', 'build',
-                            '-f', 'Dockerfile',
-                            '-t', dockerTag, '.'], check=True)
-            os.chdir('../')
+            os.chdir("docker")
+            subprocess.run(
+                ["docker", "build", "-f", "Dockerfile", "-t", dockerTag, "."], check=True
+            )
+            os.chdir("../")
         except Exception:
-            os.chdir('../')
+            os.chdir("../")
             sys.exit("ERROR: docker image build failed")
 
         print()
-        print("Successfully built image: {}. "
-            "Have a look at docker/README.md.".format(dockerTag))
-        print("Check the container by running "
-            "'docker run -it {} /bin/bash' in the same".format(dockerTag))
-        print("directory as the Dockerfile, and try using the convenience script "
-            "docker_{}.sh".format(dockerTag))
+        print("Successfully built image: {}. " "Have a look at docker/README.md.".format(dockerTag))
+        print(
+            "Check the container by running "
+            "'docker run -it {} /bin/bash' in the same".format(dockerTag)
+        )
+        print(
+            "directory as the Dockerfile, and try using the convenience script "
+            "docker_{}.sh".format(dockerTag)
+        )
         print("See docker/README.md for more information.")
     else:
-        print("You can build your Docker image later by running "
-            "'docker build -f Dockerfile -t {}'".format(dockerTag))
-        print("from within the folder 'docker' that was created by this script, "
-            "and in which you should find the 'Dockerfile'.")
+        print(
+            "You can build your Docker image later by running "
+            "'docker build -f Dockerfile -t {}'".format(dockerTag)
+        )
+        print(
+            "from within the folder 'docker' that was created by this script, "
+            "and in which you should find the 'Dockerfile'."
+        )
diff --git a/bin/doc/getparameterlist.py b/bin/doc/getparameterlist.py
index 300f4fb38d..8f6b474ec7 100644
--- a/bin/doc/getparameterlist.py
+++ b/bin/doc/getparameterlist.py
@@ -18,53 +18,62 @@ def getEnclosedContent(string, openKey, closeKey):
     result, rest = rest[0] + closeKey, rest[2]
     while result.count(openKey) != result.count(closeKey):
         rest = rest.partition(closeKey)
-        if rest[1] == '': raise IOError('Could not get content between "{}" and "{}" in given string "{}"'.format(openKey, closeKey, string))
+        if rest[1] == "":
+            raise IOError(
+                'Could not get content between "{}" and "{}" in given string "{}"'.format(
+                    openKey, closeKey, string
+                )
+            )
         result, rest = result + rest[0] + closeKey, rest[2]
 
     return result.partition(openKey)[2].rpartition(closeKey)[0]
 
+
 # extract a parameter from a given line
 def extractParamName(line):
 
     # split occurrences of getParam<T>(CALLARGS) or getParamFromGroup<T>(CALLARGS)
     # into the template arguments T and the function arguments CALLARGS
-    if 'getParamFromGroup<' in line:
-        line = line.split('getParamFromGroup')[1]
+    if "getParamFromGroup<" in line:
+        line = line.split("getParamFromGroup")[1]
         hasGroup = True
-    elif 'getParam<' in line:
-        line = line.split('getParam')[1]
+    elif "getParam<" in line:
+        line = line.split("getParam")[1]
         hasGroup = False
     else:
         return {}
 
     # TODO: Support this also
-    if line.count('getParam') > 1:
+    if line.count("getParam") > 1:
         raise IOError('Cannot process multiple occurrences of "getParam" in one line')
 
     # remove trailing spaces and cut off everything behind semicolon
-    line = line.strip('\n').strip(' ').split(';')[0]
+    line = line.strip("\n").strip(" ").split(";")[0]
 
     # extract template arg between '<' and '>'
-    paramType = getEnclosedContent(line, '<', '>')
+    paramType = getEnclosedContent(line, "<", ">")
 
     # extract function arguments
-    functionArgs = line.partition('<' + paramType + '>')[2]
-    functionArgs = getEnclosedContent(functionArgs, '(', ')')
+    functionArgs = line.partition("<" + paramType + ">")[2]
+    functionArgs = getEnclosedContent(functionArgs, "(", ")")
 
-    if hasGroup: functionArgs = functionArgs.partition(',')[2]
-    functionArgs = functionArgs.partition(',')
+    if hasGroup:
+        functionArgs = functionArgs.partition(",")[2]
+    functionArgs = functionArgs.partition(",")
     paramName = functionArgs[0]
     defaultValue = None if not functionArgs[2] else functionArgs[2]
 
-    paramType = paramType.strip(' ')
-    paramName = paramName.strip(' ')
-    if (defaultValue): defaultValue = defaultValue.strip(' ')
+    paramType = paramType.strip(" ")
+    paramName = paramName.strip(" ")
+    if defaultValue:
+        defaultValue = defaultValue.strip(" ")
 
     # if interior spaces occur in the parameter name, we can't identify it
-    if paramName[0] != '"' or paramName[-1] != '"' or ' ' in paramName:
+    if paramName[0] != '"' or paramName[-1] != '"' or " " in paramName:
         raise IOError("Could not correctly process parameter name")
 
-    return {'paramType': paramType, 'paramName': paramName.strip('"'), 'defaultValue': defaultValue}
+    return {"paramType": paramType, "paramName": paramName.strip('"'), "defaultValue": defaultValue}
+
 
 # extract all parameters from a given file
 def getParamsFromFile(file):
@@ -73,41 +82,47 @@ def getParamsFromFile(file):
     with open(file) as f:
         for lineIdx, line in enumerate(f):
             try:
-                param = extractParamName(line);
-                if param: parameters.append(param);
+                param = extractParamName(line)
+                if param:
+                    parameters.append(param)
             except IOError as e:
-                errors[lineIdx] = {'line': line.strip(), 'message': e}
+                errors[lineIdx] = {"line": line.strip(), "message": e}
 
     # print encountered errors
     if errors:
-        print('\n\n{} parameter{} in file {} could not be retrieved automatically. Please check them yourself:'.format(len(errors), 's' if len(errors) > 1 else '', file))
+        print(
+            "\n\n{} parameter{} in file {} could not be retrieved automatically. Please check them yourself:".format(
+                len(errors), "s" if len(errors) > 1 else "", file
+            )
+        )
         for lineIdx in errors:
-            print("\n\t-> line {}: {}".format(lineIdx, errors[lineIdx]['line']))
-            print("\t\t-> error message: {}".format(errors[lineIdx]['message']))
+            print("\n\t-> line {}: {}".format(lineIdx, errors[lineIdx]["line"]))
+            print("\t\t-> error message: {}".format(errors[lineIdx]["message"]))
 
     return parameters
 
+
 # search all *.hh files for parameters
 # TODO: allow runtime args with extensions and folder(s) to be checked
 parameters = []
 rootDir = os.path.dirname(os.path.abspath(__file__)) + "/../../dumux"
 for root, _, files in os.walk(rootDir):
     for file in files:
-        if os.path.splitext(file)[1] == ".hh" and os.path.splitext(file)[0] != 'parameters':
+        if os.path.splitext(file)[1] == ".hh" and os.path.splitext(file)[0] != "parameters":
             parameters.extend(getParamsFromFile(os.path.join(root, file)))
 
 # make sorted dictionary of the entries
 # treat duplicates (could have differing default values or type names - e.g. via aliases)
 parameterDict = {}
 for params in parameters:
-    key = params['paramName']
+    key = params["paramName"]
     if key in parameterDict:
-        parameterDict[key]['defaultValue'].append(params['defaultValue'])
-        parameterDict[key]['paramType'].append(params['paramType'])
+        parameterDict[key]["defaultValue"].append(params["defaultValue"])
+        parameterDict[key]["paramType"].append(params["paramType"])
     else:
         parameterDict[key] = params
-        parameterDict[key]['defaultValue'] = [params['defaultValue']]
-        parameterDict[key]['paramType'] = [params['paramType']]
+        parameterDict[key]["defaultValue"] = [params["defaultValue"]]
+        parameterDict[key]["paramType"] = [params["paramType"]]
 parameterDict = {key: value for key, value in sorted(parameterDict.items())}
 
 # determine actual entries (from duplicates)
@@ -121,32 +136,54 @@ tableEntryData = []
 for key in parameterDict:
 
     entry = parameterDict[key]
-    hasGroup = True if entry['paramName'].count('.') != 0 else False
-    groupEntry = '-' if not hasGroup else entry['paramName'].split('.')[0]
-    paramName = entry['paramName'] if not hasGroup else entry['paramName'].partition('.')[2]
+    hasGroup = True if entry["paramName"].count(".") != 0 else False
+    groupEntry = "-" if not hasGroup else entry["paramName"].split(".")[0]
+    paramName = entry["paramName"] if not hasGroup else entry["paramName"].partition(".")[2]
 
     # In case of multiple occurrences, we use the first entry that is not None and print the others for possible manual editing
-    paramType = entry['paramType'][0]
-    defaultValue = next((e for e in entry['defaultValue'] if e), '-')
-
-    hasMultiplePT = True if not all(pt == paramType for pt in entry['paramType']) else False
-    hasMultipleDV = True if not all(dv == (defaultValue if defaultValue != '-' else None) for dv in entry['defaultValue']) else False
+    paramType = entry["paramType"][0]
+    defaultValue = next((e for e in entry["defaultValue"] if e), "-")
+
+    hasMultiplePT = True if not all(pt == paramType for pt in entry["paramType"]) else False
+    hasMultipleDV = (
+        True
+        if not all(
+            dv == (defaultValue if defaultValue != "-" else None) for dv in entry["defaultValue"]
+        )
+        else False
+    )
     if hasMultiplePT or hasMultipleDV:
-        print('\nFound multiple occurrences of parameter ' + paramName + ' with differing specifications: ')
+        print(
+            "\nFound multiple occurrences of parameter "
+            + paramName
+            + " with differing specifications: "
+        )
     if hasMultiplePT:
-        print(' -> Specified type names:')
-        for typeName in entry['paramType']: print(' '*8 + typeName)
-        print(' ---> For the parameters list, ' + paramType + ' has been chosen. Please adapt manually if desired.')
+        print(" -> Specified type names:")
+        for typeName in entry["paramType"]:
+            print(" " * 8 + typeName)
+        print(
+            " ---> For the parameters list, "
+            + paramType
+            + " has been chosen. Please adapt manually if desired."
+        )
     if hasMultipleDV:
-        print(' -> Specified default values:')
-        for default in entry['defaultValue']: print(' '*8 + (default if default else '- (none given)'))
-        print(' ---> For the parameters list, ' + defaultValue + ' has been chosen. Please adapt manually if desired.')
-
-    maxGroupWidth = max(maxGroupWidth, len(groupEntry)+3) # +3 because \b will be added later
+        print(" -> Specified default values:")
+        for default in entry["defaultValue"]:
+            print(" " * 8 + (default if default else "- (none given)"))
+        print(
+            " ---> For the parameters list, "
+            + defaultValue
+            + " has been chosen. Please adapt manually if desired."
+        )
+
+    maxGroupWidth = max(maxGroupWidth, len(groupEntry) + 3)  # +3 because \b will be added later
     maxParamWidth = max(maxParamWidth, len(paramName))
     maxTypeWidth = max(maxTypeWidth, len(paramType))
     maxDefaultWidth = max(maxDefaultWidth, len(defaultValue))
-    tableEntryData.append({'group': groupEntry, 'name': paramName, 'type': paramType, 'default': defaultValue})
+    tableEntryData.append(
+        {"group": groupEntry, "name": paramName, "type": paramType, "default": defaultValue}
+    )
 
 # generate actual table entries
 tableEntriesWithGroup = []
@@ -155,22 +192,27 @@ previousGroupEntry = None
 
 for data in tableEntryData:
 
-    groupEntry = data['group']
-    paramName = data['name']
-    paramType = data['type']
-    defaultValue = data['default']
+    groupEntry = data["group"]
+    paramName = data["name"]
+    paramType = data["type"]
+    defaultValue = data["default"]
 
     if groupEntry != previousGroupEntry:
         previousGroupEntry = groupEntry
-        if groupEntry != '-': groupEntry = '\\b ' + groupEntry
-
-    tableEntry = ' * | {} | {} | {} | {} | TODO: explanation |'.format(groupEntry.ljust(maxGroupWidth),
-                                                                       paramName.ljust(maxParamWidth),
-                                                                       paramType.ljust(maxTypeWidth),
-                                                                       defaultValue.ljust(maxDefaultWidth))
-
-    if groupEntry != '-': tableEntriesWithGroup.append(tableEntry)
-    else: tableEntriesWithoutGroup.append(tableEntry)
+        if groupEntry != "-":
+            groupEntry = "\\b " + groupEntry
+
+    tableEntry = " * | {} | {} | {} | {} | TODO: explanation |".format(
+        groupEntry.ljust(maxGroupWidth),
+        paramName.ljust(maxParamWidth),
+        paramType.ljust(maxTypeWidth),
+        defaultValue.ljust(maxDefaultWidth),
+    )
+
+    if groupEntry != "-":
+        tableEntriesWithGroup.append(tableEntry)
+    else:
+        tableEntriesWithoutGroup.append(tableEntry)
 
 # combine entries
 tableEntries = tableEntriesWithoutGroup + tableEntriesWithGroup
@@ -186,26 +228,26 @@ header = """/*!
  * to use every parameter!
  *\n"""
 header += " * | " + "Group".ljust(maxGroupWidth)
-header +=   " | " + "Parameter".ljust(maxParamWidth)
-header +=   " | " + "Type".ljust(maxTypeWidth)
-header +=   " | " + "Default Value".ljust(maxDefaultWidth)
-header +=   " | Explanation |\n"
+header += " | " + "Parameter".ljust(maxParamWidth)
+header += " | " + "Type".ljust(maxTypeWidth)
+header += " | " + "Default Value".ljust(maxDefaultWidth)
+header += " | Explanation |\n"
 
 header += " * | " + ":-".ljust(maxGroupWidth)
-header +=   " | " + ":-".ljust(maxParamWidth)
-header +=   " | " + ":-".ljust(maxTypeWidth)
-header +=   " | " + ":-".ljust(maxDefaultWidth)
-header +=   " | :-         |\n"
+header += " | " + ":-".ljust(maxParamWidth)
+header += " | " + ":-".ljust(maxTypeWidth)
+header += " | " + ":-".ljust(maxDefaultWidth)
+header += " | :-         |\n"
 
 header += " * | " + "".ljust(maxGroupWidth)
-header +=   " | " + "ParameterFile".ljust(maxParamWidth)
-header +=   " | " + "std::string".ljust(maxTypeWidth)
-header +=   " | " + "executable.input".ljust(maxDefaultWidth)
-header +=   " | :-         |\n"
+header += " | " + "ParameterFile".ljust(maxParamWidth)
+header += " | " + "std::string".ljust(maxTypeWidth)
+header += " | " + "executable.input".ljust(maxDefaultWidth)
+header += " | :-         |\n"
 
 # overwrite the old parameterlist.txt file
-with open(rootDir + '/../doc/doxygen/extradoc/parameterlist.txt', "w") as outputfile:
+with open(rootDir + "/../doc/doxygen/extradoc/parameterlist.txt", "w") as outputfile:
     outputfile.write(header)
     for e in tableEntries:
-        outputfile.write(e + '\n')
-    outputfile.write(' */\n')
+        outputfile.write(e + "\n")
+    outputfile.write(" */\n")
diff --git a/bin/extract_as_new_module.py b/bin/extract_as_new_module.py
index cca14fc2b8..414a143258 100755
--- a/bin/extract_as_new_module.py
+++ b/bin/extract_as_new_module.py
@@ -67,11 +67,7 @@ def isInSubTree(file, base):
 def removeRedundantFolders(folders):
     """Remove folders that are duplicates or that are contained in a parent folder"""
     uniqueFolders = list(set(folders))
-    return [
-        sf
-        for sf in uniqueFolders
-        if not any(isInSubTree(sf, base) for base in uniqueFolders)
-    ]
+    return [sf for sf in uniqueFolders if not any(isInSubTree(sf, base) for base in uniqueFolders)]
 
 
 def checkModuleFolder(moduleDirectory):
@@ -166,20 +162,18 @@ def addFoldersToCMakeLists(modulePath, subFolder):
                         break
 
                 newLines = lines[0:idx]
-                newLines += [
-                    f"add_subdirectory({subFolderPATH.split(os.path.sep)[i]})\n"
-                ]
+                newLines += [f"add_subdirectory({subFolderPATH.split(os.path.sep)[i]})\n"]
                 newLines += lines[idx:]
                 newContent = "".join(line for line in reversed(newLines))
 
                 replaceFileContent(cmakeListsFile, newContent)
             else:
                 with open(cmakeListsFile, "w") as cml:
-                    cml.write(
-                        f"add_subdirectory({subFolderPATH.split(os.path.sep)[i]})\n"
-                    )
+                    cml.write(f"add_subdirectory({subFolderPATH.split(os.path.sep)[i]})\n")
             cmakeListsFile = os.path.join(
-                os.path.dirname(cmakeListsFile), subFolderPATH.split(os.path.sep)[i], "CMakeLists.txt"
+                os.path.dirname(cmakeListsFile),
+                subFolderPATH.split(os.path.sep)[i],
+                "CMakeLists.txt",
             )
 
 
@@ -187,9 +181,7 @@ def findHeaders(modulePath, sourceFiles):
     """Find header included (recursively) in the given source files"""
     with mp.Pool() as p:
         headers = itertools.chain.from_iterable(
-            p.map(
-                partial(includedCppProjectHeaders, projectBase=modulePath), sourceFiles
-            )
+            p.map(partial(includedCppProjectHeaders, projectBase=modulePath), sourceFiles)
         )
     return list(set(headers))
 
@@ -218,9 +210,7 @@ def foldersWithoutSourceFiles(modulePath, checkSubFolder, sources):
         return any(isInSubTree(s, directory) for s in sourceDirectories)
 
     def isNotASourceDirectory(directory):
-        return directory not in sourceDirectories and not hasChildSourceDirectory(
-            directory
-        )
+        return directory not in sourceDirectories and not hasChildSourceDirectory(directory)
 
     noSourceDirectories = []
     for sf in checkSubFolder:
@@ -289,8 +279,7 @@ def guideFolderDeletion(modulePath, candidates):
 
     deleted = []
     if queryYesNo(
-        "Do you want to remove some of them "
-        "(by choosing 'no' they are all preserved)?",
+        "Do you want to remove some of them " "(by choosing 'no' they are all preserved)?",
         default="no",
     ):
         for folder in foldersWithoutSources:
@@ -321,9 +310,7 @@ def queryEmptyRemoteURL():
 
         try:
             print("Checking the repo (you may have to introduce credentials):")
-            remoteContent = runCommand(
-                "git ls-remote {}".format(remote), suppressTraceBack=True
-            )
+            remoteContent = runCommand("git ls-remote {}".format(remote), suppressTraceBack=True)
         except subprocess.CalledProcessError:
             print(" - Could not find your repo at {}. ".format(remote))
             print(" - Please revisit the provided information.")
@@ -393,9 +380,7 @@ def guideVersionsReadme(modulePath, dependencies, readme, remoteURL=None):
     if writeVersionInfo:
         table = versionTable(dependencies)
         appendFileContent(readme, f"\n## Version Information\n\n{table}\n")
-        runGitCommand(
-            modulePath, f'git commit {readme} -m "[readme] Update version information"'
-        )
+        runGitCommand(modulePath, f'git commit {readme} -m "[readme] Update version information"')
 
         if remoteURL:
             pushRepository(modulePath, remoteURL)
@@ -599,12 +584,8 @@ if __name__ == "__main__":
         formatter_class=argparse.RawDescriptionHelpFormatter,
         epilog=EPILOG,
     )
-    parser.add_argument(
-        "module_dir", help="Module from which the subfolder is extracted"
-    )
-    parser.add_argument(
-        "subfolder", nargs="+", help='subfolder(s) of "module_dir" to be extracted'
-    )
+    parser.add_argument("module_dir", help="Module from which the subfolder is extracted")
+    parser.add_argument("subfolder", nargs="+", help='subfolder(s) of "module_dir" to be extracted')
 
     # prepare input
     args = vars(parser.parse_args())
@@ -623,9 +604,7 @@ if __name__ == "__main__":
 
     # guide user through new module creation
     print(infoInitial(moduleDirectory, subFolder, sourceFiles))
-    input(
-        "Please read the above carefully and press [Enter] to proceed or abort with [Ctrl-C]..."
-    )
+    input("Please read the above carefully and press [Enter] to proceed or abort with [Ctrl-C]...")
 
     # duneproject creates a new Dune module
     runDuneProject()
@@ -682,9 +661,7 @@ if __name__ == "__main__":
 
     # prepare new README.md file
     newReadme = os.path.join(newModulePath, readmeFileName())
-    replaceFileContent(
-        newReadme, infoReadmeMain(moduleDirectory, actualSubFolder, sourceFiles)
-    )
+    replaceFileContent(newReadme, infoReadmeMain(moduleDirectory, actualSubFolder, sourceFiles))
 
     # try to initialize repo (to use its URL in later steps)
     remoteURL = guideRepositoryInitialization(newModulePath)
diff --git a/bin/installdumux.py b/bin/installdumux.py
index e1ec3413b0..5ff12c02eb 100755
--- a/bin/installdumux.py
+++ b/bin/installdumux.py
@@ -10,20 +10,24 @@ import subprocess
 from distutils.spawn import find_executable
 from distutils.version import LooseVersion
 
-parser = argparse.ArgumentParser(prog='installdumux',
-                                 usage='./installdumux.py [OPTIONS]',
-                                 description='This script downloads and compiles the latest release of Dumux.')
+parser = argparse.ArgumentParser(
+    prog="installdumux",
+    usage="./installdumux.py [OPTIONS]",
+    description="This script downloads and compiles the latest release of Dumux.",
+)
 # Optional arguments
-parser.add_argument('--dune-version',
-                    default="2.7",
-                    help='Dune version to be checked out.')
-parser.add_argument('--dumux-version',
-                    default="3.4",
-                    help='Dumux version to be checked out.')
+parser.add_argument("--dune-version", default="2.7", help="Dune version to be checked out.")
+parser.add_argument("--dumux-version", default="3.4", help="Dumux version to be checked out.")
 args = vars(parser.parse_args())
 
-dune_branch = args["dune_version"] if args["dune_version"] == "master" else "releases/" + args["dune_version"]
-dumux_branch = args["dumux_version"] if args["dumux_version"] == "master" else "releases/" + args["dumux_version"]
+dune_branch = (
+    args["dune_version"] if args["dune_version"] == "master" else "releases/" + args["dune_version"]
+)
+dumux_branch = (
+    args["dumux_version"]
+    if args["dumux_version"] == "master"
+    else "releases/" + args["dumux_version"]
+)
 
 
 def show_message(message):
@@ -37,24 +41,36 @@ def check_cpp_version():
     result = subprocess.check_output(["g++", "-dumpversion"]).decode().strip()
     if LooseVersion(result) < LooseVersion(requiredversion):
         print("-- An error occured while checking for prerequistes.")
-        raise Exception("g++ greater than or equal to {} is required for dumux releases >=3.2!".format(requiredversion))
+        raise Exception(
+            "g++ greater than or equal to {} is required for dumux releases >=3.2!".format(
+                requiredversion
+            )
+        )
 
 
 def run_command(command, workdir="."):
     with open("../installdumux.log", "a") as log:
-        popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, cwd=workdir)
+        popen = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            universal_newlines=True,
+            cwd=workdir,
+        )
         for line in popen.stdout:
             log.write(line)
-            print(line, end='')
+            print(line, end="")
         for line in popen.stderr:
             log.write(line)
-            print(line, end='')
+            print(line, end="")
         popen.stdout.close()
         popen.stderr.close()
         return_code = popen.wait()
         if return_code:
             print("\n")
-            message = "\n    (Error) The command {} returned with non-zero exit code\n".format(command)
+            message = "\n    (Error) The command {} returned with non-zero exit code\n".format(
+                command
+            )
             message += "\n    If you can't fix the problem yourself consider reporting your issue\n"
             message += "    on the mailing list (dumux@listserv.uni-stuttgart.de) and attach the file 'installdumux.log'\n"
             show_message(message)
@@ -74,14 +90,14 @@ def git_setbranch(folder, branch):
 
 
 # clear the log file
-open('installdumux.log', 'w').close()
+open("installdumux.log", "w").close()
 
 #################################################################
 #################################################################
 # (1/3) Check some prerequistes
 #################################################################
 #################################################################
-programs = ['git', 'gcc', 'g++', 'cmake', 'pkg-config']
+programs = ["git", "gcc", "g++", "cmake", "pkg-config"]
 show_message("(1/3) Checking all prerequistes: " + " ".join(programs) + "...")
 
 # check some prerequistes
@@ -90,8 +106,10 @@ for program in programs:
         print("-- An error occured while checking for prerequistes.")
         raise Exception("Program {} has not been found.".format(program))
 
-if find_executable('paraview') is None:
-    print("-- Warning: paraview seems to be missing. You may not be able to view simulation results!")
+if find_executable("paraview") is None:
+    print(
+        "-- Warning: paraview seems to be missing. You may not be able to view simulation results!"
+    )
 
 check_cpp_version()
 
@@ -106,19 +124,21 @@ show_message("(1/3) Step completed. All prerequistes found.")
 os.makedirs("./dumux", exist_ok=True)
 os.chdir("dumux")
 
-show_message("(2/3) Cloning repositories. This may take a while. Make sure to be connected to the internet...")
+show_message(
+    "(2/3) Cloning repositories. This may take a while. Make sure to be connected to the internet..."
+)
 
 # the core modules
-for module in ['common', 'geometry', 'grid', 'localfunctions', 'istl']:
+for module in ["common", "geometry", "grid", "localfunctions", "istl"]:
     if not os.path.exists("dune-{}".format(module)):
-        git_clone('https://gitlab.dune-project.org/core/dune-{}.git'.format(module), dune_branch)
+        git_clone("https://gitlab.dune-project.org/core/dune-{}.git".format(module), dune_branch)
     else:
         print("-- Skip cloning dune-{} because the folder already exists.".format(module))
         git_setbranch("dune-{}".format(module), dune_branch)
 
 # dumux
 if not os.path.exists("dumux"):
-    git_clone('https://git.iws.uni-stuttgart.de/dumux-repositories/dumux.git', dumux_branch)
+    git_clone("https://git.iws.uni-stuttgart.de/dumux-repositories/dumux.git", dumux_branch)
 else:
     print("-- Skip cloning dumux because the folder already exists.")
     git_setbranch("dumux", dumux_branch)
@@ -131,7 +151,9 @@ show_message("(2/3) Step completed. All repositories have been cloned into a con
 # (3/3) Configure and build
 #################################################################
 #################################################################
-show_message("(3/3) Configure and build dune modules and dumux using dunecontrol. This may take several minutes...")
+show_message(
+    "(3/3) Configure and build dune modules and dumux using dunecontrol. This may take several minutes..."
+)
 
 # run dunecontrol
 run_command(command=["./dune-common/bin/dunecontrol", "--opts=dumux/cmake.opts", "all"])
@@ -143,14 +165,16 @@ show_message("(3/3) Step completed. Succesfully configured and built dune and du
 # Show message how to check that everything works
 #################################################################
 #################################################################
-test_path = 'dumux/dumux/build-cmake/test/porousmediumflow/1p'
-if dumux_branch == "master" or LooseVersion(args["dumux_version"]) > LooseVersion('3.3'):
-    test_path += '/isothermal'
+test_path = "dumux/dumux/build-cmake/test/porousmediumflow/1p"
+if dumux_branch == "master" or LooseVersion(args["dumux_version"]) > LooseVersion("3.3"):
+    test_path += "/isothermal"
 else:
-    test_path += '/implicit/isothermal'
-
-show_message("(Installation complete) To test if everything works, please run the following commands (can be copied to command line):\n\n"
-             "  cd {}\n"
-             "  make test_1p_tpfa\n"
-             "  ./test_1p_tpfa\n"
-             "  paraview *pvd\n".format(test_path))
+    test_path += "/implicit/isothermal"
+
+show_message(
+    "(Installation complete) To test if everything works, please run the following commands (can be copied to command line):\n\n"
+    "  cd {}\n"
+    "  make test_1p_tpfa\n"
+    "  ./test_1p_tpfa\n"
+    "  paraview *pvd\n".format(test_path)
+)
diff --git a/bin/installexternal.py b/bin/installexternal.py
index 68ccf70e6c..d99a9e261c 100755
--- a/bin/installexternal.py
+++ b/bin/installexternal.py
@@ -12,19 +12,23 @@ import shutil
 import re
 import argparse
 
+
 class ChoicesAction(argparse._StoreAction):
     def __init__(self, **kwargs):
         super(ChoicesAction, self).__init__(**kwargs)
         if self.choices is None:
             self.choices = []
         self._choices_actions = []
-    def add_choice(self, choice, help=''):
+
+    def add_choice(self, choice, help=""):
         self.choices.append(choice)
         choice_action = argparse.Action(option_strings=[], dest=choice, help=help)
         self._choices_actions.append(choice_action)
+
     def _get_subactions(self):
         return self._choices_actions
 
+
 def show_message(message):
     print("*" * 120)
     print(message)
@@ -33,145 +37,162 @@ def show_message(message):
 
 
 if len(sys.argv) == 1:
-    show_message('No options given. For more information run the following command: \n ./installexternal.py --help')
+    show_message(
+        "No options given. For more information run the following command: \n ./installexternal.py --help"
+    )
     sys.exit()
 
-parser = argparse.ArgumentParser(prog='installexternal',
-                                 usage='./installexternal.py [OPTIONS] PACKAGES',
-                                 description='This script downloads extenstions for dumux and dune \
-                                     and installs some External Libraries and Modules.')
-parser.register('action', 'store_choice', ChoicesAction)
+parser = argparse.ArgumentParser(
+    prog="installexternal",
+    usage="./installexternal.py [OPTIONS] PACKAGES",
+    description="This script downloads extenstions for dumux and dune \
+                                     and installs some External Libraries and Modules.",
+)
+parser.register("action", "store_choice", ChoicesAction)
 # Positional arguments
-group = parser.add_argument_group(title='your choice of packages')
-packages = group.add_argument('packages', nargs='+', metavar='PACKAGES',
-                 action='store_choice')
-packages.add_choice('dumux-extensions', help="Download dumux-course and dumux-lecture.")
-packages.add_choice('dune-extensions', help="Download dune-uggrid, dune-alugrid, dune-foamgrid, \
-                    dune-subgrid, dune-spgrid, dune-mmesh and dune-functions.")
-packages.add_choice('optimization', help="Download and install glpk and nlopt.")
-packages.add_choice('others', help="Download and install opm , metis and gstat.")
-packages.add_choice('lecture', help="Download dumux-lecture.")
-packages.add_choice('course', help="Download dumux-course.")
-packages.add_choice('ug', help="Download dune-uggrid.")
-packages.add_choice('alugrid', help="Download dune-alugrid.")
-packages.add_choice('foamgrid', help="Download dune-foamgrid.")
-packages.add_choice('subgrid', help="Download dune-subgrid.")
-packages.add_choice('spgrid', help="Download dune-spgrid.")
-packages.add_choice('mmesh', help="Download dune-mmesh.")
-packages.add_choice('functions', help="Download dune-functions.")
-packages.add_choice('glpk', help="Download and install glpk.")
-packages.add_choice('nlopt', help="Download and install nlopt.")
-packages.add_choice('opm', help="Download opm modules required for cornerpoint grids.")
-packages.add_choice('metis', help="Download and install the METIS graph partitioner.")
-packages.add_choice('gstat', help="Download and install gstat.")
+group = parser.add_argument_group(title="your choice of packages")
+packages = group.add_argument("packages", nargs="+", metavar="PACKAGES", action="store_choice")
+packages.add_choice("dumux-extensions", help="Download dumux-course and dumux-lecture.")
+packages.add_choice(
+    "dune-extensions",
+    help="Download dune-uggrid, dune-alugrid, dune-foamgrid, \
+                    dune-subgrid, dune-spgrid, dune-mmesh and dune-functions.",
+)
+packages.add_choice("optimization", help="Download and install glpk and nlopt.")
+packages.add_choice("others", help="Download and install opm , metis and gstat.")
+packages.add_choice("lecture", help="Download dumux-lecture.")
+packages.add_choice("course", help="Download dumux-course.")
+packages.add_choice("ug", help="Download dune-uggrid.")
+packages.add_choice("alugrid", help="Download dune-alugrid.")
+packages.add_choice("foamgrid", help="Download dune-foamgrid.")
+packages.add_choice("subgrid", help="Download dune-subgrid.")
+packages.add_choice("spgrid", help="Download dune-spgrid.")
+packages.add_choice("mmesh", help="Download dune-mmesh.")
+packages.add_choice("functions", help="Download dune-functions.")
+packages.add_choice("glpk", help="Download and install glpk.")
+packages.add_choice("nlopt", help="Download and install nlopt.")
+packages.add_choice("opm", help="Download opm modules required for cornerpoint grids.")
+packages.add_choice("metis", help="Download and install the METIS graph partitioner.")
+packages.add_choice("gstat", help="Download and install gstat.")
 
 
 # Optional arguments
 options = parser.add_mutually_exclusive_group(required=False)
-options.add_argument('--clean', action="store_true", default=False,
-                     help='Delete all files for the given packages.')
-options.add_argument('--download', action="store_true", default=False,
-                     help='Only download the packages.')
-
-parser.add_argument('--dune_branch', default="releases/2.7",
-                     help='Dune branch to be checked out.')
-parser.add_argument('--dumux_branch', default="releases/3.4",
-                     help='Dumux branch to be checked out.')
-parser.add_argument('--opm_branch', default="release/2020.10",
-                     help='Opm branch to be checked out.')
-parser.add_argument('--mmesh_branch', default="release/1.2",
-                     help='Mmesh branch to be checked out.')
+options.add_argument(
+    "--clean", action="store_true", default=False, help="Delete all files for the given packages."
+)
+options.add_argument(
+    "--download", action="store_true", default=False, help="Only download the packages."
+)
+
+parser.add_argument("--dune_branch", default="releases/2.7", help="Dune branch to be checked out.")
+parser.add_argument(
+    "--dumux_branch", default="releases/3.4", help="Dumux branch to be checked out."
+)
+parser.add_argument("--opm_branch", default="release/2020.10", help="Opm branch to be checked out.")
+parser.add_argument("--mmesh_branch", default="release/1.2", help="Mmesh branch to be checked out.")
 
 args = vars(parser.parse_args())
 
-def run_command(command, currentdir='.'):
-    with open(currentdir+"/installexternal.log", "a") as log:
-        popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+
+def run_command(command, currentdir="."):
+    with open(currentdir + "/installexternal.log", "a") as log:
+        popen = subprocess.Popen(
+            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
+        )
         for line in popen.stdout:
             log.write(line)
-            print(line, end='')
+            print(line, end="")
         for line in popen.stderr:
             log.write(line)
-            print(line, end='')
+            print(line, end="")
         popen.stdout.close()
         popen.stderr.close()
         return_code = popen.wait()
         if return_code:
             print("\n")
-            message = "\n    (Error) The command {} returned with non-zero exit code\n".format(command)
+            message = "\n    (Error) The command {} returned with non-zero exit code\n".format(
+                command
+            )
             message += "\n    If you can't fix the problem yourself consider reporting your issue\n"
             message += "    on the mailing list (dumux@listserv.uni-stuttgart.de) and attach the file 'installexternal.log'\n"
             show_message(message)
             sys.exit(1)
 
+
 def git_clone(url, branch=None):
     clone = ["git", "clone"]
     if branch:
         clone += ["-b", branch]
     result = run_command(command=[*clone, url])
 
+
 def install_external(args):
-    dune_branch = args['dune_branch']
-    dumux_branch = args['dumux_branch']
-    opm_branch = args['opm_branch']
-    mmesh_branch = args['mmesh_branch']
-    packages = args['packages']
-    cleanup = args['clean']
-    download = args['download']
+    dune_branch = args["dune_branch"]
+    dumux_branch = args["dumux_branch"]
+    opm_branch = args["opm_branch"]
+    mmesh_branch = args["mmesh_branch"]
+    packages = args["packages"]
+    cleanup = args["clean"]
+    download = args["download"]
 
     final_message = []
     top_dir = os.getcwd()
-    ext_dir =  top_dir + "/external"
-
+    ext_dir = top_dir + "/external"
 
     # Prepare a list of packages
     packages = []
-    for pkg in args['packages']:
+    for pkg in args["packages"]:
         if pkg in packagenames:
             packages.extend(packagenames[pkg])
         else:
             packages.extend([key for key in external_urls.keys() if pkg in key])
-    args['packages'] = packages
+    args["packages"] = packages
 
     # print the list of packages to be downloaded/installed/removed
-    print("The following package(s) will be {0}:\n".format('removed' if cleanup else 'downloaded'), ', '.join(args['packages']), "\n")
+    print(
+        "The following package(s) will be {0}:\n".format("removed" if cleanup else "downloaded"),
+        ", ".join(args["packages"]),
+        "\n",
+    )
 
     # check Location For DuneModules
     if not os.path.isdir("dune-common"):
         show_message(
             "You have to call " + sys.argv[0] + " for " + sys.argv[1] + " from\n"
             "the same directory in which dune-common is located.\n"
-            "You cannot install it in this folder.")
+            "You cannot install it in this folder."
+        )
         return
 
     # clear the log file
     logdir = ext_dir if os.path.exists(ext_dir) else top_dir
-    open(logdir+'/installexternal.log', 'w').close()
+    open(logdir + "/installexternal.log", "w").close()
 
     for package in packages:
         os.chdir(top_dir)
         # Package name for final message
-        final_message.append('[---'+package+'---]')
+        final_message.append("[---" + package + "---]")
 
         # Set the directory: create ext_dir for external packages
-        if not any([re.compile(p).match(package) for p in ['dumux','dune', 'opm']]):
+        if not any([re.compile(p).match(package) for p in ["dumux", "dune", "opm"]]):
             os.makedirs(ext_dir, exist_ok=True)
             os.chdir(ext_dir)
 
         # Set the branch
-        if 'dumux' in package:
+        if "dumux" in package:
             branch = dumux_branch
-        elif 'mmesh' in package:
+        elif "mmesh" in package:
             branch = mmesh_branch
-        elif 'dune' in package:
+        elif "dune" in package:
             branch = dune_branch
-        elif 'opm' in package:
+        elif "opm" in package:
             branch = opm_branch
 
         # Run the requested command
         if cleanup:
-            if os.path.isfile(package + '.tar.gz'):
-                os.remove(package + '.tar.gz')
+            if os.path.isfile(package + ".tar.gz"):
+                os.remove(package + ".tar.gz")
             if os.path.exists(package):
                 # Remove
                 shutil.rmtree(package)
@@ -185,7 +206,7 @@ def install_external(args):
 
         else:
             # Check if tarball
-            tarball = external_urls[package].endswith('tar.gz')
+            tarball = external_urls[package].endswith("tar.gz")
 
             if not os.path.exists(package):
 
@@ -195,7 +216,7 @@ def install_external(args):
                     filedata = urllib.request.urlopen(external_urls[package])
                     datatowrite = filedata.read()
 
-                    with open(ext_dir + "/" + package +".tar.gz", 'wb') as f:
+                    with open(ext_dir + "/" + package + ".tar.gz", "wb") as f:
                         f.write(datatowrite)
                     # Save message to be shown at the end
                     final_message.append("{} has been sucessfully downloaded.".format(package))
@@ -203,31 +224,33 @@ def install_external(args):
                     # Start Installation if the flag download is set to false.
                     if not download:
                         # Extract
-                        tf = tarfile.open(package+".tar.gz")
+                        tf = tarfile.open(package + ".tar.gz")
                         tf.extractall()
 
                         # Rename
                         shutil.move(os.path.commonprefix(tf.getnames()), package)
 
                         # Start the configuration
-                        os.chdir(ext_dir + "/"+package)
-                        if package == 'gstat':
-                            with open('configure', 'r+') as f:
+                        os.chdir(ext_dir + "/" + package)
+                        if package == "gstat":
+                            with open("configure", "r+") as f:
                                 content = f.read()
                                 f.seek(0)
                                 f.truncate()
-                                f.write(content.replace('doc/tex/makefile', ''))
+                                f.write(content.replace("doc/tex/makefile", ""))
 
                         # Run Configuration command
-                        configcmd = "./configure" if package != 'metis' else ["make", "config"]
+                        configcmd = "./configure" if package != "metis" else ["make", "config"]
                         run_command(configcmd, currentdir=ext_dir)
                         try:
                             run_command("make", currentdir=ext_dir)
                         except:
                             raise Exception("{} installation has failed.".format(package))
                         # Save message to be shown at the end
-                        if os.path.exists(ext_dir+ "/" + package):
-                            final_message.append("{} has been successfully installed.".format(package))
+                        if os.path.exists(ext_dir + "/" + package):
+                            final_message.append(
+                                "{} has been successfully installed.".format(package)
+                            )
 
                 else:
                     # Clone from repo
@@ -239,10 +262,12 @@ def install_external(args):
                     final_message.append("{} has been already installed.".format(package))
                 else:
                     # Checkout to the requested branch
-                    os.chdir(top_dir + '/' + package)
+                    os.chdir(top_dir + "/" + package)
                     subprocess.Popen(["git", "checkout", branch])
                     # Save message to be shown at the end
-                    final_message.append("-- Skip cloning {}, because the folder already exists.".format(package))
+                    final_message.append(
+                        "-- Skip cloning {}, because the folder already exists.".format(package)
+                    )
                     final_message.append("-- Checking out {} ".format(package) + branch)
                     continue
 
@@ -254,16 +279,19 @@ def install_external(args):
         os.chdir(top_dir)
 
     # Save post installation message about dunecontrol if need be.
-    if not cleanup and any(x in pkg for pkg in packages for x in ["dumux","dune","opm"]):
-        final_message.append("\n\nPlease run the following command (can be copied to command line):\n\n  ./dune-common/bin/dunecontrol --opts=./dumux/cmake.opts all")
+    if not cleanup and any(x in pkg for pkg in packages for x in ["dumux", "dune", "opm"]):
+        final_message.append(
+            "\n\nPlease run the following command (can be copied to command line):\n\n  ./dune-common/bin/dunecontrol --opts=./dumux/cmake.opts all"
+        )
 
     # If cleanup and only logfile in the external directory, remove the directory
     if os.path.isdir(ext_dir):
         _, _, files = next(os.walk(ext_dir))
-        if cleanup and len(files)==1 and 'installexternal.log' in files:
+        if cleanup and len(files) == 1 and "installexternal.log" in files:
             shutil.rmtree(ext_dir)
 
-    return '\n'.join(final_message)
+    return "\n".join(final_message)
+
 
 #################################################################
 #################################################################
@@ -293,27 +321,42 @@ external_urls = {
 
 packagenames = {
     "dumux-extensions": ["dumux-lecture", "dumux-course"],
-    "dune-extensions": ["dune-uggrid", "dune-alugrid", "dune-foamgrid",
-                        "dune-subgrid", "dune-spgrid", "dune-mmesh",
-                        "dune-functions", "dune-typetree"],
+    "dune-extensions": [
+        "dune-uggrid",
+        "dune-alugrid",
+        "dune-foamgrid",
+        "dune-subgrid",
+        "dune-spgrid",
+        "dune-mmesh",
+        "dune-functions",
+        "dune-typetree",
+    ],
     "functions": ["dune-functions", "dune-typetree"],
     "optimization": ["glpk", "nlopt"],
-    "others": ["opm-common", "opm-grid", "metis", "gstat"]
+    "others": ["opm-common", "opm-grid", "metis", "gstat"],
 }
 
-messages ={
-    'glpk': ["In addition, it might be necessary to set manually",
-            "the glpk path in the CMAKE_FLAGS section of the .opts-file:",
-            "  -DGLPK_ROOT=/path/to/glpk \\"],
-    'dune-mmesh': ["Maybe you also have to install CGAL",
-            "(see cgal.org/download.html)", "Maybe you also need to change your core dune modules' branches to their newest versions."],
-    'opm-common': ["In addition, it might be necessary to set manually some",
-                "CMake variables in the CMAKE_FLAGS section of the .opts-file:",
-                "  -DUSE_MPI=ON",
-                "Currently, compiling opm with clang is not possible.", "",
-                "Maybe you also have to install the following packages (see the",
-                " opm prerequisites at opm-project.org):",
-                "  BLAS, LAPACK, Boost, SuiteSparse, Zoltan"]
+messages = {
+    "glpk": [
+        "In addition, it might be necessary to set manually",
+        "the glpk path in the CMAKE_FLAGS section of the .opts-file:",
+        "  -DGLPK_ROOT=/path/to/glpk \\",
+    ],
+    "dune-mmesh": [
+        "Maybe you also have to install CGAL",
+        "(see cgal.org/download.html)",
+        "Maybe you also need to change your core dune modules' branches to their newest versions.",
+    ],
+    "opm-common": [
+        "In addition, it might be necessary to set manually some",
+        "CMake variables in the CMAKE_FLAGS section of the .opts-file:",
+        "  -DUSE_MPI=ON",
+        "Currently, compiling opm with clang is not possible.",
+        "",
+        "Maybe you also have to install the following packages (see the",
+        " opm prerequisites at opm-project.org):",
+        "  BLAS, LAPACK, Boost, SuiteSparse, Zoltan",
+    ],
 }
 
 
diff --git a/bin/make_installscript.py b/bin/make_installscript.py
index 12103d3101..bba4014331 100755
--- a/bin/make_installscript.py
+++ b/bin/make_installscript.py
@@ -36,9 +36,7 @@ if __name__ == "__main__":
         "This expects that all modules are git repositories and "
         "have a remote origin URL defined."
     )
-    parser.add_argument(
-        "-p", "--path", required=True, help="The path to your dune module"
-    )
+    parser.add_argument("-p", "--path", required=True, help="The path to your dune module")
     parser.add_argument(
         "-f",
         "--filename",
@@ -112,11 +110,7 @@ if __name__ == "__main__":
     modPath = os.path.abspath(modPath)
     modName = getModuleInfo(modPath, "Module")
     printProgressInfo(
-        [
-            "Creating install script for module '{}' in folder '{}'".format(
-                modName, modPath
-            )
-        ]
+        ["Creating install script for module '{}' in folder '{}'".format(modName, modPath)]
     )
 
     language = cmdArgs["language"]
diff --git a/bin/postprocessing/exportscreenshot2d.py b/bin/postprocessing/exportscreenshot2d.py
index d7a826a52e..2032ff41a1 100644
--- a/bin/postprocessing/exportscreenshot2d.py
+++ b/bin/postprocessing/exportscreenshot2d.py
@@ -9,47 +9,153 @@ import argparse
 import os
 import sys
 
-bool = ['True','False']
-parameterType = ['CELLS','POINTS']
-legendOrientation = ['Horizontal','Vertical']
+bool = ["True", "False"]
+parameterType = ["CELLS", "POINTS"]
+legendOrientation = ["Horizontal", "Vertical"]
 parser = argparse.ArgumentParser(
-  prog='\033[1m\033[94m' + 'pvbatch' + '\033[0m' + ' ' + sys.argv[0],
-  description='Export a screenshot of a standard 2D plot. To change the color palette, change the default in the paraview GUI.'
+    prog="\033[1m\033[94m" + "pvbatch" + "\033[0m" + " " + sys.argv[0],
+    description="Export a screenshot of a standard 2D plot. To change the color palette, change the default in the paraview GUI.",
 )
 # on/off-type features
 offscreen = parser.add_mutually_exclusive_group(required=False)
-offscreen.add_argument('--offscreen', dest='offscreen', action='store_true', help='Enable offscreen rendering (large pixel size)')
-offscreen.add_argument('--no-offscreen', dest='offscreen', action='store_false', help='Disable offscreen rendering (low pixel sizes)')
+offscreen.add_argument(
+    "--offscreen",
+    dest="offscreen",
+    action="store_true",
+    help="Enable offscreen rendering (large pixel size)",
+)
+offscreen.add_argument(
+    "--no-offscreen",
+    dest="offscreen",
+    action="store_false",
+    help="Disable offscreen rendering (low pixel sizes)",
+)
 parser.set_defaults(offscreen=False)
 showaxesgrid = parser.add_mutually_exclusive_group(required=False)
-showaxesgrid.add_argument('--showAxesGrid', dest='showAxesGrid', action='store_true', help='Show the axes grid for the domain')
-showaxesgrid.add_argument('--no-showAxesGrid', dest='showAxesGrid', action='store_false', help='Do not show the axes grid for the domain')
+showaxesgrid.add_argument(
+    "--showAxesGrid",
+    dest="showAxesGrid",
+    action="store_true",
+    help="Show the axes grid for the domain",
+)
+showaxesgrid.add_argument(
+    "--no-showAxesGrid",
+    dest="showAxesGrid",
+    action="store_false",
+    help="Do not show the axes grid for the domain",
+)
 parser.set_defaults(showAxesGrid=False)
 showlegend = parser.add_mutually_exclusive_group(required=False)
-showlegend.add_argument('--showLegend', dest='showLegend', action='store_true', help='Show the parameter legend/range')
-showlegend.add_argument('--no-showLegend', dest='showLegend', action='store_false', help='Do not show the parameter legend/range')
+showlegend.add_argument(
+    "--showLegend",
+    dest="showLegend",
+    action="store_true",
+    help="Show the parameter legend/range",
+)
+showlegend.add_argument(
+    "--no-showLegend",
+    dest="showLegend",
+    action="store_false",
+    help="Do not show the parameter legend/range",
+)
 parser.set_defaults(showLegend=True)
 showorientaxes = parser.add_mutually_exclusive_group(required=False)
-showorientaxes.add_argument('--showOrientationAxes', dest='showOrientationAxes', action='store_true', help='Show the orientation axis')
-showorientaxes.add_argument('--no-showOrientationAxes', dest='showOrientationAxes', action='store_false', help='Do not the orientation axis')
+showorientaxes.add_argument(
+    "--showOrientationAxes",
+    dest="showOrientationAxes",
+    action="store_true",
+    help="Show the orientation axis",
+)
+showorientaxes.add_argument(
+    "--no-showOrientationAxes",
+    dest="showOrientationAxes",
+    action="store_false",
+    help="Do not show the orientation axis",
+)
 parser.set_defaults(showOrientationAxes=False)
 # more complicated features
-parser.add_argument('-f', '--files', nargs='+', required=True, help="vtu files to be processed")
-parser.add_argument('-o', '--outputDirectory', default='', help="Directory to which the pcitures are written")
-parser.add_argument('-of', '--outFile', default='', help="Basename of the written png file")
-parser.add_argument('-p', '--parameter', default='', help='The name of the parameter to be plotted')
-parser.add_argument('-pc', '--parameterComponent', type=int, default=-1, help='Plot only a specific component of a vector (default: magnitude=-1)')
-parser.add_argument('-pr', '--parameterRange', type=float, nargs=2, default=[0, 0], help='Adjustment of the color rane (default: min/max)')
-parser.add_argument('-pt', '--parameterType', choices=parameterType, default='CELLS', help='The type of the data field (CELLS or POINTS)')
-parser.add_argument('-lo', '--legendOrientation', choices=legendOrientation, default='Horizontal', help='The name of the parameter to be plotted')
-parser.add_argument('-lp', '--legendPosition', type=float, nargs=2, default=[0.25, 0.0], help='The position of the legend')
-parser.add_argument('-lz', '--legendZoom', type=float, nargs=2, default=[0.5, 0.5], help='The zoom of the legend')
-parser.add_argument('-lt', '--legendTitle', default='', help="Title of the legend")
-parser.add_argument('-lct', '--legendComponentTitle', default='none', help="Title of the legend component")
-parser.add_argument('--size', type=int, nargs=2, default=[600, 400], help="The pixel size of the png file (default: 600x400)")
-parser.add_argument('--scale', type=float, nargs=3, default=[1.0, 1.0, 1.0], help="Scaling factors in each direction")
-parser.add_argument('--whiteBackground', dest='whiteBackground', action='store_true', help="Sets a white background")
-parser.add_argument('-v', '--verbosity', type=int, default=2, help='Verbosity of the output')
+parser.add_argument("-f", "--files", nargs="+", required=True, help="vtu files to be processed")
+parser.add_argument(
+    "-o",
+    "--outputDirectory",
+    default="",
+    help="Directory to which the pictures are written",
+)
+parser.add_argument("-of", "--outFile", default="", help="Basename of the written png file")
+parser.add_argument("-p", "--parameter", default="", help="The name of the parameter to be plotted")
+parser.add_argument(
+    "-pc",
+    "--parameterComponent",
+    type=int,
+    default=-1,
+    help="Plot only a specific component of a vector (default: magnitude=-1)",
+)
+parser.add_argument(
+    "-pr",
+    "--parameterRange",
+    type=float,
+    nargs=2,
+    default=[0, 0],
+    help="Adjustment of the color range (default: min/max)",
+)
+parser.add_argument(
+    "-pt",
+    "--parameterType",
+    choices=parameterType,
+    default="CELLS",
+    help="The type of the data field (CELLS or POINTS)",
+)
+parser.add_argument(
+    "-lo",
+    "--legendOrientation",
+    choices=legendOrientation,
+    default="Horizontal",
+    help="The orientation of the legend (Horizontal or Vertical)",
+)
+parser.add_argument(
+    "-lp",
+    "--legendPosition",
+    type=float,
+    nargs=2,
+    default=[0.25, 0.0],
+    help="The position of the legend",
+)
+parser.add_argument(
+    "-lz",
+    "--legendZoom",
+    type=float,
+    nargs=2,
+    default=[0.5, 0.5],
+    help="The zoom of the legend",
+)
+parser.add_argument("-lt", "--legendTitle", default="", help="Title of the legend")
+parser.add_argument(
+    "-lct",
+    "--legendComponentTitle",
+    default="none",
+    help="Title of the legend component",
+)
+parser.add_argument(
+    "--size",
+    type=int,
+    nargs=2,
+    default=[600, 400],
+    help="The pixel size of the png file (default: 600x400)",
+)
+parser.add_argument(
+    "--scale",
+    type=float,
+    nargs=3,
+    default=[1.0, 1.0, 1.0],
+    help="Scaling factors in each direction",
+)
+parser.add_argument(
+    "--whiteBackground",
+    dest="whiteBackground",
+    action="store_true",
+    help="Sets a white background",
+)
+parser.add_argument("-v", "--verbosity", type=int, default=2, help="Verbosity of the output")
 args = vars(parser.parse_args())
 
 try:
@@ -59,34 +165,36 @@ except ImportError:
 
 # import locations
 commonOutDirectory = False
-outDirectory = args['outputDirectory']
-if not outDirectory == '':
-    outDirectory += '/'
+outDirectory = args["outputDirectory"]
+if not outDirectory == "":
+    outDirectory += "/"
     commonOutDirectory = True
     if not os.path.exists(outDirectory):
         os.makedirs(outDirectory)
 
 # loop over all vtu files
 counter = 1
-for curFile in args['files']:
+for curFile in args["files"]:
     # print progress to command line
     fileWithoutPath = os.path.basename(curFile)
     if not commonOutDirectory:
         abspath = os.path.abspath(curFile)
-        outDirectory = os.path.dirname(abspath) + '/'
+        outDirectory = os.path.dirname(abspath) + "/"
     basename = os.path.splitext(fileWithoutPath)[0]
-    if args['verbosity'] == 1:
-        print("Processing file ({}/{}): {}".format(counter, len(args['files']), fileWithoutPath))
+    if args["verbosity"] == 1:
+        print("Processing file ({}/{}): {}".format(counter, len(args["files"]), fileWithoutPath))
     counter += 1
 
     # read vtu file and print available parameters
     vtuFile = XMLUnstructuredGridReader(FileName=curFile)
-    if args['parameter'] == '':
-        print "\nNo parameter was specified, use '-p PARAMETER' to specify it. Available parameters are:"
-        if args['parameterType'] == 'CELLS':
-            print vtuFile.CellArrayStatus
+    if args["parameter"] == "":
+        print(
+            "\nNo parameter was specified, use '-p PARAMETER' to specify it. Available parameters are:"
+        )
+        if args["parameterType"] == "CELLS":
+            print(vtuFile.CellArrayStatus)
         else:
-            print vtuFile.PointArrayStatus
+            print(vtuFile.PointArrayStatus)
         exit(1)
 
     # get active view
@@ -96,73 +204,79 @@ for curFile in args['files']:
         renderView1 = CreateRenderView()
 
     # print additional help message for large picture sizes
-    if (args['size'][0] > 1024 or args['size'][1] > 1024) and args['offscreen'] == False:
-        print "\nIt seems like you want to export a picture greater then your actual screen size. Use:"
-        print "pvbatch --use-offscreen-rendering SCRIPT OPTIONS --offscreen"
+    if (args["size"][0] > 1024 or args["size"][1] > 1024) and args["offscreen"] == False:
+        print(
+            "\nIt seems like you want to export a picture greater than your actual screen size. Use:"
+        )
+        print("pvbatch --use-offscreen-rendering SCRIPT OPTIONS --offscreen")
         exit(2)
-    renderView1.ViewSize = args['size']
+    renderView1.ViewSize = args["size"]
 
-    if args['showOrientationAxes'] == False:
+    if args["showOrientationAxes"] == False:
         renderView1.OrientationAxesVisibility = 0
 
-    if args['showAxesGrid'] == True:
+    if args["showAxesGrid"] == True:
         renderView1.AxesGrid.Visibility = 1
 
     # show data in view
     vtuFileDisplay = Show(vtuFile, renderView1)
-    vtuFileDisplay.Scale = args['scale']
+    vtuFileDisplay.Scale = args["scale"]
 
     # reset view to fit data
     renderView1.ResetCamera()
 
     # set scalar coloring
-    ColorBy(vtuFileDisplay, (args['parameterType'], args['parameter']))
+    ColorBy(vtuFileDisplay, (args["parameterType"], args["parameter"]))
 
     # show color bar/color legend
-    if args['showLegend'] == True:
+    if args["showLegend"] == True:
         vtuFileDisplay.SetScalarBarVisibility(renderView1, True)
 
     # get color transfer function/color map for the parameter
-    parameterLUT = GetColorTransferFunction(args['parameter'])
+    parameterLUT = GetColorTransferFunction(args["parameter"])
 
     # plot only a specific vector component
-    if args['parameterComponent'] != -1:
-        parameterLUT.VectorMode = 'Component'
-        parameterLUT.VectorComponent = args['parameterComponent']
-        #if args['parameterRange'][0] == 0 and args['parameterRange'][1] == 0:
-            #vtuFileDisplay.RescaleTransferFunctionToDataRange(False)
-        if args['showLegend'] == True:
+    if args["parameterComponent"] != -1:
+        parameterLUT.VectorMode = "Component"
+        parameterLUT.VectorComponent = args["parameterComponent"]
+        # if args['parameterRange'][0] == 0 and args['parameterRange'][1] == 0:
+        # vtuFileDisplay.RescaleTransferFunctionToDataRange(False)
+        if args["showLegend"] == True:
             velocityLUTColorBar = GetScalarBar(parameterLUT, renderView1)
-            velocityLUTColorBar.Title = args['parameter']
-            velocityLUTColorBar.ComponentTitle = str(args['parameterComponent'])
+            velocityLUTColorBar.Title = args["parameter"]
+            velocityLUTColorBar.ComponentTitle = str(args["parameterComponent"])
 
     # adjust the range of the legend
-    if args['parameterRange'][0] != 0 or args['parameterRange'][1] != 0:
-        mean = (args['parameterRange'][0] + args['parameterRange'][1]) / 2
-        parameterLUT.RGBPoints = [args['parameterRange'][0], 0.231373, 0.298039, 0.752941,
-                                  mean, 0.865003, 0.865003, 0.865003,
-                                  args['parameterRange'][1], 0.705882, 0.0156863, 0.14902]
-    if args['parameterRange'][0] == 0 and args['parameterRange'][1] == 0:
+    if args["parameterRange"][0] != 0 or args["parameterRange"][1] != 0:
+        mean = (args["parameterRange"][0] + args["parameterRange"][1]) / 2
+        # fmt: off
+        parameterLUT.RGBPoints = [
+            args["parameterRange"][0], 0.231373, 0.298039, 0.752941,
+            mean, 0.865003, 0.865003, 0.865003,
+            args["parameterRange"][1], 0.705882, 0.0156863, 0.14902,
+        ]
+        # fmt: on
+    if args["parameterRange"][0] == 0 and args["parameterRange"][1] == 0:
         vtuFileDisplay.RescaleTransferFunctionToDataRange(False)
 
     # the placement and size of the legend
     legend = GetScalarBar(parameterLUT, renderView1)
-    legend.Position = args['legendPosition']
-    legend.Position2 = args['legendZoom']
-    legend.Orientation = args['legendOrientation']
+    legend.Position = args["legendPosition"]
+    legend.Position2 = args["legendZoom"]
+    legend.Orientation = args["legendOrientation"]
 
     # rename the legend if desired
-    if args['legendTitle'] != '':
-        legend.Title = args['legendTitle']
-    if args['legendComponentTitle'] != 'none':
-        legend.ComponentTitle = args['legendComponentTitle']
+    if args["legendTitle"] != "":
+        legend.Title = args["legendTitle"]
+    if args["legendComponentTitle"] != "none":
+        legend.ComponentTitle = args["legendComponentTitle"]
 
     # set a white background color and black color for fonts and the grid
-    if args['whiteBackground'] == True:
+    if args["whiteBackground"] == True:
         renderView1.Background = [255, 255, 255]
         legend.TitleColor = [0.0, 0.0, 0.0]
         legend.LabelColor = [0.0, 0.0, 0.0]
-        if args['showAxesGrid'] == True:
+        if args["showAxesGrid"] == True:
             renderView1.AxesGrid.GridColor = [0.0, 0.0, 0.0]
             renderView1.AxesGrid.XTitleColor = [0.0, 0.0, 0.0]
             renderView1.AxesGrid.YTitleColor = [0.0, 0.0, 0.0]
@@ -172,15 +286,15 @@ for curFile in args['files']:
             renderView1.AxesGrid.ZLabelColor = [0.0, 0.0, 0.0]
 
     # current camera placement for renderView1
-    renderView1.InteractionMode = '2D'
-    #renderView1.CameraPosition = [5.0, 0.12345, 0.0]
-    #renderView1.CameraFocalPoint = [5.0, 0.12345, 0.0]
+    renderView1.InteractionMode = "2D"
+    # renderView1.CameraPosition = [5.0, 0.12345, 0.0]
+    # renderView1.CameraFocalPoint = [5.0, 0.12345, 0.0]
 
     # uncomment the following to render all views
     RenderAllViews()
     UseOffscreenRenderingForScreenshots = 1
 
     # save screenshot
-    if not args['outFile'] == '':
-        basename = args['outFile']
-    SaveScreenshot(outDirectory + basename + '.png')
+    if not args["outFile"] == "":
+        basename = args["outFile"]
+    SaveScreenshot(outDirectory + basename + ".png")
diff --git a/bin/postprocessing/extractlinedata.py b/bin/postprocessing/extractlinedata.py
index 0347cb736f..7f5a7817d1 100644
--- a/bin/postprocessing/extractlinedata.py
+++ b/bin/postprocessing/extractlinedata.py
@@ -5,16 +5,47 @@ import os
 
 # parse arguments
 parser = argparse.ArgumentParser(
-  prog='\033[1m\033[94m' + 'pvpython' + '\033[0m' + ' ' + sys.argv[0],
-  description='Extract data from the paraview plotOverLine filter.'
+    prog="\033[1m\033[94m" + "pvpython" + "\033[0m" + " " + sys.argv[0],
+    description="Extract data from the paraview plotOverLine filter.",
+)
+parser.add_argument("-f", "--files", nargs="+", required=True, help="vtu files to be processed")
+parser.add_argument(
+    "-o",
+    "--outputDirectory",
+    default="",
+    help="Directory to which the data files are written",
+)
+parser.add_argument("-of", "--outFile", default="", help="Basename of the written csv file")
+parser.add_argument(
+    "-p1",
+    "--point1",
+    type=float,
+    nargs=3,
+    required=True,
+    help="Coordinates of the first point (in 3D)",
+)
+parser.add_argument(
+    "-p2",
+    "--point2",
+    type=float,
+    nargs=3,
+    required=True,
+    help="Coordinates of the second point (in 3D)",
+)
+parser.add_argument(
+    "-r",
+    "--resolution",
+    type=int,
+    default=1000,
+    help="Resolution of the line (number of data points written to data file)",
+)
+parser.add_argument(
+    "-v",
+    "--verbosity",
+    type=int,
+    default=2,
+    help="Verbosity of the output. 1 = print progress. 2 = print data columns",
 )
-parser.add_argument('-f', '--files', nargs='+', required=True, help="vtu files to be processed")
-parser.add_argument('-o', '--outputDirectory', default='', help="Directory to which the data files are written")
-parser.add_argument('-of', '--outFile', default='', help="Basename of the written csv file")
-parser.add_argument('-p1', '--point1', type=float, nargs=3, required=True, help='Coordinates of the first point (in 3D)')
-parser.add_argument('-p2', '--point2', type=float, nargs=3, required=True, help='Coordinates of the second point (in 3D)')
-parser.add_argument('-r', '--resolution', type=int, default=1000, help='Resolution of the line (number of data points written to data file)')
-parser.add_argument('-v', '--verbosity', type=int, default=2, help='Verbosity of the output. 1 = print progress. 2 = print data columns')
 args = vars(parser.parse_args())
 
 try:
@@ -23,31 +54,37 @@ except:
     raise ImportError("`paraview.simple` not found. Make sure using pvpython instead of python.")
 
 # import locations
-outDirectory = args['outputDirectory']
+outDirectory = args["outputDirectory"]
 if outDirectory.strip():
     os.makedirs(outDirectory, exist_ok=True)
 
 # loop over all vtk files
 counter = 0
-for curFile in args['files']:
+for curFile in args["files"]:
 
     # if no output directory was specified, use the directory of the given file
     curOutDirectory = outDirectory
-    if curOutDirectory == '':
-        curOutDirectory = os.path.dirname( os.path.abspath(curFile) )
+    if curOutDirectory == "":
+        curOutDirectory = os.path.dirname(os.path.abspath(curFile))
 
     # if no basename was specified, reuse the file name for the .csv file
-    if args['outFile'] == '':
-        csvFileName = os.path.join(curOutDirectory, os.path.splitext(os.path.basename(curFile))[0] + ".csv")
-    elif len(args['files']) > 1:
-        csvFileName = os.path.join(curOutDirectory, args['outFile'] + "_" + str(counter) + ".csv")
+    if args["outFile"] == "":
+        csvFileName = os.path.join(
+            curOutDirectory, os.path.splitext(os.path.basename(curFile))[0] + ".csv"
+        )
+    elif len(args["files"]) > 1:
+        csvFileName = os.path.join(curOutDirectory, args["outFile"] + "_" + str(counter) + ".csv")
     else:
-        csvFileName = os.path.join(curOutDirectory, args['outFile'] + ".csv")
+        csvFileName = os.path.join(curOutDirectory, args["outFile"] + ".csv")
     counter += 1
 
     # print progress to command line
-    if args['verbosity'] == 1:
-        print("Processing file ({}/{}): {}".format(counter, len(args['files']), os.path.basename(curFile)))
+    if args["verbosity"] == 1:
+        print(
+            "Processing file ({}/{}): {}".format(
+                counter, len(args["files"]), os.path.basename(curFile)
+            )
+        )
 
     # load vtk file
     if os.path.splitext(curFile)[1] == ".vtp":
@@ -58,18 +95,18 @@ for curFile in args['files']:
 
     # apply and configure PlotOverLine filter
     plotOverLine = PlotOverLine(Source="High Resolution Line Source")
-    plotOverLine.Source.Resolution = args['resolution']
-    plotOverLine.Source.Point1 = args['point1']
-    plotOverLine.Source.Point2 = args['point2']
+    plotOverLine.Source.Resolution = args["resolution"]
+    plotOverLine.Source.Point1 = args["point1"]
+    plotOverLine.Source.Point2 = args["point2"]
 
     # write output to csv writer
     writer = CreateWriter(csvFileName, plotOverLine)
     writer.UpdatePipeline()
 
     # print the parameters and the column numbers
-    if args['verbosity'] == 2:
+    if args["verbosity"] == 2:
         with open(csvFileName) as csvFile:
             print(csvFileName)
             paramList = list(csv.reader(csvFile))[0]
             for i, param in enumerate(paramList):
-                print("{:>5}  {}".format(i+1, param))
+                print("{:>5}  {}".format(i + 1, param))
diff --git a/bin/postprocessing/extractpointdataovertime.py b/bin/postprocessing/extractpointdataovertime.py
index 2fcf9394dd..6c4132dad6 100644
--- a/bin/postprocessing/extractpointdataovertime.py
+++ b/bin/postprocessing/extractpointdataovertime.py
@@ -1,19 +1,36 @@
 import argparse
 import csv
-import fileinput
 import os
 import sys
 
 # parse arguments
 parser = argparse.ArgumentParser(
-  prog='\033[1m\033[94m' + 'pvpython' + '\033[0m' + ' ' + sys.argv[0],
-  description='Extract data from the paraview probeLocation and plotOverTime filters.'
+    prog="\033[1m\033[94m" + "pvpython" + "\033[0m" + " " + sys.argv[0],
+    description="Extract data from the paraview probeLocation and plotOverTime filters.",
+)
+parser.add_argument("-f", "--files", nargs="+", required=True, help="pvd files to be processed")
+parser.add_argument(
+    "-o",
+    "--outputDirectory",
+    default="",
+    help="Directory to which the .csv files are written",
+)
+parser.add_argument("-of", "--outFile", default="", help="Basename of the written csv file")
+parser.add_argument(
+    "-p",
+    "--point",
+    type=float,
+    nargs=3,
+    required=True,
+    help="Coordinates of the probed point (in 3D)",
+)
+parser.add_argument(
+    "-v",
+    "--verbosity",
+    type=int,
+    default=2,
+    help="Verbosity of the output. 1 = print progress. 2 = print data columns",
 )
-parser.add_argument('-f', '--files', nargs='+', required=True, help="pvd files to be processed")
-parser.add_argument('-o', '--outputDirectory', default='', help="Directory to which the .csv files are written")
-parser.add_argument('-of', '--outFile', default='', help="Basename of the written csv file")
-parser.add_argument('-p', '--point', type=float, nargs=3, required=True, help='Coordinates of the probed point (in 3D)')
-parser.add_argument('-v', '--verbosity', type=int, default=2, help='Verbosity of the output. 1 = print progress. 2 = print data columns')
 args = vars(parser.parse_args())
 
 try:
@@ -23,31 +40,33 @@ except ImportError:
 
 # import locations
 commonOutDirectory = False
-outDirectory = args['outputDirectory']
-if not outDirectory == '':
-    outDirectory += '/'
+outDirectory = args["outputDirectory"]
+if not outDirectory == "":
+    outDirectory += "/"
     commonOutDirectory = True
     if not os.path.exists(outDirectory):
         os.makedirs(outDirectory)
 
 # loop over all pvd files
 counter = 1
-for curFile in args['files']:
+for curFile in args["files"]:
     # print progress to command line
     fileWithoutPath = os.path.basename(curFile)
     if not commonOutDirectory:
         abspath = os.path.abspath(curFile)
-        outDirectory = os.path.dirname(abspath) + '/'
+        outDirectory = os.path.dirname(abspath) + "/"
     basename = os.path.splitext(fileWithoutPath)[0]
-    if args['verbosity'] == 1:
-        print("Processing file ({}/{}): {}".format(counter, len(args['files']), curFile))
+    if args["verbosity"] == 1:
+        print("Processing file ({}/{}): {}".format(counter, len(args["files"]), curFile))
     counter += 1
 
     # load pvd file
     pvdFile = PVDReader(FileName=curFile)
 
     # Extract Point and Probe at a location
-    selectionSource = IDSelectionSource( ContainingCells=0, InsideOut=False, FieldType='POINT', IDs=0 )
+    selectionSource = IDSelectionSource(
+        ContainingCells=0, InsideOut=False, FieldType="POINT", IDs=0
+    )
     ExtractSelection = ExtractSelection(Selection=selectionSource, Input=pvdFile)
     ExtractSelection.UpdatePipeline()
     selectionData = servermanager.Fetch(ExtractSelection)
@@ -55,39 +74,39 @@ for curFile in args['files']:
     probeLocation = ProbeLocation()
     probeLocation.Input = pvdFile
     pointSource = probeLocation.ProbeType
-    pointSource.Center.SetData(args['point'])
+    pointSource.Center.SetData(args["point"])
     probeLocation.UpdatePipeline()
 
     # Parse the extracted source and plot over time
-    selectionSourceprobeLocationation = IDSelectionSource( ContainingCells=0, InsideOut=False, FieldType='POINT', IDs=[0, 0] )
+    selectionSourceprobeLocationation = IDSelectionSource(
+        ContainingCells=0, InsideOut=False, FieldType="POINT", IDs=[0, 0]
+    )
     plotSelectionOverTime = PlotSelectionOverTime(OnlyReportSelectionStatistics=False)
     plotSelectionOverTime.Selection = selectionSourceprobeLocationation
     plotSelectionOverTime.Input = probeLocation
 
     # write output to csv writer
-    if not args['outFile'] == '':
-        basename = args['outFile']
-    csvFile = outDirectory + basename + '.csv'
+    if not args["outFile"] == "":
+        basename = args["outFile"]
+    csvFile = outDirectory + basename + ".csv"
     writer = CreateWriter(csvFile, plotSelectionOverTime)
     writer.UpdatePipeline()
 
     # print the parameters and the column numbers
-    if args['verbosity'] == 2:
-        with open(outDirectory + basename + '0.csv') as file:
-            print outDirectory + basename + '.csv'
+    if args["verbosity"] == 2:
+        with open(outDirectory + basename + "0.csv") as file:
+            print(outDirectory + basename + ".csv")
             reader = csv.reader(file)
             paramList = list(reader)
-            paramCounter=1
+            paramCounter = 1
             for param in paramList[0]:
-                print "%-2i   %s" % (paramCounter, param)
+                print("%-2i   %s" % (paramCounter, param))
                 paramCounter += 1
 
     # create a proper csv file with semicolons as separators
-    f = open(outDirectory + basename + '0.csv', 'r')
-    filedata = f.read()
-    f.close()
-    os.remove(outDirectory + basename + '0.csv')
-    newdata = filedata.replace(',', ';')
-    f = open(csvFile,'w')
-    f.write(newdata)
-    f.close()
+    with open(outDirectory + basename + "0.csv", "r") as file:
+        filedata = file.read()
+    os.remove(outDirectory + basename + "0.csv")
+    newdata = filedata.replace(",", ";")
+    with open(csvFile, "w") as file:
+        file.write(newdata)
diff --git a/bin/postprocessing/l2error.py b/bin/postprocessing/l2error.py
index 5f27863785..cc75447368 100644
--- a/bin/postprocessing/l2error.py
+++ b/bin/postprocessing/l2error.py
@@ -2,64 +2,99 @@ import argparse
 import csv
 import sys
 
-#Auxiliary function that provides a handy parser
-parser = argparse.ArgumentParser(prog='python ' + sys.argv[0], description='Calculate the l2 error of csv data files.')
-parser.add_argument('-f1', '--reference', type=str, required=True, help='Reference csv-file')
-parser.add_argument('-f2', '--newSolution', type=str, required=True, help='NewSolution csv-file')
-parser.add_argument('-xMin', '--xMin', type=float, required=False, default=-1e99, help='Restrict data to x>xMin')
-parser.add_argument('-xMax', '--xMax', type=float, required=False, default=1e99, help='Restrict data to x>xMax')
+# Auxiliary function that provides a handy parser
+parser = argparse.ArgumentParser(
+    prog="python " + sys.argv[0],
+    description="Calculate the l2 error of csv data files.",
+)
+parser.add_argument("-f1", "--reference", type=str, required=True, help="Reference csv-file")
+parser.add_argument("-f2", "--newSolution", type=str, required=True, help="NewSolution csv-file")
+parser.add_argument(
+    "-xMin",
+    "--xMin",
+    type=float,
+    required=False,
+    default=-1e99,
+    help="Restrict data to x>xMin",
+)
+parser.add_argument(
+    "-xMax",
+    "--xMax",
+    type=float,
+    required=False,
+    default=1e99,
+    help="Restrict data to x>xMax",
+)
 group1 = parser.add_mutually_exclusive_group(required=True)
-group1.add_argument('-x1', '--xData1', type=int, help='Column index of x data in reference')
-group1.add_argument('-x1Name', '--xDataName1', type=str, help='Name x data in reference')
+group1.add_argument("-x1", "--xData1", type=int, help="Column index of x data in reference")
+group1.add_argument("-x1Name", "--xDataName1", type=str, help="Name x data in reference")
 group2 = parser.add_mutually_exclusive_group(required=True)
-group2.add_argument('-x2', '--xData2', type=int, help='Column index of x data in newSolution')
-group2.add_argument('-x2Name', '--xDataName2', type=str, help='Name x data in newSolution')
+group2.add_argument("-x2", "--xData2", type=int, help="Column index of x data in newSolution")
+group2.add_argument("-x2Name", "--xDataName2", type=str, help="Name x data in newSolution")
 group3 = parser.add_mutually_exclusive_group(required=True)
-group3.add_argument('-y1', '--yData1', type=int, help='Column index of y data in reference')
-group3.add_argument('-y1Name', '--yDataName1', type=str, help='Name y data in reference')
+group3.add_argument("-y1", "--yData1", type=int, help="Column index of y data in reference")
+group3.add_argument("-y1Name", "--yDataName1", type=str, help="Name y data in reference")
 group4 = parser.add_mutually_exclusive_group(required=True)
-group4.add_argument('-y2', '--yData2', type=int, help='Column index of y data in newSolution')
-group4.add_argument('-y2Name', '--yDataName2', type=str, help='Name y data in newSolution')
-parser.add_argument('-p', '--percent', action='store_true', help='Print errors in percent')
-parser.add_argument('-f', '--force', action='store_true', help='Ignore \'not-matching\' errors')
-parser.add_argument('-v', '--verbose', action='store_true', help='Verbosity of the script')
+group4.add_argument("-y2", "--yData2", type=int, help="Column index of y data in newSolution")
+group4.add_argument("-y2Name", "--yDataName2", type=str, help="Name y data in newSolution")
+parser.add_argument("-p", "--percent", action="store_true", help="Print errors in percent")
+parser.add_argument("-f", "--force", action="store_true", help="Ignore 'not-matching' errors")
+parser.add_argument("-v", "--verbose", action="store_true", help="Verbosity of the script")
 args = vars(parser.parse_args())
 
-with open(args['reference'], 'rb') as referenceFile:
-  reader = csv.reader(referenceFile)
-  reference = list(reader)
-  if(args['xDataName1'] is not None):
-    indexReferenceX = reference[0].index(args['xDataName1'])
-  else:
-    indexReferenceX = args['xData1']
-  if(args['yDataName1'] is not None):
-    indexReferenceY = reference[0].index(args['yDataName1'])
-  else:
-    indexReferenceY = args['yData1']
+with open(args["reference"], "rb") as referenceFile:
+    reader = csv.reader(referenceFile)
+    reference = list(reader)
+    if args["xDataName1"] is not None:
+        indexReferenceX = reference[0].index(args["xDataName1"])
+    else:
+        indexReferenceX = args["xData1"]
+    if args["yDataName1"] is not None:
+        indexReferenceY = reference[0].index(args["yDataName1"])
+    else:
+        indexReferenceY = args["yData1"]
 
-with open(args['newSolution'], 'rb') as newSolutionFile:
-  reader = csv.reader(newSolutionFile)
-  newSolution = list(reader)
-  if(args['xDataName2'] is not None):
-    indexNewSolutionX = reference[0].index(args['xDataName2'])
-  else:
-    indexNewSolutionX = args['xData2']
-  if(args['yDataName2'] is not None):
-    indexNewSolutionY = reference[0].index(args['yDataName2'])
-  else:
-    indexNewSolutionY = args['yData2']
+with open(args["newSolution"], "rb") as newSolutionFile:
+    reader = csv.reader(newSolutionFile)
+    newSolution = list(reader)
+    if args["xDataName2"] is not None:
+        indexNewSolutionX = reference[0].index(args["xDataName2"])
+    else:
+        indexNewSolutionX = args["xData2"]
+    if args["yDataName2"] is not None:
+        indexNewSolutionY = reference[0].index(args["yDataName2"])
+    else:
+        indexNewSolutionY = args["yData2"]
 
-if (reference[0][indexReferenceX] != reference[0][indexNewSolutionX] and not args['force']):
-    print "X-Identifier not equal: ref=", reference[0][indexReferenceX], ",new=", reference[0][indexNewSolutionX], ". Aborting! (Use -f to continue anyway)"
-    exit (1)
+if reference[0][indexReferenceX] != reference[0][indexNewSolutionX] and not args["force"]:
+    print(
+        "X-Identifier not equal: ref=",
+        reference[0][indexReferenceX],
+        ",new=",
+        reference[0][indexNewSolutionX],
+        ". Aborting! (Use -f to continue anyway)",
+    )
+    exit(1)
 
-if (reference[0][indexReferenceY] != newSolution[0][indexNewSolutionY] and not args['force']):
-    print "Y-Identifier not equal. ref=", reference[0][indexReferenceY], ",new=", newSolution[0][indexNewSolutionY], ". Aborting! (Use -f to continue anyway)"
-    exit (2)
+if reference[0][indexReferenceY] != newSolution[0][indexNewSolutionY] and not args["force"]:
+    print(
+        "Y-Identifier not equal. ref=",
+        reference[0][indexReferenceY],
+        ",new=",
+        newSolution[0][indexNewSolutionY],
+        ". Aborting! (Use -f to continue anyway)",
+    )
+    exit(2)
 
-if (len(reference) != len(newSolution)):
-    print "Length of reference and newSolution not equal: ref=", len(reference), ",new=", len(newSolution), ". Aborting!"
-    exit (3)
+if len(reference) != len(newSolution):
+    print(
+        "Length of reference and newSolution not equal: ref=",
+        len(reference),
+        ",new=",
+        len(newSolution),
+        ". Aborting!",
+    )
+    exit(3)
 
 distanceOld = 0.0
 sumError = 0.0
@@ -67,33 +102,64 @@ sumReference = 0.0
 sumDistance = 0.0
 numPoints = 0
 
-for i in range(1,len(reference)):
+for i in range(1, len(reference)):
     coord_ref = float(reference[i][indexReferenceX])
     coord_newSolution = float(newSolution[i][indexNewSolutionX])
-    if (coord_ref != coord_newSolution):
-        print "Coordinates not equal: ref=", coord_ref, ",new=", coord_newSolution, ". Aborting!"
-        exit (4)
-    if (coord_ref < float(args['xMin']) or coord_ref > float(args['xMax'])):
+    if coord_ref != coord_newSolution:
+        print(
+            "Coordinates not equal: ref=",
+            coord_ref,
+            ",new=",
+            coord_newSolution,
+            ". Aborting!",
+        )
+        exit(4)
+    if coord_ref < float(args["xMin"]) or coord_ref > float(args["xMax"]):
         continue
 
-    if (i == 1):
-        distance = 0.5*(float(reference[2][indexReferenceX]) - float(reference[1][indexReferenceX]))
-    elif (i == len(reference)-1):
-        distance = 0.5*(float(reference[len(reference)-1][indexReferenceX]) - float(reference[len(reference)-2][indexReferenceX]))
+    if i == 1:
+        distance = 0.5 * (
+            float(reference[2][indexReferenceX]) - float(reference[1][indexReferenceX])
+        )
+    elif i == len(reference) - 1:
+        distance = 0.5 * (
+            float(reference[len(reference) - 1][indexReferenceX])
+            - float(reference[len(reference) - 2][indexReferenceX])
+        )
     else:
-        distance = 0.5*(float(reference[i+1][indexReferenceX]) - float(reference[i-1][indexReferenceX]))
-    sumError += ((float(reference[i][indexReferenceY])-float(newSolution[i][indexNewSolutionY]))**2)*distance
-    sumReference += ((float(reference[i][indexReferenceY]))**2)*distance
+        distance = 0.5 * (
+            float(reference[i + 1][indexReferenceX]) - float(reference[i - 1][indexReferenceX])
+        )
+    sumError += (
+        (float(reference[i][indexReferenceY]) - float(newSolution[i][indexNewSolutionY])) ** 2
+    ) * distance
+    sumReference += ((float(reference[i][indexReferenceY])) ** 2) * distance
     sumDistance += distance
     numPoints += 1
 
-if (numPoints < 999 and not args['force']):
-    print "Warning: numPoints=", numPoints, " is low, could result in bad the error approximation. (Use -f to suppress this warning)"
+if numPoints < 999 and not args["force"]:
+    print(
+        "Warning: numPoints=",
+        numPoints,
+        " is low, could result in a bad error approximation. (Use -f to suppress this warning)",
+    )
 
-l2normAbs = (sumError/sumDistance)**0.5 # numPoints is needed, resulting from the equidistant integration
-l2normRel = (sumError/sumReference)**0.5 # numPoints cancels out for equidistant integration
+l2normAbs = (
+    sumError / sumDistance
+) ** 0.5  # numPoints is needed, resulting from the equidistant integration
+l2normRel = (sumError / sumReference) ** 0.5  # numPoints cancels out for equidistant integration
 
-if (args['percent']):
-    print "L2_Error_in_%: ", "{0:.5f}%".format(l2normAbs*100), "Rel_L2_Error_in_%: ", "{0:.5f}%".format(l2normRel*100)
+if args["percent"]:
+    print(
+        "L2_Error_in_%: ",
+        "{0:.5f}%".format(l2normAbs * 100),
+        "Rel_L2_Error_in_%: ",
+        "{0:.5f}%".format(l2normRel * 100),
+    )
 else:
-    print "L2_Error: ", "{0:.5e}".format(l2normAbs), " Rel_L2_Error: ", "{0:.5e}".format(l2normRel)
+    print(
+        "L2_Error: ",
+        "{0:.5e}".format(l2normAbs),
+        " Rel_L2_Error: ",
+        "{0:.5e}".format(l2normRel),
+    )
diff --git a/bin/remove_clutter_after_last_endif.py b/bin/remove_clutter_after_last_endif.py
index afeeed8fec..84b8e6900f 100644
--- a/bin/remove_clutter_after_last_endif.py
+++ b/bin/remove_clutter_after_last_endif.py
@@ -3,12 +3,13 @@ import os
 
 # replace everything after last #endif with new line
 def clearAfterLastEndIf(filename):
-    with open(filename, 'r') as header:
+    with open(filename, "r") as header:
         split = header.read().split("#endif")
-        split[-1] = '\n'
-    with open(filename, 'w') as header:
+        split[-1] = "\n"
+    with open(filename, "w") as header:
         header.write("#endif".join(split))
 
+
 for root, _, files in os.walk(os.getcwd()):
     for file in files:
         if file.endswith(".hh"):
diff --git a/bin/testing/findtests.py b/bin/testing/findtests.py
index 651198c762..b01946db7d 100755
--- a/bin/testing/findtests.py
+++ b/bin/testing/findtests.py
@@ -25,14 +25,14 @@ def hasCommonMember(myset, mylist):
 
 
 # make dry run and return the compilation command
-def getCompileCommand(testConfig, buildTreeRoot='.'):
-    target = testConfig['target']
-    lines = subprocess.check_output(["make", "-B", "--dry-run", target],
-                                    encoding='ascii',
-                                    cwd=buildTreeRoot).splitlines()
+def getCompileCommand(testConfig, buildTreeRoot="."):
+    target = testConfig["target"]
+    lines = subprocess.check_output(
+        ["make", "-B", "--dry-run", target], encoding="ascii", cwd=buildTreeRoot
+    ).splitlines()
 
     def hasCppCommand(line):
-        return any(cpp in line for cpp in ['g++', 'clang++'])
+        return any(cpp in line for cpp in ["g++", "clang++"])
 
     # there may be library build commands first, last one is the actual target
     commands = list(filter(lambda line: hasCppCommand(line), lines))
@@ -40,7 +40,7 @@ def getCompileCommand(testConfig, buildTreeRoot='.'):
 
 
 # get the command and folder to compile the given test
-def buildCommandAndDir(testConfig, buildTreeRoot='.'):
+def buildCommandAndDir(testConfig, buildTreeRoot="."):
     compCommand = getCompileCommand(testConfig, buildTreeRoot)
     if compCommand is None:
         raise Exception("Could not determine compile command for {}".format(testConfig))
@@ -50,7 +50,7 @@ def buildCommandAndDir(testConfig, buildTreeRoot='.'):
 
 
 # check if a test is affected by changes in the given files
-def isAffectedTest(testConfigFile, changedFiles, buildTreeRoot='.'):
+def isAffectedTest(testConfigFile, changedFiles, buildTreeRoot="."):
     with open(testConfigFile) as configFile:
         testConfig = json.load(configFile)
 
@@ -61,10 +61,10 @@ def isAffectedTest(testConfigFile, changedFiles, buildTreeRoot='.'):
     # -MM skips headers from system directories
     # -H  prints the name(+path) of each used header
     # for some reason g++ writes to stderr
-    headers = subprocess.run(command + ["-MM", "-H"],
-                             stderr=PIPE, stdout=PIPE, cwd=dir,
-                             encoding='ascii').stderr.splitlines()
-    headers = [h.lstrip('. ') for h in headers]
+    headers = subprocess.run(
+        command + ["-MM", "-H"], stderr=PIPE, stdout=PIPE, cwd=dir, encoding="ascii"
+    ).stderr.splitlines()
+    headers = [h.lstrip(". ") for h in headers]
     headers.append(mainFile)
 
     if hasCommonMember(changedFiles, headers):
@@ -73,27 +73,41 @@ def isAffectedTest(testConfigFile, changedFiles, buildTreeRoot='.'):
     return False, testConfig["name"], testConfig["target"]
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
 
     # parse input arguments
-    parser = ArgumentParser(description='Find tests affected by changes')
-    parser.add_argument('-l', '--file-list', required=True,
-                        help='A file containing a list of files that changed')
-    parser.add_argument('-np', '--num-processes',
-                        required=False, type=int, default=8,
-                        help='Number of processes (default: 8)')
-    parser.add_argument('-o', '--outfile',
-                        required=False, default='affectedtests.json',
-                        help='The file in which to write the affected tests')
-    parser.add_argument('-b', '--build-dir',
-                        required=False, default='.',
-                        help='The path to the top-level build directory of the project to be checked')
+    parser = ArgumentParser(description="Find tests affected by changes")
+    parser.add_argument(
+        "-l", "--file-list", required=True, help="A file containing a list of files that changed"
+    )
+    parser.add_argument(
+        "-np",
+        "--num-processes",
+        required=False,
+        type=int,
+        default=8,
+        help="Number of processes (default: 8)",
+    )
+    parser.add_argument(
+        "-o",
+        "--outfile",
+        required=False,
+        default="affectedtests.json",
+        help="The file in which to write the affected tests",
+    )
+    parser.add_argument(
+        "-b",
+        "--build-dir",
+        required=False,
+        default=".",
+        help="The path to the top-level build directory of the project to be checked",
+    )
     args = vars(parser.parse_args())
 
-    buildDir = os.path.abspath(args['build_dir'])
-    targetFile = os.path.abspath(args['outfile'])
-    with open(args['file_list']) as files:
-        changedFiles = set([line.strip('\n') for line in files.readlines()])
+    buildDir = os.path.abspath(args["build_dir"])
+    targetFile = os.path.abspath(args["outfile"])
+    with open(args["file_list"]) as files:
+        changedFiles = set([line.strip("\n") for line in files.readlines()])
 
     # clean build directory
     subprocess.run(["make", "clean"], cwd=buildDir)
@@ -104,17 +118,15 @@ if __name__ == '__main__':
     affectedTests = {}
     tests = glob(os.path.join(buildDir, "TestMetaData") + "/*json")
 
-    numProcesses = max(1, args['num_processes'])
-    findAffectedTest = partial(isAffectedTest,
-                               changedFiles=changedFiles,
-                               buildTreeRoot=buildDir)
+    numProcesses = max(1, args["num_processes"])
+    findAffectedTest = partial(isAffectedTest, changedFiles=changedFiles, buildTreeRoot=buildDir)
     with Pool(processes=numProcesses) as p:
         for affected, name, target in p.imap_unordered(findAffectedTest, tests, chunksize=4):
             if affected:
-                affectedTests[name] = {'target': target}
-                print('\t- {} (target: {})'.format(name, target))
+                affectedTests[name] = {"target": target}
+                print("\t- {} (target: {})".format(name, target))
 
     print("Detected {} affected tests".format(len(affectedTests)))
 
-    with open(targetFile, 'w') as jsonFile:
+    with open(targetFile, "w") as jsonFile:
         json.dump(affectedTests, jsonFile)
diff --git a/bin/testing/fuzzycomparedata.py b/bin/testing/fuzzycomparedata.py
index 4fb758c387..a8625668d8 100644
--- a/bin/testing/fuzzycomparedata.py
+++ b/bin/testing/fuzzycomparedata.py
@@ -11,8 +11,17 @@ import json
 import sys
 from fuzzycomparevtu import is_fuzzy_equal_text
 
-def compare_data(dataFile1, dataFile2, delimiter, absolute=1.5e-7, relative=1e-2, zeroValueThreshold={}, verbose=True):
-    """ take two data files and compare them. Returns an exit key as returnvalue.
+
+def compare_data(
+    dataFile1,
+    dataFile2,
+    delimiter,
+    absolute=1.5e-7,
+    relative=1e-2,
+    zeroValueThreshold={},
+    verbose=True,
+):
+    """take two data files and compare them. Returns an exit key as return value.
 
     Arguments:
     ----------
@@ -38,25 +47,37 @@ def compare_data(dataFile1, dataFile2, delimiter, absolute=1.5e-7, relative=1e-2
 
     if verbose:
         print("Comparing {} and {}".format(dataFile1, dataFile2))
-        print("... with a maximum relative error of {} and a maximum absolute error of {}*max_abs_parameter_value.".format(relative, absolute))
+        print(
+            "... with a maximum relative error of {} and a maximum absolute error of {}*max_abs_parameter_value.".format(
+                relative, absolute
+            )
+        )
 
     # construct element tree from data files
-    data1 = list(csv.reader(open(dataFile1, 'r'), delimiter=delimiter))
-    data2 = list(csv.reader(open(dataFile2, 'r'), delimiter=delimiter))
+    data1 = list(csv.reader(open(dataFile1, "r"), delimiter=delimiter))
+    data2 = list(csv.reader(open(dataFile2, "r"), delimiter=delimiter))
 
-    if (len(data1) != len(data2)):
-        print("Length of data1 and data2 not equal: ref=", len(data1), ",new=", len(data2), ". Aborting!")
-        exit (3)
+    if len(data1) != len(data2):
+        print(
+            "Length of data1 and data2 not equal: ref=",
+            len(data1),
+            ",new=",
+            len(data2),
+            ". Aborting!",
+        )
+        exit(3)
 
     is_equal = True
-    for i in range(0,len(data1[0])):
+    for i in range(0, len(data1[0])):
         a = data1[0][i]
         b = data2[0][i]
-        for j in range(1,len(data1)):
+        for j in range(1, len(data1)):
             a += " {0}".format(data1[j][i])
             b += " {0}".format(data2[j][i])
 
-        if not is_fuzzy_equal_text(a, b, "row {0}".format(i), len(data1), absolute, relative, zeroValueThreshold, verbose):
+        if not is_fuzzy_equal_text(
+            a, b, "row {0}".format(i), len(data1), absolute, relative, zeroValueThreshold, verbose
+        ):
             if verbose:
                 is_equal = False
             else:
@@ -71,16 +92,42 @@ def compare_data(dataFile1, dataFile2, delimiter, absolute=1.5e-7, relative=1e-2
 # main program if called as script return appropriate error codes
 if __name__ == "__main__":
     # handle arguments and print help message
-    parser = argparse.ArgumentParser(description='Fuzzy compare of two data files (e.g csv). \
+    parser = argparse.ArgumentParser(
+        description="Fuzzy compare of two data files (e.g csv). \
         The files are accepted if for every value the difference is below the absolute error \
-        or below the relative error or below both.')
-    parser.add_argument('data_file_1', type=str, help='first file to compare')
-    parser.add_argument('data_file_2', type=str, help='second file to compare')
-    parser.add_argument('delimiter', type=str, help='second file to compare')
-    parser.add_argument('-r', '--relative', type=float, default=1e-2, help='maximum relative error (default=1e-2)')
-    parser.add_argument('-a', '--absolute', type=float, default=1.5e-7, help='maximum absolute error (default=1.5e-7)')
-    parser.add_argument('-v', '--verbose', type=bool, default=True, help='verbosity of the script')
-    parser.add_argument('-z', '--zeroThreshold', type=json.loads, default='{}', help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}')
+        or below the relative error or below both."
+    )
+    parser.add_argument("data_file_1", type=str, help="first file to compare")
+    parser.add_argument("data_file_2", type=str, help="second file to compare")
+    parser.add_argument("delimiter", type=str, help="delimiter used in the data files")
+    parser.add_argument(
+        "-r", "--relative", type=float, default=1e-2, help="maximum relative error (default=1e-2)"
+    )
+    parser.add_argument(
+        "-a",
+        "--absolute",
+        type=float,
+        default=1.5e-7,
+        help="maximum absolute error (default=1.5e-7)",
+    )
+    parser.add_argument("-v", "--verbose", type=bool, default=True, help="verbosity of the script")
+    parser.add_argument(
+        "-z",
+        "--zeroThreshold",
+        type=json.loads,
+        default="{}",
+        help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}',
+    )
     args = vars(parser.parse_args())
 
-    sys.exit(compare_data(args["data_file_1"], args["data_file_2"], args["delimiter"], args["absolute"], args["relative"], args["zeroThreshold"], args["verbose"]))
+    sys.exit(
+        compare_data(
+            args["data_file_1"],
+            args["data_file_2"],
+            args["delimiter"],
+            args["absolute"],
+            args["relative"],
+            args["zeroThreshold"],
+            args["verbose"],
+        )
+    )
diff --git a/bin/testing/fuzzycomparevtu.py b/bin/testing/fuzzycomparevtu.py
index 7241b08c27..c42923a97a 100644
--- a/bin/testing/fuzzycomparevtu.py
+++ b/bin/testing/fuzzycomparevtu.py
@@ -18,7 +18,7 @@ import functools
 
 # fuzzy compare VTK tree from VTK strings
 def compare_vtk(vtk1, vtk2, absolute=1.5e-7, relative=1e-2, zeroValueThreshold={}, verbose=True):
-    """ take two vtk files and compare them. Returns an exit key as returnvalue.
+    """take two vtk files and compare them. Returns an exit key as return value.
 
     Arguments:
     ----------
@@ -50,10 +50,10 @@ def compare_vtk(vtk1, vtk2, absolute=1.5e-7, relative=1e-2, zeroValueThreshold={
 
     # convert parallel vtu to sequential vtu if necessary
     convertedFromParallelVtu = False
-    if vtk1.endswith('.pvtu'):
+    if vtk1.endswith(".pvtu"):
         root1 = convert_pvtu_to_vtu(root1, vtk1)
         convertedFromParallelVtu = True
-    if vtk2.endswith('.pvtu'):
+    if vtk2.endswith(".pvtu"):
         root2 = convert_pvtu_to_vtu(root2, vtk2)
         convertedFromParallelVtu = True
 
@@ -64,20 +64,35 @@ def compare_vtk(vtk1, vtk2, absolute=1.5e-7, relative=1e-2, zeroValueThreshold={
 
     if verbose:
         print("Comparing {} and {}".format(vtk1, vtk2))
-        print("... with a maximum relative error of {} and a maximum absolute error of {}*max_abs_parameter_value.".format(relative, absolute))
+        print(
+            "... with a maximum relative error of {} and a maximum absolute error of {}*max_abs_parameter_value.".format(
+                relative, absolute
+            )
+        )
 
     # sort the vtk file so that the comparison is independent of the
     # index numbering (coming e.g. from different grid managers)
-    sortedroot1, sortedroot2 = sort_vtk_by_coordinates(sortedroot1, sortedroot2, verbose, convertedFromParallelVtu)
+    sortedroot1, sortedroot2 = sort_vtk_by_coordinates(
+        sortedroot1, sortedroot2, verbose, convertedFromParallelVtu
+    )
 
     # do the fuzzy compare
-    if is_fuzzy_equal_node(sortedroot1, sortedroot2, absolute, relative, zeroValueThreshold, verbose, convertedFromParallelVtu):
+    if is_fuzzy_equal_node(
+        sortedroot1,
+        sortedroot2,
+        absolute,
+        relative,
+        zeroValueThreshold,
+        verbose,
+        convertedFromParallelVtu,
+    ):
         print("Fuzzy comparison done (equal)")
         return 0
     else:
         print("Fuzzy comparison done (not equal)")
         return 1
 
+
 # convert a parallel vtu file into sequential one by glueing the pieces together
 def convert_pvtu_to_vtu(pvturoot, filename):
 
@@ -128,7 +143,7 @@ def convert_pvtu_to_vtu(pvturoot, filename):
         # compute offset for the offsets vector (it's the last entry of the current root piece)
         for dataArray in root.findall(".//Cells/DataArray"):
             if dataArray.attrib["Name"] == "offsets":
-                offsets_offset = int(dataArray.text.strip().rsplit(' ', 1)[1])
+                offsets_offset = int(dataArray.text.strip().rsplit(" ", 1)[1])
 
         # add the offsets to the root piece
         for value in offsets.text.strip().split():
@@ -168,28 +183,33 @@ def convert_pvtu_to_vtu(pvturoot, filename):
 
     return root
 
+
 # fuzzy compare of VTK nodes
-def is_fuzzy_equal_node(node1, node2, absolute, relative, zeroValueThreshold, verbose, convertedFromParallelVtu=False):
+def is_fuzzy_equal_node(
+    node1, node2, absolute, relative, zeroValueThreshold, verbose, convertedFromParallelVtu=False
+):
 
     is_equal = True
     for node1child, node2child in zip(node1.iter(), node2.iter()):
         if node1.tag != node2.tag:
             if verbose:
-                print('The name of the node differs in: {} and {}'.format(node1.tag, node2.tag))
+                print("The name of the node differs in: {} and {}".format(node1.tag, node2.tag))
                 is_equal = False
             else:
                 return False
-        if not convertedFromParallelVtu and list(node1.attrib.items()) != list(node2.attrib.items()):
+        if not convertedFromParallelVtu and list(node1.attrib.items()) != list(
+            node2.attrib.items()
+        ):
             if verbose:
-                print('Attributes differ in node: {}'.format(node1.tag))
-                print('Attributes1: ', list(node1.attrib.items()))
-                print('Attributes2: ', list(node2.attrib.items()))
+                print("Attributes differ in node: {}".format(node1.tag))
+                print("Attributes1: ", list(node1.attrib.items()))
+                print("Attributes2: ", list(node2.attrib.items()))
                 is_equal = False
             else:
                 return False
         if len(list(node1.iter())) != len(list(node2.iter())):
             if verbose:
-                print('Number of children differs in node: {}'.format(node1.tag))
+                print("Number of children differs in node: {}".format(node1.tag))
                 is_equal = False
             else:
                 return False
@@ -198,10 +218,16 @@ def is_fuzzy_equal_node(node1, node2, absolute, relative, zeroValueThreshold, ve
                 numberOfComponents = 1
             else:
                 numberOfComponents = int(node1child.attrib["NumberOfComponents"])
-            if not is_fuzzy_equal_text(node1child.text, node2child.text,
-                                       node1child.attrib["Name"],
-                                       numberOfComponents,
-                                       absolute, relative, zeroValueThreshold, verbose):
+            if not is_fuzzy_equal_text(
+                node1child.text,
+                node2child.text,
+                node1child.attrib["Name"],
+                numberOfComponents,
+                absolute,
+                relative,
+                zeroValueThreshold,
+                verbose,
+            ):
                 if node1child.attrib["Name"] == node2child.attrib["Name"]:
                     if verbose:
                         is_equal = False
@@ -209,7 +235,11 @@ def is_fuzzy_equal_node(node1, node2, absolute, relative, zeroValueThreshold, ve
                         return False
                 else:
                     if verbose:
-                        print('Comparing different parameters: {} and {}'.format(node1child.attrib["Name"], node2child.attrib["Name"]))
+                        print(
+                            "Comparing different parameters: {} and {}".format(
+                                node1child.attrib["Name"], node2child.attrib["Name"]
+                            )
+                        )
                         is_equal = False
                     else:
                         return False
@@ -217,11 +247,13 @@ def is_fuzzy_equal_node(node1, node2, absolute, relative, zeroValueThreshold, ve
 
 
 # fuzzy compare of text (in the xml sense) consisting of whitespace separated numbers
-def is_fuzzy_equal_text(text1, text2, parameter, numComp, absolute, relative, zeroValueThreshold, verbose):
+def is_fuzzy_equal_text(
+    text1, text2, parameter, numComp, absolute, relative, zeroValueThreshold, verbose
+):
     list1 = text1.split()
     list2 = text2.split()
     # difference only in whitespace?
-    if (list1 == list2):
+    if list1 == list2:
         return True
     # compare number by number
     is_equal = True
@@ -244,7 +276,7 @@ def is_fuzzy_equal_text(text1, text2, parameter, numComp, absolute, relative, ze
     for list1, list2, parameter in zip(lists1, lists2, parameters):
         # for verbose output
         max_relative_difference = 0.0
-        message = ''
+        message = ""
 
         # see inspiration, explanations in
         # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
@@ -255,10 +287,10 @@ def is_fuzzy_equal_text(text1, text2, parameter, numComp, absolute, relative, ze
         # check for nan and inf
         for number1, number2 in zip(floatList1, floatList2):
             if math.isnan(number1) or math.isnan(number2):
-                print('Parameter {} contains NaN!'.format(parameter))
+                print("Parameter {} contains NaN!".format(parameter))
                 return False
             if math.isinf(number1) or math.isinf(number2):
-                print('Parameter {} contains inf!'.format(parameter))
+                print("Parameter {} contains inf!".format(parameter))
                 return False
 
         # manipulate the data set for the sake of sensible comparison
@@ -266,10 +298,20 @@ def is_fuzzy_equal_text(text1, text2, parameter, numComp, absolute, relative, ze
         # only replace them with zero if the parameters in both lists are under the threshold. Otherwise we
         # compare a non-zero value with 0 later.
         if parameter in zeroValueThreshold:
-            floatList1 = [0.0 if abs(i) < float(zeroValueThreshold[parameter]) and abs(j) < float(zeroValueThreshold[parameter])
-                          else i for i, j in zip(floatList1, floatList2)]
-            floatList2 = [0.0 if abs(i) < float(zeroValueThreshold[parameter]) and abs(j) < float(zeroValueThreshold[parameter])
-                          else j for i, j in zip(floatList1, floatList2)]
+            floatList1 = [
+                0.0
+                if abs(i) < float(zeroValueThreshold[parameter])
+                and abs(j) < float(zeroValueThreshold[parameter])
+                else i
+                for i, j in zip(floatList1, floatList2)
+            ]
+            floatList2 = [
+                0.0
+                if abs(i) < float(zeroValueThreshold[parameter])
+                and abs(j) < float(zeroValueThreshold[parameter])
+                else j
+                for i, j in zip(floatList1, floatList2)
+            ]
 
         absFloatList1 = [abs(i) for i in floatList1]
         absFloatList2 = [abs(i) for i in floatList2]
@@ -296,28 +338,40 @@ def is_fuzzy_equal_text(text1, text2, parameter, numComp, absolute, relative, ze
                     if largernumber != 0.0:
                         if diff / largernumber > max_relative_difference:
                             max_relative_difference = diff / largernumber
-                            message = 'Difference is too large: {:.2%} -> between: {} and {}'.format(max_relative_difference, number1, number2)
+                            message = (
+                                "Difference is too large: {:.2%} -> between: {} and {}".format(
+                                    max_relative_difference, number1, number2
+                                )
+                            )
                 else:
                     return False
 
         if verbose and max_relative_difference != 0.0:
-            print('\nData differs in parameter: {}'.format(parameter))
+            print("\nData differs in parameter: {}".format(parameter))
             print(message)
-            print('Info for {}: max_abs_parameter_value={} and min_abs_parameter_value={}.'.format(parameter, magnitude, minimal))
+            print(
+                "Info for {}: max_abs_parameter_value={} and min_abs_parameter_value={}.".format(
+                    parameter, magnitude, minimal
+                )
+            )
             if parameter in zeroValueThreshold:
-                print('For parameter {} a zero value threshold of {} was given.'.format(parameter, zeroValueThreshold[parameter]))
+                print(
+                    "For parameter {} a zero value threshold of {} was given.".format(
+                        parameter, zeroValueThreshold[parameter]
+                    )
+                )
 
     return is_equal
 
 
 def sort_by_name(elem):
-    name = elem.get('Name')
+    name = elem.get("Name")
     if name:
         try:
             return str(name)
         except ValueError:
-            return ''
-    return ''
+            return ""
+    return ""
 
 
 # sorts attributes of an item and returns a sorted item
@@ -329,7 +383,7 @@ def sort_attributes(item, sorteditem):
 
 def sort_elements(items, newroot):
     items = sorted(items, key=sort_by_name)
-    items = sorted(items, key=attrgetter('tag'))
+    items = sorted(items, key=attrgetter("tag"))
 
     # Once sorted, we sort each of the items
     for item in items:
@@ -351,8 +405,8 @@ def sort_elements(items, newroot):
 
 # has to sort all Cell and Point Data after the attribute "Name"!
 def sort_vtk(root):
-    if(root.tag != "VTKFile"):
-        print('Format is not a VTKFile. Sorting will most likely fail!')
+    if root.tag != "VTKFile":
+        print("Format is not a VTKFile. Sorting will most likely fail!")
     # create a new root for the sorted tree
     newroot = ET.Element(root.tag)
     # create the sorted copy
@@ -362,9 +416,18 @@ def sort_vtk(root):
     # return the sorted element tree
     return newroot
 
+
 # sorts the data by point coordinates so that it is independent of index numbering
 def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=False):
-    if not is_fuzzy_equal_node(root1.find(".//Points/DataArray"), root2.find(".//Points/DataArray"), absolute=1e-2, relative=1.5e-7, zeroValueThreshold=dict(), verbose=False, convertedFromParallelVtu=False):
+    if not is_fuzzy_equal_node(
+        root1.find(".//Points/DataArray"),
+        root2.find(".//Points/DataArray"),
+        absolute=1e-2,
+        relative=1.5e-7,
+        zeroValueThreshold=dict(),
+        verbose=False,
+        convertedFromParallelVtu=False,
+    ):
         if verbose:
             print("Sorting vtu by coordinates...")
         for root in [root1, root2]:
@@ -382,7 +445,9 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
                 if dataArray.get("NumberOfComponents") == None:
                     numberOfComponents[dataArray.attrib["Name"]] = 1
                 else:
-                    numberOfComponents[dataArray.attrib["Name"]] = dataArray.attrib["NumberOfComponents"]
+                    numberOfComponents[dataArray.attrib["Name"]] = dataArray.attrib[
+                        "NumberOfComponents"
+                    ]
 
             vertexArray = []
             coords = dataArrays["Coordinates"].split()
@@ -422,17 +487,19 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
             # for non-conforming output vertices can have the same coordinates and also
             # different indices / sorting so we need another criterium to sort.
             # we use the largest cell midpoint coordinate vector the vertex is connected to
-            largestCellMidPointForVertex = [[0, 0, 0]]*len(vertexArray)
+            largestCellMidPointForVertex = [[0, 0, 0]] * len(vertexArray)
             for cellIdx, cell in enumerate(cellArray):
                 # compute cell midpoint
                 coords = [vertexArray[i] for i in cell]
-                midpoint = [i/float(len(coords)) for i in [sum(coord) for coord in zip(*coords)]]
+                midpoint = [i / float(len(coords)) for i in [sum(coord) for coord in zip(*coords)]]
                 for vertexIndex in cell:
-                    largestCellMidPointForVertex[vertexIndex] = max(largestCellMidPointForVertex[vertexIndex], midpoint)
+                    largestCellMidPointForVertex[vertexIndex] = max(
+                        largestCellMidPointForVertex[vertexIndex], midpoint
+                    )
 
             # floating point comparison operator for scalars
             def float_cmp(a, b, eps):
-                if math.fabs(a-b) < eps:
+                if math.fabs(a - b) < eps:
                     return 0
                 elif a > b:
                     return 1
@@ -450,7 +517,7 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
             # compute an epsilon and a comparison operator for floating point comparisons
             bBoxMax = max(vertexArray)
             bBoxMin = min(vertexArray)
-            epsilon = math.sqrt(sum([(a-b)**2 for a, b in zip(bBoxMax, bBoxMin)]))*1e-7
+            epsilon = math.sqrt(sum([(a - b) ** 2 for a, b in zip(bBoxMax, bBoxMin)])) * 1e-7
             # first compare by coordinates, if the same compare largestCellMidPointForVertex
             # TODO: is there a more pythonic way?
             def vertex_cmp(a, b):
@@ -458,7 +525,9 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
                 if res != 0:
                     return res
 
-                res2 = floatvec_cmp(largestCellMidPointForVertex[a[0]], largestCellMidPointForVertex[b[0]], epsilon)
+                res2 = floatvec_cmp(
+                    largestCellMidPointForVertex[a[0]], largestCellMidPointForVertex[b[0]], epsilon
+                )
                 if res2 != 0:
                     return res2
 
@@ -469,8 +538,8 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
             for idx, coords in enumerate(vertexArray):
                 vMap.append((idx, coords))
 
-            vertexIndexMap = [0]*len(vMap)
-            vertexIndexMapInverse = [0]*len(vMap)
+            vertexIndexMap = [0] * len(vMap)
+            vertexIndexMapInverse = [0] * len(vMap)
             # first sort by coordinates, if the same by largestCellMidPointForVertex
             for idxNew, idxOld in enumerate(sorted(vMap, key=functools.cmp_to_key(vertex_cmp))):
                 vertexIndexMap[idxOld[0]] = idxNew
@@ -495,14 +564,14 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
                 num = int(numberOfComponents[name])
                 newitems = []
                 for i in range(len(items) // num):
-                    newitems.append([i for i in items[i * num: i * num + num]])
+                    newitems.append(items[i * num : i * num + num])
                 items = newitems
                 # sort the items: we have either vertex or cell data
                 if name in pointDataArrays:
                     # use the unique indices if the vtk file has been converted
                     # from pvd
                     if convertedFromParallelVtu:
-                        uniqueItems = [None]*len(vertexArray)
+                        uniqueItems = [None] * len(vertexArray)
                         for i in range(len(items)):
                             uniqueItems[uniqueIdx[i]] = items[i]
                         sortedItems = [uniqueItems[i] for i in vertexIndexMapInverse]
@@ -537,22 +606,47 @@ def sort_vtk_by_coordinates(root1, root2, verbose, convertedFromParallelVtu=Fals
 # main program if called as script return appropriate error codes
 if __name__ == "__main__":
     # handle arguments and print help message
-    parser = argparse.ArgumentParser(description='Fuzzy compare of two VTK\
+    parser = argparse.ArgumentParser(
+        description="Fuzzy compare of two VTK\
         (Visualization Toolkit) files. The files are accepted if for every\
         value the difference is below the absolute error or below the\
         relative error or below both.  If a pvd file is given instead, the\
         corresponding possibly parallel vtk file(s) have to be present and\
         will be converted to a (series of) sequential vtk file(s). The last\
         one in the natural ordering of these files will be taken for\
-        comparison.')
-    parser.add_argument('vtk_file_1', type=str, help='first file to compare')
-    parser.add_argument('vtk_file_2', type=str, help='second file to compare')
-    parser.add_argument('-r', '--relative', type=float, default=1e-2, help='maximum relative error (default=1e-2)')
-    parser.add_argument('-a', '--absolute', type=float, default=1.5e-7, help='maximum absolute error (default=1.5e-7)')
-    parser.add_argument('-z', '--zeroThreshold', type=json.loads, default='{}', help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}')
-    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
-    parser.add_argument('--no-verbose', dest='verbose', action='store_false')
+        comparison."
+    )
+    parser.add_argument("vtk_file_1", type=str, help="first file to compare")
+    parser.add_argument("vtk_file_2", type=str, help="second file to compare")
+    parser.add_argument(
+        "-r", "--relative", type=float, default=1e-2, help="maximum relative error (default=1e-2)"
+    )
+    parser.add_argument(
+        "-a",
+        "--absolute",
+        type=float,
+        default=1.5e-7,
+        help="maximum absolute error (default=1.5e-7)",
+    )
+    parser.add_argument(
+        "-z",
+        "--zeroThreshold",
+        type=json.loads,
+        default="{}",
+        help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}',
+    )
+    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
+    parser.add_argument("--no-verbose", dest="verbose", action="store_false")
     parser.set_defaults(verbose=True)
     args = vars(parser.parse_args())
 
-    sys.exit(compare_vtk(args["vtk_file_1"], args["vtk_file_2"], args["absolute"], args["relative"], args["zeroThreshold"], args["verbose"]))
+    sys.exit(
+        compare_vtk(
+            args["vtk_file_1"],
+            args["vtk_file_2"],
+            args["absolute"],
+            args["relative"],
+            args["zeroThreshold"],
+            args["verbose"],
+        )
+    )
diff --git a/bin/testing/getchangedfiles.py b/bin/testing/getchangedfiles.py
index 69651b7c2b..4539a932a0 100644
--- a/bin/testing/getchangedfiles.py
+++ b/bin/testing/getchangedfiles.py
@@ -10,49 +10,59 @@ from argparse import ArgumentParser
 
 
 def getCommandOutput(command, cwd=None):
-    return subprocess.check_output(command, encoding='ascii', cwd=cwd)
+    return subprocess.check_output(command, encoding="ascii", cwd=cwd)
 
 
 # get the files that differ between two trees in a git repo
 def getChangedFiles(gitFolder, sourceTree, targetTree):
 
     gitFolder = os.path.abspath(gitFolder)
-    root = getCommandOutput(
-        command=['git', 'rev-parse', '--show-toplevel'],
-        cwd=gitFolder
-    ).strip('\n')
+    root = getCommandOutput(command=["git", "rev-parse", "--show-toplevel"], cwd=gitFolder).strip(
+        "\n"
+    )
     changedFiles = getCommandOutput(
-        command=["git", "diff-tree", "-r", "--name-only", sourceTree, targetTree],
-        cwd=gitFolder
+        command=["git", "diff-tree", "-r", "--name-only", sourceTree, targetTree], cwd=gitFolder
     ).splitlines()
 
     return [os.path.join(root, file) for file in changedFiles]
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
 
     # parse input arguments
-    parser = ArgumentParser(
-        description='Get the files that differ between two git-trees'
+    parser = ArgumentParser(description="Get the files that differ between two git-trees")
+    parser.add_argument(
+        "-f",
+        "--folder",
+        required=False,
+        default=".",
+        help="The path to a folder within the git repository",
+    )
+    parser.add_argument(
+        "-s",
+        "--source-tree",
+        required=False,
+        default="HEAD",
+        help="The source tree (default: `HEAD`)",
+    )
+    parser.add_argument(
+        "-t",
+        "--target-tree",
+        required=False,
+        default="master",
+        help="The tree to compare against (default: `master`)",
+    )
+    parser.add_argument(
+        "-o",
+        "--outfile",
+        required=False,
+        default="changedfiles.txt",
+        help="The file in which to write the changed files",
     )
-    parser.add_argument('-f', '--folder',
-                        required=False, default='.',
-                        help='The path to a folder within the git repository')
-    parser.add_argument('-s', '--source-tree',
-                        required=False, default='HEAD',
-                        help='The source tree (default: `HEAD`)')
-    parser.add_argument('-t', '--target-tree',
-                        required=False, default='master',
-                        help='The tree to compare against (default: `master`)')
-    parser.add_argument('-o', '--outfile',
-                        required=False, default='changedfiles.txt',
-                        help='The file in which to write the changed files')
     args = vars(parser.parse_args())
 
-    changedFiles = getChangedFiles(args['folder'],
-                                   args['source_tree'],
-                                   args['target_tree'])
+    changedFiles = getChangedFiles(args["folder"], args["source_tree"], args["target_tree"])
 
-    with open(args['outfile'], 'w') as outFile:
+    with open(args["outfile"], "w") as outFile:
         for file in changedFiles:
             outFile.write(f"{os.path.abspath(file)}\n")
diff --git a/bin/testing/runselectedtests.py b/bin/testing/runselectedtests.py
index 43aca44475..1f8db7ef9e 100755
--- a/bin/testing/runselectedtests.py
+++ b/bin/testing/runselectedtests.py
@@ -13,13 +13,13 @@ from argparse import ArgumentParser
 
 # require Python 3
 if sys.version_info.major < 3:
-    sys.exit('Python 3 required')
+    sys.exit("Python 3 required")
 
 
-def buildTests(config, flags=['-j8', '--keep-going']):
+def buildTests(config, flags=["-j8", "--keep-going"]):
 
     if not config:
-        print('No tests to be built')
+        print("No tests to be built")
         return
 
     # The MakeFile generated by cmake contains the .NOTPARALLEL statement, as
@@ -27,94 +27,108 @@ def buildTests(config, flags=['-j8', '--keep-going']):
     # is taken care of within that latter Makefile. Therefore, we create a
     # small custom Makefile here on top of `Makefile2`, where we define a new
     # target, composed of affected tests, that can be built in parallel
-    with open('TestMakeFile', 'w') as makeFile:
+    with open("TestMakeFile", "w") as makeFile:
         # include make file generated by cmake
-        makeFile.write('include CMakeFiles/Makefile2\n')
+        makeFile.write("include CMakeFiles/Makefile2\n")
 
         # define a new target composed of the test targets
-        makeFile.write('testselection: ')
-        makeFile.write(' '.join([tc['target'] for tc in config.values()]))
+        makeFile.write("testselection: ")
+        makeFile.write(" ".join([tc["target"] for tc in config.values()]))
 
-    subprocess.run(['make', '-f', 'TestMakeFile'] + flags + ['testselection'],
-                   check=True)
+    subprocess.run(["make", "-f", "TestMakeFile"] + flags + ["testselection"], check=True)
 
 
-def runTests(config, script='', flags=['-j8', '--output-on-failure']):
+def runTests(config, script="", flags=["-j8", "--output-on-failure"]):
 
     tests = list(config.keys())
     if not tests:
-        print('No tests to be run. Letting dune-ctest produce empty report.')
-        tests = ['NOOP']
+        print("No tests to be run. Letting dune-ctest produce empty report.")
+        tests = ["NOOP"]
 
     # turn test names into a regular expression
-    testRegEx = '|'.join(["^{}$".format(t) for t in tests])
+    testRegEx = "|".join(["^{}$".format(t) for t in tests])
 
     # if not given, try system-wide call to dune-ctest
-    script = ['dune-ctest'] if not script else script
-    subprocess.run([script] + flags + ['-R', testRegEx], check=True)
-
-
-if __name__ == '__main__':
-    parser = ArgumentParser(description='Build or run a selection of tests')
-    parser.add_argument('-a', '--all',
-                        required=False,
-                        action='store_true',
-                        help='use this flag to build/run all tests')
-    parser.add_argument('-c', '--config',
-                        required=False,
-                        help='json file with configuration of tests to be run')
-    parser.add_argument('-s', '--script',
-                        required=False,
-                        default='',
-                        help='provide the path to the dune-ctest script')
-    parser.add_argument('-b', '--build',
-                        required=False,
-                        action='store_true',
-                        help='use this flag to build the tests')
-    parser.add_argument('-t', '--test',
-                        required=False,
-                        action='store_true',
-                        help='use this flag to run the tests')
-    parser.add_argument('-bf', '--buildflags',
-                        required=False,
-                        default='-j8 --keep-going',
-                        help='set the flags passed to make')
-    parser.add_argument('-tf', '--testflags',
-                        required=False,
-                        default='-j8 --output-on-failure -LE python',
-                        help='set the flags passed to ctest')
+    script = ["dune-ctest"] if not script else script
+    subprocess.run([script] + flags + ["-R", testRegEx], check=True)
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser(description="Build or run a selection of tests")
+    parser.add_argument(
+        "-a",
+        "--all",
+        required=False,
+        action="store_true",
+        help="use this flag to build/run all tests",
+    )
+    parser.add_argument(
+        "-c", "--config", required=False, help="json file with configuration of tests to be run"
+    )
+    parser.add_argument(
+        "-s",
+        "--script",
+        required=False,
+        default="",
+        help="provide the path to the dune-ctest script",
+    )
+    parser.add_argument(
+        "-b",
+        "--build",
+        required=False,
+        action="store_true",
+        help="use this flag to build the tests",
+    )
+    parser.add_argument(
+        "-t", "--test", required=False, action="store_true", help="use this flag to run the tests"
+    )
+    parser.add_argument(
+        "-bf",
+        "--buildflags",
+        required=False,
+        default="-j8 --keep-going",
+        help="set the flags passed to make",
+    )
+    parser.add_argument(
+        "-tf",
+        "--testflags",
+        required=False,
+        default="-j8 --output-on-failure -LE python",
+        help="set the flags passed to ctest",
+    )
     args = vars(parser.parse_args())
 
-    if not args['build'] and not args['test']:
-        sys.exit('Neither `build` not `test` flag was set. Exiting.')
+    if not args["build"] and not args["test"]:
+        sys.exit("Neither `build` not `test` flag was set. Exiting.")
 
-    if args['config'] and args['all']:
-        sys.exit('Error: both `config` and `all` specified. '
-                 'Please set only one of these arguments.')
+    if args["config"] and args["all"]:
+        sys.exit(
+            "Error: both `config` and `all` specified. " "Please set only one of these arguments."
+        )
 
     # prepare build and test flags
-    buildFlags = args['buildflags'].split(' ')
-    testFlags = args['testflags'].split(' ')
-    dunectest = args['script']
-    dunectest = 'dune-ctest' if not dunectest else os.path.abspath(dunectest)
+    buildFlags = args["buildflags"].split(" ")
+    testFlags = args["testflags"].split(" ")
+    dunectest = args["script"]
+    dunectest = "dune-ctest" if not dunectest else os.path.abspath(dunectest)
 
     # use target `all`
-    if args['all']:
-        if args['build']:
-            print('Building all tests')
-            subprocess.run(['make'] + buildFlags + ['build_tests'], check=True)
-        if args['test']:
-            print('Running all tests')
+    if args["all"]:
+        if args["build"]:
+            print("Building all tests")
+            subprocess.run(["make"] + buildFlags + ["build_tests"], check=True)
+        if args["test"]:
+            print("Running all tests")
             subprocess.run([dunectest] + testFlags, check=True)
 
     # use target selection
     else:
-        with open(args['config']) as configFile:
+        with open(args["config"]) as configFile:
             config = json.load(configFile)
             numTests = len(config)
-            print('{} tests found in the configuration file'.format(numTests))
+            print("{} tests found in the configuration file".format(numTests))
 
-            if args['build']:
+            if args["build"]:
                 buildTests(config, buildFlags)
-            if args['test']:
+            if args["test"]:
                 runTests(config, dunectest, testFlags)
diff --git a/bin/testing/runtest.py b/bin/testing/runtest.py
index b1bc1eabb6..480a22de32 100755
--- a/bin/testing/runtest.py
+++ b/bin/testing/runtest.py
@@ -9,68 +9,123 @@ from fuzzycomparedata import compare_data
 
 # parse arguments
 parser = argparse.ArgumentParser()
-parser.add_argument('-c', '--command', nargs=1, help='The executable and optional arguments as a single string', required=True)
-parser.add_argument('-s', '--script', nargs=1, help="The comparison script. [fuzzy, fuzzyData, exact, <path_to_script>] where the script takes two files as arguments.")
-parser.add_argument('-f', '--files', nargs='+', help="Pairs of file names (first reference, then current). Usage: '[-f ref1 cur1 [[ref2] [cur2] ...]]'")
-parser.add_argument('-d', '--delimiter', type=str, default=',', help='Column delimiter for data files')
-parser.add_argument('-r', '--relative', type=float, default=1e-2, help='maximum relative error (default=1e-2) when using fuzzy comparison')
-parser.add_argument('-a', '--absolute', type=float, default=1.5e-7, help='maximum absolute error (default=1.5e-7) when using fuzzy comparison')
-parser.add_argument('-z', '--zeroThreshold', type=json.loads, default='{}', help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}')
+parser.add_argument(
+    "-c",
+    "--command",
+    nargs=1,
+    help="The executable and optional arguments as a single string",
+    required=True,
+)
+parser.add_argument(
+    "-s",
+    "--script",
+    nargs=1,
+    help="The comparison script. [fuzzy, fuzzyData, exact, <path_to_script>] where the script takes two files as arguments.",
+)
+parser.add_argument(
+    "-f",
+    "--files",
+    nargs="+",
+    help="Pairs of file names (first reference, then current). Usage: '[-f ref1 cur1 [[ref2] [cur2] ...]]'",
+)
+parser.add_argument(
+    "-d", "--delimiter", type=str, default=",", help="Column delimiter for data files"
+)
+parser.add_argument(
+    "-r",
+    "--relative",
+    type=float,
+    default=1e-2,
+    help="maximum relative error (default=1e-2) when using fuzzy comparison",
+)
+parser.add_argument(
+    "-a",
+    "--absolute",
+    type=float,
+    default=1.5e-7,
+    help="maximum absolute error (default=1.5e-7) when using fuzzy comparison",
+)
+parser.add_argument(
+    "-z",
+    "--zeroThreshold",
+    type=json.loads,
+    default="{}",
+    help='Thresholds for treating numbers as zero for a parameter as a python dict e.g. {"vel":1e-7,"delP":1.0}',
+)
 args = vars(parser.parse_args())
 
 # check parameters
-if args['script']:
-    if len(args['files'])%2 != 0 or not args['files']:
-        sys.stderr.write("The files have to be pairs of reference and current solution files. Usage '-f [ref1] [cur1] [[ref2] [cur2] ...]'")
+if args["script"]:
+    if len(args["files"]) % 2 != 0 or not args["files"]:
+        sys.stderr.write(
+            "The files have to be pairs of reference and current solution files. Usage '-f [ref1] [cur1] [[ref2] [cur2] ...]'"
+        )
         parser.print_help()
         sys.exit(1)
-    for i in range(0, len(args['files'])//2):
+    for i in range(0, len(args["files"]) // 2):
         # delete the vtu files to compare
         ref_dir = os.path.dirname(os.path.abspath(__file__)).rstrip("bin") + "test/references"
-        if os.path.dirname(args['files'][(i*2)+1]) == ref_dir:
-            sys.stderr.write("Tried to delete a reference solution. Specify reference file first, then the current solution. Usage: '[-f ref1 cur1 [[ref2] [cur2] ...]]'")
+        if os.path.dirname(args["files"][(i * 2) + 1]) == ref_dir:
+            sys.stderr.write(
+                "Tried to delete a reference solution. Specify reference file first, then the current solution. Usage: '[-f ref1 cur1 [[ref2] [cur2] ...]]'"
+            )
             sys.exit(1)
-        subprocess.call(['rm', '-fv', args['files'][(i*2)+1]])
+        subprocess.call(["rm", "-fv", args["files"][(i * 2) + 1]])
 
 # run the test
 res = 1
 try:
-    res = subprocess.call(shlex.split(args['command'][0]))
+    res = subprocess.call(shlex.split(args["command"][0]))
 except OSError as e:
-    print(args['command'][0].split())
+    print(args["command"][0].split())
     print("OSError: Command not found. Most likely the executable specified doesn't exist.")
     sys.exit(1)
 if res:
     sys.exit(res)
 
 # run the comparison
-if args['script']:
+if args["script"]:
     # exact comparison?
-    if args['script'] == ['exact']:
+    if args["script"] == ["exact"]:
         return_code = 0
-        for i in range(0, len(args['files'])//2):
+        for i in range(0, len(args["files"]) // 2):
             print("\nExact comparison...")
-            result = subprocess.call(['diff', args['files'][i*2], args['files'][(i*2)+1]])
+            result = subprocess.call(["diff", args["files"][i * 2], args["files"][(i * 2) + 1]])
             if result:
                 return_code = 1
         sys.exit(return_code)
 
     # fuzzy comparison?
-    elif args['script'] == ["fuzzy"] or args['script'] == [os.path.dirname(os.path.abspath(__file__)) + "/fuzzycomparevtu.py"]:
+    elif args["script"] == ["fuzzy"] or args["script"] == [
+        os.path.dirname(os.path.abspath(__file__)) + "/fuzzycomparevtu.py"
+    ]:
         return_code = 0
-        for i in range(0, len(args['files'])//2):
+        for i in range(0, len(args["files"]) // 2):
             print("\nFuzzy comparison...")
-            result = compare_vtk(args['files'][i*2], args['files'][(i*2)+1], relative=args['relative'], absolute=args['absolute'], zeroValueThreshold=args['zeroThreshold'])
+            result = compare_vtk(
+                args["files"][i * 2],
+                args["files"][(i * 2) + 1],
+                relative=args["relative"],
+                absolute=args["absolute"],
+                zeroValueThreshold=args["zeroThreshold"],
+            )
             if result:
                 return_code = 1
         sys.exit(return_code)
 
     # fuzzy comparison of data sets?
-    elif args['script'] == ["fuzzyData"]:
+    elif args["script"] == ["fuzzyData"]:
         return_code = 0
-        for i in range(0, len(args['files'])//2):
+        for i in range(0, len(args["files"]) // 2):
             print("\nFuzzy data comparison...")
-            result = compare_data(args['files'][i*2], args['files'][(i*2)+1], args['delimiter'], relative=args['relative'], absolute=args['absolute'], zeroValueThreshold=args['zeroThreshold'])
+            result = compare_data(
+                args["files"][i * 2],
+                args["files"][(i * 2) + 1],
+                args["delimiter"],
+                relative=args["relative"],
+                absolute=args["absolute"],
+                zeroValueThreshold=args["zeroThreshold"],
+            )
             if result:
                 return_code = 1
         sys.exit(return_code)
@@ -78,9 +133,11 @@ if args['script']:
     # other script?
     else:
         return_code = 0
-        for i in range(0, len(args['files'])//2):
-            print("\n{} comparison...".format(args['script']))
-            result = subprocess.call(args['script'], args['files'][i*2], args['files'][(i*2)+1])
+        for i in range(0, len(args["files"]) // 2):
+            print("\n{} comparison...".format(args["script"]))
+            result = subprocess.call(
+                args["script"] + [args["files"][i * 2], args["files"][(i * 2) + 1]]
+            )
             if result:
                 return_code = 1
         sys.exit(return_code)
diff --git a/bin/util/common.py b/bin/util/common.py
index 70a1fe3786..e8a7dc201d 100644
--- a/bin/util/common.py
+++ b/bin/util/common.py
@@ -24,18 +24,16 @@ def styledBotPrint(s, style="none", **kwargs):
     sys.stdout.write(TERM_FORMATTING["reset"])
 
 
-def addPrefix(prefix, text, separator=' '):
+def addPrefix(prefix, text, separator=" "):
     return prefix + separator + text
 
 
-def addPrefixToLines(prefix, text, separator=' '):
-    return '\n'.join(
-        addPrefix(prefix, line, separator) for line in text.split('\n')
-    )
+def addPrefixToLines(prefix, text, separator=" "):
+    return "\n".join(addPrefix(prefix, line, separator) for line in text.split("\n"))
 
 
 def escapeCharacter(text, character, escCharacter="\\"):
-    return text.replace(character, f'{escCharacter}{character}')
+    return text.replace(character, f"{escCharacter}{character}")
 
 
 def escapeCharacters(text, characters, escCharacter="\\"):
@@ -44,10 +42,10 @@ def escapeCharacters(text, characters, escCharacter="\\"):
     return text
 
 
-def indent(text, indentation='  '):
-    text = text.split('\n')
+def indent(text, indentation="  "):
+    text = text.split("\n")
     text = [indentation + line for line in text]
-    return '\n'.join(text)
+    return "\n".join(text)
 
 
 def makeTable(dictList, config=None, padding=2):
@@ -55,7 +53,7 @@ def makeTable(dictList, config=None, padding=2):
         config = {key: key for d in dictList for key in d}
 
     def getColWidth(row):
-        return max(len(str(r)) for r in row) + padding*2
+        return max(len(str(r)) for r in row) + padding * 2
 
     def getCol(key):
         return [config[key]] + [d.get(key, "") for d in dictList]
@@ -69,20 +67,22 @@ def makeTable(dictList, config=None, padding=2):
         return row
 
     table = [makeRow({key: config[key] for key in config})]
-    table.append('|' + '|'.join('-'*widths[key] for key in config) + '|')
+    table.append("|" + "|".join("-" * widths[key] for key in config) + "|")
     table.extend(makeRow(row) for row in dictList)
-    return '\n'.join(table)
+    return "\n".join(table)
 
 
 def getCommandErrorHints(command):
     if "git " in command:
-        return "It seems that a git command failed. Please check:\n" \
-               "    -- is the module registered as git repository?\n" \
-               "    -- is upstream defined for the branch?"
+        return (
+            "It seems that a git command failed. Please check:\n"
+            "    -- is the module registered as git repository?\n"
+            "    -- is upstream defined for the branch?"
+        )
     return None
 
 
-def runCommand(command, check=True, suppressTraceBack=False, errorMessage=''):
+def runCommand(command, check=True, suppressTraceBack=False, errorMessage=""):
     """execute a command and retrieve the output"""
 
     try:
@@ -116,15 +116,17 @@ def callFromPath(path):
             result = callFunc(*args, **kwargs)
             os.chdir(curPath)
             return result
+
         return wrapper_callFromPath
+
     return decorator_callFromPath
 
 
 def userQuery(query, choices=None):
     """query something from the user"""
 
-    choicesString = ', '.join(str(c) for c in choices) if choices else ''
-    querySuffix = f" (choices: {choicesString})\n" if choices else ' '
+    choicesString = ", ".join(str(c) for c in choices) if choices else ""
+    querySuffix = f" (choices: {choicesString})\n" if choices else " "
 
     while True:
         styledBotPrint(f"{query.strip()}{querySuffix}", style="highlight")
@@ -132,8 +134,7 @@ def userQuery(query, choices=None):
 
         if choices and inp not in choices:
             styledBotPrint(
-                f"Invalid answer: '{inp}'. Choose from {choicesString}.",
-                style="warning"
+                f"Invalid answer: '{inp}'. Choose from {choicesString}.", style="warning"
             )
         else:
             return inp
@@ -148,13 +149,19 @@ def queryYesNo(question, default="yes"):
     def getChoices():
         return ", ".join(c for c in affirmative + negative)
 
-    def isAffirmative(choice): return choice in affirmative
-    def isNegative(choice): return choice in negative
-    def isValid(choice): return isAffirmative(choice) or isNegative(choice)
+    def isAffirmative(choice):
+        return choice in affirmative
+
+    def isNegative(choice):
+        return choice in negative
+
+    def isValid(choice):
+        return isAffirmative(choice) or isNegative(choice)
 
     if default is not None and not isValid(default):
-        raise ValueError("\nInvalid default answer: '{}', choices: '{}'\n"
-                         .format(default, getChoices()))
+        raise ValueError(
+            "\nInvalid default answer: '{}', choices: '{}'\n".format(default, getChoices())
+        )
 
     if default is None:
         prompt = " [y/n] "
@@ -170,30 +177,26 @@ def queryYesNo(question, default="yes"):
 
         if not isValid(choice):
             styledBotPrint(
-                f"Invalid answer: '{choice}'. Choose from '{getChoices()}'",
-                style="warning"
+                f"Invalid answer: '{choice}'. Choose from '{getChoices()}'", style="warning"
             )
         else:
             return True if isAffirmative(choice) else False
 
 
 def cppHeaderFilter():
-    return lambda fileName: fileName == 'config.h'
+    return lambda fileName: fileName == "config.h"
 
 
-def includedCppProjectHeaders(file,
-                              projectBase,
-                              headers=[],
-                              headerFilter=cppHeaderFilter()):
+def includedCppProjectHeaders(file, projectBase, headers=[], headerFilter=cppHeaderFilter()):
     """get all project headers included by a cpp file"""
 
     filePath = os.path.join(projectBase, file)
     if not os.path.exists(filePath):
-        raise IOError(f'Cpp file {filePath} does not exist')
+        raise IOError(f"Cpp file {filePath} does not exist")
 
-    with open(filePath, 'r') as f:
+    with open(filePath, "r") as f:
         content = f.read()
-        headerInBracket = re.findall(r'#include\s+<(.+?)>', content)
+        headerInBracket = re.findall(r"#include\s+<(.+?)>", content)
         headerInQuotation = re.findall(r'#include\s+"(.+?)"', content)
 
         def process(pathInProject):
@@ -202,10 +205,7 @@ def includedCppProjectHeaders(file,
                 if not headerFilter(pathInProject):
                     if headerPath not in headers:
                         headers.append(headerPath)
-                        includedCppProjectHeaders(
-                            headerPath, projectBase,
-                            headers, headerFilter
-                        )
+                        includedCppProjectHeaders(headerPath, projectBase, headers, headerFilter)
 
         for header in headerInBracket:
             process(header)
@@ -228,57 +228,57 @@ def findMatchingFiles(path, pattern):
     return result
 
 
-def isGitRepository(pathToRepo='.'):
+def isGitRepository(pathToRepo="."):
     try:
         run = callFromPath(pathToRepo)(runCommand)
-        run('git status')
+        run("git status")
         return True
     except Exception:
         return False
 
 
-def getRemote(pathToRepo='.'):
+def getRemote(pathToRepo="."):
     run = callFromPath(pathToRepo)(runCommand)
-    return run('git ls-remote --get-url').strip('\n')
+    return run("git ls-remote --get-url").strip("\n")
 
 
-def fetchRepo(remote, pathToRepo='.'):
+def fetchRepo(remote, pathToRepo="."):
     run = callFromPath(pathToRepo)(runCommand)
-    run('git fetch {}'.format(remote))
+    run("git fetch {}".format(remote))
 
 
-def hasUntrackedFiles(pathToRepo='.'):
+def hasUntrackedFiles(pathToRepo="."):
     run = callFromPath(pathToRepo)(runCommand)
-    return run('git ls-files --others --exclude-standard') != ''
+    return run("git ls-files --others --exclude-standard") != ""
 
 
 def isPersistentBranch(branchName):
-    if branchName == 'origin/master':
+    if branchName == "origin/master":
         return True
-    if branchName.startswith('origin/releases/'):
+    if branchName.startswith("origin/releases/"):
         return True
     return False
 
 
 # get the most recent commit that also exists on remote master/release branch
 # may be used to find a commit we can use as basis for a pub module
-def mostRecentCommonCommitWithRemote(modFolderPath,
-                                     branchFilter=isPersistentBranch):
+def mostRecentCommonCommitWithRemote(modFolderPath, branchFilter=isPersistentBranch):
     run = callFromPath(modFolderPath)(runCommand)
 
     def findBranches(sha):
-        candidates = run('git branch -r --contains {}'.format(sha)).split('\n')
-        candidates = [branch.strip().split(' ->')[0] for branch in candidates]
+        candidates = run("git branch -r --contains {}".format(sha)).split("\n")
+        candidates = [branch.strip().split(" ->")[0] for branch in candidates]
         return list(filter(branchFilter, candidates))
 
-    revList = run('git rev-list HEAD').split('\n')
+    revList = run("git rev-list HEAD").split("\n")
     for rev in revList:
         branches = findBranches(rev)
         if branches:
             return branches[0], rev
 
-    raise RuntimeError('Could not find suitable ancestor commit'
-                       ' on a branch that matches the given filter')
+    raise RuntimeError(
+        "Could not find suitable ancestor commit" " on a branch that matches the given filter"
+    )
 
 
 # function to extract persistent, remotely available git versions for all
@@ -288,35 +288,32 @@ def getPersistentVersions(modFolderPaths, ignoreUntracked=False):
     for modFolderPath in modFolderPaths:
 
         if not isGitRepository(modFolderPath):
-            raise Exception('Folder is not a git repository')
+            raise Exception("Folder is not a git repository")
 
         if hasUntrackedFiles(modFolderPath) and not ignoreUntracked:
             raise Exception(
                 "Found untracked files in '{}'. "
                 "Please commit, stash, or remove them. Alternatively, if you "
-                "are sure they are not needed set ignoreUntracked=True"
-                .format(modFolderPath)
+                "are sure they are not needed set ignoreUntracked=True".format(modFolderPath)
             )
 
         result[modFolderPath] = {}
-        result[modFolderPath]['remote'] = getRemote(modFolderPath)
+        result[modFolderPath]["remote"] = getRemote(modFolderPath)
 
         # update remote to make sure we find all upstream commits
-        fetchRepo(result[modFolderPath]['remote'], modFolderPath)
+        fetchRepo(result[modFolderPath]["remote"], modFolderPath)
 
         branch, rev = mostRecentCommonCommitWithRemote(modFolderPath)
         run = callFromPath(modFolderPath)(runCommand)
 
-        result[modFolderPath]['revision'] = rev
-        result[modFolderPath]['date'] = run(
-            'git log -n 1 --format=%ai {}'.format(rev)
-        ).strip('\n')
-        result[modFolderPath]['author'] = run(
-            'git log -n 1 --format=%an {}'.format(rev)
-        ).strip('\n')
+        result[modFolderPath]["revision"] = rev
+        result[modFolderPath]["date"] = run("git log -n 1 --format=%ai {}".format(rev)).strip("\n")
+        result[modFolderPath]["author"] = run("git log -n 1 --format=%an {}".format(rev)).strip(
+            "\n"
+        )
 
         # this may return HEAD if we are on some detached HEAD tree
-        result[modFolderPath]['branch'] = branch
+        result[modFolderPath]["branch"] = branch
 
     return result
 
@@ -326,35 +323,48 @@ def getPatches(persistentVersions):
     for path, gitInfo in persistentVersions.items():
         run = callFromPath(path)(runCommand)
 
-        uncommittedPatch = run('git diff')
-        unpublishedPatch = run(
-            'git format-patch --stdout {}'.format(gitInfo['revision'])
-        )
-        untrackedPatch = ''
-        untrackedFiles = run('git ls-files --others --exclude-standard')
+        uncommittedPatch = run("git diff")
+        unpublishedPatch = run("git format-patch --stdout {}".format(gitInfo["revision"]))
+        untrackedPatch = ""
+        untrackedFiles = run("git ls-files --others --exclude-standard")
         binaryExtension = (
-            '.png', '.gif', '.jpg', '.tiff', '.bmp', '.DS_Store', '.eot', '.otf', '.ttf', '.woff', '.rgb', '.pdf',
+            ".png",
+            ".gif",
+            ".jpg",
+            ".tiff",
+            ".bmp",
+            ".DS_Store",
+            ".eot",
+            ".otf",
+            ".ttf",
+            ".woff",
+            ".rgb",
+            ".pdf",
         )
         if untrackedFiles:
             for file in untrackedFiles.splitlines():
                 if not str(file).endswith(binaryExtension):
-                    untrackedPatch += run('git --no-pager diff /dev/null {}'.format(file), check=False)
+                    untrackedPatch += run(
+                        "git --no-pager diff /dev/null {}".format(file), check=False
+                    )
 
         result[path] = {}
-        result[path]['untracked'] = untrackedPatch if untrackedPatch else None
-        result[path]['unpublished'] = unpublishedPatch if unpublishedPatch else None
-        result[path]['uncommitted'] = uncommittedPatch if uncommittedPatch else None
+        result[path]["untracked"] = untrackedPatch if untrackedPatch else None
+        result[path]["unpublished"] = unpublishedPatch if unpublishedPatch else None
+        result[path]["uncommitted"] = uncommittedPatch if uncommittedPatch else None
     return result
 
 
-def versionTable(versions,
-                 config={
-                     'name': 'module name',
-                     'branch': 'branch name',
-                     'revision': 'commit sha',
-                     'date': 'commit date'
-                 },
-                 padding=2):
+def versionTable(
+    versions,
+    config={
+        "name": "module name",
+        "branch": "branch name",
+        "revision": "commit sha",
+        "date": "commit date",
+    },
+    padding=2,
+):
     return makeTable(versions, config)
 
 
diff --git a/bin/util/installscript.py b/bin/util/installscript.py
index 1d8669d75b..c65125bd34 100644
--- a/bin/util/installscript.py
+++ b/bin/util/installscript.py
@@ -17,36 +17,30 @@ if sys.version_info[0] < 3:
 
 
 def supportedLanguages():
-    return ['python', 'bash']
+    return ["python", "bash"]
 
 
 def getScriptExtension(language):
     assert language in supportedLanguages()
-    ext = {
-        'python': '.py',
-        'bash': '.sh'
-    }
+    ext = {"python": ".py", "bash": ".sh"}
     return ext[language]
 
 
 def makeScriptWriter(language):
-    if language == 'bash':
+    if language == "bash":
         return InstallScriptWriterBash()
-    elif language == 'python':
+    elif language == "python":
         return InstallScriptWriterPython()
-    raise ValueError(f'Could not create writer for language {language}')
+    raise ValueError(f"Could not create writer for language {language}")
 
 
 def getDefaultScriptName(modName, language):
-    return 'install_{}{}'.format(
-        modName,
-        getScriptExtension(language)
-    )
+    return "install_{}{}".format(modName, getScriptExtension(language))
 
 
 def printProgressInfo(infoLines, indLevel=0):
-    firstPrefix = '\n' + '--'*(indLevel+1)
-    emptyPrefix = firstPrefix.replace('-', ' ').strip('\n')
+    firstPrefix = "\n" + "--" * (indLevel + 1)
+    emptyPrefix = firstPrefix.replace("-", " ").strip("\n")
     print(f"{firstPrefix} {infoLines[0]}")
     for line in infoLines[1:]:
         print(f"{emptyPrefix} {line}")
@@ -56,20 +50,18 @@ def filterDependencies(dependencies, skipFolders=[]):
     if not skipFolders:
         return dependencies
     else:
+
         def skipFolder(folderName):
             return any(folderName == os.path.basename(path) for path in skipFolders)
-        return [
-            dep for dep in dependencies if not skipFolder(dep['folder'])
-        ]
+
+        return [dep for dep in dependencies if not skipFolder(dep["folder"])]
 
 
 def addDependencyVersions(dependencies, ignoreUntracked=False):
     def getKey(dependency):
-        return dependency['path']
+        return dependency["path"]
 
-    versions = getPersistentVersions(
-        [getKey(d) for d in dependencies], ignoreUntracked
-    )
+    versions = getPersistentVersions([getKey(d) for d in dependencies], ignoreUntracked)
     if len(versions) != len(dependencies):
         raise Exception("Not all versions of all modules could be found.")
 
@@ -82,11 +74,9 @@ def addDependencyVersions(dependencies, ignoreUntracked=False):
 
 def addDependencyPatches(dependenciesWithVersions):
     def getKey(dependency):
-        return dependency['path']
+        return dependency["path"]
 
-    patches = getPatches({
-        getKey(d): d for d in dependenciesWithVersions
-    })
+    patches = getPatches({getKey(d): d for d in dependenciesWithVersions})
 
     mergedResult = []
     for depInfo in dependenciesWithVersions:
@@ -95,34 +85,33 @@ def addDependencyPatches(dependenciesWithVersions):
     return mergedResult
 
 
-def makeInstallScript(modPath,
-                      dependencies,
-                      scriptName,
-                      writer,
-                      topFolderName='DUMUX',
-                      optsFile=None):
+def makeInstallScript(
+    modPath, dependencies, scriptName, writer, topFolderName="DUMUX", optsFile=None
+):
 
     modPath = os.path.abspath(modPath)
-    modName = getModuleInfo(modPath, 'Module')
+    modName = getModuleInfo(modPath, "Module")
 
-    modOptsFile = '{}/cmake.opts'.format(modPath)
+    modOptsFile = "{}/cmake.opts".format(modPath)
     if not optsFile:
         if os.path.isfile(modOptsFile):
-            optsFile = '{}/cmake.opts'.format(os.path.relpath(modPath))
+            optsFile = "{}/cmake.opts".format(os.path.relpath(modPath))
         else:
-            optsFile = 'dumux/cmake.opts'
+            optsFile = "dumux/cmake.opts"
     if os.path.isabs(optsFile):
         raise ValueError("Opts file must be given as relative path")
-    if not any(optsFile.startswith(d['folder']) for d in dependencies):
+    if not any(optsFile.startswith(d["folder"]) for d in dependencies):
         print("Warning: opts file is not contained in any of the dependencies")
 
-    with open(scriptName, 'w') as script:
+    with open(scriptName, "w") as script:
 
         writer.setOutputStream(script)
         writer.writeSheBang()
 
-        script.write('\n')
-        writer.writeComment(textwrap.dedent(f"""\
+        script.write("\n")
+        writer.writeComment(
+            textwrap.dedent(
+                f"""\
 
             This installs the module {modName} and its dependencies.
             The exact revisions used are listed in the table below.
@@ -130,74 +119,76 @@ def makeInstallScript(modPath,
             If so, all patches are required to be the current folder, or,
             in the one that you specified as argument to this script.
 
-        """))
+        """
+            )
+        )
 
-        script.write('\n')
+        script.write("\n")
         writer.writeComment(versionTable(dependencies))
 
-        script.write('\n')
+        script.write("\n")
         writer.writePreamble(topFolderName)
 
         for dep in dependencies:
-            script.write('\n')
-            writer.writeMessageOutput('Installing {}'.format(dep['name']))
+            script.write("\n")
+            writer.writeMessageOutput("Installing {}".format(dep["name"]))
             writer.writeInstallation(dep)
 
         for dep in dependencies:
+
             def writePatch(patch, moduleName, description):
-                script.write('\n')
-                writer.writeMessageOutput(
-                    f'Applying patch for {description} in {moduleName}'
-                )
-                writer.writePatchApplication(dep['folder'], patch)
-
-            if dep['untracked'] is not None:
-                description = 'untracked files'
-                writePatch(dep['untracked'], description, dep['name'])
-            if dep['unpublished'] is not None:
-                description = 'unpublished commits'
-                writePatch(dep['unpublished'], description, dep['name'])
-            if dep['uncommitted'] is not None:
-                description = 'uncommitted changes'
-                writePatch(dep['uncommitted'], description, dep['name'])
-
-        script.write('\n')
-        writer.writeMessageOutput('Configuring project')
+                script.write("\n")
+                writer.writeMessageOutput(f"Applying patch for {description} in {moduleName}")
+                writer.writePatchApplication(dep["folder"], patch)
+
+            if dep["untracked"] is not None:
+                description = "untracked files"
+                writePatch(dep["untracked"], dep["name"], description)
+            if dep["unpublished"] is not None:
+                description = "unpublished commits"
+                writePatch(dep["unpublished"], dep["name"], description)
+            if dep["uncommitted"] is not None:
+                description = "uncommitted changes"
+                writePatch(dep["uncommitted"], dep["name"], description)
+
+        script.write("\n")
+        writer.writeMessageOutput("Configuring project")
         writer.writeConfiguration(optsFile)
 
 
 def printFoundDependencies(deps):
     if len(deps) > 0:
         infoText = ["Found the following dependencies"]
-        infoText.extend(
-            versionTable(
-                deps, {'name': 'module name', 'path': 'folder'}
-            ).split('\n')
-        )
+        infoText.extend(versionTable(deps, {"name": "module name", "path": "folder"}).split("\n"))
         printProgressInfo(infoText)
 
 
 def printFoundVersionInfo(dependenciesWithVersions):
     table = versionTable(dependenciesWithVersions)
     printProgressInfo(
-        ["The following (remotely available) versions are used as a basis",
-         "on top of which the required patches will be automatically created:",
-         "\n{}".format(table)]
+        [
+            "The following (remotely available) versions are used as a basis",
+            "on top of which the required patches will be automatically created:",
+            "\n{}".format(table),
+        ]
     )
 
 
-def printFinalMessage(scriptName,
-                      topFolderName=None):
+def printFinalMessage(scriptName, topFolderName=None):
 
     if topFolderName:
-        description = textwrap.dedent(f"""\
+        description = textwrap.dedent(
+            f"""\
             Running this script will create a folder `{topFolderName}`, clone all modules
             into it, configure the entire project and build the contained applications
-        """)
+        """
+        )
     else:
-        description = textwrap.dedent(f"""\
+        description = textwrap.dedent(
+            f"""\
             Running this script will clone all modules into the folder from which it is
             called, configure the entire project and build the contained applications
-        """)
+        """
+        )
 
-    printProgressInfo(['Info:', description])
+    printProgressInfo(["Info:", description])
diff --git a/bin/util/installscript_writer.py b/bin/util/installscript_writer.py
index 0eb133b344..72884693de 100644
--- a/bin/util/installscript_writer.py
+++ b/bin/util/installscript_writer.py
@@ -8,8 +8,12 @@ from util.common import addPrefixToLines, escapeCharacters
 
 
 def getRawString(text):
-    def makeRaw(text): return repr(text)
-    def removeEnclosingQuotes(text): return text[1:-1]
+    def makeRaw(text):
+        return repr(text)
+
+    def removeEnclosingQuotes(text):
+        return text[1:-1]
+
     return removeEnclosingQuotes(makeRaw(text))
 
 
@@ -54,17 +58,19 @@ class InstallScriptWriterBash(InstallScriptWriterInterface):
         super().__init__()
 
     def writeSheBang(self):
-        self.ostream.write('#!/bin/bash\n')
+        self.ostream.write("#!/bin/bash\n")
 
     def writeComment(self, comment):
-        comment = addPrefixToLines('#', comment)
+        comment = addPrefixToLines("#", comment)
         self.ostream.write(comment)
 
     def writeMessageOutput(self, message):
         self.ostream.write(f'echo "{message}"\n')
 
     def writePreamble(self, topFolderName=None):
-        self.ostream.write(textwrap.dedent("""\
+        self.ostream.write(
+            textwrap.dedent(
+                """\
 
             exitWithError()
             {
@@ -103,39 +109,44 @@ class InstallScriptWriterBash(InstallScriptWriterInterface):
                 popd
             }
 
-        """))
+        """
+            )
+        )
         top = topFolderName if topFolderName else "."
         self.ostream.write('TOP="{}"\n'.format(top))
-        self.ostream.write('mkdir -p $TOP\n')
-        self.ostream.write('cd $TOP\n')
+        self.ostream.write("mkdir -p $TOP\n")
+        self.ostream.write("cd $TOP\n")
 
     def writeInstallation(self, dependency):
-        self.ostream.write('installModule {} {} {} {}'
-                           .format(dependency['folder'],
-                                   dependency['remote'],
-                                   dependency['branch'],
-                                   dependency['revision']))
+        self.ostream.write(
+            "installModule {} {} {} {}".format(
+                dependency["folder"],
+                dependency["remote"],
+                dependency["branch"],
+                dependency["revision"],
+            )
+        )
 
     def writePatchApplication(self, folder, patchContent):
         def removeEscapedSingleQuotes(line):
             return line.replace(r"\'", "'")
 
         self.ostream.write('PATCH="\n')
-        for line in patchContent.rstrip('\n').split('\n'):
+        for line in patchContent.rstrip("\n").split("\n"):
             line = getRawString(line)
             line = removeEscapedSingleQuotes(line)
             line = escapeCharacters(line, ['"', "$", "`"])
             self.ostream.write(line)
-            self.ostream.write('\n')
+            self.ostream.write("\n")
         self.ostream.write('"\n')
         self.ostream.write(f'applyPatch {folder} "$PATCH"')
 
     def writeConfiguration(self, opts):
         self.ostream.write(
-            f'if ! ./dune-common/bin/dunecontrol --opts={opts} all; then\n'
+            f"if ! ./dune-common/bin/dunecontrol --opts={opts} all; then\n"
             '    echo "Configuration of the project failed"\n'
-            '    exit 1\n'
-            'fi\n'
+            "    exit 1\n"
+            "fi\n"
         )
 
 
@@ -144,10 +155,10 @@ class InstallScriptWriterPython(InstallScriptWriterInterface):
         super().__init__()
 
     def writeSheBang(self):
-        self.ostream.write('#!/usr/bin/env python3\n')
+        self.ostream.write("#!/usr/bin/env python3\n")
 
     def writeComment(self, comment):
-        comment = addPrefixToLines('#', comment)
+        comment = addPrefixToLines("#", comment)
         self.ostream.write(comment)
 
     def writeMessageOutput(self, message):
@@ -155,7 +166,9 @@ class InstallScriptWriterPython(InstallScriptWriterInterface):
 
     def writePreamble(self, topFolderName=None):
         top = topFolderName if topFolderName else "."
-        self.ostream.write(textwrap.dedent(f"""\
+        self.ostream.write(
+            textwrap.dedent(
+                f"""\
 
             import os
             import sys
@@ -194,21 +207,26 @@ class InstallScriptWriterPython(InstallScriptWriterInterface):
                     patchFile.write(patch)
                 runFromSubFolder(['git', 'apply', 'tmp.patch'], subFolder)
                 os.remove(patchPath)
-        """))
+        """
+            )
+        )
 
     def writeInstallation(self, dependency):
-        self.ostream.write('installModule("{}", "{}", "{}", "{}")\n'
-                           .format(dependency['folder'],
-                                   dependency['remote'],
-                                   dependency['branch'],
-                                   dependency['revision']))
+        self.ostream.write(
+            'installModule("{}", "{}", "{}", "{}")\n'.format(
+                dependency["folder"],
+                dependency["remote"],
+                dependency["branch"],
+                dependency["revision"],
+            )
+        )
 
     def writePatchApplication(self, folder, patchContent):
         self.ostream.write('patch = """\n')
-        for line in patchContent.rstrip('\n').split('\n'):
+        for line in patchContent.rstrip("\n").split("\n"):
             line = getRawString(line)
             self.ostream.write(escapeCharacters(line, ['"']))
-            self.ostream.write('\n')
+            self.ostream.write("\n")
         self.ostream.write('"""\n')
 
         self.ostream.write(f'applyPatch("{folder}", patch)\n')
diff --git a/bin/util/moduleinfo.py b/bin/util/moduleinfo.py
index ad7a0600eb..643924c149 100644
--- a/bin/util/moduleinfo.py
+++ b/bin/util/moduleinfo.py
@@ -5,9 +5,9 @@ from util.common import callFromPath
 
 def extractModuleInfos(moduleFile, keys):
     results = {}
-    with open(moduleFile, 'r') as modFile:
+    with open(moduleFile, "r") as modFile:
         for line in modFile.readlines():
-            line = line.strip('\n').split(':')
+            line = line.strip("\n").split(":")
             if line[0] in keys:
                 results[line[0]] = line[1].strip()
             if len(results) == len(keys):
@@ -23,7 +23,7 @@ def extractModuleInfos(moduleFile, keys):
 
 
 def getModuleFile(modulePath):
-    modFile = os.path.join(modulePath, 'dune.module')
+    modFile = os.path.join(modulePath, "dune.module")
     if not os.path.exists(modFile):
         raise RuntimeError("Could not find module file")
     return modFile
@@ -34,27 +34,26 @@ def getModuleInfo(modulePath, key):
 
 
 def getDependencies(modulePath, verbose=False, includeSelf=False):
-    modName = getModuleInfo(modulePath, 'Module')
-    parentPath = os.path.join(modulePath, '../')
-    duneControlPath = os.path.join(parentPath, 'dune-common/bin/dunecontrol')
+    modName = getModuleInfo(modulePath, "Module")
+    parentPath = os.path.join(modulePath, "../")
+    duneControlPath = os.path.join(parentPath, "dune-common/bin/dunecontrol")
     if not os.path.exists(duneControlPath):
         raise RuntimeError(
-            'Could not find dunecontrol, expected it to be in {}'
-            .format(duneControlPath)
+            "Could not find dunecontrol, expected it to be in {}".format(duneControlPath)
         )
 
     dcOutput = callFromPath(parentPath)(runCommand)(
-        './dune-common/bin/dunecontrol --module={}'.format(modName)
+        "./dune-common/bin/dunecontrol --module={}".format(modName)
     )
 
     if not dcOutput:
         raise RuntimeError("Error: call to dunecontrol failed.")
 
-    for line in dcOutput.split('\n'):
+    for line in dcOutput.split("\n"):
         if "going to build" in line:
-            line = line.replace('going to build', '').strip('-')
-            line = line.strip('\n').strip()
-            line = line.split(' ')
+            line = line.replace("going to build", "").strip("-")
+            line = line.strip("\n").strip()
+            line = line.split(" ")
             deps = line
 
     if not includeSelf:
@@ -68,20 +67,17 @@ def getDependencies(modulePath, verbose=False, includeSelf=False):
     parentFiles = [os.path.join(parentPath, d) for d in os.listdir(parentPath)]
     for path in filter(os.path.isdir, parentFiles):
         try:
-            depModName = getModuleInfo(path, 'Module')
+            depModName = getModuleInfo(path, "Module")
         except Exception:
             if verbose:
-                print(f" --- skipping folder '{path}' "
-                      "as it could not be identifed as dune module")
+                print(
+                    f" --- skipping folder '{path}' " "as it could not be identified as dune module"
+                )
         else:
             if verbose:
                 print(" --- visited module '{}'".format(depModName))
             if depModName in deps:
-                result.append({
-                    'name': depModName,
-                    'folder': os.path.basename(path),
-                    'path': path
-                })
+                result.append({"name": depModName, "folder": os.path.basename(path), "path": path})
 
     if len(result) != len(deps):
         raise RuntimeError("Could not find the folders of all dependencies")
-- 
GitLab