From 24d0979471875330ba8217282ee9119516784adf Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Sat, 19 Jun 2021 09:13:50 +0200
Subject: [PATCH] [PA-A] add new model exe file and updated input files.

---
 BayesValidRox/tests/PA-A/model_exe.py         |  62 ++++--
 .../tests/PA-A/models/stokes/averaging.py     |  18 +-
 .../models/stokes/ffpm_stokes_p_final.csv     |   5 +
 .../stokes/ffpm_stokes_velocity_final.csv     |  20 ++
 .../PA-A/models/stokes/model_stokes_exe.py    |  95 +++++++++
 .../stokesdarcy/averagedPressureValues.csv    |   2 +
 .../stokesdarcy/averagedVelocityValues.csv    |  12 ++
 .../stokesdarcy/averaging_stokesdarcy.py      | 198 ++++++++++++++++++
 .../stokesdarcy/stokesdarcy_BJ_params.input   |   4 +-
 .../stokesdarcy/stokesdarcy_ER_params.input   |   6 +-
 .../models/stokespnm/averaging_stokespnm.py   | 148 +++++++++++++
 .../models/stokespnm/stokespnm_params.input   |  10 +-
 BayesValidRox/tests/PA-A/ref_stokesdarcy.py   |  24 +--
 BayesValidRox/tests/PA-A/ref_stokespnm.py     |  16 +-
 .../PA-A/stokesdarcy_BJ_params.tpl.input      |   4 +-
 .../PA-A/stokesdarcy_ER_params.tpl.input      |   4 +-
 .../tests/PA-A/stokespnm_params.tpl.input     |  10 +-
 .../tests/PA-A/util/postDist_visualization.py |   6 +-
 .../tests/PA-A/util/post_plot_PAPER.py        |  10 +-
 19 files changed, 588 insertions(+), 66 deletions(-)
 create mode 100644 BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_p_final.csv
 create mode 100644 BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_velocity_final.csv
 create mode 100755 BayesValidRox/tests/PA-A/models/stokes/model_stokes_exe.py
 create mode 100644 BayesValidRox/tests/PA-A/models/stokesdarcy/averagedPressureValues.csv
 create mode 100644 BayesValidRox/tests/PA-A/models/stokesdarcy/averagedVelocityValues.csv
 create mode 100755 BayesValidRox/tests/PA-A/models/stokesdarcy/averaging_stokesdarcy.py
 create mode 100755 BayesValidRox/tests/PA-A/models/stokespnm/averaging_stokespnm.py

diff --git a/BayesValidRox/tests/PA-A/model_exe.py b/BayesValidRox/tests/PA-A/model_exe.py
index c67e3ead7..74bbca5ee 100755
--- a/BayesValidRox/tests/PA-A/model_exe.py
+++ b/BayesValidRox/tests/PA-A/model_exe.py
@@ -18,11 +18,16 @@ parser = argparse.ArgumentParser(
 )
 parser.add_argument('model', help="model name")
 parser.add_argument('file', help="input file")
+parser.add_argument('-a', '--averaging', default=True, type=lambda s: str(s).lower() not in ('false', '0', 'no'), help='Uses averaging scripts to average quantities in the porous medium. True = averaging. False = no averaging')
 args = vars(parser.parse_args())
 
 # Specification
 model = args['model']
 inputFile = args['file']
+averaging = args['averaging']
+
+velocity_points = "../models/velocity_points.csv"
+pressure_points = "../models/pressure_points.csv"
 
 # Run the model
 if model == 'stokesdarcy':
@@ -30,18 +35,21 @@ if model == 'stokesdarcy':
     model_dir = 'stokesdarcy'
     pvdfile1 = 'regular_2D_stokesdarcy_250_micron_stokes'
     pvdfile2 = 'regular_2D_stokesdarcy_250_micron_darcy'
-    velocity_points = "../models/velocity_points.csv"
-    pressure_points = "../models/pressure_points.csv"
+    darcyfile = 'regular_2D_stokesdarcy_250_micron_darcy-00001.vtu'
+    stokesfile = 'regular_2D_stokesdarcy_250_micron_stokes-00001.vtu'
 
 elif model == 'stokespnm':
     model_exe = 'stokes_pnm_regular_2d'
     model_dir = 'stokespnm'
     pvdfile1 = 'regular_2D_stokespnm_250_micron_stokes'
     pvdfile2 = 'regular_2D_stokespnm_250_micron_pnm'
-    velocity_points = "../models/velocity_points_pnm.csv"
-    pressure_points = "../models/pressure_points_pnm.csv"
-    
-    
+    darcyfile = 'regular_2D_stokespnm_250_micron_pnm-00001.vtp'
+    stokesfile = 'regular_2D_stokespnm_250_micron_stokes-00001.vtu'
+    if not averaging:
+        velocity_points = "../models/velocity_points_pnm.csv"
+        pressure_points = "../models/pressure_points_pnm.csv"
+
+
 else:
     raise Exception('This model does not exist. \
                     Please verify the model name.')
@@ -66,22 +74,52 @@ Command = "pvpython ../extractpointdataovertime.py \
 
 Process2 = os.system(Command + velocity_points)
 if Process2 != 0:
-    print('\nMessage 1:')
+    print('\nMessage 2:')
     print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process2)
 
 Process3 = os.system(Command + pressure_points + ' -var p')
 if Process3 != 0:
-    print('\nMessage 1:')
+    print('\nMessage 3:')
     print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process3)
 
+# Averaging
+if averaging:
+    if model == 'stokesdarcy':
+        Command_avg = "./../models/"+model_dir+"/averaging_stokesdarcy.py \
+            -fd "+darcyfile+" -fs "+stokesfile+ " -p "
+    elif model == 'stokespnm':
+        Command_avg = "./../models/"+model_dir+"/averaging_stokespnm.py \
+            -fd "+darcyfile+" -fs "+stokesfile+ " -p "
+
+    Process4 = os.system(Command_avg + velocity_points + ' ' + pressure_points)
+    if Process4 != 0:
+        print('\nMessage 4:')
+        print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process4)
+
+# Combine csv files and remove them
+pointVel = np.genfromtxt('ffpm_{}_velocity.csv'.format(model), delimiter=',')
+pointP = np.genfromtxt('ffpm_{}_p.csv'.format(model), delimiter=',')
+if averaging:
+    averagedVel = np.genfromtxt('averagedVelocityValues.csv', delimiter=',')
+    averagedP = np.genfromtxt('averagedPressureValues.csv', delimiter=',')
+    pointVel[:12] = averagedVel[:12] # 12: number of the points in pm
+    pointP[:2] = averagedP[:2] # 2: number of the points in pm
+# Save to csv files
+np.savetxt('ffpm_{}_velocity_final.csv'.format(model), pointVel, delimiter=',')
+np.savetxt('ffpm_{}_p_final.csv'.format(model), pointP, delimiter=',')
+
+
 # Zip auxillary files
-keys = ['vtu', 'vtp', 'pvd']
+keys = ['vtu', 'vtp', 'pvd', '.csv']
 filePaths = [path for path in os.listdir('.') for key in keys if key in path]
 
 zip_file = zipfile.ZipFile('outFiles.zip', 'w')
 with zip_file:
     # writing each file one by one
-    for file in filePaths:
-      zip_file.write(file)
+    for path in filePaths:
+        if 'final' not in path:
+            zip_file.write(path)
 
-for path in filePaths: os.remove(path)
+for path in filePaths:
+    if 'final' not in path:
+        os.remove(path)
diff --git a/BayesValidRox/tests/PA-A/models/stokes/averaging.py b/BayesValidRox/tests/PA-A/models/stokes/averaging.py
index 9c0208361..3d77a0950 100755
--- a/BayesValidRox/tests/PA-A/models/stokes/averaging.py
+++ b/BayesValidRox/tests/PA-A/models/stokes/averaging.py
@@ -69,7 +69,10 @@ class ControlVolume:
         self.area = np.prod(cvSize)
 
     def mean(self):
-        return self.sum/self.cellCounterShouldBe
+        if self.type==CVType.VELOCITY:
+            return self.sum/self.cellCounterShouldBe
+        else:
+            return self.sum/self.cellCounter
 
     def pointIsInside(self, point):
         inside = np.abs(self.midpoint-point)<0.5*self.size
@@ -127,12 +130,13 @@ ControlVolumes = []
 for pPoint in pressurePoints:
     ControlVolumes.append(ControlVolume(pPoint,CVType.PRESSURE,cvSize,dim))
 for velPoint in velocityPoints:
-    if velPoint[1] < 0.005 and velPoint[1] + cvSize[1] > 0.005:
-        ySize = cvSize[1] - (velPoint[1] + 0.5*cvSize[1] - 0.005)
-        yCoord = velPoint[1] - 0.5*cvSize[1] + 0.5*ySize
-        ControlVolumes.append(ControlVolume(np.array([velPoint[0], yCoord, velPoint[2]]), CVType.VELOCITY, np.array([cvSize[0], ySize, cvSize[2]]), dim))
-    else:
-        ControlVolumes.append(ControlVolume(velPoint, CVType.VELOCITY, cvSize, dim))
+    ControlVolumes.append(ControlVolume(velPoint, CVType.VELOCITY, cvSize, dim))
+    #if velPoint[1] < 0.005 and velPoint[1] + cvSize[1] > 0.005:
+    #    ySize = cvSize[1] - (velPoint[1] + 0.5*cvSize[1] - 0.005)
+    #    yCoord = velPoint[1] - 0.5*cvSize[1] + 0.5*ySize
+    #    ControlVolumes.append(ControlVolume(np.array([velPoint[0], yCoord, velPoint[2]]), CVType.VELOCITY, np.array([cvSize[0], ySize, cvSize[2]]), dim))
+    #else:
+    #    ControlVolumes.append(ControlVolume(velPoint, CVType.VELOCITY, cvSize, dim))
 ControlVolumes = [cv for cv in ControlVolumes if isValidCV(cv)]# filter cvs
 for cv in ControlVolumes:# account for zero in inlets:
     cv.calcCellCounterShouldBe(meshWidth)
diff --git a/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_p_final.csv b/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_p_final.csv
new file mode 100644
index 000000000..43e99d2d8
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_p_final.csv
@@ -0,0 +1,5 @@
+1.160392835736274719e-01
+1.053493395447731018e-01
+1.362950056791305542e-01
+4.141930118203163147e-02
+1.882909983396530151e-01
diff --git a/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_velocity_final.csv b/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_velocity_final.csv
new file mode 100644
index 000000000..6aa3385b9
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokes/ffpm_stokes_velocity_final.csv
@@ -0,0 +1,20 @@
+3.165179471594783359e-05,2.214644077070730102e-05,0.000000000000000000e+00
+1.540285314504784563e-05,-2.068315743566863287e-05,0.000000000000000000e+00
+3.362981353987131946e-05,-1.417179408042436447e-05,0.000000000000000000e+00
+4.025765203667575531e-05,2.817641047427767159e-06,0.000000000000000000e+00
+5.127534863766571992e-05,3.041133790215316151e-06,0.000000000000000000e+00
+4.439944152883686129e-05,3.091425627168941205e-05,0.000000000000000000e+00
+1.381734849442392451e-05,-3.096480086510291666e-05,0.000000000000000000e+00
+4.274039184194417353e-05,-2.309773982861605300e-05,0.000000000000000000e+00
+4.457191560288683392e-06,-3.349336158221328033e-05,0.000000000000000000e+00
+3.002016947657205892e-05,-2.721945364359175130e-05,0.000000000000000000e+00
+3.381359079979119261e-05,2.268143035819367029e-06,0.000000000000000000e+00
+3.237415196805936454e-05,2.742005139280608581e-05,0.000000000000000000e+00
+7.822449697414413095e-05,-1.156200014520436525e-04,0.000000000000000000e+00
+1.610659994184970856e-03,-1.076069966075010598e-04,0.000000000000000000e+00
+1.731469994410872459e-03,1.792079956430825405e-06,0.000000000000000000e+00
+1.767599955201148987e-03,4.475340028875507414e-05,0.000000000000000000e+00
+2.389380009844899178e-03,-3.657320048660039902e-04,0.000000000000000000e+00
+2.568129915744066238e-03,1.096409960155142471e-06,0.000000000000000000e+00
+2.620619954541325569e-03,1.194299966300604865e-05,0.000000000000000000e+00
+1.138989973696880043e-04,-3.639819915406405926e-04,0.000000000000000000e+00
diff --git a/BayesValidRox/tests/PA-A/models/stokes/model_stokes_exe.py b/BayesValidRox/tests/PA-A/models/stokes/model_stokes_exe.py
new file mode 100755
index 000000000..fb3ad600c
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokes/model_stokes_exe.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jul 19 16:48:59 2019
+
+@author: farid
+"""
+import numpy as np
+import os, sys
+import zipfile
+import argparse
+
+# parse arguments
+parser = argparse.ArgumentParser(
+  prog='\033[1m\033[94m' + 'python3.7' + '\033[0m' + ' ' + sys.argv[0],
+  description='Run Stokes model and extract outputs.'
+)
+parser.add_argument('model', help="model name")
+parser.add_argument('file', help="input file")
+parser.add_argument('-a', '--averaging', default=True, type=lambda s: str(s).lower() not in ('false', '0', 'no'), help='Uses averaging scripts to average quantities in the porous medium. True = averaging. False = no averaging')
+args = vars(parser.parse_args())
+
+# Specification
+model = args['model']
+inputFile = args['file']
+averaging = args['averaging']
+
+velocity_points = "../../models/velocity_points.csv"
+pressure_points = "../../models/pressure_points.csv"
+
+# Run the model
+model_exe = 'stokes_regular_2d'
+pvdfile1 = 'regular_2D_stokes_250_micron'
+vtufile = 'regular_2D_stokes_250_micron-00001.vtu'
+
+
+NewCommand = model_exe + ' ' + inputFile
+
+Process1 = os.system('timeout 1000 ./%s > log.out 2>&1' %NewCommand)
+
+if Process1 != 0:
+    print('\nMessage 1:')
+    print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process1)
+
+# Extract velocity and pressure values
+Command = "pvpython ../../extractpointdataovertime.py \
+    -f "+pvdfile1+".pvd -p"
+
+Process2 = os.system(Command + velocity_points)
+if Process2 != 0:
+    print('\nMessage 2:')
+    print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process2)
+
+Process3 = os.system(Command + pressure_points + ' -var p')
+if Process3 != 0:
+    print('\nMessage 3:')
+    print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process3)
+
+# Averaging
+if averaging:
+    Command_avg = "./averaging.py \
+        -f "+vtufile + " -p "
+
+    Process4 = os.system(Command_avg + velocity_points + ' ' + pressure_points)
+    if Process4 != 0:
+        print('\nMessage 4:')
+        print('\tIf value of \'%d\' is a non-zero value, then compilation problems \n' % Process4)
+
+# Combine csv files and remove them
+pointVel = np.genfromtxt('ffpm_{}_velocity.csv'.format(model), delimiter=',')
+pointP = np.genfromtxt('ffpm_{}_p.csv'.format(model), delimiter=',')
+if averaging:
+    averagedVel = np.genfromtxt('averagedVelocityValues.csv', delimiter=',')
+    averagedP = np.genfromtxt('averagedPressureValues.csv', delimiter=',')
+    pointVel[:12] = averagedVel[:12] # 12: number of the points in pm
+    pointP[:2] = averagedP[:2] # 2: number of the points in pm
+# Save to csv files
+np.savetxt('ffpm_{}_velocity_final.csv'.format(model), pointVel, delimiter=',')
+np.savetxt('ffpm_{}_p_final.csv'.format(model), pointP, delimiter=',')
+
+
+# Zip auxiliary files
+keys = ['vtu', 'vtp', 'pvd', '.csv']
+filePaths = [path for path in os.listdir('.') for key in keys if key in path]
+
+zip_file = zipfile.ZipFile('outFiles.zip', 'w')
+with zip_file:
+    # writing each file one by one
+    for path in filePaths:
+        if 'final' not in path:
+            zip_file.write(path)
+
+for path in filePaths:
+    if 'final' not in path:
+        os.remove(path)
diff --git a/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedPressureValues.csv b/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedPressureValues.csv
new file mode 100644
index 000000000..8c239b4ae
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedPressureValues.csv
@@ -0,0 +1,2 @@
+1.420461684465408325e-01
+1.290845423936843872e-01
diff --git a/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedVelocityValues.csv b/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedVelocityValues.csv
new file mode 100644
index 000000000..8d41e75ee
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokesdarcy/averagedVelocityValues.csv
@@ -0,0 +1,12 @@
+2.850944005849953555e-05,2.022558201097979236e-05,0.000000000000000000e+00
+1.350654755327695966e-05,-1.858842417590267360e-05,0.000000000000000000e+00
+3.029674551591199361e-05,-1.284843263960055992e-05,0.000000000000000000e+00
+3.639810284994382528e-05,2.540879547846541797e-06,0.000000000000000000e+00
+4.631782223540881205e-05,2.777586454888263633e-06,0.000000000000000000e+00
+4.053034709627922894e-05,2.815173673198501092e-05,0.000000000000000000e+00
+1.162640863316966033e-05,-2.803774872361941561e-05,0.000000000000000000e+00
+3.879129756612087476e-05,-2.112816746091539244e-05,0.000000000000000000e+00
+1.542961680963284704e-05,-4.468019681169721480e-05,0.000000000000000000e+00
+2.851222695561261844e-04,-3.746420108541315130e-05,0.000000000000000000e+00
+3.056353254677901532e-04,2.821594478993015576e-06,0.000000000000000000e+00
+3.103969673610151858e-04,3.174190650234720579e-05,0.000000000000000000e+00
diff --git a/BayesValidRox/tests/PA-A/models/stokesdarcy/averaging_stokesdarcy.py b/BayesValidRox/tests/PA-A/models/stokesdarcy/averaging_stokesdarcy.py
new file mode 100755
index 000000000..226706f14
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokesdarcy/averaging_stokesdarcy.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Idea:
+- Get grid data from vtu file
+- Get velocity and pressure data from vtu file
+- averaging: loop over all grid cells
+    - determine to which CV the cell belongs
+    - add variable (p or vel) value to the local mean value
+    - calculate expected number of grid cells in each cv, cells occupied by
+    the inclusions are not included in the grid => account for this when calculating mean
+
+Assumptions:
+ grid uniform => determine mesh width from first cell in grid
+ all control volumes (CVs) have the same shape and size => function midpointInCV very simple
+ CVs dont overlap => allows early loop exit, see code
+ midpoints of CVs are inside grid cells => unused
+
+ author: Lars Kaiser / adapted to Stokes-Darcy models by Martin Schneider
+ email: lars.kaiser@mathematik.uni-stuttgart.de
+"""
+from enum import IntEnum
+import argparse
+import sys
+import numpy as np
+import meshio
+
+# parse arguments
+parser = argparse.ArgumentParser(
+  prog='\033[1m\033[94m' + 'python' + '\033[0m' + ' ' + sys.argv[0],
+  description='Averaging the outputs using control volumes.'
+)
+parser.add_argument('-fd', '--fileDarcy', nargs='+', required=True, help="darcy vtu file to be processed")
+parser.add_argument('-fs', '--fileStokes', nargs='+', required=True, help="stokes vtu file to be processed")
+parser.add_argument('-c', '--cvsize', nargs="+", default=[0.0005,0.0005,0.0005], help="size of the cv along the y-axis")
+parser.add_argument('-yif', '--yinterface', nargs=1, default=0.005, help="y-coordinate of interface location")
+parser.add_argument('-o', '--outputDirectory', default='', help="Directory to which the .csv files are written")
+parser.add_argument('-of', '--outFile', nargs="+", default=['averagedVelocityValues','averagedPressureValues'], help="Basename of the written csv file")
+parser.add_argument('-p', '--points', type=str, nargs=2, required=True, help=' List of coordinates of the probed point (in 3D)')
+parser.add_argument('-v', '--verbosity', default=False, help='Verbosity of the output. True = print progress. False = print data columns')
+args = vars(parser.parse_args())
+
+class CVType(IntEnum):
+    PRESSURE = 0
+    VELOCITY = 2
+
+class ControlVolume:
+    def __init__(self, midpoint, type, size, dim=2):
+        self.midpoint = midpoint
+        self.type = type
+        self.size = size
+        self.dim = dim
+        if type==CVType.PRESSURE:
+            self.sum = 0.0
+        else:
+            self.sum = np.array([0.,0.,0.])
+
+        self.cellIndexList = []
+        self.cellCounter = 0
+        self.cellCounterShouldBe = 0
+
+        self.calcArea()
+
+    def addCell(self, cellIndex, value):
+        self.cellIndexList.append(cellIndex)
+        self.cellCounter += 1
+        self.sum += value
+
+    def calcArea(self):
+        self.area = np.prod(cvSize)
+
+    def mean(self):
+        #assertion might fail because discretisation length might not be related to the REV size
+        #assert self.cellCounterShouldBe == self.cellCounter, "Not enough cells in REV for Stokes-Darcy models"
+        return self.sum/self.cellCounter
+
+    def pointIsInside(self, point):
+        inside = np.abs(self.midpoint-point)<0.5*self.size
+        return all(inside)
+
+    def calcCellCounterShouldBe(self,meshWidth):
+        self.cellCounterShouldBe=round(self.area/np.prod(meshWidth))
+
+    def getCVTypeArray(self):
+        return np.array([self.type.value]*self.cellCounter)
+
+    def getCVMeanArray(self,type):
+        if type==self.type:
+            return np.array([self.mean()]*self.cellCounter)
+        else:
+            return np.array( [(np.array([-1,-1,-1]) if type==CVType.VELOCITY else [-1])] *self.cellCounter)
+
+    def getCellIndices(self):
+        return self.cellIndexList 
+
+## User Parameters
+vtuFilenameDarcy=args['fileDarcy'][0]
+vtuFilenameStokes=args['fileStokes'][0]
+verbose = args['verbosity']
+yif = args['yinterface']
+dim=2
+def isValidCV(controlVolume):# function to filter for valid control volumes
+    return controlVolume.midpoint[1]<yif # ignore points with y>0.005
+
+## Implementation
+# Control volumes
+cvSize=np.array(args['cvsize'])
+if dim!=3: cvSize[2]=1.0
+
+# Grid
+meshDarcy = meshio.read(vtuFilenameDarcy)
+pointsDarcy = meshDarcy.points
+cellsDarcy = meshDarcy.cells[0][1]
+pDarcy = meshDarcy.cell_data["cellP"][0]
+velocityDarcy = meshDarcy.cell_data["cellVelocity"][0]
+
+meshStokes = meshio.read(vtuFilenameStokes)
+pointsStokes = meshStokes.points
+cellsStokes = meshStokes.cells[0][1]
+cellsStokes += len(pointsDarcy)
+pStokes = meshStokes.cell_data["p"][0]
+velocityStokes = meshStokes.cell_data["velocity_liq (m/s)"][0]
+
+points = np.concatenate([pointsDarcy,pointsStokes])
+cells = np.concatenate([cellsDarcy,cellsStokes])
+p = np.concatenate([pDarcy,pStokes])
+velocity =np.concatenate([velocityDarcy,velocityStokes])
+
+# Mesh width
+representationCell = cells[0]
+distances=np.diff(points[representationCell,:], axis=0)#calc 3 of the 4 distances along the "axis" in the first cell
+meshWidth=np.amax(np.abs(distances),axis=0)
+if dim==2: meshWidth[2]=1.0
+
+# Print grid and data information
+if verbose:
+    griddescription = "number of grid points: {}, number of cells: {}, cell size: {}x{}x{}".format(len(points), len(cells), meshWidth[0],meshWidth[1],meshWidth[2])
+    print("grid description: ", griddescription)
+    datadescription = "pressure data points: {}, velocity data points {}".format(len(p), len(velocity))
+    print("data description: ", datadescription)
+    print()
+
+# Init ControlVolumes (cvs)
+pressurePoints= np.genfromtxt(args['points'][1], delimiter=',',skip_header=1, usecols=[1,2,3],dtype=float)
+velocityPoints= np.genfromtxt(args['points'][0], delimiter=',',skip_header=1, usecols=[1,2,3],dtype=float)
+ControlVolumes = []
+for pPoint in pressurePoints:
+    ControlVolumes.append(ControlVolume(pPoint,CVType.PRESSURE,cvSize,dim))
+for velPoint in velocityPoints:
+    ControlVolumes.append(ControlVolume(velPoint, CVType.VELOCITY, cvSize, dim))
+ControlVolumes = [cv for cv in ControlVolumes if isValidCV(cv)]# filter cvs
+for cv in ControlVolumes:# account for zero in inlets:
+    cv.calcCellCounterShouldBe(meshWidth)
+
+# Averaging: Go through all cells in the grid and calculate average
+def midpoint(cell):
+    return np.mean(points[cell],axis=0)
+def applyToControlVolume(cellIndex):
+    cellMidpoint = midpoint(cells[cellIndex])
+    for cv in ControlVolumes:
+        if(cv.pointIsInside(cellMidpoint)):
+            cv.addCell(cellIndex, p[cellIndex] if cv.type==CVType.PRESSURE else velocity[cellIndex])
+            return #Assumption: each grid cell only contained in one control volume (cv)
+
+for cellIndex in range(0,len(cells)):
+    applyToControlVolume(cellIndex)
+
+# export to vtu
+velocityAverages=np.array([cv.mean() for cv in ControlVolumes if cv.type==CVType.VELOCITY])
+pressureAverages=np.array([cv.mean() for cv in ControlVolumes if cv.type==CVType.PRESSURE])
+
+# export
+outDir = args['outputDirectory']
+np.savetxt(outDir+args['outFile'][0]+".csv", velocityAverages, delimiter=",")
+np.savetxt(outDir+args['outFile'][1]+".csv", pressureAverages, delimiter=",")
+
+if verbose:
+    allCellsIndexList = []
+    allCellTypes = []
+    allVelocities = []
+    allPressures = []
+    for cv in ControlVolumes:
+        allCellsIndexList.extend(cv.cellIndexList)
+        allCellTypes.extend(cv.getCVTypeArray())
+        allVelocities.extend(velocity[cv.getCellIndices()])
+        allPressures.extend(p[cv.getCellIndices()])
+
+    mesh = meshio.Mesh(
+        points,
+        [("quad", cells[allCellsIndexList,:])],
+        # Each item in cell data must match the cells array
+        cell_data={"type": [allCellTypes], "pressure": [allPressures], "velocity": [allVelocities]}
+    )
+    mesh.write(
+        "ControlVolumes.vtu",
+        file_format="vtu",
+        binary=False #Change to True for smaller/ faster files/processing
+    )
diff --git a/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_BJ_params.input b/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_BJ_params.input
index 894205a27..f98386917 100644
--- a/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_BJ_params.input
+++ b/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_BJ_params.input
@@ -1,8 +1,8 @@
 [Grid]
 Positions0 = -125e-6 0.010125
 Positions1 = 0 0.6e-2
-Cells0 = 656 #1025
-Cells1 = 384 #600
+Cells0 = 1312 #656
+Cells1 = 768 #384
 
 [Stokes.Grid]
 InletLength = 0.0
diff --git a/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_ER_params.input b/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_ER_params.input
index 8ae0a048a..952b0ec4f 100644
--- a/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_ER_params.input
+++ b/BayesValidRox/tests/PA-A/models/stokesdarcy/stokesdarcy_ER_params.input
@@ -1,8 +1,8 @@
 [Grid]
 Positions0 = -125e-6 0.010125
 Positions1 = 0 0.6e-2
-Cells0 = 656 #1025
-Cells1 = 384 #600
+Cells0 = 1312 #656
+Cells1 = 768 #384
 
 [Stokes.Grid]
 InletLength = 0.0
@@ -26,7 +26,7 @@ Pressure = 0.0
 
 [Darcy.SpatialParams]
 Permeability = 3.25e-9 # m^2
-AlphaBeaversJoseph = 2.0
+AlphaBeaversJoseph = 1.0
 
 [Darcy.InterfaceParams]
 EpsInterface = 5.0e-4
diff --git a/BayesValidRox/tests/PA-A/models/stokespnm/averaging_stokespnm.py b/BayesValidRox/tests/PA-A/models/stokespnm/averaging_stokespnm.py
new file mode 100755
index 000000000..1e1b8078f
--- /dev/null
+++ b/BayesValidRox/tests/PA-A/models/stokespnm/averaging_stokespnm.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import argparse
+import sys
+import numpy as np
+from vtk import vtkXMLPolyDataReader
+from vtk.util.numpy_support import vtk_to_numpy
+import meshio
+
+# parse arguments
+parser = argparse.ArgumentParser(
+  prog='\033[1m\033[94m' + 'python' + '\033[0m' + ' ' + sys.argv[0],
+  description='Averaging the outputs using control volumes.'
+)
+parser.add_argument('-fd', '--fileDarcy', nargs='+', required=True, help="darcy vtu file to be processed")
+parser.add_argument('-fs', '--fileStokes', nargs='+', required=True, help="stokes vtu file to be processed")
+parser.add_argument('-c', '--cvsize', nargs="+", default=[0.0005,0.0005,0.0005], help="size of the cv along the y-axis")
+parser.add_argument('-o', '--outputDirectory', default='', help="Directory to which the .csv files are written")
+parser.add_argument('-of', '--outFile', nargs="+", default=['averagedVelocityValues','averagedPressureValues'], help="Basename of the written csv file")
+parser.add_argument('-p', '--points', type=str, nargs=2, required=True, help=' List of coordinates of the probed point (in 3D)')
+parser.add_argument('-v', '--verbosity', default=False, help='Verbosity of the output. True = print progress. False = print data columns')
+args = vars(parser.parse_args())
+
+## User Parameters
+vtuFilenameDarcy=args['fileDarcy'][0]
+vtuFilenameStokes=args['fileStokes'][0]
+verbose = args['verbosity']
+dim=2
+
+## Implementation
+# Control volumes
+cvSize=np.array(args['cvsize'])
+if dim!=3: cvSize[2]=1.0
+
+# Read vtp data of PNM
+reader = vtkXMLPolyDataReader()
+reader.SetFileName(vtuFilenameDarcy)
+reader.Update()
+polydata = reader.GetOutput()
+
+pointsDarcy = vtk_to_numpy(polydata.GetPoints().GetData())
+
+# Get 1d cells of PNM
+cellsDarcy = vtk_to_numpy(polydata.GetLines().GetData())
+cellsDarcy = np.reshape(cellsDarcy, (polydata.GetNumberOfLines(), 3))
+cellsDarcy = cellsDarcy[:,1:3]
+
+cellData = polydata.GetCellData()
+pointData = polydata.GetPointData()
+pDarcy = vtk_to_numpy(pointData.GetArray("p"))
+pDarcy = np.reshape(pDarcy, (len(pointsDarcy),1))
+velocityDarcy = vtk_to_numpy(cellData.GetArray("velocity_liq (m/s)"))
+throatLength = vtk_to_numpy(cellData.GetArray("throatLength"))
+refThroatLength = throatLength[0] # we assume the same throat Length
+cellVolumeDarcy = refThroatLength * refThroatLength # and a uniform grain distribution
+
+meshStokes= meshio.read(vtuFilenameStokes)
+pointsStokes = meshStokes.points
+pointsStokes[:,1] -= 0.5*refThroatLength
+cellsStokes = meshStokes.cells[0][1]
+pStokes = meshStokes.cell_data["p"][0]
+velocityStokes = meshStokes.cell_data["velocity_liq (m/s)"][0]
+
+# Mesh width
+representationCell = cellsStokes[0]
+distances = np.diff(pointsStokes[representationCell,:], axis=0)#calc 3 of the 4 distances along the "axis" in the first cell
+meshWidthStokes = np.amax(np.abs(distances),axis=0)
+cellVolumeStokes = round(np.product(meshWidthStokes[0:dim]),14)
+
+# Calculate averages for velocities and pressures
+pressurePoints = np.genfromtxt(args['points'][1], delimiter=',',skip_header=1, usecols=[1,2,3],dtype=float)
+velocityPoints = np.genfromtxt(args['points'][0], delimiter=',',skip_header=1, usecols=[1,2,3],dtype=float)
+pressurePoints = pressurePoints[np.where(pressurePoints[:,1]< 0.005)]
+velocityPoints = velocityPoints[np.where(velocityPoints[:,1]< 0.005)]
+
+velocityAverages = np.zeros(np.shape(velocityPoints))
+pressureAverages = np.zeros((len(pressurePoints),1))
+#totalVolume = np.zeros((len(velocityAverages),1))
+
+# Averaging: Go through all cells in the grid and calculate average
+def midpoint(cellPoints):
+    return np.mean(cellPoints,axis=0)
+
+def isYThroat(cellPoints):
+    assert len(cellPoints)==2, "PNM only allows 1d lines"
+    dirVector = np.abs(cellPoints[1,:] - cellPoints[0,:])
+    if dirVector[1] > 1e-15:
+        return True
+    else:
+        return False
+    
+def checkVelocityPoints(cellPoints):
+    cellMidpoint = midpoint(cellPoints)
+    for pvIdx in range(0,len(velocityPoints)):
+        if( all(np.abs(cellMidpoint-velocityPoints[pvIdx]) < 0.51*cvSize) ):
+            return True, pvIdx  #Assumption: each grid cell only contained in one control volume (cv)
+    return False, -1
+
+def checkVelocityPointsStokes(cellPoints):
+    cellMidpoint = midpoint(cellPoints)
+    for pvIdx in range(0,len(velocityPoints)):
+        if( all(np.abs(cellMidpoint-velocityPoints[pvIdx]) < 0.5*cvSize) ):
+            return True, pvIdx  #Assumption: each grid cell only contained in one control volume (cv)
+    return False, -1
+
+#def checkPressurePoints(cellPoints):
+#    cellMidpoint = midpoint(cellPoints)
+#    for pvIdx in range(0,len(pressurePoints)):
+#        if( all(np.abs(cellMidpoint-pressurePoints[pvIdx]) < 0.001*cvSize) ):
+#            return True, pvIdx  #Assumption: each grid cell only contained in one control volume (cv)
+#    return False, -1  
+
+def checkPressurePointsDarcy(point):
+    for pvIdx in range(0,len(pressurePoints)):
+        if( all(np.abs(point-pressurePoints[pvIdx]) < 0.001*cvSize) ):
+            return True, pvIdx  #Assumption: each grid cell only contained in one control volume (cv)
+    return False, -1    
+
+revVolume = np.product(cvSize)
+
+for cellIdx in range(0,len(cellsDarcy)):
+    cellPoints = pointsDarcy[cellsDarcy[cellIdx]]
+    valid, idx = checkVelocityPoints(cellPoints)
+    if valid:
+        if isYThroat(cellPoints):
+            factor = 1.5 if (midpoint(cellPoints)[1] + 0.5*refThroatLength > 0.00499) else 2 
+            velocityAverages[idx] += factor*cellVolumeDarcy*velocityDarcy[cellIdx]
+        else:
+            velocityAverages[idx] += 0.5*cellVolumeDarcy*velocityDarcy[cellIdx]
+
+for pIdx in range(0,len(pointsDarcy)):
+    valid, idx = checkPressurePointsDarcy(pointsDarcy[pIdx])
+    if valid:
+        pressureAverages[idx] = pDarcy[pIdx]
+    
+# We assume that there are no contributions to the average pressures from the Stokes domain    
+for cellIdx in range(0,len(cellsStokes)):
+    valid, idx = checkVelocityPointsStokes(pointsStokes[cellsStokes[cellIdx]])
+    if valid:
+        velocityAverages[idx] += cellVolumeStokes*velocityStokes[cellIdx]
+
+# Divide by REV volume.
+velocityAverages /= revVolume
+        
+# export
+outDir = args['outputDirectory']
+np.savetxt(outDir+args['outFile'][0]+".csv", velocityAverages, delimiter=",")
+np.savetxt(outDir+args['outFile'][1]+".csv", pressureAverages, delimiter=",")
diff --git a/BayesValidRox/tests/PA-A/models/stokespnm/stokespnm_params.input b/BayesValidRox/tests/PA-A/models/stokespnm/stokespnm_params.input
index e07ff2a1e..364d36f9f 100644
--- a/BayesValidRox/tests/PA-A/models/stokespnm/stokespnm_params.input
+++ b/BayesValidRox/tests/PA-A/models/stokespnm/stokespnm_params.input
@@ -1,17 +1,17 @@
 [Stokes.Grid]
 LowerLeft = -0.000125000 0.005125 # inlet length: 0.25e-2 m
 UpperRight = 0.010125 0.006125 # outlet length: 0.25e-2 m, heigth channel: 1e-3 m
-CellsPerThroat = 5
-Cells1 = 20
-FixedCellsBetweenThroats = 5
+CellsPerThroat = 32
+Cells1 = 128
+FixedCellsBetweenThroats = 32
 Refinement = 0
 
 [PNM.Grid]
 LowerLeft = 0 125e-6
 UpperRight = 0.010 0.005125 # 20 x 10 block, block size 250e-6 m
 NumPores = 21 11
-PoreRadius = 125e-6
-ThroatRadius = 125e-6
+PoreInscribedRadius = 125e-6
+ThroatInscribedRadius = 125e-6
 PriorityList = 2 3 0 1
 BoundaryFaceMarker = 1 1 1 2
 DeletionProbability = 0 0 1 1
diff --git a/BayesValidRox/tests/PA-A/ref_stokesdarcy.py b/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
index 9036bcddb..9bfd60dad 100755
--- a/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
+++ b/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
@@ -22,7 +22,7 @@ except ModuleNotFoundError:
 sys.path.insert(0,'./../../../BayesValidRox/')
 
 from PyLink.PyLinkForwardModel import PyLinkForwardModel
-    
+
 #=====================================================
 #============   COMPUTATIONAL MODEL   ================
 #=====================================================
@@ -38,13 +38,13 @@ Model.Command = "model_exe.py stokesdarcy stokesdarcy_%s_params.input"%couplingc
 Model.ExecutionPath = os.getcwd()
 Model.Output.Parser = 'read_ffpm'
 Model.Output.Names = ['velocity [m/s]', 'p']
-Model.Output.FileNames = ["ffpm_stokesdarcy_velocity.csv","ffpm_stokesdarcy_p.csv"]
+Model.Output.FileNames = ["ffpm_stokesdarcy_velocity_final.csv","ffpm_stokesdarcy_p_final.csv"]
 
 
 #=====================================================
 #=========   PROBABILISTIC INPUT MODEL  ==============
 #=====================================================
-NrSamples = 150
+NrSamples = 100
 
 # l = Microscopic length scale >>>> l/4 nach oben und l/2 nach unten.
 l = 0.5e-3
@@ -58,7 +58,7 @@ allParams = [(1e-4, 1e-2), # 'VyMaxTop'
              (1.0e-08, 1.5e-08)# '$K$'
              ]
 
-if couplingcond == 'BJ':    
+if couplingcond == 'BJ':
     allParams.append((0.0 , 4.0)) # '$\\alpha_{BJ}$'
 
 parametersets = np.zeros((NrSamples,len(allParams)))
@@ -68,9 +68,9 @@ for parIdx, params in enumerate(allParams):
     else:
         Mu = np.log(params[0]**2 / np.sqrt(params[0]**2 + params[1]**2))
         Sigma  = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
-                
+
         parametersets[:,parIdx] = chaospy.LogNormal(mu=Mu,sigma=Sigma).sample(NrSamples)
-        
+
 #=====================================================
 #=============  RUN THE SIMULATIONS  =================
 #=====================================================
@@ -100,36 +100,36 @@ OutputNames = ['velocity [m/s]', 'p']
 for case in ['Calibration', 'Validation']:
     hdf5file = "ExpDesign_"+Model.Name+"_"+case+".hdf5"
     file = h5py.File(hdf5file, 'a')
-    
+
     # Save Samples
     grpX = file.create_group("EDX")
     grpX.create_dataset("init_", data=validSamples)
-    
+
     # Get the sorted index
     sorted_indices = {}
     Velocity = pd.read_csv("models/velocity_points.csv")
     Pressure = pd.read_csv("models/pressure_points.csv")
     sorted_indices[OutputNames[0]] = list(range(0,10)) if case=='Calibration' else list(range(10,20))
     sorted_indices[OutputNames[1]] = list(range(0,3)) if case=='Calibration' else list(range(3,5))
-    
+
     # Extract x values
     grp_x_values = file.create_group("x_values/")
     validModelRuns = dict()
     for varIdx, var in enumerate(OutputNames):
         validModelRuns[var] = np.array(ValidSets["x_values/"+var])[sorted_indices[var]]
         grp_x_values.create_dataset(var, data=validModelRuns[var])
-    
+
     # Extract Y values
     for varIdx, var in enumerate(OutputNames):
         validModelRuns[var] = OutputMatrix[var][:,sorted_indices[var]]
         grpY = file.create_group("EDY/"+var)
         grpY.create_dataset("init_", data=validModelRuns[var])
-    
+
     # close
     file.close()
 
 # close
-ValidSets.close()    
+ValidSets.close()
 
 # Remove original hdf5
 os.remove("ExpDesign_"+Model.Name+".hdf5")
diff --git a/BayesValidRox/tests/PA-A/ref_stokespnm.py b/BayesValidRox/tests/PA-A/ref_stokespnm.py
index 06babd084..c7dddf6b6 100755
--- a/BayesValidRox/tests/PA-A/ref_stokespnm.py
+++ b/BayesValidRox/tests/PA-A/ref_stokespnm.py
@@ -25,7 +25,7 @@ from PyLink.PyLinkForwardModel import PyLinkForwardModel
 #============   COMPUTATIONAL MODEL   ================
 #=====================================================
 Model = PyLinkForwardModel()
-# Model.nrCPUs = 15
+# Model.nrCPUs = 4
 Model.Type = 'PyLink'
 Model.Name = 'ffpm-stokespnm-testset'
 Model.InputFile = 'stokespnm_params.input'
@@ -35,7 +35,7 @@ Model.Command = "model_exe.py stokespnm stokespnm_params.input"
 Model.ExecutionPath = os.getcwd()
 Model.Output.Parser = 'read_ffpm'
 Model.Output.Names = ['velocity [m/s]', 'p']
-Model.Output.FileNames = ["ffpm_stokespnm_velocity.csv","ffpm_stokespnm_p.csv"]
+Model.Output.FileNames = ["ffpm_stokespnm_velocity_final.csv","ffpm_stokespnm_p_final.csv"]
 
 #=====================================================
 #=========   PROBABILISTIC INPUT MODEL  ==============
@@ -55,7 +55,7 @@ origSpaceDist = chaospy.J(*origJoints)
 #=============  RUN THE SIMULATIONS  =================
 #=====================================================
 SamplingMethod = 'random'
-NrSamples = 150
+NrSamples = 100
 
 # Generate samples with chaospy
 parametersets = chaospy.generate_samples(NrSamples, domain=origSpaceDist , rule=SamplingMethod).T
@@ -87,31 +87,31 @@ OutputNames = ['velocity [m/s]', 'p']
 for case in ['Calibration', 'Validation']:
     hdf5file = "ExpDesign_"+Model.Name+"_"+case+".hdf5"
     file = h5py.File(hdf5file, 'a')
-    
+
     # Save Samples
     grpX = file.create_group("EDX")
     grpX.create_dataset("init_", data=validSamples)
-    
+
     # Get the sorted index
     sorted_indices = {}
     Velocity = pd.read_csv("models/velocity_points.csv")
     Pressure = pd.read_csv("models/pressure_points.csv")
     sorted_indices[OutputNames[0]] = list(range(0,10)) if case=='Calibration' else list(range(10,20))
     sorted_indices[OutputNames[1]] = list(range(0,3)) if case=='Calibration' else list(range(3,5))
-    
+
     # Extract x values
     grp_x_values = file.create_group("x_values/")
     validModelRuns = dict()
     for varIdx, var in enumerate(OutputNames):
         validModelRuns[var] = np.array(ValidSets["x_values/"+var])[sorted_indices[var]]
         grp_x_values.create_dataset(var, data=validModelRuns[var])
-    
+
     # Extract Y values
     for varIdx, var in enumerate(OutputNames):
         validModelRuns[var] = OutputMatrix[var][:,sorted_indices[var]]
         grpY = file.create_group("EDY/"+var)
         grpY.create_dataset("init_", data=validModelRuns[var])
-    
+
     # close
     file.close()
 
diff --git a/BayesValidRox/tests/PA-A/stokesdarcy_BJ_params.tpl.input b/BayesValidRox/tests/PA-A/stokesdarcy_BJ_params.tpl.input
index d0b3c6713..eee4a9b24 100644
--- a/BayesValidRox/tests/PA-A/stokesdarcy_BJ_params.tpl.input
+++ b/BayesValidRox/tests/PA-A/stokesdarcy_BJ_params.tpl.input
@@ -1,8 +1,8 @@
 [Grid]
 Positions0 = -125e-6 0.010125
 Positions1 = 0 0.6e-2
-Cells0 = 656 #1025 512
-Cells1 = 384 #600 96
+Cells0 = 1312 #656
+Cells1 = 768 #384
 
 [Stokes.Grid]
 InletLength = 0.0
diff --git a/BayesValidRox/tests/PA-A/stokesdarcy_ER_params.tpl.input b/BayesValidRox/tests/PA-A/stokesdarcy_ER_params.tpl.input
index f6c3ee685..62e1f053f 100644
--- a/BayesValidRox/tests/PA-A/stokesdarcy_ER_params.tpl.input
+++ b/BayesValidRox/tests/PA-A/stokesdarcy_ER_params.tpl.input
@@ -1,8 +1,8 @@
 [Grid]
 Positions0 = -125e-6 0.010125
 Positions1 = 0 0.6e-2
-Cells0 = 656 #1025 512
-Cells1 = 384 #600 300
+Cells0 = 1312 #656 512
+Cells1 = 768 #384 300
 
 [Stokes.Grid]
 InletLength = 0.0
diff --git a/BayesValidRox/tests/PA-A/stokespnm_params.tpl.input b/BayesValidRox/tests/PA-A/stokespnm_params.tpl.input
index b338e780f..f1fb34a46 100644
--- a/BayesValidRox/tests/PA-A/stokespnm_params.tpl.input
+++ b/BayesValidRox/tests/PA-A/stokespnm_params.tpl.input
@@ -1,17 +1,17 @@
 [Stokes.Grid]
 LowerLeft = -0.000125000 0.005125 # inlet length: 0.25e-2 m
 UpperRight = 0.010125 0.006125 # outlet length: 0.25e-2 m, heigth channel: 1e-3 m
-CellsPerThroat = 5
-Cells1 = 20
-FixedCellsBetweenThroats = 5
+CellsPerThroat = 32
+Cells1 = 128
+FixedCellsBetweenThroats = 32
 Refinement = 0
 
 [PNM.Grid]
 LowerLeft = 0 125e-6
 UpperRight = 0.010 0.005125 # 20 x 10 block, block size 250e-6 m
 NumPores = 21 11
-PoreRadius = 125e-6
-ThroatRadius = 125e-6
+PoreInscribedRadius = 125e-6
+ThroatInscribedRadius = 125e-6
 PriorityList = 2 3 0 1
 BoundaryFaceMarker = 1 1 1 2
 DeletionProbability = 0 0 1 1
diff --git a/BayesValidRox/tests/PA-A/util/postDist_visualization.py b/BayesValidRox/tests/PA-A/util/postDist_visualization.py
index ac4dbfb75..0e6bcc617 100755
--- a/BayesValidRox/tests/PA-A/util/postDist_visualization.py
+++ b/BayesValidRox/tests/PA-A/util/postDist_visualization.py
@@ -15,12 +15,12 @@ except ModuleNotFoundError:
     import pickle
 
 # Local
-sys.path.insert(0,'./../../../BayesValidRox/')
+sys.path.insert(0,'./../../../../BayesValidRox/')
 
-modelName = 'stokesdarcyBJ' #stokespnm stokesdarcyER stokesdarcyBJ
+modelName = 'stokesdarcyER' #stokespnm stokesdarcyER stokesdarcyBJ
 
 # Load the pickle objects
-data_dir = './Results_20_05_2021/outputs_ffpm-{}/'.format(modelName)
+data_dir = '../Results_14_06_2021/outputs_ffpm-{}/'.format(modelName)
 
 with open(data_dir+'PA_A_Bayesffpm-{}.pkl'.format(modelName), 'rb') as input:
     Bayes = pickle.load(input)
diff --git a/BayesValidRox/tests/PA-A/util/post_plot_PAPER.py b/BayesValidRox/tests/PA-A/util/post_plot_PAPER.py
index d2d9081b6..bcbbb673f 100644
--- a/BayesValidRox/tests/PA-A/util/post_plot_PAPER.py
+++ b/BayesValidRox/tests/PA-A/util/post_plot_PAPER.py
@@ -53,7 +53,7 @@ def upper_rugplot(data, height=.05, ax=None, **kwargs):
     ax.add_collection(lc)
 
 def postPredictiveplot(modelName, errorPrec, averaging=True, case='Calib', bins='auto'):
-    result_folder = '../Results_12_06_2021/outputs_{}/'.format(modelName.split('-v')[0])
+    result_folder = '../Results_14_06_2021/outputs_{}/'.format(modelName.split('-v')[0])
     #result_folder = './'
     directory = result_folder+'Outputs_Bayes_'+modelName+'_'+case
     OutputDir = ('postPred_'+modelName+'_'+case)
@@ -108,9 +108,9 @@ def postPredictiveplot(modelName, errorPrec, averaging=True, case='Calib', bins=
                          color='orange',stat="count") #normalizes counts so that the sum of the bar heights is 1
 
             # Reference data from the pore-scale simulation
-            #ax.axvline(x=data[OutputName][idx], linewidth=8, color='green')
-            sns.histplot(np.random.normal(data[OutputName][idx], errorPrec*data[OutputName][idx], len(postPred[postPred>0])), ax=ax,bins=bins,
-                          color='green', alpha=0.5, stat="count")
+            ax.axvline(x=data[OutputName][idx], linewidth=8, color='green')
+            #sns.histplot(np.random.normal(data[OutputName][idx], errorPrec*data[OutputName][idx], len(postPred[postPred>0])), ax=ax,bins=bins,
+                          #color='green', alpha=0.5, stat="count")
 
             # Print conidence interval of ExpDesign.Y (Trained area)
             modelRuns = PCEModel.ExpDesign.Y[OutputName][:,idx]
@@ -155,6 +155,6 @@ def postPredictiveplot(modelName, errorPrec, averaging=True, case='Calib', bins=
         fig.savefig('./'+OutputDir+'/'+plotname+'.pdf', bbox_inches='tight')
         plt.close()
 
-modelName = 'ffpm-stokespnm' #stokespnm stokesdarcyER stokesdarcyBJ
+modelName = 'ffpm-stokesdarcyBJ' #stokespnm stokesdarcyER stokesdarcyBJ
 postPredictiveplot(modelName, errorPrec=0.05, case='Calib', bins=20)
 postPredictiveplot(modelName+'-valid', errorPrec=0.05, case='Valid',bins=20)
-- 
GitLab