diff --git a/BayesValidRox/tests/PA-A/Benchmark_PAA.py b/BayesValidRox/tests/PA-A/Benchmark_PAA.py
index 47837798e1cece6f55c8076898d83052660e3fee..25fc016c585ea07a67500bc9de1fbb4e31f89c7e 100755
--- a/BayesValidRox/tests/PA-A/Benchmark_PAA.py
+++ b/BayesValidRox/tests/PA-A/Benchmark_PAA.py
@@ -24,9 +24,8 @@ import seaborn as sns
 from matplotlib.patches import Patch
 import matplotlib.lines as mlines
 import matplotlib
-
-matplotlib.use("agg")
+matplotlib.use("agg")  # select the backend before pylab is imported
 import matplotlib.pylab as plt
 
 SIZE = 40
 plt.rc("figure", figsize=(24, 16))
@@ -396,17 +395,17 @@ def perturbData(data, OutputNames, noItr, noiseLevel):
 
 if __name__ == "__main__":
 
-    # ==========================================================================
-    # ========================   Set Parameters   ==============================
-    # ==========================================================================
+    # =========================================================================
+    # =======================   Set Parameters   ==============================
+    # =========================================================================
     # ---------- set scenario params ----------
     inclusion = "squared"  # squared or circular
-    inletLoc = "left"  # left or top
+    inletLoc = "top"  # left or top
     outputs = ["velocity [m/s]", "p"]
 
     # ---------- set BayesValidRox params ----------
     PCEExpDesignMethod = "normal"  # 'normal' or 'sequential'
-    nInitSamples = 500  # 50 Initial No. of orig. Model runs for surrogate training
+    nInitSamples = 300  # Initial no. of orig. model runs for surrogate training
     nTotalSamples = 300  # Total no. of orig. model runs for surrogate training
     nBootstrapItr = 1000  # No. of bootstrapping iterations for Bayesian analysis
     BootstrapNoise_Avg = 0.0005  # Noise amount for bootstrapping in Bayesian analysis
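
A note on these knobs: `nBootstrapItr` and `BootstrapNoise_Avg` are consumed by the `perturbData` helper named in the hunk header above. A minimal sketch of that style of data perturbation, assuming additive zero-mean Gaussian noise (the actual scheme is defined in `perturbData` itself):

```python
import numpy as np

def perturb_data_sketch(data, noise_level, n_itr, seed=0):
    """Hypothetical stand-in for perturbData: build n_itr noisy replicas of a
    1-D data vector by adding zero-mean Gaussian noise with std noise_level."""
    rng = np.random.default_rng(seed)
    data = np.asarray(data, dtype=float)
    return data + noise_level * rng.standard_normal((n_itr, data.size))

# e.g. 1000 perturbed data sets with the noise amount configured above
perturbed = perturb_data_sketch([1.2e-4, 3.4e-4, 5.6e-4], 0.0005, 1000)
```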
@@ -436,46 +435,42 @@ if __name__ == "__main__":
     # ==========================================================================
     # Set dir name for plots
     OutputDir = f"Outputs_BayesAnalysis_{inclusion}_inclusion_{inletLoc}Inflow/"
-    result_folder = "./Results_09_01_2022_leftInflow"  # './' or './Results_05_10_2021'
+    result_folder = "./"  # './' or './Results_05_10_2021'
     # result_folder = './Results_07_12_2021_leftInflow'
     # result_folder = 'Results_07_12_2021_Circular'
 
     # ------------- Run scripts ---------
-    # if inclusion == 'squared':
-    # Stokes-PN model with the averaged data
-    #     _, _ , BayesValid_PNM = stokespnm.run(paramsAvg, inletLoc=inletLoc,errorPerc=0.2,
-    #                                           PCEEDMethod=PCEExpDesignMethod)
-
-    #     # Stokes-PN model without the averaged data
-    #     _, _, BayesValid_PNM_NA = stokespnm.run(params, averaging=False,inletLoc=inletLoc,
-    #                                             PCEEDMethod=PCEExpDesignMethod)
+    if inclusion == 'squared':
+        # Stokes-PN model with the averaged data
+        _, _, BayesValid_PNM = stokespnm.run(paramsAvg, inletLoc=inletLoc)
+
+        # Stokes-PN model without the averaged data
+        _, _, BayesValid_PNM_NA = stokespnm.run(params, averaging=False,
+                                                inletLoc=inletLoc)
+        # StokesDarcy with Classical IC (Beavers-Joseph)
+        _, _, BayesValid_BJ = stokesdarcy.run(paramsAvg, couplingcond='BJ',
+                                              inletLoc=inletLoc,
+                                              inclusion=inclusion)
 
-    #     # StokesDarcy with Classical IC (Beaver-Joseph)
-    #     _, _, BayesValid_BJ = stokesdarcy.run(paramsAvg, couplingcond='BJ',inletLoc=inletLoc,
-    #                                             inclusion=inclusion,
-    #                                           errorPerc=0.2,PCEEDMethod=PCEExpDesignMethod)
-
-    # # StokesDarcy with Generalized (ER) IC
+    # StokesDarcy with Generalized (ER) IC
     _, _, BayesValid_ER = stokesdarcy.run(paramsAvg, couplingcond='ER',
                                           inletLoc=inletLoc,
-                                          inclusion=inclusion,
-                                          errorPerc=0.2,
-                                          PCEEDMethod=PCEExpDesignMethod)
-    sys.exit()
+                                          inclusion=inclusion)
+    # sys.exit()
 
     # ------------- Load the objects ---------
     # Stokes-PN model with the averaged data
     BayesValid_PNM_logBME = np.loadtxt(
         result_folder
-        + "/outputs_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow/"
-        + "logBME_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
+        + f"/outputs_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow/"
+        + f"logBME_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
     )
 
     # Stokes-PN model without the averaged data
     BayesValid_PNM_NA_logBME = np.loadtxt(
         result_folder
-        + "/outputs_ffpm-stokespnmNA_{inclusion}_inclusion_{inletLoc}Inflow/"
-        + "logBME_ffpm-stokespnmNA_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
+        + f"/outputs_ffpm-stokespnmNA_{inclusion}_inclusion_{inletLoc}Inflow/"
+        + f"logBME_ffpm-stokespnmNA_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
     )
 
     # StokesDarcy with Classical IC (Beavers-Joseph)
@@ -489,7 +484,7 @@ if __name__ == "__main__":
     BayesValid_ER_logBME = np.loadtxt(
         result_folder
         + f"/outputs_ffpm-stokesdarcyER_{inclusion}_inclusion_{inletLoc}Inflow/"
-        + "logBME_ffpm-stokesdarcyER_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
+        + f"logBME_ffpm-stokesdarcyER_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
     )
 
     # ==========================================================================
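
The hunks above fix a classic f-string slip: the directory part of each path carried the `f` prefix while the file-name literal did not, so `{inclusion}` and `{inletLoc}` stayed literal braces and `np.loadtxt` received a path that cannot exist. A self-contained reproduction:

```python
inclusion, inletLoc = "squared", "top"

broken = "logBME_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
fixed = f"logBME_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"

assert broken == "logBME_ffpm-stokespnm_{inclusion}_inclusion_{inletLoc}Inflow-valid.csv"
assert fixed == "logBME_ffpm-stokespnm_squared_inclusion_topInflow-valid.csv"
```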
diff --git a/BayesValidRox/tests/PA-A/ffpm_validation_stokesdarcy.py b/BayesValidRox/tests/PA-A/ffpm_validation_stokesdarcy.py
index 95f43496fa7ff2da75c2522439d4c3043f1f084b..abd314d0893db26b24bf31319a614493345e9b5f 100755
--- a/BayesValidRox/tests/PA-A/ffpm_validation_stokesdarcy.py
+++ b/BayesValidRox/tests/PA-A/ffpm_validation_stokesdarcy.py
@@ -138,7 +138,7 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     # Inputs.Marginals[1].Parameters = params
 
     Inputs.addMarginals()  # Permeability
-    Inputs.Marginals[2].Name = "$K$"  # [1e-10, 1e-8]
+    Inputs.Marginals[2].Name = r"$\mathsf{K}$"  # [1e-10, 1e-8]
     Inputs.Marginals[2].InputValues = stats.uniform(loc=1e-10, scale=1e-8-1e-10).rvs(
         size=MCSize
     )
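
A quick reminder for the `stats.uniform` call above: SciPy parameterises the uniform distribution as U[loc, loc + scale], so `scale` is the width of the interval, not the upper bound; `scale=1e-8-1e-10` therefore reproduces the commented bounds [1e-10, 1e-8]:

```python
from scipy import stats

K_samples = stats.uniform(loc=1e-10, scale=1e-8 - 1e-10).rvs(size=1000)
assert ((K_samples >= 1e-10) & (K_samples <= 1e-8)).all()
```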
@@ -192,11 +192,11 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score = 1 - LOO) is chosen for the final
     # metamodel.
-    MetaModelOpts.MinPceDegree = 8  # default = 1
-    MetaModelOpts.MaxPceDegree = 8  # 9
+    MetaModelOpts.MinPceDegree = 12  # default = 1
+    MetaModelOpts.MaxPceDegree = 12
 
     # q-quasi-norm 0<q<1 (default=1)
-    MetaModelOpts.q = 0.6
+    MetaModelOpts.q = 0.85
 
     # Print summary of the regression results
     # MetaModelOpts.DisplayFlag = True
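
Context for the `q` bump from 0.6 to 0.85: under hyperbolic (q-quasi-norm) truncation the PCE keeps multi-indices alpha with (sum_i alpha_i^q)^(1/q) <= p, so raising q toward 1 retains more interaction terms and grows the basis toward the full total-degree set. A small counting sketch, assuming the standard hyperbolic-truncation convention (dimension 3 is illustrative, not the model's actual input count):

```python
from itertools import product

def q_norm_basis_size(n_dim, degree, q):
    """Count multi-indices alpha with q-quasi-norm <= degree."""
    count = 0
    for alpha in product(range(degree + 1), repeat=n_dim):
        if sum(a**q for a in alpha) ** (1.0 / q) <= degree + 1e-12:
            count += 1
    return count

for q in (0.6, 0.85, 1.0):  # the retained basis grows monotonically with q
    print(q, q_norm_basis_size(3, 12, q))
```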
@@ -212,28 +212,28 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     MetaModelOpts.ExpDesign.NrSamples = nInitSamples
 
     # Sampling methods
-    # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT)
-    # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user
+    # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley
+    # 6) chebyshev(FT) 7) korobov 8) grid(FT) 9) nested_grid(FT) 10) user
     MetaModelOpts.ExpDesign.SamplingMethod = "user"
 
     # Provide the experimental design object with a hdf5 file
-    MetaModelOpts.ExpDesign.hdf5File = "ExpDesign_{}.hdf5".format(Model.Name)
+    MetaModelOpts.ExpDesign.hdf5File = f"ExpDesign_{Model.Name}.hdf5"
 
     # For calculation of validation error
     import h5py
 
-    HDF5File = "ExpDesign_{}-testset_Calibration.hdf5".format(Model.Name)
-    ValidSets = h5py.File("./data/ValidationSets/" + HDF5File, "r+")
+    HDF5File = f"ExpDesign_{Model.Name}-testset_Calibration.hdf5"
+    ValidSets = h5py.File(f"./data/ValidationSets/{HDF5File}", "r")  # read-only
     MetaModelOpts.validSamples = np.array(ValidSets["EDX/init_"])
     validSets = dict()
 
     # Extract x values
     validSets["x_values"] = dict()
     for varIdx, var in enumerate(Model.Output.Names):
-        validSets["x_values"][var] = np.array(ValidSets["x_values/" + var])
+        validSets["x_values"][var] = np.array(ValidSets[f"x_values/{var}"])
 
     for varIdx, var in enumerate(Model.Output.Names):
-        validSets[var] = np.array(ValidSets["EDY/" + var + "/init_"])
+        validSets[var] = np.array(ValidSets[f"EDY/{var}/init_"])
     ValidSets.close()
 
     MetaModelOpts.validModelRuns = validSets
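
For readers unfamiliar with the test-set files consumed here: the dataset paths (`EDX/init_`, `x_values/<var>`, `EDY/<var>/init_`) are exactly those read above; the shapes below are invented for illustration. A toy round-trip showing the expected layout:

```python
import h5py
import numpy as np

# Write a toy file with the layout the script expects (shapes are made up).
with h5py.File("toy_testset.hdf5", "w") as f:
    f["EDX/init_"] = np.random.rand(10, 3)      # validation input samples
    f["x_values/p"] = np.linspace(0.0, 1.0, 8)  # coordinates for output "p"
    f["EDY/p/init_"] = np.random.rand(10, 8)    # model outputs per sample

with h5py.File("toy_testset.hdf5", "r") as f:   # read-only access suffices
    valid_samples = np.array(f["EDX/init_"])
    x_p = np.array(f["x_values/p"])
    y_p = np.array(f["EDY/p/init_"])
```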
@@ -276,6 +276,7 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     PostPCE.accuracyCheckMetaModel(
         Samples=MetaModelOpts.validSamples, validOutputsDict=validSets
     )
+
     # =====================================================
     # =========  Bayesian inference (Calibration)  ========
     # =====================================================
@@ -294,7 +295,7 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     # Select the inference method
     BayesOptsCalib.SamplingMethod = "MCMC"
     BayesOptsCalib.MCMCnwalkers = 30
-    BayesOptsCalib.MCMCnSteps = 20000
+    # BayesOptsCalib.MCMCnSteps = 20000
     # BayesOptsCalib.MCMCverbose = True
 
     BayesOptsCalib.MAP = "mean"
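
On `MCMCnwalkers` and the now-disabled `MCMCnSteps`: the naming suggests an emcee-style ensemble sampler, though that is an assumption about BayesValidRox internals. A standalone sketch of the walkers/steps interplay on a toy posterior:

```python
import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta**2)  # standard-normal toy posterior

ndim, nwalkers = 3, 30              # 30 walkers, as configured above
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
p0 = np.random.randn(nwalkers, ndim)
sampler.run_mcmc(p0, 1000)          # far fewer steps than the disabled 20000
samples = sampler.get_chain(discard=200, flat=True)  # drop burn-in, pool walkers
```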
@@ -304,35 +305,35 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     BayesOptsCalib.PlotMAPPred = False
 
     # ----- Define the discrepancy model -------
-    # calibMeasuError = (errorPerc * pd.read_csv(Model.MeasurementFile)) ** 2
-    # calibNumErr = pd.read_csv('data/NumErrorCalib_stokesdarcy_{}_{}_inclusion_{}Inflow.csv'.format(couplingcond,inclusion,inletLoc))**2
-
+    # calibNumErr = pd.read_csv(f'data/NumErrorCalib_stokesdarcy_{couplingcond}'
+    #                           f'_{inclusion}_inclusion_{inletLoc}Inflow.csv')
+    calibNumErr = pd.read_csv("data/NumErrorCalib_"
+                              f"{Model.Name.split('ffpm-')[1]}.csv")
     DiscrepancyOpts = Discrepancy("")
     DiscrepancyOpts.Type = "Gaussian"
-    # DiscrepancyOpts.Parameters = calibNumErr#.add(calibNumErr,fill_value=0)
+    DiscrepancyOpts.Parameters = calibNumErr**2
+
+    # delta_dict = {}
+    # Data = pd.read_csv(Model.MeasurementFile)
+    # ED_Y = PCEModel.ExpDesign.Y
+    # for out in list(Data.keys())[1:]:
+    #     try:
+    #         data = Data[out].to_numpy()[~np.isnan(Data[out])]
+    #     except:
+    #         data = Data[out][~np.isnan(Data[out])]
+
+    #     # Mean absolute error
+    #     # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
+    #     # _,delta_dict[out] = np.linalg.eig(np.cov(((data-ED_Y[out]).T)))
+
+    #     # Mean squared error
+    #     delta_dict[out] = np.ones_like(data)
+    #     # delta_dict[out] = np.mean(((data - ED_Y[out])) ** 2, axis=0)
+
+    # errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
+    # DiscrepancyOpts.Parameters = errormodels #/ errormodels.sum()
     # BayesOptsCalib.Discrepancy = DiscrepancyOpts
-
-    delta_dict = {}
-    Data = pd.read_csv(Model.MeasurementFile)
-    ED_Y = PCEModel.ExpDesign.Y
-    for out in list(Data.keys())[1:]:
-        try:
-            data = Data[out].to_numpy()[~np.isnan(Data[out])]
-        except:
-            data = Data[out][~np.isnan(Data[out])]
-
-        # Mean absolute error
-        # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
-        # _,delta_dict[out] = np.linalg.eig(np.cov(((data-ED_Y[out]).T)))
-
-        # Mean squared error
-        delta_dict[out] = np.ones_like(data)
-        # delta_dict[out] = np.mean(((data - ED_Y[out])) ** 2, axis=0)
-
-    errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
-    DiscrepancyOpts.Parameters = errormodels #/ errormodels.sum()
-    BayesOptsCalib.Discrepancy = DiscrepancyOpts
-    print(DiscrepancyOpts.Parameters)
+    # print(DiscrepancyOpts.Parameters)
 
     # -- (Option C) --
     DiscOutputOpts = Input()
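
The rewritten block above swaps the all-ones placeholder error model for the squared numerical error as a fixed Gaussian discrepancy variance. A sketch of the likelihood this corresponds to, assuming independent Gaussian errors per observation point (the usual reading of `Type = "Gaussian"`):

```python
import numpy as np

def gaussian_loglik(y_model, y_data, sigma2):
    """Independent Gaussian discrepancy with fixed per-point variance sigma2,
    the role played by DiscrepancyOpts.Parameters = calibNumErr**2 above."""
    resid = np.asarray(y_data) - np.asarray(y_model)
    sigma2 = np.asarray(sigma2, dtype=float)
    return -0.5 * np.sum(resid**2 / sigma2 + np.log(2.0 * np.pi * sigma2))
```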
@@ -544,42 +545,48 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
     BayesOptsValid.PlotPostPred = False
 
     # # ----- Define the discrepancy model -------
-    # validMeasuError = (errorPerc * pd.read_csv(Model.MeasurementFileValid)) ** 2
     # validNumErr = pd.read_csv('data/NumErrorValid_stokesdarcy_{}_{}_inclusion_{}Inflow.csv'.format(couplingcond,inclusion,inletLoc))**2
-
     DiscrepancyOpts = Discrepancy("")
     DiscrepancyOpts.Type = "Gaussian"
-    # DiscrepancyOpts.Parameters = validMeasuError#.add(validNumErr,fill_value=0)
-
-    # Pass expected inferred sigma2s
-    # data = pd.read_csv(Model.MeasurementFileValid)
-    # factors = ((data**2)/((data**2).sum()))
-    delta_dict = {}
-    Data = pd.read_csv(Model.MeasurementFileValid)
-    ED_Y = ValidPCEModel.ExpDesign.Y
-    for out in list(Data.keys())[1:]:
-        try:
-            data = Data[out].to_numpy()[~np.isnan(Data[out])]
-        except:
-            data = Data[out][~np.isnan(Data[out])]
-
-        # Mean absolute error
-        # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
-        # Mean squared error
-        delta_dict[out] = np.mean((data - ED_Y[out]) ** 2, axis=0)
-        delta_dict[out] = np.ones_like(data)
-
-    errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
-    factors = errormodels #/ errormodels.sum()
-
+    validNumErr = pd.read_csv("data/NumErrorValid_"
+                              f"{Model.Name.split('ffpm-')[1]}.csv")
     DiscrepancyOpts.Parameters = {
         "velocity [m/s]": np.mean(BayesCalib.Posterior_df["$\sigma^2_{vel}$"].values)
-        * factors["velocity [m/s]"].values,
+        + validNumErr["velocity [m/s]"].values**2,
         "p": np.mean(BayesCalib.Posterior_df["$\sigma^2_{p}$"].values)
-        * factors["p"].values,
+        + validNumErr["p"].values**2,
     }
-
     BayesOptsValid.Discrepancy = DiscrepancyOpts
+
+    # Pass expected inferred sigma2s
+    # data = pd.read_csv(Model.MeasurementFileValid)
+    # factors = ((data**2)/((data**2).sum()))
+    # delta_dict = {}
+    # Data = pd.read_csv(Model.MeasurementFileValid)
+    # ED_Y = ValidPCEModel.ExpDesign.Y
+    # for out in list(Data.keys())[1:]:
+    #     try:
+    #         data = Data[out].to_numpy()[~np.isnan(Data[out])]
+    #     except:
+    #         data = Data[out][~np.isnan(Data[out])]
+
+    #     # Mean absolute error
+    #     # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
+    #     # Mean squared error
+    #     delta_dict[out] = np.mean((data - ED_Y[out]) ** 2, axis=0)
+    #     delta_dict[out] = np.ones_like(data)
+
+    # errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
+    # factors = errormodels #/ errormodels.sum()
+
+    # DiscrepancyOpts.Parameters = {
+    #     "velocity [m/s]": np.mean(BayesCalib.Posterior_df["$\sigma^2_{vel}$"].values)
+    #     * factors["velocity [m/s]"].values,
+    #     "p": np.mean(BayesCalib.Posterior_df["$\sigma^2_{p}$"].values)
+    #     * factors["p"].values,
+    # }
+
+    # BayesOptsValid.Discrepancy = DiscrepancyOpts
     # BayesOptsValid.selectedIndices = {'velocity [m/s]':[2,3,4,5,6,7],
     #                                     'p':[0,1]}
 
@@ -668,4 +675,4 @@ def run(params, couplingcond="BJ", inclusion="squared", inletLoc="top"):
 # ====================   Run main scripts for PA-B   =======================
 # ==========================================================================
 # PCEModel, BayesCalib, BayesValid = run(params,couplingcond='BJ')
-# PCEModel = run(params, couplingcond="BJ", inletLoc="top", inclusion="squared")
+# PCEModel = run(params, couplingcond="ER", inletLoc="top", inclusion="squared")
diff --git a/BayesValidRox/tests/PA-A/ffpm_validation_stokespnm.py b/BayesValidRox/tests/PA-A/ffpm_validation_stokespnm.py
index 0dcfb0d7eba15cb4f685a7be06daa16d4b167835..2e49cd75e02b848085c67bf357e6bd1750aa3395 100755
--- a/BayesValidRox/tests/PA-A/ffpm_validation_stokespnm.py
+++ b/BayesValidRox/tests/PA-A/ffpm_validation_stokespnm.py
@@ -100,11 +100,6 @@ def run(params, averaging=True, inletLoc="top"):
     Model.Type = "PyLink"
 
     Model.Name = modelName
-    if averaging:
-        Model.Name = f"ffpm-stokespnm_squared_inclusion_{inletLoc}Inflow"
-    else:
-        Model.Name = f"ffpm-stokespnmNA_squared_inclusion_{inletLoc}Inflow"
-
     modelDir = "./models/stokespnm/"
     Model.InputFile = f"{modelDir}params_squared_inclusion_{inletLoc}"\
         "Inflow.input"
@@ -112,7 +107,7 @@ def run(params, averaging=True, inletLoc="top"):
         "Inflow.tpl.input"
 
     Model.Command = f"model_exe.py stokespnm {Model.InputFile}"\
-        f"--averaging {averaging}"
+        f" --averaging {averaging}"
     Model.ExecutionPath = os.getcwd()
     Model.Output.Parser = "read_ffpm"
     Model.Output.Names = ["velocity [m/s]", "p"]
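
The one-character fix above matters because adjacent string literals are concatenated with no separator: without the leading space, the `--averaging` flag was fused onto the input-file path and the resulting command line was unparseable. Reproduction:

```python
input_file = "params.input"
averaging = True

broken = f"model_exe.py stokespnm {input_file}" f"--averaging {averaging}"
fixed = f"model_exe.py stokespnm {input_file}" f" --averaging {averaging}"

assert broken == "model_exe.py stokespnm params.input--averaging True"
assert fixed == "model_exe.py stokespnm params.input --averaging True"
```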
@@ -151,14 +146,15 @@ def run(params, averaging=True, inletLoc="top"):
     )
     # Inputs.Marginals[1].DistType = 'unif'
     # Inputs.Marginals[1].Parameters = [1e-7, 1e-05]
-    # Inputs.addMarginals() #TransmissibilityThroat '$g_{t,ij}$'
+
+    # Inputs.addMarginals()  #TransmissibilityThroat '$g_{t,ij}$'
     # Inputs.Marginals[1].Name = '$g_{t,ij}$' # [1.0e-7, 1.0e-04]
     # Inputs.Marginals[1].InputValues = stats.uniform(loc=1e-7, scale=1e-4-1e-7).rvs(size=MCSize)
 
-    # Inputs.addMarginals() #TransmissibilityHalfPore '$g_{p,i}$'
+    # Inputs.addMarginals()  #TransmissibilityHalfPore '$g_{p,i}$'
     # Inputs.Marginals[2].Name = '$g_{p,i}$' # [1.0e-7, 1.0e-04]
     # Inputs.Marginals[2].InputValues = stats.uniform(loc=1e-7, scale=1e-4-1e-7).rvs(size=MCSize)
-    
+
     # '$\\beta_{pore}$'
     Inputs.addMarginals()
     Inputs.Marginals[2].Name = "$\\beta_{pore}$"  # [1000.0, 100000.0]
@@ -198,8 +194,8 @@ def run(params, averaging=True, inletLoc="top"):
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score = 1 - LOO) is chosen for the final
     # metamodel.
-    MetaModelOpts.MinPceDegree = 9  # default = 1 9
-    MetaModelOpts.MaxPceDegree = 9  # 9
+    MetaModelOpts.MinPceDegree = 12  # default = 1
+    MetaModelOpts.MaxPceDegree = 12
 
     # q-quasi-norm 0<q<1 (default=1)
     # MetaModelOpts.q = np.linspace(0.3,0.6,3)
@@ -310,34 +306,33 @@ def run(params, averaging=True, inletLoc="top"):
     BayesOptsCalib.PlotMAPPred = False
 
     # ----- Define the discrepancy model -------
-    # calibMeasuError = (errorPerc * pd.read_csv(Model.MeasurementFile)) ** 2
-    # calibNumErr = pd.read_csv('data/NumErrorCalib_stokespnm_\
-    # squared_inclusion_{}Inflow.csv'.format(inletLoc))**2
+    calibNumErr = pd.read_csv("data/NumErrorCalib_"
+                              f"{modelName.split('ffpm-')[1]}.csv")
 
     DiscrepancyOpts = Discrepancy("")
     DiscrepancyOpts.Type = "Gaussian"
-    # DiscrepancyOpts.Parameters = calibMeasuError.add(calibNumErr,fill_value=0)
+    DiscrepancyOpts.Parameters = calibNumErr**2
     # data = pd.read_csv(Model.MeasurementFile)
     # DiscrepancyOpts.Parameters = ((data**2)/((data**2).sum()))
     # BayesOptsCalib.Discrepancy = DiscrepancyOpts
-    delta_dict = {}
-    Data = pd.read_csv(Model.MeasurementFile)
-    ED_Y = PCEModel.ExpDesign.Y
-    for out in list(Data.keys())[1:]:
-        try:
-            data = Data[out].to_numpy()[~np.isnan(Data[out])]
-        except:
-            data = Data[out][~np.isnan(Data[out])]
-
-        # Mean absolute error
-        # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
-        # Mean squared error
-        # delta_dict[out] = np.mean((data - ED_Y[out])**2,axis=0)
-        delta_dict[out] = np.ones_like(data)
-
-    errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
-    DiscrepancyOpts.Parameters = errormodels  # /errormodels.sum()
-    BayesOptsCalib.Discrepancy = DiscrepancyOpts
+    # delta_dict = {}
+    # Data = pd.read_csv(Model.MeasurementFile)
+    # ED_Y = PCEModel.ExpDesign.Y
+    # for out in list(Data.keys())[1:]:
+    #     try:
+    #         data = Data[out].to_numpy()[~np.isnan(Data[out])]
+    #     except:
+    #         data = Data[out][~np.isnan(Data[out])]
+
+    #     # Mean absolute error
+    #     # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
+    #     # Mean squared error
+    #     # delta_dict[out] = np.mean((data - ED_Y[out])**2,axis=0)
+    #     delta_dict[out] = np.ones_like(data)
+
+    # errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
+    # DiscrepancyOpts.Parameters = errormodels  # /errormodels.sum()
+    # BayesOptsCalib.Discrepancy = DiscrepancyOpts
 
     # # -- (Option A) --
     DiscOutputOpts = Input()
@@ -415,7 +410,6 @@ def run(params, averaging=True, inletLoc="top"):
     # Plot posterior predictive
     postPredictiveplot(PCEModel.ModelObj.Name, averaging, inletLoc=inletLoc,
                        case="Calib", bins=20)
-
     # =====================================================
     # ==================  VALIDATION  =====================
     # =====================================================
@@ -545,42 +539,46 @@ def run(params, averaging=True, inletLoc="top"):
     BayesOptsValid.Corner_title_fmt = ".2e"
     BayesOptsValid.PlotPostPred = False
 
-    # # ----- Define the discrepancy model -------
-    # validMeasuError = (errorPerc * pd.read_csv(Model.MeasurementFileValid)) ** 2
-    # validNumErr = pd.read_csv('data/NumErrorValid_stokespnm_squared_inclusion_{}Inflow.csv'.format(inletLoc))**2
-
-    # DiscrepancyOpts = Discrepancy('')
-    # DiscrepancyOpts.Type = 'Gaussian'
-    # DiscrepancyOpts.Parameters = validMeasuError  # .add(validNumErr,fill_value=0)
-
-    # Pass expected inferred sigma2s
-    data = pd.read_csv(Model.MeasurementFileValid)
-    factors = (data ** 2) / ((data ** 2).sum())
-    delta_dict = {}
-    Data = pd.read_csv(Model.MeasurementFileValid)
-    ED_Y = ValidPCEModel.ExpDesign.Y
-    for out in list(Data.keys())[1:]:
-        try:
-            data = Data[out].to_numpy()[~np.isnan(Data[out])]
-        except:
-            data = Data[out][~np.isnan(Data[out])]
-
-        # Mean absolute error
-        # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
-        # Mean squared error
-        # delta_dict[out] = np.mean((data - ED_Y[out]) ** 2, axis=0)
-        delta_dict[out] = np.ones_like(data)
-
-    errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
-    factors = errormodels #/ errormodels.sum()
+    # ----- Define the discrepancy model -------
+    DiscrepancyOpts = Discrepancy("")
+    DiscrepancyOpts.Type = "Gaussian"
+    validNumErr = pd.read_csv("data/NumErrorValid_"
+                              f"{modelName.split('ffpm-')[1]}.csv")
     DiscrepancyOpts.Parameters = {
         "velocity [m/s]": np.mean(BayesCalib.Posterior_df["$\sigma^2_{vel}$"].values)
-        * factors["velocity [m/s]"].values,
+        + validNumErr["velocity [m/s]"].values**2,
         "p": np.mean(BayesCalib.Posterior_df["$\sigma^2_{p}$"].values)
-        * factors["p"].values,
+        + validNumErr["p"].values**2,
     }
     BayesOptsValid.Discrepancy = DiscrepancyOpts
 
+    # Pass expected inferred sigma2s
+    # data = pd.read_csv(Model.MeasurementFileValid)
+    # factors = (data ** 2) / ((data ** 2).sum())
+    # delta_dict = {}
+    # Data = pd.read_csv(Model.MeasurementFileValid)
+    # for out in list(Data.keys())[1:]:
+    #     try:
+    #         data = Data[out].to_numpy()[~np.isnan(Data[out])]
+    #     except:
+    #         data = Data[out][~np.isnan(Data[out])]
+
+    #     # Mean absolute error
+    #     # delta_dict[out] = np.mean(abs(data - ED_Y[out]),axis=0)
+    #     # Mean squared error
+    #     # delta_dict[out] = np.mean((data - ED_Y[out]) ** 2, axis=0)
+    #     delta_dict[out] = np.ones_like(data)
+
+    # errormodels = pd.DataFrame.from_dict(delta_dict, orient="index").T
+    # factors = errormodels #/ errormodels.sum()
+    # DiscrepancyOpts.Parameters = {
+    #     "velocity [m/s]": np.mean(BayesCalib.Posterior_df["$\sigma^2_{vel}$"].values)
+    #     * factors["velocity [m/s]"].values,
+    #     "p": np.mean(BayesCalib.Posterior_df["$\sigma^2_{p}$"].values)
+    #     * factors["p"].values,
+    # }
+    # BayesOptsValid.Discrepancy = DiscrepancyOpts
+
     # Construct error model based on discrepancy between ED and Data
     # BayesOptsValid.errorModel = True
     # coord_vel = pd.read_csv('models/velocity_points.csv')
@@ -662,4 +660,4 @@ def run(params, averaging=True, inletLoc="top"):
 # ====================   Run main scripts for PA-B   =======================
 # ==========================================================================
 # PCEModel, BayesCalib, BayesValid = run(params,averaging=False)
-# PCEModel = run(params,averaging=False, inletLoc='top')
+# PCEModel = run(params, averaging=False, inletLoc='top')