diff --git a/BayesValidRox/tests/PollutionTest/Pollution.py b/BayesValidRox/tests/PollutionTest/Pollution.py
index 46d814bb6a4f87f1164fce771fadc20b1a076ea3..2a517b44ff50180f8ae24bfcf1fa3b56fa5c2636 100644
--- a/BayesValidRox/tests/PollutionTest/Pollution.py
+++ b/BayesValidRox/tests/PollutionTest/Pollution.py
@@ -56,9 +56,9 @@ def Pollution(xx, s=None, t=None):
     
 
     
-    if t is None: t = np.arange(0.3, 60, 1) # 0.3
+    if t is None: t = [40+5*i for i in range(21)]
     
-    if s is None: s = np.array([2.5])#np.array([0.5, 1, 1.5, 2, 2.5])
+    if s is None: s = np.array([3.0])#np.array([0.5, 1, 1.5, 2, 2.5])
     
     ds = len(s);
     dt = len(t);
@@ -96,27 +96,30 @@ def Pollution(xx, s=None, t=None):
 
 if __name__ == "__main__":
 
-    MCSize = 750000 #1000000
+    MCSize = 500000 #500000
+    validSize = 100
     ndim = 4
-    sigma = 0.5
+    sigma_noise = 0.15
+    sigma_error = 0.5 
     
     # -------------------------------------------------------------------------
     # ----------------------- Synthetic data generation -----------------------
     # -------------------------------------------------------------------------
-    MAP = np.array([[10, 0.07, 1.505, 30.1525]])
+    MAP = np.array([[10, 0.07, 1.0, 30.16]])
     outputMAP = Pollution(MAP)
     
-    synthethicData = np.random.normal(0,sigma,outputMAP[1].shape)
+    synthethicData = outputMAP[1] + np.random.normal(0,sigma_noise,outputMAP[1].shape)
     
-    # import matplotlib.pyplot as plt
-    # plt.scatter(outputMAP[0], synthethicData, ls='-', marker='*')
-    # plt.legend(loc='best')
-    # plt.show()
+    import matplotlib.pyplot as plt
+    plt.scatter(outputMAP[0], synthethicData, ls='-', marker='*', label='Obs. Data')
+    plt.legend(loc='best')
+    plt.show()
     np.save("data/MeasuredData.npy", synthethicData)
+
     # -------------------------------------------------------------------------
     # ---------------------- Generate Prior distribution ----------------------
     # -------------------------------------------------------------------------
-    
+    validSet = np.zeros((validSize, ndim))
     xx = np.zeros((MCSize, ndim))
     
     params = [(7, 13), (0.02, 0.12), (0.01, 3), (30.01, 30.295)]
@@ -124,13 +127,16 @@ if __name__ == "__main__":
     for idxDim in range(ndim):
         lower, upper = params[idxDim]
         xx[:,idxDim] = stats.uniform(loc=lower, scale=upper-lower).rvs(size=MCSize)
+        validSet[:,idxDim] = stats.uniform(loc=lower, scale=upper-lower).rvs(size=validSize)
     
+    # run validation set
+    validOutputs = Pollution(validSet)
     # -------------------------------------------------------------------------
     # ------------- BME and Kullback-Leibler Divergence -----------------------
     # -------------------------------------------------------------------------
     Outputs = Pollution(xx)
     
-    cov_matrix = np.diag(np.repeat(sigma**2, synthethicData.shape))
+    cov_matrix = np.diag(np.repeat(sigma_error**2, synthethicData.shape))
     
     Likelihoods = stats.multivariate_normal.pdf(Outputs[1:], mean=synthethicData, cov=cov_matrix)
     
@@ -157,13 +163,14 @@ if __name__ == "__main__":
     # -------------------------------------------------------------------------
     # ----------------- Save the arrays as .npy files -------------------------
     # -------------------------------------------------------------------------
-    if MCSize > 500000:
-        np.save("data/ParamSets.npy", xx)
-        np.save("data/refBME_KLD.npy", (logBME, KLD))
-        np.save("data/mean.npy", np.mean(Outputs[1:],axis=0))
-        np.save("data/std.npy", np.std(Outputs[1:],axis=0))
-    else:
-        np.save("data/validSet.npy", xx)
-        np.save("data/validSetOutput.npy", Outputs[1:])
-        np.save("data/validLikelihoods.npy", Likelihoods)
+    np.save("data/ParamSets.npy", xx)
+    np.save("data/refBME_KLD.npy", (logBME, KLD))
+    np.save("data/mean.npy", np.mean(Outputs[1:],axis=0))
+    np.save("data/std.npy", np.std(Outputs[1:],axis=0))
+    np.save("data/validLikelihoods.npy", Likelihoods)
+    
+    # Save validation set
+    np.save("data/validSet.npy", validSet)
+    np.save("data/validSetOutput.npy", validOutputs[1:])
+    
 
diff --git a/BayesValidRox/tests/PollutionTest/Pollution_Test.py b/BayesValidRox/tests/PollutionTest/Pollution_Test.py
index ce3cdb96cce242762d76d11a7a99d884afc77ffd..c3fda7129244e96a319b082ad2814429e78ec040 100644
--- a/BayesValidRox/tests/PollutionTest/Pollution_Test.py
+++ b/BayesValidRox/tests/PollutionTest/Pollution_Test.py
@@ -43,15 +43,15 @@ if __name__ == "__main__":
     Model.pyFile = 'Pollution'
     Model.Name = 'Pollution'
     
-    Model.Output.Names = ['Z']
+    Model.Output.Names = ['C']
     
     # Synthetic data for Bayesian inversion.
-    # MAP = (10, 0.07, 1.505, 30.1525)
-    Model.Observations['Time [s]'] = np.arange(0.3, 60, 1)
-    Model.Observations['Z'] = np.load("data/MeasuredData.npy")
+    # MAP = (10, 0.07, 1.0, 30.16)
+    Model.Observations['Time [s]'] = [40+5*i for i in range(21)]
+    Model.Observations['C'] = np.load("data/MeasuredData.npy")
     
     # For Checking with the MonteCarlo refrence
-    Model.MCReference['Time [s]'] = np.arange(0.3, 60, 1)
+    Model.MCReference['Time [s]'] = [40+5*i for i in range(21)]
     Model.MCReference['mean'] = np.load("data/mean.npy")
     Model.MCReference['std'] = np.load("data/std.npy")
     
@@ -106,8 +106,8 @@ if __name__ == "__main__":
     MetaModelOpts = Metamodel(Inputs)
     
     # Select if you want to preserve the spatial/temporal depencencies
-    # MetaModelOpts.DimRedMethod = 'PCA'
-    # MetaModelOpts.varPCAThreshold = 100 #99.999
+    MetaModelOpts.DimRedMethod = 'PCA'
+    # MetaModelOpts.varPCAThreshold = 99.5 #99.999
     
      # Select your metamodel method
     # 1) PCE (Polynomial Chaos Expansion) 2) GPE (Gaussian Process Emulator)
@@ -132,7 +132,7 @@ if __name__ == "__main__":
     MetaModelOpts.MaxPceDegree = 10
     
     # q-quasi-norm 0<q<1 (default=1)
-    MetaModelOpts.q = 0.85 #np.linspace(0.3,0.8, 3)
+    MetaModelOpts.q = 0.75 #np.linspace(0.3,0.8, 3)
     
     # Print summary of the regression results
     #MetaModelOpts.DisplayFlag = True
@@ -144,7 +144,7 @@ if __name__ == "__main__":
     
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 10 #200
+    NrofInitSamples = 20 #200
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
     # Sampling methods
     # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) 
@@ -152,38 +152,61 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign.SamplingMethod = 'random'
     
     # Sequential experimental design (needed only for sequential ExpDesign)
-    MetaModelOpts.ExpDesign.MaxNSamples = 50
+    MetaModelOpts.ExpDesign.MaxNSamples = 100
     MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-3
     
+    # Defining the measurement error, if it's known a priori
+    obsData = pd.DataFrame(0.5*np.ones(21), columns=Model.Output.Names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.Type = 'Gaussian'
+    DiscrepancyOpts.Parameters = obsData  ** 2
+    MetaModelOpts.Discrepancy = DiscrepancyOpts
+    
     # Plot the posterior snapshots for SeqDesign
-    MetaModelOpts.ExpDesign.PostSnapshot = True
+    MetaModelOpts.ExpDesign.PostSnapshot = False
     MetaModelOpts.ExpDesign.stepSnapshot = 1
-    MetaModelOpts.ExpDesign.MAP = (10, 0.07, 1.505, 30.1525)
+    MetaModelOpts.ExpDesign.MAP = (10, 0.07, 1.0, 30.16)
     MetaModelOpts.ExpDesign.parNames = ['M', 'D', 'L', 'tau']
     
-    # -------- Optimality criteria: Optimization ------
-    # 1)'dual annealing' 2)'BayesOptDesign'
-    MetaModelOpts.ExpDesign.SeqOptimMethod = 'BayesOptDesign'
-    MetaModelOpts.ExpDesign.ExplorationMethod = 'MC' #1)'Voronoi' 2)'LHS' 3) 'MC'
+    # ------------------------------------------------
+    # ------- Sequential Design configuration --------
+    # ------------------------------------------------
+    # MetaModelOpts.adaptVerbose = True
+    
+    # -------- Tradeoff scheme ------
+    # 1) 'None' 2) 'equal' 3)'epsilon-decreasing' 4) 'adaptive'
+    MetaModelOpts.ExpDesign.TradeOffScheme = 'None'
+    # MetaModelOpts.ExpDesign.nReprications = 20
     
-    MetaModelOpts.ExpDesign.MaxFunItr = 100
+    # -------- Exploration ------
+    #1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'LOOCV' 5)'dual annealing'
+    MetaModelOpts.ExpDesign.ExploreMethod = 'random'
     
-    MetaModelOpts.ExpDesign.NCandidate = 1000 # 5000
-    MetaModelOpts.ExpDesign.NrofCandGroups = 4
+    # Use when 'dual annealing' chosen
+    MetaModelOpts.ExpDesign.MaxFunItr = 200
     
+    # Use when 'Voronoi' or 'random' or 'latin_hypercube' chosen
+    MetaModelOpts.ExpDesign.NCandidate = 400 #5000 
+    MetaModelOpts.ExpDesign.NrofCandGroups = 8
+    
+    # -------- Exploitation ------
+    # 1)'BayesOptDesign' 2)'BayesActDesign' 3)'VarOptDesign' 4)'alphabetic' 
+    # 5)'Space-filling'
+    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
+    
+    # BayesOptDesign -> when data is available
     # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision)
-    # 3)APP (A-Posterior-percision) 
-    MetaModelOpts.ExpDesign.UtilityFunction = 'DKL'
+    # 3)APP (A-Posterior-precision)  # ['DKL', 'BME', 'infEntropy']
+    # MetaModelOpts.ExpDesign.UtilityFunction = 'DKL'
     
+    # VarOptDesign -> when data is not available
+    # Only with Voronoi >>> 1)Entropy 2)EIGF, 3)LOOCV
+    MetaModelOpts.ExpDesign.UtilityFunction = 'EIGF' #['EIGF', 'Entropy', 'LOOCV']
     
-    # -------- Optimality criteria: alphabetic ------
-#    MetaModelOpts.ExpDesign.SeqOptimMethod = 'alphabetic'
-#    MetaModelOpts.ExpDesign.NCandidate = 500
-#    
-#    # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
-#    # 3)K-Opt (K-Optimality)
-#    MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt'#['D-Opt', 'A-Opt', 'K-Opt']
-#    
+    # alphabetic
+    # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
+    # 3)K-Opt (K-Optimality)
+    # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
     
     # For colculation of validation error for SeqDesign 
     MetaModelOpts.validSamples = np.load("data/validSet.npy")
@@ -208,14 +231,21 @@ if __name__ == "__main__":
     
     # Plot the evolution of the KLD,BME, and Modified LOOCV error
     if MetaModelOpts.ExpDesign.Method == 'sequential':
-        PostPCE.seqDesignDiagnosticPlots()
+        refBME_KLD = np.load("data/refBME_KLD.npy")
+        PostPCE.seqDesignDiagnosticPlots(refBME_KLD)
     
     # Plot the sobol indices
     if MetaModelOpts.metaModel != 'GPE':
         sobol_cell, total_sobol = PostPCE.sobolIndicesPCE()
-        
+    
+    # Compute and print RMSE error
+    validOutputsDict = dict()
+    validOutputsDict['Time [s]'] = [40+5*i for i in range(21)]
+    validOutputsDict['C']= np.load("data/validSetOutput.npy")
+    PostPCE.accuracyCheckMetaModel(Samples=MetaModelOpts.validSamples, 
+                                   validOutputsDict=validOutputsDict)
     import sys
-    sys.exit("STOP")
+    sys.exit()
     #=====================================================
     #========  Bayesian inference with Emulator ==========
     #=====================================================    
@@ -240,7 +270,7 @@ if __name__ == "__main__":
     BayesOpts.PlotPostDist = True
     BayesOpts.PlotPostPred = True
     # ----- Define the discrepancy model -------
-    obsData = pd.DataFrame(0.5*np.ones(60), columns=Model.Output.Names)
+    obsData = pd.DataFrame(0.5*np.ones(21), columns=Model.Output.Names)
     BayesOpts.MeasurementError = obsData
     # (Option B)
     DiscrepancyOpts = Discrepancy('')
diff --git a/BayesValidRox/tests/PollutionTest/data/MeasuredData.npy b/BayesValidRox/tests/PollutionTest/data/MeasuredData.npy
index f05ce103231930d48404e37bcf121624b56524b9..106643acd153f1f681cdde626f805102aad8381b 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/MeasuredData.npy and b/BayesValidRox/tests/PollutionTest/data/MeasuredData.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/ParamSets.npy b/BayesValidRox/tests/PollutionTest/data/ParamSets.npy
index 42d580ee270f2f9b55d46ee845d308567849905d..8b3b95f5f6fe0147e98ff3f56577febd99589766 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/ParamSets.npy and b/BayesValidRox/tests/PollutionTest/data/ParamSets.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/mean.npy b/BayesValidRox/tests/PollutionTest/data/mean.npy
index dd05d21e7e85d24a220a896ce794da1647520a08..409e24cf02b46c266a5c7d9d1b18c7e62eaf2870 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/mean.npy and b/BayesValidRox/tests/PollutionTest/data/mean.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/refBME_KLD.npy b/BayesValidRox/tests/PollutionTest/data/refBME_KLD.npy
index 9ba819a69039a242e1921362f96fe56bc4a62dd6..5eb288c17cd98e23860227c508ec4b1a1bc94764 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/refBME_KLD.npy and b/BayesValidRox/tests/PollutionTest/data/refBME_KLD.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/std.npy b/BayesValidRox/tests/PollutionTest/data/std.npy
index 2a085253272e1b894944e2ad458c36e805ead161..3f2f58f1b3298d17740947675fc08ba4b5ca47b8 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/std.npy and b/BayesValidRox/tests/PollutionTest/data/std.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/validLikelihoods.npy b/BayesValidRox/tests/PollutionTest/data/validLikelihoods.npy
index 72609ba49b08843737860d40dc6c4593147cfbea..baf3243ed267b47c16be270869e72d16478b9c22 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/validLikelihoods.npy and b/BayesValidRox/tests/PollutionTest/data/validLikelihoods.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/validSet.npy b/BayesValidRox/tests/PollutionTest/data/validSet.npy
index a030354ed1399cf582a6bf328ff9a485d622b08d..57bcb0c5dd8eb778382ae331f5193c4eaccb2b3f 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/validSet.npy and b/BayesValidRox/tests/PollutionTest/data/validSet.npy differ
diff --git a/BayesValidRox/tests/PollutionTest/data/validSetOutput.npy b/BayesValidRox/tests/PollutionTest/data/validSetOutput.npy
index a570452f76f8795ebcc87eb96adaa6d5b8f4b0cf..d62a701f3a9a16cc30cb179456e9b6a556fc0c40 100644
Binary files a/BayesValidRox/tests/PollutionTest/data/validSetOutput.npy and b/BayesValidRox/tests/PollutionTest/data/validSetOutput.npy differ