diff --git a/BayesValidRox/PostProcessing/PostProcessing.py b/BayesValidRox/PostProcessing/PostProcessing.py
index 2f4e2daa04acbf201bc8caec79ccdc0f8fc67174..0f7993af0643a25e3a35b7eb273253a01c4678c2 100644
--- a/BayesValidRox/PostProcessing/PostProcessing.py
+++ b/BayesValidRox/PostProcessing/PostProcessing.py
@@ -861,9 +861,7 @@ class PostProcessing:
         #==================================================================
         #============  Visualisation of Sobol indices =====================
         #==================================================================
-        objects = []
-        for pid in range(1,NofPa+1):
-            objects.append('$P_%s$'%pid)
+        objects = ['$X_{%s}$'%(pid+1) for pid in range(NofPa)]
         
         if PlotType == 'bar':
             y_pos = np.arange(len(objects))
@@ -886,7 +884,7 @@ class PostProcessing:
             # Make the plot
             for idx in range(NofMeasurements):
                 plt.bar(position[idx], total_sobol_array[:,idx], color=colors[idx],
-                        width=barWidth, edgecolor='white', label='$y_%s$'%(idx+1))
+                        width=barWidth, edgecolor='white', label='$y_{%s}$'%(idx+1))
             
             
     #        for i, v in enumerate(total_sobol_array[:,0]):
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 5a26479ab11a3048673c270e482a89aaeb7ee012..94068e509966a4ad526877df8aa3fe14c57dab65 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -99,21 +99,58 @@ class aPCE:
     
     #--------------------------------------------------------------------------------------------------------
     def PolyBasisIndices(self, degree, q):
+        # from scipy import sparse
+        # # Generate all possible combinations according to SparseLimit
+        # Prod = product(np.arange(0, deg+1), repeat=self.NofPa)
         
-        # Generate all possible combinations according to SparseLimit
-        Prod = product(np.arange(0, degree+1), repeat=self.NofPa)
+        # # Sort based on the L1-norm
+        # #SortDegreeCombinations = np.asarray(sorted(Prod, key=sum), dtype=int)
+        # SortDegreeCombinations = sparse.csr_matrix(sorted(Prod, key=sum), dtype=int)
         
-        # Sort based on the L1-norm
-        SortDegreeCombinations = np.asarray(sorted(Prod, key=sum), dtype=int)
+        # # quasiNorm ||alpha||_q
+        # quasiNorm = np.around(np.power(np.power(SortDegreeCombinations.toarray(), q).sum(axis=1), 1/q), decimals=2)
         
-        # quasiNorm ||alpha||_q
-        quasiNorm = np.around(np.power(np.power(SortDegreeCombinations, q).sum(axis=1), 1/q), decimals=2)
+        # # Store if quasiNorm <= SparseLimit and ignore existing index of last SparseDegree
+        # Combos = SortDegreeCombinations[quasiNorm<=deg]
         
-        # Store if quasiNorm <= SparseLimit and ignore existing index of last SparseDegree
-        Combos = SortDegreeCombinations[quasiNorm<=degree]
+        # self.PolynomialDegrees = Combos
+        ndim = self.NofPa
+        row_ind, col_ind, values = np.empty(0), np.empty(0), np.empty(0)
+        cnt = 1
+        idx = [0]*ndim
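+        # Walk an odometer-style counter idx over all multi-indices with
+        # total degree <= degree; keep an index set alpha if its quasi-norm
+        # ||alpha||_q = (sum_i alpha_i^q)^(1/q) does not exceed degree, and
+        # store only its non-zero entries as (row, col, value) triplets.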
         
+        while True:
+            #print(idx)
+            nnz_idx = np.where(np.asarray(idx)>0)[0]
+            
+            # quasiNorm ||alpha||_q
+            quasiNorm = np.around(np.power(np.power(np.asarray(idx), q).sum(), 1/q), decimals=2)
+            
+            
+            if len(nnz_idx)!=0 and quasiNorm <= degree:
+                row_ind = np.hstack((row_ind, [cnt]*len(nnz_idx))) 
+                col_ind = np.hstack((col_ind, nnz_idx))
+                value = [idx[i] for i in nnz_idx]
+                values = np.hstack((values, value))
+                cnt+=1
+            
+            # increase first dimension
+            idx[0] += 1
+                
+            # handle overflows
+            for i in range(0, ndim-1):
+                if idx[i] > degree or sum(idx) > degree:
+                    # reset this dimension and increase next higher dimension
+                    idx[i] = 0
+                    idx[i+1] += 1
+            
+            if idx[-1] > degree:
+                # overflow in the last dimension, we are finished
+                break
         
-        self.PolynomialDegrees = Combos
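+        # Assemble the retained multi-indices into a sparse COO matrix;
+        # row 0 stays all-zero and represents the constant basis term.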
+        sparseBasisIndices = sp.sparse.coo_matrix((values.astype(int), (row_ind.astype(int), col_ind.astype(int))))
+        
+        self.PolynomialDegrees = sparseBasisIndices
         
         return self.PolynomialDegrees
     
@@ -370,7 +407,9 @@ class aPCE:
         This function assembles the design matrix Psi from the given basis index
         set BasisIndices and the univariate polynomial evaluations univ_p_val.
         """
-
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(BasisIndices)
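+        # Sparse formats such as COO do not support the fancy indexing used
+        # below, so columns of a sparse input are densified before indexing.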
+        
         # Initialization and consistency checks
         # number of input variables
         NofPa = univ_p_val.shape[1]
@@ -383,16 +422,17 @@ class aPCE:
         
         # check that the variables have consistent sizes
         if NofPa != BasisIndices.shape[1]:
-            raise ValueError("BasisIndices and univ_p_val don't seem to have consistent sizes!!")
+            raise ValueError("The shapes of BasisIndices and univ_p_val don't match!!")
         
         # Preallocate the Psi matrix for performance
         Psi = np.ones((NofSamples, P))
         
         # Assemble the Psi matrix
         for m in range(BasisIndices.shape[1]):
-            aa = np.where(BasisIndices[:,m]>0)[0]
+            basisCol = BasisIndices.toarray()[:,m] if sparsity else BasisIndices[:,m]
+            aa = np.where(basisCol > 0)[0]
             try:
-                Psi[:,aa] = np.multiply(Psi[:,aa] , np.reshape(univ_p_val[:, m, BasisIndices[aa,m]], Psi[:,aa].shape))
+                Psi[:,aa] = np.multiply(Psi[:,aa], np.reshape(univ_p_val[:, m, basisCol[aa]], Psi[:,aa].shape))
             except:
                 raise ValueError
         
@@ -429,6 +469,9 @@ class aPCE:
             https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor
         
         """
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(BasisIndices)
+        
         clf_poly = []
         compute_score = True if self.DisplayFlag else False
         
@@ -473,7 +516,7 @@ class aPCE:
             # Remove the zero entries for Bases and PSI if need be
             z_idx = np.where(clf_poly.coef_==0)[0]
             if len(z_idx) > 0:
-                PolynomialDegrees_Sparse = BasisIndices[nnz_idx]
+                PolynomialDegrees_Sparse = BasisIndices.toarray()[nnz_idx] if sparsity else BasisIndices[nnz_idx]
                 PSI_Sparse = PSI[:,nnz_idx]
 
                 # Store the coefficients of the regression model (mean of distribution).
@@ -1912,23 +1955,29 @@ class aPCE:
         # Estimation of the integral via Monte Carlo integration
         while ((ESS > MCsize) or (ESS < 1)):
              
+            # Generate samples for Monte Carlo simulation
+            if len(validLikelihoods)==0:
+                ExpDesign = ExpDesigns(self.Inputs)
+                X_MC = ExpDesign.GetSample(MCsize, self.MaxPceDegree, SamplingMethod)
+            else:
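+                # Reuse the user-provided validation samples and fix MCsize
+                # to the size of that sample set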
+                X_MC = self.validSamples
+                MCsize = X_MC.shape[0]
+                
             # Monte Carlo simulation for the candidate design
-             ExpDesign = ExpDesigns(self.Inputs)
-             X_MC = ExpDesign.GetSample(MCsize,self.MaxPceDegree, SamplingMethod);
-             Y_PC_MC, std_PC_MC = PCEModel.eval_PCEmodel(X_MC)
+            Y_PC_MC, std_PC_MC = PCEModel.eval_PCEmodel(X_MC)
              
-             # Likelihood computation (Comparison of data and 
-             # simulation results via PCE with candidate design)
+            # Likelihood computation (Comparison of data and 
+            # simulation results via PCE with candidate design)
              
-             Likelihoods = self.normpdf(Y_PC_MC,std_PC_MC,ObservationData, sigma2Dict)
+            Likelihoods = self.normpdf(Y_PC_MC,std_PC_MC,ObservationData, sigma2Dict)
              
-             # Check the Effective Sample Size (1000<ESS<MCsize)
-             ESS = 1 / np.sum(np.square(Likelihoods/np.sum(Likelihoods)))
+            # Check the Effective Sample Size (1 < ESS < MCsize)
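+            # ESS = 1 / sum(w_i^2), with w_i the normalised likelihood weights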
+            ESS = 1 / np.sum(np.square(Likelihoods/np.sum(Likelihoods)))
              
-             # Enlarge sample size if it doesn't fulfill the criteria
-             if ((ESS > MCsize) or (ESS < 1)):
-                 print('MC size should be larger.')
-                 MCsize = MCsize * 10
+            # Enlarge sample size if it doesn't fulfill the criteria
+            if ((ESS > MCsize) or (ESS < 1)):
+                print('MC size should be larger.')
+                MCsize = MCsize * 10
         
         # Rejection Step
         # Random numbers between 0 and 1
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction.py
index 32a01d55503485e308439afa25189d44891bc1a7..c4ed8e05c3ecd841f16d6b2d4757b12d5ecad325 100644
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction.py
+++ b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction.py
@@ -6,6 +6,10 @@ Created on Wed Nov 20 14:48:43 2019
 @author: farid
 """
 import numpy as np
+import scipy.stats as stats
+import matplotlib.pyplot as plt
+import seaborn as sns
 
 def AnalyticalFunction(xx, t=None):
     """
@@ -73,47 +77,69 @@ def AnalyticalFunction(xx, t=None):
     
     return np.vstack((t, Output))
 
-# if __name__ == "__main__":
-#     import scipy.stats as stats
-#     import matplotlib.pyplot as plt
+if __name__ == "__main__":
+
+    MCSize = 10000 #1000000
+    ndim = 10
+    sigma = 2
+    
+    # -------------------------------------------------------------------------
+    # ----------------------- Synthetic data generation -----------------------
+    # -------------------------------------------------------------------------
+    t = np.arange(0, 10, 1.) / 9
+    
+    MAP = np.zeros((1, ndim))
+    syntheticData = AnalyticalFunction(MAP, t=t)
+    
+    # -------------------------------------------------------------------------
+    # ---------------------- Generate Prior distribution ----------------------
+    # -------------------------------------------------------------------------
+    
+    xx = np.zeros((MCSize, ndim))
     
-#     MCSize = 1000000
-#     ndim= 4
-#     t = np.arange(0, 10, 1.) / 9
+    params = (-5,5)
     
-#     MAP = np.zeros((1, ndim))
-#     synthethicData = AnalyticalFunction(MAP, t=t)
+    for idxDim in range(ndim):
+        lower, upper = params
+        xx[:,idxDim] = stats.uniform(loc=lower, scale=upper-lower).rvs(size=MCSize)
     
+    # -------------------------------------------------------------------------
+    # ------------- BME and Kullback-Leibler Divergence -----------------------
+    # -------------------------------------------------------------------------
+    Outputs = AnalyticalFunction(xx, t=t)
     
-#     xx = np.zeros((MCSize, ndim))
+    cov_matrix = np.diag(np.repeat(sigma**2, syntheticData.shape[1]))
     
-#     params = (-5,5)
+    Likelihoods = stats.multivariate_normal.pdf(Outputs[1:], mean=syntheticData[1], cov=cov_matrix)
     
-#     for idxDim in range(ndim):
-#         lower, upper = params
-#         xx[:,idxDim] = stats.uniform(loc=lower, scale=upper-lower).rvs(size=MCSize)
+    sns.kdeplot(np.log(Likelihoods[Likelihoods>0]), shade=True, color="g", label='Ref. Likelihood')
     
+    normLikelihood = Likelihoods / np.nanmax(Likelihoods)
+    # Random numbers between 0 and 1
+    unif = np.random.rand(1, MCSize)[0]
     
-#     Outputs = AnalyticalFunction(xx, t=t)
+    # Reject the poorly performed prior
+    accepted = normLikelihood >= unif
     
-#     sigma = 2
-#     cov_matrix = np.diag(np.repeat(sigma**2, synthethicData.shape[1]))
-#     import scipy.stats as st
-#     Likelihoods = st.multivariate_normal.pdf(Outputs, mean=synthethicData[0], cov=cov_matrix)
+    # Prior-based estimation of BME
+    logBME = np.log(np.nanmean(Likelihoods))
+    print('\nThe Naive MC-Estimation of BME is %.5f.'%(logBME))
     
-#     import seaborn as sns
-#     sns.kdeplot(np.log(Likelihoods[Likelihoods>0]), shade=True, color="g", label='Ref. Likelihood')
+    # Posterior-based expectation of likelihoods
+    postExpLikelihoods = np.mean(np.log(Likelihoods[accepted]))
     
-#     if ndim == 2:
-#         np.save("validLikelihoods.npy", Likelihoods)
-#     else:
-#         np.save("validLikelihoodsComplex.npy", Likelihoods)
-#    print("Mean:\n", Outputs.mean(axis=0))
-#    print("Std:\n", Outputs.std(axis=0))
-#    plt.plot(t, Outputs.mean(axis=0),linestyle='--', marker='^')
-#
-#    #plt.errorbar(t, Outputs.mean(axis=0), Outputs.std(axis=0), linestyle='--', marker='^')
-#
-#    plt.show()
+    # Calculate Kullback-Leibler Divergence
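+    # KLD(posterior || prior) = E_posterior[ln(Likelihood)] - ln(BME)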
+    KLD = postExpLikelihoods - logBME
+    print("The Kullback-Leibler divergence estimation is %.5f."%KLD)
     
-    
\ No newline at end of file
+    # -------------------------------------------------------------------------
+    # ----------------- Save the arrays as .npy files -------------------------
+    # -------------------------------------------------------------------------
+    if MCSize > 500000:
+        np.save("data/refBME_KLD_"+str(ndim)+".npy", (logBME, KLD))
+        np.save("data/mean_"+str(ndim)+".npy", np.mean(Outputs[1:],axis=0))
+        np.save("data/std_"+str(ndim)+".npy", np.std(Outputs[1:],axis=0))
+    else:
+        np.save("data/Prior_"+str(ndim)+".npy", xx)
+        np.save("data/origModelOutput_"+str(ndim)+".npy", Outputs[1:])
+        np.save("data/validLikelihoods_"+str(ndim)+".npy", Likelihoods)
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
deleted file mode 100644
index 1cdbd5517bd8fbceb3804dcbad9b91de17f70258..0000000000000000000000000000000000000000
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
+++ /dev/null
@@ -1,325 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Aug  9 16:25:43 2019
-
-@author: farid
-"""
-
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-plt.style.use('seaborn-white')
-plt.rcParams.update({'font.size': 18})
-plt.rc('font', family='sans-serif', serif='Arial')
-plt.rc('figure', figsize = (24, 16))
-plt.rc('text', usetex=True)
-plt.rc('xtick', labelsize=18)
-plt.rc('ytick', labelsize=18)
-plt.rc('axes', labelsize=18)
-
-import seaborn as sns
-import os
-
-try:
-    import cPickle as pickle
-except ModuleNotFoundError:
-    import pickle
-
-from PyLink.FuncForwardModel import FuncForwardModel
-from surrogate_models.Input import Input
-from surrogate_models.surrogate_models import aPCE
-from PostProcessing.PostProcessing import PostProcessing
-from BayesInference.BayesInference import BayesInference, Discrepancy
-
-if __name__ == "__main__":
-    
-    #=====================================================
-    #=============   COMPUTATIONAL MODEL  ================
-    #=====================================================
-    Model = FuncForwardModel('AnalyticalFunction')
-    
-    Model.Type = 'Function'
-    Model.pyFile = 'AnalyticalFunction'
-    Model.Name = 'AnalyticFunc'
-    
-    Model.Output.Names = ['Z']
-    
-    
-    # For Bayesian inversion synthetic data with X=[0,0]
-    Model.Observations['Time [s]'] = np.arange(0, 10, 1.) / 9
-    Model.Observations['Z'] = np.repeat([2],10)
-    
-    # For Checking with the MonteCarlo refrence
-    #Model.MCReferenceFile = 'MCrefs_MeanStd.csv'
-    Model.MCReference['Time [s]'] = np.arange(0, 10, 1.) / 9
-    Model.MCReference['mean'] = np.array([127.0125924,  127.01258296, 127.01257912,
-                             127.0125762,  127.01257375,127.01257161, 127.01256968,
-                             127.01256792, 127.01256629, 127.01256476])
-    Model.MCReference['std'] = np.array([171.14352189, 171.11558731, 171.10718417,
-                             171.10199472, 171.09846887,171.09600621, 171.09429878, 
-                             171.09316364, 171.09248159, 171.09216983])
-
-    #=====================================================
-    #=========   PROBABILISTIC INPUT MODEL  ==============
-    #=====================================================
-    # Define the uncertain parameters with their mean and 
-    # standard deviation
-    Inputs = Input()
-    
-    Inputs.addMarginals()
-    Inputs.Marginals[0].Name = 'x1'
-    Inputs.Marginals[0].DistType = 'unif'
-    Inputs.Marginals[0].Parameters =  [-5, 5]
-    
-    Inputs.addMarginals()
-    Inputs.Marginals[1].Name = 'x2'
-    Inputs.Marginals[1].DistType = 'unif'
-    Inputs.Marginals[1].Parameters =  [-5, 5]
-    
-    #=====================================================
-    #======  POLYNOMIAL CHAOS EXPANSION METAMODELS  ======
-    #=====================================================    
-    MetaModelOpts = aPCE(Inputs)
-
-    # Specify the max degree to be compared by the adaptive algorithm:
-    # The degree with the lowest Leave-One-Out cross-validation (LOO)
-    # error (or the highest score=1-LOO)estimator is chosen as the final 
-    # metamodel.
-    MetaModelOpts.MinPceDegree = 10 #2
-    MetaModelOpts.MaxPceDegree = 10 #10
-    
-    # q-quasi-norm 0<q<1 (default=1)
-    MetaModelOpts.q = 1.0 #np.linspace(0.25,1, 4) #1.0
-    
-    # Select the sparse least-square minimization method for 
-    # the PCE coefficients calculation:
-    # 1)OLS: Ordinary Least Square  2)BRR: Bayesian Ridge Regression 
-    # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression
-    # 5)SGDRegressor: Stochastic gradient descent learning
-    
-    MetaModelOpts.RegMethod = 'ARD'
-    
-    # Print summary of the regression results
-    #MetaModelOpts.DisplayFlag = True
-    
-    # ------ Experimental Design --------
-    # Generate an experimental design of size NrExpDesign based on a latin 
-    # hypercube sampling of the input model or user-defined values of X and/or Y:
-    MetaModelOpts.addExpDesign()
-    
-    # One-shot (normal) or Sequential Adaptive (sequential) Design
-    MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 10
-    MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
-    
-    # Sampling methods
-    # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) 
-    # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user
-    MetaModelOpts.ExpDesign.SamplingMethod = 'halton'
-    
-    # Sequential experimental design (needed only for sequential ExpDesign)
-    MetaModelOpts.ExpDesign.NrofNewSample = 1
-    MetaModelOpts.ExpDesign.MaxNSamples = 50
-    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-16
-    
-    # Defining the measurement error, if it's known a priori
-    obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names)
-    DiscrepancyOpts = Discrepancy('')
-    DiscrepancyOpts.Type = 'Gaussian'
-    DiscrepancyOpts.Parameters = obsData  ** 2
-    MetaModelOpts.Discrepancy = DiscrepancyOpts
-        
-    # Plot the posterior snapshots for SeqDesign
-    MetaModelOpts.ExpDesign.PostSnapshot = False
-    MetaModelOpts.ExpDesign.stepSnapshot = 1
-    MetaModelOpts.ExpDesign.MAP = (0,0)
-    MetaModelOpts.ExpDesign.parNames = ['$X_1$', '$X_2$']
-    
-    # ------------------------------------------------
-    # ------- Sequential Design configuration --------
-    # ------------------------------------------------
-    # 1) 'None' 2) 'epsilon-decreasing'
-    MetaModelOpts.ExpDesign.TradeOffScheme = 'None'
-    #MetaModelOpts.ExpDesign.nReprications = 5
-    # -------- Exploration ------
-    #1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'dual annealing'
-    MetaModelOpts.ExpDesign.ExploreMethod = 'random'
-    
-    # Use when 'dual annealing' chosen
-    MetaModelOpts.ExpDesign.MaxFunItr = 200
-    
-    # Use when 'Voronoi' or 'random' or 'latin_hypercube' chosen
-    MetaModelOpts.ExpDesign.NCandidate = 100
-    MetaModelOpts.ExpDesign.NrofCandGroups = 4
-    
-    # -------- Exploitation ------
-    # 1)'BayesOptDesign' 2)'VarOptDesign' 3)'alphabetic' 4)'Space-filling'
-    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
-    
-    # BayesOptDesign -> when data is available
-    # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision)
-    # 3)APP (A-Posterior-percision) 
-    # 1)DKL (Kullback-Leibler Divergence) 2)BME (Bayesian model evidence)
-    # 3)infEntropy (Entropy-based information gain) 
-    # MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
-    
-    # VarBasedOptDesign -> when data is not available
-    # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)ALM, 4)LOOCV
-    MetaModelOpts.ExpDesign.UtilityFunction = 'LOOCV' #['EIGF', 'Entropy', 'LOOCV']
-    
-    # alphabetic
-    # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
-    # 3)K-Opt (K-Optimality)
-    # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
-    
-    # For colculation of validation error for SeqDesign 
-    MetaModelOpts.validModelRuns = np.load("origModelOutput.npy")
-    MetaModelOpts.validSamples = np.load("Prior.npy")
-    MetaModelOpts.validLikelihoods = np.load("validLikelihoods.npy")
-    
-    # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
-    # Adaptive sparse arbitrary polynomial chaos expansion
-    PCEModel = MetaModelOpts.create_PCE(Model)
-    
-    #=====================================================
-    #=========  POST PROCESSING OF METAMODELS  ===========
-    #=====================================================
-    PostPCE = PostProcessing(PCEModel)
-    
-    PostPCE.NrofSamples = 1 #3
-    PostPCE.plotFlag = True
-    t = np.arange(0, 10, 1.) / 9
-    PostPCE.ValidMetamodel(x_values=t, x_axis="Time [s]")
-    
-    # Comparison with Monte-Carlo reference
-    # Compute the moments
-    PostPCE.PCEMoments(PCEModel)
-    
-    # Plot Mean & Std for all Outputs
-    PostPCE.MomentLineplot(PCE_Means = PostPCE.PCEMeans,
-                           PCE_Stds = PostPCE.PCEStd,
-                           x_label = 'Time [s]', SaveFig = True)
-    
-    
-    # Plot the evolution of the KLD,BME, and Modified LOOCV error
-    if MetaModelOpts.ExpDesign.Method == 'sequential':
-        PostPCE.seqDesignDiagnosticPlots(NrofInitSamples, refBME_KLD=(-19.28640,2.582278348728405)
-                                         , SaveFig=True)
-    
-    # Plot the sobol indices
-    sobol_cell, total_sobol = PostPCE.SobolIndicesPCE('Z', SaveFig=True)
-    
-    # Compute and print RMSE error
-    PostPCE.relErrorPCEModel(nSamples=3000)
-    
-    #=====================================================
-    #========  Bayesian inference with Emulator ==========
-    #=====================================================    
-    BayesOpts = BayesInference(PCEModel)
-    BayesOpts.emulator = True
-    
-    # Bootstrap
-    BayesOpts.Bootstrap = True
-    BayesOpts.BootstrapItrNr = 1
-    BayesOpts.BootstrapNoise = 0.05
-    
-    # Evaluation of surrogate model predictions
-    BayesOpts.NrofSamples = 100000
-    
-    BayesOpts.PlotPostDist = True
-    BayesOpts.PlotPostPred = True
-    
-    # ----- Define the discrepancy model -------
-    obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names)
-    BayesOpts.MeasurementError = obsData
-    # (Option B)
-    DiscrepancyOpts = Discrepancy('')
-    DiscrepancyOpts.Type = 'Gaussian'
-    DiscrepancyOpts.Parameters = obsData **2
-    
-    BayesOpts.Discrepancy = DiscrepancyOpts
-    
-    
-    Bayes_PCE = BayesOpts.create_Inference()
-    
-    
-    # Make directory for saving the plots/outputs
-    newpath = (r'Outputs_'+MetaModelOpts.ExpDesign.Method+'_'+MetaModelOpts.ExpDesign.ExploitMethod) 
-    if not os.path.exists(newpath): os.makedirs(newpath)
-
-    
-    ## Plot the posterior (Model)
-    with sns.axes_style("white"):
-        g = sns.jointplot(x=Bayes_PCE.Posterior_df['x1'], y=Bayes_PCE.Posterior_df['x2'], 
-                          kind="kde", cmap='jet');
-        g.ax_joint.collections[0].set_alpha(0)
-        g.set_axis_labels("$X_1$", "$X_2$")
-        
-        g.savefig('./'+newpath+'/Posterior_PCEModel_'+MetaModelOpts.ExpDesign.UtilityFunction+'.svg')
-        plt.close()
-
-
-    #=====================================================
-    #====== Bayesian inference with Forward Model ========
-    #=====================================================    
-    BayesOptsModel = BayesInference(PCEModel)
-    BayesOptsModel.emulator = False
-    BayesOptsModel.NrofSamples = 100000
-    
-    # Evaluation of forward model predictions for the samples used for PCEModel
-    BayesOptsModel.Samples = Bayes_PCE.Samples
-    
-    # Bootstrap for BME calulations
-    BayesOptsModel.Bootstrap = True
-    BayesOptsModel.BootstrapItrNr = 1
-    BayesOptsModel.BootstrapNoise = 0.05
-    
-    BayesOptsModel.PlotPostDist = True
-    BayesOptsModel.PlotPostPred = True
-    
-  
-    # ----- Define the discrepancy model -------
-    BayesOptsModel.MeasurementError = obsData
-    # (Option B)
-    DiscrepancyOpts = Discrepancy('')
-    DiscrepancyOpts.Type = 'Gaussian'
-    DiscrepancyOpts.Parameters = obsData **2
-    
-    BayesOptsModel.Discrepancy = DiscrepancyOpts    
-    
-    Bayes_Model = BayesOptsModel.create_Inference()
-    
-    
-    ## Plot the posterior (PCEModel)
-    with sns.axes_style("white"):
-        g = sns.jointplot(x=Bayes_Model.Posterior_df['x1'], y=Bayes_Model.Posterior_df['x2'], 
-                          kind="kde", cmap='jet');
-        g.ax_joint.collections[0].set_alpha(0)
-        g.set_axis_labels("$X_1$", "$X_2$")
-        g.savefig('./'+newpath+'/Posterior_Model_'+MetaModelOpts.ExpDesign.UtilityFunction+'.svg')
-        plt.close()
-    
-    
-    #=====================================================
-    #==============  Save class objects  =================
-    #=====================================================
-    with open('AnalyticFunc_Results.pkl', 'wb') as output:
-        pickle.dump(PCEModel, output, pickle.HIGHEST_PROTOCOL)
-    
-        pickle.dump(PostPCE, output, pickle.HIGHEST_PROTOCOL)
-    
-        pickle.dump(Bayes_PCE, output, pickle.HIGHEST_PROTOCOL)
-        
-        pickle.dump(Bayes_Model, output, pickle.HIGHEST_PROTOCOL)
-    
-#    del PCEModel
-#    del PostPCE
-#    del Bayes
-    
-    # Load the objects
-#    with open('AnalyticFunc_Results.pkl', 'rb') as input:
-#        PCEModel = pickle.load(input)
-#        PostPCE = pickle.load(input)
-#        Bayes_PCE = pickle.load(input)    
-#        Bayes_Model = pickle.load(input) 
\ No newline at end of file
diff --git a/BayesValidRox/tests/AnalyticalFunction/Prior.npy b/BayesValidRox/tests/AnalyticalFunction/Prior.npy
deleted file mode 100644
index 46f2d0a302acd1aafe5c92b56efe76e2139f25f2..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/Prior.npy and /dev/null differ
diff --git a/BayesValidRox/tests/AnalyticalFunction/PriorComplex.npy b/BayesValidRox/tests/AnalyticalFunction/PriorComplex.npy
deleted file mode 100644
index bff41819d3740fa6a8106f318c499d082fc22cbf..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/PriorComplex.npy and /dev/null differ
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py b/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
old mode 100644
new mode 100755
similarity index 83%
rename from BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py
rename to BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
index acd42f60bd5630052d351ffd7990de9f197e5e7c..c83e47cc9275f9f0457c7751c51cefd6810e339d
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py
+++ b/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
@@ -34,6 +34,9 @@ from BayesInference.BayesInference import BayesInference, Discrepancy
 
 if __name__ == "__main__":
     
+    # Number of parameters
+    ndim = 2
+    
     #=====================================================
     #=============   COMPUTATIONAL MODEL  ================
     #=====================================================
@@ -41,7 +44,7 @@ if __name__ == "__main__":
     
     Model.Type = 'Function'
     Model.pyFile = 'AnalyticalFunction'
-    Model.Name = 'AnalyticFuncComplex'
+    Model.Name = 'AnalyticFunc'
     
     Model.Output.Names = ['Z']
     
@@ -53,12 +56,8 @@ if __name__ == "__main__":
     # For checking with the Monte Carlo reference
     #Model.MCReferenceFile = 'MCrefs_MeanStd.csv'
     Model.MCReference['Time [s]'] = np.arange(0, 10, 1.) / 9
-    Model.MCReference['mean'] = np.array([127.12140284, 127.122227, 127.12256845,
-                             127.12283047, 127.12305139, 127.12324603, 127.12342201,
-                             127.12358386, 127.1237345,  127.123876])
-    Model.MCReference['std'] = np.array([173.78059749, 173.75168669, 173.74283551,
-                             173.73728493, 173.73344294, 173.73069289, 173.72871849,
-                             173.72733186, 173.72641059, 173.72586962])
+    Model.MCReference['mean'] = np.load("data/mean_"+str(ndim)+".npy")
+    Model.MCReference['std'] = np.load("data/std_"+str(ndim)+".npy")
 
     #=====================================================
     #=========   PROBABILISTIC INPUT MODEL  ==============
@@ -67,7 +66,6 @@ if __name__ == "__main__":
     # standard deviation
     Inputs = Input()
     
-    ndim = 4
     for i in range(ndim):
         Inputs.addMarginals()
         Inputs.Marginals[i].Name = 'x'+str(i+1)
@@ -88,7 +86,7 @@ if __name__ == "__main__":
     MetaModelOpts.MaxPceDegree = 10 #8
     
     # q-quasi-norm 0<q<1 (default=1)
-    MetaModelOpts.q = 1.0
+    MetaModelOpts.q = 1.0 if ndim<5 else 0.5
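+    # (a smaller q yields a sparser, hyperbolic truncation of the polynomial
+    #  basis, which keeps the number of terms manageable for larger ndim)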
     
     # Select the sparse least-square minimization method for 
     # the PCE coefficients calculation:
@@ -108,7 +106,7 @@ if __name__ == "__main__":
     
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 50
+    NrofInitSamples = 20
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
     
     # Sampling methods
@@ -132,7 +130,7 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign.PostSnapshot = False
     MetaModelOpts.ExpDesign.stepSnapshot = 1
     MetaModelOpts.ExpDesign.MAP = [0] * ndim
-    MetaModelOpts.ExpDesign.parNames = ['$X_%s$'%(i+1) for i in range(ndim)]
+    MetaModelOpts.ExpDesign.parNames = ['$X_{%s}$'%(i+1) for i in range(ndim)]
     
     # ------------------------------------------------
     # ------- Sequential Design configuration --------
@@ -170,9 +168,9 @@ if __name__ == "__main__":
     # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
     
     # For calculation of the validation error for SeqDesign
-    MetaModelOpts.validModelRuns = np.load("origModelOutputComplex.npy")
-    MetaModelOpts.validSamples = np.load("PriorComplex.npy")
-    MetaModelOpts.validLikelihoods = np.load("validLikelihoodsComplex.npy")
+    MetaModelOpts.validSamples = np.load("data/Prior_"+str(ndim)+".npy")
+    MetaModelOpts.validModelRuns = np.load("data/origModelOutput_"+str(ndim)+".npy")
+    MetaModelOpts.validLikelihoods = np.load("data/validLikelihoods_"+str(ndim)+".npy")
     
     # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
     # Adaptive sparse arbitrary polynomial chaos expansion
@@ -200,7 +198,8 @@ if __name__ == "__main__":
     
     # Plot the evolution of the KLD,BME, and Modified LOOCV error
     if MetaModelOpts.ExpDesign.Method == 'sequential':
-        PostPCE.seqDesignDiagnosticPlots(NrofInitSamples, refBME_KLD=(-20.70660,3.8734296205796745)
+        refBME_KLD = np.load("data/refBME_KLD_"+str(ndim)+".npy")
+        PostPCE.seqDesignDiagnosticPlots(NrofInitSamples, refBME_KLD
                                          , SaveFig=True)
     
     # Plot the sobol indices
@@ -242,33 +241,34 @@ if __name__ == "__main__":
     
     #=====================================================
     #====== Bayesian inference with Forward Model ========
-    #=====================================================    
-    BayesOptsModel = BayesInference(PCEModel)
-    BayesOptsModel.emulator = False
-    BayesOptsModel.NrofSamples = 100000
-    
-    # Evaluation of forward model predictions for the samples used for PCEModel
-    BayesOptsModel.Samples = Bayes_PCE.Samples
-    
-    # Bootstrap for BME calulations
-    BayesOptsModel.Bootstrap = True
-    BayesOptsModel.BootstrapItrNr = 1
-    BayesOptsModel.BootstrapNoise = 0.05
-    
-    BayesOptsModel.PlotPostDist = True
-    BayesOptsModel.PlotPostPred = True
-    
-  
-    # ----- Define the discrepancy model -------
-    BayesOptsModel.MeasurementError = obsData
-    # (Option B)
-    DiscrepancyOpts = Discrepancy('')
-    DiscrepancyOpts.Type = 'Gaussian'
-    DiscrepancyOpts.Parameters = obsData **2
-    
-    BayesOptsModel.Discrepancy = DiscrepancyOpts    
-    
-    Bayes_Model = BayesOptsModel.create_Inference()
+    #=====================================================
+    if ndim < 7:    
+        BayesOptsModel = BayesInference(PCEModel)
+        BayesOptsModel.emulator = False
+        BayesOptsModel.NrofSamples = 100000
+        
+        # Evaluation of forward model predictions for the samples used for PCEModel
+        BayesOptsModel.Samples = Bayes_PCE.Samples
+        
+        # Bootstrap for BME calculations
+        BayesOptsModel.Bootstrap = True
+        BayesOptsModel.BootstrapItrNr = 1
+        BayesOptsModel.BootstrapNoise = 0.05
+        
+        BayesOptsModel.PlotPostDist = True
+        BayesOptsModel.PlotPostPred = True
+        
+      
+        # ----- Define the discrepancy model -------
+        BayesOptsModel.MeasurementError = obsData
+        # (Option B)
+        DiscrepancyOpts = Discrepancy('')
+        DiscrepancyOpts.Type = 'Gaussian'
+        DiscrepancyOpts.Parameters = obsData **2
+        
+        BayesOptsModel.Discrepancy = DiscrepancyOpts    
+        
+        Bayes_Model = BayesOptsModel.create_Inference()
     
     
     #=====================================================
@@ -281,7 +281,8 @@ if __name__ == "__main__":
     
         pickle.dump(Bayes_PCE, output, pickle.HIGHEST_PROTOCOL)
         
-        pickle.dump(Bayes_Model, output, pickle.HIGHEST_PROTOCOL)
+        if ndim < 7: 
+            pickle.dump(Bayes_Model, output, pickle.HIGHEST_PROTOCOL)
     
 #    del PCEModel
 #    del PostPCE
diff --git a/BayesValidRox/tests/AnalyticalFunction/origModelOutput.npy b/BayesValidRox/tests/AnalyticalFunction/origModelOutput.npy
deleted file mode 100644
index 983507baa8dc56a4aada61de1f00b85b28f35788..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/origModelOutput.npy and /dev/null differ
diff --git a/BayesValidRox/tests/AnalyticalFunction/origModelOutputComplex.npy b/BayesValidRox/tests/AnalyticalFunction/origModelOutputComplex.npy
deleted file mode 100644
index ec52b3fdef6f05829411bbceb1bc5500ff42adc1..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/origModelOutputComplex.npy and /dev/null differ
diff --git a/BayesValidRox/tests/AnalyticalFunction/validLikelihoods.npy b/BayesValidRox/tests/AnalyticalFunction/validLikelihoods.npy
deleted file mode 100644
index 58a0ba80e5ed1c11d082130d6fea5e287f22108d..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/validLikelihoods.npy and /dev/null differ
diff --git a/BayesValidRox/tests/AnalyticalFunction/validLikelihoodsComplex.npy b/BayesValidRox/tests/AnalyticalFunction/validLikelihoodsComplex.npy
deleted file mode 100644
index a30f8214bc99f670a391f94da87d15dac396e704..0000000000000000000000000000000000000000
Binary files a/BayesValidRox/tests/AnalyticalFunction/validLikelihoodsComplex.npy and /dev/null differ
diff --git a/BayesValidRox/tests/MECBM/SurrogateModel/Surrogate_MECBM.py b/BayesValidRox/tests/MECBM/SurrogateModel/Surrogate_MECBM.py
index 2d311676f1f1ce11ff6ba7a2a5ea9e280196749d..d3c1ae2f32cf6188ea8e7f129fabf8497e3433f6 100644
--- a/BayesValidRox/tests/MECBM/SurrogateModel/Surrogate_MECBM.py
+++ b/BayesValidRox/tests/MECBM/SurrogateModel/Surrogate_MECBM.py
@@ -92,6 +92,7 @@ if __name__ == "__main__":
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score = 1 - LOO) is chosen as the final 
     # metamodel.
+    MetaModelOpts.MinPceDegree = 10
     MetaModelOpts.MaxPceDegree = 10
     
     # Select the sparse least-square minimization method for 
@@ -99,7 +100,7 @@ if __name__ == "__main__":
     # 1)AaPCE: Adaptive aPCE  2)BRR: Bayesian Ridge Regression 
     # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression
     
-    MetaModelOpts.RegMethod = 'BRR'
+    MetaModelOpts.RegMethod = 'ARD'
     
     # Print summary of the regression results
     #MetaModelOpts.DisplayFlag = True
@@ -138,7 +139,7 @@ if __name__ == "__main__":
     #MetaModelOpts.ExpDesign.nReprications = 2
     # -------- Exploration ------
     #1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'dual annealing'
-    MetaModelOpts.ExpDesign.ExploreMethod = 'random'
+    MetaModelOpts.ExpDesign.ExploreMethod = 'Voronoi'
     
     # Use when 'dual annealing' chosen  
     MetaModelOpts.ExpDesign.MaxFunItr = 200
@@ -149,16 +150,16 @@ if __name__ == "__main__":
     
     # -------- Exploitation ------
     # 1)'BayesOptDesign' 2)'VarOptDesign' 3)'alphabetic' 4)'Space-filling'
-    MetaModelOpts.ExpDesign.ExploitMethod = 'BayesOptDesign'
+    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
     
     # BayesOptDesign -> when data is available
     # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-precision)
     # 3)APP (A-Posterior-precision) 
-    MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
+    #MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
     
     # VarBasedOptDesign -> when data is not available
     # Only with Voronoi >>> 1)Entropy 2)EIGF, 3)ALM, 4)LOOCV
-    #MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'
+    MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'
     
     # alphabetic
     # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)