diff --git a/BayesValidRox/PostProcessing/PostProcessing.py b/BayesValidRox/PostProcessing/PostProcessing.py
index 95d4ddff23b6fe5f31790aacdecf0ab1752befc1..5e597f6c6b18a8fe6dd1e36b5c5e2ea7b584ade7 100644
--- a/BayesValidRox/PostProcessing/PostProcessing.py
+++ b/BayesValidRox/PostProcessing/PostProcessing.py
@@ -143,21 +143,27 @@ class PostProcessing:
             
             idx = 0
             for Inkey, InIdxValues in ValuesDict.items():
-                PolynomialDegrees = PCEModel.BasisDict[Outkey][Inkey]
                 
-                clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+                if PCEModel.RegMethod == 'GPR':
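+                    # A GPR surrogate predicts directly on the raw samples;
+                    # no polynomial basis (Psi matrix) is assembled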
+                    gp = PCEModel.clf_poly[Outkey][Inkey]
+                    y_mean, y_std = gp.predict(Samples, return_std=True)
                 
-                PSI_Val = PCEModel.PCE_create_Psi(PolynomialDegrees, univ_p_val)
-                
-                # Perdiction
-                try:
-                    # with error bar 
-                    y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
-                except:
-                    # without error bar 
-                    coeffs = PCEModel.CoeffsDict[Outkey][Inkey]
-                    y_mean = np.dot(PSI_Val, coeffs)
-                    y_std = np.zeros(shape=y_mean.shape)
+                else:
+                    PolynomialDegrees = PCEModel.BasisDict[Outkey][Inkey]
+                    
+                    clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+                    
+                    PSI_Val = PCEModel.PCE_create_Psi(PolynomialDegrees, univ_p_val)
+                    
+                    # Prediction
+                    try:
+                        # with error bar 
+                        y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+                    except:
+                        # without error bar 
+                        coeffs = PCEModel.CoeffsDict[Outkey][Inkey]
+                        y_mean = np.dot(PSI_Val, coeffs)
+                        y_std = np.zeros(shape=y_mean.shape)
                 
                 PCEOutputs_mean[:, idx] = y_mean
                 PCEOutputs_std[:, idx] = y_std
diff --git a/BayesValidRox/surrogate_models/RegressionFastARD.py b/BayesValidRox/surrogate_models/RegressionFastARD.py
index b53aea5251e5521b3d7dde4e47237f9dd29bddd4..cad82103fd8c1b7a76d28d4d79113438c51d2377 100755
--- a/BayesValidRox/surrogate_models/RegressionFastARD.py
+++ b/BayesValidRox/surrogate_models/RegressionFastARD.py
@@ -12,6 +12,7 @@ import warnings
 from sklearn.utils import check_X_y,check_array,as_float_array
 from scipy.linalg import pinvh
 
+
 def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
     '''
     Selects one feature to be added/recomputed/deleted to model based on 
@@ -42,7 +43,7 @@ def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
     deltaL[recompute] = Qrec**2 / (Srec + 1. / delta_alpha) - np.log(1 + Srec*delta_alpha)
     deltaL[delete]    = Qdel**2 / (Sdel - Adel) - np.log(1 - Sdel / Adel)
     deltaL            = deltaL  / n_samples
-    
+
     # find feature which caused largest change in likelihood
     feature_index = np.argmax(deltaL)
              
@@ -106,8 +107,12 @@ class RegressionFastARD():
         
     copy_X : boolean, optional (DEFAULT = True)
         If True, X will be copied; else, it may be overwritten.
-        
-    verbose : boolean, optional (DEFAULT = True)
+    
+    compute_score : boolean, optional (DEFAULT = False)
+        If True, compute the log marginal likelihood at each iteration of the
+        optimization.
+    
+    verbose : boolean, optional (DEFAULT = False)
         Verbose mode when fitting the model
         
         
@@ -128,7 +133,10 @@ class RegressionFastARD():
     sigma_ : array, shape = (n_features, n_features)
         estimated covariance matrix of the weights, computed only
         for non-zero coefficients  
-       
+    
+    scores_ : array-like, one entry per iteration
+        If compute_score is True, value of the log marginal likelihood (to be
+        maximized) at each iteration of the optimization.
        
     References
     ----------
@@ -140,12 +148,13 @@ class RegressionFastARD():
     '''
     
     def __init__( self, n_iter = 300, tol = 1e-3, fit_intercept = True, 
-                  copy_X = True, verbose = False):
+                  copy_X = True, compute_score = False, verbose = False):
         self.n_iter          = n_iter
         self.tol             = tol
         self.scores_         = list()
         self.fit_intercept   = fit_intercept
         self.copy_X          = copy_X
+        self.compute_score   = compute_score
         self.verbose         = verbose
         
         
@@ -206,7 +215,7 @@ class RegressionFastARD():
         
         # in case of almost perfect multicollinearity between some features
         # start from feature 0
-        if np.sum( XXd - X_mean**2 < np.finfo(np.float32).eps ) > 0:
+        if np.sum( XXd - X_mean**2 < np.finfo(np.float32).eps ) >= 0:
             A[0]       = np.finfo(np.float16).eps
             active[0]  = True
         else:
@@ -217,6 +226,7 @@ class RegressionFastARD():
             A[start]      = XXd[start]/( proj[start] - var_y)
  
         warning_flag = 0
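+        # Trace of the log marginal likelihood (filled only if compute_score)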
+        scores_ = []
         for i in range(self.n_iter):
             XXa     = XX[active,:][:,active]
             XYa     = XY[active]
@@ -247,6 +257,10 @@ class RegressionFastARD():
             # update precision parameters of coefficients
             A,converged  = update_precisions(Q,S,q,s,A,active,self.tol,
                                              n_samples,False)
+            
+            if self.compute_score:
+                scores_.append(self.log_marginal_likelihood(XXa, XYa, Aa, beta))
+            
             if self.verbose:
                 print(('Iteration: {0}, number of features '
                        'in the model: {1}').format(i,np.sum(active)))
@@ -254,7 +268,8 @@ class RegressionFastARD():
                 if converged and self.verbose:
                     print('Algorithm converged !')
                 break
-                 
+            
         # after last update of alpha & beta update parameters
         # of posterior distribution
         XXa,XYa,Aa         = XX[active,:][:,active],XY[active],A[active]
@@ -265,11 +280,27 @@ class RegressionFastARD():
         self.active_       = active
         self.lambda_       = A
         self.alpha_        = beta
+        if self.compute_score:
+            self.scores_       = np.array(scores_)
         #self._set_intercept(X_mean,y_mean,X_std)
         return self
+    
+    def log_marginal_likelihood(self, XXa, XYa, Aa, beta):
+        """Computes the log of the marginal likelihood."""
+        N, M = XXa.shape
+        A = np.diag(Aa)
+        
+        Mn, sigma_, cholesky = self._posterior_dist(Aa, beta, XXa, XYa, full_covar=True)
+        
+        C = sigma_ + np.dot(np.dot(XXa.T, np.linalg.pinv(A)), XXa)
         
+        score = np.dot(np.dot(XYa.T, np.linalg.pinv(C)), XYa) + \
+                np.log(np.linalg.det(C)) + \
+                N * np.log(2 * np.pi)
+        
+        return -0.5 * score    
+    
         
-    #def predict_dist(self,X):
     def predict(self,X, return_std=False):
         '''
         Computes predictive distribution for test set.
@@ -292,7 +323,7 @@ class RegressionFastARD():
              var_hat: numpy array of size (n_samples_test,)
                     Variance of predictive distribution
         '''
-        y_hat     = np.dot(X[:,self.active_],self.coef_[self.active_]) #+ self.intercept_ #self._decision_function(X)
+        y_hat     = np.dot(X[:,self.active_],self.coef_[self.active_])
         if return_std:
             var_hat   = 1./self.alpha_
             var_hat  += np.sum( np.dot(X[:,self.active_],self.sigma_) * X[:,self.active_], axis = 1)
@@ -316,9 +347,9 @@ class RegressionFastARD():
             # find posterior mean : R*R.T*mean = beta*X.T*Y
             # solve(R*z = beta*X.T*Y) => find z => solve(R.T*mean = z) => find mean
             R    = np.linalg.cholesky(Sinv)
-            Z    = solve_triangular(R,beta*XY, check_finite=False, lower = True)
-            Mn   = solve_triangular(R.T,Z, check_finite=False, lower = False)
-            
+            Z    = solve_triangular(R,beta*XY, check_finite=True, lower = True)
+            Mn   = solve_triangular(R.T,Z, check_finite=True, lower = False)
+
             # invert lower triangular matrix from cholesky decomposition
             Ri   = solve_triangular(R,np.eye(A.shape[0]), check_finite=False, lower=True)
             if full_covar:
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 4ed089fe3a7b2d83d872a5d77f3b7f9ac34a6e18..313f82f273545fa3db7b6bdb141fe8ebb7ea5fae 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -427,6 +427,7 @@ class aPCE:
         
         """
         clf_poly = []
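+        # Track the log marginal likelihood during fitting only when a
+        # summary of the regression results is to be displayed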
+        compute_score = bool(self.DisplayFlag)
         
         #  inverse of the observed variance of the data
         Lambda = 1 / np.var(ModelOutput) if np.var(ModelOutput) !=0 else 1e-6
@@ -435,17 +436,17 @@ class aPCE:
         if self.RegMethod != 'OLS':
             if self.RegMethod == 'BRR':
                 clf_poly = linear_model.BayesianRidge(n_iter=1000, tol=1e-7,
-                                                      fit_intercept = False, 
-                                                      compute_score=True,
+                                                      fit_intercept=False, 
+                                                      compute_score=compute_score,
                                                       lambda_1=Lambda, lambda_2=Lambda)
-                                                        
+            
             elif self.RegMethod == 'ARD':
                 # clf_poly = ARDRegression(fit_intercept = False,compute_score=True,
                 #                         n_iter=1000, tol= 0.001)
                                         # alpha_1=1e-6, alpha_2=1e-6, 
                                         # lambda_1=Lambda, lambda_2=Lambda)
-                clf_poly = RegressionFastARD(fit_intercept = False)
-                
+                clf_poly = RegressionFastARD(fit_intercept=False,
+                                             compute_score=compute_score,
+                                             tol=0.001)
                 
             elif self.RegMethod == 'LARS':
                 clf_poly = linear_model.Lars(fit_intercept=False)
@@ -542,28 +543,29 @@ class aPCE:
         qNormScores = -np.inf*np.ones(nqnorms);
         scores_BRR = -np.inf*np.ones(self.DegreeArray.shape[0])
         
+        # Univariate polynomial evaluations on the ExpDesign (computed once
+        # in create_PCE_normDesign and cached on the instance)
+        univ_p_val = self.univ_p_val
+        
         for degIdx, deg in enumerate(self.DegreeArray):
             
             for qidx, q in enumerate(qnorm): 
                 # Generate the polynomial basis indices 
                 BasisIndices = self.PolyBasisIndices(degree = deg, q=q)
                 
-                
-                # Evaluate the univariate polynomials on ExpDesig
-                univ_p_val = self.univ_basis_vals(CollocationPoints)
-                
                 # Assemble the Psi matrix
                 Psi = self.PCE_create_Psi(BasisIndices,univ_p_val)
                 
                 # ---- Calulate the cofficients of aPCE ----------------------
-                PolynomialDegrees_Sparse, PSI_Sparse, Coeffs, clf_poly = self.RS_Builder(Psi, ModelOutput,BasisIndices)
+                sparseBasisIndices, sparsePSI, Coeffs, clf_poly = self.RS_Builder(Psi, ModelOutput, BasisIndices)
+                
                 AllCoeffs[str(degIdx+1)] = Coeffs
-                AllIndices_Sparse[str(degIdx+1)] = PolynomialDegrees_Sparse
+                AllIndices_Sparse[str(degIdx+1)] = sparseBasisIndices
                 Allclf_poly[str(degIdx+1)] = clf_poly
                 AllnTerms[str(degIdx+1)] = BasisIndices.shape[0]
                 
                 # Calculate and save the score of LOOCV
-                score = self.CorrectedLeaveoutError(PSI_Sparse, Coeffs, ModelOutput, clf_poly)
+                score = self.CorrectedLeaveoutError(sparsePSI, Coeffs, ModelOutput, clf_poly)
                 
                 qNormScores[qidx] = score
                 # EarlyStop check
@@ -575,7 +577,10 @@ class aPCE:
                     if sum(deltas[-n_checks_qNorm+1:]) == 2:
                         # stop the q-norm loop here
                         break
-                
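+                # A constant output is captured exactly; no need to scan
+                # further q-norms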
+                if np.var(ModelOutput) == 0:
+                    break
+            
             # check the direction of the error (on average):
             # if it increases consistently stop the iterations
             if len(scores[scores!=-np.inf]) > n_checks_degree:
@@ -588,6 +593,9 @@ class aPCE:
             # Store the score in the scores list 
             scores[degIdx] = np.max(qNormScores)
             
+            # For a constant output, checking a single degree suffices
+            if np.var(ModelOutput) == 0:
+                break
             
         # ------------------ Summary of results ------------------
         # Select the one with the best score and save the necessary outputs
@@ -606,7 +614,7 @@ class aPCE:
         print('\n'+'-'*50)
         
         # ------------------ Summary of results ------------------
-        if self.DisplayFlag == True:
+        if self.DisplayFlag:
             print('='*50)
             print(' '* 10 +' Summary of results ')
             print('='*50)
@@ -716,7 +724,7 @@ class aPCE:
         varY = np.var(ModelOutputs)
         
         
-        if varY ==0:
+        if varY == 0:
             normEmpErr = 0
             ErrLoo = 0
         else:
@@ -732,10 +740,12 @@ class aPCE:
         trM = np.trace(M)
         if trM < 0 or abs(trM) > 1e6:
             trM = np.trace(np.linalg.pinv(np.dot(Psi.T,Psi)))
-
+        
+        # Over-determined system of equations
         if N > P:
             T_factor = N/(N-P) * (1 + trM)
         
+        # Under-determined system of equations
         else:
             T_factor = np.inf
             
@@ -757,7 +767,36 @@ class aPCE:
                 value = self[item] = type(self)()
                 return value
     #--------------------------------------------------------------------------------------------------------
-    
+    def GPR(self, X, y, varIdx):
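+        """
+        Fits a Gaussian process regressor to one output variable (column
+        varIdx of the model output matrix) and returns the fitted model,
+        its dual coefficients (alpha_) and the score 1 - normEmpErr.
+        """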
+        
+        from sklearn.gaussian_process import GaussianProcessRegressor
+        from sklearn.gaussian_process.kernels import (RBF, Matern)
+        
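+        # Candidate covariance kernels: a smooth RBF and a rougher
+        # Matern kernel (nu=1.5)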
+        kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
+                   1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
+                                nu=1.5)]
+        
+        # Fit a GP for each candidate kernel and keep the one with the
+        # smallest normalized empirical error on the training data
+        bestGP, bestNormEmpErr = None, np.inf
+        for kernel in kernels:
+            # Instantiate a Gaussian Process model
+            gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
+            
+            # Fit to data using Maximum Likelihood Estimation of the parameters
+            gp.fit(X, y)
+            
+            # Compute the normalized empirical error on the training data
+            residual = gp.predict(X) - y
+            normEmpErr = np.mean(residual**2)/np.var(y)
+            
+            if normEmpErr < bestNormEmpErr:
+                bestGP, bestNormEmpErr = gp, normEmpErr
+        
+        print('\n'+'-'*50)
+        print('The estimation of GPR coefficients converged for output variable %s.'%(varIdx+1))
+        print('Final normalized empirical error estimate: %s'%(bestNormEmpErr))
+        print('\n'+'-'*50)
+        
+        return bestGP, bestGP.alpha_, 1-bestNormEmpErr
+        
+    #--------------------------------------------------------------------------------------------------------
     def create_PCE_normDesign(self, Model, OptDesignFlag=False):
         """
         This function loops over the outputs and each time step/point to compute the PCE
@@ -782,6 +821,10 @@ class aPCE:
         CollocationPoints = self.OptimalCollocationPointsBase
         OutputDict = self.ModelOutputDict.copy()
         
+        # Evaluate the univariate polynomials on the ExpDesign once and
+        # cache them for reuse in adaptiveSparseaPCE
+        self.univ_p_val = self.univ_basis_vals(CollocationPoints)
+        
         if 'x_values' in OutputDict: del OutputDict['x_values']
 
         for key , OutputMatrix in OutputDict.items():
@@ -789,12 +832,16 @@ class aPCE:
             if not OptDesignFlag: print(">>>> Calculating the PCE coefficients of %s with %s<<<<<<"%(key, self.RegMethod))
             for idx in range(OutputMatrix.shape[1]):
                 
-                # Only update the coeffs in the searching alg. of SeqExpDesign
-                if OptDesignFlag:
-                    BasisIndices = self.BasisDict[key]["y_"+str(idx+1)]
-                    self.CoeffsDict[key]["y_"+str(idx+1)] , self.BasisDict[key]["y_"+str(idx+1)], self.ScoresDict[key]["y_"+str(idx+1)], self.clf_poly[key]["y_"+str(idx+1)] = self.CoeffsUpdateaPCE(CollocationPoints, OutputMatrix[:,idx], BasisIndices)
+                if self.RegMethod == 'GPR': # TODO: Change RegMethod to metamodel type
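+                    # GPR bypasses the polynomial machinery and is fitted
+                    # directly on the collocation points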
+                    self.clf_poly[key]["y_"+str(idx+1)], self.CoeffsDict[key]["y_"+str(idx+1)], self.ScoresDict[key]["y_"+str(idx+1)] = self.GPR(CollocationPoints, OutputMatrix[:,idx], idx)
                 else:
-                    self.CoeffsDict[key]["y_"+str(idx+1)] , self.BasisDict[key]["y_"+str(idx+1)], self.ScoresDict[key]["y_"+str(idx+1)], self.clf_poly[key]["y_"+str(idx+1)] = self.adaptiveSparseaPCE(CollocationPoints, OutputMatrix[:,idx],idx)
+                    # Only update the coeffs in the searching alg. of SeqExpDesign
+                    if OptDesignFlag:
+                        BasisIndices = self.BasisDict[key]["y_"+str(idx+1)]
+                        self.CoeffsDict[key]["y_"+str(idx+1)] , self.BasisDict[key]["y_"+str(idx+1)], self.ScoresDict[key]["y_"+str(idx+1)], self.clf_poly[key]["y_"+str(idx+1)] = self.CoeffsUpdateaPCE(CollocationPoints, OutputMatrix[:,idx], BasisIndices)
+                    else:
+                        self.CoeffsDict[key]["y_"+str(idx+1)] , self.BasisDict[key]["y_"+str(idx+1)], self.ScoresDict[key]["y_"+str(idx+1)], self.clf_poly[key]["y_"+str(idx+1)] = self.adaptiveSparseaPCE(CollocationPoints, OutputMatrix[:,idx],idx)
 
         return self
     #--------------------------------------------------------------------------------------------------------
@@ -914,12 +961,12 @@ class aPCE:
                  cov = np.zeros((means.shape[0], means.shape[0]), float)
                  np.fill_diagonal(cov, stds**2)
                  
-                 multiNormalDensity = stats.multivariate_normal(mean=means, cov=cov)
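+                 # allow_singular tolerates numerically singular covariance
+                 # matrices (e.g. vanishing std estimates)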
+                 multiNormalDensity = stats.multivariate_normal(mean=means, cov=cov, allow_singular=True)
                  Y_PC_MC[key] = multiNormalDensity.rvs(MCsize)
                  logPriorLikelihoods = np.multiply(logPriorLikelihoods, multiNormalDensity.logpdf(Y_PC_MC[key]))
                  std_PC_MC[key] = np.zeros((MCsize, means.shape[0]))
 
-                #  Likelihood computation (Comparison of data 
+             #  Likelihood computation (Comparison of data 
              # and simulation results via PCE with candidate design)
              
              Likelihoods = self.normpdf(Y_PC_MC,std_PC_MC, ObservationData, sigma2Dict)
@@ -927,7 +974,7 @@ class aPCE:
              
              # Check the Effective Sample Size (1<ESS<MCsize)
              ESS = 1 / np.sum(np.square(Likelihoods/np.sum(Likelihoods)))
-             
+
              # Enlarge sample size if it doesn't fulfill the criteria
              if ((ESS > MCsize) or (ESS < 1)):
                  MCsize = MCsize * 10
@@ -1517,20 +1564,27 @@ class aPCE:
             
             for Inkey, InIdxValues in ValuesDict.items():
                 idx = int(Inkey.split('_')[1]) - 1
-                PolynomialDegrees = self.BasisDict[Outkey][Inkey]
-                PSI_Val = self.PCE_create_Psi(PolynomialDegrees, univ_p_val)
                 
-                # Perdiction 
-                try: # with error bar
-                    clf_poly = self.clf_poly[Outkey][Inkey]
-                    y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
-                    
-                    PCEOutputs_mean[:, idx] = y_mean
-                    PCEOutputs_std[:, idx] = y_std
+                if self.RegMethod == 'GPR':
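+                    # The GP surrogate returns mean and std directly from
+                    # the samples; no Psi matrix is needed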
+                    gp = self.clf_poly[Outkey][Inkey]
+                    PCEOutputs_mean[:, idx], PCEOutputs_std[:, idx] = gp.predict(Samples, return_std=True)
+                
+                else:
+                    PolynomialDegrees = self.BasisDict[Outkey][Inkey]
+                    PSI_Val = self.PCE_create_Psi(PolynomialDegrees, univ_p_val)
                     
-                except: # without error bar
-                    coeffs = self.CoeffsDict[Outkey][Inkey]
-                    PCEOutputs_mean[:, idx] = np.dot(PSI_Val, coeffs)
+                    # Prediction
+                    try: # with error bar
+                        clf_poly = self.clf_poly[Outkey][Inkey]
+                        PCEOutputs_mean[:, idx], PCEOutputs_std[:, idx] = clf_poly.predict(PSI_Val, return_std=True)
+                        
+                    except: # without error bar
+                        coeffs = self.CoeffsDict[Outkey][Inkey]
+                        PCEOutputs_mean[:, idx] = np.dot(PSI_Val, coeffs)
                 
             meanPCEOutputs[Outkey] = PCEOutputs_mean
             stdPCEOutputs[Outkey] = PCEOutputs_std
@@ -1570,7 +1624,7 @@ class aPCE:
         covMatrix = covMatrix + covMatrix_PCE
         
         # Compute likelihood
-        self.Likelihoods = stats.multivariate_normal.pdf(TotalOutputs, mean=TotalData, cov=covMatrix)
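+        # allow_singular avoids a LinAlgError for a near-singular covMatrix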
+        self.Likelihoods = stats.multivariate_normal.pdf(TotalOutputs, mean=TotalData, cov=covMatrix, allow_singular=True)
         
         return self.Likelihoods       
     
@@ -2023,7 +2077,6 @@ class aPCE:
                     print("\n")
                           
                     # Evaluate the full model response at the new sample points:
-                    #Ynew = Model.Run_Model(Xnew, TotalNSamples)
                     Ynew, _ = Model.Run_Model_Parallel(Xnew, prevRun_No=TotalNSamples)
                     TotalNSamples += Xnew.shape[0]
                     
@@ -2054,7 +2107,7 @@ class aPCE:
                     prevPCEModel = PCEModel
                     PCEModel = PCEModel.create_PCE_normDesign(Model)
                     
-                    # TODO: Compute the mean squre error
+                    # Compute the validation error
                     if len(self.validModelRuns) != 0:
                         validError = PCEModel.validError_()
                         Scores_all=[]
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py
index 7b13e7d8199a4c9cec243c4838f50cc184f14e1a..2c92917eae72f6b3f19803db2dd69f7fd4448415 100644
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py
+++ b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunctionComplex_Test.py
@@ -98,7 +98,7 @@ if __name__ == "__main__":
     # error (or the highest score=1-LOO)estimator is chosen as the final 
     # metamodel.
     MetaModelOpts.MinPceDegree = 1 #2
-    MetaModelOpts.MaxPceDegree = 8 #8
+    MetaModelOpts.MaxPceDegree = 10 #8
     
     # q-quasi-norm 0<q<1 (default=1)
     MetaModelOpts.q = 1.0
@@ -107,9 +107,9 @@ if __name__ == "__main__":
     # the PCE coefficients calculation:
     # 1)OLS: Ordinary Least Square  2)BRR: Bayesian Ridge Regression 
     # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression
-    # 5)SGDRegressor: Stochastic gradient descent learning
+    # 5)SGDR: Stochastic gradient descent learning
     
-    MetaModelOpts.RegMethod = 'BRR'
+    MetaModelOpts.RegMethod = 'ARD'
     
     # Print summary of the regression results
     #MetaModelOpts.DisplayFlag = True
@@ -121,7 +121,7 @@ if __name__ == "__main__":
     
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 100
+    NrofInitSamples = 25
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
     
     # Sampling methods
@@ -130,9 +130,9 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign.SamplingMethod = 'latin_hypercube'
     
     # Sequential experimental design (needed only for sequential ExpDesign)
-    MetaModelOpts.ExpDesign.NrofNewSample = 5
-    MetaModelOpts.ExpDesign.MaxNSamples = 200
-    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-6
+    MetaModelOpts.ExpDesign.NrofNewSample = 1
+    MetaModelOpts.ExpDesign.MaxNSamples = 30
+    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-10
     
     # Defining the measurement error, if it's known a priori
     obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names)
@@ -152,7 +152,7 @@ if __name__ == "__main__":
     # ------- Sequential Design configuration --------
     # ------------------------------------------------
     # 1) 'None' 2) 'epsilon-decreasing'
-    MetaModelOpts.ExpDesign.TradeOffScheme = 'epsilon-decreasing'
+    MetaModelOpts.ExpDesign.TradeOffScheme = 'None'
     #MetaModelOpts.ExpDesign.nReprications = 2
     # -------- Exploration ------
     #1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'dual annealing'
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
index 1679cf30476dd192d08de210a4697f37fbe1329a..4441dc6fed33bd94d8eb43f915f5a66e845d2bc5 100644
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
+++ b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
@@ -98,7 +98,7 @@ if __name__ == "__main__":
     # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression
     # 5)SGDRegressor: Stochastic gradient descent learning
     
-    MetaModelOpts.RegMethod = 'ARD'
+    MetaModelOpts.RegMethod = 'BRR'
     
     # Print summary of the regression results
     # MetaModelOpts.DisplayFlag = True
@@ -110,7 +110,7 @@ if __name__ == "__main__":
     
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 15
+    NrofInitSamples = 10
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
     
     # Sampling methods
@@ -120,8 +120,8 @@ if __name__ == "__main__":
     
     # Sequential experimental design (needed only for sequential ExpDesign)
     MetaModelOpts.ExpDesign.NrofNewSample = 1
-    MetaModelOpts.ExpDesign.MaxNSamples = 50
-    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-6
+    MetaModelOpts.ExpDesign.MaxNSamples = 100
+    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-16
     
     # Defining the measurement error, if it's known a priori
     obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names)
@@ -144,34 +144,34 @@ if __name__ == "__main__":
     #MetaModelOpts.ExpDesign.nReprications = 5
     # -------- Exploration ------
     #1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'dual annealing'
-    MetaModelOpts.ExpDesign.ExploreMethod = 'Voronoi'
+    MetaModelOpts.ExpDesign.ExploreMethod = 'random'
     
     # Use when 'dual annealing' chosen
     MetaModelOpts.ExpDesign.MaxFunItr = 200
     
     # Use when 'Voronoi' or 'MC' or 'LHS' chosen
-    MetaModelOpts.ExpDesign.NCandidate = 5 #200
+    MetaModelOpts.ExpDesign.NCandidate = 100
     MetaModelOpts.ExpDesign.NrofCandGroups = 4
     
     # -------- Exploitation ------
     # 1)'BayesOptDesign' 2)'VarOptDesign' 3)'alphabetic' 4)'Space-filling'
-    MetaModelOpts.ExpDesign.ExploitMethod = 'BayesOptDesign'
+    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
     
     # BayesOptDesign -> when data is available
     # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision)
     # 3)APP (A-Posterior-percision) 
     # 1)DKL (Kullback-Leibler Divergence) 2)BME (Bayesian model evidence)
     # 3)infEntropy (Entropy-based information gain) 
-    MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
+    # MetaModelOpts.ExpDesign.UtilityFunction = 'BME' #['DKL', 'DPP']
     
     # VarBasedOptDesign -> when data is not available
     # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)ALM, 4)LOOCV
-    #MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy' #['EIGF', 'Entropy', 'LOOCV']
+    MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy' #['EIGF', 'Entropy', 'LOOCV']
     
     # alphabetic
     # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
     # 3)K-Opt (K-Optimality)
-    #MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
+    # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
     
     # For colculation of validation error for SeqDesign 
     MetaModelOpts.validModelRuns = np.load("origModelOutput.npy")
diff --git a/BayesValidRox/tests/Co2BenchmarkTest/Test_CO2Benchmark.py b/BayesValidRox/tests/Co2BenchmarkTest/Test_CO2Benchmark.py
index fa6eede4d19da73bfe142153a9b9d2916eba61a6..1b303b168ff81a92d11aa3d9303be91d4b4bcd1f 100644
--- a/BayesValidRox/tests/Co2BenchmarkTest/Test_CO2Benchmark.py
+++ b/BayesValidRox/tests/Co2BenchmarkTest/Test_CO2Benchmark.py
@@ -102,10 +102,10 @@ if __name__ == "__main__":
     # error (or the highest score=1-LOO)estimator is chosen as the final 
     # metamodel.
     MetaModelOpts.MinPceDegree = 1 # default = 1
-    MetaModelOpts.MaxPceDegree = 10
+    MetaModelOpts.MaxPceDegree = 3
     
     # q-quasi-norm 0<q<1 (default=1)
-    MetaModelOpts.q = 1 #np.linspace(0.25,1, 4)
+    MetaModelOpts.q = np.linspace(0.25,1, 4)
     
     # Select the sparse least-square minimization method for 
     # the PCE coefficients calculation:
@@ -113,10 +113,10 @@ if __name__ == "__main__":
     # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression
     # 5)SGDRegressor: Stochastic gradient descent learning
     
-    MetaModelOpts.RegMethod = 'BRR'
+    MetaModelOpts.RegMethod = 'ARD'
     
     # Print summary of the regression results
-    #MetaModelOpts.DisplayFlag = True
+    # MetaModelOpts.DisplayFlag = True
     
     # ------ Experimental Design --------
     # Generate an experimental design of size NrExpDesign based on a latin 
@@ -126,13 +126,13 @@ if __name__ == "__main__":
     
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    initNSamples = 5
+    initNSamples = 10
     MetaModelOpts.ExpDesign.NrSamples = initNSamples
     
     # Sampling methods
     # 1)random 2)PCM 3)LSCM 4)user
     MetaModelOpts.ExpDesign.SamplingMethod = 'PCM'
-    #MetaModelOpts.ExpDesign.X = np.load('EDX_CO2Benchmark.npy')
+    # MetaModelOpts.ExpDesign.X = np.load('EDX_CO2Benchmark.npy')
     #MetaModelOpts.ExpDesign.Y = ExpDesign_Y
     
     # Sequential experimental design (needed only for sequential ExpDesign)