From 35676948fcb6f0057fad79b349ee589ecc564f96 Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Tue, 18 Jan 2022 15:20:27 +0100
Subject: [PATCH] [surrogate] remove chaospy dependency and update beta in
 RegressionFastARD class.

---
 BayesValidRox/surrogate_models/ExpDesigns.py  | 495 ++++++++------
 .../surrogate_models/RegressionFastARD.py     | 387 ++++++-----
 .../surrogate_models/eval_rec_rule.py         | 253 ++++++++
 .../surrogate_models/surrogate_models.py      | 604 +++++++++---------
 4 files changed, 1047 insertions(+), 692 deletions(-)
 create mode 100644 BayesValidRox/surrogate_models/eval_rec_rule.py

diff --git a/BayesValidRox/surrogate_models/ExpDesigns.py b/BayesValidRox/surrogate_models/ExpDesigns.py
index 37c82776b..afa9ec3d6 100644
--- a/BayesValidRox/surrogate_models/ExpDesigns.py
+++ b/BayesValidRox/surrogate_models/ExpDesigns.py
@@ -15,17 +15,13 @@ from tqdm import tqdm
 
 from .aPoly_Construction import aPoly_Construction
 
+
 class ExpDesigns:
     def __init__(self, Input, metaModel='pce'):
-        self.NrSamples = None
         self.InputObj = Input
-        self.MCSize = 10000
-        self.Input_distributions = []
-        self.BoundTuples = []
         self.SamplingMethod = 'random'
         self.metaModel = metaModel
         self.TradeOffScheme = 'None'
-        # TODO: This should be prescribed by TotalNSamples
         self.MaxNSamples = None
         self.NrofNewSample = 1
         self.NCandidate = 1
@@ -38,16 +34,14 @@ class ExpDesigns:
         self.nReprications = 1
         self.parNames = []
         self.MAP = []
-        self.JDist = None
-        self.Polytypes= []
-        self.polycoeffs = {}
         self.X = None
         self.Y = None
         self.hdf5File = None
-    
-    def GetSample(self, NrSamples, SamplingMethod='random', MaxPceDegree=None):   
+
+    def GetSample(self, NrSamples, SamplingMethod='random', transform=False,
+                  MaxPceDegree=None):
         """
-        
+        Generates samples with the given method.
 
         Parameters
         ----------
@@ -67,216 +61,334 @@ class ExpDesigns:
 
         """
         Inputs = self.InputObj
-        NofPa = len(Inputs.Marginals)
-        self.ndim = NofPa
+        self.ndim = len(Inputs.Marginals)
         self.SamplingMethod = SamplingMethod
         NrSamples = int(NrSamples)
-        self.arbitrary = False if len(Inputs.Marginals[0].InputValues) == 0 else True
+        if len(Inputs.Marginals[0].InputValues) == 0:
+            self.arbitrary = False
+        else:
+            self.arbitrary = True
+
         # Get the bounds if InputValues are directly defined by user:
         if Inputs.Marginals[0].Parameters is None:
-            for i in range(NofPa):
-                Inputs.Marginals[i].Parameters = [np.min(Inputs.Marginals[i].InputValues),np.max(Inputs.Marginals[i].InputValues)]
-        
+            for i in range(self.ndim):
+                low_bound = np.min(Inputs.Marginals[i].InputValues)
+                up_bound = np.max(Inputs.Marginals[i].InputValues)
+                Inputs.Marginals[i].Parameters = [low_bound, up_bound]
+
         if self.SamplingMethod == 'user':
             samples = self.X
             self.NrSamples = len(samples)
-                
-        ## Sample the distribution of parameters
+
+        # Sample the distribution of parameters
         if not self.arbitrary:
-            ## Case I = polytype not arbitrary
+            # Case I = polytype not arbitrary
             # Execute initialization to get the boundtuples
-            self.Input_distributions, self.BoundTuples = self.Parameter_Initialization(MaxPceDegree)
-
+            raw_data, bounds = self.Parameter_Initialization(MaxPceDegree)
+            self.raw_data = raw_data
+            self.BoundTuples = bounds
             # Create ExpDesign in the actual space
             if self.SamplingMethod != 'user':
-                samples = chaospy.generate_samples(NrSamples, domain=self.JDist , 
+                samples = chaospy.generate_samples(NrSamples, domain=self.JDist,
                                                    rule=SamplingMethod).T
-        
-        
-        else:
-            # Case II: 
-            # polytype arbitrary or Input values are directly prescribed by the user.
- 
+        elif self.arbitrary:
+            # Case II: polytype arbitrary or Input values are directly given by
+            # the user.
+
             # Generate the samples based on requested method
             if self.SamplingMethod == 'user':
-                self.Input_distributions, self.BoundTuples = self.Parameter_Initialization(MaxPceDegree)
-                
+                raw_data, bounds = self.Parameter_Initialization(MaxPceDegree)
+                self.raw_data = raw_data
+                self.BoundTuples = bounds
+
             elif self.SamplingMethod == 'random':
-                samples = self.MCSampling(NrSamples,MaxPceDegree)
-                
+                samples = self.MCSampling(NrSamples, MaxPceDegree)
+
             elif self.SamplingMethod == 'PCM' or self.SamplingMethod == 'LSCM':
                 samples = self.PCMSampling(MaxPceDegree)
-            
+
             else:
-                # raise Exception('Unfortunately, for the prescribed inputs, the samepling method '
-                #                  '%s is not valid.'%self.SamplingMethod)
                 # Execute initialization to get the boundtuples
-                self.Input_distributions, self.BoundTuples = self.Parameter_Initialization(MaxPceDegree)
+                raw_data, bounds = self.Parameter_Initialization(MaxPceDegree)
+                self.raw_data = raw_data
+                self.BoundTuples = bounds
 
                 # Create ExpDesign in the actual space using chaospy
                 try:
-                    samples = chaospy.generate_samples(NrSamples, domain=self.JDist , 
+                    samples = chaospy.generate_samples(NrSamples,
+                                                       domain=self.JDist,
                                                        rule=SamplingMethod).T
                 except:
-                    samples = self.MCSampling(NrSamples,MaxPceDegree)
-        
+                    samples = self.MCSampling(NrSamples, MaxPceDegree)
+
         # Transform samples to the original space
-        self.collocationPoints = samples
-        if not Inputs.Rosenblatt:
-            return samples
+        if transform:
+            tr_samples = self.transform(samples)
+            return samples, tr_samples
         else:
+            return samples
+
+    def transform(self, X, params=None):
+        """
+        Transform the samples via either a Rosenblatt or an isoprobabilistic
+        transformation.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples,n_params)
+            Samples to be transformed.
+        params : list, optional
+            Parameters of the transformation-space distribution; only used
+            for the Laguerre/gamma polytype. The default is None.
+
+        Returns
+        -------
+        tr_X: ndarray of shape (n_samples,n_params)
+            Transformed samples.
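+
+        Notes
+        -----
+        In the isoprobabilistic case each parameter is pushed through its own
+        CDF and the inverse CDF of the standard space of its polynomial
+        family. A minimal sketch for a Legendre/uniform parameter (``dist_X``
+        is an illustrative scipy.stats distribution, not an attribute of this
+        class)::
+
+            u = dist_X.cdf(x)                # map to [0, 1]
+            x_tr = st.uniform(-1, 2).ppf(u)  # map to U(-1, 1)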
+
+        """
+        if self.InputObj.Rosenblatt:
             self.origJDist, _ = self.DistConstructor(False)
-            return self.origJDist.inv(self.JDist.fwd(samples.T)).T
-        
-    
+            tr_X = self.origJDist.inv(self.JDist.fwd(X.T)).T
+        else:
+            # Transform samples via an isoprobabilistic transformation
+            n_samples, n_params = X.shape
+            Inputs = self.InputObj
+            origJDist = self.JDist
+            polyTypes = self.Polytypes
+
+            disttypes = []
+            for par_i in range(n_params):
+                disttypes.append(Inputs.Marginals[par_i].DistType)
+
+            # Pass non-transformed X, if arbitrary PCE is selected.
+            if None in disttypes or self.arbitrary:
+                return X
+
+            cdfx = np.zeros((X.shape))
+            tr_X = np.zeros((X.shape))
+
+            for par_i in range(n_params):
+
+                # Extract the parameters of the original space
+                disttype = disttypes[par_i]
+                if disttype is not None:
+                    dist = origJDist[par_i]
+                else:
+                    dist = None
+                polytype = polyTypes[par_i]
+                cdf = np.vectorize(lambda x: dist.cdf(x))
+
+                # Extract the parameters of the transformation space based on
+                # polyType
+                if polytype == 'legendre' or disttype == 'uniform':
+                    # Generate Y_Dists based
+                    params_Y = [-1, 1]
+                    dist_Y = st.uniform(loc=params_Y[0],
+                                        scale=params_Y[1]-params_Y[0])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == 'hermite' or disttype == 'norm':
+                    params_Y = [0, 1]
+                    dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == 'laguerre' or disttype == 'gamma':
+                    params_Y = [1, params[1]]
+                    dist_Y = st.gamma(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == 'arbitrary' or disttype is None:
+                    # mu_X = Inputs.Marginals[par_i].Moments[0]
+                    # stDev_X = Inputs.Marginals[par_i].Moments[1]
+                    # cdf = np.vectorize(lambda x: (x - mu_X) / stDev_X)
+                    # # TODO: Unknown dist with gaussian_kde
+                    # mu_Y = Y_marginals[par_i].Moments[0]
+                    # stDev_Y = Y_marginals[par_i].Moments[1]
+                    # inv_cdf = np.vectorize(lambda x: stDev_Y * x + mu_Y)
+                    pass
+
+                # Compute CDF_x(X)
+                cdfx[:, par_i] = cdf(X[:, par_i])
+
+                # Compute invCDF_y(cdfx)
+                tr_X[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+        return tr_X
+
     def FitDist(self, y):
         dist_results = []
         params = {}
-        dist_names = ['lognorm', 'norm','uniform', 'expon']
+        dist_names = ['lognorm', 'norm', 'uniform', 'expon']
         for dist_name in dist_names:
             dist = getattr(st, dist_name)
-            
+
             try:
-                param = dist.fit(y) if dist_name != 'lognorm' else dist.fit(np.exp(y),floc=0)
+                if dist_name != 'lognorm':
+                    param = dist.fit(y)
+                else:
+                    param = dist.fit(np.exp(y), floc=0)
             except:
                 param = dist.fit(y)
-            
+
             params[dist_name] = param
-            #Applying the Kolmogorov-Smirnov test
-            D, p = st.kstest(y, dist_name, args=param);
-            dist_results.append((dist_name,D))
-        
-        #select the best fitted distribution
-        sel_dist,D = (min(dist_results,key=lambda item:item[1]))
-        
+            # Applying the Kolmogorov-Smirnov test
+            D, p = st.kstest(y, dist_name, args=param)
+            dist_results.append((dist_name, D))
+
+        # select the best fitted distribution
+        sel_dist, D = (min(dist_results, key=lambda item: item[1]))
+
         if sel_dist == 'uniform':
-            params[sel_dist] = [params[sel_dist][0], params[sel_dist][0]+params[sel_dist][1]]
+            params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] +
+                                params[sel_dist][1]]
         if D < 0.05:
-            return sel_dist,params[sel_dist]
+            return sel_dist, params[sel_dist]
         else:
             return None, None
-    
+
     def DistConstructor(self, rosenblatt):
         Inputs = self.InputObj
-        NofPa = len(Inputs.Marginals)
-        
+        all_data = []
+        all_DistTypes = []
         origJoints = []
         Polytypes = []
-        
-        for parIdx in range(NofPa):
-            
+
+        for parIdx in range(self.ndim):
+
             if Inputs.Marginals[parIdx].DistType is None:
-                Inputs.Marginals[parIdx].DistType, Inputs.Marginals[parIdx].Parameters = self.FitDist(Inputs.Marginals[parIdx].InputValues)
-            
-            DistType = Inputs.Marginals[parIdx].DistType
-            params = Inputs.Marginals[parIdx].Parameters
-            
+                data = Inputs.Marginals[parIdx].InputValues
+                all_data.append(data)
+                DistType, params = self.FitDist(data)
+                Inputs.Marginals[parIdx].DistType = DistType
+                Inputs.Marginals[parIdx].Parameters = params
+            else:
+                DistType = Inputs.Marginals[parIdx].DistType
+                params = Inputs.Marginals[parIdx].Parameters
+
             if rosenblatt:
                 polytype = 'hermite'
                 Dist = chaospy.Normal()
-                
+
             elif DistType is None:
                 polytype = 'arbitrary'
                 Dist = None
 
             elif 'unif' in DistType:
                 polytype = 'legendre'
-                Dist = chaospy.Uniform(lower=params[0],upper=params[1])
-                
+                Dist = chaospy.Uniform(lower=params[0], upper=params[1])
+
             elif DistType == 'norm':
                 polytype = 'hermite'
-                Dist = chaospy.Normal(mu=params[0],sigma=params[1])
+                Dist = chaospy.Normal(mu=params[0], sigma=params[1])
 
             elif DistType == 'gamma':
                 polytype = 'laguerre'
-                Dist = chaospy.Gamma(shape=params[0],scale=params[1],shift=params[2])
-                
+                Dist = chaospy.Gamma(shape=params[0],
+                                     scale=params[1],
+                                     shift=params[2])
+
             elif DistType == 'beta':
                 polytype = 'jacobi'
-                Dist = chaospy.Beta(alpha=params[0],beta=params[1],
+                Dist = chaospy.Beta(alpha=params[0], beta=params[1],
                                     lower=params[2], upper=params[3])
-                
+
             elif 'lognorm' in DistType:
                 polytype = 'hermite'
-                
-                Mu = np.log(params[0]**2 / np.sqrt(params[0]**2 + params[1]**2))
-                Sigma  = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
-                
-                Dist = chaospy.LogNormal(mu=Mu,sigma=Sigma)
-            
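+                # Convert the given mean/std of the lognormal variable into
+                # the mu/sigma of the underlying normal distribution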
+                Mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
+                Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+                Dist = chaospy.LogNormal(mu=Mu, sigma=Sigma)
+
             elif DistType == 'exponential' or 'expon' in DistType:
                 polytype = 'arbitrary'
-                Dist = chaospy.Exponential(scale=params[0],shift=params[1])
-                
+                Dist = chaospy.Exponential(scale=params[0], shift=params[1])
+
             elif DistType == 'weibull':
                 polytype = 'arbitrary'
-                Dist = chaospy.Weibull(shape=params[0],scale=params[1],shift=params[2])
-            
+                Dist = chaospy.Weibull(shape=params[0], scale=params[1],
+                                       shift=params[2])
+
             else:
-                raise ValueError('DistType %s for parameter %s is not available.'%(DistType,parIdx+1))
-            
+                message = (f"DistType {DistType} for parameter "
+                           f"{parIdx+1} is not available.")
+                raise ValueError(message)
+
             if self.arbitrary:
                 polytype = 'arbitrary'
-            
+
+            # Store dists and polytypes
             origJoints.append(Dist)
             Polytypes.append(polytype)
-        
-        if None in [Inputs.Marginals[parIdx].DistType for parIdx in range(NofPa)]:
+            all_DistTypes.append(DistType)
+
+        # Prepare final output to return
+        if None in all_DistTypes:
             # Naive approach: Fit a gaussian kernel to the provided data
-            Data = np.asarray([Inputs.Marginals[parIdx].InputValues for parIdx in range(NofPa)])
+            Data = np.asarray(all_data)
             origSpaceDist = st.gaussian_kde(Data)
             self.priorSpace = origSpaceDist
         else:
             origSpaceDist = chaospy.J(*origJoints)
             self.priorSpace = st.gaussian_kde(origSpaceDist.sample(10000))
-        
+
         return origSpaceDist, Polytypes
-    
+
     def Parameter_Initialization(self, MaxPceDegree=None, OptDesignFlag=False):
         """
-        Initialization Uncertain Parameters in case arbitrary polytype is selected or
-        
+        Initialize the uncertain parameters in case the arbitrary polytype is
+        selected.
         """
         Inputs = self.InputObj
-        NofPa = len(Inputs.Marginals) 
-        
-        ## Create a multivariate probability distribution
+        ndim = self.ndim
+        Rosenblatt = Inputs.Rosenblatt
+        self.MCSize = 10000
+
+        # Create a multivariate probability distribution
         if MaxPceDegree is not None:
-            self.JDist, self.Polytypes = self.DistConstructor(rosenblatt=Inputs.Rosenblatt)
-        
-        if len(Inputs.Marginals[0].InputValues) != 0:
-            
+            JDist, Polytypes = self.DistConstructor(rosenblatt=Rosenblatt)
+            self.JDist, self.Polytypes = JDist, Polytypes
+
+        if self.arbitrary:
+
             self.MCSize = len(Inputs.Marginals[0].InputValues)
-            self.Input_distributions = np.zeros((NofPa, self.MCSize))
-            
-            for parIdx in range(NofPa):
+            self.raw_data = np.zeros((ndim, self.MCSize))
+
+            for parIdx in range(ndim):
                 try:
-                    self.Input_distributions[parIdx,:] = np.array(Inputs.Marginals[parIdx].InputValues)
+                    self.raw_data[parIdx] = np.array(Inputs.Marginals[parIdx].InputValues)
                 except:
-                    self.Input_distributions[parIdx,:] = self.JDist[parIdx].sample(self.MCSize)
-                    
+                    self.raw_data[parIdx] = self.JDist[parIdx].sample(self.MCSize)
+
             # Create orthogonal polynomial coefficients if necessary
-            if self.metaModel.lower() != 'gpe' and MaxPceDegree is not None and Inputs.polycoeffsFlag:
-                for parIdx in tqdm(range(NofPa), ascii=True, desc ="Computing orth. polynomial coeffs"):
-                    self.polycoeffs['p_'+str(parIdx+1)] = aPoly_Construction(self.Input_distributions[parIdx,:], MaxPceDegree, parIdx)
+            if self.metaModel.lower() != 'gpe' \
+                and MaxPceDegree is not None \
+                    and Inputs.polycoeffsFlag:
+                self.polycoeffs = {}
+                for parIdx in tqdm(range(ndim), ascii=True, desc="Computing orth. polynomial coeffs"):
+                    self.polycoeffs[f'p_{parIdx+1}'] = aPoly_Construction(self.raw_data[parIdx], MaxPceDegree)
         else:
             # Generate random samples based on parameter distributions
-            self.Input_distributions = chaospy.generate_samples(self.MCSize, domain=self.JDist)
-        
+            self.raw_data = chaospy.generate_samples(self.MCSize,
+                                                     domain=self.JDist)
+        # Extract moments
+        for parIdx in range(ndim):
+            mu = np.mean(self.raw_data[parIdx])
+            std = np.std(self.raw_data[parIdx])
+            self.InputObj.Marginals[parIdx].Moments = [mu, std]
+
         # Generate the bounds based on given inputs for marginals
         BoundTuples = []
-        for i in range(NofPa):
+        for i in range(ndim):
             if Inputs.Marginals[i].DistType == 'unif':
-                BoundTuples.append((Inputs.Marginals[i].Parameters[0],Inputs.Marginals[i].Parameters[1]))
+                low_bound, up_bound = Inputs.Marginals[i].Parameters
             else:
-                BoundTuples.append((np.min(self.Input_distributions[i,:]),np.max(self.Input_distributions[i,:])))
-        
+                low_bound = np.min(self.raw_data[i])
+                up_bound = np.max(self.raw_data[i])
+
+            BoundTuples.append((low_bound, up_bound))
+
         self.BoundTuples = tuple(BoundTuples)
-        
-        return self.Input_distributions, self.BoundTuples
-    
-    
-    def MCSampling(self, NrSamples,MaxPceDegree):
+
+        return self.raw_data, self.BoundTuples
+
+    def MCSampling(self, NrSamples, MaxPceDegree):
         """
         Compute the requested number of sampling points.
         Arguments
@@ -288,89 +400,76 @@ class ExpDesigns:
         ndarray[NrSamples, NofPa]
             The sampling locations in the input space.
         """
-        Inputs = self.InputObj
-        NofPa = len(Inputs.Marginals) 
-        
+
         # Execute initialization to get the boundtuples
-        self.Input_distributions, self.BoundTuples = self.Parameter_Initialization(MaxPceDegree)
+        raw_data, BoundTuples = self.Parameter_Initialization(MaxPceDegree)
+        self.raw_data, self.BoundTuples = raw_data, BoundTuples
 
-        Samples = np.zeros((NrSamples, NofPa))
-        
-        for idxPa in range(NofPa):
-            
+        Samples = np.zeros((NrSamples, self.ndim))
+
+        for idxPa in range(self.ndim):
             # InputValues given
-            randIdx = np.random.randint(0, len(self.Input_distributions[idxPa]), NrSamples)
-            Samples[:,idxPa] = self.Input_distributions[idxPa, randIdx]
-            
+            sample_size = len(self.raw_data[idxPa])
+            randIdx = np.random.randint(0, sample_size, NrSamples)
+            Samples[:, idxPa] = self.raw_data[idxPa, randIdx]
+
         return Samples
-    
-        
+
     def PCMSampling(self, MaxPceDegree):
-        
-        Inputs = self.InputObj
-        NofPa = len(Inputs.Marginals) 
-        Input_distributions = self.Input_distributions
-        
-        # TODO: Collocation Points based on roots for Guess_Degree+1
-        
+
+        raw_data = self.raw_data
+
         # Guess the closest degree to self.NrSamples
-        M_uptoMax = lambda MaxPceDegree: np.array([math.factorial(NofPa+d)// (math.factorial(NofPa)*math.factorial(d))  for d in range(1,MaxPceDegree+1)])
-        Guess_Degree = np.where(M_uptoMax(MaxPceDegree)>self.NrSamples)[0][0]
-        
-        Cpoints = np.zeros(shape=[Guess_Degree+1,NofPa])
-        PolynomialPa = lambda parIdx: aPoly_Construction(self.Input_distributions[parIdx,:], MaxPceDegree, parIdx)
-        
-        for PaIdx in range(NofPa):
-            
-            Cpoints[:,PaIdx] = np.trim_zeros(np.roots(PolynomialPa(PaIdx)[Guess_Degree+1][::-1])) # [::-1]
-             
-        
-        #==============================================================================
-        #============= Construction of optimal integration points =====================
-        #==============================================================================
-        Prod = itertools.product(np.arange(1, Guess_Degree+2), repeat=NofPa)
-        SortDigitalUniqueCombinations = np.array(list(filter(lambda x: x, Prod)))
-        
-        
+        def M_uptoMax(deg):
+            result = []
+            for d in range(1, deg+1):
+                result.append(math.factorial(self.ndim+d) //
+                              (math.factorial(self.ndim) * math.factorial(d)))
+            return np.array(result)
+
+        guess_Deg = np.where(M_uptoMax(MaxPceDegree) > self.NrSamples)[0][0]
+
+        Cpoints = np.zeros(shape=[guess_Deg+1, self.ndim])
+
+        def PolynomialPa(parIdx):
+            return aPoly_Construction(self.raw_data[parIdx], MaxPceDegree)
+
+        for i in range(self.ndim):
+            poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
+            Cpoints[:, i] = np.trim_zeros(np.roots(poly_coeffs))
+
+        #  Construction of optimal integration points
+        Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
+        SortDigUniqueCombos = np.array(list(filter(lambda x: x, Prod)))
+
         # Ranking relatively mean
-        Temp=np.empty(shape=[0, Guess_Degree+1])
-        for j in range(NofPa):
-            s=abs(Cpoints[:, j]-np.mean(Input_distributions[j,:]))
-            Temp=np.append(Temp, [s], axis=0)
-    
-        temp=Temp.T
-    
-        temp_sort, index_CP = np.sort(temp, axis=0), np.argsort(temp, axis=0)
-    
-    
-        #SortCpoints=np.sort(Cpoints, axis=0)
-        SortCpoints=np.empty(shape=[0, Guess_Degree+1])
-    
-        for j in range(NofPa):
-            SortCp=Cpoints[index_CP[:, j], j]
-            SortCpoints=np.vstack((SortCpoints, SortCp))
-    
-    
-        # Mapping of Digital Combination to Cpoint Combination
-        SortUniqueCombinations=np.empty(shape=[0, NofPa])
-        for i in range(len(SortDigitalUniqueCombinations)):
-            SortUnComb=[]
-            for j in range(NofPa):
-                SortUC=SortCpoints[j, SortDigitalUniqueCombinations[i, j]-1]
+        Temp = np.empty(shape=[0, guess_Deg+1])
+        for j in range(self.ndim):
+            s = abs(Cpoints[:, j]-np.mean(raw_data[j]))
+            Temp = np.append(Temp, [s], axis=0)
+        temp = Temp.T
+
+        index_CP = np.argsort(temp, axis=0)
+        SortCpoints = np.empty(shape=[0, guess_Deg+1])
+
+        for j in range(self.ndim):
+            SortCp = Cpoints[index_CP[:, j], j]
+            SortCpoints = np.vstack((SortCpoints, SortCp))
+
+        # Mapping of Combination to Cpoint Combination
+        SortUniqueCombos = np.empty(shape=[0, self.ndim])
+        for i in range(len(SortDigUniqueCombos)):
+            SortUnComb = []
+            for j in range(self.ndim):
+                SortUC = SortCpoints[j, SortDigUniqueCombos[i, j]-1]
                 SortUnComb.append(SortUC)
-                SortUnicomb=np.asarray(SortUnComb)
-            SortUniqueCombinations=np.vstack((SortUniqueCombinations, SortUnicomb))
-    
-        #--------------------------------
+                SortUnicomb = np.asarray(SortUnComb)
+            SortUniqueCombos = np.vstack((SortUniqueCombos, SortUnicomb))
+
         # Output the collocation points
-        #--------------------------------
         if self.SamplingMethod == 'LSCM':
-            
-            OptimalCollocationPointsBase = SortUniqueCombinations
-        
+            OptimalCollocationPointsBase = SortUniqueCombos
         else:
-            OptimalCollocationPointsBase = SortUniqueCombinations[0:self.NrSamples,:]
-        
-        #print('---> Calculations are successfully completed. \n\n')
+            OptimalCollocationPointsBase = SortUniqueCombos[0:self.NrSamples]
 
         return OptimalCollocationPointsBase
diff --git a/BayesValidRox/surrogate_models/RegressionFastARD.py b/BayesValidRox/surrogate_models/RegressionFastARD.py
index df0dd1d35..2d9eaca8a 100755
--- a/BayesValidRox/surrogate_models/RegressionFastARD.py
+++ b/BayesValidRox/surrogate_models/RegressionFastARD.py
@@ -11,9 +11,9 @@ from numpy.linalg import LinAlgError
 from sklearn.base import RegressorMixin
 from sklearn.linear_model._base import LinearModel
 import warnings
-from sklearn.utils import check_X_y,check_array,as_float_array
+from sklearn.utils import check_X_y
 from scipy.linalg import pinvh
-from sklearn.preprocessing import normalize as f_normalize
+
 
 def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
     '''
@@ -22,20 +22,20 @@ def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
     '''
     # initialise vector holding changes in log marginal likelihood
     deltaL = np.zeros(Q.shape[0])
-    
+
     # identify features that can be added , recomputed and deleted in model
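+    # Tipping & Faul (2003): a feature enters or stays in the model iff
+    # theta_i = q_i**2 - s_i > 0, i.e. the marginal likelihood has its
+    # maximum at a finite alpha_i; otherwise alpha_i -> inf and the feature
+    # is pruned.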
     theta        =  q**2 - s 
     add          =  (theta > 0) * (active == False)
     recompute    =  (theta > 0) * (active == True)
     delete       = ~(add + recompute)
 
-    # compute sparsity & quality parameters corresponding to features in 
+    # compute sparsity & quality parameters corresponding to features in
     # three groups identified above
     Qadd,Sadd      = Q[add], S[add]
     Qrec,Srec,Arec = Q[recompute], S[recompute], A[recompute]
     Qdel,Sdel,Adel = Q[delete], S[delete], A[delete]
-    
-    # compute new alpha's (precision parameters) for features that are 
+
+    # compute new alpha's (precision parameters) for features that are
     # currently in model and will be recomputed
     Anew           = s[recompute]**2/ ( theta[recompute] + np.finfo(np.float32).eps)
     delta_alpha    = (1./Anew - 1./Arec)
@@ -51,7 +51,7 @@ def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
 
     # no deletions or additions
     same_features  = np.sum( theta[~recompute] > 0) == 0
-    
+
     # changes in precision for features already in model is below threshold
     no_delta       = np.sum( abs( Anew - Arec ) > tol ) == 0
     # if same_features: print(abs( Anew - Arec ))
@@ -62,7 +62,7 @@ def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
     if same_features and no_delta:
         converged = True
         return [A,converged]
-    
+
     # if not converged update precision parameter of weights and return
     if theta[feature_index] > 0:
         A[feature_index] = s[feature_index]**2 / theta[feature_index]
@@ -76,85 +76,75 @@ def update_precisions(Q,S,q,s,A,active,tol,n_samples,clf_bias):
             if not (feature_index == 0 and clf_bias):
                 active[feature_index] = False
                 A[feature_index]      = np.PINF
-                
-    return [A,converged]
-
-
-###############################################################################
-#                ARD REGRESSION AND CLASSIFICATION
-###############################################################################
-
 
-#-------------------------- Regression ARD ------------------------------------
+    return [A,converged]
 
 
-class RegressionFastARD(LinearModel,RegressorMixin):
+class RegressionFastARD(LinearModel, RegressorMixin):
     '''
     Regression with Automatic Relevance Determination (Fast Version uses 
     Sparse Bayesian Learning)
     https://github.com/AmazaspShumik/sklearn-bayes/blob/master/skbayes/rvm_ard_models/fast_rvm.py
-    
+
     Parameters
     ----------
     n_iter: int, optional (DEFAULT = 100)
         Maximum number of iterations
-    
+
     start: list, optional (DEFAULT = None)
         Initial selected features.
-    
+
     tol: float, optional (DEFAULT = 1e-3)
         If absolute change in precision parameter for weights is below threshold
         algorithm terminates.
-        
+
     fit_intercept : boolean, optional (DEFAULT = True)
         whether to calculate the intercept for this model. If set
         to false, no intercept will be used in calculations
         (e.g. data is expected to be already centered).
-        
+
     copy_X : boolean, optional (DEFAULT = True)
         If True, X will be copied; else, it may be overwritten.
-    
+
     compute_score : bool, default=False
         If True, compute the log marginal likelihood at each iteration of the
         optimization.
-    
+
     verbose : boolean, optional (DEFAULT = FALSE)
         Verbose mode when fitting the model
-        
-        
+
     Attributes
     ----------
     coef_ : array, shape = (n_features)
         Coefficients of the regression model (mean of posterior distribution)
-        
+
     alpha_ : float
        estimated precision of the noise
-       
+
     active_ : array, dtype = np.bool, shape = (n_features)
        True for non-zero coefficients, False otherwise
-       
+
     lambda_ : array, shape = (n_features)
        estimated precisions of the coefficients
-       
+
     sigma_ : array, shape = (n_features, n_features)
         estimated covariance matrix of the weights, computed only
-        for non-zero coefficients  
-    
+        for non-zero coefficients
+
     scores_ : array-like of shape (n_iter_+1,)
         If computed_score is True, value of the log marginal likelihood (to be
-        maximized) at each iteration of the optimization. 
-       
+        maximized) at each iteration of the optimization.
+
     References
     ----------
-    [1] Fast marginal likelihood maximisation for sparse Bayesian models (Tipping & Faul 2003)
-        (http://www.miketipping.com/papers/met-fastsbl.pdf)
+    [1] Fast marginal likelihood maximisation for sparse Bayesian models
+    (Tipping & Faul 2003) (http://www.miketipping.com/papers/met-fastsbl.pdf)
     [2] Analysis of sparse Bayesian learning (Tipping & Faul 2001)
         (http://www.miketipping.com/abstracts.htm#Faul:NIPS01)
-        
     '''
-    
-    def __init__( self, n_iter=300, start=None, tol=1e-3, fit_intercept=True, 
-                  normalize=False, copy_X=True, compute_score=False, verbose=False):
+
+    def __init__(self, n_iter=300, start=None, tol=1e-3, fit_intercept=True,
+                 normalize=False, copy_X=True, compute_score=False,
+                 verbose=False):
         self.n_iter          = n_iter
         self.start           = start
         self.tol             = tol
@@ -164,32 +154,34 @@ class RegressionFastARD(LinearModel,RegressorMixin):
         self.copy_X          = copy_X
         self.compute_score   = compute_score
         self.verbose         = verbose
-        
-        
-    def _preprocess_data(self,X, y):
+
+    def _preprocess_data(self, X, y):
         """Center and scale data.
-        Centers data to have mean zero along axis 0. If fit_intercept=False or if
-        the X is a sparse matrix, no centering is done, but normalization can still
-        be applied. The function returns the statistics necessary to reconstruct
-        the input data, which are X_offset, y_offset, X_scale, such that the output
+        Centers data to have mean zero along axis 0. If fit_intercept=False or
+        if the X is a sparse matrix, no centering is done, but normalization
+        can still be applied. The function returns the statistics necessary to
+        reconstruct the input data, which are X_offset, y_offset, X_scale, such
+        that the output
             X = (X - X_offset) / X_scale
-        X_scale is the L2 norm of X - X_offset. 
+        X_scale is the L2 norm of X - X_offset.
         """
-        
+
         if self.copy_X:
             X = X.copy(order='K')
-    
+
         y = np.asarray(y, dtype=X.dtype)
-    
+
         if self.fit_intercept:
             X_offset = np.average(X, axis=0)
             X -= X_offset
             if self.normalize:
-                X, X_scale = f_normalize(X, axis=0, copy=False,
-                                         return_norm=True)
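+                # Scale each column by its sample std (ddof=1; X is already
+                # centred); zero-variance columns keep a scale of 1 to avoid
+                # division by zero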
+                X_scale = np.ones(X.shape[1], dtype=X.dtype)
+                std = np.sqrt(np.sum(X**2, axis=0)/(len(X)-1))
+                X_scale[std != 0] = std[std != 0]
+                X /= X_scale
             else:
                 X_scale = np.ones(X.shape[1], dtype=X.dtype)
-            y_offset = np.average(y, axis=0)
+            y_offset = np.mean(y)
             y = y - y_offset
         else:
             X_offset = np.zeros(X.shape[1], dtype=X.dtype)
@@ -198,21 +190,21 @@ class RegressionFastARD(LinearModel,RegressorMixin):
                 y_offset = X.dtype.type(0)
             else:
                 y_offset = np.zeros(y.shape[1], dtype=X.dtype)
-    
-        return X, y, X_offset, y_offset, X_scale  
-        
-    def fit(self,X,y):
+
+        return X, y, X_offset, y_offset, X_scale
+
+    def fit(self, X, y):
         '''
         Fits ARD Regression with Sequential Sparse Bayes Algorithm.
-        
+
         Parameters
         -----------
         X: {array-like, sparse matrix} of size (n_samples, n_features)
            Training data, matrix of explanatory variables
-        
-        y: array-like of size [n_samples, n_features] 
+
+        y: array-like of size [n_samples, n_features]
            Target values
-           
+
         Returns
         -------
         self : object
@@ -220,99 +212,101 @@ class RegressionFastARD(LinearModel,RegressorMixin):
         '''
         X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
         n_samples, n_features = X.shape
-        
+
         X, y, X_mean, y_mean, X_std = self._preprocess_data(X, y)
         self._x_mean_ = X_mean
-        self._y_mean  = y_mean
-        self._x_std   = X_std
+        self._y_mean = y_mean
+        self._x_std = X_std
 
         #  precompute X'*Y , X'*X for faster iterations & allocate memory for
         #  sparsity & quality vectors
-        XY     = np.dot(X.T,y)
-        XX     = np.dot(X.T,X)
-        XXd    = np.diag(XX)
+        XY = np.dot(X.T, y)
+        XX = np.dot(X.T, X)
+        XXd = np.diag(XX)
 
         #  initialise precision of noise & and coefficients
-        var_y  = np.var(y)
-        
+        var_y = np.var(y)
+
         # check that variance is non zero !!!
-        if var_y == 0 :
+        if var_y == 0:
             beta = 1e-2
         else:
             beta = 1. / np.var(y)
 
-        A      = np.PINF * np.ones(n_features)
-        active = np.zeros(n_features , dtype = np.bool)
-        
-        
+        A = np.PINF * np.ones(n_features)
+        active = np.zeros(n_features, dtype=np.bool)
+
         if self.start is not None and not hasattr(self, 'active_'):
             start = self.start
             # start from a given start basis vector
-            proj  = XY**2 / XXd
+            proj = XY**2 / XXd
             active[start] = True
-            A[start]      = XXd[start]/( proj[start] - var_y)
-        
+            A[start] = XXd[start]/(proj[start] - var_y)
+
         else:
             # in case of almost perfect multicollinearity between some features
             # start from feature 0
-            if np.sum( XXd - X_mean**2 < np.finfo(np.float32).eps ) > 0:
-                A[0]       = np.finfo(np.float16).eps
-                active[0]  = True
-            
+            if np.sum(XXd - X_mean**2 < np.finfo(np.float32).eps) > 0:
+                A[0] = np.finfo(np.float16).eps
+                active[0] = True
+
             else:
-                # start from a single basis vector with largest projection on targets
-                proj  = XY**2 / XXd
+                # start from a single basis vector with largest projection on
+                # targets
+                proj = XY**2 / XXd
                 start = np.argmax(proj)
                 active[start] = True
-                A[start]      = XXd[start]/( proj[start] - var_y+np.finfo(np.float32).eps)
-                
-
+                A[start] = XXd[start]/(proj[start] - var_y +
+                                       np.finfo(np.float32).eps)
 
         warning_flag = 0
         scores_ = []
         for i in range(self.n_iter):
-            XXa     = XX[active,:][:,active]
-            XYa     = XY[active]
-            Aa      =  A[active]
-            
+            XXa = XX[active, :][:, active]
+            XYa = XY[active]
+            Aa = A[active]
+
             # mean & covariance of posterior distribution
-            Mn,Ri,cholesky  = self._posterior_dist(Aa,beta,XXa,XYa)
+            Mn, Ri, cholesky = self._posterior_dist(Aa, beta, XXa, XYa)
             if cholesky:
-                Sdiag  = np.sum(Ri**2,0)
+                Sdiag = np.sum(Ri**2, 0)
             else:
-                Sdiag  = np.copy(np.diag(Ri)) 
+                Sdiag = np.copy(np.diag(Ri))
                 warning_flag += 1
-            
-            # raise warning in case cholesky failes
+
+            # raise warning in case cholesky fails
             if warning_flag == 1:
-                warnings.warn(("Cholesky decomposition failed ! Algorithm uses pinvh, "
-                               "which is significantly slower, if you use RVR it "
-                               "is advised to change parameters of kernel"))
-                
-            # compute quality & sparsity parameters            
-            s,q,S,Q = self._sparsity_quality(XX,XXd,XY,XYa,Aa,Ri,active,beta,cholesky)
-                
+                warnings.warn(("Cholesky decomposition failed! Algorithm uses "
+                               "pinvh, which is significantly slower. If you "
+                               "use RVR, it is advised to change the kernel "
+                               "parameters."))
+
+            # compute quality & sparsity parameters
+            s, q, S, Q = self._sparsity_quality(XX, XXd, XY, XYa, Aa, Ri,
+                                                active, beta, cholesky)
+
             # update precision parameter for noise distribution
-            rss     = np.sum( ( y - np.dot(X[:,active] , Mn) )**2 )
+            rss = np.sum((y - np.dot(X[:, active], Mn))**2)
 
             # if near perfect fit , then terminate
-            # if (rss / n_samples/var_y) < self.tol:
-            #     warnings.warn('Early termination due to near perfect fit')
-            #     converged = True
-            #     break
-            beta    = n_samples - np.sum(active) + np.sum(Aa * Sdiag )
-            beta   /= ( rss + np.finfo(np.float32).eps )
+            if (rss / n_samples/var_y) < self.tol:
+                warnings.warn('Early termination due to near perfect fit')
+                converged = True
+                break
+            beta = n_samples - np.sum(active) + np.sum(Aa * Sdiag)
+            beta /= rss
+            # beta /= (rss + np.finfo(np.float32).eps)
 
             # update precision parameters of coefficients
-            A,converged  = update_precisions(Q,S,q,s,A,active,self.tol,
-                                             n_samples,False)
-            
+            A, converged = update_precisions(Q, S, q, s, A, active, self.tol,
+                                             n_samples, False)
+
             if self.compute_score:
-                scores_.append(self.log_marginal_likelihood(XXa,XYa, Aa, beta))
-            
+                scores_.append(self.log_marginal_like(XXa, XYa, Aa, beta))
+
             if self.verbose:
                 print(('Iteration: {0}, number of features '
-                        'in the model: {1}').format(i,np.sum(active)))
+                       'in the model: {1}').format(i, np.sum(active)))
 
             if converged or i == self.n_iter - 1:
                 if converged and self.verbose:
@@ -321,84 +315,83 @@ class RegressionFastARD(LinearModel,RegressorMixin):
 
         # after last update of alpha & beta update parameters
         # of posterior distribution
-        XXa,XYa,Aa         = XX[active,:][:,active],XY[active],A[active]
-        Mn, Sn, cholesky   = self._posterior_dist(Aa,beta,XXa,XYa,True)
-        self.coef_         = np.zeros(n_features)
+        XXa, XYa, Aa = XX[active, :][:, active], XY[active], A[active]
+        Mn, Sn, cholesky = self._posterior_dist(Aa, beta, XXa, XYa, True)
+        self.coef_ = np.zeros(n_features)
         self.coef_[active] = Mn
-        self.sigma_        = Sn
-        self.active_       = active
-        self.lambda_       = A
-        self.alpha_        = beta
-        self.converged     = converged
+        self.sigma_ = Sn
+        self.active_ = active
+        self.lambda_ = A
+        self.alpha_ = beta
+        self.converged = converged
         if self.compute_score:
-            self.scores_       = np.array(scores_)
-        
+            self.scores_ = np.array(scores_)
+
         # set intercept_
         if self.fit_intercept:
             self.coef_ = self.coef_ / X_std
             self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
         else:
             self.intercept_ = 0.
-        
         return self
-    
-    def log_marginal_likelihood(self, XXa,XYa, Aa, beta):
+
+    def log_marginal_like(self, XXa, XYa, Aa, beta):
         """Computes the log of the marginal likelihood."""
         N, M = XXa.shape
         A = np.diag(Aa)
-        
-        Mn, sigma_, cholesky = self._posterior_dist(Aa,beta,XXa,XYa,full_covar=True)
-        
+
+        Mn, sigma_, cholesky = self._posterior_dist(Aa, beta, XXa, XYa,
+                                                    full_covar=True)
+
         C = sigma_ + np.dot(np.dot(XXa.T, np.linalg.pinv(A)), XXa)
-        
-        score = np.dot(np.dot(XYa.T, np.linalg.pinv(C)), XYa) + \
-                np.log(np.linalg.det(C)) + \
-                N * np.log(2 * np.pi)
-        
-        return -0.5 * score    
-    
-        
-    def predict(self,X, return_std=False):
+
+        score = np.dot(np.dot(XYa.T, np.linalg.pinv(C)), XYa) +\
+            np.log(np.linalg.det(C)) + N * np.log(2 * np.pi)
+
+        return -0.5 * score
+
+    def predict(self, X, return_std=False):
         '''
         Computes predictive distribution for test set.
         Predictive distribution for each data point is one dimensional
         Gaussian and therefore is characterised by mean and variance based on
         Ref.[1] Section 3.3.2.
-        
+
         Parameters
         -----------
         X: {array-like, sparse} (n_samples_test, n_features)
            Test data, matrix of explanatory variables
-           
+
         Returns
         -------
         : list of length two [y_hat, var_hat]
-        
+
              y_hat: numpy array of size (n_samples_test,)
-                    Estimated values of targets on test set (i.e. mean of predictive
-                    distribution)
-           
-             var_hat: numpy array of size (n_samples_test,)
+                    Estimated values of targets on test set (i.e. mean of
+                    predictive distribution)
+
+             var_hat: numpy array of size (n_samples_test,)
                     Variance of predictive distribution
         References
         ----------
-        [1] Bishop, C. M. (2006). Pattern recognition and machine learning. springer.
+        [1] Bishop, C. M. (2006). Pattern recognition and machine learning.
+        springer.
         '''
-        
-        y_hat     = np.dot(X,self.coef_) + self.intercept_
-        
+
+        y_hat = np.dot(X, self.coef_) + self.intercept_
+
         if return_std:
             if self.normalize:
-                x   = (X - self._x_mean_) / self._x_std
-            var_hat   = 1./self.alpha_
-            var_hat  += np.sum( np.dot(x[:,self.active_],self.sigma_) * x[:,self.active_], axis = 1)
-            std_hat   = np.sqrt(var_hat)
+                x = (X - self._x_mean_) / self._x_std
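+            # Predictive variance (Ref. [1], Sec. 3.3.2), restricted to the
+            # active basis functions: var(y*) = 1/beta + x*^T * Sn * x*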
+            var_hat = 1./self.alpha_
+            var_hat += np.sum(np.dot(x[:, self.active_], self.sigma_) *
+                              x[:, self.active_], axis=1)
+            std_hat = np.sqrt(var_hat)
             return y_hat, std_hat
         else:
             return y_hat
 
-
-    def _posterior_dist(self,A,beta,XX,XY,full_covar=False):
+    def _posterior_dist(self, A, beta, XX, XY, full_covar=False):
         '''
         Calculates mean and covariance matrix of posterior distribution
         of coefficients.
@@ -407,63 +400,63 @@ class RegressionFastARD(LinearModel,RegressorMixin):
         Sinv = beta * XX
         np.fill_diagonal(Sinv, np.diag(Sinv) + A)
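+        # Sn = inv(diag(A) + beta*X'X) and Mn = beta*Sn*X'Y; a Cholesky
+        # factorisation of Sinv is attempted first, pinvh is the fallback.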
         cholesky = True
-        
+
         # try cholesky, if it fails go back to pinvh
         try:
             # find posterior mean : R*R.T*mean = beta*X.T*Y
-            # solve(R*z = beta*X.T*Y) => find z => solve(R.T*mean = z) => find mean
-            R    = np.linalg.cholesky(Sinv)
-            Z    = solve_triangular(R,beta*XY, check_finite=True, lower = True)
-            Mn   = solve_triangular(R.T,Z, check_finite=True, lower = False)
+            # solve(R*z = beta*X.T*Y) =>find z=> solve(R.T*mean = z)=>find mean
+            R = np.linalg.cholesky(Sinv)
+            Z = solve_triangular(R, beta*XY, check_finite=True, lower=True)
+            Mn = solve_triangular(R.T, Z, check_finite=True, lower=False)
 
             # invert lower triangular matrix from cholesky decomposition
-            Ri   = solve_triangular(R,np.eye(A.shape[0]), check_finite=False, lower=True)
+            Ri = solve_triangular(R, np.eye(A.shape[0]), check_finite=False,
+                                  lower=True)
             if full_covar:
-                Sn   = np.dot(Ri.T,Ri)
-                return Mn,Sn,cholesky
+                Sn = np.dot(Ri.T, Ri)
+                return Mn, Sn, cholesky
             else:
-                return Mn,Ri,cholesky
+                return Mn, Ri, cholesky
         except LinAlgError:
             cholesky = False
-            Sn   = pinvh(Sinv)
-            Mn   = beta*np.dot(Sinv,XY)
+            Sn = pinvh(Sinv)
+            Mn = beta*np.dot(Sn, XY)
             return Mn, Sn, cholesky
-                
-    
-    
-    
-    def _sparsity_quality(self,XX,XXd,XY,XYa,Aa,Ri,active,beta,cholesky):
+
+    def _sparsity_quality(self, XX, XXd, XY, XYa, Aa, Ri, active, beta,
+                          cholesky):
         '''
         Calculates sparsity and quality parameters for each feature
-        
+
         Theoretical Note:
         -----------------
         Here we used Woodbury Identity for inverting covariance matrix
-        of target distribution 
+        of target distribution
         C    = 1/beta + 1/alpha * X' * X
         C^-1 = beta - beta^2 * X * Sn * X'
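+        S_i = phi_i' * C^-1 * phi_i  (sparsity factor)
+        Q_i = phi_i' * C^-1 * y      (quality factor)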
         '''
-        bxy        = beta*XY
-        bxx        = beta*XXd
+        bxy = beta*XY
+        bxx = beta*XXd
         if cholesky:
-            # here Ri is inverse of lower triangular matrix obtained from cholesky decomp
-            xxr    = np.dot(XX[:,active],Ri.T)
-            rxy    = np.dot(Ri,XYa)
-            S      = bxx - beta**2 * np.sum( xxr**2, axis=1)
-            Q      = bxy - beta**2 * np.dot( xxr, rxy)
+            # here Ri is inverse of lower triangular matrix obtained from
+            # cholesky decomp
+            xxr = np.dot(XX[:, active], Ri.T)
+            rxy = np.dot(Ri, XYa)
+            S = bxx - beta**2 * np.sum(xxr**2, axis=1)
+            Q = bxy - beta**2 * np.dot(xxr, rxy)
         else:
             # here Ri is covariance matrix
-            XXa    = XX[:,active]
-            XS     = np.dot(XXa,Ri)
-            S      = bxx - beta**2 * np.sum(XS*XXa,1)
-            Q      = bxy - beta**2 * np.dot(XS,XYa)
+            XXa = XX[:, active]
+            XS = np.dot(XXa, Ri)
+            S = bxx - beta**2 * np.sum(XS*XXa, 1)
+            Q = bxy - beta**2 * np.dot(XS, XYa)
         # Use following:
-        # (EQ 1) q = A*Q/(A - S) ; s = A*S/(A-S), so if A = np.PINF q = Q, s = S
-        qi         = np.copy(Q)
-        si         = np.copy(S) 
-        #  If A is not np.PINF, then it should be 'active' feature => use (EQ 1)
-        Qa,Sa      = Q[active], S[active]
-        qi[active] = Aa * Qa / (Aa - Sa )
-        si[active] = Aa * Sa / (Aa - Sa )
-
-        return [si,qi,S,Q]
\ No newline at end of file
+        # (EQ 1) q = A*Q/(A - S) ; s = A*S/(A-S)
+        # so if A = np.PINF q = Q, s = S
+        qi = np.copy(Q)
+        si = np.copy(S)
+        # If A is not np.PINF, then it should be 'active' feature => use (EQ 1)
+        Qa, Sa = Q[active], S[active]
+        qi[active] = Aa * Qa / (Aa - Sa)
+        si[active] = Aa * Sa / (Aa - Sa)
+
+        return [si, qi, S, Q]
diff --git a/BayesValidRox/surrogate_models/eval_rec_rule.py b/BayesValidRox/surrogate_models/eval_rec_rule.py
new file mode 100644
index 000000000..d2d67d6ec
--- /dev/null
+++ b/BayesValidRox/surrogate_models/eval_rec_rule.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+
+
+
+Based on the implementation in UQLab [1].
+
+References:
+1. S. Marelli, and B. Sudret, UQLab: A framework for uncertainty quantification
+in Matlab, Proc. 2nd Int. Conf. on Vulnerability, Risk Analysis and Management
+(ICVRAM2014), Liverpool, United Kingdom, 2014, 2554-2563.
+
+2. S. Marelli, N. Lüthen, B. Sudret, UQLab user manual – Polynomial chaos
+expansions, Report # UQLab-V1.4-104, Chair of Risk, Safety and Uncertainty
+Quantification, ETH Zurich, Switzerland, 2021.
+
+Author: Farid Mohammadi, M.Sc.
+E-Mail: farid.mohammadi@iws.uni-stuttgart.de
+Department of Hydromechanics and Modelling of Hydrosystems (LH2)
+Institute for Modelling Hydraulic and Environmental Systems (IWS), University
+of Stuttgart, www.iws.uni-stuttgart.de/lh2/
+Pfaffenwaldring 61
+70569 Stuttgart
+
+Created on Fri Jan 14 2022
+"""
+import numpy as np
+from numpy.polynomial.polynomial import polyval
+# import scipy.stats as st
+from .glexindex import glexindex
+# from .aPoly_Construction import aPoly_Construction
+
+
+def poly_rec_coeffs(n_max, polytype, params=None):
+
+    if polytype == 'legendre':
+
+        def an(n):
+            return np.zeros((n+1, 1))
+
+        def sqrt_bn(n):
+            sq_bn = np.zeros((n+1, 1))
+            sq_bn[0, 0] = 1
+            for i in range(1, n+1):
+                sq_bn[i, 0] = np.sqrt(1./(4-i**-2))
+            return sq_bn
+
+        bounds = [-1, 1]
+
+    elif polytype == 'hermite':
+
+        def an(n):
+            return np.zeros((n+1, 1))
+
+        def sqrt_bn(n):
+            sq_bn = np.zeros((n+1, 1))
+            sq_bn[0, 0] = 1
+            for i in range(1, n+1):
+                sq_bn[i, 0] = np.sqrt(i)
+            return sq_bn
+
+        bounds = [-np.inf, np.inf]
+
+    elif polytype == 'laguerre':
+
+        def an(n):
+            a = np.zeros((n+1, 1))
+            for i in range(1, n+1):
+                a[i] = 2*i + params[1]
+            return a
+
+        def sqrt_bn(n):
+            sq_bn = np.zeros((n+1, 1))
+            sq_bn[0, 0] = 1
+            for i in range(1, n+1):
+                sq_bn[i, 0] = -np.sqrt(i * (i+params[1]-1))
+            return sq_bn
+
+        bounds = [0, np.inf]
+
+    AB = {'alpha_beta': np.concatenate((an(n_max), sqrt_bn(n_max)), axis=1),
+          'bounds': bounds}
+
+    return AB
+
+
+def eval_rec_rule(x, max_deg, polyType):
+
+    AB = poly_rec_coeffs(max_deg, polyType)
+    AB = AB['alpha_beta']
+
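+    # `values` column 0 holds P_{-1} = 0, column 1 holds P_0 = 1/sqrt(b_0);
+    # the loop applies the three-term recurrence
+    #   P_{k+1}(x) = ((x - a_k)*P_k(x) - sqrt(b_k)*P_{k-1}(x)) / sqrt(b_{k+1})
+    # with AB[k] = [a_k, sqrt(b_k)].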
+    values = np.zeros((len(x), AB.shape[0]+1))
+    values[:, 1] = 1 / AB[0, 1]
+
+    for k in range(AB.shape[0]-1):
+        values[:, k+2] = np.multiply((x - AB[k, 0]), values[:, k+1]) - \
+                         np.multiply(values[:, k], AB[k, 1])
+        values[:, k+2] = np.divide(values[:, k+2], AB[k+1, 1])
+    return values[:, 1:]
+
+
+def eval_rec_rule_arbitrary(x, max_deg, polycoeffs):
+    P = max_deg+1
+    values = np.zeros((len(x), P))
+
+    for deg in range(P):
+        values[:, deg] = polyval(x, polycoeffs[deg]).T
+
+    return values
+
+
+def eval_univ_basis(x, max_deg, polyTypes, apolycoeffs=None):
+
+    # Initialize the output array
+    n_samples, n_params = x.shape
+    P = max_deg+1
+    univ_vals = np.zeros((n_samples, n_params, P))
+
+    for i in range(n_params):
+
+        if polyTypes[i] == 'arbitrary':
+            polycoeffs = apolycoeffs[f'p_{i+1}']
+            univ_vals[:, i] = eval_rec_rule_arbitrary(x[:, i], max_deg,
+                                                      polycoeffs)
+        else:
+            univ_vals[:, i] = eval_rec_rule(x[:, i], max_deg, polyTypes[i])
+
+    return univ_vals
+
+
+def create_psi(basis_indices, univ_p_val):
+    """
+    This function assembles the design matrix Psi from the given basis index
+    set basis_indices and the univariate polynomial evaluations univ_p_val.
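+
+    For instance, in a hypothetical two-parameter case the multi-index
+    alpha = (2, 1) contributes the column
+    Psi[:, j] = univ_p_val[:, 0, 2] * univ_p_val[:, 1, 1].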
+    """
+
+    # number of input variables and size of the experimental design
+    n_samples, n_params, _ = univ_p_val.shape
+
+    # number of basis elements
+    P = basis_indices.shape[0]
+
+    # check that the variables have consistent sizes
+    if n_params != basis_indices.shape[1]:
+        raise ValueError("The shapes of BasisIndices and "
+                         "univ_p_val don't match!!")
+
+    # Preallocate the Psi matrix for performance
+    Psi = np.ones((n_samples, P), dtype=np.float64)
+
+    # Assemble the Psi matrix
+    for m in range(basis_indices.shape[1]):
+        aa = np.where(basis_indices[:, m] > 0)[0]
+        try:
+            basisIdx = basis_indices[aa, m]
+            bb = np.reshape(univ_p_val[:, m, basisIdx], Psi[:, aa].shape)
+            Psi[:, aa] = np.multiply(Psi[:, aa], bb)
+        except ValueError as err:
+            raise err
+
+    return Psi
+
+
+def create_basis_indices(nparams, deg, q):
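+    # Hyperbolic (q-norm) truncation of the total-degree basis: glexindex is
+    # assumed to keep multi-indices alpha with (sum alpha_i**q)**(1/q) <= deg;
+    # q = 1 recovers the full total-degree set.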
+    return glexindex(start=0, stop=deg+1, dimensions=nparams, reverse=True,
+                     graded=True, cross_truncation=q)
+
+
+# def fit_transform(X, distJoints, polyTypes, params=None):
+#     # uq_GetExpDesignSample --> uq_GeneralIsopTransform
+#     # --> uq_IsopTransform & uq_poly_marginals
+#     # Start transformation
+#     n_samples, n_params = X.shape
+#     cdfx = np.zeros((X.shape))
+#     Y = np.zeros((X.shape))
+
+#     for par_i in range(n_params):
+
+#         # Extract the parameters of the original space
+#         dist = distJoints[par_i]
+#         cdf = np.vectorize(lambda x: dist.cdf(x))
+
+#         # Extract the parameters of the transformation space based on polyType
+#         if polyTypes[par_i] == 'legendre':
+#             # Generate Y_Dists based
+#             params_Y = [-1, 1]
+#             dist_Y = st.uniform(loc=params_Y[0], scale=params_Y[1]-params_Y[0])
+#             inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+#         elif polyTypes[par_i] == 'hermite':
+#             params_Y = [0, 1]
+#             dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+#             inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+#         elif polyTypes[par_i] == 'laguerre':
+#             params_Y = [1, params[1]]
+#             dist_Y = st.gamma(loc=params_Y[0], scale=params_Y[1])
+#             inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+#         elif polyTypes[par_i] == 'arbitrary':
+#             # TODO: Unknown dist with gaussian_kde
+#             mu_X = X_marginals[par_i].Moments[0]
+#             stDev_X = X_marginals[par_i].Moments[1]
+#             cdf = np.vectorize(lambda x: (x - mu_X) / stDev_X)
+
+#             mu_Y = Y_marginals[par_i].Moments[0]
+#             stDev_Y = Y_marginals[par_i].Moments[1]
+#             inv_cdf = np.vectorize(lambda x: stDev_Y * x + mu_Y)
+#             pass
+
+#         # Compute CDF_x(X)
+#         cdfx[:, par_i] = cdf(X[:, par_i])
+
+#         # Compute invCDF_y(cdfx)
+#         Y[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+#     return Y
+
+
+# params = np.array([[5.0e-4, 1.5e-3],
+#                   [5e-3, 5.1e-3],
+#                   [1e-10, 1e-8]])
+
+# distJoints = [st.uniform(loc=params[0, 0], scale=params[0, 1]-params[0, 0]),
+#               st.uniform(loc=params[1, 0], scale=params[1, 1]-params[1, 0]),
+#               st.uniform(loc=params[2, 0], scale=params[2, 1]-params[2, 0])]
+# polyTypes = ['legendre', 'legendre', 'legendre']
+# max_deg = 12
+# q = 0.85
+# n_parms = params.shape[0]
+
+# ED_X = np.loadtxt('../tests/PA-A/ExpDesignX.csv', delimiter=',')
+# # data = np.loadtxt('../tests/PA-A/data.csv', delimiter=',')
+
+
+# ED_X_tr = fit_transform(ED_X, distJoints, polyTypes)
+# basis_indices = create_basis_indices(n_parms, max_deg, q)
+# univ_vals = eval_univ_basis(ED_X_tr, max_deg, polyTypes)
+# psi = create_psi(basis_indices, univ_vals)
+
+
+# # Arbitrary polynomials
+# polycoeffs = {}
+# for parIdx in range(n_parms):
+#     data = distJoints[parIdx].rvs(20000)
+#     polyTypes[parIdx] = 'arbitrary'
+#     polycoeffs[f'p_{parIdx+1}'] = aPoly_Construction(data, max_deg)
+
+# univ_vals_aPCE = eval_univ_basis(ED_X, max_deg, polyTypes,
+#                                   apolycoeffs=polycoeffs)
+
+# psi_aPCE = create_psi(basis_indices, univ_vals_aPCE)
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index a9d8391c0..080e09a07 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -39,8 +39,7 @@ from tqdm import tqdm
 import warnings
 from itertools import combinations, product
 from scipy import stats
-from sklearn.linear_model import LinearRegression, ARDRegression
-from sklearn import linear_model
+import sklearn.linear_model as lm
 from sklearn.metrics import r2_score, mean_squared_error
 from sklearn.gaussian_process import GaussianProcessRegressor
 import sklearn.gaussian_process.kernels as kernels
@@ -109,36 +108,37 @@ class Metamodel:
         self.index = None
         self.adaptVerbose = False
         self.Likelihoods = None
-        
-    
-    #--------------------------------------------------------------------------------------------------------
+
+    # -------------------------------------------------------------------------
     class AutoVivification(dict):
         """Implementation of perl's AutoVivification feature."""
+
         def __getitem__(self, item):
             try:
                 return dict.__getitem__(self, item)
             except KeyError:
                 value = self[item] = type(self)()
                 return value
-    #--------------------------------------------------------------------------------------------------------
+    # -------------------------------------------------------------------------
+
     def PolyBasisIndices(self, degree, q):
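+        """
+        Returns the multi-indices of the multivariate polynomial basis for
+        the given total degree, truncated with the q-norm.
+        """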
-        
-        # sparseBasisIndices = sp.sparse.coo_matrix((values.astype(int), (row_ind.astype(int), col_ind.astype(int))))
-        self.PolynomialDegrees = glexindex(start=0, stop=degree+1, dimensions=self.NofPa, 
-                                               reverse=True,graded=True,cross_truncation=q)
 
+        self.PolynomialDegrees = glexindex(start=0, stop=degree+1,
+                                           dimensions=self.NofPa,
+                                           cross_truncation=q,
+                                           reverse=False, graded=True)
         return self.PolynomialDegrees
-    
-    #--------------------------------------------------------------------------------------------------------
+
+    # -------------------------------------------------------------------------
     def addExpDesign(self):
         self.ExpDesign = ExpDesigns(self.Inputs, metaModel=self.metaModel)
 
     def Exe_ExpDesign(self, Model):
-        
+
         if self.ExpDesignFlag != 'sequential':
             # Read ExpDesign from the provided hdf5
             if self.ExpDesign.hdf5File is not None:
-                
+
                 # Read hdf5 file
                 f = h5py.File(self.ExpDesign.hdf5File, 'r+')
 
@@ -147,268 +147,281 @@ class Metamodel:
                     self.ExpDesign.X = np.array(f["EDX/New_init_"])
                 except:
                     self.ExpDesign.X = np.array(f["EDX/init_"])
-                
+
                 self.ExpDesign.NrSamples = self.ExpDesign.X.shape[0]
-                
+
                 # Read EDX and pass it to ExpDesign object
                 OutputNames = self.ModelObj.Output.Names
                 self.ExpDesign.Y = {}
-                
+
                 # Extract x values
                 try:
                     self.ExpDesign.Y["x_values"] = dict()
                     for varIdx, var in enumerate(OutputNames):
-                        self.ExpDesign.Y["x_values"][var] = np.array(f["x_values/"+var])
+                        self.ExpDesign.Y["x_values"][var] = np.array(f[f"x_values/{var}"])
                 except:
                     self.ExpDesign.Y["x_values"] = np.array(f["x_values"])
 
                 for varIdx, var in enumerate(OutputNames):
                     try:
-                        self.ExpDesign.Y[var] = np.array(f["EDY/"+var+"/New_init_"])
+                        self.ExpDesign.Y[var] = np.array(f[f"EDY/{var}/New_init_"])
                     except:
-                        self.ExpDesign.Y[var] = np.array(f["EDY/"+var+"/init_"])
+                        self.ExpDesign.Y[var] = np.array(f[f"EDY/{var}/init_"])
                 f.close()
             else:
-                # Check if an old hdf5 file exists: if yes, rename
+                # Check if an old hdf5 file exists: if yes, rename it
                 hdf5file = 'ExpDesign'+'_'+self.ModelObj.Name+'.hdf5'
-                if os.path.exists(hdf5file): os.rename(hdf5file,'old_'+hdf5file)
-        
-        # Get the sample for X (GetSample)
-        self.ExpDesign.X = self.ExpDesign.GetSample(self.ExpDesign.NrSamples, self.ExpDesign.SamplingMethod,
-                                                    self.MaxPceDegree)
+                if os.path.exists(hdf5file):
+                    os.rename(hdf5file, 'old_'+hdf5file)
+
+        # ---- Prepare X samples ----
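+        # ED_X holds the samples in the original parameter space, ED_X_tr the
+        # transformed samples used as collocation points for the basis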
+        ED_X, ED_X_tr = self.ExpDesign.GetSample(self.ExpDesign.NrSamples,
+                                                 self.ExpDesign.SamplingMethod,
+                                                 transform=True,
+                                                 MaxPceDegree=self.MaxPceDegree)
+        self.ExpDesign.X = ED_X
+        self.ExpDesign.collocationPoints = ED_X_tr
         self.BoundTuples = self.ExpDesign.BoundTuples
-        
-        # ---- Create ModelOutput ------
-        # Add concurrent simulations
+
+        # ---- Run simulations at X ----
         if self.ExpDesign.Y is None:
             print('\n Now the forward model needs to be run!\n')
-            self.ModelOutputDict, self.ExpDesign.X = Model.Run_Model_Parallel(self.ExpDesign.X) 
-            self.ExpDesign.Y = self.ModelOutputDict
-            
+            ED_Y, up_ED_X = Model.Run_Model_Parallel(ED_X)
+            self.ExpDesign.X = up_ED_X
+            self.ModelOutputDict = ED_Y
+            self.ExpDesign.Y = ED_Y
         else:
             # Check if a dict has been passed.
             if type(self.ExpDesign.Y) is dict:
                 self.ModelOutputDict = self.ExpDesign.Y
-            
             else:
-                raise Exception('Please provide either a dictionary or pass a' 
-                                'hdf5 file using the ExpDesign.hdf5File argument.')
-            
+                raise Exception('Please provide either a dictionary or an '
+                                'hdf5 file via the ExpDesign.hdf5File '
+                                'argument.')
+
         return self.ExpDesign.collocationPoints
-    
-    #--------------------------------------------------------------------------------------------------------
-    def univ_basis_vals(self, ExpDesignX, n_max=None):
+
+    # -------------------------------------------------------------------------
+    def univ_basis_vals(self, ED_X, n_max=None):
         """
          A function to evaluate univariate regressors along input directions.
         """
-        
-        # The number of samples for pre-allocation is deduced from the 
-        # number of rows in ParamtersetsMatrix:
-        polytypes = self.ExpDesign.Polytypes
-        origSpaceDist = self.ExpDesign.JDist
-        if ExpDesignX.ndim !=2: ExpDesignX = ExpDesignX.reshape(1, len(ExpDesignX))
-        NofSamples, NofPa = ExpDesignX.shape
-        n_max = self.MaxPceDegree if n_max is None else n_max
-        P = n_max + 1
-        
-        # Allocate the output matrix
-        univ_vals = np.zeros((NofSamples,NofPa, P),dtype=np.float32)
 
-        # ----------------
-        if 'arbitrary' in polytypes:
+        # Extract information
+        polyTypes = self.ExpDesign.Polytypes
+        if ED_X.ndim != 2:
+            ED_X = ED_X.reshape(1, len(ED_X))
+        n_max = self.MaxPceDegree if n_max is None else n_max
 
-            univ_kernel = lambda Input, Kernel_Degree: polyval(Input, polycoeffs[Kernel_Degree]).T
-            
-            # Calculate the univariate polynomials
-            for parIdx in range(NofPa):
-                polycoeffs = self.ExpDesign.polycoeffs['p_'+str(parIdx+1)]
-                
-                for deg in range(P):
-                    univ_vals[:,parIdx,deg] = univ_kernel(ExpDesignX[:,parIdx], deg)
+        from .eval_rec_rule import eval_univ_basis
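+        # Data-driven (arbitrary) polynomials are evaluated from their
+        # precomputed coefficients; the others via the analytical recurrence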
+        if self.ExpDesign.arbitrary:
+            apolycoeffs = self.ExpDesign.polycoeffs
+        else:
+            apolycoeffs = None
+        return eval_univ_basis(ED_X, n_max, polyTypes, apolycoeffs)
+
+        # polytypes = self.ExpDesign.Polytypes
+        # origSpaceDist = self.ExpDesign.JDist
+        # if ED_X.ndim != 2:
+        #     ED_X = ED_X.reshape(1, len(ED_X))
+        # NofSamples, NofPa = ED_X.shape
+        # n_max = self.MaxPceDegree if n_max is None else n_max
+        # P = n_max + 1
+        # Allocate the output matrix
+        # univ_vals = np.zeros((NofSamples, NofPa, P), dtype=np.float32)
 
-            return univ_vals
         # ----------------
-        else: # Following chaospy/orthogonal/three_terms_recursion.py
-            _, polynomials, norms, = chaospy.quadrature.recurrence.stieltjes.analytical_stieljes(n_max, origSpaceDist, normed=True)
-            univ_vals = np.swapaxes(polynomials(*ExpDesignX.T).T,1,2)
-            return univ_vals
-    #--------------------------------------------------------------------------------------------------------
-    def PCE_create_Psi(self,BasisIndices,univ_p_val):
+        # if 'arbitrary' in polytypes:
+
+        #     def eval_rec_rule_arbitrary(x, max_deg, polycoeffs):
+        #         P = max_deg+1
+        #         values = np.zeros((len(x), P))
+
+        #         for deg in range(P):
+        #             values[:, deg] = polyval(x, polycoeffs[deg]).T
+
+        #         return values
+
+        #     # Calculate the univariate polynomials
+        #     for parIdx in range(NofPa):
+        #         polycoeffs = self.ExpDesign.polycoeffs['p_'+str(parIdx+1)]
+        #         univ_vals[:, parIdx] = eval_rec_rule_arbitrary(ED_X[:, parIdx],
+        #                                                        n_max,
+        #                                                        polycoeffs)
+        #     return univ_vals
+        # # ----------------
+        # else:
+        #     from .eval_rec_rule import eval_univ_basis
+        #     polyTypes = self.ExpDesign.Polytypes
+        #     return eval_univ_basis(ED_X, n_max, polyTypes)
+
+    # -------------------------------------------------------------------------
+    def PCE_create_Psi(self, BasisIndices, univ_p_val):
         """
-        This function assemble the design matrix Psi from the given basis index 
+        This function assembles the design matrix Psi from the given basis index
         set INDICES and the univariate polynomial evaluations univ_p_val.
         """
 
         # Check if BasisIndices is a sparse matrix
         sparsity = sp.sparse.issparse(BasisIndices)
-        if sparsity: BasisIndices = BasisIndices.toarray()
+        if sparsity:
+            BasisIndices = BasisIndices.toarray()
 
         # Initialization and consistency checks
         # number of input variables
         NofPa = univ_p_val.shape[1]
-        
+
         # Size of the experimental design
         NofSamples = univ_p_val.shape[0]
-        
+
         # number of basis elements
         P = BasisIndices.shape[0]
 
         # check that the variables have consistent sizes
         if NofPa != BasisIndices.shape[1]:
-            raise ValueError("The shapes of BasisIndices and univ_p_val don't match!!")
+            raise ValueError("The shapes of BasisIndices and univ_p_val don't "
+                             "match!!")
 
         # Preallocate the Psi matrix for performance
-        Psi = np.ones((NofSamples, P),dtype=np.float32)
+        Psi = np.ones((NofSamples, P))
         # Assemble the Psi matrix
         for m in range(BasisIndices.shape[1]):
-            aa = np.where(BasisIndices[:,m]>0)[0] 
+            aa = np.where(BasisIndices[:, m] > 0)[0]
             try:
-                basisIdx = BasisIndices[aa,m]
-                Psi[:,aa] = np.multiply(Psi[:,aa] , np.reshape(univ_p_val[:, m, basisIdx], Psi[:,aa].shape))
+                basisIdx = BasisIndices[aa, m]
+                bb = np.reshape(univ_p_val[:, m, basisIdx], Psi[:, aa].shape)
+                Psi[:, aa] = np.multiply(Psi[:, aa], bb)
             except:
                 raise ValueError
 
         return Psi
-    
-    #--------------------------------------------------------------------------------------------------------
-    def RS_Builder(self, PSI, ModelOutput, BasisIndices,startBasisIndices=None, RegMethod=None):
+
+    # -------------------------------------------------------------------------
+    def RS_Builder(self, PSI, ModelOutput, BasisIndices,
+                   startBasisIndices=None, RegMethod=None):
         """
 
-        ==================================================
-        Automatic Relevance Determination Regression (ARD)
-        ==================================================
-        
-        Fit regression model with Bayesian Ridge Regression.
-        
-        See :ref:`bayesian_ridge_regression` for more information on the regressor.
-        
-        Compared to the OLS (ordinary least squares) estimator, the coefficient
-        weights are slightly shifted toward zeros, which stabilises them.
-        
-        The histogram of the estimated weights is very peaked, as a sparsity-inducing
-        prior is implied on the weights.
-        
-        The estimation of the model is done by iteratively maximizing the
-        marginal log-likelihood of the observations.
+        Fits a regression model to compute the expansion coefficients.
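+
+        Supported RegMethod values: 'ols', 'brr', 'ard', 'fastard', 'bcs',
+        'lars', 'sgdr', 'omp', 'vbl' and 'ebl'.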
 
-        Bayesian ARD Regression: 
-            https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ARDRegression.html
-        
-        Bayesian Ridge Regression:
-            https://scikit-learn.org/stable/modules/linear_model.html#bayesian-regression
-        
-        Stochastic gradient descent learning (SGDRegressor)
-            https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor
-        
         """
-        RegMethod = self.RegMethod if RegMethod == None else RegMethod
-        
+        if RegMethod is None:
+            RegMethod = self.RegMethod
+
         # Check if BasisIndices is a sparse matrix
         sparsity = sp.sparse.issparse(BasisIndices)
-        
+
         clf_poly = []
         compute_score = True if self.DisplayFlag else False
-        
+
         #  inverse of the observed variance of the data
-        Lambda = 1 / np.var(ModelOutput) if np.var(ModelOutput) !=0 else 1e-6
+        if np.var(ModelOutput) != 0:
+            Lambda = 1 / np.var(ModelOutput)
+        else:
+            Lambda = 1e-6
 
         # Bayes sparse adaptive aPCE
         if RegMethod.lower() != 'ols':
-            if RegMethod.lower() == 'brr' or np.all(ModelOutput==0):
-                clf_poly = linear_model.BayesianRidge(n_iter=1000, tol=1e-7,
-                                                      fit_intercept=True,
-                                                      normalize=True, 
-                                                      compute_score=compute_score,
-                                                      alpha_1=1e-04, alpha_2=1e-04, 
-                                                      lambda_1=Lambda, lambda_2=Lambda)
+            if RegMethod.lower() == 'brr' or np.all(ModelOutput == 0):
+                clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
+                                            fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            alpha_1=1e-04, alpha_2=1e-04,
+                                            lambda_1=Lambda, lambda_2=Lambda)
                 clf_poly.converged = True
-                
+
             elif RegMethod.lower() == 'ard':
-                clf_poly = ARDRegression(fit_intercept=True,
-                                         normalize=True,
-                                         compute_score=compute_score,
-                                         n_iter=1000, tol= 0.0001,
-                                         alpha_1=1e-3, alpha_2=1e-3,
-                                         lambda_1=Lambda, lambda_2=Lambda)
-            
+                clf_poly = lm.ARDRegression(fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            n_iter=1000, tol=0.0001,
+                                            alpha_1=1e-3, alpha_2=1e-3,
+                                            lambda_1=Lambda, lambda_2=Lambda)
+
             elif RegMethod.lower() == 'fastard':
                 clf_poly = RegressionFastARD(start=startBasisIndices,
                                              fit_intercept=True,
                                              normalize=True,
                                              compute_score=compute_score,
-                                             n_iter=2000, tol=1e-5)
-            
+                                             n_iter=300, tol=1e-10)
+
             elif RegMethod.lower() == 'bcs':
                 clf_poly = RegressionFastLaplace(fit_intercept=False,
                                                  n_iter=1000, tol=1e-7)
-            
+
             elif RegMethod.lower() == 'lars':
-                clf_poly = linear_model.LassoLarsCV(fit_intercept=False)
-            
+                clf_poly = lm.LassoLarsCV(fit_intercept=False)
+
             elif RegMethod.lower() == 'sgdr':
-                clf_poly = linear_model.SGDRegressor(fit_intercept=False, 
-                                                     max_iter=5000, tol=1e-7)
-            
+                clf_poly = lm.SGDRegressor(fit_intercept=False,
+                                           max_iter=5000, tol=1e-7)
+
             elif RegMethod.lower() == 'omp':
-                clf_poly = linear_model.OrthogonalMatchingPursuitCV(fit_intercept=False,
-                                                                    max_iter=10)
-            
+                clf_poly = lm.OrthogonalMatchingPursuitCV(fit_intercept=False,
+                                                          max_iter=10)
+
             elif RegMethod.lower() == 'vbl':
                 clf_poly = VBLinearRegression(fit_intercept=False)
-                
+
             elif RegMethod.lower() == 'ebl':
-                clf_poly = EBLinearRegression(optimizer = 'em')
+                clf_poly = EBLinearRegression(optimizer='em')
 
             # Fit
             clf_poly.fit(PSI, ModelOutput)
-            
+
             # Select the nonzero entries of coefficients
             # The first column must be kept (For mean calculations)
             nnz_idx = np.nonzero(clf_poly.coef_)[0]
 
             if len(nnz_idx) == 0 or nnz_idx[0] != 0:
-
                 nnz_idx = np.insert(np.nonzero(clf_poly.coef_)[0], 0, 0)
                 # Remove the zero entries for Bases and PSI if need be
-                PolynomialDegrees_Sparse = BasisIndices.toarray()[nnz_idx] if sparsity else BasisIndices[nnz_idx]
-                PSI_Sparse = PSI[:,nnz_idx]
+                if sparsity:
+                    PolynomialDegrees_Sparse = BasisIndices.toarray()[nnz_idx]
+                else:
+                    PolynomialDegrees_Sparse = BasisIndices[nnz_idx]
+                PSI_Sparse = PSI[:, nnz_idx]
 
-                # Store the coefficients of the regression model (mean of distribution).
+                # Store the coefficients of the regression model
                 clf_poly.fit(PSI_Sparse, ModelOutput)
                 coeffs = clf_poly.coef_
-    
             else:
                 # This is for the case where all outputs are zero, thereby
                 # all coefficients are zero
-                PolynomialDegrees_Sparse = BasisIndices.toarray() if sparsity else BasisIndices
+                if sparsity:
+                    PolynomialDegrees_Sparse = BasisIndices.toarray()
+                else:
+                    PolynomialDegrees_Sparse = BasisIndices
                 PSI_Sparse = PSI
                 coeffs = clf_poly.coef_
                 # Raise an error, if all coeffs are zero
-                #raise ValueError('All the coefficients of the regression model are zero!')
-                
-                
-                
+                # raise ValueError('All the coefficients of the regression
+                # model are zero!')
+
         # Ordinary least square method (OLS)
         else:
-            PolynomialDegrees_Sparse =  BasisIndices.toarray() if sparsity else BasisIndices
+            if sparsity:
+                PolynomialDegrees_Sparse = BasisIndices.toarray()
+            else:
+                PolynomialDegrees_Sparse = BasisIndices
             PSI_Sparse = PSI
 
             PsiTPsi = np.dot(PSI_Sparse.T, PSI_Sparse)
-            
-            if np.linalg.cond(PsiTPsi) > 1e-12 and np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
+
+            if np.linalg.cond(PsiTPsi) > 1e-12 and \
+               np.linalg.cond(PsiTPsi) < 1 / sys.float_info.epsilon:
                 # faster
-                coeffs = sp.linalg.solve(PsiTPsi, np.dot(PSI_Sparse.T,ModelOutput))
+                coeffs = sp.linalg.solve(PsiTPsi, np.dot(PSI_Sparse.T, ModelOutput))
             else:
                 # stabler
-                coeffs = np.dot(np.dot(np.linalg.pinv(PsiTPsi), PSI_Sparse.T), ModelOutput)
-        
-        
-        return PolynomialDegrees_Sparse,PSI_Sparse,coeffs, clf_poly
-    
-    #--------------------------------------------------------------------------------------------------------
+                coeffs = np.dot(np.dot(np.linalg.pinv(PsiTPsi), PSI_Sparse.T),
+                                ModelOutput)
+
+        return PolynomialDegrees_Sparse, PSI_Sparse, coeffs, clf_poly
+
+    # --------------------------------------------------------------------------------------------------------
     def adaptiveSparseaPCE(self, CollocationPoints, ModelOutput, varIdx, prevBasisIndices=None, verbose=False):
         
-        NrSamples,NofPa = CollocationPoints.shape
+        NrSamples, NofPa = CollocationPoints.shape
         # Initialization
         qAllCoeffs, AllCoeffs = {}, {}
         qAllIndices_Sparse ,AllIndices_Sparse = {}, {}
@@ -441,22 +454,22 @@ class Metamodel:
         #==============================================================================
         # For each degree check all q-norms and choose the best one
         scores = -np.inf*np.ones(DegreeArray.shape[0])
-        qNormScores = -np.inf*np.ones(nqnorms);
+        qNormScores = -np.inf*np.ones(nqnorms)
         scores_BRR = -np.inf*np.ones(DegreeArray.shape[0])
         
         for degIdx, deg in enumerate(DegreeArray):
 
             for qidx, q in enumerate(qnorm):
-                
+
                 # Extract the polynomial basis indices from the pool of allBasisIndices
                 BasisIndices = self.allBasisIndices[str(deg)][str(q)]
-                
+
                 # Assemble the Psi matrix
-                Psi = self.PCE_create_Psi(BasisIndices,self.univ_p_val)
+                Psi = self.PCE_create_Psi(BasisIndices, self.univ_p_val)
 
                 # ---- Calulate the cofficients of aPCE ----------------------
                 sparseBasisIndices, sparsePSI, Coeffs, clf_poly = self.RS_Builder(Psi,ModelOutput,BasisIndices)
-                
+
                 # Calculate and save the score of LOOCV
                 score, LCerror = self.CorrectedLeaveoutError(sparsePSI, Coeffs, ModelOutput, clf_poly)
 
@@ -470,7 +483,7 @@ class Metamodel:
                 qAllclf_poly[str(qidx+1)] = clf_poly
                 qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
                 qAllLCerror[str(qidx+1)] = LCerror
-                
+
                 # EarlyStop check
                 # if there are at least n_checks_qNorm entries after the
                 # best one, we stop
@@ -600,57 +613,50 @@ class Metamodel:
         
         return returnVars
     
-    #--------------------------------------------------------------------------------------------------------
+    # -------------------------------------------------------------------------
     def CorrectedLeaveoutError(self, TotalPsi, Coeffs, ModelOutputs, clf):
         """
-         calculate the LOO error for the OLS regression on regressor matrix
-         PSI that generated the coefficients in COEFFICIENTS. If MODIFIED_FLAG
-         is set to true, modified leave one out calculation is returned
+         Calculates the corrected LOO error for the OLS regression on the
+         regressor matrix PSI that generated the given coefficients
          (based on Blatman, 2009 (PhD Thesis), pg. 115-116).
-        
-         
-         modified Leave One Out Estimate is given by (eq. 5.10, pg 116):
-           Err_loo = mean((M(x^(i))- metaM(x^(i))/(1-h_i)).^2), with
-           h_i = tr(Psi*(PsiTPsi)^-1 *PsiT)_i and
-             divided by var(Y) (eq. 5.11) and multiplied by the T coefficient (eq 5.13):
-           T(P,NCoeff) = NCoeff/(NCoeff-P) * (1 + tr(PsiTPsi)^-1)
-    
+
         This is based on the following paper:
-            ""Blatman, G., & Sudret, B. (2011). Adaptive sparse polynomial chaos expansion based on least angle regression. 
+            ""Blatman, G., & Sudret, B. (2011). Adaptive sparse polynomial
+            chaos expansion based on least angle regression.
             Journal of Computational Physics, 230(6), 2345-2367.""
-    
+
         Psi: the orthogonal polynomials of the response surface
         """
-        
-        Psi = np.array(TotalPsi, dtype = float)
-        
+        Psi = np.array(TotalPsi, dtype=float)
+
+        # Create PSI_Sparse by removing redundant terms
         nnz_idx = np.nonzero(Coeffs)[0]
-        if len(nnz_idx)==0: nnz_idx = [0]
-        PSI_Sparse = Psi[:,nnz_idx]
+        if len(nnz_idx) == 0:
+            nnz_idx = [0]
+        PSI_Sparse = Psi[:, nnz_idx]
+
+        # NrCoeffs of aPCEs
+        P = len(nnz_idx)
+        # NrEvaluation (Size of experimental design)
+        N = Psi.shape[0]
 
-        
-        P = len(nnz_idx)  # NrCoeffs of aPCEs 
-        N = Psi.shape[0]  # NrEvaluation (Size of experimental design)
-        
-        
         # Build the projection matrix
         PsiTPsi = np.dot(PSI_Sparse.T, PSI_Sparse)
-        
-        if np.linalg.cond(PsiTPsi) > 1e-12 and np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
+
+        if np.linalg.cond(PsiTPsi) > 1e-12 and \
+           np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
             # faster
-            #M = PsiTPsi\sparse.eye(PsiTPsi.shape).toarray()
-            M = sp.linalg.solve(PsiTPsi, sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+            M = sp.linalg.solve(PsiTPsi,
+                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
         else:
             # stabler
             M = np.linalg.pinv(PsiTPsi)
-    
-        # h factor (the full matrix is not calculated explicitly, 
-        # only the trace is to save memory)
+
+        # h factor (the full matrix is not calculated explicitly,
+        # only the trace is, to save memory)
         PsiM = np.dot(PSI_Sparse, M)
-        
-        h =  np.sum(np.multiply(PsiM,PSI_Sparse), axis = 1, dtype=np.float128)
-    
+
+        h = np.sum(np.multiply(PsiM, PSI_Sparse), axis=1, dtype=np.float128)
 
         # ------ Calculate Error Loocv for each measurement point ----
         # Residuals
@@ -658,44 +664,43 @@ class Metamodel:
             residual = np.dot(Psi, Coeffs) - ModelOutputs
         else:
             residual = clf.predict(Psi) - ModelOutputs
-            
+
         # Variance
         varY = np.var(ModelOutputs)
-        
-        
+
         if varY == 0:
             normEmpErr = 0
             ErrLoo = 0
             LCerror = np.zeros((ModelOutputs.shape))
         else:
             normEmpErr = np.mean(residual**2)/varY
-            
+
             # LCerror = np.divide(residual, (1-h))
             LCerror = residual / (1-h)[:, np.newaxis]
             ErrLoo = np.mean(np.square(LCerror)) / varY
-            # if there are NaNs, just return an infinite LOO error (this happens,
-            # e.g., when a strongly underdetermined problem is solved)
-            if np.isnan(ErrLoo): ErrLoo = np.inf
-            
-        
+            # if there are NaNs, just return an infinite LOO error (this
+            # happens, e.g., when a strongly underdetermined problem is solved)
+            if np.isnan(ErrLoo):
+                ErrLoo = np.inf
+
         # Corrected Error for over-determined system
         trM = np.trace(M)
         if trM < 0 or abs(trM) > 1e6:
-            trM = np.trace(np.linalg.pinv(np.dot(Psi.T,Psi)))
-        
+            trM = np.trace(np.linalg.pinv(np.dot(Psi.T, Psi)))
+
         # Over-determined system of Equation
         if N > P:
             T_factor = N/(N-P) * (1 + trM)
-        
+
         # Under-determined system of Equation
         else:
             T_factor = np.inf
-            
+
         CorrectedErrLoo = ErrLoo * T_factor
-        
+
         Q_2 = 1 - CorrectedErrLoo
-    
-        return Q_2, residual #LCerror
+
+        return Q_2, residual
 
     #--------------------------------------------------------------------------------------------------------
     def PCATransformation(self, Output):
@@ -759,21 +764,21 @@ class Metamodel:
             print('with the R^2 score: {0:.3f}'.format(Score))
             #print("BME:", gp.log_marginal_likelihood())
             print('-'*50)
-        
+
         return gp
-        
-    #--------------------------------------------------------------------------------------------------------
+
+    # -------------------------------------------------------------------------
     def create_PCE_normDesign(self, Model, OptDesignFlag=False):
         """
-        This function loops over the outputs and each time step/point to compute the PCE
-        coefficients.
-        
+        This function loops over the outputs and each time step/point and fits
+        the metamodel.
+
         """
-        
+
         # Get the collocation points to run the forward model
         CollocationPoints = self.Exe_ExpDesign(Model)
         OutputDict = self.ModelOutputDict.copy()
-        
+
         # Initialize the nested dictionaries
         self.DegreeDict = self.AutoVivification()
         self.qNormDict = self.AutoVivification()
@@ -788,9 +793,10 @@ class Metamodel:
 
         if self.ExpDesignFlag != 'sequential':
             # Read observations or MCReference
-            if Model.MeasurementFile is not None or len(Model.Observations) != 0:
+            if Model.MeasurementFile is not None \
+                or len(Model.Observations) != 0:
                 self.Observations = Model.read_Observation()
-        
+
         # Define the DegreeArray
         maxDeg = self.MaxPceDegree
         nSamples, ndim = CollocationPoints.shape
@@ -801,7 +807,7 @@ class Metamodel:
         degNew = degNew if self.ExpDesignFlag == 'sequential' else self.MaxPceDegree
         self.q = np.array(self.q) if not np.isscalar(self.q) else np.array([self.q])
         if degNew > self.MinPceDegree and self.RegMethod.lower() != 'fastard':
-            self.DegreeArray = np.arange(self.MinPceDegree,degNew+1)
+            self.DegreeArray = np.arange(self.MinPceDegree, degNew+1)
         else:
             self.DegreeArray = np.array([degNew])
 
@@ -812,74 +818,75 @@ class Metamodel:
                 # Generate the polynomial basis indices
                 for qidx, q in enumerate(self.q):
                     self.allBasisIndices[str(deg)][str(q)] = self.PolyBasisIndices(degree=deg, q=q)
-                    
-            
+
         # Evaluate the univariate polynomials on ExpDesign
         if self.metaModel.lower() != 'gpe':
             self.univ_p_val = self.univ_basis_vals(CollocationPoints)
-        
-        
-        if 'x_values' in OutputDict: del OutputDict['x_values']
-                
+
+        if 'x_values' in OutputDict:
+            del OutputDict['x_values']
+
         # --- Loop through data points and fit the surrogate ---
         if not OptDesignFlag:
-            print("\n>>>> Training the {0} metamodel started. <<<<<<\n".format(self.metaModel))
-            items = tqdm(OutputDict.items(), desc ="Fitting regression")
+            print(f"\n>>>> Training the {self.metaModel} metamodel started. <<<<<<\n")
+            items = tqdm(OutputDict.items(), desc="Fitting regression")
         else:
             items = OutputDict.items()
-        
+
         # For loop over the components/outputs
-        for key , Output in items:
-            
+        for key, Output in items:
+
             # Dimensionality reduction with PCA, if specified
             if self.DimRedMethod.lower() == 'pca':
                 self.pca[key], OutputMatrix = self.PCATransformation(Output)
             else:
                 OutputMatrix = Output
-            
-                
+
             # Parallel fit regression
             if self.metaModel.lower() == 'gpe':
                 # Prepare the input matrix
                 scaler = MinMaxScaler()
                 X_S = scaler.fit_transform(CollocationPoints)
-                
+
                 self.x_scaler[key] = scaler
-                
+
                 outDict = Parallel(n_jobs=-1, prefer='threads')(
-                    delayed(self.GaussianProcessEmulator)(X_S,OutputMatrix[:,idx]) 
-                            for idx in range(OutputMatrix.shape[1]))
+                    delayed(self.GaussianProcessEmulator)(X_S, OutputMatrix[:, idx]) 
+                    for idx in range(OutputMatrix.shape[1]))
+
                 for idx in range(OutputMatrix.shape[1]):
-                    self.gp_poly[key]["y_"+str(idx+1)] = outDict[idx]
+                    self.gp_poly[key][f"y_{idx+1}"] = outDict[idx]
 
             else:
                 outDict = Parallel(n_jobs=-1, prefer='threads')(
-                    delayed(self.adaptiveSparseaPCE)(CollocationPoints,OutputMatrix[:,idx],
-                                          idx) for idx in range(OutputMatrix.shape[1]))
-                
+                    delayed(self.adaptiveSparseaPCE)(CollocationPoints,
+                                                     OutputMatrix[:, idx], idx)
+                    for idx in range(OutputMatrix.shape[1]))
+
                 for idx in range(OutputMatrix.shape[1]):
                     # Create a dict to pass the variables
-                    self.DegreeDict[key]["y_"+str(idx+1)] = outDict[idx]['degree']
-                    self.qNormDict[key]["y_"+str(idx+1)] = outDict[idx]['qnorm']
-                    self.CoeffsDict[key]["y_"+str(idx+1)] = outDict[idx]['coeffs']
-                    self.BasisDict[key]["y_"+str(idx+1)] = outDict[idx]['PolynomialDegrees']
-                    self.ScoresDict[key]["y_"+str(idx+1)] = outDict[idx]['LOOCVScore']
-                    self.clf_poly[key]["y_"+str(idx+1)] = outDict[idx]['clf_poly']
-                    self.LCerror[key]["y_"+str(idx+1)] = outDict[idx]['LCerror']
-                
+                    self.DegreeDict[key][f"y_{idx+1}"] = outDict[idx]['degree']
+                    self.qNormDict[key][f"y_{idx+1}"] = outDict[idx]['qnorm']
+                    self.CoeffsDict[key][f"y_{idx+1}"] = outDict[idx]['coeffs']
+                    self.BasisDict[key][f"y_{idx+1}"] = outDict[idx]['PolynomialDegrees']
+                    self.ScoresDict[key][f"y_{idx+1}"] = outDict[idx]['LOOCVScore']
+                    self.clf_poly[key][f"y_{idx+1}"] = outDict[idx]['clf_poly']
+                    self.LCerror[key][f"y_{idx+1}"] = outDict[idx]['LCerror']
+
                 # PCEKriging (PCE with Gaussian Process Emulator)
                 # if self.metaModel.lower() == 'pcekriging':
                 #     # outDict = Parallel(n_jobs=-1)(
                 #     # delayed(self.GaussianProcessEmulator)(CollocationPoints,self.LCerror[key]["y_"+str(idx+1)])
                 #     #         for idx in range(OutputMatrix.shape[1]))
-                    
+
                 #     for idx in range(OutputMatrix.shape[1]):
                 #         self.gp_poly[key]["y_"+str(idx+1)] =  self.GaussianProcessEmulator(CollocationPoints, self.LCerror[key]["y_"+str(idx+1)])
                         # self.gp_poly[key]["y_"+str(idx+1)] = outDict[idx]
-        
-        if not OptDesignFlag:    
-            print("\n>>>> Training the {0} metamodel sucessfully completed. <<<<<<\n".format(self.metaModel))           
-        
+
+        if not OptDesignFlag:
+            print(f"\n>>>> Training the {self.metaModel} metamodel"
+                  " successfully completed. <<<<<<\n")
+
         return self
 
     #--------------------------------------------------------------------------------------------------------
@@ -1910,62 +1917,65 @@ class Metamodel:
         
         return Xnew
     
-    #--------------------------------------------------------------------------------------------------------
-    def eval_metamodel(self,samples=None, nsamples=None,Y=None, discrepModel=False,samplingMethod='random', name='Calib', return_samples=False):
-        
+    # -------------------------------------------------------------------------
+    def eval_metamodel(self, samples=None, nsamples=None, Y=None,
+                       discrepModel=False, samplingMethod='random',
+                       name='Calib', return_samples=False):
+
         ModelDict = self.gp_poly if self.metaModel.lower() == 'gpe' else self.CoeffsDict 
-        rosenblatt = self.Inputs.Rosenblatt
-        
+
         if samples is None:
             if nsamples is None:
                 self.NrofSamples = 100000
             else:
                 self.NrofSamples = nsamples
-            
-            samples = self.ExpDesign.GetSample(self.NrofSamples, samplingMethod)
+
+            samples = self.ExpDesign.GetSample(self.NrofSamples,
+                                               samplingMethod,
+                                               transform=True)
         else:
             self.Samples = samples
             self.NrofSamples = len(samples)
-        
-        # Check if samples need Rosenblatt Transformation
-        if rosenblatt:
-            samples = self.ExpDesign.JDist.inv(self.ExpDesign.origJDist.fwd(samples.T)).T
-        
+
+        # Transform the samples, if needed
+        samples = self.ExpDesign.transform(samples)
+
         if self.metaModel.lower() != 'gpe':
             univ_p_val = self.univ_basis_vals(samples, n_max=self.MaxPceDegree)
-        
+
         meanPCEOutputs = {}
         stdPCEOutputs = {}
-        
+
         # Loop over outputs
         cnt = 0
         for Outkey, ValuesDict in ModelDict.items():
-            
+
             PCEOutputs_mean = np.zeros((len(samples), len(ValuesDict)))
             PCEOutputs_std = np.zeros((len(samples), len(ValuesDict)))
             idx = 0
             for Inkey, InIdxValues in ValuesDict.items():
-                
+
                 # Perdiction with GPE
                 if self.metaModel.lower() == 'gpe':
                     X_T = self.x_scaler[Outkey].transform(samples)
                     gp = self.gp_poly[Outkey][Inkey]
                     y_mean, y_std = gp.predict(X_T, return_std=True)
 
-                else: # Perdiction with PCE or pcekriging
+                else:
+                    # Prediction with PCE or pcekriging
                     # Assemble Psi matrix
-                    PSI_Val = self.PCE_create_Psi(self.BasisDict[Outkey][Inkey], univ_p_val)
-
-                    # Perdiction 
-                    try: # with error bar
+                    psi = self.PCE_create_Psi(self.BasisDict[Outkey][Inkey],
+                                              univ_p_val)
+                    # Prediction
+                    try:
+                        # with error bar
                         clf_poly = self.clf_poly[Outkey][Inkey]
-                        y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+                        y_mean, y_std = clf_poly.predict(psi, return_std=True)
 
-                    except: # without error bar
+                    except:
+                        # without error bar
                         coeffs = self.CoeffsDict[Outkey][Inkey]
-                        y_mean = np.dot(PSI_Val, coeffs)
+                        y_mean = np.dot(psi, coeffs)
                         y_std = np.zeros_like(y_mean)
-                        
+
                     # if self.metaModel.lower() == 'pcekriging':
                         # gp = self.gp_poly[Outkey][Inkey]
                         # y_gp_mean, y_gp_std = gp.predict(samples, return_std=True)
@@ -1979,7 +1989,7 @@ class Metamodel:
                 PCEOutputs_mean[:, idx] = y_mean
                 PCEOutputs_std[:, idx] = y_std
                 idx += 1
-            
+
             if self.index is None:
                 if self.DimRedMethod.lower() == 'pca':
                     PCA = self.pca[Outkey]
@@ -1988,32 +1998,32 @@ class Metamodel:
                 else:
                     meanPCEOutputs[Outkey] = PCEOutputs_mean
                     stdPCEOutputs[Outkey] = PCEOutputs_std
-                
+
             else:
-                index = self.index[cnt] 
+                index = self.index[cnt]
                 if name.lower() == 'calib':
                     if self.DimRedMethod.lower() == 'pca':
                         PCA = self.pca[Outkey]
                         meanPCEOutputs[Outkey] = PCA.mean_[:index] + np.dot(PCEOutputs_mean,PCA.components_)[:,:index]
                         stdPCEOutputs[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))[:,:index]
                     else:
-                        meanPCEOutputs[Outkey] = PCEOutputs_mean[:,:index]
-                        stdPCEOutputs[Outkey] = PCEOutputs_std[:,:index]
+                        meanPCEOutputs[Outkey] = PCEOutputs_mean[:, :index]
+                        stdPCEOutputs[Outkey] = PCEOutputs_std[:, :index]
                 else:
                     if self.DimRedMethod.lower() == 'pca':
                         PCA = self.pca[Outkey]
                         meanPCEOutputs[Outkey] = PCA.mean_[index:] + np.dot(PCEOutputs_mean,PCA.components_)[:,index:]
                         stdPCEOutputs[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))[:,index:]
                     else:
-                        meanPCEOutputs[Outkey] = PCEOutputs_mean[:,index:]
-                        stdPCEOutputs[Outkey] = PCEOutputs_std[:,index:]
+                        meanPCEOutputs[Outkey] = PCEOutputs_mean[:, index:]
+                        stdPCEOutputs[Outkey] = PCEOutputs_std[:, index:]
                 cnt += 1
-        
+
         if return_samples:
             return meanPCEOutputs, stdPCEOutputs, samples
         else:
-            return meanPCEOutputs, stdPCEOutputs 
-            
+            return meanPCEOutputs, stdPCEOutputs
+
     #--------------------------------------------------------------------------------------------------------
     def normpdf(self, PCEOutputs, std_PC_MC, ObservationData, Sigma2s):
         
@@ -2961,7 +2971,7 @@ class Metamodel:
                   implemented.")
         
         # Cleanup
-        #Zip the subdirectories
+        # Zip the subdirectories
         try:
             dir_name = Model.Name
             key = Model.Name + '_'
-- 
GitLab