diff --git a/examples/principal_component_analysis/example_principalcomponentanalysis.py b/examples/principal_component_analysis/example_principalcomponentanalysis.py
index 53f9c304ebe2b2e02af0e928c583c3c72a9495e2..ff9cb628f6b5af3f570b4b10b979ceabba4ae4c8 100644
--- a/examples/principal_component_analysis/example_principalcomponentanalysis.py
+++ b/examples/principal_component_analysis/example_principalcomponentanalysis.py
@@ -109,7 +109,7 @@ if __name__ == "__main__":
     # Select whether you want to preserve the spatial/temporal dependencies
     MetaModelOpts.dim_red_method = 'PCA'
     MetaModelOpts.var_pca_threshold = 99.999
-    MetaModelOpts.n_pca_components = 10#5#10
+    MetaModelOpts.n_pca_components = 9  # previously 10, 5, 10
     #MetaModelOpts.n_bootstrap_itrs = 2
 
     # Select your metamodel method
@@ -144,7 +144,7 @@ if __name__ == "__main__":
 
     # Create the engine
     engine = Engine(MetaModelOpts, Model, ExpDesign)
-    engine.train_normal()
+    engine.train_normal(verbose=True)
     print('Surrogate has been trained')
     
     # =====================================================
diff --git a/src/bayesvalidrox/surrogate_models/surrogate_models.py b/src/bayesvalidrox/surrogate_models/surrogate_models.py
index adac29fcdf60c6a23184fbc324da18283ee7133e..629bdd7a5caab1eda13894d900b991d1b2fe0ede 100644
--- a/src/bayesvalidrox/surrogate_models/surrogate_models.py
+++ b/src/bayesvalidrox/surrogate_models/surrogate_models.py
@@ -504,6 +504,10 @@ class MetaModel:
         None.
 
         """
+        # Store the given verbosity and parallelization settings
+        self.verbose = verbose
+        self.parallel = parallel
+
         # Check size of inputs
         X = np.array(X)
         for key in y.keys():
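
The assignments above imply that `fit` now takes `verbose` and `parallel` keyword arguments and stores them on the instance, so helpers such as `pca_transformation` can read `self.verbose` instead of receiving their own flag. A minimal, self-contained sketch of that pattern (a toy class, not the bayesvalidrox API):

# Toy illustration: fit() stores the flags once; helper methods read
# self.verbose rather than taking a `verbose` argument of their own.
class Surrogate:
    def fit(self, X, y, verbose=False, parallel=False):
        self.verbose = verbose
        self.parallel = parallel
        self._transform(y)

    def _transform(self, y):
        if self.verbose:
            print(f"transforming {len(y)} target values")

Surrogate().fit([[0.0]], [1.0, 2.0], verbose=True)
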
@@ -581,8 +585,7 @@ class MetaModel:
 
                     # Start transformation
                     pca, target, n_comp = self.pca_transformation(
-                        Output[b_indices], verbose=False
-                    )
+                        Output[b_indices])
                     self.pca[f'b_{b_i + 1}'][key] = pca
                     # Store the number of components for fast bootstrapping
                     if fast_bootstrap and b_i == 0:
@@ -635,13 +638,13 @@ class MetaModel:
                     if fast_bootstrap and b_i == 0:
                         first_out[key] = copy.deepcopy(out)
 
+                    # Update the coefficients with OLS during bootstrap iterations
                     if b_i > 0 and fast_bootstrap:
-                        # fast bootstrap
                         out = self.update_pce_coeffs(
                             X_train_b, target, first_out[key])
 
+                    # Store the results for each output dimension
                     for i in range(target.shape[1]):
-                        # Create a dict to pass the variables
                         self.deg_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['degree']
                         self.q_norm_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['qnorm']
                         self.coeffs_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['coeffs']
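
For context on the fast-bootstrap branch above: the first iteration runs the full adaptive regression, and every later iteration keeps the basis it found, refitting only the coefficients on the resampled data. A hedged numpy sketch of that idea on toy data (not the real update_pce_coeffs signature):

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, 50)
psi = np.column_stack([np.ones_like(x), x, x**2])  # basis frozen after iteration 0
y = 1.0 + 2.0 * x - 0.5 * x**2 + rng.normal(0.0, 0.05, 50)

coeffs = []
for _ in range(10):                        # bootstrap iterations
    idx = rng.integers(0, y.size, y.size)  # resample with replacement
    c, *_ = np.linalg.lstsq(psi[idx], y[idx], rcond=None)  # OLS refit only
    coeffs.append(c)
print(np.mean(coeffs, axis=0), np.std(coeffs, axis=0))
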
@@ -666,7 +669,7 @@ class MetaModel:
         Parameters
         ----------
         X : array of shape (n_samples, n_params)
-            Training set.
+            Training set. These samples should already be transformed.
         y : array of shape (n_samples, n_outs)
             The (transformed) model responses.
         out_dict : dict
@@ -679,9 +682,15 @@ class MetaModel:
             The updated training output dictionary.
 
         """
-        # TODO: why is X not used here?
+        # TODO: why is X not used here? -> self.univ_p_val is used instead.
+        #       This should be changed to either verify that self.univ_p_val
+        #       matches X, or to always reconstruct it from the X values.
         # Make a copy
         final_out_dict = copy.deepcopy(out_dict)
+
+        # Use the stored univariate polynomial evaluations
+        univ_p_val = self.univ_p_val
+        # univ_p_val = self.univ_basis_vals(X, n_max=np.max(self.pce_deg))
 
         # Loop over the points
         for i in range(y.shape[1]):
@@ -692,7 +701,7 @@ class MetaModel:
                 basis_indices = out_dict[i]['multi_indices']
 
                 # Evaluate the multivariate polynomials on CollocationPoints
-                psi = create_psi(basis_indices, self.univ_p_val)
+                        psi = create_psi(basis_indices, univ_p_val)
 
                 # Calculate the coefficients of the surrogate model
                 updated_out = self.regression(
@@ -752,8 +761,6 @@ class MetaModel:
 
         return univ_basis
 
-    # -------------------------------------------------------------------------
-
     # -------------------------------------------------------------------------
     def regression(self, X, y, basis_indices, reg_method=None, sparsity=True):
         """
@@ -1091,7 +1098,7 @@ class MetaModel:
         return returnVars
 
     # -------------------------------------------------------------------------
-    def pca_transformation(self, target, verbose=False):
+    def pca_transformation(self, target):
         """
         Transforms the targets (outputs) via Principal Component Analysis.
         The number of features is set by `self.n_pca_components`.
@@ -1101,9 +1108,6 @@ class MetaModel:
         ----------
         target : array of shape (n_samples,)
             Target values.
-        verbose : bool
-            Set to True to get more information during functtion call.
-            The default is False.
 
         Returns
         -------
@@ -1124,8 +1128,9 @@ class MetaModel:
         # Switch to var_pca if n_pca_components is too large
         if n_pca_components >= n_features:
             n_pca_components = None
-            print('')
-            print('WARNING: Too many components are set for PCA. The transformation will proceed based on explainable variance.')
+            if self.verbose:
+                print('')
+                print('WARNING: Too many components are set for PCA. The transformation will proceed based on the explained variance.')
         
         # Calculate n_pca_components based on decomposition of the variance
         if n_pca_components is None:
@@ -1148,13 +1153,13 @@ class MetaModel:
             n_pca_components = min(n_samples, n_features, n_components)
 
         # Print out a report
-        if verbose:
-            print()
-            print('-' * 50)
-            print(f"PCA transformation is performed with {n_pca_components}"
-                  " components.")
-            print('-' * 50)
-            print()
+        #if self.verbose:
+        #    print()
+        #    print('-' * 50)
+        #    print(f"PCA transformation is performed with {n_pca_components}"
+        #          " components.")
+        #    print('-' * 50)
+        #    print()
 
         # Fit and transform with the selected number of components
         pca = sklearnPCA(n_components=n_pca_components, svd_solver='arpack')
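
One reason the clamping above matters: sklearn's 'arpack' solver requires n_components to be strictly smaller than min(n_samples, n_features). A standalone sketch of the same selection logic on random data (the shapes and the cap of 9 components are hypothetical):

import numpy as np
from sklearn.decomposition import PCA

target = np.random.default_rng(1).normal(size=(30, 200))  # n_samples x n_features
n_samples, n_features = target.shape
n_comp = min(n_samples - 1, n_features - 1, 9)  # stay strictly below both dims
pca = PCA(n_components=n_comp, svd_solver='arpack')
reduced = pca.fit_transform(target)
print(reduced.shape, pca.explained_variance_ratio_.sum())
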
@@ -1189,7 +1194,6 @@ class MetaModel:
             method='user'
         )
         # Compute univariate bases for the given samples
-        #print('Creating the univariate basis.')
         univ_p_val = None
         if self.meta_model_type.lower() != 'gpe':
             univ_p_val = self.univ_basis_vals(
@@ -1203,7 +1207,6 @@ class MetaModel:
         std_pred_b = {}
         b_i = 0
         # Loop over bootstrap iterations
-        #print('Looping over bootstrap iterations.')
         for b_i in range(self.n_bootstrap_itrs):
 
             # Extract model dictionary
@@ -1215,8 +1218,6 @@ class MetaModel:
             # Loop over outputs
             mean_pred = {}
             std_pred = {}
-            #print(model_dict.items())
-            #print('Looping over the output timesteps')
             for output, values in model_dict.items():
 
                 mean = np.empty((len(samples), len(values)))
@@ -1285,14 +1286,13 @@ class MetaModel:
             finite_rows = np.isfinite(
                 mean_pred_all[output]).all(axis=2).all(axis=1)
             outs = np.asarray(mean_pred_all[output])[finite_rows]
-            # Compute mean
+
+            # Compute mean and standard deviation
             mean_pred[output] = np.mean(outs, axis=0)
-            # Compute standard deviation
             if self.n_bootstrap_itrs > 1:
                 std_pred[output] = np.std(outs, axis=0)
             else:
-                # TODO: this b_i seems off here
-                std_pred[output] = std_pred_b[b_i][output]
+                std_pred[output] = std_pred_b[self.n_bootstrap_itrs - 1][output]
 
         return mean_pred, std_pred
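
The aggregation at the end of the prediction routine boils down to the following, assuming mean_pred_all[output] stacks one (n_samples, n_timesteps) array per bootstrap iteration (toy shapes below):

import numpy as np

preds = [np.random.default_rng(i).normal(size=(5, 3)) for i in range(4)]
outs = np.asarray(preds)
finite_rows = np.isfinite(outs).all(axis=2).all(axis=1)  # as in the code above
outs = outs[finite_rows]
mean_pred = np.mean(outs, axis=0)
std_pred = np.std(outs, axis=0)  # only meaningful with more than one iteration
print(mean_pred.shape, std_pred.shape)
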