diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index 10a27595a2ea0ac6619de25078094a94b47a3125..2fc4c84de15125a3d16910d916ba2fc7d78db98b 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -846,13 +846,10 @@ class PostProcessing:
 
     # -------------------------------------------------------------------------
     def check_reg_quality(self, n_samples:int=1000, samples=None, outputs:dict=None)->None:
-        """
-        """
         """
         Checks the quality of the metamodel for single output models based on:
         https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
 
-
         Parameters
         ----------
         n_samples : int, optional
@@ -863,6 +860,10 @@ class PostProcessing:
             Output dictionary with model outputs for all given output types in
             `Model.Output.names`. The default is None.
 
+        Returns
+        -------
+        None
+
         """
         if samples is None:
             self.n_samples = n_samples
@@ -871,7 +872,10 @@ class PostProcessing:
             self.n_samples = samples.shape[0]
 
         # Evaluate the original and the surrogate model
-        y_val = self._eval_model(samples, key_str='valid')
+        if outputs is None:
+            y_val = self._eval_model(samples, key_str='valid')
+        else:
+            y_val = outputs
         y_pce_val, _ = self.engine.eval_metamodel(samples=samples)
 
         # Open a pdf for the plots