diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index f87d0009631684ac29376f6d6084f7473dc8afdb..e09904f48469474528a56a7096b103f2b84db2f5 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -849,7 +849,7 @@ class PostProcessing:
         return self.total_sobol
 
     # -------------------------------------------------------------------------
     def check_reg_quality(self, n_samples: int = 1000, samples=None, outputs: dict = None) -> None:
         """
         Checks the quality of the metamodel for single output models based on:
         https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
@@ -877,7 +877,7 @@ class PostProcessing:
         # Evaluate the original and the surrogate model
         if outputs is None:
             y_val = self._eval_model(samples, key_str='valid')
         else:
             y_val = outputs
         y_pce_val, _ = self.engine.eval_metamodel(samples=samples)
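
For reviewers, a minimal usage sketch of the new `outputs` option follows. It assumes a PostProcessing instance (here called `post`) whose engine has already been trained, and that `outputs` is a dict of precomputed model evaluations keyed by output name with one row per validation sample; the variable names, shapes, and the CSV file are illustrative assumptions, not part of the patch.

    import numpy as np

    # Assumed setup: `post` is a trained PostProcessing instance; the sample
    # matrix below (1000 samples, 3 input parameters) is illustrative only.
    samples = np.random.uniform(low=0.0, high=1.0, size=(1000, 3))

    # Hypothetical precomputed model evaluations, keyed by output name.
    outputs = {"Z": np.loadtxt("model_runs_Z.csv", delimiter=",")}

    # With `outputs` given, check_reg_quality skips the internal model
    # re-evaluation (self._eval_model) and compares the metamodel predictions
    # against these values directly.
    post.check_reg_quality(n_samples=1000, samples=samples, outputs=outputs)

    # Without `outputs`, the original model is evaluated internally as before.
    post.check_reg_quality(n_samples=1000, samples=samples)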