From 1f7014d63c9933a0737d57b9d6649f042b6ad93a Mon Sep 17 00:00:00 2001
From: kohlhaasrebecca <rebecca.kohlhaas@outlook.com>
Date: Wed, 9 Oct 2024 12:25:24 +0200
Subject: [PATCH] [fix] Add outputs option to check_reg_quality

Also added related PCE tests.
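
A hedged usage sketch (object and variable names below are illustrative,
not part of the patch): given a PostProcessing instance `post` built from a
trained engine, precomputed model outputs can now be passed in directly so
the original model is not re-evaluated inside check_reg_quality:

    # `my_samples` and `precomputed_outputs` are assumed to exist already;
    # `precomputed_outputs` is a dict keyed by the names in Model.Output.names.
    post.check_reg_quality(samples=my_samples, outputs=precomputed_outputs)

    # Without `outputs`, the previous behaviour (evaluating the original
    # model internally) is unchanged:
    post.check_reg_quality(n_samples=1000)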
---
 src/bayesvalidrox/post_processing/post_processing.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index 10a27595a..2fc4c84de 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -846,13 +846,10 @@ class PostProcessing:
 
     # -------------------------------------------------------------------------
     def check_reg_quality(self, n_samples:int=1000, samples=None, outputs:dict=None)->None:
-        """
-        """
         """
         Checks the quality of the metamodel for single output models based on:
         https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
 
-
         Parameters
         ----------
         n_samples : int, optional
@@ -863,6 +860,10 @@ class PostProcessing:
             Output dictionary with model outputs for all given output types in
             `Model.Output.names`. The default is None.
 
+        Returns
+        -------
+        None
+
         """
         if samples is None:
             self.n_samples = n_samples
@@ -871,7 +872,10 @@ class PostProcessing:
             self.n_samples = samples.shape[0]
 
         # Evaluate the original and the surrogate model
-        y_val = self._eval_model(samples, key_str='valid')
+        if outputs is None:
+            y_val = self._eval_model(samples, key_str='valid')
+        else:
+            y_val = outputs
         y_pce_val, _ = self.engine.eval_metamodel(samples=samples)
 
         # Open a pdf for the plots
-- 
GitLab