From 91d117fb9851d1c316ce392b97a5c7c5af587506 Mon Sep 17 00:00:00 2001
From: Alina Lacheim <a.lacheim@outlook.de>
Date: Mon, 2 Dec 2024 23:22:11 +0100
Subject: [PATCH] Generalize _plot_validation_multi to arbitrary metamodel types

Support GPE metamodels alongside PCE/aPCE in the validation plots, pass the
metamodel mean and std to _plot_validation_multi explicitly instead of caching
them on PostProcessing, move the RMSE helper from Engine into
surrogate_models.supplementary, and drop the deprecated _plot_validation.

---
 .../example_analytical_function.py            |   2 +-
 .../Beam9points_1/SSBeam_Deflection_1.inp     |   8 +-
 examples/ishigami/example_ishigami.py         |   3 +-
 .../post_processing/post_processing.py        | 150 ++++++------------
 src/bayesvalidrox/pylink/pylink.py            |   6 +
 src/bayesvalidrox/surrogate_models/engine.py  |  33 ++--
 .../surrogate_models/supplementary.py         |  19 +++
 7 files changed, 91 insertions(+), 130 deletions(-)

diff --git a/examples/analytical-function/example_analytical_function.py b/examples/analytical-function/example_analytical_function.py
index 4507de7e6..03e84fb0b 100644
--- a/examples/analytical-function/example_analytical_function.py
+++ b/examples/analytical-function/example_analytical_function.py
@@ -224,7 +224,7 @@ if __name__ == "__main__":
     # =====================================================
     # =========  POST PROCESSING OF METAMODELS  ===========
     # =====================================================
-    PostPCE = PostProcessing(engine)
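+    # Write the post-processing figures as .png files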
+    PostPCE = PostProcessing(engine, out_format="png")
 
     # Plot to check validation visually.
     PostPCE.valid_metamodel(n_samples=1)
diff --git a/examples/beam/Beam9points_1/SSBeam_Deflection_1.inp b/examples/beam/Beam9points_1/SSBeam_Deflection_1.inp
index fec3b20b0..594ec4fa5 100644
--- a/examples/beam/Beam9points_1/SSBeam_Deflection_1.inp
+++ b/examples/beam/Beam9points_1/SSBeam_Deflection_1.inp
@@ -1,6 +1,6 @@
 % Input file for the simply supported beam model
-1.5557e-01 % b in m
-2.8052e-01 % h in m
+1.2759e-01 % b in m
+2.8948e-01 % h in m
 5 % L in m
-2.2558e+10 % E in Pa
-9.2858e+03 % p in N/m
+2.1572e+10 % E in Pa
+9.2437e+03 % p in N/m
diff --git a/examples/ishigami/example_ishigami.py b/examples/ishigami/example_ishigami.py
index c74a6bcdb..16ec14319 100644
--- a/examples/ishigami/example_ishigami.py
+++ b/examples/ishigami/example_ishigami.py
@@ -162,7 +162,8 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign = ExpDesign
     engine = Engine(MetaModelOpts, Model, ExpDesign)
     engine.start_engine()
-    engine.train_sequential()
+    # engine.train_sequential()  # alternative: sequential training
+    engine.train_normal()
 
     # Save PCE models
     with open(f'PCEModel_{Model.name}.pkl', 'wb') as output:
diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index 12f09eed1..1f4a8ff3c 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -11,12 +11,14 @@ import numpy as np
 import pandas as pd
 from scipy import stats
 from sklearn.linear_model import LinearRegression
-from sklearn.metrics import mean_squared_error, r2_score
+from sklearn.metrics import r2_score
 import matplotlib.pyplot as plt
 from matplotlib import ticker
 from matplotlib.offsetbox import AnchoredText
 from matplotlib.patches import Patch
 
+from bayesvalidrox.surrogate_models.supplementary import root_mean_squared_error
+
 # Load the mplstyle
 plt.style.use(os.path.join(os.path.split(__file__)[0], "../", "bayesvalidrox.mplstyle"))
 
@@ -69,8 +71,8 @@ class PostProcessing:
         self.stds = None
 
-        # TODO: Check if these could be removed!
-        self.pce_out_mean = None
-        self.pce_out_std = None
 
     # -------------------------------------------------------------------------
     def plot_moments(self, plot_type: str = None):
@@ -225,12 +227,10 @@ class PostProcessing:
         if model_out_dict is not None:
             self.model_out_dict = model_out_dict
         else:
-            self.model_out_dict, _ = self.engine.Model.run_model_parallel(
-                samples, key_str="valid"
-            )
-        self.pce_out_mean, self.pce_out_std = self.engine.eval_metamodel(samples)
+            self.model_out_dict = self._eval_model(samples, key_str="valid")
+        out_mean, out_std = self.engine.eval_metamodel(samples)
 
-        self._plot_validation_multi()
+        self._plot_validation_multi(out_mean=out_mean, out_std=out_std)
 
         # TODO: should this be kept here?
         # Zip the subdirectories
@@ -293,12 +293,10 @@ class PostProcessing:
         self.valid_error = {}
         # Loop over the keys and compute RMSE error.
         for key in self.engine.out_names:
-            # Root mean square
-            self.rmse[key] = mean_squared_error(
+            # Root mean squared error
+            self.rmse[key] = root_mean_squared_error(
                 outputs[key],
-                metamod_outputs[key],
-                squared=False,
-                multioutput="raw_values",
+                metamod_outputs[key]
             )
             # Validation error
             self.valid_error[key] = (self.rmse[key] ** 2) / np.var(
@@ -984,76 +982,7 @@ class PostProcessing:
                 plt.close(fig)
 
     # -------------------------------------------------------------------------
-    def _plot_validation(self):
-        """
-        Plots outputs for visual comparison of metamodel outputs with that of
-        the (full) original model.
-
-        Returns
-        -------
-        None.
-
-        """
-        warnings.warn(
-            "This function will be deprecated in the future! Use PostProcessing._plot_validation_multi() instead!",
-            DeprecationWarning,
-        )
-        # This function currently only supports PCE/aPCE
-        metamod = self.engine.MetaModel
-        if not hasattr(metamod, "meta_model_type"):
-            raise AttributeError("This evaluation only support PCE-type models!")
-        if metamod.meta_model_type.lower() not in ["pce", "apce"]:
-            raise AttributeError("This evaluation only support PCE-type models!")
-
-        # get the samples
-        y_pce_val = self.pce_out_mean
-        y_val = self.model_out_dict
-
-        fig = plt.figure()
-        # Fit the data(train the model)
-        for key in y_pce_val.keys():
-            regression_model = LinearRegression()
-            regression_model.fit(y_pce_val[key], y_val[key])
-
-            # Predict
-            x_new = np.linspace(np.min(y_pce_val[key]), np.max(y_val[key]), 100)
-            y_predicted = regression_model.predict(x_new[:, np.newaxis])
-
-            plt.scatter(y_pce_val[key], y_val[key], color="gold", linewidth=2)
-            plt.plot(x_new, y_predicted, color="k")
-
-            # Calculate the adjusted R_squared and RMSE
-            # the total number of explanatory variables in the model
-            # (not including the constant term)
-            # TODO: this should work without PCE-specific values
-            length_list = []
-            for key, value in metamod._coeffs_dict["b_1"][key].items():
-                length_list.append(len(value))
-            n_predictors = min(length_list)
-
-            n_samples = y_pce_val[key].shape[0]
-
-            r_2 = r2_score(y_pce_val[key], y_val[key])
-            adj_r2 = 1 - (1 - r_2) * (n_samples - 1) / (n_samples - n_predictors - 1)
-            rmse = mean_squared_error(y_pce_val[key], y_val[key], squared=False)
-
-            plt.annotate(
-                f"RMSE = {rmse:.3f}\n Adjusted $R^2$ = {adj_r2:.3f}",
-                xy=(0.05, 0.85),
-                xycoords="axes fraction",
-            )
-            plt.ylabel("Original Model")
-            plt.xlabel("PCE Model")
-            plt.grid()
-            plot_name = key.replace(" ", "_")
-            fig.savefig(
-                f"./{self.out_dir}/Model_vs_PCEModel_{plot_name}.{self.out_format}",
-                bbox_inches="tight",
-            )
-            plt.close()
-
-    # -------------------------------------------------------------------------
-    def _plot_validation_multi(self):
+    def _plot_validation_multi(self, out_mean, out_std):
         """
         Plots outputs for visual comparison of metamodel outputs with that of
         the (full) multioutput original model
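+
+        Parameters
+        ----------
+        out_mean : dict
+            Mean metamodel predictions for each output key.
+        out_std : dict
+            Standard deviations of the metamodel predictions for each output key.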
@@ -1077,20 +1006,27 @@ class PostProcessing:
-        # This function currently only supports PCE/aPCE
+        # This function currently only supports PCE/aPCE and GPE
         if not hasattr(self.engine.MetaModel, "meta_model_type"):
-            raise AttributeError("This evaluation only support PCE-type models!")
+            raise AttributeError("This evaluation only supports PCE- and GPE-type models!")
-        if self.engine.MetaModel.meta_model_type.lower() not in ["pce", "apce"]:
+        if self.engine.MetaModel.meta_model_type.lower() not in ["pce", "apce", "gpe"]:
-            raise AttributeError("This evaluation only support PCE-type models!")
+            raise AttributeError("This evaluation only supports PCE- and GPE-type models!")
 
         # List of markers and colors
         color = cycle((["b", "g", "r", "y", "k"]))
         marker = cycle(("x", "d", "+", "o", "*"))
-
+
+        # Abbreviation of the metamodel type, used in plot labels and file names
+        model_type = self.engine.MetaModel.meta_model_type.lower()
+        model_label = "GPE" if model_type == "gpe" else "PCE"
+
         # Plot the model vs PCE model
         fig = plt.figure()
         for _, key in enumerate(self.engine.out_names):
-            y_pce_val = self.pce_out_mean[key]
-            y_pce_val_std = self.pce_out_std[key]
+            y_metamod_val = out_mean[key]
+            y_metamod_val_std = out_std[key]
             y_val = self.model_out_dict[key]
 
             for idx in range(y_val.shape[0]):
                 plt.plot(
                     self.x_values,
@@ -1099,26 +1035,37 @@ class PostProcessing:
                     marker=next(marker),
                     label="$Y_{%s}^M$" % (idx + 1),
                 )
-
-                plt.plot(
-                    self.x_values,
-                    y_pce_val[idx],
-                    color=next(color),
-                    marker=next(marker),
-                    linestyle="--",
-                    label="$Y_{%s}^{PCE}$" % (idx + 1),
+                plt.plot(
+                    self.x_values,
+                    y_metamod_val[idx],
+                    color=next(color),
+                    marker=next(marker),
+                    linestyle="--",
+                    label="$Y_{{{}}}^{{{}}}$".format(idx + 1, model_label),
+                )
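+                # Shaded band: ~95% interval (mean +/- 1.96*std), assuming a
+                # Gaussian predictive distribution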
                 plt.fill_between(
                     self.x_values,
-                    y_pce_val[idx] - 1.96 * y_pce_val_std[idx],
-                    y_pce_val[idx] + 1.96 * y_pce_val_std[idx],
+                    y_metamod_val[idx] - 1.96 * y_metamod_val_std[idx],
+                    y_metamod_val[idx] + 1.96 * y_metamod_val_std[idx],
                     color=next(color),
                     alpha=0.15,
                 )
 
             # Calculate the RMSE
-            rmse = mean_squared_error(y_pce_val, y_val, squared=False)
-            r_2 = r2_score(y_pce_val[idx].reshape(-1, 1), y_val[idx].reshape(-1, 1))
+            rmse = np.mean(root_mean_squared_error(y_val, y_metamod_val))
+            r_2 = r2_score(y_metamod_val[idx].reshape(-1, 1), y_val[idx].reshape(-1, 1))
 
             plt.annotate(
                 f"RMSE = {rmse:.3f}\n $R^2$ = {r_2:.3f}",
@@ -1131,7 +1078,6 @@ class PostProcessing:
             plt.grid()
             key = key.replace(" ", "_")
             fig.savefig(
-                f"./{self.out_dir}/Model_vs_PCEModel_{key}.{self.out_format}",
-                bbox_inches="tight",
+                f"./{self.out_dir}/Model_vs_{lower}Model_{key}.{self.out_format}", bbox_inches="tight"
             )
             plt.close()
diff --git a/src/bayesvalidrox/pylink/pylink.py b/src/bayesvalidrox/pylink/pylink.py
index be6f9c82a..56821a682 100644
--- a/src/bayesvalidrox/pylink/pylink.py
+++ b/src/bayesvalidrox/pylink/pylink.py
@@ -340,8 +340,14 @@ class PyLinkForwardModel(object):
                 break
             else:
                 # Run command
+                print(f"Running command: ./../{command}")
+
                 Process = os.system(f'./../{command}')
                 if Process != 0:
+                    print(f"Command failed with exit code: {Process}")
+
                     print('\nMessage 1:')
-                    print(f'\tIf the value of \'{Process}\' is a non-zero value'
-                          ', then compilation problems occur \n' % Process)
+                    print(f'\tIf the value of \'{Process}\' is a non-zero value'
+                          ', then compilation problems occur \n')
diff --git a/src/bayesvalidrox/surrogate_models/engine.py b/src/bayesvalidrox/surrogate_models/engine.py
index 6695a1c43..7a1cdbe4c 100644
--- a/src/bayesvalidrox/surrogate_models/engine.py
+++ b/src/bayesvalidrox/surrogate_models/engine.py
@@ -21,6 +21,7 @@ from .polynomial_chaos import PCE
 from.surrogate_models import MetaModel as MM
 from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
 from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
+from bayesvalidrox.surrogate_models.supplementary import root_mean_squared_error
 
 
 class Engine:
@@ -1011,7 +1012,7 @@ class Engine:
         # TODO: check that the self-implemented version of RMSE without the additional settings
         #       returns the same results as the former mse - scikit-version!
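+        #       A hypothetical spot check, assuming 2D (samples x outputs) arrays
+        #       and scikit-learn < 1.6:
+        #           np.allclose(root_mean_squared_error(ref, approx),
+        #                       mean_squared_error(ref, approx, squared=False,
+        #                                          multioutput='raw_values'))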
         for key in self.out_names:
-            rms_error[key] = self._root_mean_squared_error(
+            rms_error[key] = root_mean_squared_error(
                 valid_model_runs[key], valid_PCE_runs[key]#,
                 #multioutput='raw_values', # TODO: how does this parameter translate?
                 #sample_weight=None, # No weights
@@ -1033,25 +1034,6 @@ class Engine:
 
         return rms_error, valid_error
 
-    def _root_mean_squared_error(self, reference, approximation): 
-        """
-        Implementation of RMSE
-
-        Parameters
-        ----------
-        reference : np.array
-            Reference values
-        approximation : np.array
-            Approximation values
-
-        Returns
-        -------
-        rmse : np.array
-            RMSE of approximation against the reference along the samples.
-        """
-        rmse = np.sqrt(np.mean(np.power(reference-approximation,2), axis = 0))
-        return rmse
-
     # -------------------------------------------------------------------------
     def _error_Mean_Std(self):
         """
@@ -1079,8 +1061,15 @@ class Engine:
         for output in self.out_names:
             # Compute the error between mean and std of MetaModel and OrigModel
             # TODO: write test that checks if mc_reference exists
-            rmse_mean += self._root_mean_squared_error(self.Model.mc_reference['mean'], means[output])
-            rmse_std += self._root_mean_squared_error(self.Model.mc_reference['std'], stds[output])
+            if self.Model.mc_reference is None:
+                raise AttributeError(
+                    'Model.mc_reference needs to be given to calculate the '
+                    'error in mean and std!')
+
+            rmse_mean += root_mean_squared_error(self.Model.mc_reference['mean'], means[output])
+            rmse_std += root_mean_squared_error(self.Model.mc_reference['std'], stds[output])
             
         return rmse_mean, rmse_std
 
diff --git a/src/bayesvalidrox/surrogate_models/supplementary.py b/src/bayesvalidrox/surrogate_models/supplementary.py
index fa1042f6d..bd2aa6ce6 100644
--- a/src/bayesvalidrox/surrogate_models/supplementary.py
+++ b/src/bayesvalidrox/surrogate_models/supplementary.py
@@ -315,3 +315,22 @@ def create_psi(basis_indices, univ_p_val):
             raise err
     return psi
 
+
+def root_mean_squared_error(reference, approximation):
+    """
+    Computes the root mean squared error (RMSE) along the sample axis.
+
+    Parameters
+    ----------
+    reference : np.array
+        Reference values
+    approximation : np.array
+        Approximation values
+
+    Returns
+    -------
+    rmse : np.array
+        RMSE of approximation against the reference along the samples.
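+
+    Examples
+    --------
+    A small sanity check; one RMSE value is returned per output column:
+
+    >>> import numpy as np
+    >>> ref = np.array([[1.0, 2.0], [3.0, 4.0]])
+    >>> approx = np.array([[1.5, 2.0], [2.5, 4.0]])
+    >>> root_mean_squared_error(ref, approx)
+    array([0.5, 0. ])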
+    """
+    # Mean over the samples (axis 0) yields one RMSE value per output column
+    rmse = np.sqrt(np.mean(np.power(reference - approximation, 2), axis=0))
+    return rmse
+
-- 
GitLab