From a03917405522c78d2cd8542e8b8e9f0376e67c08 Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Fri, 23 Jul 2021 09:49:01 +0200
Subject: [PATCH] [BayesInference] Remove unnecessary instance attributes from the object.

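The prior predictive results (meanPCEPriorPred, stdPCEPriorPred,
ModelPriorPred) are now kept in private attributes instead of being
exposed on the object, and the unused PCEPosteriorPred, PCEAllPred_df
and ModelPosteriorPred attributes are dropped entirely. The perturbed
bootstrap data are generated once, stored in self.perturbedData and
reused by the BME loop and the MCMC sampler. The duplicated
emulator/model likelihood branches are merged into a single path, and
the TOM BME reference distribution is now sampled and plotted
alongside the model BME.
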
---
 .../BayesInference/BayesInference.py          | 78 +++++++------------
 1 file changed, 30 insertions(+), 48 deletions(-)

diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py
index 66d27b924..26321d533 100644
--- a/BayesValidRox/BayesInference/BayesInference.py
+++ b/BayesValidRox/BayesInference/BayesInference.py
@@ -58,18 +58,11 @@ class BayesInference:
         self.MCMCmoves = None
         self.MCMCverbose = False
         self.ModelOutputs = []
-        self.meanPCEPriorPred = []
-        self.stdPCEPriorPred = []
-        self.PCEPosteriorPred = []
         
         self.perturbedData = []
         self.MeasuredData = None
         self.MeasurementError = []
         self.selectedIndices = None
-
-        self.ModelPriorPred = []
-        self.ModelPosteriorPred = []
-        self.PCEAllPred_df = [] 
         
         self.logLikelihoods = []
         self.logBME = []
@@ -81,8 +74,6 @@ class BayesInference:
         self.logTOMBME = []
         self.Posterior_df = {}
         self.PCEPriorPred = {}
-        self.PCEPosteriorPred = {}
-        self.PCEAllPred_df = {}
         
         self.PlotPostPred = True
         self.PlotMAPPred = False
@@ -194,7 +185,7 @@ class BayesInference:
                 stdPCE = np.concatenate(list(std.values()))
-                covMatrix += np.diag(stdPCE**3)
+                covMatrix += np.diag(stdPCE**2)
             else:
-                stdPCE = np.concatenate(list(self.stdPCEPriorPred.values()),1)
+                stdPCE = np.concatenate(list(self.__stdPCEPriorPred.values()), 1)
                 # Expected value of variance (Assump: i.i.d stds)
                 covMatrix += np.diag(np.mean(stdPCE**2, axis=0))
         
@@ -291,7 +282,7 @@ class BayesInference:
                 covMatrix_PCE = np.zeros((NofMeasurements, NofMeasurements), float)
                 stdPCE = np.empty((SampleSize,0))
                 for outputType in OutputType:
-                    stdPCE = np.hstack((stdPCE, self.stdPCEPriorPred[outputType]))
+                    stdPCE = np.hstack((stdPCE, self.__stdPCEPriorPred[outputType]))
                 
                 # Expected value of variance
                 varPCE = np.mean(stdPCE**2, axis=0)
@@ -491,16 +482,14 @@ class BayesInference:
             Posterior_df = self.Posterior_df
         
         
-        # Prior predictive
+        # Prior and posterior predictive
         if self.emulator:
-            PriorPred = self.meanPCEPriorPred
+            PriorPred = self.__meanPCEPriorPred
             PosteriorPred, _ = PCEModel.eval_metamodel(samples=Posterior_df.to_numpy(),name=self.Name)
         else:
-            PriorPred = self.ModelPriorPred
+            PriorPred = self.__ModelPriorPred
             PosteriorPred = self.eval_Model(Samples=Posterior_df.to_numpy(),key='PostPred')
         
-        self.PCEAllPred_df = PosteriorPred
-        
         # ----- Prior Predictive -----
         if self.SamplingMethod == 'rejection':
             # Create hdf5 metadata
@@ -589,8 +578,8 @@ class BayesInference:
         nMeasurement, nOutputs = ObservationData.shape
         ntotalMeasurement = ObservationData[~np.isnan(ObservationData)].shape[0]
         BootstrapItrNr = self.BootstrapItrNr if not self.BayesLOOCV else ntotalMeasurement
-        Data = np.zeros((BootstrapItrNr,ntotalMeasurement))
-        Data[0] = ObservationData.T[~np.isnan(ObservationData.T)]
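+        # The first row keeps the unperturbed observations; the remaining rows are filled with bootstrap noise below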
+        perturbedData = np.zeros((BootstrapItrNr,ntotalMeasurement))
+        perturbedData[0] = ObservationData.T[~np.isnan(ObservationData.T)]
         
         if len(self.MeasurementError) == 0:
             try:
@@ -646,10 +635,8 @@ class BayesInference:
         
         # ---------------- Bootstrap & TOM --------------------
         if self.Bootstrap or self.BayesLOOCV:
-            if len(self.perturbedData) != 0:
-                Data = self.perturbedData
-            else:
-                # TODO: zero mean noise Adding some noise to the observation function
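+            # Perturb the observations only if no perturbed data set was provided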
+            if len(self.perturbedData) == 0:
+                # Add zero-mean noise to the observation data
                 BootstrapNoise = self.BootstrapNoise
                 for itrIdx in range(1,BootstrapItrNr):
                     data = np.zeros((nMeasurement, nOutputs))
@@ -663,8 +650,9 @@ class BayesInference:
                             data[:,idx] = np.add(ObservationData[:,idx] , np.random.normal(0, 1, ObservationData.shape[0]) * noise)
                             
                     
-                    Data[itrIdx] = data.T[~np.isnan(data.T)]
-            
+                    perturbedData[itrIdx] = data.T[~np.isnan(data.T)]
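+                # Cache the perturbed realizations for reuse, e.g. by the MCMC sampler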
+                self.perturbedData = perturbedData
+                
             # -----------------------------------------------------
             # ----- Loop over the perturbed observation data ------
             # -----------------------------------------------------
@@ -678,38 +666,31 @@ class BayesInference:
             
             # Evaluate the PCEModel
             if self.emulator:
-                self.meanPCEPriorPred, self.stdPCEPriorPred = PCEModel.eval_metamodel(samples=self.Samples,name=self.Name)
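+                # Keep the prior predictive mean/std in private (name-mangled) attributes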
+                self.__meanPCEPriorPred, self.__stdPCEPriorPred = PCEModel.eval_metamodel(samples=self.Samples,name=self.Name)
                 # Surrogate model's error using RMSE of test data
                 surrError = PCEModel.RMSE if hasattr(PCEModel,'RMSE') else None
             else:
-                self.ModelPriorPred = self.eval_Model(Samples=self.Samples,key='PriorPred')
+                self.__ModelPriorPred = self.eval_Model(Samples=self.Samples,key='PriorPred')
+                surrError = None  # no surrogate error for full-model runs
             
-            for itrIdx, data in tqdm(enumerate(Data), ascii=True, desc ="Boostraping the BME calculations"):
+            for itrIdx, data in tqdm(enumerate(self.perturbedData), ascii=True, desc="Bootstrapping the BME calculations"):
                 
                 # ---------------- Likelihood calculation ---------------- 
-                if self.emulator:
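+                # Use the emulator's prior predictions if available, otherwise the full-model runs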
+                modelEvaluations = self.__meanPCEPriorPred if self.emulator else self.__ModelPriorPred
                     
-                    # Leave one out
-                    if self.BayesLOOCV:
-                        self.selectedIndices = np.delete(list(range(len(Data))),itrIdx)
+                # Leave one out
+                if self.BayesLOOCV:
+                    self.selectedIndices = np.delete(list(range(len(self.perturbedData))),itrIdx)
                     
-                    # unknown sigma2
-                    if optSigma == 'C':
-                        logLikelihoods[:,itrIdx] = self.normpdfSigma2(self.meanPCEPriorPred, data, TotalSigma2)
-                    else:
-                        # known sigma2
-                        logLikelihoods[:,itrIdx] = self.normpdf(self.meanPCEPriorPred, data, TotalSigma2, std=surrError)
+                # unknown sigma2
+                if optSigma == 'C':
+                    logLikelihoods[:,itrIdx] = self.normpdfSigma2(modelEvaluations, data, TotalSigma2)
+                else:
+                    # known sigma2
+                    logLikelihoods[:,itrIdx] = self.normpdf(modelEvaluations, data, TotalSigma2, std=surrError)
                     
-                    # # TODO: Model error
-                    # y,std = PCEModel.eval_errormodel(self.Samples)
-                    # logLikError = self.normpdferror(y, std)
+                # # TODO: Model error
+                # y,std = PCEModel.eval_errormodel(self.Samples)
+                # logLikError = self.normpdferror(y, std)
-                else:
-                    # unknown sigma2
-                    if optSigma == 'C':
-                        logLikelihoods[:,itrIdx] = self.normpdfSigma2(self.ModelPriorPred, data, TotalSigma2)
-                    else:
-                        # known sigma2
-                        logLikelihoods[:,itrIdx] = self.normpdf(self.ModelPriorPred, data, TotalSigma2, std=surrError)
                 
                 # ---------------- BME Calculations ----------------
                 # BME (log)
@@ -770,7 +751,7 @@ class BayesInference:
             multiprocessing = False if self.MultiProcessMCMC is None else self.MultiProcessMCMC
             MCMC_ = MCMC(self, initsamples=initsamples, nwalkers=nwalkers, verbose=self.MCMCverbose,
                          nsteps = nsteps, moves=self.MCMCmoves, multiprocessing=multiprocessing)
-            self.Posterior_df = MCMC_.run_sampler(Data[0], TotalSigma2)
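+            # Run the sampler on the unperturbed (first) data realization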
+            self.Posterior_df = MCMC_.run_sampler(self.perturbedData[0], TotalSigma2)
         
         elif self.Name.lower() == 'valid':
             self.Posterior_df = pd.DataFrame(self.Samples, columns=parNames)
@@ -924,18 +905,19 @@ class BayesInference:
         if self.Bootstrap:
-            # # Computing the TOM performance
+            # Computing the TOM (theoretically optimal model) performance
             # x_values = np.linspace(np.min(self.logBME), np.max(self.logBME), 1000)
-            # dof = ObservationData.shape[0] 
+            dof = ObservationData.shape[0]
             # self.logTOMBME = stats.chi2(dof).logpdf(-1*x_values)#pdf(x_values)
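+            # Sample the TOM BME reference values from a chi-squared distribution
+            # with dof equal to the number of measurement points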
+            self.logTOMBME = stats.chi2.rvs(dof, size=self.logBME.shape[0])
             
             fig, ax = plt.subplots()
-            # sns.kdeplot(self.logTOMBME, ax=ax, color="green", shade=True)
+            sns.kdeplot(self.logTOMBME, ax=ax, color="green", shade=True)
             sns.kdeplot(self.logBME, ax=ax, color="blue", shade=True,label='Model BME')
             
             ax.set_xlabel('log$_{10}$(BME)')
             ax.set_ylabel('Probability density')
             
             from matplotlib.patches import Patch
-            legend_elements = [#Patch(facecolor='green', edgecolor='green',label='TOM BME'),
+            legend_elements = [Patch(facecolor='green', edgecolor='green',label='TOM BME'),
                                 Patch(facecolor='blue', edgecolor='blue',label='Model BME')]
             ax.legend(handles=legend_elements)
         
-- 
GitLab