From 7397fe305fb477e7918ebdcacc1e30b07b521422 Mon Sep 17 00:00:00 2001
From: faridm69 <faridmohammadi69@gmail.com>
Date: Fri, 17 Jul 2020 10:44:59 +0200
Subject: [PATCH] [bayesinference][MCMC] added nsteps variable.

---
 .../BayesInference/BayesInference.py |   5 +-
 .../PostProcessing/PostProcessing.py |  35 ++++--
 .../AnalytFuncValid_Test.py          | 103 ++++++++++--------
 .../Test_AnalyticalFunction.py       |  12 +-
 4 files changed, 96 insertions(+), 59 deletions(-)

diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py
index dcf817bd3..a19e42627 100644
--- a/BayesValidRox/BayesInference/BayesInference.py
+++ b/BayesValidRox/BayesInference/BayesInference.py
@@ -44,6 +44,7 @@ class BayesInference:
         self.NrofSamples = None
         self.Samples = None
         self.Samplesu = None
+        self.MCMCnSteps = None
         self.ModelOutputs = []
         self.meanPCEPriorPred = []
         self.stdPCEPriorPred = []
@@ -678,7 +679,7 @@ class BayesInference:
         else: # TODO: MCMC
             initsamples = None if self.Samples is None else self.Samples
-            nsteps = 1000 if self.NrofSamples is None else self.NrofSamples
+            nsteps = 1000 if self.MCMCnSteps is None else self.MCMCnSteps
             multiprocessing = True if self.MultiProcessMCMC is None else self.MultiProcessMCMC
 
             MCMC_ = MCMC(self, initsamples=initsamples, nwalkers=2*self.PCEModel.NofPa,
                          nsteps = nsteps, multiprocessing=multiprocessing)
@@ -746,7 +747,7 @@ class BayesInference:
                                 bbox_inches='tight')
 
         # -------- Plot logBME dist --------
-        if self.BootstrapItrNr != 1:
+        if self.Bootstrap:
             # Computing the TOM performance
             x_values = np.linspace(np.min(self.BME), np.max(self.BME), 1000)
             dof = ObservationData.shape[0]
diff --git a/BayesValidRox/PostProcessing/PostProcessing.py b/BayesValidRox/PostProcessing/PostProcessing.py
index 42bf97780..613fea138 100644
--- a/BayesValidRox/PostProcessing/PostProcessing.py
+++ b/BayesValidRox/PostProcessing/PostProcessing.py
@@ -44,7 +44,7 @@ class PostProcessing:
         self.sobol_cell = {}
         self.total_sobol = {}
         self.Likelihoods = []
-
+        self.Name = 'calib'
 
     #--------------------------------------------------------------------------------------------------------
     def PCEMoments(self, aPCE):
@@ -182,14 +182,33 @@ class PostProcessing:
                 PCEOutputs_std[:, idx] = y_std
                 idx += 1
 
-        if PCEModel.DimRedMethod.lower() == 'pca':
-            PCA = PCEModel.pca[Outkey]
-            self.PCEOutputs[Outkey] = PCA.mean_ + np.dot(PCEOutputs_mean,PCA.components_)
-            self.PCEOutputs_std[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))
+        if PCEModel.index is None:
+            if PCEModel.DimRedMethod.lower() == 'pca':
+                PCA = PCEModel.pca[Outkey]
+                self.PCEOutputs[Outkey] = PCA.mean_ + np.dot(PCEOutputs_mean,PCA.components_)
+                self.PCEOutputs_std[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))
+            else:
+                self.PCEOutputs[Outkey] = PCEOutputs_mean
+                self.PCEOutputs_std[Outkey] = PCEOutputs_std
         else:
-            self.PCEOutputs[Outkey] = PCEOutputs_mean
-            self.PCEOutputs_std[Outkey] = PCEOutputs_std
-
+            index = PCEModel.index
+            if self.Name.lower() == 'calib':
+                if PCEModel.DimRedMethod.lower() == 'pca':
+                    PCA = PCEModel.pca[Outkey]
+                    self.PCEOutputs[Outkey] = PCA.mean_[:index] + np.dot(PCEOutputs_mean,PCA.components_)[:,:index]
+                    self.PCEOutputs_std[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))[:,:index]
+                else:
+                    self.PCEOutputs[Outkey] = PCEOutputs_mean[:,:index]
+                    self.PCEOutputs_std[Outkey] = PCEOutputs_std[:,:index]
+            else:
+                if PCEModel.DimRedMethod.lower() == 'pca':
+                    PCA = PCEModel.pca[Outkey]
+                    self.PCEOutputs[Outkey] = PCA.mean_[index:] + np.dot(PCEOutputs_mean,PCA.components_)[:,index:]
+                    self.PCEOutputs_std[Outkey] = np.sqrt(np.dot(PCEOutputs_std**2, PCA.components_**2))[:,index:]
+                else:
+                    self.PCEOutputs[Outkey] = PCEOutputs_mean[:,index:]
+                    self.PCEOutputs_std[Outkey] = PCEOutputs_std[:,index:]
+
         return self.PCEOutputs, self.PCEOutputs_std
 
     #--------------------------------------------------------------------------------------------------------
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalytFuncValid_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalytFuncValid_Test.py
index c47ea65c3..22b758590 100644
--- a/BayesValidRox/tests/AnalyticalFunction/AnalytFuncValid_Test.py
+++ b/BayesValidRox/tests/AnalyticalFunction/AnalytFuncValid_Test.py
@@ -37,7 +37,7 @@ if __name__ == "__main__":
     #=====================================================
     #============= COMPUTATIONAL MODEL ================
     #=====================================================
-    Model = FuncForwardModel('AnalyticalFunction')
+    Model = FuncForwardModel()
 
     Model.Type = 'Function'
     Model.pyFile = 'AnalyticalFunction'
@@ -47,8 +47,8 @@ if __name__ == "__main__":
 
     # For Bayesian inversion synthetic data with X=[0,0]
-    Model.Observations['Time [s]'] = np.arange(0, 10, 1.) / 5
-    Model.Observations['Z'] = np.array([[2]*5])
+    Model.Observations['Time [s]'] = np.arange(0, 5, 1.) / 5
+    Model.Observations['Z'] = np.repeat([2],5)
 
     # For Checking with the MonteCarlo refrence
     # Model.MCReference['Time [s]'] = np.arange(0, 10, 1.) / 9
@@ -60,9 +60,9 @@ if __name__ == "__main__":
     #                     171.17886925, 171.17660944, 171.17481586])
 
     # For Bayesian inversion synthetic data with X=[1,1]
-    Model.ObservationsValid['Time [s]'] = np.arange(0, 10, 1.) / 5
-    Model.ObservationsValid['Z']= np.array([[2.21773563, 2.11712764,
-                                             2.02460905, 1.93849485, 1.85761462]]).reshape(-1,1)
+    Model.ObservationsValid['Time [s]'] = np.arange(0, 5, 1.) / 5
+    Model.ObservationsValid['Z']= np.array([2.21773563, 2.11712764,
+                                            2.02460905, 1.93849485, 1.85761462])
 
     #=====================================================
     #========= PROBABILISTIC INPUT MODEL ==============
     #=====================================================
@@ -71,16 +71,12 @@ if __name__ == "__main__":
     # standard deviation
     Inputs = Input()
 
-    Inputs.addMarginals()
-    Inputs.Marginals[0].Name = 'x1'
-    Inputs.Marginals[0].DistType = 'unif'
-    Inputs.Marginals[0].Parameters = [-5, 5]
-
-    Inputs.addMarginals()
-    Inputs.Marginals[1].Name = 'x2'
-    Inputs.Marginals[1].DistType = 'unif'
-    Inputs.Marginals[1].Parameters = [-5, 5]
-
+    ndim = 2
+    for i in range(ndim):
+        Inputs.addMarginals()
+        Inputs.Marginals[i].Name = '$X_{%s}$'%(i+1)
+        Inputs.Marginals[i].DistType = 'unif'
+        Inputs.Marginals[i].Parameters = [-5, 5]
 
     #=====================================================
     #====== POLYNOMIAL CHAOS EXPANSION METAMODELS ======
     #=====================================================
@@ -91,7 +87,7 @@ if __name__ == "__main__":
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
    # error (or the highest score=1-LOO)estimator is chosen as the final
    # metamodel.
-    #MetaModelOpts.MinPceDegree = 1 #3
+    MetaModelOpts.MinPceDegree = 1 #3
     MetaModelOpts.MaxPceDegree = 8 #15
 
     # q-quasi-norm 0<q<1 (default=1)
@@ -99,27 +95,37 @@ if __name__ == "__main__":
 
     # Select the sparse least-square minimization method for
     # the PCE coefficients calculation:
-    # 1)AaPCE: Adaptive aPCE 2)BRR: Bayesian Ridge Regression
-    # 3)LARS: Least angle regression 4)ARD: Bayesian ARD Regression (Non-degree Adaptive)
-
-    MetaModelOpts.RegMethod = 'BRR'
+    # 1)OLS: Ordinary Least Square 2)BRR: Bayesian Ridge Regression
+    # 3)LARS: Least angle regression 4)ARD: Bayesian ARD Regression
+    # 5)FastARD: Fast Bayesian ARD Regression
+    # 6)SGDR: Stochastic gradient descent learning
+    # MetaModelOpts.metaModel = 'PCEKriging'
+    MetaModelOpts.RegMethod = 'FastARD'
+    MetaModelOpts.DimRedMethod = 'PCA'
 
     # Print summary of the regression results
     #MetaModelOpts.DisplayFlag = True
+
     # ------ Experimental Design --------
     # Generate an experimental design of size NrExpDesign based on a latin
     # hypercube sampling of the input model or user-defined values of X and/or Y:
     MetaModelOpts.addExpDesign()
-    NrofInitSamples = 25
+    # One-shot (normal) or Sequential Adaptive (sequential) Design
+    MetaModelOpts.ExpDesign.Method = 'normal'
+    NrofInitSamples = 200 #75
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
-    MetaModelOpts.ExpDesign.SamplingMethod = 'MC' # 1)MC 2)LHS 3)PCM 4)LSCM 5)user
-    MetaModelOpts.ExpDesign.Method = 'normal' # 1) normal 2) sequential
+
+    # Sampling methods
+    # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT)
+    # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user
+    MetaModelOpts.ExpDesign.SamplingMethod = 'random'
 
     # Sequential experimental design (needed only for sequential ExpDesign)
-    MetaModelOpts.ExpDesign.MaxNSamples = 50
-    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-15
+    MetaModelOpts.ExpDesign.NrofNewSample = 1
+    MetaModelOpts.ExpDesign.MaxNSamples = 50 #150
+    MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-16
 
     # Defining the measurement error, if it's known a priori
     obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names)
@@ -171,20 +177,22 @@ if __name__ == "__main__":
     # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt']
 
     # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
-    MetaModelOpts.slicingforValidation = True
+    # MetaModelOpts.slicingforValidation = True
     MetaModelOpts.index = 5
 
     # Adaptive sparse arbitrary polynomial chaos expansion
-    PCEModelCalib, PCEModelValid = MetaModelOpts.create_PCE(Model)
-
+    # PCEModelCalib, PCEModelValid = MetaModelOpts.create_PCE(Model)
+    PCEModel = MetaModelOpts.create_PCE(Model)
 
     #=====================================================
     #========= POST PROCESSING OF METAMODELS ===========
     #=====================================================
-    PostPCE = PostProcessing(PCEModelCalib)
+    #PostPCE = PostProcessing(PCEModelCalib)
+    PostPCE = PostProcessing(PCEModel)
+    PostPCE.Name = 'Calib'
 
     PostPCE.NrofSamples = 3
     PostPCE.plotFlag = True
-    t = np.arange(0, 5, 1.) / 9
+    t = np.arange(0, 10, 1.) / 5
     PostPCE.ValidMetamodel(x_values=t, x_axis="Time [s]")
 
     # # Comparison with Monte-Carlo reference
@@ -203,7 +211,8 @@ if __name__ == "__main__":
                               SaveFig=True)
 
     # Plot the sobol indices
-    sobol_cell, total_sobol = PostPCE.SobolIndicesPCE('Z', SaveFig=True)
+    sobol_cell, total_sobol = PostPCE.SobolIndicesPCE(x_values=t, x_axis="Time [s]",
+                                                      SaveFig=True)
 
     # Compute and print RMSE error
     PostPCE.relErrorPCEModel(nSamples=3000)
@@ -212,15 +221,20 @@ if __name__ == "__main__":
     #=====================================================
     #======= Bayesian calibration with Emulator =========
     #=====================================================
-    BayesOpts = BayesInference(PCEModelCalib)
+    # BayesOpts = BayesInference(PCEModelCalib)
+    BayesOpts = BayesInference(PCEModel)
     BayesOpts.Name = 'Calib'
     BayesOpts.emulator = True
 
+    # MCMC solver
+    BayesOpts.SamplingMethod = 'MCMC'
+    BayesOpts.MCMCnSteps = 1000
+
     # Evaluation of surrogate model predictions
     BayesOpts.NrofSamples = 100000
 
     # Bootstrap for BME calulations
-    BayesOpts.Bootstrap = True
+    BayesOpts.Bootstrap = False
     BayesOpts.BootstrapItrNr = 10
     BayesOpts.BootstrapNoise = 0.05
@@ -233,7 +247,7 @@ if __name__ == "__main__":
     # (Option B)
     DiscrepancyOpts = Discrepancy('')
     DiscrepancyOpts.Type = 'Gaussian'
-    DiscrepancyOpts.Parameters = obsData**2
+    DiscrepancyOpts.Parameters = obsData[:MetaModelOpts.index]**2
 
     BayesOpts.Discrepancy = DiscrepancyOpts
@@ -259,7 +273,7 @@ if __name__ == "__main__":
     ## Plot the posterior (Model)
     with sns.axes_style("white"):
-        g = sns.jointplot(x=Bayes_Calib.Posterior_df['x1'], y=Bayes_Calib.Posterior_df['x2'],
+        g = sns.jointplot(x=Bayes_Calib.Posterior_df['$X_{1}$'], y=Bayes_Calib.Posterior_df['$X_{2}$'],
                           kind="kde", cmap='jet');
         g.ax_joint.collections[0].set_alpha(0)
         g.set_axis_labels("$X_1$", "$X_2$")
@@ -271,15 +285,18 @@ if __name__ == "__main__":
     #=====================================================
     #======= Bayesian validation with Emulator =========
     #=====================================================
-    BayesOpts_Valid = BayesInference(PCEModelValid)
+    # BayesOpts_Valid = BayesInference(PCEModelValid)
+    BayesOpts_Valid = BayesInference(PCEModel)
+
     BayesOpts_Valid.Name = 'Valid'
+    BayesOpts_Valid.SamplingMethod = 'MCMC'
+    BayesOpts_Valid.MCMCnSteps = 2000
 
     BayesOpts_Valid.Samples = Bayes_Calib.Posterior_df
-
     BayesOpts_Valid.emulator = True
 
     # Bootstrap for BME calulations
-    BayesOpts_Valid.Bootstrap = True
-    BayesOpts_Valid.BootstrapItrNr = 20
+    BayesOpts_Valid.Bootstrap = False
+    BayesOpts_Valid.BootstrapItrNr = 20
     BayesOpts_Valid.BootstrapNoise = 0.05
 
     BayesOpts_Valid.PlotPostDist = True
@@ -300,7 +317,7 @@ if __name__ == "__main__":
     ## Plot the posterior (PCEModel)
     with sns.axes_style("white"):
-        g = sns.jointplot(x=Bayes_Valid.Posterior_df['x1'], y=Bayes_Valid.Posterior_df['x2'],
+        g = sns.jointplot(x=Bayes_Valid.Posterior_df['$X_{1}$'], y=Bayes_Valid.Posterior_df['$X_{2}$'],
                           kind="kde", cmap='jet');
         g.ax_joint.collections[0].set_alpha(0)
         g.set_axis_labels("$X_1$", "$X_2$")
@@ -312,9 +329,9 @@ if __name__ == "__main__":
     #============== Save class objects =================
     #=====================================================
     with open('AnalyticFunc_Results.pkl', 'wb') as output:
-        pickle.dump(PCEModelCalib, output, pickle.HIGHEST_PROTOCOL)
+        pickle.dump(PCEModel, output, pickle.HIGHEST_PROTOCOL)
 
-        pickle.dump(PCEModelValid, output, pickle.HIGHEST_PROTOCOL)
+        # pickle.dump(PCEModelValid, output, pickle.HIGHEST_PROTOCOL)
 
         pickle.dump(PostPCE, output, pickle.HIGHEST_PROTOCOL)
diff --git a/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py b/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
index ac097c783..b0ee08ee2 100755
--- a/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
+++ b/BayesValidRox/tests/AnalyticalFunction/Test_AnalyticalFunction.py
@@ -39,7 +39,7 @@ from BayesInference.BayesInference import BayesInference, Discrepancy
 if __name__ == "__main__":
 
     # Number of parameters
-    ndim = 10 # 2, 10
+    ndim = 2 # 2, 10
 
     #=====================================================
     #============= COMPUTATIONAL MODEL ================
     #=====================================================
@@ -118,13 +118,13 @@ if __name__ == "__main__":
 
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.Method = 'sequential'
-    NrofInitSamples = 10 #75
+    NrofInitSamples = 20 #75
     MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples
 
     # Sampling methods
     # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT)
     # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user
-    MetaModelOpts.ExpDesign.SamplingMethod = 'latin_hypercube'
+    MetaModelOpts.ExpDesign.SamplingMethod = 'random'
 
     # Sequential experimental design (needed only for sequential ExpDesign)
     MetaModelOpts.ExpDesign.NrofNewSample = 1
@@ -164,16 +164,16 @@ if __name__ == "__main__":
     # -------- Exploitation ------
     # 1)'BayesOptDesign' 2)'BayesActDesign' 3)'VarOptDesign' 4)'alphabetic'
     # 5)'Space-filling'
-    MetaModelOpts.ExpDesign.ExploitMethod = 'BayesActDesign'
+    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
 
     # BayesOptDesign -> when data is available
     # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision)
     # 3)APP (A-Posterior-percision)
     # ['DKL', 'BME', 'infEntropy']
-    MetaModelOpts.ExpDesign.UtilityFunction = 'DKL'
+    # MetaModelOpts.ExpDesign.UtilityFunction = 'DKL'
 
     # VarBasedOptDesign -> when data is not available
     # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)LOOCV
-    # MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'#['EIGF', 'Entropy', 'LOOCV']
+    MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'#['EIGF', 'Entropy', 'LOOCV']
 
     # alphabetic
     # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
-- 
GitLab
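Usage sketch (appended after the signature, not part of the commit): the new MCMCnSteps attribute added in BayesInference.__init__ is read in the MCMC branch as "nsteps = 1000 if self.MCMCnSteps is None else self.MCMCnSteps", with 2*self.PCEModel.NofPa walkers. The snippet below mirrors how the updated AnalytFuncValid_Test.py drives it; the call that actually runs the inference is not visible in this diff, so the create_Inference() name used here is an assumption/placeholder.

    # Minimal sketch, assuming a trained surrogate `PCEModel` returned by
    # MetaModelOpts.create_PCE(Model), as in AnalytFuncValid_Test.py above.
    from BayesInference.BayesInference import BayesInference

    BayesOpts = BayesInference(PCEModel)
    BayesOpts.Name = 'Calib'
    BayesOpts.emulator = True

    # New option from this patch: select the MCMC sampler and its chain length.
    # Leaving MCMCnSteps as None falls back to the hard-coded default of 1000 steps.
    BayesOpts.SamplingMethod = 'MCMC'
    BayesOpts.MCMCnSteps = 2000

    Bayes_Calib = BayesOpts.create_Inference()  # assumed entry point, not shown in this patch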