diff --git a/.coverage b/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..6ca70d8588916209ef155569ae86387872500843 Binary files /dev/null and b/.coverage differ diff --git a/examples/OHagan-function/test_OHagan.py b/examples/OHagan-function/example_OHagan.py similarity index 100% rename from examples/OHagan-function/test_OHagan.py rename to examples/OHagan-function/example_OHagan.py diff --git a/examples/analytical-function/test_analytical_function.py b/examples/analytical-function/example_analytical_function.py old mode 100755 new mode 100644 similarity index 100% rename from examples/analytical-function/test_analytical_function.py rename to examples/analytical-function/example_analytical_function.py diff --git a/examples/analytical-function/util/AnalytFuncValid_Test.py b/examples/analytical-function/util/AnalytFuncValid_Test.py deleted file mode 100644 index b172de0baf73399ad8a1180ab9e8cfe20b4fb7b8..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/AnalytFuncValid_Test.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Aug 9 16:25:43 2019 -This example is for the calibration and validation scenario. -@author: farid - """ - -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -plt.style.use('seaborn-white') -plt.rcParams.update({'font.size': 18}) -plt.rc('font', family='sans-serif', serif='Arial') -plt.rc('figure', figsize = (24, 16)) -plt.rc('text', usetex=True) -plt.rc('xtick', labelsize=18) -plt.rc('ytick', labelsize=18) -plt.rc('axes', labelsize=18) - -import seaborn as sns -import os - -try: - import cPickle as pickle -except ModuleNotFoundError: - import pickle - -from PyLink.FuncForwardModel import FuncForwardModel -from surrogate_models.Input import Input -from surrogate_models.surrogate_models import Metamodel -from PostProcessing.PostProcessing import PostProcessing -from BayesInference.BayesInference import BayesInference, Discrepancy - -if __name__ == "__main__": - - #===================================================== - #============= COMPUTATIONAL MODEL ================ - #===================================================== - Model = FuncForwardModel() - - Model.Type = 'Function' - Model.pyFile = 'AnalyticalFunction' - Model.Name = 'AnalyticFunc' - - Model.Output.Names = ['Z'] - - - # For Bayesian inversion synthetic data with X=[0,0] - Model.Observations['Time [s]'] = np.arange(0, 5, 1.) / 5 - Model.Observations['Z'] = np.repeat([2],5) - - # For Checking with the MonteCarlo refrence -# Model.MCReference['Time [s]'] = np.arange(0, 10, 1.) / 9 -# Model.MCReference['mean'] = np.array([127.13880108, 127.12652988, 127.12144698, -# 127.11754674, 127.11425868, 127.11136184, 127.1087429, -# 127.10633454, 127.10409289, 127.10198748]) -# Model.MCReference['std'] = np.array([171.2520775, 171.21550753, 171.20352691, -# 171.19559223, 171.18975202, 171.18525035, 171.18169953, -# 171.17886925, 171.17660944, 171.17481586]) - - # For Bayesian inversion synthetic data with X=[1,1] - Model.ObservationsValid['Time [s]'] = np.arange(0, 5, 1.) 
/ 5 - Model.ObservationsValid['Z']= np.array([2.21773563, 2.11712764, - 2.02460905, 1.93849485, 1.85761462]) - - #===================================================== - #========= PROBABILISTIC INPUT MODEL ============== - #===================================================== - # Define the uncertain parameters with their mean and - # standard deviation - Inputs = Input() - - ndim = 2 - for i in range(ndim): - Inputs.add_marginals() - Inputs.Marginals[i].Name = '$X_{%s}$'%(i+1) - Inputs.Marginals[i].DistType = 'unif' - Inputs.Marginals[i].Parameters = [-5, 5] - - #===================================================== - #========== DEFINITION OF THE METAMODEL ============ - #===================================================== - MetaModelOpts = Metamodel(Inputs) - - # Select if you want to preserve the spatial/temporal depencencies - MetaModelOpts.DimRedMethod = 'PCA' - - # Select your metamodel method - # 1) PCE (Polynomial Chaos Expansion) 2) GPE (Gaussian Process Emulator) - # 3) PCEKriging (PCE + GPE) - MetaModelOpts.metaModel = 'GPE' - - # ------------------------------------------------ - # ------------- PCE Specification ---------------- - # ------------------------------------------------ - # Select the sparse least-square minimization method for - # the PCE coefficients calculation: - # 1)OLS: Ordinary Least Square 2)BRR: Bayesian Ridge Regression - # 3)LARS: Least angle regression 4)ARD: Bayesian ARD Regression - # 5)FastARD: Fast Bayesian ARD Regression - # 6)SGDR: Stochastic gradient descent learning - MetaModelOpts.RegMethod = 'FastARD' - - # Specify the max degree to be compared by the adaptive algorithm: - # The degree with the lowest Leave-One-Out cross-validation (LOO) - # error (or the highest score=1-LOO)estimator is chosen as the final - # metamodel. 
- MetaModelOpts.MinPceDegree = 1 #3 - MetaModelOpts.MaxPceDegree = 8 #15 - - # q-quasi-norm 0<q<1 (default=1) - MetaModelOpts.q = 1 #0.75 - - # Print summary of the regression results - #MetaModelOpts.DisplayFlag = True - - # ------------------------------------------------ - # ------ Experimental Design Configuration ------- - # ------------------------------------------------ - - # Generate an experimental design of size NrExpDesign based on a latin - # hypercube sampling of the input model or user-defined values of X and/or Y: - MetaModelOpts.addExpDesign() - - # One-shot (normal) or Sequential Adaptive (sequential) Design - MetaModelOpts.ExpDesign.Method = 'normal' - NrofInitSamples = 200 #75 - MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples - - # Sampling methods - # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) - # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user - MetaModelOpts.ExpDesign.SamplingMethod = 'random' - - # Sequential experimental design (needed only for sequential ExpDesign) - MetaModelOpts.ExpDesign.NrofNewSample = 1 - MetaModelOpts.ExpDesign.MaxNSamples = 50 #150 - MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-16 - - # Defining the measurement error, if it's known a priori - obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names) - DiscrepancyOpts = Discrepancy('') - DiscrepancyOpts.Type = 'Gaussian' - DiscrepancyOpts.Parameters = obsData ** 2 - MetaModelOpts.Discrepancy = DiscrepancyOpts - - # Plot the posterior snapshots for SeqDesign - MetaModelOpts.ExpDesign.PostSnapshot = False - MetaModelOpts.ExpDesign.stepSnapshot = 1 - MetaModelOpts.ExpDesign.MAP = (0,0) - MetaModelOpts.ExpDesign.parNames = ['$X_1$', '$X_2$'] - - # ------------------------------------------------ - # ------- Sequential Design configuration -------- - # ------------------------------------------------ - # 1) 'None' 2) 'epsilon-decreasing' - MetaModelOpts.ExpDesign.TradeOffScheme = 'epsilon-decreasing' - #MetaModelOpts.ExpDesign.nReprications = 2 - # -------- Exploration ------ - #1)'Voronoi' 2)'MC' 3)'LHS' 4)'dual annealing' - MetaModelOpts.ExpDesign.ExploreMethod = 'Voronoi' - - # Use when 'dual annealing' chosen - MetaModelOpts.ExpDesign.MaxFunItr = 200 - - # Use when 'MC' or 'LHS' chosen - MetaModelOpts.ExpDesign.NCandidate = 100 - MetaModelOpts.ExpDesign.NrofCandGroups = 4 - - # -------- Exploitation ------ - # 1)'BayesOptDesign' 2)'ActiveLearning' 3)'alphabetic' 4)'None' (Space-filling) - MetaModelOpts.ExpDesign.ExploitMethod = 'None' - - # BayesOptDesign -> when data is available - # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision) - # 3)APP (A-Posterior-percision) - MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP'] - - # ActiveLearning - # 1)ALM (Active learning MacKay) 2)ALC (Active learning Cohn) - # 3) - #MetaModelOpts.ExpDesign.UtilityFunction = 'ALM' #['ALM', 'ALC'] - - # alphabetic - # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality) - # 3)K-Opt (K-Optimality) - # MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt'] - - # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - # MetaModelOpts.slicingforValidation = True - MetaModelOpts.index = 5 - # Adaptive sparse arbitrary polynomial chaos expansion - # PCEModelCalib, PCEModelValid = MetaModelOpts.create_metamodel(Model) - PCEModel = MetaModelOpts.create_metamodel(Model) - - #===================================================== - #========= POST PROCESSING OF METAMODELS =========== - 
#===================================================== - #PostPCE = PostProcessing(PCEModelCalib) - PostPCE = PostProcessing(PCEModel) - - PostPCE.Name = 'Calib' - PostPCE.validMetamodel(nValidSamples= 3) - - # Compute the moments and compare with the Monte-Carlo reference -# PostPCE.plotMoments() - - if MetaModelOpts.metaModel != 'GPE': - # Plot the evolution of the KLD,BME, and Modified LOOCV error - if MetaModelOpts.ExpDesign.Method == 'sequential': - PostPCE.seqDesignDiagnosticPlots() - - # Plot the sobol indices - sobol_cell, total_sobol = PostPCE.sobolIndicesPCE() - - # Compute and print RMSE error - PostPCE.accuracyCheckMetaModel(nSamples=3000) - - - #===================================================== - #======= Bayesian calibration with Emulator ========= - #===================================================== - # BayesOpts = BayesInference(PCEModelCalib) - BayesOpts = BayesInference(PCEModel) - BayesOpts.Name = 'Calib' - BayesOpts.emulator = True - - # BME Bootstrap - BayesOpts.Bootstrap = True - BayesOpts.NrofSamples = 100000 - BayesOpts.BootstrapItrNr = 10 - BayesOpts.BootstrapNoise = 0.05 - - # Select the inference method - BayesOpts.SamplingMethod = 'MCMC' - BayesOpts.MCMCnSteps = 1000 - BayesOpts.MCMCnwalkers = 50 - BayesOpts.MultiProcessMCMC = True - - BayesOpts.PlotPostDist = True - BayesOpts.PlotPostPred = True - - # ----- Define the discrepancy model ------- - obsData = pd.DataFrame(Model.Observations, columns=Model.Output.Names) - # (Option B) - DiscrepancyOpts = Discrepancy('') - DiscrepancyOpts.Type = 'Gaussian' - DiscrepancyOpts.Parameters = obsData[:MetaModelOpts.index]**2 - - BayesOpts.Discrepancy = DiscrepancyOpts - - # (Option C) -# DiscInputOpts = Input() -# -# DiscInputOpts.addMarginals() -# DiscInputOpts.Marginals[0].Name = 'Sigma2' -# DiscInputOpts.Marginals[0].DistType = 'unif' -# DiscInputOpts.Marginals[0].Parameters = [0, 5e-1] -# -# DiscrepancyOpts = Discrepancy(DiscInputOpts) -# -# BayesOpts.Discrepancy = DiscrepancyOpts - - - Bayes_Calib = BayesOpts.create_Inference() - - # Make directory for saving the plots/outputs - newpath = (r'Outputs_'+MetaModelOpts.ExpDesign.Method) - if not os.path.exists(newpath): os.makedirs(newpath) - - - ## Plot the posterior (Model) - with sns.axes_style("white"): - g = sns.jointplot(x=Bayes_Calib.Posterior_df['$X_{1}$'], y=Bayes_Calib.Posterior_df['$X_{2}$'], - kind="kde", cmap='jet'); - g.ax_joint.collections[0].set_alpha(0) - g.set_axis_labels("$X_1$", "$X_2$") - - g.savefig('./'+newpath+'/Posterior_PCEModel_Calib_'+MetaModelOpts.ExpDesign.Method+'.svg') - plt.close() - - - #===================================================== - #======= Bayesian validation with Emulator ========= - #===================================================== - # BayesOpts_Valid = BayesInference(PCEModelValid) - BayesOpts_Valid = BayesInference(PCEModel) - - BayesOpts_Valid.Name = 'Valid' - BayesOpts_Valid.emulator = True - - # BME Bootstrap - BayesOpts_Valid.Bootstrap = True - BayesOpts_Valid.MCMCinitSamples = Bayes_Calib.Posterior_df - BayesOpts_Valid.BootstrapItrNr = 10 - BayesOpts_Valid.BootstrapNoise = 0.05 - - # Select the inference method - BayesOpts_Valid.SamplingMethod = 'MCMC' - BayesOpts_Valid.MCMCnSteps = 1000 - BayesOpts_Valid.MCMCnwalkers = 50 - BayesOpts.MultiProcessMCMC = True - - BayesOpts_Valid.PlotPostDist = True - BayesOpts_Valid.PlotPostPred = True - - # ----- Define the discrepancy model ------- - obsDataValid = pd.DataFrame(Model.ObservationsValid, columns=Model.Output.Names) - # (Option B) - DiscrepancyOpts = 
Discrepancy('') - DiscrepancyOpts.Type = 'Gaussian' - DiscrepancyOpts.Parameters = 0.01 * obsDataValid**2 - - BayesOpts_Valid.Discrepancy = DiscrepancyOpts - - Bayes_Valid = BayesOpts_Valid.create_Inference() - - - ## Plot the posterior (PCEModel) - with sns.axes_style("white"): - g = sns.jointplot(x=Bayes_Valid.Posterior_df['$X_{1}$'], y=Bayes_Valid.Posterior_df['$X_{2}$'], - kind="kde", cmap='jet'); - g.ax_joint.collections[0].set_alpha(0) - g.set_axis_labels("$X_1$", "$X_2$") - g.savefig('./'+newpath+'/Posterior_PCEModel_Valid_'+MetaModelOpts.ExpDesign.Method+'.svg') - plt.close() - - - #===================================================== - #============== Save class objects ================= - #===================================================== - with open('AnalyticFunc_Results.pkl', 'wb') as output: - pickle.dump(PCEModel, output, pickle.HIGHEST_PROTOCOL) - - # pickle.dump(PCEModelValid, output, pickle.HIGHEST_PROTOCOL) - - pickle.dump(PostPCE, output, pickle.HIGHEST_PROTOCOL) - - pickle.dump(Bayes_Calib, output, pickle.HIGHEST_PROTOCOL) - - pickle.dump(Bayes_Valid, output, pickle.HIGHEST_PROTOCOL) - -# del PCEModel -# del PostPCE -# del Bayes - - # Load the objects -# with open('AnalyticFunc_Results.pkl', 'rb') as input: -# PCEModel = pickle.load(input) -# PostPCE = pickle.load(input) -# Bayes_PCE = pickle.load(input) -# Bayes_Model = pickle.load(input) \ No newline at end of file diff --git a/examples/analytical-function/util/AnalyticFunc_Demo.py b/examples/analytical-function/util/AnalyticFunc_Demo.py deleted file mode 100644 index dea66f67dd8907fcc2560bd56a184f9e65f17a38..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/AnalyticFunc_Demo.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Aug 9 16:25:43 2019 - -@author: farid -""" - -import numpy as np -import os -import matplotlib.pyplot as plt -plt.style.use('seaborn-white') -plt.rcParams.update({'font.size': 16}) -plt.rc('font', family='sans-serif', serif='Arial') -plt.rc('figure', figsize = (24, 16)) -plt.rc('text', usetex=True) -plt.rc('xtick', labelsize=16) -plt.rc('ytick', labelsize=16) -plt.rc('axes', labelsize=16) - - -from PyLink.FuncForwardModel import FuncForwardModel -from surrogate_models.Input import Input -from surrogate_models.surrogate_models import Metamodel - -if __name__ == "__main__": - - #===================================================== - #============= COMPUTATIONAL MODEL ================ - #===================================================== - Model = FuncForwardModel() - - Model.Type = 'Function' - Model.pyFile = 'AnalyticalFunction' - Model.Name = 'AnalyticFunc' - - Model.Output.Names = ['Z'] - - - #===================================================== - #========= PROBABILISTIC INPUT MODEL ============== - #===================================================== - # Define the uncertain parameters with their mean and - # standard deviation - Inputs = Input() - - Inputs.addMarginals() - Inputs.Marginals[0].Name = 'x1' - Inputs.Marginals[0].DistType = 'unif' - Inputs.Marginals[0].Parameters = [-5, 5] - - Inputs.addMarginals() - Inputs.Marginals[1].Name = 'x2' - Inputs.Marginals[1].DistType = 'unif' - Inputs.Marginals[1].Parameters = [-5, 5] - - - #===================================================== - #======== PCE METAMODELS with adaptive aPCE ======== - #===================================================== - MetaModelOpts = Metamodel(Inputs) - - # Specify the max degree to be compared by the adaptive 
algorithm: - # The degree with the lowest Leave-One-Out cross-validation (LOO) - # error (or the highest score=1-LOO)estimator is chosen as the final - # metamodel. - MetaModelOpts.MaxPceDegree = 10 - - # Select the sparse least-square minimization method for - # the PCE coefficients calculation: - # 1)OLS: Ordinary Least Square 2)BRR: Bayesian Ridge Regression - # 3)LARS: Least angle regression 4)ARD: Bayesian ARD Regression - # 5)SGDR: Stochastic gradient descent learning - MetaModelOpts.RegMethod = 'OLS' - - # Print summary of the regression results - #MetaModelOpts.DisplayFlag = True - - # ------ Experimental Design -------- - # Generate an experimental design of size NrExpDesign based on a latin - # hypercube sampling of the input model or user-defined values of X and/or Y: - MetaModelOpts.addExpDesign() - - # One-shot (normal) or Sequential Adaptive (sequential) Design - MetaModelOpts.ExpDesign.Method = 'normal' - NrofInitSamples = 100 - MetaModelOpts.ExpDesign.NrSamples = NrofInitSamples - - # Sampling methods - # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) - # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user - MetaModelOpts.ExpDesign.SamplingMethod = 'latin_hypercube' - - # Sequential experimental design (needed only for sequential ExpDesign) - MetaModelOpts.ExpDesign.MaxNSamples = 100 - MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-4 - - # ------------------------------------------------ - # ------- Sequential Design configuration -------- - # ------------------------------------------------ - # -------- Exploration ------ - MetaModelOpts.ExpDesign.ExploreMethod = 'MC' - MetaModelOpts.ExpDesign.NCandidate = 500 - MetaModelOpts.ExpDesign.NrofCandGroups = 4 - # -------- Optimality criteria: alphabetic ------ - MetaModelOpts.ExpDesign.ExploitMethod = 'alphabetic' - - # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality) - # 3)K-Opt (K-Optimality) - MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt'#['D-Opt', 'A-Opt', 'K-Opt'] - - # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - # Adaptive sparse arbitrary polynomial chaos expansion - PCEModel_OLS = MetaModelOpts.create_metamodel(Model) - - # Extract the basis and coefficients - Basis_OLS = PCEModel_OLS.BasisDict['Z']['y_2'] - COFFS_OLS = PCEModel_OLS.CoeffsDict['Z']['y_2'] - - #===================================================== - #=========== DEMO OF BAYESPCE METAMODEL =========== - #===================================================== - fig1=plt.figure() - barWidth = 0.3 - plt.title("Bar plot of the weights (OLS)") - - # Create cyan bars - r = np.arange(len(Basis_OLS)) - plt.bar(r, COFFS_OLS, width = barWidth, color = 'cyan', bottom=0.001, - edgecolor = 'black', capsize=7, label='Cofficients') - - # general layout - plt.xticks([r + barWidth for r in range(len(r))], ['term'+str(i+1) for i in range(len(Basis_OLS))]) - - plt.xlabel("Features") - plt.ylabel("Values of the weights") - plt.yscale('symlog', linthreshy=0.1) #('log') - plt.hlines(0,xmin=r.min() , xmax=r.max() ,color='k') - plt.xticks(rotation=45) - plt.legend(loc="upper left") - plt.show() - - # Make directory for saving the plots/outputs - newpath = (r'Outputs_demo') - if not os.path.exists(newpath): os.makedirs(newpath) - - ## Save the plot the posterior (Model) - fig1.savefig('./'+newpath+'/Coeffs_AaPCE.svg', bbox_inches='tight') - plt.close() - - - #===================================================== - #=========== PCE METAMODELS with BRR aPCE ========== - 
#===================================================== - BRRMetaModelOpts = Metamodel(Inputs) - BRRMetaModelOpts.MaxPceDegree = 10 - BRRMetaModelOpts.RegMethod = 'BRR' - - # ------ Experimental Design -------- - BRRMetaModelOpts.addExpDesign() - BRRMetaModelOpts.ExpDesign.NrSamples = PCEModel_OLS.ExpDesign.X.shape[0] - BRRMetaModelOpts.ExpDesign.SamplingMethod = 'user' - BRRMetaModelOpts.ExpDesign.X = PCEModel_OLS.ExpDesign.X - BRRMetaModelOpts.ExpDesign.Y = PCEModel_OLS.ExpDesign.Y - BRRMetaModelOpts.ExpDesign.Method = 'normal' - - # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - # Adaptive sparse arbitrary polynomial chaos expansion - PCEModel_BRR = BRRMetaModelOpts.create_metamodel(Model) - - - # Extract the basis and coefficients - Basis_BRR = PCEModel_BRR.BasisDict['Z']['y_2'] - clf_poly = PCEModel_BRR.clf_poly['Z']['y_2'] - - COFFS_BRR = clf_poly.coef_ - # Find the error(std) for coeffs - COFFSError_BRR = clf_poly.sigma_.diagonal() - - #===================================================== - #=========== DEMO OF BAYESPCE METAMODEL ============ - #===================================================== - fig2=plt.figure() - barWidth = 0.3 - plt.title("Bar plot of the weights (BayesPCE)") - - # Create cyan bars - r = np.arange(len(Basis_BRR)) - plt.bar(r, COFFS_BRR, width = barWidth, color = 'cyan', bottom=0.001, - edgecolor = 'black', yerr=COFFSError_BRR, capsize=7, label='Cofficients') - - # general layout - plt.xticks([r + barWidth for r in range(len(r))], ['term'+str(i+1) for i in range(len(Basis_BRR))]) - - plt.xlabel("Features") - plt.ylabel("Values of the weights") - plt.yscale('symlog', linthreshy=0.1) #('log') - plt.hlines(0,xmin=r.min() , xmax=r.max() ,color='k') - plt.xticks(rotation=45) - plt.legend(loc="upper left") - plt.show() - - ## Save the plot the posterior (Model) - fig2.savefig('./'+newpath+'/Coeffs_BRR.svg', bbox_inches='tight') - plt.close() - - diff --git a/examples/analytical-function/util/AnalyticalFunction.py b/examples/analytical-function/util/AnalyticalFunction.py deleted file mode 100644 index c4ed8e05c3ecd841f16d6b2d4757b12d5ecad325..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/AnalyticalFunction.py +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Nov 20 14:48:43 2019 - -@author: farid -""" -import numpy as np -import scipy.stats as stats -import matplotlib.pyplot as plt -import scipy.stats as st -import seaborn as sns - -def AnalyticalFunction(xx, t=None): - """ - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - % - % Analytical Non-Gaussian Function - % - % Authors: Farid Mohammadi, University of Stuttgart - % Sergey Oladyshkin, University of Stuttgart - % Questions/Comments: Please email Farid Mohammadi at: - % farid.mohammadi@iws.uni-stuttgart.de - % - % Copyright 2019. Farid Mohammadi, University of Stuttgart. - % - % THERE IS NO WARRANTY, EXPRESS OR IMPLIED. WE DO NOT ASSUME ANY LIABILITY - % FOR THE USE OF THIS SOFTWARE. If software is modified to produce - % derivative works, such modified software should be clearly marked. - % Additionally, this program is free software; you can redistribute it - % and/or modify it under the terms of the GNU General Public License as - % published by the Free Software Foundation; version 2.0 of the License. 
- % Accordingly, this program is distributed in the hope that it will be - % useful, but WITHOUT ANY WARRANTY; without even the implied warranty - % of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - % General Public License for more details. - % - % For function details and reference information, see: - % https://doi.org/10.3390/e21111081 - % - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - % - % OUTPUT AND INPUTS: - % - % Output = row vector of time vectors (s, t) - % Its structure is: - % y(t_1), y(t_2), ..., y(t_dt) - % xx = [x1, x2, ..., xn] - % xn ~ Uinform(-5, 5) - % t = vector of times (optional), with default value - % ( k − 1 ) /9 and k = 1,..., 10 - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - """ - nParamSets, nParams = xx.shape - - if t is None: t = np.arange(0, 10, 1.) / 9 - - term1 = (xx[:,0]**2 + xx[:,1] - 1)**2 - - term2 = xx[:,0]**2 - - term3 = 0.1 * xx[:,0] * np.exp(xx[:,1]) - - term5 = 0 - if nParams > 2: - for i in range(2, nParams): - term5 = term5 + xx[:,i]**3/i - - const = term1 + term2 + term3 + 1 + term5 - - # Compute time dependent term - term4 = np.zeros((nParamSets,len(t))) - for idx in range(nParamSets): - term4[idx] = -2 * xx[idx,0] * np.sqrt(0.5*t) - - Output = term4 + np.repeat(const[:,None], len(t), axis=1) - - return np.vstack((t, Output)) - -if __name__ == "__main__": - - MCSize = 10000 #1000000 - ndim = 10 - sigma = 2 - - # ------------------------------------------------------------------------- - # ----------------------- Synthetic data generation ----------------------- - # ------------------------------------------------------------------------- - t = np.arange(0, 10, 1.) / 9 - - MAP = np.zeros((1, ndim)) - synthethicData = AnalyticalFunction(MAP, t=t) - - # ------------------------------------------------------------------------- - # ---------------------- Generate Prior distribution ---------------------- - # ------------------------------------------------------------------------- - - xx = np.zeros((MCSize, ndim)) - - params = (-5,5) - - for idxDim in range(ndim): - lower, upper = params - xx[:,idxDim] = stats.uniform(loc=lower, scale=upper-lower).rvs(size=MCSize) - - # ------------------------------------------------------------------------- - # ------------- BME and Kullback-Leibler Divergence ----------------------- - # ------------------------------------------------------------------------- - Outputs = AnalyticalFunction(xx, t=t) - - cov_matrix = np.diag(np.repeat(sigma**2, synthethicData.shape[1])) - - Likelihoods = st.multivariate_normal.pdf(Outputs[1:], mean=synthethicData[1], cov=cov_matrix) - - sns.kdeplot(np.log(Likelihoods[Likelihoods>0]), shade=True, color="g", label='Ref. 
Likelihood') - - normLikelihood = Likelihoods / np.nanmax(Likelihoods) - # Random numbers between 0 and 1 - unif = np.random.rand(1, MCSize)[0] - - # Reject the poorly performed prior - accepted = normLikelihood >= unif - - # Prior-based estimation of BME - logBME = np.log(np.nanmean(Likelihoods)) - print('\nThe Naive MC-Estimation of BME is %.5f.'%(logBME)) - - # Posterior-based expectation of likelihoods - postExpLikelihoods = np.mean(np.log(Likelihoods[accepted])) - - # Calculate Kullback-Leibler Divergence - KLD = postExpLikelihoods - logBME - print("The Kullback-Leibler divergence estimation is %.5f."%KLD) - - # ------------------------------------------------------------------------- - # ----------------- Save the arrays as .npy files ------------------------- - # ------------------------------------------------------------------------- - if MCSize > 500000: - np.save("data/refBME_KLD_"+str(ndim)+".npy", (logBME, KLD)) - np.save("data/mean_"+str(ndim)+".npy", np.mean(Outputs[1:],axis=0)) - np.save("data/std_"+str(ndim)+".npy", np.std(Outputs[1:],axis=0)) - else: - np.save("data/Prior_"+str(ndim)+".npy", xx) - np.save("data/origModelOutput_"+str(ndim)+".npy", Outputs[1:]) - np.save("data/validLikelihoods_"+str(ndim)+".npy", Likelihoods) diff --git a/examples/analytical-function/util/PCE_vs_Chaospy.py b/examples/analytical-function/util/PCE_vs_Chaospy.py deleted file mode 100755 index 87a71cea2389e6b639fb5927baf0c6b8cb488d2b..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/PCE_vs_Chaospy.py +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Aug 13 09:53:11 2020 - -@author: farid -""" -import sys, os -import numpy as np -import scipy.stats as stats -from itertools import cycle -import pandas as pd -try: - import cPickle as pickle -except ModuleNotFoundError: - import pickle -from sklearn import linear_model as lm -from matplotlib.backends.backend_pdf import PdfPages -import matplotlib.pyplot as plt -plt.rcParams.update({'font.size': 24}) -plt.rc('figure', figsize = (24, 16)) -plt.rc('font', family='serif', serif='Arial') -plt.rc('axes', grid = True) -plt.rc('text', usetex=True) -plt.rc('xtick', labelsize=24) -plt.rc('ytick', labelsize=24) -plt.rc('axes', labelsize=24) -plt.rc('axes', linewidth=2) -plt.rc('axes', grid=True) -plt.rc('grid', linestyle="-") -plt.rc('savefig', dpi=2000) - -import matplotlib -matplotlib.use('agg') -# Local -sys.path.insert(0,'./../bayesian-validation/BayesValidRox/') - -# Batch script -# sys.path.insert(0,'./../../bayesian-validation/BayesValidRox/') - -from PyLink.FuncForwardModel import FuncForwardModel -from surrogate_models.Input import Input -from surrogate_models.surrogate_models import Metamodel -from PostProcessing.PostProcessing import PostProcessing - - -# Number of parameters -ndim = 10 # 2, 10 - -#===================================================== -#============= COMPUTATIONAL MODEL ================ -#===================================================== -Model = FuncForwardModel() - -Model.Type = 'Function' -Model.pyFile = 'AnalyticalFunction' -Model.Name = 'AnalyticFunc' - -Model.Output.Names = ['Z'] -OutputNames = ['Z'] - -# For Bayesian inversion synthetic data with X=[0,0] -Model.Observations['Time [s]'] = np.arange(0, 10, 1.) / 9 -Model.Observations['Z'] = np.repeat([2],10) - -# For Checking with the MonteCarlo refrence -Model.MCReference['Time [s]'] = np.arange(0, 10, 1.) 
/ 9 -Model.MCReference['mean'] = np.load("../data/mean_"+str(ndim)+".npy") -Model.MCReference['std'] = np.load("../data/std_"+str(ndim)+".npy") - -#===================================================== -#========= PROBABILISTIC INPUT MODEL ============== -#===================================================== -# Define the uncertain parameters with their mean and -# standard deviation -Inputs = Input() - -for i in range(ndim): - Inputs.addMarginals() - Inputs.Marginals[i].Name = '$X_{%s}$'%(i+1) - Inputs.Marginals[i].DistType = 'unif' - Inputs.Marginals[i].Parameters = [-5, 5] - -# arbitrary polynomial chaos -# inputParams = np.load('../data/InputParameters_{}.npy'.format(ndim)) -# for i in range(ndim): -# Inputs.addMarginals() -# Inputs.Marginals[i].Name = '$X_{%s}$'%(i+1) -# Inputs.Marginals[i].InputValues = inputParams[:,i] - -#===================================================== -#========== DEFINITION OF THE METAMODEL ============ -#===================================================== -MetaModelOpts = Metamodel(Inputs) - -# Select if you want to preserve the spatial/temporal depencencies -MetaModelOpts.DimRedMethod = 'PCA' -MetaModelOpts.varPCAThreshold = 99.999 - -# Select your metamodel method -# 1) PCE (Polynomial Chaos Expansion) 2) GPE (Gaussian Process Emulator) -# 3) PCEKriging (PCE + GPE) -MetaModelOpts.metaModel = 'PCEKriging' - -# ------------------------------------------------ -# ------------- PCE Specification ---------------- -# ------------------------------------------------ -# Select the sparse least-square minimization method for -# the PCE coefficients calculation: -# 1)OLS: Ordinary Least Square 2)BRR: Bayesian Ridge Regression -# 3)LARS: Least angle regression 4)ARD: Bayesian ARD Regression -# 5)FastARD: Fast Bayesian ARD Regression -MetaModelOpts.RegMethod = 'FastARD' - -# Specify the max degree to be compared by the adaptive algorithm: -# The degree with the lowest Leave-One-Out cross-validation (LOO) -# error (or the highest score=1-LOO)estimator is chosen as the final -# metamodel. 
-MetaModelOpts.MinPceDegree = 6 -MetaModelOpts.MaxPceDegree = 6 -# q-quasi-norm 0<q<1 (default=1) -MetaModelOpts.q = 1.0 if ndim<5 else 0.75 - -# ------------------------------------------------ -# ------ Experimental Design Configuration ------- -# ------------------------------------------------ -# Generate an experimental design of size NrExpDesign based on a latin -# hypercube sampling of the input model or user-defined values of X and/or Y: -MetaModelOpts.addExpDesign() - -# One-shot (normal) or Sequential Adaptive (sequential) Design -MetaModelOpts.ExpDesign.Method = 'normal' -MetaModelOpts.ExpDesign.NrSamples = 200 - -# Sampling methods -# 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) -# 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user -MetaModelOpts.ExpDesign.SamplingMethod = 'random' - -# >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< -# Adaptive sparse arbitrary polynomial chaos expansion -MetaModelOpts.index = 181 -PCEModel = MetaModelOpts.create_metamodel(Model) - -# PCEModel = BayesObj.PCEModel -# Extract slicing index -# Extract the experimental design -EDX = PCEModel.ExpDesign.X -index = PCEModel.index -EDYDict = PCEModel.ExpDesign.Y -JDist = PCEModel.ExpDesign.JDist -X_train = PCEModel.ExpDesign.X -EDY = PCEModel.ExpDesign.Y -#===================================================== -#========= POST PROCESSING OF METAMODELS =========== -#===================================================== -PostPCE = PostProcessing(PCEModel) - -# Compute the moments and compare with the Monte-Carlo reference -PostPCE.plotMoments() - -# Plot the sobol indices -sobol_cell, total_sobol = PostPCE.sobolIndicesPCE() - - -#===================================================== -#============ METAMODELS WITH CHAOSPY ============= -#===================================================== -import chaospy -from surrogate_models.RegressionFastARD import RegressionFastARD - -def PCATransformation(Output): - - # Transform via Principal Component Analysis - from sklearn.decomposition import PCA as sklearnPCA - - n_samples, n_features = Output.shape - covar_matrix = sklearnPCA(n_components=None, svd_solver='full') - covar_matrix.fit(Output) - var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_, decimals=5)*100) - try: - selected_n_components = np.where(var>=99.0)[0][0] + 1 - except: - selected_n_components = min(n_samples, n_features) - - nComponents = min(n_samples, n_features, selected_n_components) - - pca = sklearnPCA(n_components=nComponents, svd_solver='full', whiten=False) - OutputMatrix = pca.fit_transform(Output) - - return pca, OutputMatrix - -def fit_regression(methodConfig, X_train,Y_train,pca=False): - - from sklearn.multioutput import MultiOutputRegressor - - if pca: - PCA, Y_train = PCATransformation(Y_train) - else: - PCA = None - - deg = methodConfig['deg'] - distribution = methodConfig['distribution'] - q = methodConfig['q'] - regmethod = methodConfig['regmethod'] - # Generate Orthogonal Polynomial Expansion: - orthogonal_expansion = chaospy.orth_ttr(deg, distribution, cross_truncation=q) - - PCEModel={} - PCEModel['config'] = methodConfig - newPsi = orthogonal_expansion(*X_train.T).T - - if regmethod =='FastARD': - PCEModel = MultiOutputRegressor(RegressionFastARD(fit_intercept=False)).fit(newPsi, Y_train) - else: - PCEModel = MultiOutputRegressor(lm.BayesianRidge(fit_intercept=False)).fit(newPsi, Y_train) - - # for idx in range(Y_train.shape[1]): - # clf_poly = RegressionFastARD(fit_intercept=False) - - # PCEModel[idx] = 
clf_poly.fit(newPsi, Y_train[:,idx]) - - return PCA, PCEModel - -def approx_model(model, X,pca=None): - - global methodConfig - - y_Hat, y_Std = [], [] - deg = methodConfig['deg'] - distribution = methodConfig['distribution'] - q = methodConfig['q'] - - # Generate Orthogonal Polynomial Expansion: - orthogonal_expansion = chaospy.orth_ttr(deg, distribution, cross_truncation=q) - - psi = orthogonal_expansion(*X.T).T - - y_Hat = model.predict(psi).T - - # for idx in list(model.keys())[1:]: - # y_hat, y_std = model[idx].predict(psi, return_std=True) - # y_Hat.append(y_hat) - # y_Std.append(y_std) - - if pca is None: - return np.array(y_Hat).T - else: - return pca.inverse_transform(np.array(y_Hat).T)# , np.array(y_Std).T - -# Select a Distributions: -distribution = JDist #chaospy.J(*[chaospy.Uniform(-5.0,5.0)]*ndim) - -methodConfig = dict() -y_chaos = dict() -methodConfig['regmethod'] = 'FastARD' #'FastARD' -methodConfig['deg'] = PCEModel.MaxPceDegree -methodConfig['distribution'] = distribution -methodConfig['q'] = MetaModelOpts.q - - -# Fit regression model -PCA = True -for key in OutputNames: - - pca, PCEModel_chaospy = fit_regression(methodConfig, X_train,EDY[key],pca=PCA) - y_chaos[key] = approx_model(PCEModel_chaospy, X_train,pca=pca) - - -#===================================================== -#================= Visualization =================== -#===================================================== -# List of markers and colors -Color = ['k','b', 'g', 'r'] -Marker = 'x' - -Time = EDY['x_values'] - -nStes = 1 - -# Run pcemodel -y_hat, y_std = PCEModel.eval_metamodel(samples=EDX[:nStes]) #(nsamples=100, samples=EDX, name=case) - -# create a PdfPages object -pdf = PdfPages('./PCE.pdf') -fig = plt.figure() - -for key in OutputNames: - - fig, ax = plt.subplots() - - - # Plot ExpDesign - for i, output in enumerate(EDY[key][:nStes]): - plt.plot(Time,output, alpha=0.35) - - - # Plot PCE with BayesValidRox - for i, output in enumerate(y_hat[key]): - plt.plot(Time,output, ls='-', marker='*') - - - # Plot PCE with Chaospy - for i, output in enumerate(y_chaos[key][:nStes]): - plt.plot(Time,output, ls='-', marker='*') - - plt.legend(loc='best', frameon=True) - plt.xlabel('Time [s]') - plt.ylabel(key) - - # save the current figure - pdf.savefig(fig, bbox_inches='tight') - - # Destroy the current plot - plt.clf() - -pdf.close() - - diff --git a/examples/analytical-function/util/Psi_BayesValidRox.npy b/examples/analytical-function/util/Psi_BayesValidRox.npy deleted file mode 100644 index 43373abb38884fbe6f5e8c509443b916a3a0aea7..0000000000000000000000000000000000000000 Binary files a/examples/analytical-function/util/Psi_BayesValidRox.npy and /dev/null differ diff --git a/examples/analytical-function/util/Psi_Chaospy.npy b/examples/analytical-function/util/Psi_Chaospy.npy deleted file mode 100644 index dc7974367157dba6f4775091aa09acd314b4c8fe..0000000000000000000000000000000000000000 Binary files a/examples/analytical-function/util/Psi_Chaospy.npy and /dev/null differ diff --git a/examples/analytical-function/util/__pycache__/AnalyticalFunction.cpython-38.pyc b/examples/analytical-function/util/__pycache__/AnalyticalFunction.cpython-38.pyc deleted file mode 100644 index e89c6708d7e9e374610f5dec468e2d7c3a94036f..0000000000000000000000000000000000000000 Binary files a/examples/analytical-function/util/__pycache__/AnalyticalFunction.cpython-38.pyc and /dev/null differ diff --git a/examples/analytical-function/util/dynamic_image.py b/examples/analytical-function/util/dynamic_image.py deleted file mode 
100644 index 8c1478164c37739fc1c7b42d4fe4d108c696deee..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/dynamic_image.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -================================================= -Animated image using a precomputed list of images -================================================= - -""" - -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.animation as animation -plt.rcParams.update({'font.size': 16}) -plt.rc('figure', figsize = (12, 8)) -plt.rc('font', family='sans-serif', serif='Arial') -plt.rc('axes', grid = True) -plt.rc('text', usetex=True) -plt.rc('xtick', labelsize=16) -plt.rc('ytick', labelsize=16) -plt.rc('axes', labelsize=16) - -def posteriorPlot(ax,figPosterior,Posterior, MAP, parNames): - - # Initialization - lw = 3. - BoundTuples = [(-5,5), (-5,5)] - NofPa = len(MAP) - - # This is the true mean of the second mode that we used above: - value1 = MAP - - # This is the empirical mean of the sample: - value2 = np.mean(Posterior, axis=0) - - if NofPa == 2: - - #figPosterior, ax = plt.subplots() - plt.hist2d(Posterior[:,0], Posterior[:,1], bins=(200, 200), - range=np.array([BoundTuples[0],BoundTuples[1]]), cmap=plt.cm.jet) - - plt.xlabel(parNames[0]) - plt.ylabel(parNames[1]) - - plt.xlim(BoundTuples[0]) - plt.ylim(BoundTuples[1]) - - ax.axvline(value1[0], color="g", lw=lw) - ax.axhline(value1[1], color="g", lw=lw) - ax.plot(value1[0], value1[1], "sg", lw=lw+1) - - #ax.axvline(value2[0], ls='--', color="r", lw=lw) - #ax.axhline(value2[1], ls='--', color="r", lw=lw) - #ax.plot(value2[0], value2[1], "sr", lw=lw+1) - - else: - import corner - figPosterior = corner.corner(Posterior, labels=parNames, - show_titles=True, title_kwargs={"fontsize": 12}) - - # Extract the axes - axes = np.array(figPosterior.axes).reshape((NofPa, NofPa)) - - # Loop over the diagonal - for i in range(NofPa): - ax = axes[i, i] - ax.axvline(value1[i], color="g") - ax.axvline(value2[i], ls='--', color="r") - - # Loop over the histograms - for yi in range(NofPa): - for xi in range(yi): - ax = axes[yi, xi] - ax.axvline(value1[xi], color="g") - ax.axvline(value2[xi], ls='--', color="r") - ax.axhline(value1[yi], color="g") - ax.axhline(value2[yi], ls='--', color="r") - ax.plot(value1[xi], value1[yi], "sg") - ax.plot(value2[xi], value2[yi], "sr") - - plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1) - #figPosterior.set_size_inches((10,10)) - return figPosterior - -fig, ax = plt.subplots() -dirPath = '/home/farid/scientific/bayesian-validation/BayesValidRox/tests/AnalyticalFunction/Outputs_SeqPosteriorComparison/' - -nRuns= 35 -Posteriors = {} -nameList = ['init'] + list(range(1,nRuns+1)) -for i, name in enumerate(nameList): -# x = np.random.normal(size=50000) -# y = x * 3 * (i+1) + np.random.normal(size=50000) -# Posteriors[i] = np.stack((x,y)).T - Posteriors[i] = np.load(dirPath+'SeqPosterior_%s.npy'%name) - - -# animation function -def animate(i, Posteriors): - - Posterior = Posteriors[i] - - cont = posteriorPlot(ax,fig,Posterior, MAP = (0,0), parNames=['$X_1$', '$X_2$']) - - plt.title(r'Iteration = %i' % i) - - return cont - -anim = animation.FuncAnimation(fig, animate, fargs=(Posteriors,))#, frames=nRuns) -anim.save('animation.mp4') - - - -#fig = plt.figure() -# -# -#def f(x, y): -# return np.sin(x) + np.cos(y) -# -#x = np.linspace(0, 2 * np.pi, 120) -#y = np.linspace(0, 2 * np.pi, 100).reshape(-1, 1) -## ims is a list of lists, each row is a list of artists to draw in the -## current frame; here we are just animating one 
artist, the image, in -## each frame -#ims = [] -#for i in range(60): -# x += np.pi / 15. -# y += np.pi / 20. -# im = plt.imshow(f(x, y), animated=True) -# ims.append([im]) -# -#ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True, -# repeat_delay=1000) -# -## To save the animation, use e.g. -## -#ani.save("movie.mp4") -# -# or -# -# from matplotlib.animation import FFMpegWriter -# writer = FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800) -# ani.save("movie.mp4", writer=writer) - -#plt.show() diff --git a/examples/analytical-function/util/indices_bayesValid.npy b/examples/analytical-function/util/indices_bayesValid.npy deleted file mode 100644 index 087a2608fb8da023977760ab15582c61c7a8ff6d..0000000000000000000000000000000000000000 Binary files a/examples/analytical-function/util/indices_bayesValid.npy and /dev/null differ diff --git a/examples/analytical-function/util/indices_chaospy.npy b/examples/analytical-function/util/indices_chaospy.npy deleted file mode 100644 index 087a2608fb8da023977760ab15582c61c7a8ff6d..0000000000000000000000000000000000000000 Binary files a/examples/analytical-function/util/indices_chaospy.npy and /dev/null differ diff --git a/examples/analytical-function/util/svg_gif.py b/examples/analytical-function/util/svg_gif.py deleted file mode 100644 index 4430de69c299a495bba9642ea5d5115e7452bdda..0000000000000000000000000000000000000000 --- a/examples/analytical-function/util/svg_gif.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon May 16 10:51:20 2022 - -@author: farid -""" - -import imageio -import os -import re - - -def tryint(s): - try: - return int(s) - except: - return s - - -def alphanum_key(s): - """ Turn a string into a list of string and number chunks. - "z23a" -> ["z", 23, "a"] - """ - return [tryint(c) for c in re.split('([0-9]+)', s)] - - -def sort_nicely(input_list): - """ Sort the given list in the way that humans expect. 
- """ - input_list.sort(key=alphanum_key) - - -path = '../adaptivePlots' -file_ext = 'pdf' - - -filenames = [] -for file in os.listdir(path): - if file.endswith(f'.{file_ext}'): - filenames.append(os.path.join(path, file)) -sort_nicely(filenames) - -images = [] -for filename in filenames: - images.append(imageio.imread(filename)) -imageio.mimsave(f'{path}/movie.gif', images) diff --git a/examples/beam/test_beam.py b/examples/beam/example_beam.py similarity index 100% rename from examples/beam/test_beam.py rename to examples/beam/example_beam.py diff --git a/examples/borehole/test_borehole.py b/examples/borehole/example_borehole.py similarity index 100% rename from examples/borehole/test_borehole.py rename to examples/borehole/example_borehole.py diff --git a/examples/ishigami/test_ishigami.py b/examples/ishigami/example_ishigami.py similarity index 100% rename from examples/ishigami/test_ishigami.py rename to examples/ishigami/example_ishigami.py diff --git a/examples/model-comparison/test_model_comparison.py b/examples/model-comparison/example_model_comparison.py similarity index 100% rename from examples/model-comparison/test_model_comparison.py rename to examples/model-comparison/example_model_comparison.py diff --git a/examples/pollution/test_pollution.py b/examples/pollution/example_pollution.py similarity index 100% rename from examples/pollution/test_pollution.py rename to examples/pollution/example_pollution.py diff --git a/examples/pollution/test_valid_pollution.py b/examples/pollution/example_valid_pollution.py similarity index 100% rename from examples/pollution/test_valid_pollution.py rename to examples/pollution/example_valid_pollution.py diff --git a/src/bayesvalidrox/surrogate_models/exp_designs.py b/src/bayesvalidrox/surrogate_models/exp_designs.py index a078aec9c19c5a85a637ba50d02c48459ceea6d3..2b602b0704ab898ef53d4e672ef4373097450a85 100644 --- a/src/bayesvalidrox/surrogate_models/exp_designs.py +++ b/src/bayesvalidrox/surrogate_models/exp_designs.py @@ -10,6 +10,85 @@ from tqdm import tqdm from .apoly_construction import apoly_construction +# ------------------------------------------------------------------------- +def check_ranges(theta, ranges): + """ + This function checks if theta lies in the given ranges. + + Parameters + ---------- + theta : array + Proposed parameter set. + ranges : nested list + List of the praremeter ranges. + + Returns + ------- + c : bool + If it lies in the given range, it return True else False. + + """ + c = True + # traverse in the list1 + for i, bounds in enumerate(ranges): + x = theta[i] + # condition check + if x < bounds[0] or x > bounds[1]: + c = False + return c + return c + + +# ------------------------------------------------------------------------- +def fit_dist(y): + """ + Fits the known distributions to the data. + + Parameters + ---------- + y : array of shape (n_samples) + Data to be fitted. + + Returns + ------- + sel_dist: string + Selected distribution type from `lognorm`, `norm`, `uniform` or + `expon`. + params : list + Parameters corresponding to the selected distibution type. 
+ + """ + dist_results = [] + params = {} + dist_names = ['lognorm', 'norm', 'uniform', 'expon'] + for dist_name in dist_names: + dist = getattr(st, dist_name) + + try: + if dist_name != 'lognorm': + param = dist.fit(y) + else: + param = dist.fit(np.exp(y), floc=0) + except: + param = dist.fit(y) + + params[dist_name] = param + # Applying the Kolmogorov-Smirnov test + D, p = st.kstest(y, dist_name, args=param) + dist_results.append((dist_name, D)) + + # select the best fitted distribution + sel_dist, D = (min(dist_results, key=lambda item: item[1])) + + if sel_dist == 'uniform': + params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] + + params[sel_dist][1]] + if D < 0.05: + return sel_dist, params[sel_dist] + else: + return None, None + + class ExpDesigns: """ @@ -24,8 +103,8 @@ class ExpDesigns: method : str Type of the experimental design. The default is `'normal'`. Other option is `'sequential'`. - meta_Model : str - Type of the meta_model. + meta_Model_type : str + Type of the meta_Model_type. sampling_method : str Name of the sampling method for the experimental design. The following sampling method are supported: @@ -112,7 +191,7 @@ class ExpDesigns: - K-Opt (K-Optimality) """ - def __init__(self, Input, method='normal', meta_Model='pce', + def __init__(self, Input, method='normal', meta_Model_type='pce', sampling_method='random', hdf5_file=None, n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16, tradeoff_scheme=None, n_canddidate=1, explore_method='random', @@ -122,7 +201,7 @@ class ExpDesigns: self.InputObj = Input self.method = method - self.meta_Model = meta_Model + self.meta_Model_type = meta_Model_type self.sampling_method = sampling_method self.hdf5_file = hdf5_file self.n_new_samples = n_new_samples @@ -139,7 +218,74 @@ class ExpDesigns: self.step_snapshot = step_snapshot self.max_a_post = max_a_post self.adapt_verbose = adapt_verbose + + # Other + self.apce = None + self.ndim = None + + # Init + self.check_valid_inputs() + + + def check_valid_inputs(self)-> None: + """ + Check if the given InputObj is valid to use for further calculations: + Has some Marginals + Marginals have valid priors + All Marginals given as the same type (samples vs dist) + + Returns + ------- + None + """ + Inputs = self.InputObj + self.ndim = len(Inputs.Marginals) + + # Check if PCE or aPCE metamodel is selected. + # TODO: test also for 'pce'?? + if self.meta_Model_type.lower() == 'apce': + self.apce = True + else: + self.apce = False + + # check if marginals given + if not self.ndim >=1: + raise AssertionError('Cannot build distributions if no marginals are given') + + # check that each marginal is valid + for marginals in Inputs.Marginals: + if len(marginals.input_data) == 0: + if marginals.dist_type == None: + raise AssertionError('Not all marginals were provided priors') + break + # if marginals.dist_type != None and marginals.parameters == None: + # raise AssertionError('Some distributions do not have characteristic values') + # break + if np.array(marginals.input_data).shape[0] and (marginals.dist_type != None): + raise AssertionError('Both samples and distribution type are given. Please choose only one.') + break + + # Check if input is given as dist or input_data. 
+ # Encode how the priors are given: -1 undecided, 0 dist types only, + # 1 input data only, 2 mixed (invalid, raises below) + self.input_data_given = -1 + for marg in Inputs.Marginals: + size = np.array(marg.input_data).shape[0] + if size and abs(self.input_data_given) != 1: + self.input_data_given = 2 + break + if (not size) and self.input_data_given > 0: + self.input_data_given = 2 + break + if not size: + self.input_data_given = 0 + if size: + self.input_data_given = 1 + + if self.input_data_given == 2: + raise AssertionError('Distributions cannot be built as the priors have different types') + # ------------------------------------------------------------------------- def generate_samples(self, n_samples, sampling_method='random', transform=False): @@ -200,18 +346,6 @@ self.n_init_samples = self.ndim + 1 n_samples = int(n_samples) - # Check if PCE or aPCE metamodel is selected. - if self.meta_Model.lower() == 'apce': - self.apce = True - else: - self.apce = False - - # Check if input is given as dist or input_data. - if len(Inputs.Marginals[0].input_data): - self.input_data_given = True - else: - self.input_data_given = False - # Get the bounds if input_data are directly defined by user: if self.input_data_given: for i in range(self.ndim): @@ -295,7 +429,7 @@ # Create a multivariate probability distribution if max_deg is not None: - JDist, poly_types = self.build_dist(rosenblatt=rosenblatt_flag) + JDist, poly_types = self.build_polytypes(rosenblatt=rosenblatt_flag) self.JDist, self.poly_types = JDist, poly_types if self.input_data_given: @@ -349,7 +483,7 @@ return self.raw_data, self.bound_tuples # ------------------------------------------------------------------------- - def build_dist(self, rosenblatt): + def build_polytypes(self, rosenblatt): """ Creates the polynomial types to be passed to univ_basis_vals method of the MetaModel object. 
@@ -368,11 +502,12 @@ """ Inputs = self.InputObj + all_data = [] all_dist_types = [] orig_joints = [] poly_types = [] - + for parIdx in range(self.ndim): if Inputs.Marginals[parIdx].dist_type is None: @@ -393,37 +528,51 @@ elif 'unif' in dist_type.lower(): polytype = 'legendre' + if not np.array(params).shape[0] >= 2: + raise AssertionError('Distribution has too few parameters!') dist = chaospy.Uniform(lower=params[0], upper=params[1]) elif 'norm' in dist_type.lower() and \ 'log' not in dist_type.lower(): + if not np.array(params).shape[0] >= 2: + raise AssertionError('Distribution has too few parameters!') polytype = 'hermite' dist = chaospy.Normal(mu=params[0], sigma=params[1]) elif 'gamma' in dist_type.lower(): polytype = 'laguerre' + if not np.array(params).shape[0] >= 3: + raise AssertionError('Distribution has too few parameters!') dist = chaospy.Gamma(shape=params[0], scale=params[1], shift=params[2]) elif 'beta' in dist_type.lower(): + if not np.array(params).shape[0] >= 4: + raise AssertionError('Distribution has too few parameters!') polytype = 'jacobi' dist = chaospy.Beta(alpha=params[0], beta=params[1], lower=params[2], upper=params[3]) elif 'lognorm' in dist_type.lower(): polytype = 'hermite' + if not np.array(params).shape[0] >= 2: + raise AssertionError('Distribution has too few parameters!') mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2)) sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2)) dist = chaospy.LogNormal(mu, sigma) # dist = chaospy.LogNormal(mu=params[0], sigma=params[1]) elif 'expon' in dist_type.lower(): - polytype = 'arbitrary' + polytype = 'exponential' + if not np.array(params).shape[0] >= 2: + raise AssertionError('Distribution has too few parameters!') dist = chaospy.Exponential(scale=params[0], shift=params[1]) elif 'weibull' in dist_type.lower(): - polytype = 'arbitrary' + polytype = 'weibull' + if not np.array(params).shape[0] >= 3: + raise AssertionError('Distribution has too few parameters!') dist = chaospy.Weibull(shape=params[0], scale=params[1], shift=params[2]) @@ -448,7 +597,10 @@ self.prior_space = orig_space_dist else: orig_space_dist = chaospy.J(*orig_joints) - self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000)) + try: + self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000)) + except Exception: + raise ValueError('Parameter values are not valid, please set differently') return orig_space_dist, poly_types @@ -485,13 +637,13 @@ samples = self.JDist.sample(int(n_samples)).T # Check if all samples are in the bound_tuples for idx, param_set in enumerate(samples): - if not self._check_ranges(param_set, self.bound_tuples): + if not check_ranges(param_set, self.bound_tuples): try: proposed_sample = chaospy.generate_samples( 1, domain=self.JDist, rule='random').T[0] except: proposed_sample = self.JDist.resample(1).T[0] - while not self._check_ranges(proposed_sample, + while not check_ranges(proposed_sample, self.bound_tuples): try: proposed_sample = chaospy.generate_samples( @@ -596,8 +748,11 @@ Transformed samples. 
""" + if not hasattr(self, 'JDist'): + raise AttributeError('Call function init_param_space first to create JDist') + if self.InputObj.Rosenblatt: - self.origJDist, _ = self.build_dist(False) + self.origJDist, _ = self.build_polytypes(False) if method == 'user': tr_X = self.JDist.inv(self.origJDist.fwd(X.T)).T else: @@ -659,79 +814,4 @@ class ExpDesigns: return tr_X - # ------------------------------------------------------------------------- - def fit_dist(self, y): - """ - Fits the known distributions to the data. - - Parameters - ---------- - y : array of shape (n_samples) - Data to be fitted. - - Returns - ------- - sel_dist: string - Selected distribution type from `lognorm`, `norm`, `uniform` or - `expon`. - params : list - Parameters corresponding to the selected distibution type. - - """ - dist_results = [] - params = {} - dist_names = ['lognorm', 'norm', 'uniform', 'expon'] - for dist_name in dist_names: - dist = getattr(st, dist_name) - - try: - if dist_name != 'lognorm': - param = dist.fit(y) - else: - param = dist.fit(np.exp(y), floc=0) - except: - param = dist.fit(y) - - params[dist_name] = param - # Applying the Kolmogorov-Smirnov test - D, p = st.kstest(y, dist_name, args=param) - dist_results.append((dist_name, D)) - - # select the best fitted distribution - sel_dist, D = (min(dist_results, key=lambda item: item[1])) - if sel_dist == 'uniform': - params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] + - params[sel_dist][1]] - if D < 0.05: - return sel_dist, params[sel_dist] - else: - return None, None - - # ------------------------------------------------------------------------- - def _check_ranges(self, theta, ranges): - """ - This function checks if theta lies in the given ranges. - - Parameters - ---------- - theta : array - Proposed parameter set. - ranges : nested list - List of the praremeter ranges. - - Returns - ------- - c : bool - If it lies in the given range, it return True else False. - - """ - c = True - # traverse in the list1 - for i, bounds in enumerate(ranges): - x = theta[i] - # condition check - if x < bounds[0] or x > bounds[1]: - c = False - return c - return c diff --git a/tests/test_ExpDesign.py b/tests/test_ExpDesign.py new file mode 100644 index 0000000000000000000000000000000000000000..cec63db498c8aaf45bf6da096b9c3f91fbf1cb17 --- /dev/null +++ b/tests/test_ExpDesign.py @@ -0,0 +1,430 @@ +# -*- coding: utf-8 -*- +""" +Test the ExpDesigns class in bayesvalidrox. +Tests are available for the following functions: + _check_ranges - x + fit_dist - x + +Class ExpDesigns: + check_valid_inputs - x + generate_samples + generate_ED + init_param_space + build_polytypes - x + random_sampler + pcm_sampler + transform + +@author: Rebecca Kohlhaas +""" +import sys +sys.path.append("src/") +import pytest +import numpy as np + +import bayesvalidrox.surrogate_models.exp_designs as exp +from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns + +#%% Test check_ranges + +def test_check_ranges() -> None: + """ + Check to see if theta lies in expected ranges + """ + theta = [0.5,1.2] + ranges = [[0,1],[1,2]] + assert exp.check_ranges(theta, ranges) == True + +def test_check_ranges_inv() -> None: + """ + Check to see if theta lies not in expected ranges + """ + theta = [1.5,1.2] + ranges = [[0,1],[1,2]] + assert exp.check_ranges(theta, ranges) == False + +#%% Test fit_dist + +def test_fit_dist_uniform() -> None: + """ + Check to see if recognizes uniform dist and parameters sufficiently well. 
+    """
+    y = np.random.uniform(0, 1, 100000)
+    dist, params = exp.fit_dist(y)
+    assert dist == 'uniform' and params[0] < 0.01 and params[1] > 0.99
+
+def test_fit_dist_norm() -> None:
+    """
+    Check that fit_dist recognizes a normal dist and recovers its
+    parameters sufficiently well.
+    """
+    y = np.random.normal(0, 1, 100000)
+    dist, params = exp.fit_dist(y)
+    assert dist == 'norm' and params[0] < 0.1 and params[1] > 0.9
+
+@pytest.mark.skip(reason="disabled in this revision")
+def test_fit_dist_lognorm() -> None:
+    """
+    Check that fit_dist recognizes a lognorm dist and recovers its
+    parameters sufficiently well.
+    """
+    y = np.random.lognormal(0, 1, 100000)
+    dist, params = exp.fit_dist(y)
+    assert dist == 'lognorm' and params[0] < 0.1 and params[1] > 0.9
+
+@pytest.mark.skip(reason="disabled in this revision")
+def test_fit_dist_expon() -> None:
+    """
+    Check that fit_dist recognizes an exponential dist and recovers its
+    parameters sufficiently well.
+    """
+    y = np.random.exponential(1, 100000)
+    dist, params = exp.fit_dist(y)
+    assert dist == 'expon' and params[0] < 0.1
+
+#%% Test ExpDesign.check_valid_input
+
+from bayesvalidrox.surrogate_models.inputs import Input
+
+def test_check_valid_input_hasmarg() -> None:
+    """
+    Distribution not built if no marginals set
+    """
+    inp = Input()
+    with pytest.raises(AssertionError) as excinfo:
+        ExpDesigns(inp)
+    assert str(excinfo.value) == 'Cannot build distributions if no marginals are given'
+
+def test_check_valid_input_haspriors() -> None:
+    """
+    Distribution not built if no distribution set for the marginals
+    """
+    inp = Input()
+    inp.add_marginals()
+    with pytest.raises(AssertionError) as excinfo:
+        ExpDesigns(inp)
+    assert str(excinfo.value) == 'Not all marginals were provided priors'
+
+def test_check_valid_input_priorsmatch() -> None:
+    """
+    Distribution not built if dist types do not align
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    inp.add_marginals()
+    inp.Marginals[1].dist_type = 'normal'
+    inp.Marginals[1].parameters = [0, 1]
+    with pytest.raises(AssertionError) as excinfo:
+        ExpDesigns(inp)
+    assert str(excinfo.value) == 'Distributions cannot be built as the priors have different types'
+
+def test_check_valid_input_samples() -> None:
+    """
+    Design built correctly - samples
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    inp.add_marginals()
+    inp.Marginals[1].input_data = x + 2
+    # No try/except needed: pytest fails the test on any unexpected exception.
+    ExpDesigns(inp)
+
+
+def test_check_valid_input_both() -> None:
+    """
+    Design not built - samples and dist type given
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    inp.Marginals[0].dist_type = 'normal'
+    with pytest.raises(AssertionError) as excinfo:
+        ExpDesigns(inp)
+    assert str(excinfo.value) == 'Both samples and distribution type are given. Please choose only one.'
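+
+# Sketch of an alternative: pytest.raises also accepts a match argument (a
+# regex searched in the error message), which folds the message check into
+# the context manager. Same Input/ExpDesigns API as in the tests above is
+# assumed.
+def test_check_valid_input_both_match() -> None:
+    """
+    Same check as test_check_valid_input_both, via the match argument.
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    inp.Marginals[0].dist_type = 'normal'
+    with pytest.raises(AssertionError,
+                       match='Both samples and distribution type are given'):
+        ExpDesigns(inp)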
+
+#def test_check_valid_input_distnotok() -> None:
+#    """
+#    Design built incorrectly - dist types without parameters
+#    """
+#    inp = Input()
+#    inp.add_marginals()
+#    inp.Marginals[0].dist_type = 'normal'
+#    inp.add_marginals()
+#    inp.Marginals[1].dist_type = 'normal'
+#    with pytest.raises(AssertionError) as excinfo:
+#        exp = ExpDesigns(inp)
+#    assert str(excinfo.value) == 'Some distributions do not have characteristic values'
+
+def test_check_valid_input_distok() -> None:
+    """
+    Design built correctly - dist types
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    inp.add_marginals()
+    inp.Marginals[1].dist_type = 'normal'
+    inp.Marginals[1].parameters = [0, 1]
+    # No try/except needed: pytest fails the test on any unexpected exception.
+    ExpDesigns(inp)
+
+#%% Test ExpDesign.build_polytypes
+def test_build_polytypes_normalerr() -> None:
+    """
+    Build dist 'normal' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
+
+def test_build_polytypes_normal() -> None:
+    """
+    Build dist 'normal'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+def test_build_polytypes_uniferr() -> None:
+    """
+    Build dist 'unif' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'unif'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
+
+def test_build_polytypes_unif() -> None:
+    """
+    Build dist 'unif'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'unif'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+def test_build_polytypes_gammaerr() -> None:
+    """
+    Build dist 'gamma' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'gamma'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
+
+def test_build_polytypes_gamma() -> None:
+    """
+    Build dist 'gamma' - invalid parameter values
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'gamma'
+    inp.Marginals[0].parameters = [0, 1, 0]
+    expdes = ExpDesigns(inp)
+    with pytest.raises(ValueError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Parameter values are not valid, please set differently'
+
+def test_build_polytypes_betaerr() -> None:
+    """
+    Build dist 'beta' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'beta'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
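+
+# The remaining too-few-parameter checks below repeat this pattern; as a
+# sketch, pytest.mark.parametrize could cover all of them in one test,
+# assuming the same Input/ExpDesigns API and error message as above:
+@pytest.mark.parametrize("dist_type", ['normal', 'unif', 'gamma', 'beta',
+                                       'lognorm', 'expon', 'weibull'])
+def test_build_polytypes_too_few_params(dist_type) -> None:
+    """
+    Build any dist with too few parameters.
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = dist_type
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError,
+                       match='Distribution has too few parameters!'):
+        expdes.build_polytypes(False)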
+
+def test_build_polytypes_beta() -> None:
+    """
+    Build dist 'beta'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'beta'
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+
+def test_build_polytypes_lognormerr() -> None:
+    """
+    Build dist 'lognorm' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'lognorm'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
+
+def test_build_polytypes_lognorm() -> None:
+    """
+    Build dist 'lognorm'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'lognorm'
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+
+def test_build_polytypes_exponerr() -> None:
+    """
+    Build dist 'expon' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'expon'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
+
+def test_build_polytypes_expon() -> None:
+    """
+    Build dist 'expon'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'expon'
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+
+def test_build_polytypes_weibullerr() -> None:
+    """
+    Build dist 'weibull' - too few params
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'weibull'
+    inp.Marginals[0].parameters = []
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AssertionError) as excinfo:
+        expdes.build_polytypes(False)
+    assert str(excinfo.value) == 'Distribution has too few parameters!'
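+
+# A sketch complementing the happy-path tests above: build_polytypes also
+# returns the joint distribution and the list of polynomial types (see the
+# return statement in exp_designs.py earlier in this diff), so the mapping
+# can be asserted on directly. The expected 'hermite' entry for a normal
+# marginal follows from the dist-to-polytype mapping shown there.
+def test_build_polytypes_returns_hermite_for_normal() -> None:
+    """
+    build_polytypes reports 'hermite' for a normal marginal.
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    _, poly_types = expdes.build_polytypes(False)
+    assert 'hermite' in poly_types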
+
+def test_build_polytypes_weibull() -> None:
+    """
+    Build dist 'weibull'
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'weibull'
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+
+def test_build_polytypes_arbitrary() -> None:
+    """
+    Build poly 'arbitrary'
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(False)
+
+def test_build_polytypes_rosenblatt() -> None:
+    """
+    Build dist with rosenblatt
+    """
+    x = np.random.uniform(0, 1, 1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    expdes = ExpDesigns(inp)
+    expdes.build_polytypes(True)
+
+#%% Test ExpDesign.init_param_space
+
+def test_init_param_space_nomaxdeg() -> None:
+    """
+    Init param space without max_deg
+    """
+    x = np.random.uniform(0, 1, (2, 1000))
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space()
+
+def test_init_param_space_maxdeg() -> None:
+    """
+    Init param space with max_deg
+    """
+    x = np.random.uniform(0, 1, (2, 1000))
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=2)
+
+
+#%% Test ExpDesign.transform
+
+def test_transform() -> None:
+    """
+    Call transform without a built JDist
+    """
+    x = np.random.uniform(0, 1, (2, 1000))
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    expdes = ExpDesigns(inp)
+    with pytest.raises(AttributeError) as excinfo:
+        expdes.transform(x)
+    assert str(excinfo.value) == 'Call function init_param_space first to create JDist'
+
+if __name__ == '__main__':
+    pass
diff --git a/tests/test_Input.py b/tests/test_Input.py
new file mode 100644
index 0000000000000000000000000000000000000000..0923e0d9601d4dab008d6a5bf4536c71a003999c
--- /dev/null
+++ b/tests/test_Input.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""
+Test the Input and associated Marginal classes in bayesvalidrox.
+Tests are available for the following functions:
+Class Marginal contains no functions - no tests to write.
+Class Input:
+    add_marginals
+
+@author: Rebecca Kohlhaas
+"""
+import sys
+sys.path.append("src/")
+import pytest
+
+from bayesvalidrox.surrogate_models.inputs import Marginal, Input
+
+
+def test_addmarginals() -> None:
+    """
+    Tests function 'Input.add_marginals()'
+    Ensure that marginals get appended
+    """
+    inp = Input()
+    inp.add_marginals()
+    assert len(inp.Marginals) == 1
+
+if __name__ == '__main__':
+    pass
diff --git a/tests/test_MetaModel.py b/tests/test_MetaModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ae14361a4567b7f9162e1c89dab2fe43abedfad
--- /dev/null
+++ b/tests/test_MetaModel.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+"""
+Test the MetaModel class in bayesvalidrox.
+Tests are available for the following functions:
+Class MetaModel:
+    create_metamodel
+    train_norm_design
+    update_pce_coeffs
+
+@author: Rebecca Kohlhaas
+"""
+import sys
+sys.path.append("src/")
+import pytest
+
+from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
+
+if __name__ == '__main__':
+    pass
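+
+# Placeholder until the functions listed above get dedicated tests: a
+# minimal smoke check that assumes nothing beyond the import itself.
+def test_metamodel_import() -> None:
+    """
+    Smoke test: the MetaModel class is importable.
+    """
+    assert MetaModel is not None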