diff --git a/examples/umbridge_tsunamitutorial/example_umbridge.py b/examples/umbridge_tsunamitutorial/example_umbridge.py
deleted file mode 100644
index b83175cf7f9ab40ab1800e42f625f65e96ede59d..0000000000000000000000000000000000000000
--- a/examples/umbridge_tsunamitutorial/example_umbridge.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Mon Dec 11 14:03:23 2023
-
-Example and test for using umbridge with bayesvalidrox.
-Example model is the tsunami model by
-docker run -it -p 4242:4242 -v ~/tsunami_output:/output linusseelinger/model-exahype-tsunami
-
-@author: rkohlhaas
-"""
-
-import joblib
-import numpy as np
-import umbridge
-
-import bayesvalidrox as bv
-
-
-if __name__ == '__main__':
-
-    # This model has 2 inputs and four outputs
-    n_prior_sample = 1000
-    priors = bv.Input()
-    priors.add_marginals()
-    priors.Marginals[0].name = 'x'
-    priors.Marginals[0].input_data = np.random.uniform(50,150,n_prior_sample)
-    priors.add_marginals()
-    priors.Marginals[1].name = 'y'
-    priors.Marginals[1].input_data = np.random.uniform(50,150,n_prior_sample)
-
-    # Define the model - level 0
-    model0 = bv.PyLinkForwardModel()
-    #model.link_type = 'Function'
-    model0.link_type = 'umbridge'
-    model0.py_file = 'tsunami_model'
-    model0.name = 'tsunami_model'
-    model0.Output.names = ['T1', 'T2', 'H1', 'H2']
-    #model.observations = data_dict1_times
-
-    # Define the model - level 1
-    model1 = bv.PyLinkForwardModel()
-    #model.link_type = 'Function'
-    model1.link_type = 'umbridge'
-    model1.py_file = 'tsunami_model1'
-    model1.name = 'tsunami_model1'
-    model1.Output.names = ['T1', 'T2', 'H1', 'H2']
-    #model.observations = data_dict1_times
-
-    # Create the surrogate
-    surrogate_opts0 = bv.MetaModel(priors, model0)
-
-    # Select your metamodel method
-    surrogate_opts0.meta_model_type = 'aPCE'
-    surrogate_opts0.pce_reg_method = 'FastARD'
-    surrogate_opts0.pce_deg = np.arange(1, 5)
-    surrogate_opts0.pce_q_norm = 0.4#1.0
-
-    # Select your metamodel method
-    surrogate_opts0.add_ExpDesign()
-    surrogate_opts0.ExpDesign.method = 'normal'
-    surrogate_opts0.ExpDesign.n_init_samples = 50
-    surrogate_opts0.ExpDesign.sampling_method = 'latin-hypercube'
-    surrogate0 = surrogate_opts0.create_metamodel()
-    print('Surrogate 0 completed')
-    print('')
-
-    # Create the surrogate
-    surrogate_opts1 = bv.MetaModel(priors, model1)
-
-    # Select your metamodel method
-    surrogate_opts1.meta_model_type = 'aPCE'
-    surrogate_opts1.pce_reg_method = 'FastARD'
-    surrogate_opts1.pce_deg = np.arange(1, 5)
-    surrogate_opts1.pce_q_norm = 0.4#1.0
-
-    # Select your metamodel method
-    surrogate_opts1.add_ExpDesign()
-    surrogate_opts1.ExpDesign.method = 'normal'
-    surrogate_opts1.ExpDesign.n_init_samples = 50
-    surrogate_opts1.ExpDesign.sampling_method = 'latin-hypercube'
-    surrogate1 = surrogate_opts1.create_metamodel()
-    print('Surrogate 1 completed')
-    print('')
-
-    # Save surrogate
-    with open('tsunami0.pk1', 'wb') as output:
-        joblib.dump(surrogate0, output, 2)
-
-    # Save surrogate
-    with open('tsunami1.pk1', 'wb') as output:
-        joblib.dump(surrogate1, output, 2)
-
-    if 0:
-        # Post processing
-        L2_PostPCE = bv.PostProcessing(surrogate1)
-        L2_PostPCE.plot_moments(plot_type='line')
-        # Plot to check validation visually.
-        L2_PostPCE.valid_metamodel(n_samples=1)
-        # Compute and print RMSE error
-        L2_PostPCE.check_accuracy(n_samples=300)
-        total_sobol = L2_PostPCE.sobol_indices()
-
-    # Test BayesInference
-    import copy
-    from tsunami_model import tsunami_model
-    true_data = tsunami_model(np.array([[90.0,60.0]]))
-    model0.observations = true_data
-    model1.observations = true_data
-    true_data_nox = copy.deepcopy(true_data)
-    true_data_nox.pop('x_values')
-
-    # Test surrogate output shape
-    mean, std = surrogate1.eval_metamodel(np.array([[90.0,60.0]]))
-
-    # Bayesian Inference
-    if 0:
-        # Set uncertainty
-        import pandas as pd
-        obsData = pd.DataFrame(true_data_nox, columns=model0.Output.names)
-        DiscrepancyOpts = bv.Discrepancy('')
-        DiscrepancyOpts.type = 'Gaussian'
-        DiscrepancyOpts.parameters = (obsData*0.15)**2
-
-        # Parameter estimation / single model validation via TOM
-        BayesOpts = bv.BayesInference(surrogate1)
-        BayesOpts.emulator= True
-        BayesOpts.plot_post_pred = True
-        #BayesOpts.inference_method = 'rejection'
-        BayesOpts.Discrepancy = DiscrepancyOpts
-        BayesOpts.bootstrap = True
-        BayesOpts.n_bootstrap_itrs = 10
-        BayesOpts.bootstrap_noise = 0.05
-        BayesOpts.out_dir = ''
-
-        # Set the MCMC parameters
-        import emcee
-        BayesOpts.inference_method = "MCMC"
-        BayesOpts.mcmc_params = {
-            'n_steps': 1e3,
-            'n_walkers': 30,
-            'moves': emcee.moves.KDEMove(),
-            'multiprocessing': False,
-            'verbose': False
-            }
-
-        Bayes = BayesOpts.create_inference()
-        # Pkl the inference results
-        with open(f'Bayes_{model0.name}.pkl', 'wb') as output:
-            joblib.dump(Bayes, output, 2)
-
-    # Model Comparison
-    if 1:
-        # Set the models
-        meta_models = {
-            "M0": surrogate0,
-            "M1": surrogate1,
-            }
-
-        # BME Bootstrap options
-        opts_bootstrap = {
-            "bootstrap": True,
-            "n_samples": 1000,
-            "Discrepancy": DiscrepancyOpts,
-            "emulator": True,
-            "plot_post_pred": False
-            }
-
-        # Run model comparison
-        BayesOpts = bv.BayesModelComparison(
-            justifiability=True,
-            n_bootstarp=10,
-            just_n_meas=2
-            )
-        output_dict = BayesOpts.create_model_comparison(
-            meta_models,
-            opts_bootstrap
-            )
-
-        import pickle as pkl
-        # save model comparison results
-        with open('ModelComparison.pkl', 'wb') as f:
-            pkl.dump(output_dict, f)
-    
\ No newline at end of file
diff --git a/examples/umbridge_tsunamitutorial/example_umbridge_testmodel.py b/examples/umbridge_tsunamitutorial/example_umbridge_testmodel.py
index 95ec964fa674a7ff9525c3557f65d7038fd2200a..22a0f750e96c4b7bf14913eb97f0040fb360a257 100644
--- a/examples/umbridge_tsunamitutorial/example_umbridge_testmodel.py
+++ b/examples/umbridge_tsunamitutorial/example_umbridge_testmodel.py
@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
 """
-Created on Mon Dec 11 14:03:23 2023
+Example and test for using UM-Bridge with bayesvalidrox.
+This example uses the `testmodel` and sets the model type explicitly as
+UM-Bridge for the pylink model.
 
-Example and test for using umbridge with bayesvalidrox.
-Example model is the tsunami model by
-docker run -it -p 4242:4242 -v ~/tsunami_output:/output linusseelinger/model-exahype-tsunami
+An example for using UM-Bridge with an explicitly defined model can be found in
+`example_umbridge_tsunamitutorial.py`.
 
-@author: rkohlhaas
 """
 
 import joblib
@@ -31,32 +31,30 @@ if __name__ == '__main__':
     priors.Marginals[0].name = 'x'
     priors.Marginals[0].input_data = np.random.uniform(50,150,n_prior_sample)
 
-    # Define the model - level 0
+    # Define the model directly via Pylink
     model = PyLinkForwardModel()
-    #model.link_type = 'Function'
-    #model.py_file = 'testmodel'
-    #model.name = 'testmodel'
     model.link_type = 'umbridge'
     model.host = 'http://testmodel.linusseelinger.de'
     model.modelparams = {}
     model.name = 'testmodel'
     model.Output.names = ['y']
-    model.output_type = 'single-valued'
+    model.x_values = np.array([0])
 
     # Create the surrogate
     surrogate_opts = MetaModel(priors)
 
-    # Select your metamodel method
+    # Select the surrogate type and properties
    surrogate_opts.meta_model_type = 'aPCE'
     surrogate_opts.pce_reg_method = 'FastARD'
     surrogate_opts.pce_deg = np.arange(1, 5)
     surrogate_opts.pce_q_norm = 0.4
 
-    # Select your metamodel method
+    # Define the experimental design and sampling methods
     ExpDesign = ExpDesigns(priors)
     ExpDesign.n_init_samples = 50
     ExpDesign.sampling_method = 'latin-hypercube'
 
+    # Start and run the engine
     engine = Engine(surrogate_opts, model, ExpDesign)
     engine.start_engine()
     engine.train_normal(parallel = False)
@@ -70,8 +68,10 @@ if __name__ == '__main__':
 
     # Post processing
     L2_PostPCE = PostProcessing(engine)
     L2_PostPCE.plot_moments(plot_type='line')
+    # Plot to check validation visually.
     L2_PostPCE.valid_metamodel(n_samples=1)
 
+    # Compute and print RMSE error
     L2_PostPCE.check_accuracy(n_samples=300)
     total_sobol = L2_PostPCE.sobol_indices()
diff --git a/examples/umbridge_tsunamitutorial/example_umbridge_tsunamitutorial.py b/examples/umbridge_tsunamitutorial/example_umbridge_tsunamitutorial.py
new file mode 100644
index 0000000000000000000000000000000000000000..c456787d0c5d9d9714083b406895b8096a4d16ce
--- /dev/null
+++ b/examples/umbridge_tsunamitutorial/example_umbridge_tsunamitutorial.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+"""
+Example and test for using UM-Bridge with bayesvalidrox.
+Example model is the tsunami model described here:
+    https://um-bridge-benchmarks.readthedocs.io/en/docs/inverse-benchmarks/exahype-tsunami.html
+It needs to be running separately when starting this example:
+    docker run -it -p 4242:4242 -v ~/tsunami_output:/output linusseelinger/model-exahype-tsunami
+
+This example uses models that are explicitly defined as functions outside of
+PyLink. An example for the link_type 'umbridge' can be found in
+`example_umbridge_testmodel.py`.
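+
+Note: `tsunami_model.py` and `tsunami_model1.py` are assumed to reach the
+running container at http://localhost:4242, the port mapped by the docker
+command above.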
+
+"""
+
+import copy
+import joblib
+import numpy as np
+import pandas as pd
+import umbridge
+
+import bayesvalidrox as bv
+from tsunami_model import tsunami_model
+
+if __name__ == '__main__':
+    # This model has 2 inputs and four outputs
+    n_prior_sample = 1000
+    priors = bv.Input()
+    priors.add_marginals()
+    priors.Marginals[0].name = 'x'
+    priors.Marginals[0].input_data = np.random.uniform(50,150,n_prior_sample)
+    priors.add_marginals()
+    priors.Marginals[1].name = 'y'
+    priors.Marginals[1].input_data = np.random.uniform(50,150,n_prior_sample)
+
+    # Define the model - level 0
+    model0 = bv.PyLinkForwardModel()
+    model0.link_type = 'function'
+    model0.py_file = 'tsunami_model'
+    model0.name = 'tsunami_model'
+    model0.Output.names = ['T1', 'T2', 'H1', 'H2']
+    #model.observations = data_dict1_times
+
+    # Define the model - level 1
+    model1 = bv.PyLinkForwardModel()
+    model1.link_type = 'function'
+    model1.py_file = 'tsunami_model1'
+    model1.name = 'tsunami_model1'
+    model1.Output.names = ['T1', 'T2', 'H1', 'H2']
+    #model.observations = data_dict1_times
+
+    # Create the surrogate
+    surrogate_opts0 = bv.MetaModel(priors, model0)
+
+    # Select your metamodel method
+    surrogate_opts0.meta_model_type = 'aPCE'
+    surrogate_opts0.pce_reg_method = 'FastARD'
+    surrogate_opts0.pce_deg = np.arange(1, 5)
+    surrogate_opts0.pce_q_norm = 0.4  # alternative: 1.0
+
+    # Define ExpDesign - this is the same for both models
+    ExpDesign0 = bv.ExpDesigns(priors)
+    ExpDesign0.method = 'normal'
+    ExpDesign0.n_init_samples = 50
+    ExpDesign0.sampling_method = 'latin-hypercube'
+    ExpDesign1 = copy.deepcopy(ExpDesign0)
+
+    # Start and run the engine
+    engine0 = bv.Engine(surrogate_opts0, model0, ExpDesign0)
+    engine0.start_engine()
+    engine0.train_normal(parallel = False)
+    print('Surrogate 0 completed')
+    print('')
+
+    # Create the surrogate
+    surrogate_opts1 = bv.MetaModel(priors, model1)
+
+    # Select your metamodel method
+    surrogate_opts1.meta_model_type = 'aPCE'
+    surrogate_opts1.pce_reg_method = 'FastARD'
+    surrogate_opts1.pce_deg = np.arange(1, 5)
+    surrogate_opts1.pce_q_norm = 0.4  # alternative: 1.0
+
+    # Start and run the engine
+    engine1 = bv.Engine(surrogate_opts1, model1, ExpDesign1)
+    engine1.start_engine()
+    engine1.train_normal(parallel = False)
+    print('Surrogate 1 completed')
+    print('')
+
+    # Save surrogate
+    with open('tsunami0.pk1', 'wb') as output:
+        joblib.dump(engine0.MetaModel, output, 2)
+
+    # Save surrogate
+    with open('tsunami1.pk1', 'wb') as output:
+        joblib.dump(engine1.MetaModel, output, 2)
+
+    # Post processing on model 1
+    L2_PostPCE = bv.PostProcessing(engine1)
+    L2_PostPCE.plot_moments(plot_type='line')
+    # Plot to check validation visually.
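+    # (Assumption: n_samples is the number of fresh validation runs of the
+    # full model that are compared against the surrogate predictions.)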
+    L2_PostPCE.valid_metamodel(n_samples=1)
+
+    # Compute and print RMSE error
+    L2_PostPCE.check_accuracy(n_samples=300)
+    total_sobol = L2_PostPCE.sobol_indices()
+
+    # Get reference evaluation as 'true data'
+    true_data = tsunami_model(np.array([[90.0,60.0]]))
+    model0.observations = true_data
+    model1.observations = true_data
+    true_data_nox = copy.deepcopy(true_data)
+    true_data_nox.pop('x_values')
+
+    # Direct surrogate evaluation
+    mean, std = engine1.MetaModel.eval_metamodel(np.array([[90.0,60.0]]))
+
+    # Bayesian Inference
+    # Set uncertainty
+    obsData = pd.DataFrame(true_data_nox, columns=model0.Output.names)
+    DiscrepancyOpts = bv.Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData*0.15)**2
+
+    # Parameter estimation / single model validation via TOM for model 1
+    BayesOpts = bv.BayesInference(engine1)
+    BayesOpts.emulator = True
+    BayesOpts.plot_post_pred = True
+    #BayesOpts.inference_method = 'rejection'
+    BayesOpts.Discrepancy = DiscrepancyOpts
+    BayesOpts.bootstrap = True
+    BayesOpts.n_bootstrap_itrs = 10
+    BayesOpts.bootstrap_noise = 0.05
+    BayesOpts.out_dir = ''
+
+    # Set the MCMC parameters
+    import emcee
+    BayesOpts.inference_method = "MCMC"
+    BayesOpts.mcmc_params = {
+        'n_steps': 1e3,
+        'n_walkers': 30,
+        'moves': emcee.moves.KDEMove(),
+        'multiprocessing': False,
+        'verbose': False
+        }
+
+    Bayes = BayesOpts.create_inference()
+
+    # Pkl the inference results
+    with open(f'Bayes_{model0.name}.pkl', 'wb') as output:
+        joblib.dump(Bayes, output, 2)
+
+    # Model Comparison
+    # Set the models
+    meta_models = {
+        "M0": engine0,
+        "M1": engine1,
+        }
+
+    # BME Bootstrap options
+    opts_bootstrap = {
+        "bootstrap": True,
+        "n_samples": 1000,
+        "Discrepancy": DiscrepancyOpts,
+        "emulator": True,
+        "plot_post_pred": False
+        }
+
+    # Run model comparison
+    BayesOpts = bv.BayesModelComparison(
+        justifiability=True,
+        n_bootstarp=10,
+        just_n_meas=2
+        )
+    output_dict = BayesOpts.create_model_comparison(
+        meta_models,
+        opts_bootstrap
+        )
+
+    # save model comparison results
+    with open('ModelComparison.pkl', 'wb') as f:
+        joblib.dump(output_dict, f)
+    
\ No newline at end of file
diff --git a/examples/umbridge_tsunamitutorial/testmodel.pk1 b/examples/umbridge_tsunamitutorial/testmodel.pk1
deleted file mode 100644
index 700c643fc23e631a23cb23dd0972954904268377..0000000000000000000000000000000000000000
Binary files a/examples/umbridge_tsunamitutorial/testmodel.pk1 and /dev/null differ
diff --git a/examples/umbridge_tsunamitutorial/testmodel.py b/examples/umbridge_tsunamitutorial/testmodel.py
deleted file mode 100644
index b213d2a3acc0b6840f7dd24b7f87670640575368..0000000000000000000000000000000000000000
--- a/examples/umbridge_tsunamitutorial/testmodel.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Runs the umbridge command for the tsunami model
-
-@author: Rebecca Kohlhaas
-"""
-import umbridge
-import numpy as np
-
-def testmodel(params):
-    # Get the model
-    model = umbridge.HTTPModel('http://testmodel.linusseelinger.de', 'forward')
-    # Run the model
-    params = np.ndarray.tolist(params)
-    out = np.array(model(params))
-    #print(out)
-    return {'y':out[:,0],'x_values':[0]}
-
-
-    
\ No newline at end of file
diff --git a/examples/umbridge_tsunamitutorial/tsunami_model.py b/examples/umbridge_tsunamitutorial/tsunami_model.py
index 7f8e54170b2013915bb2cbd3c44c2db43e45bad5..2d837a9ae4bb27e8ef2ca622011e3e67012f2e94 100644
--- a/examples/umbridge_tsunamitutorial/tsunami_model.py
+++ b/examples/umbridge_tsunamitutorial/tsunami_model.py
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Runs the umbridge command for the tsunami model
-
-@author: Rebecca Kohlhaas
+Runs the umbridge command for the tsunami model.
 """
 import umbridge
 import numpy as np
diff --git a/examples/umbridge_tsunamitutorial/tsunami_model1.py b/examples/umbridge_tsunamitutorial/tsunami_model1.py
index 3e56801e5d009d3d8bda0f6c7b19af67d58b3417..239670773cf71e84b4ef0caec35c4e024ca1bd66 100644
--- a/examples/umbridge_tsunamitutorial/tsunami_model1.py
+++ b/examples/umbridge_tsunamitutorial/tsunami_model1.py
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Runs the umbridge command for the tsunami model
-
-@author: Rebecca Kohlhaas
+Runs the umbridge command for the tsunami model on level 1.
 """
 import umbridge
 import numpy as np
diff --git a/src/bayesvalidrox/pylink/pylink.py b/src/bayesvalidrox/pylink/pylink.py
index 5084f1cf1450ea08d91d97b8a64cf1d4a12e494e..8899447c57bff01a2f4976d19df4e2033212f7bd 100644
--- a/src/bayesvalidrox/pylink/pylink.py
+++ b/src/bayesvalidrox/pylink/pylink.py
@@ -473,6 +473,8 @@ class PyLinkForwardModel(object):
         # TODO: if umbridge, just run, no parallel from here
         # If the link type is UM-Bridge, then no parallel needs to be started from here
         if self.link_type.lower() == 'umbridge':
+            if not hasattr(self, 'x_values'):
+                raise AttributeError('For model type `umbridge` the attribute `x_values` needs to be set for the model!')
             # Init model
             #model = umbridge.HTTPModel('http://localhost:4242', 'forward')
             self.model = umbridge.HTTPModel(self.host, 'forward') # TODO: is this always forward?
@@ -608,30 +610,31 @@ class PyLinkForwardModel(object):
         out_dict = {}
         cnt = 0
         for key in self.Output.names:
-            # If needed resort into single-value outputs
-            if self.output_type == 'single-valued':
-                if out.shape[1]>1: # TODO: this doesn't fully seem correct??
-                    for i in range(out[:,key]): # TODO: this doesn't fully seem correct??
-                        new_key = key+str(i)
-                        if new_key not in self.Output.names:
-                            self.Output.names.append(new_key)
-                            if i == 0:
-                                self.Ouptut.names.remove(key)
-                        out_dict[new_key] = out[:,cnt,i] # TODO: not sure about this, need to test
-                else:
-                    out_dict[key] = out[:,cnt]
-
-
-            else:
-                out_dict[key] = out[:,cnt]
+            # # If needed resort into single-value outputs
+            # if self.output_type == 'single-valued':
+            #     if out.shape[1]>1: # TODO: this doesn't fully seem correct??
+            #         for i in range(out[:,key]): # TODO: this doesn't fully seem correct??
+            #             new_key = key+str(i)
+            #             if new_key not in self.Output.names:
+            #                 self.Output.names.append(new_key)
+            #                 if i == 0:
+            #                     self.Ouptut.names.remove(key)
+            #             out_dict[new_key] = out[:,cnt,i] # TODO: not sure about this, need to test
+            #     else:
+            #         out_dict[key] = out[:,cnt]
+            #
+            #
+            # else:
+            out_dict[key] = out[:,cnt]
             cnt += 1
 
-        # TODO: how to deal with the x-values?
-        if self.output_type == 'single-valued':
-            out_dict['x_values'] = [0]
-        else:
-            out_dict['x_values'] = np.arange(0,out[:,0].shape[0],1)
+        ## TODO: how to deal with the x-values?
+        #if self.output_type == 'single-valued':
+        #    out_dict['x_values'] = [0]
+        #else:
+        #    out_dict['x_values'] = np.arange(0,out[:,0].shape[0],1)
+        out_dict['x_values'] = self.x_values
 
         #return {'T1':out[:,0], 'T2':out[:,1], 'H1':out[:,2], 'H2':out[:,3],
         #        'x_values':[0]}
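
A minimal usage sketch for the new `x_values` check in pylink.py above. All
attribute names and the server URL are taken from the testmodel example in
this patch; this snippet is an illustration, not part of the patch itself:

    import numpy as np
    import bayesvalidrox as bv

    # Set up a pylink model that talks to a remote UM-Bridge server
    model = bv.PyLinkForwardModel()
    model.link_type = 'umbridge'
    model.host = 'http://testmodel.linusseelinger.de'
    model.modelparams = {}
    model.name = 'testmodel'
    model.Output.names = ['y']
    # Without this attribute, running an 'umbridge' model now raises an
    # AttributeError; the x_values are attached to the output dictionary.
    model.x_values = np.array([0])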