From 47b0da8bd2043f546b3360502b458a70c22ad64d Mon Sep 17 00:00:00 2001
From: Farid Mohammadi <farid.mohammadi@iws.uni-stuttgart.de>
Date: Mon, 12 Sep 2022 10:56:42 +0200
Subject: [PATCH] [example][model-comparison] enable emulator and add new test setting.

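Enable the emulator in the BME bootstrap options ("emulator": True), pass
the new just_n_meas=2 setting to BayesModelComparison, and drop a stale
inline comment on n_samples. Also rename metaModels, OptsDict_MCMC, and
OptsDict_Bootstrap to snake_case and retitle the section banner to match
the multimodel-comparison step.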
---
 .../model-comparison/test_model_comparison.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

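Reviewer note (this block sits between the "---" separator and the diff, so
git am ignores it): for context, a condensed sketch of the call sequence
after this patch is given below. L2_MetaModel, NL4_MetaModel, and
DiscrepancyOpts are built earlier in test_model_comparison.py and are not
part of this diff; the bayesvalidrox import path is an assumption, not
shown in the hunks.

    from bayesvalidrox import BayesModelComparison  # assumed import path

    meta_models = {            # surrogates trained earlier in the script
        "linear": L2_MetaModel,
        "exponential": NL4_MetaModel,
        "cosine": NL4_MetaModel,
    }

    opts_bootstrap = {         # Option II: BME bootstrap, via the emulator
        "bootstrap": True,
        "n_samples": 10000,
        "Discrepancy": DiscrepancyOpts,
        "emulator": True,
        "plot_post_pred": False,
    }

    BayesOpts = BayesModelComparison(
        justifiability=True,
        n_bootstarp=1000,      # spelling per the library's parameter name
        just_n_meas=2,         # new test setting
    )
    output_dict = BayesOpts.create_model_comparison(meta_models, opts_bootstrap)
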
diff --git a/examples/model-comparison/test_model_comparison.py b/examples/model-comparison/test_model_comparison.py
index 00a3ce518..b353cca28 100644
--- a/examples/model-comparison/test_model_comparison.py
+++ b/examples/model-comparison/test_model_comparison.py
@@ -239,7 +239,7 @@ if __name__ == "__main__":
     sobol_cell, total_sobol = NL4_PostPCE.sobol_indices()
 
     # =====================================================
-    # ========  Bayesian inference with Emulator ==========
+    # =========  BAYESIAN MULTIMODEL COMPARISON ===========
     # =====================================================
     # ----- Define the discrepancy model -------
     sigma = np.ones(15) * np.array(sigma).flatten()
@@ -248,7 +248,7 @@ if __name__ == "__main__":
     DiscrepancyOpts.parameters = pd.DataFrame(sigma**2, columns=['Z'])
 
     # ----- Define the options model -------
-    metaModels = {
+    meta_models = {
         "linear": L2_MetaModel,
         "exponential": NL4_MetaModel,
         "cosine": NL4_MetaModel
@@ -261,7 +261,7 @@ if __name__ == "__main__":
         'multiprocessing': False,
         'verbose': False
         }
-    OptsDict_MCMC = {
+    opts_mcmc = {
         "inference_method": "MCMC",
         "mcmc_params": mcmc_params,
         "Discrepancy": DiscrepancyOpts,
@@ -270,22 +270,23 @@ if __name__ == "__main__":
         }
 
     # Option II: BME Bootstrap
-    OptsDict_Bootstrap = {
+    opts_bootstrap = {
         "bootstrap": True,
-        "n_samples": 10000,#10000,
+        "n_samples": 10000,
         "Discrepancy": DiscrepancyOpts,
-        "emulator": False,
+        "emulator": True,
         "plot_post_pred": False
         }
 
     # Run model comparison
     BayesOpts = BayesModelComparison(
         justifiability=True,
-        n_bootstarp=1000
+        n_bootstarp=1000,  # NB: 'n_bootstarp' spelling matches the library's parameter name
+        just_n_meas=2  # new test setting added by this patch
         )
     output_dict = BayesOpts.create_model_comparison(
-        metaModels,
-        OptsDict_Bootstrap
+        meta_models,
+        opts_bootstrap
         )
 
     # Save the results
-- 
GitLab