From 4067c4a88dbd97b60570f31bb86faa46c4bec7d2 Mon Sep 17 00:00:00 2001
From: kohlhaasrebecca <rebecca.kohlhaas@outlook.com>
Date: Thu, 18 Jul 2024 16:27:30 +0200
Subject: [PATCH] [docs][userguide] Match website to the example

---
 docs/source/bayes_description.rst         |  2 +-
 docs/source/model_description.rst         | 20 ++++++++++---
 examples/user_guide/example_user_guide.py | 40 +++++++++++++++++++------
 examples/user_guide/model.py              |  2 +-
 4 files changed, 48 insertions(+), 16 deletions(-)

diff --git a/docs/source/bayes_description.rst b/docs/source/bayes_description.rst
index 0cb36d10f..6dac51ad1 100644
--- a/docs/source/bayes_description.rst
+++ b/docs/source/bayes_description.rst
@@ -76,7 +76,7 @@ For this example we use the python package ``emcee`` to define the MCMC moves.
 >>> BayesObj.inference_method = 'MCMC'
 >>> import emcee
 >>> BayesObj.mcmc_params = {
->>>     'n_steps': 1e4,#5,
+>>>     'n_steps': 1e4,
 >>>     'n_walkers': 30,
 >>>     'moves': emcee.moves.KDEMove(),
 >>>     'multiprocessing': False,
diff --git a/docs/source/model_description.rst b/docs/source/model_description.rst
index 923f20a7c..42103caa6 100644
--- a/docs/source/model_description.rst
+++ b/docs/source/model_description.rst
@@ -25,9 +25,11 @@ This function takes a single realization of the uncertain parameter as a 2-dimen
 Here we use the key ``A`` for the sample values and ``B`` for their squares.
 Under the key ``x_values`` a list should be given that is of the same length as each output of the model for a single input.
 The values in this list can denote e.g. timesteps and are used in postprocessing as labels of the x-axis.
+If we want to set the ``x_values`` outside of the model, they can also be passed to the model function as an additional parameter.
 
->>> def model(sample):
->>>     square = sample*sample
+>>> def model(samples, x_values):
+>>>     sample = samples[0]*x_values
+>>>     square = np.power(sample, 2)
->>>     outputs = {'A': sample, 'B': square, 'x_values': [0]}
+>>>     outputs = {'A': sample, 'B': square, 'x_values': x_values}
 >>>     return outputs
 
@@ -43,7 +45,17 @@ Lastly we list the keys of the outputs that we are interested in.
 >>> Model.link_type = 'Function'
 >>> Model.py_file = 'model'
 >>> Model.name = 'model'
->>> Model.Output.names = ['A', 'B']
+>>> Model.Output.names = ['A']
+
+Any parameters of the model function other than the samples can be set via the ``func_args`` attribute.
+In this case we define ``x_values`` as a NumPy array and include it.
+
+>>> x_values = np.arange(0, 1, 0.1)
+>>> Model.func_args = {'x_values': x_values}
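+
+As a quick, purely illustrative sanity check, the model function defined above can also be called directly on a single sample (the value ``2.0`` is arbitrary):
+
+>>> model(np.array([[2.0]]), x_values)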
 
 With this we have completed an interface to our model.
 We can now evaluate this model on the samples created in the input example.
diff --git a/examples/user_guide/example_user_guide.py b/examples/user_guide/example_user_guide.py
index ea6ea0603..50b555f3b 100644
--- a/examples/user_guide/example_user_guide.py
+++ b/examples/user_guide/example_user_guide.py
@@ -15,7 +15,7 @@ import matplotlib
 
 # Add BayesValidRox path
 sys.path.append("../../src/")
-from bayesvalidrox import Input, ExpDesigns, PyLinkForwardModel, MetaModel, Engine, PostProcessing, Discrepancy, BayesInference
+from bayesvalidrox import Input, ExpDesigns, PyLinkForwardModel, MetaModel, Engine, PostProcessing, Discrepancy, BayesInference, BayesModelComparison
 
 if __name__ == '__main__':
     #### Priors, input space and experimental design
@@ -102,7 +102,7 @@ if __name__ == '__main__':
     PostProc.sobol_indices()
     #PostProc.plot_seq_design_diagnostics()
     
-    # TODO: sanity check - test on training data
+    # Sanity check - test on training data
     mean, stdev = Engine_.eval_metamodel(Engine_.ExpDesign.X)
     print(mean['A']-Engine_.ExpDesign.Y['A'])
     
@@ -130,7 +130,7 @@ if __name__ == '__main__':
     BayesObj.inference_method = 'MCMC'
     import emcee
     BayesObj.mcmc_params = {
-        'n_steps': 1e4,#5
+        'n_steps': 1e4,
         'n_walkers': 30,
         'moves': emcee.moves.KDEMove(),
         'multiprocessing': False,
@@ -156,13 +156,10 @@ if __name__ == '__main__':
     
     ExpDesign1 = ExpDesigns(Inputs)
     ExpDesign1.n_init_samples = 30
-    ExpDesign1.sampling_method = 'random'#'user'
-    #ExpDesign1.X = samples
+    ExpDesign1.sampling_method = 'random'
     
     Engine_1 = Engine(MetaMod1, Model1, ExpDesign1)
     Engine_1.train_normal()
-    # TODO: sanity checks for these two models look great, differently from the model above, perhaps just an issue of the training data?
-    
     
     Model2 = PyLinkForwardModel()
     Model2.link_type = 'Function'
@@ -180,8 +177,31 @@
     
     ExpDesign2 = ExpDesigns(Inputs)
     ExpDesign2.n_init_samples = 30
-    ExpDesign2.sampling_method = 'random'#'user'
-    #ExpDesign1.X = samples
+    ExpDesign2.sampling_method = 'random'
     
     Engine_2 = Engine(MetaMod2, Model2, ExpDesign2)
-    Engine_2.train_normal()
\ No newline at end of file
+    Engine_2.train_normal()
+    
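+    # Trained engines of the competing models, keyed by name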
+    meta_models = {
+        "linear": Engine_,
+        #"square": Engine_1,
+        "degthree": Engine_2
+        }
+
+    BayesOpts = BayesModelComparison()
+    
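+    # Settings for the bootstrap-based model comparison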
+    opts_bootstrap = {
+        "bootstrap": True,
+        "n_samples": 100,
+        "Discrepancy": DiscrepancyOpts,
+        "emulator": True,
+        "plot_post_pred": False
+        }
+
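+    # Run the Bayesian model comparison for all given models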
+    output_dict = BayesOpts.model_comparison_all(
+        meta_models,
+        opts_bootstrap
+        )
\ No newline at end of file
diff --git a/examples/user_guide/model.py b/examples/user_guide/model.py
index 0974cb1fa..744acc6f0 100644
--- a/examples/user_guide/model.py
+++ b/examples/user_guide/model.py
@@ -8,6 +8,6 @@ import numpy as np
 
 def model(samples, x_values):
     samples = samples[0]*x_values
-    square = np.power(samples*x_values, 2)
+    square = np.power(samples, 2)
     outputs = {'A': samples, 'B': square, 'x_values': x_values}
     return outputs
\ No newline at end of file
-- 
GitLab