diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index d0c3cbf1020d5c292abdedf27627c6abe25e2293..0000000000000000000000000000000000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line, and also
-# from the environment for the first two.
-SPHINXOPTS    ?=
-SPHINXBUILD   ?= sphinx-build
-SOURCEDIR     = source
-BUILDDIR      = build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/build/doctrees/discrepancy_GP.doctree b/docs/build/doctrees/discrepancy_GP.doctree
deleted file mode 100644
index 6818a93f9d2e2941e43bd09ddbf01ded7f8abd9f..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/discrepancy_GP.doctree and /dev/null differ
diff --git a/docs/build/doctrees/discrepancy_GP_v1.doctree b/docs/build/doctrees/discrepancy_GP_v1.doctree
deleted file mode 100644
index ffd710ac7dd1f8878f06c82bdd56e063ea65c24a..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/discrepancy_GP_v1.doctree and /dev/null differ
diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle
deleted file mode 100644
index c5e6167efa07679df6a6abcf5d17df8298a9b431..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/environment.pickle and /dev/null differ
diff --git a/docs/build/doctrees/example.doctree b/docs/build/doctrees/example.doctree
deleted file mode 100644
index 9672111862a03668716ed334270c8180a0862c31..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/example.doctree and /dev/null differ
diff --git a/docs/build/doctrees/index.doctree b/docs/build/doctrees/index.doctree
deleted file mode 100644
index 3af49c70557a6a7391368ed928c476fe33c06076..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/index.doctree and /dev/null differ
diff --git a/docs/build/doctrees/mcmc.doctree b/docs/build/doctrees/mcmc.doctree
deleted file mode 100644
index 77e21e1d37a441f1a1ff5f7c66ebf4d57bf26e5a..0000000000000000000000000000000000000000
Binary files a/docs/build/doctrees/mcmc.doctree and /dev/null differ
diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo
deleted file mode 100644
index 145138a4d50656fe26087aff4d8dc812e5870eaf..0000000000000000000000000000000000000000
--- a/docs/build/html/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: c83e204b85f6ea5688aa5e0f5b58147d
-tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/build/html/_sources/discrepancy_GP.md.txt b/docs/build/html/_sources/discrepancy_GP.md.txt
deleted file mode 100644
index 94cff5234c8e04cb1658d0fbcbb1cdf56a7c5776..0000000000000000000000000000000000000000
--- a/docs/build/html/_sources/discrepancy_GP.md.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-<!-- markdownlint-disable -->
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP.py#L0"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-# <kbd>module</kbd> `discrepancy_GP`
-Created on Sun Nov  7 09:42:33 2021 
-
-@author: farid 
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP.py#L14"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-## <kbd>class</kbd> `Bias`
-
-
-
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP.py#L15"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `__init__`
-
-```python
-__init__(verbose=False)
-```
-
-
-
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP.py#L18"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `fit_bias`
-
-```python
-fit_bias(EDX, ED_Y, Data)
-```
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP.py#L73"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `predict`
-
-```python
-predict(X, Output, return_std=True)
-```
-
-
-
-
-
-
-
-
----
-
-_This file was automatically generated via [lazydocs](https://github.com/ml-tooling/lazydocs)._
diff --git a/docs/build/html/_sources/discrepancy_GP_v1.md.txt b/docs/build/html/_sources/discrepancy_GP_v1.md.txt
deleted file mode 100644
index 67ec6b8b804b2b99d7bf4b5cb8091d76f593b98e..0000000000000000000000000000000000000000
--- a/docs/build/html/_sources/discrepancy_GP_v1.md.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-<!-- markdownlint-disable -->
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L0"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-# <kbd>module</kbd> `discrepancy_GP_v1`
-Created on Sun Nov  7 09:42:33 2021 
-
-@author: farid 
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L14"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-## <kbd>class</kbd> `Bias`
-
-
-
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L15"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `__init__`
-
-```python
-__init__(verbose=False)
-```
-
-
-
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L18"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `fit_bias`
-
-```python
-fit_bias(BiasInputs, ED_Y, Data)
-```
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L74"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `oldpredict`
-
-```python
-oldpredict(Y, Output, BiasInputs=None, return_cov=True)
-```
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py#L81"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `predict`
-
-```python
-predict(Y, Output, BiasInputs=None, return_cov=True)
-```
-
-
-
-
-
-
-
-
----
-
-_This file was automatically generated via [lazydocs](https://github.com/ml-tooling/lazydocs)._
diff --git a/docs/build/html/_sources/example.md.txt b/docs/build/html/_sources/example.md.txt
deleted file mode 100644
index 65b132ad46d79feeb1fa62b90d91aa77f9d55fb1..0000000000000000000000000000000000000000
--- a/docs/build/html/_sources/example.md.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# My nifty title
-
-Some **text**!
-
-```{admonition} Here's my title
-:class: warning
-
-Here's my admonition content
-```
diff --git a/docs/build/html/_sources/index.rst.txt b/docs/build/html/_sources/index.rst.txt
deleted file mode 100644
index e53f4bfb9f37de2bbb9718c532a55ccc142b1ea5..0000000000000000000000000000000000000000
--- a/docs/build/html/_sources/index.rst.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-.. bayesvalidrox documentation master file, created by
-   sphinx-quickstart on Fri Mar 11 08:09:15 2022.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to bayesvalidrox's documentation!
-=========================================
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-   example.md
-   pylink.html
-
-
-This is a normal text paragraph. The next paragraph is a code sample::
-
-   It is not processed in any way, except
-   that the indentation is removed.
-
-   It can span multiple lines.
-
-This is a normal text paragraph again.
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-* :ref:`pylink`
diff --git a/docs/build/html/_sources/mcmc.md.txt b/docs/build/html/_sources/mcmc.md.txt
deleted file mode 100644
index f78d0d2ad78d2cb3f2cf2b3975e5ed79583b9c30..0000000000000000000000000000000000000000
--- a/docs/build/html/_sources/mcmc.md.txt
+++ /dev/null
@@ -1,187 +0,0 @@
-<!-- markdownlint-disable -->
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L0"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-# <kbd>module</kbd> `mcmc`
-MCMC class for Bayesian inference with the emcee package, using an affine-invariant Markov chain Monte Carlo (MCMC) ensemble sampler [1].
-
-1. Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee: the MCMC hammer. Publications of the Astronomical Society of the Pacific, 125(925), p.306. https://emcee.readthedocs.io/en/stable/ 
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University of Stuttgart
-www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61, 70569 Stuttgart
-
-Created on Wed Jun 3 2020 
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L36"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-## <kbd>class</kbd> `MCMC`
-A class for Bayesian inference using a Markov chain Monte Carlo (MCMC) sampler.
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L41"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `__init__`
-
-```python
-__init__(BayesOpts)
-```
-
-
-
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L543"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `eval_model`
-
-```python
-eval_model(theta)
-```
-
-Evaluates the (meta-)model at the given theta.
-
-Parameters
-----------
-theta : array of shape (n_samples, n_params)
-    Parameter set, i.e. proposals of the MCMC chains.
-
-Returns
--------
-mean_pred : dict
-    Mean model prediction.
-std_pred : dict
-    Std of model prediction.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L650"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `gelman_rubin`
-
-```python
-gelman_rubin(chain, return_var=False)
-```
-
-The potential scale reduction factor (PSRF) is defined in terms of the variance within one chain, W, and the variance between chains, B. Both variances are combined in a weighted sum to obtain an estimate of the variance of a parameter θ. The square root of the ratio of this estimated variance to the within-chain variance is called the potential scale reduction. For a well-converged chain it should approach 1. Values greater than about 1.1 indicate that the chains have not yet fully converged.
-
-Source: http://joergdietrich.github.io/emcee-convergence.html 
-
-https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py 
-
-Parameters
-----------
-chain : array of shape (n_walkers, n_steps, n_params)
-    The MCMC chain samples.
-
-Returns
--------
-R_hat : float
-    The Gelman-Rubin value.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L446"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `log_likelihood`
-
-```python
-log_likelihood(theta)
-```
-
-Computes the likelihood p(y|theta, obs) of the (meta-)model's performance in reproducing the observation data.
-
-Parameters
-----------
-theta : array of shape (n_samples, n_params)
-    Parameter set, i.e. proposals of the MCMC chains.
-
-Returns
--------
-log_like : array of shape (n_samples)
-    Log likelihood.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L495"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `log_posterior`
-
-```python
-log_posterior(theta)
-```
-
-Computes the posterior likelihood p(theta|obs) for the given parameter set.
-
-Parameters
-----------
-theta : array of shape (n_samples, n_params)
-    Parameter set, i.e. proposals of the MCMC chains.
-
-Returns
--------
-log_like : array of shape (n_samples)
-    Log posterior likelihood.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L381"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `log_prior`
-
-```python
-log_prior(theta)
-```
-
-Calculates the log prior likelihood for the given parameter set(s) theta. 
-
-Parameters
-----------
-theta : array of shape (n_samples, n_params)
-    Parameter sets, i.e. proposals of MCMC chains.
-
-Returns
--------
-logprior : float or array of shape (n_samples)
-    Log prior likelihood. If theta has only one row, a single value is returned; otherwise an array.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L700"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `marginal_llk_emcee`
-
-```python
-marginal_llk_emcee(sampler, nburn=None, logp=None, maxiter=1000)
-```
-
-The bridge sampling estimator of the marginal likelihood, based on https://gist.github.com/junpenglao/4d2669d69ddfe1d788318264cdcf0583.
-
-Parameters
-----------
-sampler : TYPE
-    MultiTrace, result of the MCMC run.
-nburn : int, optional
-    Number of burn-in steps. The default is None.
-logp : TYPE, optional
-    Model log-probability function. The default is None.
-maxiter : int, optional
-    Maximum number of iterations. The default is 1000.
-
-Returns
--------
-marg_llk : dict
-    Estimated marginal log-likelihood.
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L45"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `run_sampler`
-
-```python
-run_sampler(observation, total_sigma2)
-```
-
-
-
-
-
----
-
-<a href="../src/bayesvalidrox/bayes_inference/mcmc.py#L604"><img align="right" style="float:right;" src="https://img.shields.io/badge/-source-cccccc?style=flat-square"></a>
-
-### <kbd>method</kbd> `train_error_model`
-
-```python
-train_error_model(sampler)
-```
-
-Trains an error model using Gaussian process regression.
-
-Parameters
-----------
-sampler : obj
-    emcee sampler.
-
-Returns
--------
-error_MetaModel : obj
-    An error model.
-
-
-
-
----
-
-_This file was automatically generated via [lazydocs](https://github.com/ml-tooling/lazydocs)._
diff --git a/docs/build/html/_static/alabaster.css b/docs/build/html/_static/alabaster.css
deleted file mode 100644
index 0eddaeb07d19bffb50884d7bd7996418b6461a09..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/alabaster.css
+++ /dev/null
@@ -1,701 +0,0 @@
-@import url("basic.css");
-
-/* -- page layout ----------------------------------------------------------- */
-
-body {
-    font-family: Georgia, serif;
-    font-size: 17px;
-    background-color: #fff;
-    color: #000;
-    margin: 0;
-    padding: 0;
-}
-
-
-div.document {
-    width: 940px;
-    margin: 30px auto 0 auto;
-}
-
-div.documentwrapper {
-    float: left;
-    width: 100%;
-}
-
-div.bodywrapper {
-    margin: 0 0 0 220px;
-}
-
-div.sphinxsidebar {
-    width: 220px;
-    font-size: 14px;
-    line-height: 1.5;
-}
-
-hr {
-    border: 1px solid #B1B4B6;
-}
-
-div.body {
-    background-color: #fff;
-    color: #3E4349;
-    padding: 0 30px 0 30px;
-}
-
-div.body > .section {
-    text-align: left;
-}
-
-div.footer {
-    width: 940px;
-    margin: 20px auto 30px auto;
-    font-size: 14px;
-    color: #888;
-    text-align: right;
-}
-
-div.footer a {
-    color: #888;
-}
-
-p.caption {
-    font-family: inherit;
-    font-size: inherit;
-}
-
-
-div.relations {
-    display: none;
-}
-
-
-div.sphinxsidebar a {
-    color: #444;
-    text-decoration: none;
-    border-bottom: 1px dotted #999;
-}
-
-div.sphinxsidebar a:hover {
-    border-bottom: 1px solid #999;
-}
-
-div.sphinxsidebarwrapper {
-    padding: 18px 10px;
-}
-
-div.sphinxsidebarwrapper p.logo {
-    padding: 0;
-    margin: -10px 0 0 0px;
-    text-align: center;
-}
-
-div.sphinxsidebarwrapper h1.logo {
-    margin-top: -10px;
-    text-align: center;
-    margin-bottom: 5px;
-    text-align: left;
-}
-
-div.sphinxsidebarwrapper h1.logo-name {
-    margin-top: 0px;
-}
-
-div.sphinxsidebarwrapper p.blurb {
-    margin-top: 0;
-    font-style: normal;
-}
-
-div.sphinxsidebar h3,
-div.sphinxsidebar h4 {
-    font-family: Georgia, serif;
-    color: #444;
-    font-size: 24px;
-    font-weight: normal;
-    margin: 0 0 5px 0;
-    padding: 0;
-}
-
-div.sphinxsidebar h4 {
-    font-size: 20px;
-}
-
-div.sphinxsidebar h3 a {
-    color: #444;
-}
-
-div.sphinxsidebar p.logo a,
-div.sphinxsidebar h3 a,
-div.sphinxsidebar p.logo a:hover,
-div.sphinxsidebar h3 a:hover {
-    border: none;
-}
-
-div.sphinxsidebar p {
-    color: #555;
-    margin: 10px 0;
-}
-
-div.sphinxsidebar ul {
-    margin: 10px 0;
-    padding: 0;
-    color: #000;
-}
-
-div.sphinxsidebar ul li.toctree-l1 > a {
-    font-size: 120%;
-}
-
-div.sphinxsidebar ul li.toctree-l2 > a {
-    font-size: 110%;
-}
-
-div.sphinxsidebar input {
-    border: 1px solid #CCC;
-    font-family: Georgia, serif;
-    font-size: 1em;
-}
-
-div.sphinxsidebar hr {
-    border: none;
-    height: 1px;
-    color: #AAA;
-    background: #AAA;
-
-    text-align: left;
-    margin-left: 0;
-    width: 50%;
-}
-
-div.sphinxsidebar .badge {
-    border-bottom: none;
-}
-
-div.sphinxsidebar .badge:hover {
-    border-bottom: none;
-}
-
-/* To address an issue with donation coming after search */
-div.sphinxsidebar h3.donation {
-    margin-top: 10px;
-}
-
-/* -- body styles ----------------------------------------------------------- */
-
-a {
-    color: #004B6B;
-    text-decoration: underline;
-}
-
-a:hover {
-    color: #6D4100;
-    text-decoration: underline;
-}
-
-div.body h1,
-div.body h2,
-div.body h3,
-div.body h4,
-div.body h5,
-div.body h6 {
-    font-family: Georgia, serif;
-    font-weight: normal;
-    margin: 30px 0px 10px 0px;
-    padding: 0;
-}
-
-div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
-div.body h2 { font-size: 180%; }
-div.body h3 { font-size: 150%; }
-div.body h4 { font-size: 130%; }
-div.body h5 { font-size: 100%; }
-div.body h6 { font-size: 100%; }
-
-a.headerlink {
-    color: #DDD;
-    padding: 0 4px;
-    text-decoration: none;
-}
-
-a.headerlink:hover {
-    color: #444;
-    background: #EAEAEA;
-}
-
-div.body p, div.body dd, div.body li {
-    line-height: 1.4em;
-}
-
-div.admonition {
-    margin: 20px 0px;
-    padding: 10px 30px;
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
-    background-color: #FBFBFB;
-    border-bottom: 1px solid #fafafa;
-}
-
-div.admonition p.admonition-title {
-    font-family: Georgia, serif;
-    font-weight: normal;
-    font-size: 24px;
-    margin: 0 0 10px 0;
-    padding: 0;
-    line-height: 1;
-}
-
-div.admonition p.last {
-    margin-bottom: 0;
-}
-
-div.highlight {
-    background-color: #fff;
-}
-
-dt:target, .highlight {
-    background: #FAF3E8;
-}
-
-div.warning {
-    background-color: #FCC;
-    border: 1px solid #FAA;
-}
-
-div.danger {
-    background-color: #FCC;
-    border: 1px solid #FAA;
-    -moz-box-shadow: 2px 2px 4px #D52C2C;
-    -webkit-box-shadow: 2px 2px 4px #D52C2C;
-    box-shadow: 2px 2px 4px #D52C2C;
-}
-
-div.error {
-    background-color: #FCC;
-    border: 1px solid #FAA;
-    -moz-box-shadow: 2px 2px 4px #D52C2C;
-    -webkit-box-shadow: 2px 2px 4px #D52C2C;
-    box-shadow: 2px 2px 4px #D52C2C;
-}
-
-div.caution {
-    background-color: #FCC;
-    border: 1px solid #FAA;
-}
-
-div.attention {
-    background-color: #FCC;
-    border: 1px solid #FAA;
-}
-
-div.important {
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.note {
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.tip {
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.hint {
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.seealso {
-    background-color: #EEE;
-    border: 1px solid #CCC;
-}
-
-div.topic {
-    background-color: #EEE;
-}
-
-p.admonition-title {
-    display: inline;
-}
-
-p.admonition-title:after {
-    content: ":";
-}
-
-pre, tt, code {
-    font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.9em;
-}
-
-.hll {
-    background-color: #FFC;
-    margin: 0 -12px;
-    padding: 0 12px;
-    display: block;
-}
-
-img.screenshot {
-}
-
-tt.descname, tt.descclassname, code.descname, code.descclassname {
-    font-size: 0.95em;
-}
-
-tt.descname, code.descname {
-    padding-right: 0.08em;
-}
-
-img.screenshot {
-    -moz-box-shadow: 2px 2px 4px #EEE;
-    -webkit-box-shadow: 2px 2px 4px #EEE;
-    box-shadow: 2px 2px 4px #EEE;
-}
-
-table.docutils {
-    border: 1px solid #888;
-    -moz-box-shadow: 2px 2px 4px #EEE;
-    -webkit-box-shadow: 2px 2px 4px #EEE;
-    box-shadow: 2px 2px 4px #EEE;
-}
-
-table.docutils td, table.docutils th {
-    border: 1px solid #888;
-    padding: 0.25em 0.7em;
-}
-
-table.field-list, table.footnote {
-    border: none;
-    -moz-box-shadow: none;
-    -webkit-box-shadow: none;
-    box-shadow: none;
-}
-
-table.footnote {
-    margin: 15px 0;
-    width: 100%;
-    border: 1px solid #EEE;
-    background: #FDFDFD;
-    font-size: 0.9em;
-}
-
-table.footnote + table.footnote {
-    margin-top: -15px;
-    border-top: none;
-}
-
-table.field-list th {
-    padding: 0 0.8em 0 0;
-}
-
-table.field-list td {
-    padding: 0;
-}
-
-table.field-list p {
-    margin-bottom: 0.8em;
-}
-
-/* Cloned from
- * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68
- */
-.field-name {
-    -moz-hyphens: manual;
-    -ms-hyphens: manual;
-    -webkit-hyphens: manual;
-    hyphens: manual;
-}
-
-table.footnote td.label {
-    width: .1px;
-    padding: 0.3em 0 0.3em 0.5em;
-}
-
-table.footnote td {
-    padding: 0.3em 0.5em;
-}
-
-dl {
-    margin: 0;
-    padding: 0;
-}
-
-dl dd {
-    margin-left: 30px;
-}
-
-blockquote {
-    margin: 0 0 0 30px;
-    padding: 0;
-}
-
-ul, ol {
-    /* Matches the 30px from the narrow-screen "li > ul" selector below */
-    margin: 10px 0 10px 30px;
-    padding: 0;
-}
-
-pre {
-    background: #EEE;
-    padding: 7px 30px;
-    margin: 15px 0px;
-    line-height: 1.3em;
-}
-
-div.viewcode-block:target {
-    background: #ffd;
-}
-
-dl pre, blockquote pre, li pre {
-    margin-left: 0;
-    padding-left: 30px;
-}
-
-tt, code {
-    background-color: #ecf0f3;
-    color: #222;
-    /* padding: 1px 2px; */
-}
-
-tt.xref, code.xref, a tt {
-    background-color: #FBFBFB;
-    border-bottom: 1px solid #fff;
-}
-
-a.reference {
-    text-decoration: none;
-    border-bottom: 1px dotted #004B6B;
-}
-
-/* Don't put an underline on images */
-a.image-reference, a.image-reference:hover {
-    border-bottom: none;
-}
-
-a.reference:hover {
-    border-bottom: 1px solid #6D4100;
-}
-
-a.footnote-reference {
-    text-decoration: none;
-    font-size: 0.7em;
-    vertical-align: top;
-    border-bottom: 1px dotted #004B6B;
-}
-
-a.footnote-reference:hover {
-    border-bottom: 1px solid #6D4100;
-}
-
-a:hover tt, a:hover code {
-    background: #EEE;
-}
-
-
-@media screen and (max-width: 870px) {
-
-    div.sphinxsidebar {
-    	display: none;
-    }
-
-    div.document {
-       width: 100%;
-
-    }
-
-    div.documentwrapper {
-    	margin-left: 0;
-    	margin-top: 0;
-    	margin-right: 0;
-    	margin-bottom: 0;
-    }
-
-    div.bodywrapper {
-    	margin-top: 0;
-    	margin-right: 0;
-    	margin-bottom: 0;
-    	margin-left: 0;
-    }
-
-    ul {
-    	margin-left: 0;
-    }
-
-	li > ul {
-        /* Matches the 30px from the "ul, ol" selector above */
-		margin-left: 30px;
-	}
-
-    .document {
-    	width: auto;
-    }
-
-    .footer {
-    	width: auto;
-    }
-
-    .bodywrapper {
-    	margin: 0;
-    }
-
-    .footer {
-    	width: auto;
-    }
-
-    .github {
-        display: none;
-    }
-
-
-
-}
-
-
-
-@media screen and (max-width: 875px) {
-
-    body {
-        margin: 0;
-        padding: 20px 30px;
-    }
-
-    div.documentwrapper {
-        float: none;
-        background: #fff;
-    }
-
-    div.sphinxsidebar {
-        display: block;
-        float: none;
-        width: 102.5%;
-        margin: 50px -30px -20px -30px;
-        padding: 10px 20px;
-        background: #333;
-        color: #FFF;
-    }
-
-    div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
-    div.sphinxsidebar h3 a {
-        color: #fff;
-    }
-
-    div.sphinxsidebar a {
-        color: #AAA;
-    }
-
-    div.sphinxsidebar p.logo {
-        display: none;
-    }
-
-    div.document {
-        width: 100%;
-        margin: 0;
-    }
-
-    div.footer {
-        display: none;
-    }
-
-    div.bodywrapper {
-        margin: 0;
-    }
-
-    div.body {
-        min-height: 0;
-        padding: 0;
-    }
-
-    .rtd_doc_footer {
-        display: none;
-    }
-
-    .document {
-        width: auto;
-    }
-
-    .footer {
-        width: auto;
-    }
-
-    .footer {
-        width: auto;
-    }
-
-    .github {
-        display: none;
-    }
-}
-
-
-/* misc. */
-
-.revsys-inline {
-    display: none!important;
-}
-
-/* Make nested-list/multi-paragraph items look better in Releases changelog
- * pages. Without this, docutils' magical list fuckery causes inconsistent
- * formatting between different release sub-lists.
- */
-div#changelog > div.section > ul > li > p:only-child {
-    margin-bottom: 0;
-}
-
-/* Hide fugly table cell borders in ..bibliography:: directive output */
-table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
-  border: none;
-  /* Below needed in some edge cases; if not applied, bottom shadows appear */
-  -moz-box-shadow: none;
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-
-
-/* relbar */
-
-.related {
-    line-height: 30px;
-    width: 100%;
-    font-size: 0.9rem;
-}
-
-.related.top {
-    border-bottom: 1px solid #EEE;
-    margin-bottom: 20px;
-}
-
-.related.bottom {
-    border-top: 1px solid #EEE;
-}
-
-.related ul {
-    padding: 0;
-    margin: 0;
-    list-style: none;
-}
-
-.related li {
-    display: inline;
-}
-
-nav#rellinks {
-    float: right;
-}
-
-nav#rellinks li+li:before {
-    content: "|";
-}
-
-nav#breadcrumbs li+li:before {
-    content: "\00BB";
-}
-
-/* Hide certain items when printing */
-@media print {
-    div.related {
-        display: none;
-    }
-}
\ No newline at end of file
diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css
deleted file mode 100644
index bf18350b65c61f31b2f9f717c03e02f17c0ab4f1..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/basic.css
+++ /dev/null
@@ -1,906 +0,0 @@
-/*
- * basic.css
- * ~~~~~~~~~
- *
- * Sphinx stylesheet -- basic theme.
- *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-/* -- main layout ----------------------------------------------------------- */
-
-div.clearer {
-    clear: both;
-}
-
-div.section::after {
-    display: block;
-    content: '';
-    clear: left;
-}
-
-/* -- relbar ---------------------------------------------------------------- */
-
-div.related {
-    width: 100%;
-    font-size: 90%;
-}
-
-div.related h3 {
-    display: none;
-}
-
-div.related ul {
-    margin: 0;
-    padding: 0 0 0 10px;
-    list-style: none;
-}
-
-div.related li {
-    display: inline;
-}
-
-div.related li.right {
-    float: right;
-    margin-right: 5px;
-}
-
-/* -- sidebar --------------------------------------------------------------- */
-
-div.sphinxsidebarwrapper {
-    padding: 10px 5px 0 10px;
-}
-
-div.sphinxsidebar {
-    float: left;
-    width: 230px;
-    margin-left: -100%;
-    font-size: 90%;
-    word-wrap: break-word;
-    overflow-wrap : break-word;
-}
-
-div.sphinxsidebar ul {
-    list-style: none;
-}
-
-div.sphinxsidebar ul ul,
-div.sphinxsidebar ul.want-points {
-    margin-left: 20px;
-    list-style: square;
-}
-
-div.sphinxsidebar ul ul {
-    margin-top: 0;
-    margin-bottom: 0;
-}
-
-div.sphinxsidebar form {
-    margin-top: 10px;
-}
-
-div.sphinxsidebar input {
-    border: 1px solid #98dbcc;
-    font-family: sans-serif;
-    font-size: 1em;
-}
-
-div.sphinxsidebar #searchbox form.search {
-    overflow: hidden;
-}
-
-div.sphinxsidebar #searchbox input[type="text"] {
-    float: left;
-    width: 80%;
-    padding: 0.25em;
-    box-sizing: border-box;
-}
-
-div.sphinxsidebar #searchbox input[type="submit"] {
-    float: left;
-    width: 20%;
-    border-left: none;
-    padding: 0.25em;
-    box-sizing: border-box;
-}
-
-
-img {
-    border: 0;
-    max-width: 100%;
-}
-
-/* -- search page ----------------------------------------------------------- */
-
-ul.search {
-    margin: 10px 0 0 20px;
-    padding: 0;
-}
-
-ul.search li {
-    padding: 5px 0 5px 20px;
-    background-image: url(file.png);
-    background-repeat: no-repeat;
-    background-position: 0 7px;
-}
-
-ul.search li a {
-    font-weight: bold;
-}
-
-ul.search li p.context {
-    color: #888;
-    margin: 2px 0 0 30px;
-    text-align: left;
-}
-
-ul.keywordmatches li.goodmatch a {
-    font-weight: bold;
-}
-
-/* -- index page ------------------------------------------------------------ */
-
-table.contentstable {
-    width: 90%;
-    margin-left: auto;
-    margin-right: auto;
-}
-
-table.contentstable p.biglink {
-    line-height: 150%;
-}
-
-a.biglink {
-    font-size: 1.3em;
-}
-
-span.linkdescr {
-    font-style: italic;
-    padding-top: 5px;
-    font-size: 90%;
-}
-
-/* -- general index --------------------------------------------------------- */
-
-table.indextable {
-    width: 100%;
-}
-
-table.indextable td {
-    text-align: left;
-    vertical-align: top;
-}
-
-table.indextable ul {
-    margin-top: 0;
-    margin-bottom: 0;
-    list-style-type: none;
-}
-
-table.indextable > tbody > tr > td > ul {
-    padding-left: 0em;
-}
-
-table.indextable tr.pcap {
-    height: 10px;
-}
-
-table.indextable tr.cap {
-    margin-top: 10px;
-    background-color: #f2f2f2;
-}
-
-img.toggler {
-    margin-right: 3px;
-    margin-top: 3px;
-    cursor: pointer;
-}
-
-div.modindex-jumpbox {
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 1em 0 1em 0;
-    padding: 0.4em;
-}
-
-div.genindex-jumpbox {
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 1em 0 1em 0;
-    padding: 0.4em;
-}
-
-/* -- domain module index --------------------------------------------------- */
-
-table.modindextable td {
-    padding: 2px;
-    border-collapse: collapse;
-}
-
-/* -- general body styles --------------------------------------------------- */
-
-div.body {
-    min-width: 450px;
-    max-width: 800px;
-}
-
-div.body p, div.body dd, div.body li, div.body blockquote {
-    -moz-hyphens: auto;
-    -ms-hyphens: auto;
-    -webkit-hyphens: auto;
-    hyphens: auto;
-}
-
-a.headerlink {
-    visibility: hidden;
-}
-
-a.brackets:before,
-span.brackets > a:before{
-    content: "[";
-}
-
-a.brackets:after,
-span.brackets > a:after {
-    content: "]";
-}
-
-h1:hover > a.headerlink,
-h2:hover > a.headerlink,
-h3:hover > a.headerlink,
-h4:hover > a.headerlink,
-h5:hover > a.headerlink,
-h6:hover > a.headerlink,
-dt:hover > a.headerlink,
-caption:hover > a.headerlink,
-p.caption:hover > a.headerlink,
-div.code-block-caption:hover > a.headerlink {
-    visibility: visible;
-}
-
-div.body p.caption {
-    text-align: inherit;
-}
-
-div.body td {
-    text-align: left;
-}
-
-.first {
-    margin-top: 0 !important;
-}
-
-p.rubric {
-    margin-top: 30px;
-    font-weight: bold;
-}
-
-img.align-left, figure.align-left, .figure.align-left, object.align-left {
-    clear: left;
-    float: left;
-    margin-right: 1em;
-}
-
-img.align-right, figure.align-right, .figure.align-right, object.align-right {
-    clear: right;
-    float: right;
-    margin-left: 1em;
-}
-
-img.align-center, figure.align-center, .figure.align-center, object.align-center {
-  display: block;
-  margin-left: auto;
-  margin-right: auto;
-}
-
-img.align-default, figure.align-default, .figure.align-default {
-  display: block;
-  margin-left: auto;
-  margin-right: auto;
-}
-
-.align-left {
-    text-align: left;
-}
-
-.align-center {
-    text-align: center;
-}
-
-.align-default {
-    text-align: center;
-}
-
-.align-right {
-    text-align: right;
-}
-
-/* -- sidebars -------------------------------------------------------------- */
-
-div.sidebar,
-aside.sidebar {
-    margin: 0 0 0.5em 1em;
-    border: 1px solid #ddb;
-    padding: 7px;
-    background-color: #ffe;
-    width: 40%;
-    float: right;
-    clear: right;
-    overflow-x: auto;
-}
-
-p.sidebar-title {
-    font-weight: bold;
-}
-
-div.admonition, div.topic, blockquote {
-    clear: left;
-}
-
-/* -- topics ---------------------------------------------------------------- */
-
-div.topic {
-    border: 1px solid #ccc;
-    padding: 7px;
-    margin: 10px 0 10px 0;
-}
-
-p.topic-title {
-    font-size: 1.1em;
-    font-weight: bold;
-    margin-top: 10px;
-}
-
-/* -- admonitions ----------------------------------------------------------- */
-
-div.admonition {
-    margin-top: 10px;
-    margin-bottom: 10px;
-    padding: 7px;
-}
-
-div.admonition dt {
-    font-weight: bold;
-}
-
-p.admonition-title {
-    margin: 0px 10px 5px 0px;
-    font-weight: bold;
-}
-
-div.body p.centered {
-    text-align: center;
-    margin-top: 25px;
-}
-
-/* -- content of sidebars/topics/admonitions -------------------------------- */
-
-div.sidebar > :last-child,
-aside.sidebar > :last-child,
-div.topic > :last-child,
-div.admonition > :last-child {
-    margin-bottom: 0;
-}
-
-div.sidebar::after,
-aside.sidebar::after,
-div.topic::after,
-div.admonition::after,
-blockquote::after {
-    display: block;
-    content: '';
-    clear: both;
-}
-
-/* -- tables ---------------------------------------------------------------- */
-
-table.docutils {
-    margin-top: 10px;
-    margin-bottom: 10px;
-    border: 0;
-    border-collapse: collapse;
-}
-
-table.align-center {
-    margin-left: auto;
-    margin-right: auto;
-}
-
-table.align-default {
-    margin-left: auto;
-    margin-right: auto;
-}
-
-table caption span.caption-number {
-    font-style: italic;
-}
-
-table caption span.caption-text {
-}
-
-table.docutils td, table.docutils th {
-    padding: 1px 8px 1px 5px;
-    border-top: 0;
-    border-left: 0;
-    border-right: 0;
-    border-bottom: 1px solid #aaa;
-}
-
-table.footnote td, table.footnote th {
-    border: 0 !important;
-}
-
-th {
-    text-align: left;
-    padding-right: 5px;
-}
-
-table.citation {
-    border-left: solid 1px gray;
-    margin-left: 1px;
-}
-
-table.citation td {
-    border-bottom: none;
-}
-
-th > :first-child,
-td > :first-child {
-    margin-top: 0px;
-}
-
-th > :last-child,
-td > :last-child {
-    margin-bottom: 0px;
-}
-
-/* -- figures --------------------------------------------------------------- */
-
-div.figure, figure {
-    margin: 0.5em;
-    padding: 0.5em;
-}
-
-div.figure p.caption, figcaption {
-    padding: 0.3em;
-}
-
-div.figure p.caption span.caption-number,
-figcaption span.caption-number {
-    font-style: italic;
-}
-
-div.figure p.caption span.caption-text,
-figcaption span.caption-text {
-}
-
-/* -- field list styles ----------------------------------------------------- */
-
-table.field-list td, table.field-list th {
-    border: 0 !important;
-}
-
-.field-list ul {
-    margin: 0;
-    padding-left: 1em;
-}
-
-.field-list p {
-    margin: 0;
-}
-
-.field-name {
-    -moz-hyphens: manual;
-    -ms-hyphens: manual;
-    -webkit-hyphens: manual;
-    hyphens: manual;
-}
-
-/* -- hlist styles ---------------------------------------------------------- */
-
-table.hlist {
-    margin: 1em 0;
-}
-
-table.hlist td {
-    vertical-align: top;
-}
-
-/* -- object description styles --------------------------------------------- */
-
-.sig {
-	font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
-}
-
-.sig-name, code.descname {
-    background-color: transparent;
-    font-weight: bold;
-}
-
-.sig-name {
-	font-size: 1.1em;
-}
-
-code.descname {
-    font-size: 1.2em;
-}
-
-.sig-prename, code.descclassname {
-    background-color: transparent;
-}
-
-.optional {
-    font-size: 1.3em;
-}
-
-.sig-paren {
-    font-size: larger;
-}
-
-.sig-param.n {
-	font-style: italic;
-}
-
-/* C++ specific styling */
-
-.sig-inline.c-texpr,
-.sig-inline.cpp-texpr {
-	font-family: unset;
-}
-
-.sig.c   .k, .sig.c   .kt,
-.sig.cpp .k, .sig.cpp .kt {
-	color: #0033B3;
-}
-
-.sig.c   .m,
-.sig.cpp .m {
-	color: #1750EB;
-}
-
-.sig.c   .s, .sig.c   .sc,
-.sig.cpp .s, .sig.cpp .sc {
-	color: #067D17;
-}
-
-
-/* -- other body styles ----------------------------------------------------- */
-
-ol.arabic {
-    list-style: decimal;
-}
-
-ol.loweralpha {
-    list-style: lower-alpha;
-}
-
-ol.upperalpha {
-    list-style: upper-alpha;
-}
-
-ol.lowerroman {
-    list-style: lower-roman;
-}
-
-ol.upperroman {
-    list-style: upper-roman;
-}
-
-:not(li) > ol > li:first-child > :first-child,
-:not(li) > ul > li:first-child > :first-child {
-    margin-top: 0px;
-}
-
-:not(li) > ol > li:last-child > :last-child,
-:not(li) > ul > li:last-child > :last-child {
-    margin-bottom: 0px;
-}
-
-ol.simple ol p,
-ol.simple ul p,
-ul.simple ol p,
-ul.simple ul p {
-    margin-top: 0;
-}
-
-ol.simple > li:not(:first-child) > p,
-ul.simple > li:not(:first-child) > p {
-    margin-top: 0;
-}
-
-ol.simple p,
-ul.simple p {
-    margin-bottom: 0;
-}
-
-dl.footnote > dt,
-dl.citation > dt {
-    float: left;
-    margin-right: 0.5em;
-}
-
-dl.footnote > dd,
-dl.citation > dd {
-    margin-bottom: 0em;
-}
-
-dl.footnote > dd:after,
-dl.citation > dd:after {
-    content: "";
-    clear: both;
-}
-
-dl.field-list {
-    display: grid;
-    grid-template-columns: fit-content(30%) auto;
-}
-
-dl.field-list > dt {
-    font-weight: bold;
-    word-break: break-word;
-    padding-left: 0.5em;
-    padding-right: 5px;
-}
-
-dl.field-list > dt:after {
-    content: ":";
-}
-
-dl.field-list > dd {
-    padding-left: 0.5em;
-    margin-top: 0em;
-    margin-left: 0em;
-    margin-bottom: 0em;
-}
-
-dl {
-    margin-bottom: 15px;
-}
-
-dd > :first-child {
-    margin-top: 0px;
-}
-
-dd ul, dd table {
-    margin-bottom: 10px;
-}
-
-dd {
-    margin-top: 3px;
-    margin-bottom: 10px;
-    margin-left: 30px;
-}
-
-dl > dd:last-child,
-dl > dd:last-child > :last-child {
-    margin-bottom: 0;
-}
-
-dt:target, span.highlighted {
-    background-color: #fbe54e;
-}
-
-rect.highlighted {
-    fill: #fbe54e;
-}
-
-dl.glossary dt {
-    font-weight: bold;
-    font-size: 1.1em;
-}
-
-.versionmodified {
-    font-style: italic;
-}
-
-.system-message {
-    background-color: #fda;
-    padding: 5px;
-    border: 3px solid red;
-}
-
-.footnote:target  {
-    background-color: #ffa;
-}
-
-.line-block {
-    display: block;
-    margin-top: 1em;
-    margin-bottom: 1em;
-}
-
-.line-block .line-block {
-    margin-top: 0;
-    margin-bottom: 0;
-    margin-left: 1.5em;
-}
-
-.guilabel, .menuselection {
-    font-family: sans-serif;
-}
-
-.accelerator {
-    text-decoration: underline;
-}
-
-.classifier {
-    font-style: oblique;
-}
-
-.classifier:before {
-    font-style: normal;
-    margin: 0 0.5em;
-    content: ":";
-    display: inline-block;
-}
-
-abbr, acronym {
-    border-bottom: dotted 1px;
-    cursor: help;
-}
-
-/* -- code displays --------------------------------------------------------- */
-
-pre {
-    overflow: auto;
-    overflow-y: hidden;  /* fixes display issues on Chrome browsers */
-}
-
-pre, div[class*="highlight-"] {
-    clear: both;
-}
-
-span.pre {
-    -moz-hyphens: none;
-    -ms-hyphens: none;
-    -webkit-hyphens: none;
-    hyphens: none;
-    white-space: nowrap;
-}
-
-div[class*="highlight-"] {
-    margin: 1em 0;
-}
-
-td.linenos pre {
-    border: 0;
-    background-color: transparent;
-    color: #aaa;
-}
-
-table.highlighttable {
-    display: block;
-}
-
-table.highlighttable tbody {
-    display: block;
-}
-
-table.highlighttable tr {
-    display: flex;
-}
-
-table.highlighttable td {
-    margin: 0;
-    padding: 0;
-}
-
-table.highlighttable td.linenos {
-    padding-right: 0.5em;
-}
-
-table.highlighttable td.code {
-    flex: 1;
-    overflow: hidden;
-}
-
-.highlight .hll {
-    display: block;
-}
-
-div.highlight pre,
-table.highlighttable pre {
-    margin: 0;
-}
-
-div.code-block-caption + div {
-    margin-top: 0;
-}
-
-div.code-block-caption {
-    margin-top: 1em;
-    padding: 2px 5px;
-    font-size: small;
-}
-
-div.code-block-caption code {
-    background-color: transparent;
-}
-
-table.highlighttable td.linenos,
-span.linenos,
-div.highlight span.gp {  /* gp: Generic.Prompt */
-  user-select: none;
-  -webkit-user-select: text; /* Safari fallback only */
-  -webkit-user-select: none; /* Chrome/Safari */
-  -moz-user-select: none; /* Firefox */
-  -ms-user-select: none; /* IE10+ */
-}
-
-div.code-block-caption span.caption-number {
-    padding: 0.1em 0.3em;
-    font-style: italic;
-}
-
-div.code-block-caption span.caption-text {
-}
-
-div.literal-block-wrapper {
-    margin: 1em 0;
-}
-
-code.xref, a code {
-    background-color: transparent;
-    font-weight: bold;
-}
-
-h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
-    background-color: transparent;
-}
-
-.viewcode-link {
-    float: right;
-}
-
-.viewcode-back {
-    float: right;
-    font-family: sans-serif;
-}
-
-div.viewcode-block:target {
-    margin: -1px -10px;
-    padding: 0 10px;
-}
-
-/* -- math display ---------------------------------------------------------- */
-
-img.math {
-    vertical-align: middle;
-}
-
-div.body div.math p {
-    text-align: center;
-}
-
-span.eqno {
-    float: right;
-}
-
-span.eqno a.headerlink {
-    position: absolute;
-    z-index: 1;
-}
-
-div.math:hover a.headerlink {
-    visibility: visible;
-}
-
-/* -- printout stylesheet --------------------------------------------------- */
-
-@media print {
-    div.document,
-    div.documentwrapper,
-    div.bodywrapper {
-        margin: 0 !important;
-        width: 100%;
-    }
-
-    div.sphinxsidebar,
-    div.related,
-    div.footer,
-    #top-link {
-        display: none;
-    }
-}
\ No newline at end of file
diff --git a/docs/build/html/_static/custom.css b/docs/build/html/_static/custom.css
deleted file mode 100644
index 2a924f1d6a8bc930c5296bdb2d5c2d3e39b04a1c..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/custom.css
+++ /dev/null
@@ -1 +0,0 @@
-/* This file intentionally left blank. */
diff --git a/docs/build/html/_static/doctools.js b/docs/build/html/_static/doctools.js
deleted file mode 100644
index e509e48349c55c7bbf1015cdf9721915cd357370..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/doctools.js
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * doctools.js
- * ~~~~~~~~~~~
- *
- * Sphinx JavaScript utilities for all documentation.
- *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-/**
- * make the code below compatible with browsers without
- * an installed firebug like debugger
-if (!window.console || !console.firebug) {
-  var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
-    "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
-    "profile", "profileEnd"];
-  window.console = {};
-  for (var i = 0; i < names.length; ++i)
-    window.console[names[i]] = function() {};
-}
- */
-
-/**
- * small helper function to urldecode strings
- *
- * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
- */
-jQuery.urldecode = function(x) {
-  if (!x) {
-    return x
-  }
-  return decodeURIComponent(x.replace(/\+/g, ' '));
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
-  if (typeof s === 'undefined')
-    s = document.location.search;
-  var parts = s.substr(s.indexOf('?') + 1).split('&');
-  var result = {};
-  for (var i = 0; i < parts.length; i++) {
-    var tmp = parts[i].split('=', 2);
-    var key = jQuery.urldecode(tmp[0]);
-    var value = jQuery.urldecode(tmp[1]);
-    if (key in result)
-      result[key].push(value);
-    else
-      result[key] = [value];
-  }
-  return result;
-};
-
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
-  function highlight(node, addItems) {
-    if (node.nodeType === 3) {
-      var val = node.nodeValue;
-      var pos = val.toLowerCase().indexOf(text);
-      if (pos >= 0 &&
-          !jQuery(node.parentNode).hasClass(className) &&
-          !jQuery(node.parentNode).hasClass("nohighlight")) {
-        var span;
-        var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
-        if (isInSVG) {
-          span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
-        } else {
-          span = document.createElement("span");
-          span.className = className;
-        }
-        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
-        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
-          document.createTextNode(val.substr(pos + text.length)),
-          node.nextSibling));
-        node.nodeValue = val.substr(0, pos);
-        if (isInSVG) {
-          var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
-          var bbox = node.parentElement.getBBox();
-          rect.x.baseVal.value = bbox.x;
-          rect.y.baseVal.value = bbox.y;
-          rect.width.baseVal.value = bbox.width;
-          rect.height.baseVal.value = bbox.height;
-          rect.setAttribute('class', className);
-          addItems.push({
-              "parent": node.parentNode,
-              "target": rect});
-        }
-      }
-    }
-    else if (!jQuery(node).is("button, select, textarea")) {
-      jQuery.each(node.childNodes, function() {
-        highlight(this, addItems);
-      });
-    }
-  }
-  var addItems = [];
-  var result = this.each(function() {
-    highlight(this, addItems);
-  });
-  for (var i = 0; i < addItems.length; ++i) {
-    jQuery(addItems[i].parent).before(addItems[i].target);
-  }
-  return result;
-};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
-  jQuery.uaMatch = function(ua) {
-    ua = ua.toLowerCase();
-
-    var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
-      /(webkit)[ \/]([\w.]+)/.exec(ua) ||
-      /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
-      /(msie) ([\w.]+)/.exec(ua) ||
-      ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
-      [];
-
-    return {
-      browser: match[ 1 ] || "",
-      version: match[ 2 ] || "0"
-    };
-  };
-  jQuery.browser = {};
-  jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
-
-/**
- * Small JavaScript module for the documentation.
- */
-var Documentation = {
-
-  init : function() {
-    this.fixFirefoxAnchorBug();
-    this.highlightSearchWords();
-    this.initIndexTable();
-    if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
-      this.initOnKeyListeners();
-    }
-  },
-
-  /**
-   * i18n support
-   */
-  TRANSLATIONS : {},
-  PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
-  LOCALE : 'unknown',
-
-  // gettext and ngettext don't access this so that the functions
-  // can safely bound to a different name (_ = Documentation.gettext)
-  gettext : function(string) {
-    var translated = Documentation.TRANSLATIONS[string];
-    if (typeof translated === 'undefined')
-      return string;
-    return (typeof translated === 'string') ? translated : translated[0];
-  },
-
-  ngettext : function(singular, plural, n) {
-    var translated = Documentation.TRANSLATIONS[singular];
-    if (typeof translated === 'undefined')
-      return (n == 1) ? singular : plural;
-    return translated[Documentation.PLURALEXPR(n)];
-  },
-
-  addTranslations : function(catalog) {
-    for (var key in catalog.messages)
-      this.TRANSLATIONS[key] = catalog.messages[key];
-    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
-    this.LOCALE = catalog.locale;
-  },
-
-  /**
-   * add context elements like header anchor links
-   */
-  addContextElements : function() {
-    $('div[id] > :header:first').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
-      attr('href', '#' + this.id).
-      attr('title', _('Permalink to this headline')).
-      appendTo(this);
-    });
-    $('dt[id]').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
-      attr('href', '#' + this.id).
-      attr('title', _('Permalink to this definition')).
-      appendTo(this);
-    });
-  },
-
-  /**
-   * workaround a firefox stupidity
-   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
-   */
-  fixFirefoxAnchorBug : function() {
-    if (document.location.hash && $.browser.mozilla)
-      window.setTimeout(function() {
-        document.location.href += '';
-      }, 10);
-  },
-
-  /**
-   * highlight the search words provided in the url in the text
-   */
-  highlightSearchWords : function() {
-    var params = $.getQueryParameters();
-    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
-    if (terms.length) {
-      var body = $('div.body');
-      if (!body.length) {
-        body = $('body');
-      }
-      window.setTimeout(function() {
-        $.each(terms, function() {
-          body.highlightText(this.toLowerCase(), 'highlighted');
-        });
-      }, 10);
-      $('<p class="highlight-link"><a href="javascript:Documentation.' +
-        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
-          .appendTo($('#searchbox'));
-    }
-  },
-
-  /**
-   * init the domain index toggle buttons
-   */
-  initIndexTable : function() {
-    var togglers = $('img.toggler').click(function() {
-      var src = $(this).attr('src');
-      var idnum = $(this).attr('id').substr(7);
-      $('tr.cg-' + idnum).toggle();
-      if (src.substr(-9) === 'minus.png')
-        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
-      else
-        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
-    }).css('display', '');
-    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
-        togglers.click();
-    }
-  },
-
-  /**
-   * helper function to hide the search marks again
-   */
-  hideSearchWords : function() {
-    $('#searchbox .highlight-link').fadeOut(300);
-    $('span.highlighted').removeClass('highlighted');
-    var url = new URL(window.location);
-    url.searchParams.delete('highlight');
-    window.history.replaceState({}, '', url);
-  },
-
-  /**
-   * make the url absolute
-   */
-  makeURL : function(relativeURL) {
-    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
-  },
-
-  /**
-   * get the current relative url
-   */
-  getCurrentURL : function() {
-    var path = document.location.pathname;
-    var parts = path.split(/\//);
-    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
-      if (this === '..')
-        parts.pop();
-    });
-    var url = parts.join('/');
-    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
-  },
-
-  initOnKeyListeners: function() {
-    $(document).keydown(function(event) {
-      var activeElementType = document.activeElement.tagName;
-      // don't navigate when in search box, textarea, dropdown or button
-      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
-          && activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
-          && !event.shiftKey) {
-        switch (event.keyCode) {
-          case 37: // left
-            var prevHref = $('link[rel="prev"]').prop('href');
-            if (prevHref) {
-              window.location.href = prevHref;
-              return false;
-            }
-            break;
-          case 39: // right
-            var nextHref = $('link[rel="next"]').prop('href');
-            if (nextHref) {
-              window.location.href = nextHref;
-              return false;
-            }
-            break;
-        }
-      }
-    });
-  }
-};
-
-// quick alias for translations
-_ = Documentation.gettext;
-
-$(document).ready(function() {
-  Documentation.init();
-});
diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js
deleted file mode 100644
index cfb312604e5a3b88125f54533601a95b10d3d6cc..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/documentation_options.js
+++ /dev/null
@@ -1,12 +0,0 @@
-var DOCUMENTATION_OPTIONS = {
-    URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
-    VERSION: '0.0.3',
-    LANGUAGE: 'python',
-    COLLAPSE_INDEX: false,
-    BUILDER: 'html',
-    FILE_SUFFIX: '.html',
-    LINK_SUFFIX: '.html',
-    HAS_SOURCE: true,
-    SOURCELINK_SUFFIX: '.txt',
-    NAVIGATION_WITH_KEYS: false
-};
\ No newline at end of file
diff --git a/docs/build/html/_static/jquery-3.5.1.js b/docs/build/html/_static/jquery-3.5.1.js
deleted file mode 100644
index 50937333b99a5e168ac9e8292b22edd7e96c3e6a..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/jquery-3.5.1.js
+++ /dev/null
@@ -1,10872 +0,0 @@
-/*!
- * jQuery JavaScript Library v3.5.1
- * https://jquery.com/
- *
- * Includes Sizzle.js
- * https://sizzlejs.com/
- *
- * Copyright JS Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2020-05-04T22:49Z
- */
-( function( global, factory ) {
-
-	"use strict";
-
-	if ( typeof module === "object" && typeof module.exports === "object" ) {
-
-		// For CommonJS and CommonJS-like environments where a proper `window`
-		// is present, execute the factory and get jQuery.
-		// For environments that do not have a `window` with a `document`
-		// (such as Node.js), expose a factory as module.exports.
-		// This accentuates the need for the creation of a real `window`.
-		// e.g. var jQuery = require("jquery")(window);
-		// See ticket #14549 for more info.
-		module.exports = global.document ?
-			factory( global, true ) :
-			function( w ) {
-				if ( !w.document ) {
-					throw new Error( "jQuery requires a window with a document" );
-				}
-				return factory( w );
-			};
-	} else {
-		factory( global );
-	}
-
-// Pass this if window is not defined yet
-} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
-
-// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
-// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
-// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
-// enough that all such attempts are guarded in a try block.
-"use strict";
-
-var arr = [];
-
-var getProto = Object.getPrototypeOf;
-
-var slice = arr.slice;
-
-var flat = arr.flat ? function( array ) {
-	return arr.flat.call( array );
-} : function( array ) {
-	return arr.concat.apply( [], array );
-};
-
-
-var push = arr.push;
-
-var indexOf = arr.indexOf;
-
-var class2type = {};
-
-var toString = class2type.toString;
-
-var hasOwn = class2type.hasOwnProperty;
-
-var fnToString = hasOwn.toString;
-
-var ObjectFunctionString = fnToString.call( Object );
-
-var support = {};
-
-var isFunction = function isFunction( obj ) {
-
-      // Support: Chrome <=57, Firefox <=52
-      // In some browsers, typeof returns "function" for HTML <object> elements
-      // (i.e., `typeof document.createElement( "object" ) === "function"`).
-      // We don't want to classify *any* DOM node as a function.
-      return typeof obj === "function" && typeof obj.nodeType !== "number";
-  };
-
-
-var isWindow = function isWindow( obj ) {
-		return obj != null && obj === obj.window;
-	};
-
-
-var document = window.document;
-
-
-
-	var preservedScriptAttributes = {
-		type: true,
-		src: true,
-		nonce: true,
-		noModule: true
-	};
-
-	function DOMEval( code, node, doc ) {
-		doc = doc || document;
-
-		var i, val,
-			script = doc.createElement( "script" );
-
-		script.text = code;
-		if ( node ) {
-			for ( i in preservedScriptAttributes ) {
-
-				// Support: Firefox 64+, Edge 18+
-				// Some browsers don't support the "nonce" property on scripts.
-				// On the other hand, just using `getAttribute` is not enough as
-				// the `nonce` attribute is reset to an empty string whenever it
-				// becomes browsing-context connected.
-				// See https://github.com/whatwg/html/issues/2369
-				// See https://html.spec.whatwg.org/#nonce-attributes
-				// The `node.getAttribute` check was added for the sake of
-				// `jQuery.globalEval` so that it can fake a nonce-containing node
-				// via an object.
-				val = node[ i ] || node.getAttribute && node.getAttribute( i );
-				if ( val ) {
-					script.setAttribute( i, val );
-				}
-			}
-		}
-		doc.head.appendChild( script ).parentNode.removeChild( script );
-	}
-
-
-function toType( obj ) {
-	if ( obj == null ) {
-		return obj + "";
-	}
-
-	// Support: Android <=2.3 only (functionish RegExp)
-	return typeof obj === "object" || typeof obj === "function" ?
-		class2type[ toString.call( obj ) ] || "object" :
-		typeof obj;
-}
-/* global Symbol */
-// Defining this global in .eslintrc.json would create a danger of using the global
-// unguarded in another place, it seems safer to define global only for this module
-
-
-
-var
-	version = "3.5.1",
-
-	// Define a local copy of jQuery
-	jQuery = function( selector, context ) {
-
-		// The jQuery object is actually just the init constructor 'enhanced'
-		// Need init if jQuery is called (just allow error to be thrown if not included)
-		return new jQuery.fn.init( selector, context );
-	};
-
-jQuery.fn = jQuery.prototype = {
-
-	// The current version of jQuery being used
-	jquery: version,
-
-	constructor: jQuery,
-
-	// The default length of a jQuery object is 0
-	length: 0,
-
-	toArray: function() {
-		return slice.call( this );
-	},
-
-	// Get the Nth element in the matched element set OR
-	// Get the whole matched element set as a clean array
-	get: function( num ) {
-
-		// Return all the elements in a clean array
-		if ( num == null ) {
-			return slice.call( this );
-		}
-
-		// Return just the one element from the set
-		return num < 0 ? this[ num + this.length ] : this[ num ];
-	},
-
-	// Take an array of elements and push it onto the stack
-	// (returning the new matched element set)
-	pushStack: function( elems ) {
-
-		// Build a new jQuery matched element set
-		var ret = jQuery.merge( this.constructor(), elems );
-
-		// Add the old object onto the stack (as a reference)
-		ret.prevObject = this;
-
-		// Return the newly-formed element set
-		return ret;
-	},
-
-	// Execute a callback for every element in the matched set.
-	each: function( callback ) {
-		return jQuery.each( this, callback );
-	},
-
-	map: function( callback ) {
-		return this.pushStack( jQuery.map( this, function( elem, i ) {
-			return callback.call( elem, i, elem );
-		} ) );
-	},
-
-	slice: function() {
-		return this.pushStack( slice.apply( this, arguments ) );
-	},
-
-	first: function() {
-		return this.eq( 0 );
-	},
-
-	last: function() {
-		return this.eq( -1 );
-	},
-
-	even: function() {
-		return this.pushStack( jQuery.grep( this, function( _elem, i ) {
-			return ( i + 1 ) % 2;
-		} ) );
-	},
-
-	odd: function() {
-		return this.pushStack( jQuery.grep( this, function( _elem, i ) {
-			return i % 2;
-		} ) );
-	},
-
-	eq: function( i ) {
-		var len = this.length,
-			j = +i + ( i < 0 ? len : 0 );
-		return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
-	},
-
-	end: function() {
-		return this.prevObject || this.constructor();
-	},
-
-	// For internal use only.
-	// Behaves like an Array's method, not like a jQuery method.
-	push: push,
-	sort: arr.sort,
-	splice: arr.splice
-};
-
-jQuery.extend = jQuery.fn.extend = function() {
-	var options, name, src, copy, copyIsArray, clone,
-		target = arguments[ 0 ] || {},
-		i = 1,
-		length = arguments.length,
-		deep = false;
-
-	// Handle a deep copy situation
-	if ( typeof target === "boolean" ) {
-		deep = target;
-
-		// Skip the boolean and the target
-		target = arguments[ i ] || {};
-		i++;
-	}
-
-	// Handle case when target is a string or something (possible in deep copy)
-	if ( typeof target !== "object" && !isFunction( target ) ) {
-		target = {};
-	}
-
-	// Extend jQuery itself if only one argument is passed
-	if ( i === length ) {
-		target = this;
-		i--;
-	}
-
-	for ( ; i < length; i++ ) {
-
-		// Only deal with non-null/undefined values
-		if ( ( options = arguments[ i ] ) != null ) {
-
-			// Extend the base object
-			for ( name in options ) {
-				copy = options[ name ];
-
-				// Prevent Object.prototype pollution
-				// Prevent never-ending loop
-				if ( name === "__proto__" || target === copy ) {
-					continue;
-				}
-
-				// Recurse if we're merging plain objects or arrays
-				if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
-					( copyIsArray = Array.isArray( copy ) ) ) ) {
-					src = target[ name ];
-
-					// Ensure proper type for the source value
-					if ( copyIsArray && !Array.isArray( src ) ) {
-						clone = [];
-					} else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) {
-						clone = {};
-					} else {
-						clone = src;
-					}
-					copyIsArray = false;
-
-					// Never move original objects, clone them
-					target[ name ] = jQuery.extend( deep, clone, copy );
-
-				// Don't bring in undefined values
-				} else if ( copy !== undefined ) {
-					target[ name ] = copy;
-				}
-			}
-		}
-	}
-
-	// Return the modified object
-	return target;
-};
-
-jQuery.extend( {
-
-	// Unique for each copy of jQuery on the page
-	expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
-
-	// Assume jQuery is ready without the ready module
-	isReady: true,
-
-	error: function( msg ) {
-		throw new Error( msg );
-	},
-
-	noop: function() {},
-
-	isPlainObject: function( obj ) {
-		var proto, Ctor;
-
-		// Detect obvious negatives
-		// Use toString instead of jQuery.type to catch host objects
-		if ( !obj || toString.call( obj ) !== "[object Object]" ) {
-			return false;
-		}
-
-		proto = getProto( obj );
-
-		// Objects with no prototype (e.g., `Object.create( null )`) are plain
-		if ( !proto ) {
-			return true;
-		}
-
-		// Objects with prototype are plain iff they were constructed by a global Object function
-		Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
-		return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
-	},
-
-	isEmptyObject: function( obj ) {
-		var name;
-
-		for ( name in obj ) {
-			return false;
-		}
-		return true;
-	},
-
-	// Evaluates a script in a provided context; falls back to the global one
-	// if not specified.
-	globalEval: function( code, options, doc ) {
-		DOMEval( code, { nonce: options && options.nonce }, doc );
-	},
-
-	each: function( obj, callback ) {
-		var length, i = 0;
-
-		if ( isArrayLike( obj ) ) {
-			length = obj.length;
-			for ( ; i < length; i++ ) {
-				if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
-					break;
-				}
-			}
-		} else {
-			for ( i in obj ) {
-				if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
-					break;
-				}
-			}
-		}
-
-		return obj;
-	},
-
-	// results is for internal usage only
-	makeArray: function( arr, results ) {
-		var ret = results || [];
-
-		if ( arr != null ) {
-			if ( isArrayLike( Object( arr ) ) ) {
-				jQuery.merge( ret,
-					typeof arr === "string" ?
-					[ arr ] : arr
-				);
-			} else {
-				push.call( ret, arr );
-			}
-		}
-
-		return ret;
-	},
-
-	inArray: function( elem, arr, i ) {
-		return arr == null ? -1 : indexOf.call( arr, elem, i );
-	},
-
-	// Support: Android <=4.0 only, PhantomJS 1 only
-	// push.apply(_, arraylike) throws on ancient WebKit
-	merge: function( first, second ) {
-		var len = +second.length,
-			j = 0,
-			i = first.length;
-
-		for ( ; j < len; j++ ) {
-			first[ i++ ] = second[ j ];
-		}
-
-		first.length = i;
-
-		return first;
-	},
-
-	grep: function( elems, callback, invert ) {
-		var callbackInverse,
-			matches = [],
-			i = 0,
-			length = elems.length,
-			callbackExpect = !invert;
-
-		// Go through the array, only saving the items
-		// that pass the validator function
-		for ( ; i < length; i++ ) {
-			callbackInverse = !callback( elems[ i ], i );
-			if ( callbackInverse !== callbackExpect ) {
-				matches.push( elems[ i ] );
-			}
-		}
-
-		return matches;
-	},
-
-	// arg is for internal usage only
-	map: function( elems, callback, arg ) {
-		var length, value,
-			i = 0,
-			ret = [];
-
-		// Go through the array, translating each of the items to their new values
-		if ( isArrayLike( elems ) ) {
-			length = elems.length;
-			for ( ; i < length; i++ ) {
-				value = callback( elems[ i ], i, arg );
-
-				if ( value != null ) {
-					ret.push( value );
-				}
-			}
-
-		// Go through every key on the object,
-		} else {
-			for ( i in elems ) {
-				value = callback( elems[ i ], i, arg );
-
-				if ( value != null ) {
-					ret.push( value );
-				}
-			}
-		}
-
-		// Flatten any nested arrays
-		return flat( ret );
-	},
-
-	// A global GUID counter for objects
-	guid: 1,
-
-	// jQuery.support is not used in Core but other projects attach their
-	// properties to it so it needs to exist.
-	support: support
-} );
-
-if ( typeof Symbol === "function" ) {
-	jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
-}
-
-// Populate the class2type map
-jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
-function( _i, name ) {
-	class2type[ "[object " + name + "]" ] = name.toLowerCase();
-} );
-
-function isArrayLike( obj ) {
-
-	// Support: real iOS 8.2 only (not reproducible in simulator)
-	// `in` check used to prevent JIT error (gh-2145)
-	// hasOwn isn't used here due to false negatives
-	// regarding Nodelist length in IE
-	var length = !!obj && "length" in obj && obj.length,
-		type = toType( obj );
-
-	if ( isFunction( obj ) || isWindow( obj ) ) {
-		return false;
-	}
-
-	return type === "array" || length === 0 ||
-		typeof length === "number" && length > 0 && ( length - 1 ) in obj;
-}
-var Sizzle =
-/*!
- * Sizzle CSS Selector Engine v2.3.5
- * https://sizzlejs.com/
- *
- * Copyright JS Foundation and other contributors
- * Released under the MIT license
- * https://js.foundation/
- *
- * Date: 2020-03-14
- */
-( function( window ) {
-var i,
-	support,
-	Expr,
-	getText,
-	isXML,
-	tokenize,
-	compile,
-	select,
-	outermostContext,
-	sortInput,
-	hasDuplicate,
-
-	// Local document vars
-	setDocument,
-	document,
-	docElem,
-	documentIsHTML,
-	rbuggyQSA,
-	rbuggyMatches,
-	matches,
-	contains,
-
-	// Instance-specific data
-	expando = "sizzle" + 1 * new Date(),
-	preferredDoc = window.document,
-	dirruns = 0,
-	done = 0,
-	classCache = createCache(),
-	tokenCache = createCache(),
-	compilerCache = createCache(),
-	nonnativeSelectorCache = createCache(),
-	sortOrder = function( a, b ) {
-		if ( a === b ) {
-			hasDuplicate = true;
-		}
-		return 0;
-	},
-
-	// Instance methods
-	hasOwn = ( {} ).hasOwnProperty,
-	arr = [],
-	pop = arr.pop,
-	pushNative = arr.push,
-	push = arr.push,
-	slice = arr.slice,
-
-	// Use a stripped-down indexOf as it's faster than native
-	// https://jsperf.com/thor-indexof-vs-for/5
-	indexOf = function( list, elem ) {
-		var i = 0,
-			len = list.length;
-		for ( ; i < len; i++ ) {
-			if ( list[ i ] === elem ) {
-				return i;
-			}
-		}
-		return -1;
-	},
-
-	booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" +
-		"ismap|loop|multiple|open|readonly|required|scoped",
-
-	// Regular expressions
-
-	// http://www.w3.org/TR/css3-selectors/#whitespace
-	whitespace = "[\\x20\\t\\r\\n\\f]",
-
-	// https://www.w3.org/TR/css-syntax-3/#ident-token-diagram
-	identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace +
-		"?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+",
-
-	// Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors
-	attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
-
-		// Operator (capture 2)
-		"*([*^$|!~]?=)" + whitespace +
-
-		// "Attribute values must be CSS identifiers [capture 5]
-		// or strings [capture 3 or capture 4]"
-		"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" +
-		whitespace + "*\\]",
-
-	pseudos = ":(" + identifier + ")(?:\\((" +
-
-		// To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
-		// 1. quoted (capture 3; capture 4 or capture 5)
-		"('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
-
-		// 2. simple (capture 6)
-		"((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
-
-		// 3. anything else (capture 2)
-		".*" +
-		")\\)|)",
-
-	// Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
-	rwhitespace = new RegExp( whitespace + "+", "g" ),
-	rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" +
-		whitespace + "+$", "g" ),
-
-	rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
-	rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace +
-		"*" ),
-	rdescend = new RegExp( whitespace + "|>" ),
-
-	rpseudo = new RegExp( pseudos ),
-	ridentifier = new RegExp( "^" + identifier + "$" ),
-
-	matchExpr = {
-		"ID": new RegExp( "^#(" + identifier + ")" ),
-		"CLASS": new RegExp( "^\\.(" + identifier + ")" ),
-		"TAG": new RegExp( "^(" + identifier + "|[*])" ),
-		"ATTR": new RegExp( "^" + attributes ),
-		"PSEUDO": new RegExp( "^" + pseudos ),
-		"CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" +
-			whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" +
-			whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
-		"bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
-
-		// For use in libraries implementing .is()
-		// We use this for POS matching in `select`
-		"needsContext": new RegExp( "^" + whitespace +
-			"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace +
-			"*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
-	},
-
-	rhtml = /HTML$/i,
-	rinputs = /^(?:input|select|textarea|button)$/i,
-	rheader = /^h\d$/i,
-
-	rnative = /^[^{]+\{\s*\[native \w/,
-
-	// Easily-parseable/retrievable ID or TAG or CLASS selectors
-	rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,
-
-	rsibling = /[+~]/,
-
-	// CSS escapes
-	// http://www.w3.org/TR/CSS21/syndata.html#escaped-characters
-	runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ),
-	funescape = function( escape, nonHex ) {
-		var high = "0x" + escape.slice( 1 ) - 0x10000;
-
-		return nonHex ?
-
-			// Strip the backslash prefix from a non-hex escape sequence
-			nonHex :
-
-			// Replace a hexadecimal escape sequence with the encoded Unicode code point
-			// Support: IE <=11+
-			// For values outside the Basic Multilingual Plane (BMP), manually construct a
-			// surrogate pair
-			high < 0 ?
-				String.fromCharCode( high + 0x10000 ) :
-				String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
-	},
-
-	// CSS string/identifier serialization
-	// https://drafts.csswg.org/cssom/#common-serializing-idioms
-	rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,
-	fcssescape = function( ch, asCodePoint ) {
-		if ( asCodePoint ) {
-
-			// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
-			if ( ch === "\0" ) {
-				return "\uFFFD";
-			}
-
-			// Control characters and (dependent upon position) numbers get escaped as code points
-			return ch.slice( 0, -1 ) + "\\" +
-				ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
-		}
-
-		// Other potentially-special ASCII characters get backslash-escaped
-		return "\\" + ch;
-	},
-
-	// Used for iframes
-	// See setDocument()
-	// Removing the function wrapper causes a "Permission Denied"
-	// error in IE
-	unloadHandler = function() {
-		setDocument();
-	},
-
-	inDisabledFieldset = addCombinator(
-		function( elem ) {
-			return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset";
-		},
-		{ dir: "parentNode", next: "legend" }
-	);
-
-// Optimize for push.apply( _, NodeList )
-try {
-	push.apply(
-		( arr = slice.call( preferredDoc.childNodes ) ),
-		preferredDoc.childNodes
-	);
-
-	// Support: Android<4.0
-	// Detect silently failing push.apply
-	// eslint-disable-next-line no-unused-expressions
-	arr[ preferredDoc.childNodes.length ].nodeType;
-} catch ( e ) {
-	push = { apply: arr.length ?
-
-		// Leverage slice if possible
-		function( target, els ) {
-			pushNative.apply( target, slice.call( els ) );
-		} :
-
-		// Support: IE<9
-		// Otherwise append directly
-		function( target, els ) {
-			var j = target.length,
-				i = 0;
-
-			// Can't trust NodeList.length
-			while ( ( target[ j++ ] = els[ i++ ] ) ) {}
-			target.length = j - 1;
-		}
-	};
-}
-
-function Sizzle( selector, context, results, seed ) {
-	var m, i, elem, nid, match, groups, newSelector,
-		newContext = context && context.ownerDocument,
-
-		// nodeType defaults to 9, since context defaults to document
-		nodeType = context ? context.nodeType : 9;
-
-	results = results || [];
-
-	// Return early from calls with invalid selector or context
-	if ( typeof selector !== "string" || !selector ||
-		nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {
-
-		return results;
-	}
-
-	// Try to shortcut find operations (as opposed to filters) in HTML documents
-	if ( !seed ) {
-		setDocument( context );
-		context = context || document;
-
-		if ( documentIsHTML ) {
-
-			// If the selector is sufficiently simple, try using a "get*By*" DOM method
-			// (excepting DocumentFragment context, where the methods don't exist)
-			if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) {
-
-				// ID selector
-				if ( ( m = match[ 1 ] ) ) {
-
-					// Document context
-					if ( nodeType === 9 ) {
-						if ( ( elem = context.getElementById( m ) ) ) {
-
-							// Support: IE, Opera, Webkit
-							// TODO: identify versions
-							// getElementById can match elements by name instead of ID
-							if ( elem.id === m ) {
-								results.push( elem );
-								return results;
-							}
-						} else {
-							return results;
-						}
-
-					// Element context
-					} else {
-
-						// Support: IE, Opera, Webkit
-						// TODO: identify versions
-						// getElementById can match elements by name instead of ID
-						if ( newContext && ( elem = newContext.getElementById( m ) ) &&
-							contains( context, elem ) &&
-							elem.id === m ) {
-
-							results.push( elem );
-							return results;
-						}
-					}
-
-				// Type selector
-				} else if ( match[ 2 ] ) {
-					push.apply( results, context.getElementsByTagName( selector ) );
-					return results;
-
-				// Class selector
-				} else if ( ( m = match[ 3 ] ) && support.getElementsByClassName &&
-					context.getElementsByClassName ) {
-
-					push.apply( results, context.getElementsByClassName( m ) );
-					return results;
-				}
-			}
-
-			// Take advantage of querySelectorAll
-			if ( support.qsa &&
-				!nonnativeSelectorCache[ selector + " " ] &&
-				( !rbuggyQSA || !rbuggyQSA.test( selector ) ) &&
-
-				// Support: IE 8 only
-				// Exclude object elements
-				( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) {
-
-				newSelector = selector;
-				newContext = context;
-
-				// qSA considers elements outside a scoping root when evaluating child or
-				// descendant combinators, which is not what we want.
-				// In such cases, we work around the behavior by prefixing every selector in the
-				// list with an ID selector referencing the scope context.
-				// The technique has to be used as well when a leading combinator is used
-				// as such selectors are not recognized by querySelectorAll.
-				// Thanks to Andrew Dupont for this technique.
-				if ( nodeType === 1 &&
-					( rdescend.test( selector ) || rcombinators.test( selector ) ) ) {
-
-					// Expand context for sibling selectors
-					newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
-						context;
-
-					// We can use :scope instead of the ID hack if the browser
-					// supports it & if we're not changing the context.
-					if ( newContext !== context || !support.scope ) {
-
-						// Capture the context ID, setting it first if necessary
-						if ( ( nid = context.getAttribute( "id" ) ) ) {
-							nid = nid.replace( rcssescape, fcssescape );
-						} else {
-							context.setAttribute( "id", ( nid = expando ) );
-						}
-					}
-
-					// Prefix every selector in the list
-					groups = tokenize( selector );
-					i = groups.length;
-					while ( i-- ) {
-						groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " +
-							toSelector( groups[ i ] );
-					}
-					newSelector = groups.join( "," );
-				}
-
-				try {
-					push.apply( results,
-						newContext.querySelectorAll( newSelector )
-					);
-					return results;
-				} catch ( qsaError ) {
-					nonnativeSelectorCache( selector, true );
-				} finally {
-					if ( nid === expando ) {
-						context.removeAttribute( "id" );
-					}
-				}
-			}
-		}
-	}
-
-	// All others
-	return select( selector.replace( rtrim, "$1" ), context, results, seed );
-}
-
-/**
- * Create key-value caches of limited size
- * @returns {function(string, object)} Returns the Object data after storing it on itself with
- *	property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength)
- *	deleting the oldest entry
- */
-function createCache() {
-	var keys = [];
-
-	function cache( key, value ) {
-
-		// Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
-		if ( keys.push( key + " " ) > Expr.cacheLength ) {
-
-			// Only keep the most recent entries
-			delete cache[ keys.shift() ];
-		}
-		return ( cache[ key + " " ] = value );
-	}
-	return cache;
-}
-
-/**
- * Mark a function for special use by Sizzle
- * @param {Function} fn The function to mark
- */
-function markFunction( fn ) {
-	fn[ expando ] = true;
-	return fn;
-}
-
-/**
- * Support testing using an element
- * @param {Function} fn Passed the created element and returns a boolean result
- */
-function assert( fn ) {
-	var el = document.createElement( "fieldset" );
-
-	try {
-		return !!fn( el );
-	} catch ( e ) {
-		return false;
-	} finally {
-
-		// Remove from its parent by default
-		if ( el.parentNode ) {
-			el.parentNode.removeChild( el );
-		}
-
-		// release memory in IE
-		el = null;
-	}
-}
-
-/**
- * Adds the same handler for all of the specified attrs
- * @param {String} attrs Pipe-separated list of attributes
- * @param {Function} handler The method that will be applied
- */
-function addHandle( attrs, handler ) {
-	var arr = attrs.split( "|" ),
-		i = arr.length;
-
-	while ( i-- ) {
-		Expr.attrHandle[ arr[ i ] ] = handler;
-	}
-}
-
-/**
- * Checks document order of two siblings
- * @param {Element} a
- * @param {Element} b
- * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
- */
-function siblingCheck( a, b ) {
-	var cur = b && a,
-		diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
-			a.sourceIndex - b.sourceIndex;
-
-	// Use IE sourceIndex if available on both nodes
-	if ( diff ) {
-		return diff;
-	}
-
-	// Check if b follows a
-	if ( cur ) {
-		while ( ( cur = cur.nextSibling ) ) {
-			if ( cur === b ) {
-				return -1;
-			}
-		}
-	}
-
-	return a ? 1 : -1;
-}
-
-/**
- * Returns a function to use in pseudos for input types
- * @param {String} type
- */
-function createInputPseudo( type ) {
-	return function( elem ) {
-		var name = elem.nodeName.toLowerCase();
-		return name === "input" && elem.type === type;
-	};
-}
-
-/**
- * Returns a function to use in pseudos for buttons
- * @param {String} type
- */
-function createButtonPseudo( type ) {
-	return function( elem ) {
-		var name = elem.nodeName.toLowerCase();
-		return ( name === "input" || name === "button" ) && elem.type === type;
-	};
-}
-
-/**
- * Returns a function to use in pseudos for :enabled/:disabled
- * @param {Boolean} disabled true for :disabled; false for :enabled
- */
-function createDisabledPseudo( disabled ) {
-
-	// Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
-	return function( elem ) {
-
-		// Only certain elements can match :enabled or :disabled
-		// https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
-		// https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
-		if ( "form" in elem ) {
-
-			// Check for inherited disabledness on relevant non-disabled elements:
-			// * listed form-associated elements in a disabled fieldset
-			//   https://html.spec.whatwg.org/multipage/forms.html#category-listed
-			//   https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
-			// * option elements in a disabled optgroup
-			//   https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
-			// All such elements have a "form" property.
-			if ( elem.parentNode && elem.disabled === false ) {
-
-				// Option elements defer to a parent optgroup if present
-				if ( "label" in elem ) {
-					if ( "label" in elem.parentNode ) {
-						return elem.parentNode.disabled === disabled;
-					} else {
-						return elem.disabled === disabled;
-					}
-				}
-
-				// Support: IE 6 - 11
-				// Use the isDisabled shortcut property to check for disabled fieldset ancestors
-				return elem.isDisabled === disabled ||
-
-					// Where there is no isDisabled, check manually
-					/* jshint -W018 */
-					elem.isDisabled !== !disabled &&
-					inDisabledFieldset( elem ) === disabled;
-			}
-
-			return elem.disabled === disabled;
-
-		// Try to winnow out elements that can't be disabled before trusting the disabled property.
-		// Some victims get caught in our net (label, legend, menu, track), but it shouldn't
-		// even exist on them, let alone have a boolean value.
-		} else if ( "label" in elem ) {
-			return elem.disabled === disabled;
-		}
-
-		// Remaining elements are neither :enabled nor :disabled
-		return false;
-	};
-}
-
-/**
- * Returns a function to use in pseudos for positionals
- * @param {Function} fn
- */
-function createPositionalPseudo( fn ) {
-	return markFunction( function( argument ) {
-		argument = +argument;
-		return markFunction( function( seed, matches ) {
-			var j,
-				matchIndexes = fn( [], seed.length, argument ),
-				i = matchIndexes.length;
-
-			// Match elements found at the specified indexes
-			while ( i-- ) {
-				if ( seed[ ( j = matchIndexes[ i ] ) ] ) {
-					seed[ j ] = !( matches[ j ] = seed[ j ] );
-				}
-			}
-		} );
-	} );
-}
-
-/**
- * Checks a node for validity as a Sizzle context
- * @param {Element|Object=} context
- * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value
- */
-function testContext( context ) {
-	return context && typeof context.getElementsByTagName !== "undefined" && context;
-}
-
-// Expose support vars for convenience
-support = Sizzle.support = {};
-
-/**
- * Detects XML nodes
- * @param {Element|Object} elem An element or a document
- * @returns {Boolean} True iff elem is a non-HTML XML node
- */
-isXML = Sizzle.isXML = function( elem ) {
-	var namespace = elem.namespaceURI,
-		docElem = ( elem.ownerDocument || elem ).documentElement;
-
-	// Support: IE <=8
-	// Assume HTML when documentElement doesn't yet exist, such as inside loading iframes
-	// https://bugs.jquery.com/ticket/4833
-	return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" );
-};
-
-/**
- * Sets document-related variables once based on the current document
- * @param {Element|Object} [doc] An element or document object to use to set the document
- * @returns {Object} Returns the current document
- */
-setDocument = Sizzle.setDocument = function( node ) {
-	var hasCompare, subWindow,
-		doc = node ? node.ownerDocument || node : preferredDoc;
-
-	// Return early if doc is invalid or already selected
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) {
-		return document;
-	}
-
-	// Update global variables
-	document = doc;
-	docElem = document.documentElement;
-	documentIsHTML = !isXML( document );
-
-	// Support: IE 9 - 11+, Edge 12 - 18+
-	// Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936)
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( preferredDoc != document &&
-		( subWindow = document.defaultView ) && subWindow.top !== subWindow ) {
-
-		// Support: IE 11, Edge
-		if ( subWindow.addEventListener ) {
-			subWindow.addEventListener( "unload", unloadHandler, false );
-
-		// Support: IE 9 - 10 only
-		} else if ( subWindow.attachEvent ) {
-			subWindow.attachEvent( "onunload", unloadHandler );
-		}
-	}
-
-	// Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only,
-	// Safari 4 - 5 only, Opera <=11.6 - 12.x only
-	// IE/Edge & older browsers don't support the :scope pseudo-class.
-	// Support: Safari 6.0 only
-	// Safari 6.0 supports :scope but it's an alias of :root there.
-	support.scope = assert( function( el ) {
-		docElem.appendChild( el ).appendChild( document.createElement( "div" ) );
-		return typeof el.querySelectorAll !== "undefined" &&
-			!el.querySelectorAll( ":scope fieldset div" ).length;
-	} );
-
-	/* Attributes
-	---------------------------------------------------------------------- */
-
-	// Support: IE<8
-	// Verify that getAttribute really returns attributes and not properties
-	// (excepting IE8 booleans)
-	support.attributes = assert( function( el ) {
-		el.className = "i";
-		return !el.getAttribute( "className" );
-	} );
-
-	/* getElement(s)By*
-	---------------------------------------------------------------------- */
-
-	// Check if getElementsByTagName("*") returns only elements
-	support.getElementsByTagName = assert( function( el ) {
-		el.appendChild( document.createComment( "" ) );
-		return !el.getElementsByTagName( "*" ).length;
-	} );
-
-	// Support: IE<9
-	support.getElementsByClassName = rnative.test( document.getElementsByClassName );
-
-	// Support: IE<10
-	// Check if getElementById returns elements by name
-	// The broken getElementById methods don't pick up programmatically-set names,
-	// so use a roundabout getElementsByName test
-	support.getById = assert( function( el ) {
-		docElem.appendChild( el ).id = expando;
-		return !document.getElementsByName || !document.getElementsByName( expando ).length;
-	} );
-
-	// ID filter and find
-	if ( support.getById ) {
-		Expr.filter[ "ID" ] = function( id ) {
-			var attrId = id.replace( runescape, funescape );
-			return function( elem ) {
-				return elem.getAttribute( "id" ) === attrId;
-			};
-		};
-		Expr.find[ "ID" ] = function( id, context ) {
-			if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
-				var elem = context.getElementById( id );
-				return elem ? [ elem ] : [];
-			}
-		};
-	} else {
-		Expr.filter[ "ID" ] =  function( id ) {
-			var attrId = id.replace( runescape, funescape );
-			return function( elem ) {
-				var node = typeof elem.getAttributeNode !== "undefined" &&
-					elem.getAttributeNode( "id" );
-				return node && node.value === attrId;
-			};
-		};
-
-		// Support: IE 6 - 7 only
-		// getElementById is not reliable as a find shortcut
-		Expr.find[ "ID" ] = function( id, context ) {
-			if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
-				var node, i, elems,
-					elem = context.getElementById( id );
-
-				if ( elem ) {
-
-					// Verify the id attribute
-					node = elem.getAttributeNode( "id" );
-					if ( node && node.value === id ) {
-						return [ elem ];
-					}
-
-					// Fall back on getElementsByName
-					elems = context.getElementsByName( id );
-					i = 0;
-					while ( ( elem = elems[ i++ ] ) ) {
-						node = elem.getAttributeNode( "id" );
-						if ( node && node.value === id ) {
-							return [ elem ];
-						}
-					}
-				}
-
-				return [];
-			}
-		};
-	}
-
-	// Tag
-	Expr.find[ "TAG" ] = support.getElementsByTagName ?
-		function( tag, context ) {
-			if ( typeof context.getElementsByTagName !== "undefined" ) {
-				return context.getElementsByTagName( tag );
-
-			// DocumentFragment nodes don't have gEBTN
-			} else if ( support.qsa ) {
-				return context.querySelectorAll( tag );
-			}
-		} :
-
-		function( tag, context ) {
-			var elem,
-				tmp = [],
-				i = 0,
-
-				// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
-				results = context.getElementsByTagName( tag );
-
-			// Filter out possible comments
-			if ( tag === "*" ) {
-				while ( ( elem = results[ i++ ] ) ) {
-					if ( elem.nodeType === 1 ) {
-						tmp.push( elem );
-					}
-				}
-
-				return tmp;
-			}
-			return results;
-		};
-
-	// Class
-	Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
-		if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
-			return context.getElementsByClassName( className );
-		}
-	};
-
-	/* QSA/matchesSelector
-	---------------------------------------------------------------------- */
-
-	// QSA and matchesSelector support
-
-	// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
-	rbuggyMatches = [];
-
-	// qSa(:focus) reports false when true (Chrome 21)
-	// We allow this because of a bug in IE8/9 that throws an error
-	// whenever `document.activeElement` is accessed on an iframe
-	// So, we allow :focus to pass through QSA all the time to avoid the IE error
-	// See https://bugs.jquery.com/ticket/13378
-	rbuggyQSA = [];
-
-	if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
-
-		// Build QSA regex
-		// Regex strategy adopted from Diego Perini
-		assert( function( el ) {
-
-			var input;
-
-			// Select is set to empty string on purpose
-			// This is to test IE's treatment of not explicitly
-			// setting a boolean content attribute,
-			// since its presence should be enough
-			// https://bugs.jquery.com/ticket/12359
-			docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
-				"<select id='" + expando + "-\r\\' msallowcapture=''>" +
-				"<option selected=''></option></select>";
-
-			// Support: IE8, Opera 11-12.16
-			// Nothing should be selected when empty strings follow ^= or $= or *=
-			// The test attribute must be unknown in Opera but "safe" for WinRT
-			// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
-			if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
-				rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
-			}
-
-			// Support: IE8
-			// Boolean attributes and "value" are not treated correctly
-			if ( !el.querySelectorAll( "[selected]" ).length ) {
-				rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
-			}
-
-			// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
-			if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
-				rbuggyQSA.push( "~=" );
-			}
-
-			// Support: IE 11+, Edge 15 - 18+
-			// IE 11/Edge don't find elements on a `[name='']` query in some cases.
-			// Adding a temporary attribute to the document before the selection works
-			// around the issue.
-			// Interestingly, IE 10 & older don't seem to have the issue.
-			input = document.createElement( "input" );
-			input.setAttribute( "name", "" );
-			el.appendChild( input );
-			if ( !el.querySelectorAll( "[name='']" ).length ) {
-				rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
-					whitespace + "*(?:''|\"\")" );
-			}
-
-			// Webkit/Opera - :checked should return selected option elements
-			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-			// IE8 throws error here and will not see later tests
-			if ( !el.querySelectorAll( ":checked" ).length ) {
-				rbuggyQSA.push( ":checked" );
-			}
-
-			// Support: Safari 8+, iOS 8+
-			// https://bugs.webkit.org/show_bug.cgi?id=136851
-			// In-page `selector#id sibling-combinator selector` fails
-			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
-				rbuggyQSA.push( ".#.+[+~]" );
-			}
-
-			// Support: Firefox <=3.6 - 5 only
-			// Old Firefox doesn't throw on a badly-escaped identifier.
-			el.querySelectorAll( "\\\f" );
-			rbuggyQSA.push( "[\\r\\n\\f]" );
-		} );
-
-		assert( function( el ) {
-			el.innerHTML = "<a href='' disabled='disabled'></a>" +
-				"<select disabled='disabled'><option/></select>";
-
-			// Support: Windows 8 Native Apps
-			// The type and name attributes are restricted during .innerHTML assignment
-			var input = document.createElement( "input" );
-			input.setAttribute( "type", "hidden" );
-			el.appendChild( input ).setAttribute( "name", "D" );
-
-			// Support: IE8
-			// Enforce case-sensitivity of name attribute
-			if ( el.querySelectorAll( "[name=d]" ).length ) {
-				rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
-			}
-
-			// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
-			// IE8 throws error here and will not see later tests
-			if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: IE9-11+
-			// IE's :disabled selector does not pick up the children of disabled fieldsets
-			docElem.appendChild( el ).disabled = true;
-			if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: Opera 10 - 11 only
-			// Opera 10-11 does not throw on post-comma invalid pseudos
-			el.querySelectorAll( "*,:x" );
-			rbuggyQSA.push( ",.*:" );
-		} );
-	}
-
-	if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
-		docElem.webkitMatchesSelector ||
-		docElem.mozMatchesSelector ||
-		docElem.oMatchesSelector ||
-		docElem.msMatchesSelector ) ) ) ) {
-
-		assert( function( el ) {
-
-			// Check to see if it's possible to do matchesSelector
-			// on a disconnected node (IE 9)
-			support.disconnectedMatch = matches.call( el, "*" );
-
-			// This should fail with an exception
-			// Gecko does not error, returns false instead
-			matches.call( el, "[s!='']:x" );
-			rbuggyMatches.push( "!=", pseudos );
-		} );
-	}
-
-	rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
-	rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
-
-	/* Contains
-	---------------------------------------------------------------------- */
-	hasCompare = rnative.test( docElem.compareDocumentPosition );
-
-	// Element contains another
-	// Purposefully self-exclusive
-	// As in, an element does not contain itself
-	contains = hasCompare || rnative.test( docElem.contains ) ?
-		function( a, b ) {
-			var adown = a.nodeType === 9 ? a.documentElement : a,
-				bup = b && b.parentNode;
-			return a === bup || !!( bup && bup.nodeType === 1 && (
-				adown.contains ?
-					adown.contains( bup ) :
-					a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
-			) );
-		} :
-		function( a, b ) {
-			if ( b ) {
-				while ( ( b = b.parentNode ) ) {
-					if ( b === a ) {
-						return true;
-					}
-				}
-			}
-			return false;
-		};
-
-	/* Sorting
-	---------------------------------------------------------------------- */
-
-	// Document order sorting
-	sortOrder = hasCompare ?
-	function( a, b ) {
-
-		// Flag for duplicate removal
-		if ( a === b ) {
-			hasDuplicate = true;
-			return 0;
-		}
-
-		// Sort on method existence if only one input has compareDocumentPosition
-		var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
-		if ( compare ) {
-			return compare;
-		}
-
-		// Calculate position if both inputs belong to the same document
-		// Support: IE 11+, Edge 17 - 18+
-		// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-		// two documents; shallow comparisons work.
-		// eslint-disable-next-line eqeqeq
-		compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ?
-			a.compareDocumentPosition( b ) :
-
-			// Otherwise we know they are disconnected
-			1;
-
-		// Disconnected nodes
-		if ( compare & 1 ||
-			( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) {
-
-			// Choose the first element that is related to our preferred document
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			// eslint-disable-next-line eqeqeq
-			if ( a == document || a.ownerDocument == preferredDoc &&
-				contains( preferredDoc, a ) ) {
-				return -1;
-			}
-
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			// eslint-disable-next-line eqeqeq
-			if ( b == document || b.ownerDocument == preferredDoc &&
-				contains( preferredDoc, b ) ) {
-				return 1;
-			}
-
-			// Maintain original order
-			return sortInput ?
-				( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
-				0;
-		}
-
-		return compare & 4 ? -1 : 1;
-	} :
-	function( a, b ) {
-
-		// Exit early if the nodes are identical
-		if ( a === b ) {
-			hasDuplicate = true;
-			return 0;
-		}
-
-		var cur,
-			i = 0,
-			aup = a.parentNode,
-			bup = b.parentNode,
-			ap = [ a ],
-			bp = [ b ];
-
-		// Parentless nodes are either documents or disconnected
-		if ( !aup || !bup ) {
-
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			/* eslint-disable eqeqeq */
-			return a == document ? -1 :
-				b == document ? 1 :
-				/* eslint-enable eqeqeq */
-				aup ? -1 :
-				bup ? 1 :
-				sortInput ?
-				( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
-				0;
-
-		// If the nodes are siblings, we can do a quick check
-		} else if ( aup === bup ) {
-			return siblingCheck( a, b );
-		}
-
-		// Otherwise we need full lists of their ancestors for comparison
-		cur = a;
-		while ( ( cur = cur.parentNode ) ) {
-			ap.unshift( cur );
-		}
-		cur = b;
-		while ( ( cur = cur.parentNode ) ) {
-			bp.unshift( cur );
-		}
-
-		// Walk down the tree looking for a discrepancy
-		while ( ap[ i ] === bp[ i ] ) {
-			i++;
-		}
-
-		return i ?
-
-			// Do a sibling check if the nodes have a common ancestor
-			siblingCheck( ap[ i ], bp[ i ] ) :
-
-			// Otherwise nodes in our document sort first
-			// Support: IE 11+, Edge 17 - 18+
-			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-			// two documents; shallow comparisons work.
-			/* eslint-disable eqeqeq */
-			ap[ i ] == preferredDoc ? -1 :
-			bp[ i ] == preferredDoc ? 1 :
-			/* eslint-enable eqeqeq */
-			0;
-	};
-
-	return document;
-};
-
-Sizzle.matches = function( expr, elements ) {
-	return Sizzle( expr, null, null, elements );
-};
-
-Sizzle.matchesSelector = function( elem, expr ) {
-	setDocument( elem );
-
-	if ( support.matchesSelector && documentIsHTML &&
-		!nonnativeSelectorCache[ expr + " " ] &&
-		( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
-		( !rbuggyQSA     || !rbuggyQSA.test( expr ) ) ) {
-
-		try {
-			var ret = matches.call( elem, expr );
-
-			// IE 9's matchesSelector returns false on disconnected nodes
-			if ( ret || support.disconnectedMatch ||
-
-				// As well, disconnected nodes are said to be in a document
-				// fragment in IE 9
-				elem.document && elem.document.nodeType !== 11 ) {
-				return ret;
-			}
-		} catch ( e ) {
-			nonnativeSelectorCache( expr, true );
-		}
-	}
-
-	return Sizzle( expr, document, null, [ elem ] ).length > 0;
-};
-
-Sizzle.contains = function( context, elem ) {
-
-	// Set document vars if needed
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( ( context.ownerDocument || context ) != document ) {
-		setDocument( context );
-	}
-	return contains( context, elem );
-};
-
-Sizzle.attr = function( elem, name ) {
-
-	// Set document vars if needed
-	// Support: IE 11+, Edge 17 - 18+
-	// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-	// two documents; shallow comparisons work.
-	// eslint-disable-next-line eqeqeq
-	if ( ( elem.ownerDocument || elem ) != document ) {
-		setDocument( elem );
-	}
-
-	var fn = Expr.attrHandle[ name.toLowerCase() ],
-
-		// Don't get fooled by Object.prototype properties (jQuery #13807)
-		val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
-			fn( elem, name, !documentIsHTML ) :
-			undefined;
-
-	return val !== undefined ?
-		val :
-		support.attributes || !documentIsHTML ?
-			elem.getAttribute( name ) :
-			( val = elem.getAttributeNode( name ) ) && val.specified ?
-				val.value :
-				null;
-};
-
-Sizzle.escape = function( sel ) {
-	return ( sel + "" ).replace( rcssescape, fcssescape );
-};
-
-Sizzle.error = function( msg ) {
-	throw new Error( "Syntax error, unrecognized expression: " + msg );
-};
-
-/**
- * Document sorting and removing duplicates
- * @param {ArrayLike} results
- */
-Sizzle.uniqueSort = function( results ) {
-	var elem,
-		duplicates = [],
-		j = 0,
-		i = 0;
-
-	// Unless we *know* we can detect duplicates, assume their presence
-	hasDuplicate = !support.detectDuplicates;
-	sortInput = !support.sortStable && results.slice( 0 );
-	results.sort( sortOrder );
-
-	if ( hasDuplicate ) {
-		while ( ( elem = results[ i++ ] ) ) {
-			if ( elem === results[ i ] ) {
-				j = duplicates.push( i );
-			}
-		}
-		while ( j-- ) {
-			results.splice( duplicates[ j ], 1 );
-		}
-	}
-
-	// Clear input after sorting to release objects
-	// See https://github.com/jquery/sizzle/pull/225
-	sortInput = null;
-
-	return results;
-};
-
-/**
- * Utility function for retrieving the text value of an array of DOM nodes
- * @param {Array|Element} elem
- */
-getText = Sizzle.getText = function( elem ) {
-	var node,
-		ret = "",
-		i = 0,
-		nodeType = elem.nodeType;
-
-	if ( !nodeType ) {
-
-		// If no nodeType, this is expected to be an array
-		while ( ( node = elem[ i++ ] ) ) {
-
-			// Do not traverse comment nodes
-			ret += getText( node );
-		}
-	} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
-
-		// Use textContent for elements
-		// innerText usage removed for consistency of new lines (jQuery #11153)
-		if ( typeof elem.textContent === "string" ) {
-			return elem.textContent;
-		} else {
-
-			// Traverse its children
-			for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
-				ret += getText( elem );
-			}
-		}
-	} else if ( nodeType === 3 || nodeType === 4 ) {
-		return elem.nodeValue;
-	}
-
-	// Do not include comment or processing instruction nodes
-
-	return ret;
-};
-
-Expr = Sizzle.selectors = {
-
-	// Can be adjusted by the user
-	cacheLength: 50,
-
-	createPseudo: markFunction,
-
-	match: matchExpr,
-
-	attrHandle: {},
-
-	find: {},
-
-	relative: {
-		">": { dir: "parentNode", first: true },
-		" ": { dir: "parentNode" },
-		"+": { dir: "previousSibling", first: true },
-		"~": { dir: "previousSibling" }
-	},
-
-	preFilter: {
-		"ATTR": function( match ) {
-			match[ 1 ] = match[ 1 ].replace( runescape, funescape );
-
-			// Move the given value to match[3] whether quoted or unquoted
-			match[ 3 ] = ( match[ 3 ] || match[ 4 ] ||
-				match[ 5 ] || "" ).replace( runescape, funescape );
-
-			if ( match[ 2 ] === "~=" ) {
-				match[ 3 ] = " " + match[ 3 ] + " ";
-			}
-
-			return match.slice( 0, 4 );
-		},
-
-		"CHILD": function( match ) {
-
-			/* matches from matchExpr["CHILD"]
-				1 type (only|nth|...)
-				2 what (child|of-type)
-				3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
-				4 xn-component of xn+y argument ([+-]?\d*n|)
-				5 sign of xn-component
-				6 x of xn-component
-				7 sign of y-component
-				8 y of y-component
-			*/
-			match[ 1 ] = match[ 1 ].toLowerCase();
-
-			if ( match[ 1 ].slice( 0, 3 ) === "nth" ) {
-
-				// nth-* requires argument
-				if ( !match[ 3 ] ) {
-					Sizzle.error( match[ 0 ] );
-				}
-
-				// numeric x and y parameters for Expr.filter.CHILD
-				// remember that false/true cast respectively to 0/1
-				match[ 4 ] = +( match[ 4 ] ?
-					match[ 5 ] + ( match[ 6 ] || 1 ) :
-					2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) );
-				match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" );
-
-				// other types prohibit arguments
-			} else if ( match[ 3 ] ) {
-				Sizzle.error( match[ 0 ] );
-			}
-
-			return match;
-		},
-
-		"PSEUDO": function( match ) {
-			var excess,
-				unquoted = !match[ 6 ] && match[ 2 ];
-
-			if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) {
-				return null;
-			}
-
-			// Accept quoted arguments as-is
-			if ( match[ 3 ] ) {
-				match[ 2 ] = match[ 4 ] || match[ 5 ] || "";
-
-			// Strip excess characters from unquoted arguments
-			} else if ( unquoted && rpseudo.test( unquoted ) &&
-
-				// Get excess from tokenize (recursively)
-				( excess = tokenize( unquoted, true ) ) &&
-
-				// advance to the next closing parenthesis
-				( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) {
-
-				// excess is a negative index
-				match[ 0 ] = match[ 0 ].slice( 0, excess );
-				match[ 2 ] = unquoted.slice( 0, excess );
-			}
-
-			// Return only captures needed by the pseudo filter method (type and argument)
-			return match.slice( 0, 3 );
-		}
-	},
-
-	filter: {
-
-		"TAG": function( nodeNameSelector ) {
-			var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
-			return nodeNameSelector === "*" ?
-				function() {
-					return true;
-				} :
-				function( elem ) {
-					return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
-				};
-		},
-
-		"CLASS": function( className ) {
-			var pattern = classCache[ className + " " ];
-
-			return pattern ||
-				( pattern = new RegExp( "(^|" + whitespace +
-					")" + className + "(" + whitespace + "|$)" ) ) && classCache(
-						className, function( elem ) {
-							return pattern.test(
-								typeof elem.className === "string" && elem.className ||
-								typeof elem.getAttribute !== "undefined" &&
-									elem.getAttribute( "class" ) ||
-								""
-							);
-				} );
-		},
-
-		"ATTR": function( name, operator, check ) {
-			return function( elem ) {
-				var result = Sizzle.attr( elem, name );
-
-				if ( result == null ) {
-					return operator === "!=";
-				}
-				if ( !operator ) {
-					return true;
-				}
-
-				result += "";
-
-				/* eslint-disable max-len */
-
-				return operator === "=" ? result === check :
-					operator === "!=" ? result !== check :
-					operator === "^=" ? check && result.indexOf( check ) === 0 :
-					operator === "*=" ? check && result.indexOf( check ) > -1 :
-					operator === "$=" ? check && result.slice( -check.length ) === check :
-					operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
-					operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
-					false;
-				/* eslint-enable max-len */
-
-			};
-		},
-
-		"CHILD": function( type, what, _argument, first, last ) {
-			var simple = type.slice( 0, 3 ) !== "nth",
-				forward = type.slice( -4 ) !== "last",
-				ofType = what === "of-type";
-
-			return first === 1 && last === 0 ?
-
-				// Shortcut for :nth-*(n)
-				function( elem ) {
-					return !!elem.parentNode;
-				} :
-
-				function( elem, _context, xml ) {
-					var cache, uniqueCache, outerCache, node, nodeIndex, start,
-						dir = simple !== forward ? "nextSibling" : "previousSibling",
-						parent = elem.parentNode,
-						name = ofType && elem.nodeName.toLowerCase(),
-						useCache = !xml && !ofType,
-						diff = false;
-
-					if ( parent ) {
-
-						// :(first|last|only)-(child|of-type)
-						if ( simple ) {
-							while ( dir ) {
-								node = elem;
-								while ( ( node = node[ dir ] ) ) {
-									if ( ofType ?
-										node.nodeName.toLowerCase() === name :
-										node.nodeType === 1 ) {
-
-										return false;
-									}
-								}
-
-								// Reverse direction for :only-* (if we haven't yet done so)
-								start = dir = type === "only" && !start && "nextSibling";
-							}
-							return true;
-						}
-
-						start = [ forward ? parent.firstChild : parent.lastChild ];
-
-						// non-xml :nth-child(...) stores cache data on `parent`
-						if ( forward && useCache ) {
-
-							// Seek `elem` from a previously-cached index
-
-							// ...in a gzip-friendly way
-							node = parent;
-							outerCache = node[ expando ] || ( node[ expando ] = {} );
-
-							// Support: IE <9 only
-							// Defend against cloned attroperties (jQuery gh-1709)
-							uniqueCache = outerCache[ node.uniqueID ] ||
-								( outerCache[ node.uniqueID ] = {} );
-
-							cache = uniqueCache[ type ] || [];
-							nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
-							diff = nodeIndex && cache[ 2 ];
-							node = nodeIndex && parent.childNodes[ nodeIndex ];
-
-							while ( ( node = ++nodeIndex && node && node[ dir ] ||
-
-								// Fallback to seeking `elem` from the start
-								( diff = nodeIndex = 0 ) || start.pop() ) ) {
-
-								// When found, cache indexes on `parent` and break
-								if ( node.nodeType === 1 && ++diff && node === elem ) {
-									uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
-									break;
-								}
-							}
-
-						} else {
-
-							// Use previously-cached element index if available
-							if ( useCache ) {
-
-								// ...in a gzip-friendly way
-								node = elem;
-								outerCache = node[ expando ] || ( node[ expando ] = {} );
-
-								// Support: IE <9 only
-								// Defend against cloned attroperties (jQuery gh-1709)
-								uniqueCache = outerCache[ node.uniqueID ] ||
-									( outerCache[ node.uniqueID ] = {} );
-
-								cache = uniqueCache[ type ] || [];
-								nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
-								diff = nodeIndex;
-							}
-
-							// xml :nth-child(...)
-							// or :nth-last-child(...) or :nth(-last)?-of-type(...)
-							if ( diff === false ) {
-
-								// Use the same loop as above to seek `elem` from the start
-								while ( ( node = ++nodeIndex && node && node[ dir ] ||
-									( diff = nodeIndex = 0 ) || start.pop() ) ) {
-
-									if ( ( ofType ?
-										node.nodeName.toLowerCase() === name :
-										node.nodeType === 1 ) &&
-										++diff ) {
-
-										// Cache the index of each encountered element
-										if ( useCache ) {
-											outerCache = node[ expando ] ||
-												( node[ expando ] = {} );
-
-											// Support: IE <9 only
-											// Defend against cloned attroperties (jQuery gh-1709)
-											uniqueCache = outerCache[ node.uniqueID ] ||
-												( outerCache[ node.uniqueID ] = {} );
-
-											uniqueCache[ type ] = [ dirruns, diff ];
-										}
-
-										if ( node === elem ) {
-											break;
-										}
-									}
-								}
-							}
-						}
-
-						// Incorporate the offset, then check against cycle size
-						diff -= last;
-						return diff === first || ( diff % first === 0 && diff / first >= 0 );
-					}
-				};
-		},
-
-		"PSEUDO": function( pseudo, argument ) {
-
-			// pseudo-class names are case-insensitive
-			// http://www.w3.org/TR/selectors/#pseudo-classes
-			// Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
-			// Remember that setFilters inherits from pseudos
-			var args,
-				fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
-					Sizzle.error( "unsupported pseudo: " + pseudo );
-
-			// The user may use createPseudo to indicate that
-			// arguments are needed to create the filter function
-			// just as Sizzle does
-			if ( fn[ expando ] ) {
-				return fn( argument );
-			}
-
-			// But maintain support for old signatures
-			if ( fn.length > 1 ) {
-				args = [ pseudo, pseudo, "", argument ];
-				return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
-					markFunction( function( seed, matches ) {
-						var idx,
-							matched = fn( seed, argument ),
-							i = matched.length;
-						while ( i-- ) {
-							idx = indexOf( seed, matched[ i ] );
-							seed[ idx ] = !( matches[ idx ] = matched[ i ] );
-						}
-					} ) :
-					function( elem ) {
-						return fn( elem, 0, args );
-					};
-			}
-
-			return fn;
-		}
-	},
-
-	pseudos: {
-
-		// Potentially complex pseudos
-		"not": markFunction( function( selector ) {
-
-			// Trim the selector passed to compile
-			// to avoid treating leading and trailing
-			// spaces as combinators
-			var input = [],
-				results = [],
-				matcher = compile( selector.replace( rtrim, "$1" ) );
-
-			return matcher[ expando ] ?
-				markFunction( function( seed, matches, _context, xml ) {
-					var elem,
-						unmatched = matcher( seed, null, xml, [] ),
-						i = seed.length;
-
-					// Match elements unmatched by `matcher`
-					while ( i-- ) {
-						if ( ( elem = unmatched[ i ] ) ) {
-							seed[ i ] = !( matches[ i ] = elem );
-						}
-					}
-				} ) :
-				function( elem, _context, xml ) {
-					input[ 0 ] = elem;
-					matcher( input, null, xml, results );
-
-					// Don't keep the element (issue #299)
-					input[ 0 ] = null;
-					return !results.pop();
-				};
-		} ),
-
-		"has": markFunction( function( selector ) {
-			return function( elem ) {
-				return Sizzle( selector, elem ).length > 0;
-			};
-		} ),
-
-		"contains": markFunction( function( text ) {
-			text = text.replace( runescape, funescape );
-			return function( elem ) {
-				return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1;
-			};
-		} ),
-
-		// "Whether an element is represented by a :lang() selector
-		// is based solely on the element's language value
-		// being equal to the identifier C,
-		// or beginning with the identifier C immediately followed by "-".
-		// The matching of C against the element's language value is performed case-insensitively.
-		// The identifier C does not have to be a valid language name."
-		// http://www.w3.org/TR/selectors/#lang-pseudo
-		"lang": markFunction( function( lang ) {
-
-			// lang value must be a valid identifier
-			if ( !ridentifier.test( lang || "" ) ) {
-				Sizzle.error( "unsupported lang: " + lang );
-			}
-			lang = lang.replace( runescape, funescape ).toLowerCase();
-			return function( elem ) {
-				var elemLang;
-				do {
-					if ( ( elemLang = documentIsHTML ?
-						elem.lang :
-						elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) {
-
-						elemLang = elemLang.toLowerCase();
-						return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
-					}
-				} while ( ( elem = elem.parentNode ) && elem.nodeType === 1 );
-				return false;
-			};
-		} ),
-
-		// Miscellaneous
-		"target": function( elem ) {
-			var hash = window.location && window.location.hash;
-			return hash && hash.slice( 1 ) === elem.id;
-		},
-
-		"root": function( elem ) {
-			return elem === docElem;
-		},
-
-		"focus": function( elem ) {
-			return elem === document.activeElement &&
-				( !document.hasFocus || document.hasFocus() ) &&
-				!!( elem.type || elem.href || ~elem.tabIndex );
-		},
-
-		// Boolean properties
-		"enabled": createDisabledPseudo( false ),
-		"disabled": createDisabledPseudo( true ),
-
-		"checked": function( elem ) {
-
-			// In CSS3, :checked should return both checked and selected elements
-			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-			var nodeName = elem.nodeName.toLowerCase();
-			return ( nodeName === "input" && !!elem.checked ) ||
-				( nodeName === "option" && !!elem.selected );
-		},
-
-		"selected": function( elem ) {
-
-			// Accessing this property makes selected-by-default
-			// options in Safari work properly
-			if ( elem.parentNode ) {
-				// eslint-disable-next-line no-unused-expressions
-				elem.parentNode.selectedIndex;
-			}
-
-			return elem.selected === true;
-		},
-
-		// Contents
-		"empty": function( elem ) {
-
-			// http://www.w3.org/TR/selectors/#empty-pseudo
-			// :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
-			//   but not by others (comment: 8; processing instruction: 7; etc.)
-			// nodeType < 6 works because attributes (2) do not appear as children
-			for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
-				if ( elem.nodeType < 6 ) {
-					return false;
-				}
-			}
-			return true;
-		},
-
-		"parent": function( elem ) {
-			return !Expr.pseudos[ "empty" ]( elem );
-		},
-
-		// Element/input types
-		"header": function( elem ) {
-			return rheader.test( elem.nodeName );
-		},
-
-		"input": function( elem ) {
-			return rinputs.test( elem.nodeName );
-		},
-
-		"button": function( elem ) {
-			var name = elem.nodeName.toLowerCase();
-			return name === "input" && elem.type === "button" || name === "button";
-		},
-
-		"text": function( elem ) {
-			var attr;
-			return elem.nodeName.toLowerCase() === "input" &&
-				elem.type === "text" &&
-
-				// Support: IE<8
-				// New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
-				( ( attr = elem.getAttribute( "type" ) ) == null ||
-					attr.toLowerCase() === "text" );
-		},
-
-		// Position-in-collection
-		"first": createPositionalPseudo( function() {
-			return [ 0 ];
-		} ),
-
-		"last": createPositionalPseudo( function( _matchIndexes, length ) {
-			return [ length - 1 ];
-		} ),
-
-		"eq": createPositionalPseudo( function( _matchIndexes, length, argument ) {
-			return [ argument < 0 ? argument + length : argument ];
-		} ),
-
-		"even": createPositionalPseudo( function( matchIndexes, length ) {
-			var i = 0;
-			for ( ; i < length; i += 2 ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"odd": createPositionalPseudo( function( matchIndexes, length ) {
-			var i = 1;
-			for ( ; i < length; i += 2 ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"lt": createPositionalPseudo( function( matchIndexes, length, argument ) {
-			var i = argument < 0 ?
-				argument + length :
-				argument > length ?
-					length :
-					argument;
-			for ( ; --i >= 0; ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} ),
-
-		"gt": createPositionalPseudo( function( matchIndexes, length, argument ) {
-			var i = argument < 0 ? argument + length : argument;
-			for ( ; ++i < length; ) {
-				matchIndexes.push( i );
-			}
-			return matchIndexes;
-		} )
-	}
-};
-
-Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ];
-
-// Add button/input type pseudos
-for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
-	Expr.pseudos[ i ] = createInputPseudo( i );
-}
-for ( i in { submit: true, reset: true } ) {
-	Expr.pseudos[ i ] = createButtonPseudo( i );
-}
-
-// Easy API for creating new setFilters
-function setFilters() {}
-setFilters.prototype = Expr.filters = Expr.pseudos;
-Expr.setFilters = new setFilters();
-
-tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
-	var matched, match, tokens, type,
-		soFar, groups, preFilters,
-		cached = tokenCache[ selector + " " ];
-
-	if ( cached ) {
-		return parseOnly ? 0 : cached.slice( 0 );
-	}
-
-	soFar = selector;
-	groups = [];
-	preFilters = Expr.preFilter;
-
-	while ( soFar ) {
-
-		// Comma and first run
-		if ( !matched || ( match = rcomma.exec( soFar ) ) ) {
-			if ( match ) {
-
-				// Don't consume trailing commas as valid
-				soFar = soFar.slice( match[ 0 ].length ) || soFar;
-			}
-			groups.push( ( tokens = [] ) );
-		}
-
-		matched = false;
-
-		// Combinators
-		if ( ( match = rcombinators.exec( soFar ) ) ) {
-			matched = match.shift();
-			tokens.push( {
-				value: matched,
-
-				// Cast descendant combinators to space
-				type: match[ 0 ].replace( rtrim, " " )
-			} );
-			soFar = soFar.slice( matched.length );
-		}
-
-		// Filters
-		for ( type in Expr.filter ) {
-			if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] ||
-				( match = preFilters[ type ]( match ) ) ) ) {
-				matched = match.shift();
-				tokens.push( {
-					value: matched,
-					type: type,
-					matches: match
-				} );
-				soFar = soFar.slice( matched.length );
-			}
-		}
-
-		if ( !matched ) {
-			break;
-		}
-	}
-
-	// Return the length of the invalid excess
-	// if we're just parsing
-	// Otherwise, throw an error or return tokens
-	return parseOnly ?
-		soFar.length :
-		soFar ?
-			Sizzle.error( selector ) :
-
-			// Cache the tokens
-			tokenCache( selector, groups ).slice( 0 );
-};
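-
-// For example, tokenize( "div > .item, span" ) produces one token array per
-// comma-separated group, roughly shaped like this (illustrative sketch):
-//
-//   [
-//     [
-//       { value: "div", type: "TAG", matches: [ "div" ] },
-//       { value: " > ", type: ">" },
-//       { value: ".item", type: "CLASS", matches: [ "item" ] }
-//     ],
-//     [ { value: "span", type: "TAG", matches: [ "span" ] } ]
-//   ]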
-
-function toSelector( tokens ) {
-	var i = 0,
-		len = tokens.length,
-		selector = "";
-	for ( ; i < len; i++ ) {
-		selector += tokens[ i ].value;
-	}
-	return selector;
-}
-
-function addCombinator( matcher, combinator, base ) {
-	var dir = combinator.dir,
-		skip = combinator.next,
-		key = skip || dir,
-		checkNonElements = base && key === "parentNode",
-		doneName = done++;
-
-	return combinator.first ?
-
-		// Check against closest ancestor/preceding element
-		function( elem, context, xml ) {
-			while ( ( elem = elem[ dir ] ) ) {
-				if ( elem.nodeType === 1 || checkNonElements ) {
-					return matcher( elem, context, xml );
-				}
-			}
-			return false;
-		} :
-
-		// Check against all ancestor/preceding elements
-		function( elem, context, xml ) {
-			var oldCache, uniqueCache, outerCache,
-				newCache = [ dirruns, doneName ];
-
-			// We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
-			if ( xml ) {
-				while ( ( elem = elem[ dir ] ) ) {
-					if ( elem.nodeType === 1 || checkNonElements ) {
-						if ( matcher( elem, context, xml ) ) {
-							return true;
-						}
-					}
-				}
-			} else {
-				while ( ( elem = elem[ dir ] ) ) {
-					if ( elem.nodeType === 1 || checkNonElements ) {
-						outerCache = elem[ expando ] || ( elem[ expando ] = {} );
-
-						// Support: IE <9 only
-						// Defend against cloned attroperties (jQuery gh-1709)
-						uniqueCache = outerCache[ elem.uniqueID ] ||
-							( outerCache[ elem.uniqueID ] = {} );
-
-						if ( skip && skip === elem.nodeName.toLowerCase() ) {
-							elem = elem[ dir ] || elem;
-						} else if ( ( oldCache = uniqueCache[ key ] ) &&
-							oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
-
-							// Assign to newCache so results back-propagate to previous elements
-							return ( newCache[ 2 ] = oldCache[ 2 ] );
-						} else {
-
-							// Reuse newCache so results back-propagate to previous elements
-							uniqueCache[ key ] = newCache;
-
-							// A match means we're done; a fail means we have to keep checking
-							if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) {
-								return true;
-							}
-						}
-					}
-				}
-			}
-			return false;
-		};
-}
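-
-// For instance, a child combinator ( { dir: "parentNode", first: true } )
-// yields a matcher that tests only the nearest ancestor element, while a
-// descendant combinator ( { dir: "parentNode" } ) walks every ancestor,
-// memoizing each verdict per element under the current `dirruns` pass so
-// repeated walks over shared ancestors short-circuit.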
-
-function elementMatcher( matchers ) {
-	return matchers.length > 1 ?
-		function( elem, context, xml ) {
-			var i = matchers.length;
-			while ( i-- ) {
-				if ( !matchers[ i ]( elem, context, xml ) ) {
-					return false;
-				}
-			}
-			return true;
-		} :
-		matchers[ 0 ];
-}
-
-function multipleContexts( selector, contexts, results ) {
-	var i = 0,
-		len = contexts.length;
-	for ( ; i < len; i++ ) {
-		Sizzle( selector, contexts[ i ], results );
-	}
-	return results;
-}
-
-function condense( unmatched, map, filter, context, xml ) {
-	var elem,
-		newUnmatched = [],
-		i = 0,
-		len = unmatched.length,
-		mapped = map != null;
-
-	for ( ; i < len; i++ ) {
-		if ( ( elem = unmatched[ i ] ) ) {
-			if ( !filter || filter( elem, context, xml ) ) {
-				newUnmatched.push( elem );
-				if ( mapped ) {
-					map.push( i );
-				}
-			}
-		}
-	}
-
-	return newUnmatched;
-}
-
-function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
-	if ( postFilter && !postFilter[ expando ] ) {
-		postFilter = setMatcher( postFilter );
-	}
-	if ( postFinder && !postFinder[ expando ] ) {
-		postFinder = setMatcher( postFinder, postSelector );
-	}
-	return markFunction( function( seed, results, context, xml ) {
-		var temp, i, elem,
-			preMap = [],
-			postMap = [],
-			preexisting = results.length,
-
-			// Get initial elements from seed or context
-			elems = seed || multipleContexts(
-				selector || "*",
-				context.nodeType ? [ context ] : context,
-				[]
-			),
-
-			// Prefilter to get matcher input, preserving a map for seed-results synchronization
-			matcherIn = preFilter && ( seed || !selector ) ?
-				condense( elems, preMap, preFilter, context, xml ) :
-				elems,
-
-			matcherOut = matcher ?
-
-				// If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
-				postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
-
-					// ...intermediate processing is necessary
-					[] :
-
-					// ...otherwise use results directly
-					results :
-				matcherIn;
-
-		// Find primary matches
-		if ( matcher ) {
-			matcher( matcherIn, matcherOut, context, xml );
-		}
-
-		// Apply postFilter
-		if ( postFilter ) {
-			temp = condense( matcherOut, postMap );
-			postFilter( temp, [], context, xml );
-
-			// Un-match failing elements by moving them back to matcherIn
-			i = temp.length;
-			while ( i-- ) {
-				if ( ( elem = temp[ i ] ) ) {
-					matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem );
-				}
-			}
-		}
-
-		if ( seed ) {
-			if ( postFinder || preFilter ) {
-				if ( postFinder ) {
-
-					// Get the final matcherOut by condensing this intermediate into postFinder contexts
-					temp = [];
-					i = matcherOut.length;
-					while ( i-- ) {
-						if ( ( elem = matcherOut[ i ] ) ) {
-
-							// Restore matcherIn since elem is not yet a final match
-							temp.push( ( matcherIn[ i ] = elem ) );
-						}
-					}
-					postFinder( null, ( matcherOut = [] ), temp, xml );
-				}
-
-				// Move matched elements from seed to results to keep them synchronized
-				i = matcherOut.length;
-				while ( i-- ) {
-					if ( ( elem = matcherOut[ i ] ) &&
-						( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) {
-
-						seed[ temp ] = !( results[ temp ] = elem );
-					}
-				}
-			}
-
-		// Add elements to results, through postFinder if defined
-		} else {
-			matcherOut = condense(
-				matcherOut === results ?
-					matcherOut.splice( preexisting, matcherOut.length ) :
-					matcherOut
-			);
-			if ( postFinder ) {
-				postFinder( null, results, matcherOut, xml );
-			} else {
-				push.apply( results, matcherOut );
-			}
-		}
-	} );
-}
-
-function matcherFromTokens( tokens ) {
-	var checkContext, matcher, j,
-		len = tokens.length,
-		leadingRelative = Expr.relative[ tokens[ 0 ].type ],
-		implicitRelative = leadingRelative || Expr.relative[ " " ],
-		i = leadingRelative ? 1 : 0,
-
-		// The foundational matcher ensures that elements are reachable from top-level context(s)
-		matchContext = addCombinator( function( elem ) {
-			return elem === checkContext;
-		}, implicitRelative, true ),
-		matchAnyContext = addCombinator( function( elem ) {
-			return indexOf( checkContext, elem ) > -1;
-		}, implicitRelative, true ),
-		matchers = [ function( elem, context, xml ) {
-			var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
-				( checkContext = context ).nodeType ?
-					matchContext( elem, context, xml ) :
-					matchAnyContext( elem, context, xml ) );
-
-			// Avoid hanging onto element (issue #299)
-			checkContext = null;
-			return ret;
-		} ];
-
-	for ( ; i < len; i++ ) {
-		if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
-			matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
-		} else {
-			matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );
-
-			// Return special upon seeing a positional matcher
-			if ( matcher[ expando ] ) {
-
-				// Find the next relative operator (if any) for proper handling
-				j = ++i;
-				for ( ; j < len; j++ ) {
-					if ( Expr.relative[ tokens[ j ].type ] ) {
-						break;
-					}
-				}
-				return setMatcher(
-					i > 1 && elementMatcher( matchers ),
-					i > 1 && toSelector(
-
-					// If the preceding token was a descendant combinator, insert an implicit any-element `*`
-					tokens
-						.slice( 0, i - 1 )
-						.concat( { value: tokens[ i - 2 ].type === " " ? "*" : "" } )
-					).replace( rtrim, "$1" ),
-					matcher,
-					i < j && matcherFromTokens( tokens.slice( i, j ) ),
-					j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ),
-					j < len && toSelector( tokens )
-				);
-			}
-			matchers.push( matcher );
-		}
-	}
-
-	return elementMatcher( matchers );
-}
-
-function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
-	var bySet = setMatchers.length > 0,
-		byElement = elementMatchers.length > 0,
-		superMatcher = function( seed, context, xml, results, outermost ) {
-			var elem, j, matcher,
-				matchedCount = 0,
-				i = "0",
-				unmatched = seed && [],
-				setMatched = [],
-				contextBackup = outermostContext,
-
-				// We must always have either seed elements or outermost context
-				elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ),
-
-				// Use integer dirruns iff this is the outermost matcher
-				dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ),
-				len = elems.length;
-
-			if ( outermost ) {
-
-				// Support: IE 11+, Edge 17 - 18+
-				// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-				// two documents; shallow comparisons work.
-				// eslint-disable-next-line eqeqeq
-				outermostContext = context == document || context || outermost;
-			}
-
-			// Add elements passing elementMatchers directly to results
-			// Support: IE<9, Safari
-			// Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
-			for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) {
-				if ( byElement && elem ) {
-					j = 0;
-
-					// Support: IE 11+, Edge 17 - 18+
-					// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
-					// two documents; shallow comparisons work.
-					// eslint-disable-next-line eqeqeq
-					if ( !context && elem.ownerDocument != document ) {
-						setDocument( elem );
-						xml = !documentIsHTML;
-					}
-					while ( ( matcher = elementMatchers[ j++ ] ) ) {
-						if ( matcher( elem, context || document, xml ) ) {
-							results.push( elem );
-							break;
-						}
-					}
-					if ( outermost ) {
-						dirruns = dirrunsUnique;
-					}
-				}
-
-				// Track unmatched elements for set filters
-				if ( bySet ) {
-
-					// They will have gone through all possible matchers
-					if ( ( elem = !matcher && elem ) ) {
-						matchedCount--;
-					}
-
-					// Lengthen the array for every element, matched or not
-					if ( seed ) {
-						unmatched.push( elem );
-					}
-				}
-			}
-
-			// `i` is now the count of elements visited above, and adding it to `matchedCount`
-			// makes the latter nonnegative.
-			matchedCount += i;
-
-			// Apply set filters to unmatched elements
-			// NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
-			// equals `i`), unless we didn't visit _any_ elements in the above loop because we have
-			// no element matchers and no seed.
-			// Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
-			// case, which will result in a "00" `matchedCount` that differs from `i` but is also
-			// numerically zero.
-			if ( bySet && i !== matchedCount ) {
-				j = 0;
-				while ( ( matcher = setMatchers[ j++ ] ) ) {
-					matcher( unmatched, setMatched, context, xml );
-				}
-
-				if ( seed ) {
-
-					// Reintegrate element matches to eliminate the need for sorting
-					if ( matchedCount > 0 ) {
-						while ( i-- ) {
-							if ( !( unmatched[ i ] || setMatched[ i ] ) ) {
-								setMatched[ i ] = pop.call( results );
-							}
-						}
-					}
-
-					// Discard index placeholder values to get only actual matches
-					setMatched = condense( setMatched );
-				}
-
-				// Add matches to results
-				push.apply( results, setMatched );
-
-				// Seedless set matches following multiple successful matchers require sorting
-				if ( outermost && !seed && setMatched.length > 0 &&
-					( matchedCount + setMatchers.length ) > 1 ) {
-
-					Sizzle.uniqueSort( results );
-				}
-			}
-
-			// Override manipulation of globals by nested matchers
-			if ( outermost ) {
-				dirruns = dirrunsUnique;
-				outermostContext = contextBackup;
-			}
-
-			return unmatched;
-		};
-
-	return bySet ?
-		markFunction( superMatcher ) :
-		superMatcher;
-}
-
-compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
-	var i,
-		setMatchers = [],
-		elementMatchers = [],
-		cached = compilerCache[ selector + " " ];
-
-	if ( !cached ) {
-
-		// Generate a function of recursive functions that can be used to check each element
-		if ( !match ) {
-			match = tokenize( selector );
-		}
-		i = match.length;
-		while ( i-- ) {
-			cached = matcherFromTokens( match[ i ] );
-			if ( cached[ expando ] ) {
-				setMatchers.push( cached );
-			} else {
-				elementMatchers.push( cached );
-			}
-		}
-
-		// Cache the compiled function
-		cached = compilerCache(
-			selector,
-			matcherFromGroupMatchers( elementMatchers, setMatchers )
-		);
-
-		// Save selector and tokenization
-		cached.selector = selector;
-	}
-	return cached;
-};
-
-/**
- * A low-level selection function that works with Sizzle's compiled
- *  selector functions
- * @param {String|Function} selector A selector or a pre-compiled
- *  selector function built with Sizzle.compile
- * @param {Element} context
- * @param {Array} [results]
- * @param {Array} [seed] A set of elements to match against
- */
-select = Sizzle.select = function( selector, context, results, seed ) {
-	var i, tokens, token, type, find,
-		compiled = typeof selector === "function" && selector,
-		match = !seed && tokenize( ( selector = compiled.selector || selector ) );
-
-	results = results || [];
-
-	// Try to minimize operations if there is only one selector in the list and no seed
-	// (the latter of which guarantees us context)
-	if ( match.length === 1 ) {
-
-		// Reduce context if the leading compound selector is an ID
-		tokens = match[ 0 ] = match[ 0 ].slice( 0 );
-		if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" &&
-			context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) {
-
-			context = ( Expr.find[ "ID" ]( token.matches[ 0 ]
-				.replace( runescape, funescape ), context ) || [] )[ 0 ];
-			if ( !context ) {
-				return results;
-
-			// Precompiled matchers will still verify ancestry, so step up a level
-			} else if ( compiled ) {
-				context = context.parentNode;
-			}
-
-			selector = selector.slice( tokens.shift().value.length );
-		}
-
-		// Fetch a seed set for right-to-left matching
-		i = matchExpr[ "needsContext" ].test( selector ) ? 0 : tokens.length;
-		while ( i-- ) {
-			token = tokens[ i ];
-
-			// Abort if we hit a combinator
-			if ( Expr.relative[ ( type = token.type ) ] ) {
-				break;
-			}
-			if ( ( find = Expr.find[ type ] ) ) {
-
-				// Search, expanding context for leading sibling combinators
-				if ( ( seed = find(
-					token.matches[ 0 ].replace( runescape, funescape ),
-					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
-						context
-				) ) ) {
-
-					// If seed is empty or no tokens remain, we can return early
-					tokens.splice( i, 1 );
-					selector = seed.length && toSelector( tokens );
-					if ( !selector ) {
-						push.apply( results, seed );
-						return results;
-					}
-
-					break;
-				}
-			}
-		}
-	}
-
-	// Compile and execute a filtering function if one is not provided
-	// Provide `match` to avoid retokenization if we modified the selector above
-	( compiled || compile( selector, match ) )(
-		seed,
-		context,
-		!documentIsHTML,
-		results,
-		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
-	);
-	return results;
-};
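-
-// A selector can thus be precompiled and handed back to select; a minimal
-// sketch using only the public entry points:
-//
-//   var matchItems = Sizzle.compile( "ul > li.item" );
-//   var results = Sizzle.select( matchItems, document, [] );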
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
-	// Should return 1, but returns 4 (following)
-	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
-	el.innerHTML = "<a href='#'></a>";
-	return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
-	addHandle( "type|href|height|width", function( elem, name, isXML ) {
-		if ( !isXML ) {
-			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
-		}
-	} );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
-	el.innerHTML = "<input/>";
-	el.firstChild.setAttribute( "value", "" );
-	return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
-	addHandle( "value", function( elem, _name, isXML ) {
-		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
-			return elem.defaultValue;
-		}
-	} );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
-	return el.getAttribute( "disabled" ) == null;
-} ) ) {
-	addHandle( booleans, function( elem, name, isXML ) {
-		var val;
-		if ( !isXML ) {
-			return elem[ name ] === true ? name.toLowerCase() :
-				( val = elem.getAttributeNode( name ) ) && val.specified ?
-					val.value :
-					null;
-		}
-	} );
-}
-
-return Sizzle;
-
-} )( window );
-
-
-
-jQuery.find = Sizzle;
-jQuery.expr = Sizzle.selectors;
-
-// Deprecated
-jQuery.expr[ ":" ] = jQuery.expr.pseudos;
-jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort;
-jQuery.text = Sizzle.getText;
-jQuery.isXMLDoc = Sizzle.isXML;
-jQuery.contains = Sizzle.contains;
-jQuery.escapeSelector = Sizzle.escape;
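-
-// Because jQuery.expr aliases Sizzle.selectors, custom pseudo-classes can be
-// registered on jQuery.expr.pseudos. A minimal sketch (the ":external" name
-// is illustrative, not a built-in):
-//
-//   jQuery.expr.pseudos.external = function( elem ) {
-//     return !!elem.hostname && elem.hostname !== window.location.hostname;
-//   };
-//
-//   // jQuery( "a:external" ) then selects links pointing off-site.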
-
-
-
-
-var dir = function( elem, dir, until ) {
-	var matched = [],
-		truncate = until !== undefined;
-
-	while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) {
-		if ( elem.nodeType === 1 ) {
-			if ( truncate && jQuery( elem ).is( until ) ) {
-				break;
-			}
-			matched.push( elem );
-		}
-	}
-	return matched;
-};
-
-
-var siblings = function( n, elem ) {
-	var matched = [];
-
-	for ( ; n; n = n.nextSibling ) {
-		if ( n.nodeType === 1 && n !== elem ) {
-			matched.push( n );
-		}
-	}
-
-	return matched;
-};
-
-
-var rneedsContext = jQuery.expr.match.needsContext;
-
-
-
-function nodeName( elem, name ) {
-	return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
-}
-var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i );
-
-
-
-// Implement the identical functionality for filter and not
-function winnow( elements, qualifier, not ) {
-	if ( isFunction( qualifier ) ) {
-		return jQuery.grep( elements, function( elem, i ) {
-			return !!qualifier.call( elem, i, elem ) !== not;
-		} );
-	}
-
-	// Single element
-	if ( qualifier.nodeType ) {
-		return jQuery.grep( elements, function( elem ) {
-			return ( elem === qualifier ) !== not;
-		} );
-	}
-
-	// Arraylike of elements (jQuery, arguments, Array)
-	if ( typeof qualifier !== "string" ) {
-		return jQuery.grep( elements, function( elem ) {
-			return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
-		} );
-	}
-
-	// Filtered directly for both simple and complex selectors
-	return jQuery.filter( qualifier, elements, not );
-}
-
-jQuery.filter = function( expr, elems, not ) {
-	var elem = elems[ 0 ];
-
-	if ( not ) {
-		expr = ":not(" + expr + ")";
-	}
-
-	if ( elems.length === 1 && elem.nodeType === 1 ) {
-		return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
-	}
-
-	return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
-		return elem.nodeType === 1;
-	} ) );
-};
-
-jQuery.fn.extend( {
-	find: function( selector ) {
-		var i, ret,
-			len = this.length,
-			self = this;
-
-		if ( typeof selector !== "string" ) {
-			return this.pushStack( jQuery( selector ).filter( function() {
-				for ( i = 0; i < len; i++ ) {
-					if ( jQuery.contains( self[ i ], this ) ) {
-						return true;
-					}
-				}
-			} ) );
-		}
-
-		ret = this.pushStack( [] );
-
-		for ( i = 0; i < len; i++ ) {
-			jQuery.find( selector, self[ i ], ret );
-		}
-
-		return len > 1 ? jQuery.uniqueSort( ret ) : ret;
-	},
-	filter: function( selector ) {
-		return this.pushStack( winnow( this, selector || [], false ) );
-	},
-	not: function( selector ) {
-		return this.pushStack( winnow( this, selector || [], true ) );
-	},
-	is: function( selector ) {
-		return !!winnow(
-			this,
-
-			// If this is a positional/relative selector, check membership in the returned set
-			// so $("p:first").is("p:last") won't return true for a doc with two "p".
-			typeof selector === "string" && rneedsContext.test( selector ) ?
-				jQuery( selector ) :
-				selector || [],
-			false
-		).length;
-	}
-} );
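-
-// Built on winnow, these methods accept a selector string, an element, an
-// array-like, or a predicate function interchangeably, e.g.:
-//
-//   jQuery( "li" ).filter( ".active" );         // keep matching elements
-//   jQuery( "li" ).not( function( i ) {         // drop by predicate
-//       return i % 2 === 0;
-//   } );
-//   jQuery( "li" ).first().is( document.body ); // test against a single node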
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
-	// A simple way to check for HTML strings
-	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
-	// Strict HTML recognition (#11290: must start with <)
-	// Shortcut simple #id case for speed
-	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
-	init = jQuery.fn.init = function( selector, context, root ) {
-		var match, elem;
-
-		// HANDLE: $(""), $(null), $(undefined), $(false)
-		if ( !selector ) {
-			return this;
-		}
-
-		// Method init() accepts an alternate rootjQuery
-		// so migrate can support jQuery.sub (gh-2101)
-		root = root || rootjQuery;
-
-		// Handle HTML strings
-		if ( typeof selector === "string" ) {
-			if ( selector[ 0 ] === "<" &&
-				selector[ selector.length - 1 ] === ">" &&
-				selector.length >= 3 ) {
-
-				// Assume that strings that start and end with <> are HTML and skip the regex check
-				match = [ null, selector, null ];
-
-			} else {
-				match = rquickExpr.exec( selector );
-			}
-
-			// Match html or make sure no context is specified for #id
-			if ( match && ( match[ 1 ] || !context ) ) {
-
-				// HANDLE: $(html) -> $(array)
-				if ( match[ 1 ] ) {
-					context = context instanceof jQuery ? context[ 0 ] : context;
-
-					// Option to run scripts is true for back-compat
-					// Intentionally let the error be thrown if parseHTML is not present
-					jQuery.merge( this, jQuery.parseHTML(
-						match[ 1 ],
-						context && context.nodeType ? context.ownerDocument || context : document,
-						true
-					) );
-
-					// HANDLE: $(html, props)
-					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
-						for ( match in context ) {
-
-							// Properties of context are called as methods if possible
-							if ( isFunction( this[ match ] ) ) {
-								this[ match ]( context[ match ] );
-
-							// ...and otherwise set as attributes
-							} else {
-								this.attr( match, context[ match ] );
-							}
-						}
-					}
-
-					return this;
-
-				// HANDLE: $(#id)
-				} else {
-					elem = document.getElementById( match[ 2 ] );
-
-					if ( elem ) {
-
-						// Inject the element directly into the jQuery object
-						this[ 0 ] = elem;
-						this.length = 1;
-					}
-					return this;
-				}
-
-			// HANDLE: $(expr, $(...))
-			} else if ( !context || context.jquery ) {
-				return ( context || root ).find( selector );
-
-			// HANDLE: $(expr, context)
-			// (which is just equivalent to: $(context).find(expr))
-			} else {
-				return this.constructor( context ).find( selector );
-			}
-
-		// HANDLE: $(DOMElement)
-		} else if ( selector.nodeType ) {
-			this[ 0 ] = selector;
-			this.length = 1;
-			return this;
-
-		// HANDLE: $(function)
-		// Shortcut for document ready
-		} else if ( isFunction( selector ) ) {
-			return root.ready !== undefined ?
-				root.ready( selector ) :
-
-				// Execute immediately if ready is not present
-				selector( jQuery );
-		}
-
-		return jQuery.makeArray( selector, this );
-	};
-
-// Give the init function the jQuery prototype for later instantiation
-init.prototype = jQuery.fn;
-
-// Initialize central reference
-rootjQuery = jQuery( document );
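-
-// The branches above cover the usual construction forms, e.g.:
-//
-//   jQuery( "<p>hi</p>" );            // HTML string -> parseHTML
-//   jQuery( "<p/>", { text: "hi" } ); // single tag plus props object
-//   jQuery( "#main" );                // fast getElementById path
-//   jQuery( ".item", context );       // same as jQuery( context ).find( ".item" )
-//   jQuery( element );                // wrap an existing DOM node
-//   jQuery( function() {} );          // document-ready shortcut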
-
-
-var rparentsprev = /^(?:parents|prev(?:Until|All))/,
-
-	// Methods guaranteed to produce a unique set when starting from a unique set
-	guaranteedUnique = {
-		children: true,
-		contents: true,
-		next: true,
-		prev: true
-	};
-
-jQuery.fn.extend( {
-	has: function( target ) {
-		var targets = jQuery( target, this ),
-			l = targets.length;
-
-		return this.filter( function() {
-			var i = 0;
-			for ( ; i < l; i++ ) {
-				if ( jQuery.contains( this, targets[ i ] ) ) {
-					return true;
-				}
-			}
-		} );
-	},
-
-	closest: function( selectors, context ) {
-		var cur,
-			i = 0,
-			l = this.length,
-			matched = [],
-			targets = typeof selectors !== "string" && jQuery( selectors );
-
-		// Positional selectors never match, since there's no _selection_ context
-		if ( !rneedsContext.test( selectors ) ) {
-			for ( ; i < l; i++ ) {
-				for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
-
-					// Always skip document fragments
-					if ( cur.nodeType < 11 && ( targets ?
-						targets.index( cur ) > -1 :
-
-						// Don't pass non-elements to Sizzle
-						cur.nodeType === 1 &&
-							jQuery.find.matchesSelector( cur, selectors ) ) ) {
-
-						matched.push( cur );
-						break;
-					}
-				}
-			}
-		}
-
-		return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
-	},
-
-	// Determine the position of an element within the set
-	index: function( elem ) {
-
-		// No argument, return index in parent
-		if ( !elem ) {
-			return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
-		}
-
-		// Index in selector
-		if ( typeof elem === "string" ) {
-			return indexOf.call( jQuery( elem ), this[ 0 ] );
-		}
-
-		// Locate the position of the desired element
-		return indexOf.call( this,
-
-			// If it receives a jQuery object, the first element is used
-			elem.jquery ? elem[ 0 ] : elem
-		);
-	},
-
-	add: function( selector, context ) {
-		return this.pushStack(
-			jQuery.uniqueSort(
-				jQuery.merge( this.get(), jQuery( selector, context ) )
-			)
-		);
-	},
-
-	addBack: function( selector ) {
-		return this.add( selector == null ?
-			this.prevObject : this.prevObject.filter( selector )
-		);
-	}
-} );
-
-function sibling( cur, dir ) {
-	while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
-	return cur;
-}
-
-jQuery.each( {
-	parent: function( elem ) {
-		var parent = elem.parentNode;
-		return parent && parent.nodeType !== 11 ? parent : null;
-	},
-	parents: function( elem ) {
-		return dir( elem, "parentNode" );
-	},
-	parentsUntil: function( elem, _i, until ) {
-		return dir( elem, "parentNode", until );
-	},
-	next: function( elem ) {
-		return sibling( elem, "nextSibling" );
-	},
-	prev: function( elem ) {
-		return sibling( elem, "previousSibling" );
-	},
-	nextAll: function( elem ) {
-		return dir( elem, "nextSibling" );
-	},
-	prevAll: function( elem ) {
-		return dir( elem, "previousSibling" );
-	},
-	nextUntil: function( elem, _i, until ) {
-		return dir( elem, "nextSibling", until );
-	},
-	prevUntil: function( elem, _i, until ) {
-		return dir( elem, "previousSibling", until );
-	},
-	siblings: function( elem ) {
-		return siblings( ( elem.parentNode || {} ).firstChild, elem );
-	},
-	children: function( elem ) {
-		return siblings( elem.firstChild );
-	},
-	contents: function( elem ) {
-		if ( elem.contentDocument != null &&
-
-			// Support: IE 11+
-			// <object> elements with no `data` attribute have an object
-			// `contentDocument` with a `null` prototype.
-			getProto( elem.contentDocument ) ) {
-
-			return elem.contentDocument;
-		}
-
-		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
-		// Treat the template element as a regular one in browsers that
-		// don't support it.
-		if ( nodeName( elem, "template" ) ) {
-			elem = elem.content || elem;
-		}
-
-		return jQuery.merge( [], elem.childNodes );
-	}
-}, function( name, fn ) {
-	jQuery.fn[ name ] = function( until, selector ) {
-		var matched = jQuery.map( this, fn, until );
-
-		if ( name.slice( -5 ) !== "Until" ) {
-			selector = until;
-		}
-
-		if ( selector && typeof selector === "string" ) {
-			matched = jQuery.filter( selector, matched );
-		}
-
-		if ( this.length > 1 ) {
-
-			// Remove duplicates
-			if ( !guaranteedUnique[ name ] ) {
-				jQuery.uniqueSort( matched );
-			}
-
-			// Reverse order for parents* and prev-derivatives
-			if ( rparentsprev.test( name ) ) {
-				matched.reverse();
-			}
-		}
-
-		return this.pushStack( matched );
-	};
-} );
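-
-// Typical usage of the generated traversal methods, e.g.:
-//
-//   jQuery( "span" ).parentsUntil( "ul", ".menu" ); // ancestors below the <ul>
-//   jQuery( "li.current" ).nextAll( ".item" );      // later siblings, filtered
-//   jQuery( "li" ).siblings();                      // deduped via uniqueSort
-//                                                   // for multi-element sets
-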
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
-	var object = {};
-	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
-		object[ flag ] = true;
-	} );
-	return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- *	options: an optional list of space-separated options that will change how
- *			the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- *	once:			will ensure the callback list can only be fired once (like a Deferred)
- *
- *	memory:			will keep track of previous values and will call any callback added
- *					after the list has been fired right away with the latest "memorized"
- *					values (like a Deferred)
- *
- *	unique:			will ensure a callback can only be added once (no duplicate in the list)
- *
- *	stopOnFalse:	stop calling subsequent callbacks when one returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
-	// Convert options from String-formatted to Object-formatted if needed
-	options = typeof options === "string" ?
-		createOptions( options ) :
-		jQuery.extend( {}, options );
-
-	var // Flag to know if list is currently firing
-		firing,
-
-		// Last fire value for non-forgettable lists
-		memory,
-
-		// Flag to know if list was already fired
-		fired,
-
-		// Flag to prevent firing
-		locked,
-
-		// Actual callback list
-		list = [],
-
-		// Queue of execution data for repeatable lists
-		queue = [],
-
-		// Index of currently firing callback (modified by add/remove as needed)
-		firingIndex = -1,
-
-		// Fire callbacks
-		fire = function() {
-
-			// Enforce single-firing
-			locked = locked || options.once;
-
-			// Execute callbacks for all pending executions,
-			// respecting firingIndex overrides and runtime changes
-			fired = firing = true;
-			for ( ; queue.length; firingIndex = -1 ) {
-				memory = queue.shift();
-				while ( ++firingIndex < list.length ) {
-
-					// Run callback and check for early termination
-					if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
-						options.stopOnFalse ) {
-
-						// Jump to end and forget the data so .add doesn't re-fire
-						firingIndex = list.length;
-						memory = false;
-					}
-				}
-			}
-
-			// Forget the data if we're done with it
-			if ( !options.memory ) {
-				memory = false;
-			}
-
-			firing = false;
-
-			// Clean up if we're done firing for good
-			if ( locked ) {
-
-				// Keep an empty list if we have data for future add calls
-				if ( memory ) {
-					list = [];
-
-				// Otherwise, this object is spent
-				} else {
-					list = "";
-				}
-			}
-		},
-
-		// Actual Callbacks object
-		self = {
-
-			// Add a callback or a collection of callbacks to the list
-			add: function() {
-				if ( list ) {
-
-					// If we have memory from a past run, we should fire after adding
-					if ( memory && !firing ) {
-						firingIndex = list.length - 1;
-						queue.push( memory );
-					}
-
-					( function add( args ) {
-						jQuery.each( args, function( _, arg ) {
-							if ( isFunction( arg ) ) {
-								if ( !options.unique || !self.has( arg ) ) {
-									list.push( arg );
-								}
-							} else if ( arg && arg.length && toType( arg ) !== "string" ) {
-
-								// Inspect recursively
-								add( arg );
-							}
-						} );
-					} )( arguments );
-
-					if ( memory && !firing ) {
-						fire();
-					}
-				}
-				return this;
-			},
-
-			// Remove a callback from the list
-			remove: function() {
-				jQuery.each( arguments, function( _, arg ) {
-					var index;
-					while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
-						list.splice( index, 1 );
-
-						// Handle firing indexes
-						if ( index <= firingIndex ) {
-							firingIndex--;
-						}
-					}
-				} );
-				return this;
-			},
-
-			// Check if a given callback is in the list.
-			// If no argument is given, return whether or not list has callbacks attached.
-			has: function( fn ) {
-				return fn ?
-					jQuery.inArray( fn, list ) > -1 :
-					list.length > 0;
-			},
-
-			// Remove all callbacks from the list
-			empty: function() {
-				if ( list ) {
-					list = [];
-				}
-				return this;
-			},
-
-			// Disable .fire and .add
-			// Abort any current/pending executions
-			// Clear all callbacks and values
-			disable: function() {
-				locked = queue = [];
-				list = memory = "";
-				return this;
-			},
-			disabled: function() {
-				return !list;
-			},
-
-			// Disable .fire
-			// Also disable .add unless we have memory (since it would have no effect)
-			// Abort any pending executions
-			lock: function() {
-				locked = queue = [];
-				if ( !memory && !firing ) {
-					list = memory = "";
-				}
-				return this;
-			},
-			locked: function() {
-				return !!locked;
-			},
-
-			// Call all callbacks with the given context and arguments
-			fireWith: function( context, args ) {
-				if ( !locked ) {
-					args = args || [];
-					args = [ context, args.slice ? args.slice() : args ];
-					queue.push( args );
-					if ( !firing ) {
-						fire();
-					}
-				}
-				return this;
-			},
-
-			// Call all the callbacks with the given arguments
-			fire: function() {
-				self.fireWith( this, arguments );
-				return this;
-			},
-
-			// To know if the callbacks have already been called at least once
-			fired: function() {
-				return !!fired;
-			}
-		};
-
-	return self;
-};
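-
-// A minimal sketch of the flag semantics:
-//
-//   var callbacks = jQuery.Callbacks( "once memory" );
-//   callbacks.add( function( v ) { console.log( "first:", v ); } );
-//   callbacks.fire( 42 );  // logs "first: 42"
-//   callbacks.add( function( v ) { console.log( "late:", v ); } );
-//                          // logs "late: 42" right away ("memory")
-//   callbacks.fire( 43 );  // no-op ("once" has locked the list)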
-
-
-function Identity( v ) {
-	return v;
-}
-function Thrower( ex ) {
-	throw ex;
-}
-
-function adoptValue( value, resolve, reject, noValue ) {
-	var method;
-
-	try {
-
-		// Check for promise aspect first to privilege synchronous behavior
-		if ( value && isFunction( ( method = value.promise ) ) ) {
-			method.call( value ).done( resolve ).fail( reject );
-
-		// Other thenables
-		} else if ( value && isFunction( ( method = value.then ) ) ) {
-			method.call( value, resolve, reject );
-
-		// Other non-thenables
-		} else {
-
-			// Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer:
-			// * false: [ value ].slice( 0 ) => resolve( value )
-			// * true: [ value ].slice( 1 ) => resolve()
-			resolve.apply( undefined, [ value ].slice( noValue ) );
-		}
-
-	// For Promises/A+, convert exceptions into rejections
-	// Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in
-	// Deferred#then to conditionally suppress rejection.
-	} catch ( value ) {
-
-		// Support: Android 4.0 only
-		// Strict mode functions invoked without .call/.apply get global-object context
-		reject.apply( undefined, [ value ] );
-	}
-}
-
-jQuery.extend( {
-
-	Deferred: function( func ) {
-		var tuples = [
-
-				// action, add listener, callbacks,
-				// ... .then handlers, argument index, [final state]
-				[ "notify", "progress", jQuery.Callbacks( "memory" ),
-					jQuery.Callbacks( "memory" ), 2 ],
-				[ "resolve", "done", jQuery.Callbacks( "once memory" ),
-					jQuery.Callbacks( "once memory" ), 0, "resolved" ],
-				[ "reject", "fail", jQuery.Callbacks( "once memory" ),
-					jQuery.Callbacks( "once memory" ), 1, "rejected" ]
-			],
-			state = "pending",
-			promise = {
-				state: function() {
-					return state;
-				},
-				always: function() {
-					deferred.done( arguments ).fail( arguments );
-					return this;
-				},
-				"catch": function( fn ) {
-					return promise.then( null, fn );
-				},
-
-				// Keep pipe for back-compat
-				pipe: function( /* fnDone, fnFail, fnProgress */ ) {
-					var fns = arguments;
-
-					return jQuery.Deferred( function( newDefer ) {
-						jQuery.each( tuples, function( _i, tuple ) {
-
-							// Map tuples (progress, done, fail) to arguments (done, fail, progress)
-							var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ];
-
-							// deferred.progress(function() { bind to newDefer or newDefer.notify })
-							// deferred.done(function() { bind to newDefer or newDefer.resolve })
-							// deferred.fail(function() { bind to newDefer or newDefer.reject })
-							deferred[ tuple[ 1 ] ]( function() {
-								var returned = fn && fn.apply( this, arguments );
-								if ( returned && isFunction( returned.promise ) ) {
-									returned.promise()
-										.progress( newDefer.notify )
-										.done( newDefer.resolve )
-										.fail( newDefer.reject );
-								} else {
-									newDefer[ tuple[ 0 ] + "With" ](
-										this,
-										fn ? [ returned ] : arguments
-									);
-								}
-							} );
-						} );
-						fns = null;
-					} ).promise();
-				},
-				then: function( onFulfilled, onRejected, onProgress ) {
-					var maxDepth = 0;
-					function resolve( depth, deferred, handler, special ) {
-						return function() {
-							var that = this,
-								args = arguments,
-								mightThrow = function() {
-									var returned, then;
-
-									// Support: Promises/A+ section 2.3.3.3.3
-									// https://promisesaplus.com/#point-59
-									// Ignore double-resolution attempts
-									if ( depth < maxDepth ) {
-										return;
-									}
-
-									returned = handler.apply( that, args );
-
-									// Support: Promises/A+ section 2.3.1
-									// https://promisesaplus.com/#point-48
-									if ( returned === deferred.promise() ) {
-										throw new TypeError( "Thenable self-resolution" );
-									}
-
-									// Support: Promises/A+ sections 2.3.3.1, 3.5
-									// https://promisesaplus.com/#point-54
-									// https://promisesaplus.com/#point-75
-									// Retrieve `then` only once
-									then = returned &&
-
-										// Support: Promises/A+ section 2.3.4
-										// https://promisesaplus.com/#point-64
-										// Only check objects and functions for thenability
-										( typeof returned === "object" ||
-											typeof returned === "function" ) &&
-										returned.then;
-
-									// Handle a returned thenable
-									if ( isFunction( then ) ) {
-
-										// Special processors (notify) just wait for resolution
-										if ( special ) {
-											then.call(
-												returned,
-												resolve( maxDepth, deferred, Identity, special ),
-												resolve( maxDepth, deferred, Thrower, special )
-											);
-
-										// Normal processors (resolve) also hook into progress
-										} else {
-
-											// ...and disregard older resolution values
-											maxDepth++;
-
-											then.call(
-												returned,
-												resolve( maxDepth, deferred, Identity, special ),
-												resolve( maxDepth, deferred, Thrower, special ),
-												resolve( maxDepth, deferred, Identity,
-													deferred.notifyWith )
-											);
-										}
-
-									// Handle all other returned values
-									} else {
-
-										// Only substitute handlers pass on context
-										// and multiple values (non-spec behavior)
-										if ( handler !== Identity ) {
-											that = undefined;
-											args = [ returned ];
-										}
-
-										// Process the value(s)
-										// Default process is resolve
-										( special || deferred.resolveWith )( that, args );
-									}
-								},
-
-								// Only normal processors (resolve) catch and reject exceptions
-								process = special ?
-									mightThrow :
-									function() {
-										try {
-											mightThrow();
-										} catch ( e ) {
-
-											if ( jQuery.Deferred.exceptionHook ) {
-												jQuery.Deferred.exceptionHook( e,
-													process.stackTrace );
-											}
-
-											// Support: Promises/A+ section 2.3.3.3.4.1
-											// https://promisesaplus.com/#point-61
-											// Ignore post-resolution exceptions
-											if ( depth + 1 >= maxDepth ) {
-
-												// Only substitute handlers pass on context
-												// and multiple values (non-spec behavior)
-												if ( handler !== Thrower ) {
-													that = undefined;
-													args = [ e ];
-												}
-
-												deferred.rejectWith( that, args );
-											}
-										}
-									};
-
-							// Support: Promises/A+ section 2.3.3.3.1
-							// https://promisesaplus.com/#point-57
-							// Re-resolve promises immediately to dodge false rejection from
-							// subsequent errors
-							if ( depth ) {
-								process();
-							} else {
-
-								// Call an optional hook to record the stack, in case of exception
-								// since it's otherwise lost when execution goes async
-								if ( jQuery.Deferred.getStackHook ) {
-									process.stackTrace = jQuery.Deferred.getStackHook();
-								}
-								window.setTimeout( process );
-							}
-						};
-					}
-
-					return jQuery.Deferred( function( newDefer ) {
-
-						// progress_handlers.add( ... )
-						tuples[ 0 ][ 3 ].add(
-							resolve(
-								0,
-								newDefer,
-								isFunction( onProgress ) ?
-									onProgress :
-									Identity,
-								newDefer.notifyWith
-							)
-						);
-
-						// fulfilled_handlers.add( ... )
-						tuples[ 1 ][ 3 ].add(
-							resolve(
-								0,
-								newDefer,
-								isFunction( onFulfilled ) ?
-									onFulfilled :
-									Identity
-							)
-						);
-
-						// rejected_handlers.add( ... )
-						tuples[ 2 ][ 3 ].add(
-							resolve(
-								0,
-								newDefer,
-								isFunction( onRejected ) ?
-									onRejected :
-									Thrower
-							)
-						);
-					} ).promise();
-				},
-
-				// Get a promise for this deferred
-				// If obj is provided, the promise aspect is added to the object
-				promise: function( obj ) {
-					return obj != null ? jQuery.extend( obj, promise ) : promise;
-				}
-			},
-			deferred = {};
-
-		// Add list-specific methods
-		jQuery.each( tuples, function( i, tuple ) {
-			var list = tuple[ 2 ],
-				stateString = tuple[ 5 ];
-
-			// promise.progress = list.add
-			// promise.done = list.add
-			// promise.fail = list.add
-			promise[ tuple[ 1 ] ] = list.add;
-
-			// Handle state
-			if ( stateString ) {
-				list.add(
-					function() {
-
-						// state = "resolved" (i.e., fulfilled)
-						// state = "rejected"
-						state = stateString;
-					},
-
-					// rejected_callbacks.disable
-					// fulfilled_callbacks.disable
-					tuples[ 3 - i ][ 2 ].disable,
-
-					// rejected_handlers.disable
-					// fulfilled_handlers.disable
-					tuples[ 3 - i ][ 3 ].disable,
-
-					// progress_callbacks.lock
-					tuples[ 0 ][ 2 ].lock,
-
-					// progress_handlers.lock
-					tuples[ 0 ][ 3 ].lock
-				);
-			}
-
-			// progress_handlers.fire
-			// fulfilled_handlers.fire
-			// rejected_handlers.fire
-			list.add( tuple[ 3 ].fire );
-
-			// deferred.notify = function() { deferred.notifyWith(...) }
-			// deferred.resolve = function() { deferred.resolveWith(...) }
-			// deferred.reject = function() { deferred.rejectWith(...) }
-			deferred[ tuple[ 0 ] ] = function() {
-				deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments );
-				return this;
-			};
-
-			// deferred.notifyWith = list.fireWith
-			// deferred.resolveWith = list.fireWith
-			// deferred.rejectWith = list.fireWith
-			deferred[ tuple[ 0 ] + "With" ] = list.fireWith;
-		} );
-
-		// Make the deferred a promise
-		promise.promise( deferred );
-
-		// Call given func if any
-		if ( func ) {
-			func.call( deferred, deferred );
-		}
-
-		// All done!
-		return deferred;
-	},
-
-	// Deferred helper
-	when: function( singleValue ) {
-		var
-
-			// count of uncompleted subordinates
-			remaining = arguments.length,
-
-			// count of unprocessed arguments
-			i = remaining,
-
-			// subordinate fulfillment data
-			resolveContexts = Array( i ),
-			resolveValues = slice.call( arguments ),
-
-			// the master Deferred
-			master = jQuery.Deferred(),
-
-			// subordinate callback factory
-			updateFunc = function( i ) {
-				return function( value ) {
-					resolveContexts[ i ] = this;
-					resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value;
-					if ( !( --remaining ) ) {
-						master.resolveWith( resolveContexts, resolveValues );
-					}
-				};
-			};
-
-		// Single- and empty arguments are adopted like Promise.resolve
-		if ( remaining <= 1 ) {
-			adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject,
-				!remaining );
-
-			// Use .then() to unwrap secondary thenables (cf. gh-3000)
-			if ( master.state() === "pending" ||
-				isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) {
-
-				return master.then();
-			}
-		}
-
-		// Multiple arguments are aggregated like Promise.all array elements
-		while ( i-- ) {
-			adoptValue( resolveValues[ i ], updateFunc( i ), master.reject );
-		}
-
-		return master.promise();
-	}
-} );
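-
-// Typical aggregation with jQuery.when, e.g.:
-//
-//   var a = jQuery.Deferred(),
-//       b = jQuery.Deferred();
-//
-//   jQuery.when( a, b ).done( function( x, y ) {
-//       console.log( x, y ); // "A" "B", once both have resolved
-//   } );
-//
-//   a.resolve( "A" );
-//   b.resolve( "B" );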
-
-
-// These usually indicate a programmer mistake during development,
-// warn about them ASAP rather than swallowing them by default.
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;
-
-jQuery.Deferred.exceptionHook = function( error, stack ) {
-
-	// Support: IE 8 - 9 only
-	// Console exists when dev tools are open, which can happen at any time
-	if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) {
-		window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack );
-	}
-};
-
-
-
-
-jQuery.readyException = function( error ) {
-	window.setTimeout( function() {
-		throw error;
-	} );
-};
-
-
-
-
-// The deferred used on DOM ready
-var readyList = jQuery.Deferred();
-
-jQuery.fn.ready = function( fn ) {
-
-	readyList
-		.then( fn )
-
-		// Wrap jQuery.readyException in a function so that the lookup
-		// happens at the time of error handling instead of callback
-		// registration.
-		.catch( function( error ) {
-			jQuery.readyException( error );
-		} );
-
-	return this;
-};
-
-jQuery.extend( {
-
-	// Is the DOM ready to be used? Set to true once it occurs.
-	isReady: false,
-
-	// A counter to track how many items to wait for before
-	// the ready event fires. See #6781
-	readyWait: 1,
-
-	// Handle when the DOM is ready
-	ready: function( wait ) {
-
-		// Abort if there are pending holds or we're already ready
-		if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
-			return;
-		}
-
-		// Remember that the DOM is ready
-		jQuery.isReady = true;
-
-		// If a normal DOM Ready event fired, decrement, and wait if need be
-		if ( wait !== true && --jQuery.readyWait > 0 ) {
-			return;
-		}
-
-		// If there are functions bound, execute them
-		readyList.resolveWith( document, [ jQuery ] );
-	}
-} );
-
-jQuery.ready.then = readyList.then;
-
-// The ready event handler and self cleanup method
-function completed() {
-	document.removeEventListener( "DOMContentLoaded", completed );
-	window.removeEventListener( "load", completed );
-	jQuery.ready();
-}
-
-// Catch cases where $(document).ready() is called
-// after the browser event has already occurred.
-// Support: IE <=9 - 10 only
-// Older IE sometimes signals "interactive" too soon
-if ( document.readyState === "complete" ||
-	( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
-
-	// Handle it asynchronously to allow scripts the opportunity to delay ready
-	window.setTimeout( jQuery.ready );
-
-} else {
-
-	// Use the handy event callback
-	document.addEventListener( "DOMContentLoaded", completed );
-
-	// A fallback to window.onload, that will always work
-	window.addEventListener( "load", completed );
-}
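-
-// Illustrative sketch (not from the original source): both calls below funnel
-// through readyList, so the callbacks run after DOMContentLoaded even when
-// registered after the event has already fired:
-//
-//     jQuery( function( $ ) {
-//         console.log( "DOM ready", $.isReady ); // true
-//     } );
-//
-//     jQuery( document ).ready( function() {
-//         console.log( "also runs" );
-//     } );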
-
-
-
-
-// Multifunctional method to get and set values of a collection
-// The value/s can optionally be executed if it's a function
-var access = function( elems, fn, key, value, chainable, emptyGet, raw ) {
-	var i = 0,
-		len = elems.length,
-		bulk = key == null;
-
-	// Sets many values
-	if ( toType( key ) === "object" ) {
-		chainable = true;
-		for ( i in key ) {
-			access( elems, fn, i, key[ i ], true, emptyGet, raw );
-		}
-
-	// Sets one value
-	} else if ( value !== undefined ) {
-		chainable = true;
-
-		if ( !isFunction( value ) ) {
-			raw = true;
-		}
-
-		if ( bulk ) {
-
-			// Bulk operations run against the entire set
-			if ( raw ) {
-				fn.call( elems, value );
-				fn = null;
-
-			// ...except when executing function values
-			} else {
-				bulk = fn;
-				fn = function( elem, _key, value ) {
-					return bulk.call( jQuery( elem ), value );
-				};
-			}
-		}
-
-		if ( fn ) {
-			for ( ; i < len; i++ ) {
-				fn(
-					elems[ i ], key, raw ?
-					value :
-					value.call( elems[ i ], i, fn( elems[ i ], key ) )
-				);
-			}
-		}
-	}
-
-	if ( chainable ) {
-		return elems;
-	}
-
-	// Gets
-	if ( bulk ) {
-		return fn.call( elems );
-	}
-
-	return len ? fn( elems[ 0 ], key ) : emptyGet;
-};
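-
-// Illustrative sketch (not from the original source): access() is the shared
-// engine behind getter/setter methods such as .html(), .text() and .data()
-// below. One method reads from the first element, writes to all of them, or
-// computes the value per element when given a function:
-//
-//     jQuery( "p" ).html();                    // get from the first <p>
-//     jQuery( "p" ).html( "<b>hi</b>" );       // set on every <p>
-//     jQuery( "p" ).html( function( i, old ) { // per-element function value
-//         return old + "!";
-//     } );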
-
-
-// Matches dashed string for camelizing
-var rmsPrefix = /^-ms-/,
-	rdashAlpha = /-([a-z])/g;
-
-// Used by camelCase as callback to replace()
-function fcamelCase( _all, letter ) {
-	return letter.toUpperCase();
-}
-
-// Convert dashed to camelCase; used by the css and data modules
-// Support: IE <=9 - 11, Edge 12 - 15
-// Microsoft forgot to hump their vendor prefix (#9572)
-function camelCase( string ) {
-	return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
-}
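-
-// Illustrative sketch (not from the original source): expected conversions,
-// including the Microsoft vendor-prefix special case noted above:
-//
-//     camelCase( "background-color" ); // "backgroundColor"
-//     camelCase( "-webkit-filter" );   // "WebkitFilter"
-//     camelCase( "-ms-transform" );    // "msTransform", not "MsTransform"
-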
-var acceptData = function( owner ) {
-
-	// Accepts only:
-	//  - Node
-	//    - Node.ELEMENT_NODE
-	//    - Node.DOCUMENT_NODE
-	//  - Object
-	//    - Any
-	return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType );
-};
-
-
-
-
-function Data() {
-	this.expando = jQuery.expando + Data.uid++;
-}
-
-Data.uid = 1;
-
-Data.prototype = {
-
-	cache: function( owner ) {
-
-		// Check if the owner object already has a cache
-		var value = owner[ this.expando ];
-
-		// If not, create one
-		if ( !value ) {
-			value = {};
-
-			// We can accept data for non-element nodes in modern browsers,
-			// but we should not, see #8335.
-			// Always return an empty object.
-			if ( acceptData( owner ) ) {
-
-				// If it is a node unlikely to be stringify-ed or looped over
-				// use plain assignment
-				if ( owner.nodeType ) {
-					owner[ this.expando ] = value;
-
-				// Otherwise secure it in a non-enumerable property
-				// configurable must be true to allow the property to be
-				// deleted when data is removed
-				} else {
-					Object.defineProperty( owner, this.expando, {
-						value: value,
-						configurable: true
-					} );
-				}
-			}
-		}
-
-		return value;
-	},
-	set: function( owner, data, value ) {
-		var prop,
-			cache = this.cache( owner );
-
-		// Handle: [ owner, key, value ] args
-		// Always use camelCase key (gh-2257)
-		if ( typeof data === "string" ) {
-			cache[ camelCase( data ) ] = value;
-
-		// Handle: [ owner, { properties } ] args
-		} else {
-
-			// Copy the properties one-by-one to the cache object
-			for ( prop in data ) {
-				cache[ camelCase( prop ) ] = data[ prop ];
-			}
-		}
-		return cache;
-	},
-	get: function( owner, key ) {
-		return key === undefined ?
-			this.cache( owner ) :
-
-			// Always use camelCase key (gh-2257)
-			owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ];
-	},
-	access: function( owner, key, value ) {
-
-		// In cases where either:
-		//
-		//   1. No key was specified
-		//   2. A string key was specified, but no value provided
-		//
-		// Take the "read" path and allow the get method to determine
-		// which value to return, respectively either:
-		//
-		//   1. The entire cache object
-		//   2. The data stored at the key
-		//
-		if ( key === undefined ||
-				( ( key && typeof key === "string" ) && value === undefined ) ) {
-
-			return this.get( owner, key );
-		}
-
-		// When the key is not a string, or both a key and value
-		// are specified, set or extend (existing objects) with either:
-		//
-		//   1. An object of properties
-		//   2. A key and value
-		//
-		this.set( owner, key, value );
-
-		// Since the "set" path can have two possible entry points
-		// return the expected data based on which path was taken[*]
-		return value !== undefined ? value : key;
-	},
-	remove: function( owner, key ) {
-		var i,
-			cache = owner[ this.expando ];
-
-		if ( cache === undefined ) {
-			return;
-		}
-
-		if ( key !== undefined ) {
-
-			// Support array or space separated string of keys
-			if ( Array.isArray( key ) ) {
-
-				// If key is an array of keys...
-				// We always set camelCase keys, so remove that.
-				key = key.map( camelCase );
-			} else {
-				key = camelCase( key );
-
-				// If a key with the spaces exists, use it.
-				// Otherwise, create an array by matching non-whitespace
-				key = key in cache ?
-					[ key ] :
-					( key.match( rnothtmlwhite ) || [] );
-			}
-
-			i = key.length;
-
-			while ( i-- ) {
-				delete cache[ key[ i ] ];
-			}
-		}
-
-		// Remove the expando if there's no more data
-		if ( key === undefined || jQuery.isEmptyObject( cache ) ) {
-
-			// Support: Chrome <=35 - 45
-			// Webkit & Blink performance suffers when deleting properties
-			// from DOM nodes, so set to undefined instead
-			// https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted)
-			if ( owner.nodeType ) {
-				owner[ this.expando ] = undefined;
-			} else {
-				delete owner[ this.expando ];
-			}
-		}
-	},
-	hasData: function( owner ) {
-		var cache = owner[ this.expando ];
-		return cache !== undefined && !jQuery.isEmptyObject( cache );
-	}
-};
-var dataPriv = new Data();
-
-var dataUser = new Data();
-
-
-
-//	Implementation Summary
-//
-//	1. Enforce API surface and semantic compatibility with 1.9.x branch
-//	2. Improve the module's maintainability by reducing the storage
-//		paths to a single mechanism.
-//	3. Use the same single mechanism to support "private" and "user" data.
-//	4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData)
-//	5. Avoid exposing implementation details on user objects (eg. expando properties)
-//	6. Provide a clear path for implementation upgrade to WeakMap in 2014
-
-var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,
-	rmultiDash = /[A-Z]/g;
-
-function getData( data ) {
-	if ( data === "true" ) {
-		return true;
-	}
-
-	if ( data === "false" ) {
-		return false;
-	}
-
-	if ( data === "null" ) {
-		return null;
-	}
-
-	// Only convert to a number if it doesn't change the string
-	if ( data === +data + "" ) {
-		return +data;
-	}
-
-	if ( rbrace.test( data ) ) {
-		return JSON.parse( data );
-	}
-
-	return data;
-}
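-
-// Illustrative sketch (not from the original source): how getData() coerces
-// strings read from data-* attributes:
-//
-//     getData( "true" );    // true (boolean)
-//     getData( "42" );      // 42 (number; "042" stays a string)
-//     getData( '{"a":1}' ); // { a: 1 } (parsed as JSON via the rbrace match)
-//     getData( "hello" );   // "hello" (returned unchanged)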
-
-function dataAttr( elem, key, data ) {
-	var name;
-
-	// If nothing was found internally, try to fetch any
-	// data from the HTML5 data-* attribute
-	if ( data === undefined && elem.nodeType === 1 ) {
-		name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase();
-		data = elem.getAttribute( name );
-
-		if ( typeof data === "string" ) {
-			try {
-				data = getData( data );
-			} catch ( e ) {}
-
-			// Make sure we set the data so it isn't changed later
-			dataUser.set( elem, key, data );
-		} else {
-			data = undefined;
-		}
-	}
-	return data;
-}
-
-jQuery.extend( {
-	hasData: function( elem ) {
-		return dataUser.hasData( elem ) || dataPriv.hasData( elem );
-	},
-
-	data: function( elem, name, data ) {
-		return dataUser.access( elem, name, data );
-	},
-
-	removeData: function( elem, name ) {
-		dataUser.remove( elem, name );
-	},
-
-	// TODO: Now that all calls to _data and _removeData have been replaced
-	// with direct calls to dataPriv methods, these can be deprecated.
-	_data: function( elem, name, data ) {
-		return dataPriv.access( elem, name, data );
-	},
-
-	_removeData: function( elem, name ) {
-		dataPriv.remove( elem, name );
-	}
-} );
-
-jQuery.fn.extend( {
-	data: function( key, value ) {
-		var i, name, data,
-			elem = this[ 0 ],
-			attrs = elem && elem.attributes;
-
-		// Gets all values
-		if ( key === undefined ) {
-			if ( this.length ) {
-				data = dataUser.get( elem );
-
-				if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) {
-					i = attrs.length;
-					while ( i-- ) {
-
-						// Support: IE 11 only
-						// The attrs elements can be null (#14894)
-						if ( attrs[ i ] ) {
-							name = attrs[ i ].name;
-							if ( name.indexOf( "data-" ) === 0 ) {
-								name = camelCase( name.slice( 5 ) );
-								dataAttr( elem, name, data[ name ] );
-							}
-						}
-					}
-					dataPriv.set( elem, "hasDataAttrs", true );
-				}
-			}
-
-			return data;
-		}
-
-		// Sets multiple values
-		if ( typeof key === "object" ) {
-			return this.each( function() {
-				dataUser.set( this, key );
-			} );
-		}
-
-		return access( this, function( value ) {
-			var data;
-
-			// The calling jQuery object (element matches) is not empty
-			// (and therefore has an element at this[ 0 ]) and the
-			// `value` parameter was not undefined. An empty jQuery object
-			// will result in `undefined` for elem = this[ 0 ] which will
-			// throw an exception if an attempt to read a data cache is made.
-			if ( elem && value === undefined ) {
-
-				// Attempt to get data from the cache
-				// The key will always be camelCased in Data
-				data = dataUser.get( elem, key );
-				if ( data !== undefined ) {
-					return data;
-				}
-
-				// Attempt to "discover" the data in
-				// HTML5 custom data-* attrs
-				data = dataAttr( elem, key );
-				if ( data !== undefined ) {
-					return data;
-				}
-
-				// We tried really hard, but the data doesn't exist.
-				return;
-			}
-
-			// Set the data...
-			this.each( function() {
-
-				// We always store the camelCased key
-				dataUser.set( this, key, value );
-			} );
-		}, null, value, arguments.length > 1, null, true );
-	},
-
-	removeData: function( key ) {
-		return this.each( function() {
-			dataUser.remove( this, key );
-		} );
-	}
-} );
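-
-// Illustrative sketch (not from the original source): given a hypothetical
-// element <div id="box" data-user-id="42">, the data-* attribute is read and
-// coerced once, after which reads and writes go through the cache only:
-//
-//     var box = jQuery( "#box" );
-//     box.data( "userId" );     // 42 (discovered via dataAttr, now cached)
-//     box.data( "userId", 99 ); // updates the cache; the attribute keeps "42"
-//     box.removeData( "userId" );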
-
-
-jQuery.extend( {
-	queue: function( elem, type, data ) {
-		var queue;
-
-		if ( elem ) {
-			type = ( type || "fx" ) + "queue";
-			queue = dataPriv.get( elem, type );
-
-			// Speed up dequeue by getting out quickly if this is just a lookup
-			if ( data ) {
-				if ( !queue || Array.isArray( data ) ) {
-					queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
-				} else {
-					queue.push( data );
-				}
-			}
-			return queue || [];
-		}
-	},
-
-	dequeue: function( elem, type ) {
-		type = type || "fx";
-
-		var queue = jQuery.queue( elem, type ),
-			startLength = queue.length,
-			fn = queue.shift(),
-			hooks = jQuery._queueHooks( elem, type ),
-			next = function() {
-				jQuery.dequeue( elem, type );
-			};
-
-		// If the fx queue is dequeued, always remove the progress sentinel
-		if ( fn === "inprogress" ) {
-			fn = queue.shift();
-			startLength--;
-		}
-
-		if ( fn ) {
-
-			// Add a progress sentinel to prevent the fx queue from being
-			// automatically dequeued
-			if ( type === "fx" ) {
-				queue.unshift( "inprogress" );
-			}
-
-			// Clear up the last queue stop function
-			delete hooks.stop;
-			fn.call( elem, next, hooks );
-		}
-
-		if ( !startLength && hooks ) {
-			hooks.empty.fire();
-		}
-	},
-
-	// Not public - generate a queueHooks object, or return the current one
-	_queueHooks: function( elem, type ) {
-		var key = type + "queueHooks";
-		return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
-			empty: jQuery.Callbacks( "once memory" ).add( function() {
-				dataPriv.remove( elem, [ type + "queue", key ] );
-			} )
-		} );
-	}
-} );
-
-jQuery.fn.extend( {
-	queue: function( type, data ) {
-		var setter = 2;
-
-		if ( typeof type !== "string" ) {
-			data = type;
-			type = "fx";
-			setter--;
-		}
-
-		if ( arguments.length < setter ) {
-			return jQuery.queue( this[ 0 ], type );
-		}
-
-		return data === undefined ?
-			this :
-			this.each( function() {
-				var queue = jQuery.queue( this, type, data );
-
-				// Ensure hooks exist for this queue
-				jQuery._queueHooks( this, type );
-
-				if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
-					jQuery.dequeue( this, type );
-				}
-			} );
-	},
-	dequeue: function( type ) {
-		return this.each( function() {
-			jQuery.dequeue( this, type );
-		} );
-	},
-	clearQueue: function( type ) {
-		return this.queue( type || "fx", [] );
-	},
-
-	// Get a promise resolved when queues of a certain type
-	// are emptied (fx is the type by default)
-	promise: function( type, obj ) {
-		var tmp,
-			count = 1,
-			defer = jQuery.Deferred(),
-			elements = this,
-			i = this.length,
-			resolve = function() {
-				if ( !( --count ) ) {
-					defer.resolveWith( elements, [ elements ] );
-				}
-			};
-
-		if ( typeof type !== "string" ) {
-			obj = type;
-			type = undefined;
-		}
-		type = type || "fx";
-
-		while ( i-- ) {
-			tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
-			if ( tmp && tmp.empty ) {
-				count++;
-				tmp.empty.add( resolve );
-			}
-		}
-		resolve();
-		return defer.promise( obj );
-	}
-} );
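-
-// Illustrative sketch (not from the original source): the default "fx" queue
-// serializes animations via the "inprogress" sentinel, and .promise()
-// resolves once every queue of that type has emptied:
-//
-//     jQuery( "#box" )
-//         .fadeOut( 200 )
-//         .fadeIn( 200 )
-//         .promise()
-//         .done( function() {
-//             console.log( "both animations have finished" );
-//         } );
-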
-var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source;
-
-var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" );
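-
-// Illustrative sketch (not from the original source): rcssNum splits absolute
-// and relative (+=/-=) CSS values into sign, number and unit:
-//
-//     rcssNum.exec( "+=10px" ); // [ "+=10px", "+", "10", "px" ]
-//     rcssNum.exec( "1.5em" );  // [ "1.5em", undefined, "1.5", "em" ]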
-
-
-var cssExpand = [ "Top", "Right", "Bottom", "Left" ];
-
-var documentElement = document.documentElement;
-
-
-
-	var isAttached = function( elem ) {
-			return jQuery.contains( elem.ownerDocument, elem );
-		},
-		composed = { composed: true };
-
-	// Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only
-	// Check attachment across shadow DOM boundaries when possible (gh-3504)
-	// Support: iOS 10.0-10.2 only
-	// Early iOS 10 versions support `attachShadow` but not `getRootNode`,
-	// leading to errors. We need to check for `getRootNode`.
-	if ( documentElement.getRootNode ) {
-		isAttached = function( elem ) {
-			return jQuery.contains( elem.ownerDocument, elem ) ||
-				elem.getRootNode( composed ) === elem.ownerDocument;
-		};
-	}
-var isHiddenWithinTree = function( elem, el ) {
-
-		// isHiddenWithinTree might be called from jQuery#filter function;
-		// in that case, element will be second argument
-		elem = el || elem;
-
-		// Inline style trumps all
-		return elem.style.display === "none" ||
-			elem.style.display === "" &&
-
-			// Otherwise, check computed style
-			// Support: Firefox <=43 - 45
-			// Disconnected elements can have computed display: none, so first confirm that elem is
-			// in the document.
-			isAttached( elem ) &&
-
-			jQuery.css( elem, "display" ) === "none";
-	};
-
-
-
-function adjustCSS( elem, prop, valueParts, tween ) {
-	var adjusted, scale,
-		maxIterations = 20,
-		currentValue = tween ?
-			function() {
-				return tween.cur();
-			} :
-			function() {
-				return jQuery.css( elem, prop, "" );
-			},
-		initial = currentValue(),
-		unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ),
-
-		// Starting value computation is required for potential unit mismatches
-		initialInUnit = elem.nodeType &&
-			( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) &&
-			rcssNum.exec( jQuery.css( elem, prop ) );
-
-	if ( initialInUnit && initialInUnit[ 3 ] !== unit ) {
-
-		// Support: Firefox <=54
-		// Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144)
-		initial = initial / 2;
-
-		// Trust units reported by jQuery.css
-		unit = unit || initialInUnit[ 3 ];
-
-		// Iteratively approximate from a nonzero starting point
-		initialInUnit = +initial || 1;
-
-		while ( maxIterations-- ) {
-
-			// Evaluate and update our best guess (doubling guesses that zero out).
-			// Finish if the scale equals or crosses 1 (making the old*new product non-positive).
-			jQuery.style( elem, prop, initialInUnit + unit );
-			if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) {
-				maxIterations = 0;
-			}
-			initialInUnit = initialInUnit / scale;
-
-		}
-
-		initialInUnit = initialInUnit * 2;
-		jQuery.style( elem, prop, initialInUnit + unit );
-
-		// Make sure we update the tween properties later on
-		valueParts = valueParts || [];
-	}
-
-	if ( valueParts ) {
-		initialInUnit = +initialInUnit || +initial || 0;
-
-		// Apply relative offset (+=/-=) if specified
-		adjusted = valueParts[ 1 ] ?
-			initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
-			+valueParts[ 2 ];
-		if ( tween ) {
-			tween.unit = unit;
-			tween.start = initialInUnit;
-			tween.end = adjusted;
-		}
-	}
-	return adjusted;
-}
-
-
-var defaultDisplayMap = {};
-
-function getDefaultDisplay( elem ) {
-	var temp,
-		doc = elem.ownerDocument,
-		nodeName = elem.nodeName,
-		display = defaultDisplayMap[ nodeName ];
-
-	if ( display ) {
-		return display;
-	}
-
-	temp = doc.body.appendChild( doc.createElement( nodeName ) );
-	display = jQuery.css( temp, "display" );
-
-	temp.parentNode.removeChild( temp );
-
-	if ( display === "none" ) {
-		display = "block";
-	}
-	defaultDisplayMap[ nodeName ] = display;
-
-	return display;
-}
-
-function showHide( elements, show ) {
-	var display, elem,
-		values = [],
-		index = 0,
-		length = elements.length;
-
-	// Determine new display value for elements that need to change
-	for ( ; index < length; index++ ) {
-		elem = elements[ index ];
-		if ( !elem.style ) {
-			continue;
-		}
-
-		display = elem.style.display;
-		if ( show ) {
-
-			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
-			// check is required in this first loop unless we have a nonempty display value (either
-			// inline or about-to-be-restored)
-			if ( display === "none" ) {
-				values[ index ] = dataPriv.get( elem, "display" ) || null;
-				if ( !values[ index ] ) {
-					elem.style.display = "";
-				}
-			}
-			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
-				values[ index ] = getDefaultDisplay( elem );
-			}
-		} else {
-			if ( display !== "none" ) {
-				values[ index ] = "none";
-
-				// Remember what we're overwriting
-				dataPriv.set( elem, "display", display );
-			}
-		}
-	}
-
-	// Set the display of the elements in a second loop to avoid constant reflow
-	for ( index = 0; index < length; index++ ) {
-		if ( values[ index ] != null ) {
-			elements[ index ].style.display = values[ index ];
-		}
-	}
-
-	return elements;
-}
-
-jQuery.fn.extend( {
-	show: function() {
-		return showHide( this, true );
-	},
-	hide: function() {
-		return showHide( this );
-	},
-	toggle: function( state ) {
-		if ( typeof state === "boolean" ) {
-			return state ? this.show() : this.hide();
-		}
-
-		return this.each( function() {
-			if ( isHiddenWithinTree( this ) ) {
-				jQuery( this ).show();
-			} else {
-				jQuery( this ).hide();
-			}
-		} );
-	}
-} );
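-
-// Illustrative sketch (not from the original source): .hide() stashes the
-// inline display value in dataPriv so .show() can restore it rather than
-// guessing "block":
-//
-//     var el = jQuery( "<div style='display:flex'></div>" ).appendTo( "body" );
-//     el.hide();   // display: none; "flex" is remembered
-//     el.show();   // display: flex again, not "block"
-//     el.toggle(); // hidden once more
-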
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
-
-var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
-
-
-
-( function() {
-	var fragment = document.createDocumentFragment(),
-		div = fragment.appendChild( document.createElement( "div" ) ),
-		input = document.createElement( "input" );
-
-	// Support: Android 4.0 - 4.3 only
-	// Check state lost if the name is set (#11217)
-	// Support: Windows Web Apps (WWA)
-	// `name` and `type` must use .setAttribute for WWA (#14901)
-	input.setAttribute( "type", "radio" );
-	input.setAttribute( "checked", "checked" );
-	input.setAttribute( "name", "t" );
-
-	div.appendChild( input );
-
-	// Support: Android <=4.1 only
-	// Older WebKit doesn't clone checked state correctly in fragments
-	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
-	// Support: IE <=11 only
-	// Make sure textarea (and checkbox) defaultValue is properly cloned
-	div.innerHTML = "<textarea>x</textarea>";
-	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-
-	// Support: IE <=9 only
-	// IE <=9 replaces <option> tags with their contents when inserted outside of
-	// the select element.
-	div.innerHTML = "<option></option>";
-	support.option = !!div.lastChild;
-} )();
-
-
-// We have to close these tags to support XHTML (#13200)
-var wrapMap = {
-
-	// XHTML parsers do not magically insert elements in the
-	// same way that tag soup parsers do. So we cannot shorten
-	// this by omitting <tbody> or other required elements.
-	thead: [ 1, "<table>", "</table>" ],
-	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
-	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
-	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-
-	_default: [ 0, "", "" ]
-};
-
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// Support: IE <=9 only
-if ( !support.option ) {
-	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
-}
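-
-// Illustrative sketch (not from the original source): why wrapMap matters.
-// The HTML parser drops a bare "<tr>" outside of a table, so buildFragment
-// wraps such fragments and then descends wrap[ 0 ] levels to the content:
-//
-//     jQuery( "<tr><td>cell</td></tr>" ); // parsed inside "<table><tbody>...</tbody></table>"
-//     jQuery( "<td>cell</td>" );          // needs all three wrapper levels (table/tbody/tr)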
-
-
-function getAll( context, tag ) {
-
-	// Support: IE <=9 - 11 only
-	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
-	var ret;
-
-	if ( typeof context.getElementsByTagName !== "undefined" ) {
-		ret = context.getElementsByTagName( tag || "*" );
-
-	} else if ( typeof context.querySelectorAll !== "undefined" ) {
-		ret = context.querySelectorAll( tag || "*" );
-
-	} else {
-		ret = [];
-	}
-
-	if ( tag === undefined || tag && nodeName( context, tag ) ) {
-		return jQuery.merge( [ context ], ret );
-	}
-
-	return ret;
-}
-
-
-// Mark scripts as having already been evaluated
-function setGlobalEval( elems, refElements ) {
-	var i = 0,
-		l = elems.length;
-
-	for ( ; i < l; i++ ) {
-		dataPriv.set(
-			elems[ i ],
-			"globalEval",
-			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
-		);
-	}
-}
-
-
-var rhtml = /<|&#?\w+;/;
-
-function buildFragment( elems, context, scripts, selection, ignored ) {
-	var elem, tmp, tag, wrap, attached, j,
-		fragment = context.createDocumentFragment(),
-		nodes = [],
-		i = 0,
-		l = elems.length;
-
-	for ( ; i < l; i++ ) {
-		elem = elems[ i ];
-
-		if ( elem || elem === 0 ) {
-
-			// Add nodes directly
-			if ( toType( elem ) === "object" ) {
-
-				// Support: Android <=4.0 only, PhantomJS 1 only
-				// push.apply(_, arraylike) throws on ancient WebKit
-				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
-
-			// Convert non-html into a text node
-			} else if ( !rhtml.test( elem ) ) {
-				nodes.push( context.createTextNode( elem ) );
-
-			// Convert html into DOM nodes
-			} else {
-				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
-
-				// Deserialize a standard representation
-				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
-				wrap = wrapMap[ tag ] || wrapMap._default;
-				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
-
-				// Descend through wrappers to the right content
-				j = wrap[ 0 ];
-				while ( j-- ) {
-					tmp = tmp.lastChild;
-				}
-
-				// Support: Android <=4.0 only, PhantomJS 1 only
-				// push.apply(_, arraylike) throws on ancient WebKit
-				jQuery.merge( nodes, tmp.childNodes );
-
-				// Remember the top-level container
-				tmp = fragment.firstChild;
-
-				// Ensure the created nodes are orphaned (#12392)
-				tmp.textContent = "";
-			}
-		}
-	}
-
-	// Remove wrapper from fragment
-	fragment.textContent = "";
-
-	i = 0;
-	while ( ( elem = nodes[ i++ ] ) ) {
-
-		// Skip elements already in the context collection (trac-4087)
-		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
-			if ( ignored ) {
-				ignored.push( elem );
-			}
-			continue;
-		}
-
-		attached = isAttached( elem );
-
-		// Append to fragment
-		tmp = getAll( fragment.appendChild( elem ), "script" );
-
-		// Preserve script evaluation history
-		if ( attached ) {
-			setGlobalEval( tmp );
-		}
-
-		// Capture executables
-		if ( scripts ) {
-			j = 0;
-			while ( ( elem = tmp[ j++ ] ) ) {
-				if ( rscriptType.test( elem.type || "" ) ) {
-					scripts.push( elem );
-				}
-			}
-		}
-	}
-
-	return fragment;
-}
-
-
-var
-	rkeyEvent = /^key/,
-	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
-	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
-
-function returnTrue() {
-	return true;
-}
-
-function returnFalse() {
-	return false;
-}
-
-// Support: IE <=9 - 11+
-// focus() and blur() are asynchronous, except when they are no-op.
-// So expect focus to be synchronous when the element is already active,
-// and blur to be synchronous when the element is not already active.
-// (focus and blur are always synchronous in other supported browsers,
-// this just defines when we can count on it).
-function expectSync( elem, type ) {
-	return ( elem === safeActiveElement() ) === ( type === "focus" );
-}
-
-// Support: IE <=9 only
-// Accessing document.activeElement can throw unexpectedly
-// https://bugs.jquery.com/ticket/13393
-function safeActiveElement() {
-	try {
-		return document.activeElement;
-	} catch ( err ) { }
-}
-
-function on( elem, types, selector, data, fn, one ) {
-	var origFn, type;
-
-	// Types can be a map of types/handlers
-	if ( typeof types === "object" ) {
-
-		// ( types-Object, selector, data )
-		if ( typeof selector !== "string" ) {
-
-			// ( types-Object, data )
-			data = data || selector;
-			selector = undefined;
-		}
-		for ( type in types ) {
-			on( elem, type, selector, data, types[ type ], one );
-		}
-		return elem;
-	}
-
-	if ( data == null && fn == null ) {
-
-		// ( types, fn )
-		fn = selector;
-		data = selector = undefined;
-	} else if ( fn == null ) {
-		if ( typeof selector === "string" ) {
-
-			// ( types, selector, fn )
-			fn = data;
-			data = undefined;
-		} else {
-
-			// ( types, data, fn )
-			fn = data;
-			data = selector;
-			selector = undefined;
-		}
-	}
-	if ( fn === false ) {
-		fn = returnFalse;
-	} else if ( !fn ) {
-		return elem;
-	}
-
-	if ( one === 1 ) {
-		origFn = fn;
-		fn = function( event ) {
-
-			// Can use an empty set, since event contains the info
-			jQuery().off( event );
-			return origFn.apply( this, arguments );
-		};
-
-		// Use same guid so caller can remove using origFn
-		fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
-	}
-	return elem.each( function() {
-		jQuery.event.add( this, types, fn, data, selector );
-	} );
-}
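-
-// Illustrative sketch (not from the original source): the argument shuffling
-// in on() supports every public overload; "handler" below is hypothetical:
-//
-//     jQuery( "#list" ).on( "click", handler );            // ( types, fn )
-//     jQuery( "#list" ).on( "click", "li", handler );      // ( types, selector, fn )
-//     jQuery( "#list" ).on( "click", { id: 7 }, handler ); // ( types, data, fn )
-//     jQuery( "#list" ).on( { click: handler } );          // ( types-Object )
-//     jQuery( "#list" ).one( "click", handler );           // removed after one call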
-
-/*
- * Helper functions for managing events -- not part of the public interface.
- * Props to Dean Edwards' addEvent library for many of the ideas.
- */
-jQuery.event = {
-
-	global: {},
-
-	add: function( elem, types, handler, data, selector ) {
-
-		var handleObjIn, eventHandle, tmp,
-			events, t, handleObj,
-			special, handlers, type, namespaces, origType,
-			elemData = dataPriv.get( elem );
-
-		// Only attach events to objects that accept data
-		if ( !acceptData( elem ) ) {
-			return;
-		}
-
-		// Caller can pass in an object of custom data in lieu of the handler
-		if ( handler.handler ) {
-			handleObjIn = handler;
-			handler = handleObjIn.handler;
-			selector = handleObjIn.selector;
-		}
-
-		// Ensure that invalid selectors throw exceptions at attach time
-		// Evaluate against documentElement in case elem is a non-element node (e.g., document)
-		if ( selector ) {
-			jQuery.find.matchesSelector( documentElement, selector );
-		}
-
-		// Make sure that the handler has a unique ID, used to find/remove it later
-		if ( !handler.guid ) {
-			handler.guid = jQuery.guid++;
-		}
-
-		// Init the element's event structure and main handler, if this is the first
-		if ( !( events = elemData.events ) ) {
-			events = elemData.events = Object.create( null );
-		}
-		if ( !( eventHandle = elemData.handle ) ) {
-			eventHandle = elemData.handle = function( e ) {
-
-				// Discard the second event of a jQuery.event.trigger() and
-				// when an event is called after a page has unloaded
-				return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ?
-					jQuery.event.dispatch.apply( elem, arguments ) : undefined;
-			};
-		}
-
-		// Handle multiple events separated by a space
-		types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
-		t = types.length;
-		while ( t-- ) {
-			tmp = rtypenamespace.exec( types[ t ] ) || [];
-			type = origType = tmp[ 1 ];
-			namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
-
-			// There *must* be a type, no attaching namespace-only handlers
-			if ( !type ) {
-				continue;
-			}
-
-			// If event changes its type, use the special event handlers for the changed type
-			special = jQuery.event.special[ type ] || {};
-
-			// If selector defined, determine special event api type, otherwise given type
-			type = ( selector ? special.delegateType : special.bindType ) || type;
-
-			// Update special based on newly reset type
-			special = jQuery.event.special[ type ] || {};
-
-			// handleObj is passed to all event handlers
-			handleObj = jQuery.extend( {
-				type: type,
-				origType: origType,
-				data: data,
-				handler: handler,
-				guid: handler.guid,
-				selector: selector,
-				needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
-				namespace: namespaces.join( "." )
-			}, handleObjIn );
-
-			// Init the event handler queue if we're the first
-			if ( !( handlers = events[ type ] ) ) {
-				handlers = events[ type ] = [];
-				handlers.delegateCount = 0;
-
-				// Only use addEventListener if the special events handler returns false
-				if ( !special.setup ||
-					special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
-
-					if ( elem.addEventListener ) {
-						elem.addEventListener( type, eventHandle );
-					}
-				}
-			}
-
-			if ( special.add ) {
-				special.add.call( elem, handleObj );
-
-				if ( !handleObj.handler.guid ) {
-					handleObj.handler.guid = handler.guid;
-				}
-			}
-
-			// Add to the element's handler list, delegates in front
-			if ( selector ) {
-				handlers.splice( handlers.delegateCount++, 0, handleObj );
-			} else {
-				handlers.push( handleObj );
-			}
-
-			// Keep track of which events have ever been used, for event optimization
-			jQuery.event.global[ type ] = true;
-		}
-
-	},
-
-	// Detach an event or set of events from an element
-	remove: function( elem, types, handler, selector, mappedTypes ) {
-
-		var j, origCount, tmp,
-			events, t, handleObj,
-			special, handlers, type, namespaces, origType,
-			elemData = dataPriv.hasData( elem ) && dataPriv.get( elem );
-
-		if ( !elemData || !( events = elemData.events ) ) {
-			return;
-		}
-
-		// Once for each type.namespace in types; type may be omitted
-		types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
-		t = types.length;
-		while ( t-- ) {
-			tmp = rtypenamespace.exec( types[ t ] ) || [];
-			type = origType = tmp[ 1 ];
-			namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
-
-			// Unbind all events (on this namespace, if provided) for the element
-			if ( !type ) {
-				for ( type in events ) {
-					jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
-				}
-				continue;
-			}
-
-			special = jQuery.event.special[ type ] || {};
-			type = ( selector ? special.delegateType : special.bindType ) || type;
-			handlers = events[ type ] || [];
-			tmp = tmp[ 2 ] &&
-				new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" );
-
-			// Remove matching events
-			origCount = j = handlers.length;
-			while ( j-- ) {
-				handleObj = handlers[ j ];
-
-				if ( ( mappedTypes || origType === handleObj.origType ) &&
-					( !handler || handler.guid === handleObj.guid ) &&
-					( !tmp || tmp.test( handleObj.namespace ) ) &&
-					( !selector || selector === handleObj.selector ||
-						selector === "**" && handleObj.selector ) ) {
-					handlers.splice( j, 1 );
-
-					if ( handleObj.selector ) {
-						handlers.delegateCount--;
-					}
-					if ( special.remove ) {
-						special.remove.call( elem, handleObj );
-					}
-				}
-			}
-
-			// Remove generic event handler if we removed something and no more handlers exist
-			// (avoids potential for endless recursion during removal of special event handlers)
-			if ( origCount && !handlers.length ) {
-				if ( !special.teardown ||
-					special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
-
-					jQuery.removeEvent( elem, type, elemData.handle );
-				}
-
-				delete events[ type ];
-			}
-		}
-
-		// Remove data and the expando if it's no longer used
-		if ( jQuery.isEmptyObject( events ) ) {
-			dataPriv.remove( elem, "handle events" );
-		}
-	},
-
-	dispatch: function( nativeEvent ) {
-
-		var i, j, ret, matched, handleObj, handlerQueue,
-			args = new Array( arguments.length ),
-
-			// Make a writable jQuery.Event from the native event object
-			event = jQuery.event.fix( nativeEvent ),
-
-			handlers = (
-					dataPriv.get( this, "events" ) || Object.create( null )
-				)[ event.type ] || [],
-			special = jQuery.event.special[ event.type ] || {};
-
-		// Use the fix-ed jQuery.Event rather than the (read-only) native event
-		args[ 0 ] = event;
-
-		for ( i = 1; i < arguments.length; i++ ) {
-			args[ i ] = arguments[ i ];
-		}
-
-		event.delegateTarget = this;
-
-		// Call the preDispatch hook for the mapped type, and let it bail if desired
-		if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
-			return;
-		}
-
-		// Determine handlers
-		handlerQueue = jQuery.event.handlers.call( this, event, handlers );
-
-		// Run delegates first; they may want to stop propagation beneath us
-		i = 0;
-		while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) {
-			event.currentTarget = matched.elem;
-
-			j = 0;
-			while ( ( handleObj = matched.handlers[ j++ ] ) &&
-				!event.isImmediatePropagationStopped() ) {
-
-				// If the event is namespaced, then each handler is only invoked if it is
-				// specially universal or its namespaces are a superset of the event's.
-				if ( !event.rnamespace || handleObj.namespace === false ||
-					event.rnamespace.test( handleObj.namespace ) ) {
-
-					event.handleObj = handleObj;
-					event.data = handleObj.data;
-
-					ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
-						handleObj.handler ).apply( matched.elem, args );
-
-					if ( ret !== undefined ) {
-						if ( ( event.result = ret ) === false ) {
-							event.preventDefault();
-							event.stopPropagation();
-						}
-					}
-				}
-			}
-		}
-
-		// Call the postDispatch hook for the mapped type
-		if ( special.postDispatch ) {
-			special.postDispatch.call( this, event );
-		}
-
-		return event.result;
-	},
-
-	handlers: function( event, handlers ) {
-		var i, handleObj, sel, matchedHandlers, matchedSelectors,
-			handlerQueue = [],
-			delegateCount = handlers.delegateCount,
-			cur = event.target;
-
-		// Find delegate handlers
-		if ( delegateCount &&
-
-			// Support: IE <=9
-			// Black-hole SVG <use> instance trees (trac-13180)
-			cur.nodeType &&
-
-			// Support: Firefox <=42
-			// Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
-			// https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
-			// Support: IE 11 only
-			// ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
-			!( event.type === "click" && event.button >= 1 ) ) {
-
-			for ( ; cur !== this; cur = cur.parentNode || this ) {
-
-				// Don't check non-elements (#13208)
-				// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
-				if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
-					matchedHandlers = [];
-					matchedSelectors = {};
-					for ( i = 0; i < delegateCount; i++ ) {
-						handleObj = handlers[ i ];
-
-						// Don't conflict with Object.prototype properties (#13203)
-						sel = handleObj.selector + " ";
-
-						if ( matchedSelectors[ sel ] === undefined ) {
-							matchedSelectors[ sel ] = handleObj.needsContext ?
-								jQuery( sel, this ).index( cur ) > -1 :
-								jQuery.find( sel, this, null, [ cur ] ).length;
-						}
-						if ( matchedSelectors[ sel ] ) {
-							matchedHandlers.push( handleObj );
-						}
-					}
-					if ( matchedHandlers.length ) {
-						handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
-					}
-				}
-			}
-		}
-
-		// Add the remaining (directly-bound) handlers
-		cur = this;
-		if ( delegateCount < handlers.length ) {
-			handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
-		}
-
-		return handlerQueue;
-	},
-
-	addProp: function( name, hook ) {
-		Object.defineProperty( jQuery.Event.prototype, name, {
-			enumerable: true,
-			configurable: true,
-
-			get: isFunction( hook ) ?
-				function() {
-					if ( this.originalEvent ) {
-						return hook( this.originalEvent );
-					}
-				} :
-				function() {
-					if ( this.originalEvent ) {
-						return this.originalEvent[ name ];
-					}
-				},
-
-			set: function( value ) {
-				Object.defineProperty( this, name, {
-					enumerable: true,
-					configurable: true,
-					writable: true,
-					value: value
-				} );
-			}
-		} );
-	},
-
-	fix: function( originalEvent ) {
-		return originalEvent[ jQuery.expando ] ?
-			originalEvent :
-			new jQuery.Event( originalEvent );
-	},
-
-	special: {
-		load: {
-
-			// Prevent triggered image.load events from bubbling to window.load
-			noBubble: true
-		},
-		click: {
-
-			// Utilize native event to ensure correct state for checkable inputs
-			setup: function( data ) {
-
-				// For mutual compressibility with _default, replace `this` access with a local var.
-				// `|| data` is dead code meant only to preserve the variable through minification.
-				var el = this || data;
-
-				// Claim the first handler
-				if ( rcheckableType.test( el.type ) &&
-					el.click && nodeName( el, "input" ) ) {
-
-					// dataPriv.set( el, "click", ... )
-					leverageNative( el, "click", returnTrue );
-				}
-
-				// Return false to allow normal processing in the caller
-				return false;
-			},
-			trigger: function( data ) {
-
-				// For mutual compressibility with _default, replace `this` access with a local var.
-				// `|| data` is dead code meant only to preserve the variable through minification.
-				var el = this || data;
-
-				// Force setup before triggering a click
-				if ( rcheckableType.test( el.type ) &&
-					el.click && nodeName( el, "input" ) ) {
-
-					leverageNative( el, "click" );
-				}
-
-				// Return non-false to allow normal event-path propagation
-				return true;
-			},
-
-			// For cross-browser consistency, suppress native .click() on links
-			// Also prevent it if we're currently inside a leveraged native-event stack
-			_default: function( event ) {
-				var target = event.target;
-				return rcheckableType.test( target.type ) &&
-					target.click && nodeName( target, "input" ) &&
-					dataPriv.get( target, "click" ) ||
-					nodeName( target, "a" );
-			}
-		},
-
-		beforeunload: {
-			postDispatch: function( event ) {
-
-				// Support: Firefox 20+
-				// Firefox doesn't alert if the returnValue field is not set.
-				if ( event.result !== undefined && event.originalEvent ) {
-					event.originalEvent.returnValue = event.result;
-				}
-			}
-		}
-	}
-};
-
-// Ensure the presence of an event listener that handles manually-triggered
-// synthetic events by interrupting progress until reinvoked in response to
-// *native* events that it fires directly, ensuring that state changes have
-// already occurred before other listeners are invoked.
-function leverageNative( el, type, expectSync ) {
-
-	// Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add
-	if ( !expectSync ) {
-		if ( dataPriv.get( el, type ) === undefined ) {
-			jQuery.event.add( el, type, returnTrue );
-		}
-		return;
-	}
-
-	// Register the controller as a special universal handler for all event namespaces
-	dataPriv.set( el, type, false );
-	jQuery.event.add( el, type, {
-		namespace: false,
-		handler: function( event ) {
-			var notAsync, result,
-				saved = dataPriv.get( this, type );
-
-			if ( ( event.isTrigger & 1 ) && this[ type ] ) {
-
-				// Interrupt processing of the outer synthetic .trigger()ed event
-				// Saved data should be false in such cases, but might be a leftover capture object
-				// from an async native handler (gh-4350)
-				if ( !saved.length ) {
-
-					// Store arguments for use when handling the inner native event
-					// There will always be at least one argument (an event object), so this array
-					// will not be confused with a leftover capture object.
-					saved = slice.call( arguments );
-					dataPriv.set( this, type, saved );
-
-					// Trigger the native event and capture its result
-					// Support: IE <=9 - 11+
-					// focus() and blur() are asynchronous
-					notAsync = expectSync( this, type );
-					this[ type ]();
-					result = dataPriv.get( this, type );
-					if ( saved !== result || notAsync ) {
-						dataPriv.set( this, type, false );
-					} else {
-						result = {};
-					}
-					if ( saved !== result ) {
-
-						// Cancel the outer synthetic event
-						event.stopImmediatePropagation();
-						event.preventDefault();
-						return result.value;
-					}
-
-				// If this is an inner synthetic event for an event with a bubbling surrogate
-				// (focus or blur), assume that the surrogate already propagated from triggering the
-				// native event and prevent that from happening again here.
-				// This technically gets the ordering wrong w.r.t. `.trigger()` (in which the
-				// bubbling surrogate propagates *after* the non-bubbling base), but that seems
-				// less bad than duplication.
-				} else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) {
-					event.stopPropagation();
-				}
-
-			// If this is a native event triggered above, everything is now in order
-			// Fire an inner synthetic event with the original arguments
-			} else if ( saved.length ) {
-
-				// ...and capture the result
-				dataPriv.set( this, type, {
-					value: jQuery.event.trigger(
-
-						// Support: IE <=9 - 11+
-						// Extend with the prototype to reset the above stopImmediatePropagation()
-						jQuery.extend( saved[ 0 ], jQuery.Event.prototype ),
-						saved.slice( 1 ),
-						this
-					)
-				} );
-
-				// Abort handling of the native event
-				event.stopImmediatePropagation();
-			}
-		}
-	} );
-}
-
-jQuery.removeEvent = function( elem, type, handle ) {
-
-	// This "if" is needed for plain objects
-	if ( elem.removeEventListener ) {
-		elem.removeEventListener( type, handle );
-	}
-};
-
-jQuery.Event = function( src, props ) {
-
-	// Allow instantiation without the 'new' keyword
-	if ( !( this instanceof jQuery.Event ) ) {
-		return new jQuery.Event( src, props );
-	}
-
-	// Event object
-	if ( src && src.type ) {
-		this.originalEvent = src;
-		this.type = src.type;
-
-		// Events bubbling up the document may have been marked as prevented
-		// by a handler lower down the tree; reflect the correct value.
-		this.isDefaultPrevented = src.defaultPrevented ||
-				src.defaultPrevented === undefined &&
-
-				// Support: Android <=2.3 only
-				src.returnValue === false ?
-			returnTrue :
-			returnFalse;
-
-		// Create target properties
-		// Support: Safari <=6 - 7 only
-		// Target should not be a text node (#504, #13143)
-		this.target = ( src.target && src.target.nodeType === 3 ) ?
-			src.target.parentNode :
-			src.target;
-
-		this.currentTarget = src.currentTarget;
-		this.relatedTarget = src.relatedTarget;
-
-	// Event type
-	} else {
-		this.type = src;
-	}
-
-	// Put explicitly provided properties onto the event object
-	if ( props ) {
-		jQuery.extend( this, props );
-	}
-
-	// Create a timestamp if incoming event doesn't have one
-	this.timeStamp = src && src.timeStamp || Date.now();
-
-	// Mark it as fixed
-	this[ jQuery.expando ] = true;
-};
-
-// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
-// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
-jQuery.Event.prototype = {
-	constructor: jQuery.Event,
-	isDefaultPrevented: returnFalse,
-	isPropagationStopped: returnFalse,
-	isImmediatePropagationStopped: returnFalse,
-	isSimulated: false,
-
-	preventDefault: function() {
-		var e = this.originalEvent;
-
-		this.isDefaultPrevented = returnTrue;
-
-		if ( e && !this.isSimulated ) {
-			e.preventDefault();
-		}
-	},
-	stopPropagation: function() {
-		var e = this.originalEvent;
-
-		this.isPropagationStopped = returnTrue;
-
-		if ( e && !this.isSimulated ) {
-			e.stopPropagation();
-		}
-	},
-	stopImmediatePropagation: function() {
-		var e = this.originalEvent;
-
-		this.isImmediatePropagationStopped = returnTrue;
-
-		if ( e && !this.isSimulated ) {
-			e.stopImmediatePropagation();
-		}
-
-		this.stopPropagation();
-	}
-};
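-
-// Illustrative sketch (not from the original source): constructing and firing
-// a synthetic event; the state methods flip from returnFalse to returnTrue:
-//
-//     var e = jQuery.Event( "keydown", { keyCode: 13 } );
-//     e.isDefaultPrevented(); // false
-//     e.preventDefault();
-//     e.isDefaultPrevented(); // true
-//     jQuery( "#field" ).trigger( e );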
-
-// Includes all common event props including KeyEvent and MouseEvent specific props
-jQuery.each( {
-	altKey: true,
-	bubbles: true,
-	cancelable: true,
-	changedTouches: true,
-	ctrlKey: true,
-	detail: true,
-	eventPhase: true,
-	metaKey: true,
-	pageX: true,
-	pageY: true,
-	shiftKey: true,
-	view: true,
-	"char": true,
-	code: true,
-	charCode: true,
-	key: true,
-	keyCode: true,
-	button: true,
-	buttons: true,
-	clientX: true,
-	clientY: true,
-	offsetX: true,
-	offsetY: true,
-	pointerId: true,
-	pointerType: true,
-	screenX: true,
-	screenY: true,
-	targetTouches: true,
-	toElement: true,
-	touches: true,
-
-	which: function( event ) {
-		var button = event.button;
-
-		// Add which for key events
-		if ( event.which == null && rkeyEvent.test( event.type ) ) {
-			return event.charCode != null ? event.charCode : event.keyCode;
-		}
-
-		// Add which for click: 1 === left; 2 === middle; 3 === right
-		if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) {
-			if ( button & 1 ) {
-				return 1;
-			}
-
-			if ( button & 2 ) {
-				return 3;
-			}
-
-			if ( button & 4 ) {
-				return 2;
-			}
-
-			return 0;
-		}
-
-		return event.which;
-	}
-}, jQuery.event.addProp );
-
-jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) {
-	jQuery.event.special[ type ] = {
-
-		// Utilize native event if possible so blur/focus sequence is correct
-		setup: function() {
-
-			// Claim the first handler
-			// dataPriv.set( this, "focus", ... )
-			// dataPriv.set( this, "blur", ... )
-			leverageNative( this, type, expectSync );
-
-			// Return false to allow normal processing in the caller
-			return false;
-		},
-		trigger: function() {
-
-			// Force setup before trigger
-			leverageNative( this, type );
-
-			// Return non-false to allow normal event-path propagation
-			return true;
-		},
-
-		delegateType: delegateType
-	};
-} );
-
-// Create mouseenter/leave events using mouseover/out and event-time checks
-// so that event delegation works in jQuery.
-// Do the same for pointerenter/pointerleave and pointerover/pointerout
-//
-// Support: Safari 7 only
-// Safari sends mouseenter too often; see:
-// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
-// for the description of the bug (it existed in older Chrome versions as well).
-jQuery.each( {
-	mouseenter: "mouseover",
-	mouseleave: "mouseout",
-	pointerenter: "pointerover",
-	pointerleave: "pointerout"
-}, function( orig, fix ) {
-	jQuery.event.special[ orig ] = {
-		delegateType: fix,
-		bindType: fix,
-
-		handle: function( event ) {
-			var ret,
-				target = this,
-				related = event.relatedTarget,
-				handleObj = event.handleObj;
-
-			// For mouseenter/leave call the handler if related is outside the target.
-			// NB: No relatedTarget if the mouse left/entered the browser window
-			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
-				event.type = handleObj.origType;
-				ret = handleObj.handler.apply( this, arguments );
-				event.type = fix;
-			}
-			return ret;
-		}
-	};
-} );
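-
-// Illustrative sketch (not from the original source): because mouseenter is
-// mapped onto bubbling mouseover above, it can be delegated:
-//
-//     jQuery( "#list" ).on( "mouseenter", "li", function() {
-//         jQuery( this ).addClass( "hover" ); // once per <li> entered
-//     } );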
-
-jQuery.fn.extend( {
-
-	on: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn );
-	},
-	one: function( types, selector, data, fn ) {
-		return on( this, types, selector, data, fn, 1 );
-	},
-	off: function( types, selector, fn ) {
-		var handleObj, type;
-		if ( types && types.preventDefault && types.handleObj ) {
-
-			// ( event )  dispatched jQuery.Event
-			handleObj = types.handleObj;
-			jQuery( types.delegateTarget ).off(
-				handleObj.namespace ?
-					handleObj.origType + "." + handleObj.namespace :
-					handleObj.origType,
-				handleObj.selector,
-				handleObj.handler
-			);
-			return this;
-		}
-		if ( typeof types === "object" ) {
-
-			// ( types-object [, selector] )
-			for ( type in types ) {
-				this.off( type, selector, types[ type ] );
-			}
-			return this;
-		}
-		if ( selector === false || typeof selector === "function" ) {
-
-			// ( types [, fn] )
-			fn = selector;
-			selector = undefined;
-		}
-		if ( fn === false ) {
-			fn = returnFalse;
-		}
-		return this.each( function() {
-			jQuery.event.remove( this, types, fn, selector );
-		} );
-	}
-} );
-
-
-var
-
-	// Support: IE <=10 - 11, Edge 12 - 13 only
-	// In IE/Edge using regex groups here causes severe slowdowns.
-	// See https://connect.microsoft.com/IE/feedback/details/1736512/
-	rnoInnerhtml = /<script|<style|<link/i,
-
-	// checked="checked" or checked
-	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
-	if ( nodeName( elem, "table" ) &&
-		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
-		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
-	}
-
-	return elem;
-}
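-
-// Illustrative sketch (not from the original source): appending a row
-// directly to a <table> is redirected into its <tbody> by manipulationTarget():
-//
-//     jQuery( "table" ).append( "<tr><td>x</td></tr>" ); // lands in the tbody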
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
-	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
-	return elem;
-}
-function restoreScript( elem ) {
-	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
-		elem.type = elem.type.slice( 5 );
-	} else {
-		elem.removeAttribute( "type" );
-	}
-
-	return elem;
-}
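-
-// Illustrative sketch (not from the original source): round-tripping a
-// script's type so it stays inert while being moved through the DOM:
-//
-//     var s = document.createElement( "script" );
-//     s.type = "text/javascript";
-//     disableScript( s ).type; // "true/text/javascript"
-//     restoreScript( s ).type; // "text/javascript"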
-
-function cloneCopyEvent( src, dest ) {
-	var i, l, type, pdataOld, udataOld, udataCur, events;
-
-	if ( dest.nodeType !== 1 ) {
-		return;
-	}
-
-	// 1. Copy private data: events, handlers, etc.
-	if ( dataPriv.hasData( src ) ) {
-		pdataOld = dataPriv.get( src );
-		events = pdataOld.events;
-
-		if ( events ) {
-			dataPriv.remove( dest, "handle events" );
-
-			for ( type in events ) {
-				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
-					jQuery.event.add( dest, type, events[ type ][ i ] );
-				}
-			}
-		}
-	}
-
-	// 2. Copy user data
-	if ( dataUser.hasData( src ) ) {
-		udataOld = dataUser.access( src );
-		udataCur = jQuery.extend( {}, udataOld );
-
-		dataUser.set( dest, udataCur );
-	}
-}
-
-// Fix IE bugs, see support tests
-function fixInput( src, dest ) {
-	var nodeName = dest.nodeName.toLowerCase();
-
-	// Fails to persist the checked state of a cloned checkbox or radio button.
-	if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
-		dest.checked = src.checked;
-
-	// Fails to return the selected option to the default selected state when cloning options
-	} else if ( nodeName === "input" || nodeName === "textarea" ) {
-		dest.defaultValue = src.defaultValue;
-	}
-}
-
-function domManip( collection, args, callback, ignored ) {
-
-	// Flatten any nested arrays
-	args = flat( args );
-
-	var fragment, first, scripts, hasScripts, node, doc,
-		i = 0,
-		l = collection.length,
-		iNoClone = l - 1,
-		value = args[ 0 ],
-		valueIsFunction = isFunction( value );
-
-	// We can't cloneNode fragments that contain checked, in WebKit
-	if ( valueIsFunction ||
-			( l > 1 && typeof value === "string" &&
-				!support.checkClone && rchecked.test( value ) ) ) {
-		return collection.each( function( index ) {
-			var self = collection.eq( index );
-			if ( valueIsFunction ) {
-				args[ 0 ] = value.call( this, index, self.html() );
-			}
-			domManip( self, args, callback, ignored );
-		} );
-	}
-
-	if ( l ) {
-		fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored );
-		first = fragment.firstChild;
-
-		if ( fragment.childNodes.length === 1 ) {
-			fragment = first;
-		}
-
-		// Require either new content or an interest in ignored elements to invoke the callback
-		if ( first || ignored ) {
-			scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
-			hasScripts = scripts.length;
-
-			// Use the original fragment for the last item
-			// instead of the first because it can end up
-			// being emptied incorrectly in certain situations (#8070).
-			for ( ; i < l; i++ ) {
-				node = fragment;
-
-				if ( i !== iNoClone ) {
-					node = jQuery.clone( node, true, true );
-
-					// Keep references to cloned scripts for later restoration
-					if ( hasScripts ) {
-
-						// Support: Android <=4.0 only, PhantomJS 1 only
-						// push.apply(_, arraylike) throws on ancient WebKit
-						jQuery.merge( scripts, getAll( node, "script" ) );
-					}
-				}
-
-				callback.call( collection[ i ], node, i );
-			}
-
-			if ( hasScripts ) {
-				doc = scripts[ scripts.length - 1 ].ownerDocument;
-
-				// Reenable scripts
-				jQuery.map( scripts, restoreScript );
-
-				// Evaluate executable scripts on first document insertion
-				for ( i = 0; i < hasScripts; i++ ) {
-					node = scripts[ i ];
-					if ( rscriptType.test( node.type || "" ) &&
-						!dataPriv.access( node, "globalEval" ) &&
-						jQuery.contains( doc, node ) ) {
-
-						if ( node.src && ( node.type || "" ).toLowerCase()  !== "module" ) {
-
-							// Optional AJAX dependency, but won't run scripts if not present
-							if ( jQuery._evalUrl && !node.noModule ) {
-								jQuery._evalUrl( node.src, {
-									nonce: node.nonce || node.getAttribute( "nonce" )
-								}, doc );
-							}
-						} else {
-							DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
-						}
-					}
-				}
-			}
-		}
-	}
-
-	return collection;
-}
-
-function remove( elem, selector, keepData ) {
-	var node,
-		nodes = selector ? jQuery.filter( selector, elem ) : elem,
-		i = 0;
-
-	for ( ; ( node = nodes[ i ] ) != null; i++ ) {
-		if ( !keepData && node.nodeType === 1 ) {
-			jQuery.cleanData( getAll( node ) );
-		}
-
-		if ( node.parentNode ) {
-			if ( keepData && isAttached( node ) ) {
-				setGlobalEval( getAll( node, "script" ) );
-			}
-			node.parentNode.removeChild( node );
-		}
-	}
-
-	return elem;
-}
-
-jQuery.extend( {
-	htmlPrefilter: function( html ) {
-		return html;
-	},
-
-	clone: function( elem, dataAndEvents, deepDataAndEvents ) {
-		var i, l, srcElements, destElements,
-			clone = elem.cloneNode( true ),
-			inPage = isAttached( elem );
-
-		// Fix IE cloning issues
-		if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
-				!jQuery.isXMLDoc( elem ) ) {
-
-			// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
-			destElements = getAll( clone );
-			srcElements = getAll( elem );
-
-			for ( i = 0, l = srcElements.length; i < l; i++ ) {
-				fixInput( srcElements[ i ], destElements[ i ] );
-			}
-		}
-
-		// Copy the events from the original to the clone
-		if ( dataAndEvents ) {
-			if ( deepDataAndEvents ) {
-				srcElements = srcElements || getAll( elem );
-				destElements = destElements || getAll( clone );
-
-				for ( i = 0, l = srcElements.length; i < l; i++ ) {
-					cloneCopyEvent( srcElements[ i ], destElements[ i ] );
-				}
-			} else {
-				cloneCopyEvent( elem, clone );
-			}
-		}
-
-		// Preserve script evaluation history
-		destElements = getAll( clone, "script" );
-		if ( destElements.length > 0 ) {
-			setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
-		}
-
-		// Return the cloned set
-		return clone;
-	},
-
-	cleanData: function( elems ) {
-		var data, elem, type,
-			special = jQuery.event.special,
-			i = 0;
-
-		for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
-			if ( acceptData( elem ) ) {
-				if ( ( data = elem[ dataPriv.expando ] ) ) {
-					if ( data.events ) {
-						for ( type in data.events ) {
-							if ( special[ type ] ) {
-								jQuery.event.remove( elem, type );
-
-							// This is a shortcut to avoid jQuery.event.remove's overhead
-							} else {
-								jQuery.removeEvent( elem, type, data.handle );
-							}
-						}
-					}
-
-					// Support: Chrome <=35 - 45+
-					// Assign undefined instead of using delete, see Data#remove
-					elem[ dataPriv.expando ] = undefined;
-				}
-				if ( elem[ dataUser.expando ] ) {
-
-					// Support: Chrome <=35 - 45+
-					// Assign undefined instead of using delete, see Data#remove
-					elem[ dataUser.expando ] = undefined;
-				}
-			}
-		}
-	}
-} );
-
-jQuery.fn.extend( {
-	detach: function( selector ) {
-		return remove( this, selector, true );
-	},
-
-	remove: function( selector ) {
-		return remove( this, selector );
-	},
-
-	text: function( value ) {
-		return access( this, function( value ) {
-			return value === undefined ?
-				jQuery.text( this ) :
-				this.empty().each( function() {
-					if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
-						this.textContent = value;
-					}
-				} );
-		}, null, value, arguments.length );
-	},
-
-	append: function() {
-		return domManip( this, arguments, function( elem ) {
-			if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
-				var target = manipulationTarget( this, elem );
-				target.appendChild( elem );
-			}
-		} );
-	},
-
-	prepend: function() {
-		return domManip( this, arguments, function( elem ) {
-			if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
-				var target = manipulationTarget( this, elem );
-				target.insertBefore( elem, target.firstChild );
-			}
-		} );
-	},
-
-	before: function() {
-		return domManip( this, arguments, function( elem ) {
-			if ( this.parentNode ) {
-				this.parentNode.insertBefore( elem, this );
-			}
-		} );
-	},
-
-	after: function() {
-		return domManip( this, arguments, function( elem ) {
-			if ( this.parentNode ) {
-				this.parentNode.insertBefore( elem, this.nextSibling );
-			}
-		} );
-	},
-
-	empty: function() {
-		var elem,
-			i = 0;
-
-		for ( ; ( elem = this[ i ] ) != null; i++ ) {
-			if ( elem.nodeType === 1 ) {
-
-				// Prevent memory leaks
-				jQuery.cleanData( getAll( elem, false ) );
-
-				// Remove any remaining nodes
-				elem.textContent = "";
-			}
-		}
-
-		return this;
-	},
-
-	clone: function( dataAndEvents, deepDataAndEvents ) {
-		dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
-		deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
-
-		return this.map( function() {
-			return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
-		} );
-	},
-
-	html: function( value ) {
-		return access( this, function( value ) {
-			var elem = this[ 0 ] || {},
-				i = 0,
-				l = this.length;
-
-			if ( value === undefined && elem.nodeType === 1 ) {
-				return elem.innerHTML;
-			}
-
-			// See if we can take a shortcut and just use innerHTML
-			if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
-				!wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) {
-
-				value = jQuery.htmlPrefilter( value );
-
-				try {
-					for ( ; i < l; i++ ) {
-						elem = this[ i ] || {};
-
-						// Remove element nodes and prevent memory leaks
-						if ( elem.nodeType === 1 ) {
-							jQuery.cleanData( getAll( elem, false ) );
-							elem.innerHTML = value;
-						}
-					}
-
-					elem = 0;
-
-				// If using innerHTML throws an exception, use the fallback method
-				} catch ( e ) {}
-			}
-
-			if ( elem ) {
-				this.empty().append( value );
-			}
-		}, null, value, arguments.length );
-	},
-
-	replaceWith: function() {
-		var ignored = [];
-
-		// Make the changes, replacing each non-ignored context element with the new content
-		return domManip( this, arguments, function( elem ) {
-			var parent = this.parentNode;
-
-			if ( jQuery.inArray( this, ignored ) < 0 ) {
-				jQuery.cleanData( getAll( this ) );
-				if ( parent ) {
-					parent.replaceChild( elem, this );
-				}
-			}
-
-		// Force callback invocation
-		}, ignored );
-	}
-} );
-
-jQuery.each( {
-	appendTo: "append",
-	prependTo: "prepend",
-	insertBefore: "before",
-	insertAfter: "after",
-	replaceAll: "replaceWith"
-}, function( name, original ) {
-	jQuery.fn[ name ] = function( selector ) {
-		var elems,
-			ret = [],
-			insert = jQuery( selector ),
-			last = insert.length - 1,
-			i = 0;
-
-		for ( ; i <= last; i++ ) {
-			elems = i === last ? this : this.clone( true );
-			jQuery( insert[ i ] )[ original ]( elems );
-
-			// Support: Android <=4.0 only, PhantomJS 1 only
-			// .get() because push.apply(_, arraylike) throws on ancient WebKit
-			push.apply( ret, elems.get() );
-		}
-
-		return this.pushStack( ret );
-	};
-} );
-var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" );
-
-var getStyles = function( elem ) {
-
-		// Support: IE <=11 only, Firefox <=30 (#15098, #14150)
-		// IE throws on elements created in popups
-		// FF meanwhile throws on frame elements through "defaultView.getComputedStyle"
-		var view = elem.ownerDocument.defaultView;
-
-		if ( !view || !view.opener ) {
-			view = window;
-		}
-
-		return view.getComputedStyle( elem );
-	};
-
-var swap = function( elem, options, callback ) {
-	var ret, name,
-		old = {};
-
-	// Remember the old values, and insert the new ones
-	for ( name in options ) {
-		old[ name ] = elem.style[ name ];
-		elem.style[ name ] = options[ name ];
-	}
-
-	ret = callback.call( elem );
-
-	// Revert the old values
-	for ( name in options ) {
-		elem.style[ name ] = old[ name ];
-	}
-
-	return ret;
-};
-
-
-var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
-
-
-
-( function() {
-
-	// Executing both pixelPosition & boxSizingReliable tests requires only one layout,
-	// so they're executed at the same time to save the second computation.
-	function computeStyleTests() {
-
-		// This is a singleton; we need to execute it only once
-		if ( !div ) {
-			return;
-		}
-
-		container.style.cssText = "position:absolute;left:-11111px;width:60px;" +
-			"margin-top:1px;padding:0;border:0";
-		div.style.cssText =
-			"position:relative;display:block;box-sizing:border-box;overflow:scroll;" +
-			"margin:auto;border:1px;padding:1px;" +
-			"width:60%;top:1%";
-		documentElement.appendChild( container ).appendChild( div );
-
-		var divStyle = window.getComputedStyle( div );
-		pixelPositionVal = divStyle.top !== "1%";
-
-		// Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
-		reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12;
-
-		// Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3
-		// Some styles come back with percentage values, even though they shouldn't
-		div.style.right = "60%";
-		pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36;
-
-		// Support: IE 9 - 11 only
-		// Detect misreporting of content dimensions for box-sizing:border-box elements
-		boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36;
-
-		// Support: IE 9 only
-		// Detect overflow:scroll screwiness (gh-3699)
-		// Support: Chrome <=64
-		// Don't get tricked when zoom affects offsetWidth (gh-4029)
-		div.style.position = "absolute";
-		scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12;
-
-		documentElement.removeChild( container );
-
-		// Nullify the div so it won't be kept in memory; a null div
-		// also signals that the checks have already been performed
-		div = null;
-	}
-
-	function roundPixelMeasures( measure ) {
-		return Math.round( parseFloat( measure ) );
-	}
-
-	var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal,
-		reliableTrDimensionsVal, reliableMarginLeftVal,
-		container = document.createElement( "div" ),
-		div = document.createElement( "div" );
-
-	// Finish early in limited (non-browser) environments
-	if ( !div.style ) {
-		return;
-	}
-
-	// Support: IE <=9 - 11 only
-	// Style of cloned element affects source element cloned (#8908)
-	div.style.backgroundClip = "content-box";
-	div.cloneNode( true ).style.backgroundClip = "";
-	support.clearCloneStyle = div.style.backgroundClip === "content-box";
-
-	jQuery.extend( support, {
-		boxSizingReliable: function() {
-			computeStyleTests();
-			return boxSizingReliableVal;
-		},
-		pixelBoxStyles: function() {
-			computeStyleTests();
-			return pixelBoxStylesVal;
-		},
-		pixelPosition: function() {
-			computeStyleTests();
-			return pixelPositionVal;
-		},
-		reliableMarginLeft: function() {
-			computeStyleTests();
-			return reliableMarginLeftVal;
-		},
-		scrollboxSize: function() {
-			computeStyleTests();
-			return scrollboxSizeVal;
-		},
-
-		// Support: IE 9 - 11+, Edge 15 - 18+
-		// IE/Edge misreport `getComputedStyle` of table rows with width/height
-		// set in CSS while `offset*` properties report correct values.
-		// Behavior in IE 9 is more subtle than in newer versions & it passes
-		// some versions of this test; make sure not to make it pass there!
-		reliableTrDimensions: function() {
-			var table, tr, trChild, trStyle;
-			if ( reliableTrDimensionsVal == null ) {
-				table = document.createElement( "table" );
-				tr = document.createElement( "tr" );
-				trChild = document.createElement( "div" );
-
-				table.style.cssText = "position:absolute;left:-11111px";
-				tr.style.height = "1px";
-				trChild.style.height = "9px";
-
-				documentElement
-					.appendChild( table )
-					.appendChild( tr )
-					.appendChild( trChild );
-
-				trStyle = window.getComputedStyle( tr );
-				reliableTrDimensionsVal = parseInt( trStyle.height ) > 3;
-
-				documentElement.removeChild( table );
-			}
-			return reliableTrDimensionsVal;
-		}
-	} );
-} )();
-
-
-function curCSS( elem, name, computed ) {
-	var width, minWidth, maxWidth, ret,
-
-		// Support: Firefox 51+
-		// Retrieving style before computed somehow
-		// fixes an issue with getting wrong values
-		// on detached elements
-		style = elem.style;
-
-	computed = computed || getStyles( elem );
-
-	// getPropertyValue is needed for:
-	//   .css('filter') (IE 9 only, #12537)
-	//   .css('--customProperty') (#3144)
-	if ( computed ) {
-		ret = computed.getPropertyValue( name ) || computed[ name ];
-
-		if ( ret === "" && !isAttached( elem ) ) {
-			ret = jQuery.style( elem, name );
-		}
-
-		// A tribute to the "awesome hack by Dean Edwards"
-		// Android Browser returns percentage for some values,
-		// but width seems to be reliably pixels.
-		// This is against the CSSOM draft spec:
-		// https://drafts.csswg.org/cssom/#resolved-values
-		if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) {
-
-			// Remember the original values
-			width = style.width;
-			minWidth = style.minWidth;
-			maxWidth = style.maxWidth;
-
-			// Put in the new values to get a computed value out
-			style.minWidth = style.maxWidth = style.width = ret;
-			ret = computed.width;
-
-			// Revert the changed values
-			style.width = width;
-			style.minWidth = minWidth;
-			style.maxWidth = maxWidth;
-		}
-	}
-
-	return ret !== undefined ?
-
-		// Support: IE <=9 - 11 only
-		// IE returns zIndex value as an integer.
-		ret + "" :
-		ret;
-}
-
-
-function addGetHookIf( conditionFn, hookFn ) {
-
-	// Define the hook; we'll check on the first run whether it's really needed.
-	return {
-		get: function() {
-			if ( conditionFn() ) {
-
-				// Hook not needed (or it's not possible to use it due
-				// to missing dependency), remove it.
-				delete this.get;
-				return;
-			}
-
-			// Hook needed; redefine it so that the support test is not executed again.
-			return ( this.get = hookFn ).apply( this, arguments );
-		}
-	};
-}
-
-
-var cssPrefixes = [ "Webkit", "Moz", "ms" ],
-	emptyStyle = document.createElement( "div" ).style,
-	vendorProps = {};
-
-// Return a vendor-prefixed property or undefined
-function vendorPropName( name ) {
-
-	// Check for vendor prefixed names
-	var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
-		i = cssPrefixes.length;
-
-	while ( i-- ) {
-		name = cssPrefixes[ i ] + capName;
-		if ( name in emptyStyle ) {
-			return name;
-		}
-	}
-}
-
-// Return a potentially-mapped jQuery.cssProps or vendor prefixed property
-function finalPropName( name ) {
-	var final = jQuery.cssProps[ name ] || vendorProps[ name ];
-
-	if ( final ) {
-		return final;
-	}
-	if ( name in emptyStyle ) {
-		return name;
-	}
-	return vendorProps[ name ] = vendorPropName( name ) || name;
-}
-
-
-var
-
-	// Swappable if display is none or starts with table
-	// except "table", "table-cell", or "table-caption"
-	// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
-	rdisplayswap = /^(none|table(?!-c[ea]).+)/,
-	rcustomProp = /^--/,
-	cssShow = { position: "absolute", visibility: "hidden", display: "block" },
-	cssNormalTransform = {
-		letterSpacing: "0",
-		fontWeight: "400"
-	};
-
-function setPositiveNumber( _elem, value, subtract ) {
-
-	// Any relative (+/-) values have already been
-	// normalized at this point
-	var matches = rcssNum.exec( value );
-	return matches ?
-
-		// Guard against undefined "subtract", e.g., when used as in cssHooks
-		Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
-		value;
-}
-
-function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) {
-	var i = dimension === "width" ? 1 : 0,
-		extra = 0,
-		delta = 0;
-
-	// Adjustment may not be necessary
-	if ( box === ( isBorderBox ? "border" : "content" ) ) {
-		return 0;
-	}
-
-	for ( ; i < 4; i += 2 ) {
-
-		// Both box models exclude margin
-		if ( box === "margin" ) {
-			delta += jQuery.css( elem, box + cssExpand[ i ], true, styles );
-		}
-
-		// If we get here with a content-box, we're seeking "padding" or "border" or "margin"
-		if ( !isBorderBox ) {
-
-			// Add padding
-			delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
-
-			// For "border" or "margin", add border
-			if ( box !== "padding" ) {
-				delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
-
-			// But still keep track of it otherwise
-			} else {
-				extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
-			}
-
-		// If we get here with a border-box (content + padding + border), we're seeking "content" or
-		// "padding" or "margin"
-		} else {
-
-			// For "content", subtract padding
-			if ( box === "content" ) {
-				delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
-			}
-
-			// For "content" or "padding", subtract border
-			if ( box !== "margin" ) {
-				delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
-			}
-		}
-	}
-
-	// Account for positive content-box scroll gutter when requested by providing computedVal
-	if ( !isBorderBox && computedVal >= 0 ) {
-
-		// offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border
-		// Assuming integer scroll gutter, subtract the rest and round down
-		delta += Math.max( 0, Math.ceil(
-			elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
-			computedVal -
-			delta -
-			extra -
-			0.5
-
-		// If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter
-		// Use an explicit zero to avoid NaN (gh-3964)
-		) ) || 0;
-	}
-
-	return delta;
-}
-
-function getWidthOrHeight( elem, dimension, extra ) {
-
-	// Start with computed style
-	var styles = getStyles( elem ),
-
-		// To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322).
-		// Fake content-box until we know it's needed to know the true value.
-		boxSizingNeeded = !support.boxSizingReliable() || extra,
-		isBorderBox = boxSizingNeeded &&
-			jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
-		valueIsBorderBox = isBorderBox,
-
-		val = curCSS( elem, dimension, styles ),
-		offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 );
-
-	// Support: Firefox <=54
-	// Return a confounding non-pixel value or feign ignorance, as appropriate.
-	if ( rnumnonpx.test( val ) ) {
-		if ( !extra ) {
-			return val;
-		}
-		val = "auto";
-	}
-
-
-	// Support: IE 9 - 11 only
-	// Use offsetWidth/offsetHeight for when box sizing is unreliable.
-	// In those cases, the computed value can be trusted to be border-box.
-	if ( ( !support.boxSizingReliable() && isBorderBox ||
-
-		// Support: IE 10 - 11+, Edge 15 - 18+
-		// IE/Edge misreport `getComputedStyle` of table rows with width/height
-		// set in CSS while `offset*` properties report correct values.
-		// Interestingly, in some cases IE 9 doesn't suffer from this issue.
-		!support.reliableTrDimensions() && nodeName( elem, "tr" ) ||
-
-		// Fall back to offsetWidth/offsetHeight when value is "auto"
-		// This happens for inline elements with no explicit setting (gh-3571)
-		val === "auto" ||
-
-		// Support: Android <=4.1 - 4.3 only
-		// Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602)
-		!parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) &&
-
-		// Make sure the element is visible & connected
-		elem.getClientRects().length ) {
-
-		isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";
-
-		// Where available, offsetWidth/offsetHeight approximate border box dimensions.
-		// Where not available (e.g., SVG), assume unreliable box-sizing and interpret the
-		// retrieved value as a content box dimension.
-		valueIsBorderBox = offsetProp in elem;
-		if ( valueIsBorderBox ) {
-			val = elem[ offsetProp ];
-		}
-	}
-
-	// Normalize "" and auto
-	val = parseFloat( val ) || 0;
-
-	// Adjust for the element's box model
-	return ( val +
-		boxModelAdjustment(
-			elem,
-			dimension,
-			extra || ( isBorderBox ? "border" : "content" ),
-			valueIsBorderBox,
-			styles,
-
-			// Provide the current computed size to request scroll gutter calculation (gh-3589)
-			val
-		)
-	) + "px";
-}
-
-jQuery.extend( {
-
-	// Add in style property hooks for overriding the default
-	// behavior of getting and setting a style property
-	cssHooks: {
-		opacity: {
-			get: function( elem, computed ) {
-				if ( computed ) {
-
-					// We should always get a number back from opacity
-					var ret = curCSS( elem, "opacity" );
-					return ret === "" ? "1" : ret;
-				}
-			}
-		}
-	},
-
-	// Don't automatically add "px" to these possibly-unitless properties
-	cssNumber: {
-		"animationIterationCount": true,
-		"columnCount": true,
-		"fillOpacity": true,
-		"flexGrow": true,
-		"flexShrink": true,
-		"fontWeight": true,
-		"gridArea": true,
-		"gridColumn": true,
-		"gridColumnEnd": true,
-		"gridColumnStart": true,
-		"gridRow": true,
-		"gridRowEnd": true,
-		"gridRowStart": true,
-		"lineHeight": true,
-		"opacity": true,
-		"order": true,
-		"orphans": true,
-		"widows": true,
-		"zIndex": true,
-		"zoom": true
-	},
-
-	// Add in properties whose names you wish to fix before
-	// setting or getting the value
-	cssProps: {},
-
-	// Get and set the style property on a DOM Node
-	style: function( elem, name, value, extra ) {
-
-		// Don't set styles on text and comment nodes
-		if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
-			return;
-		}
-
-		// Make sure that we're working with the right name
-		var ret, type, hooks,
-			origName = camelCase( name ),
-			isCustomProp = rcustomProp.test( name ),
-			style = elem.style;
-
-		// Make sure that we're working with the right name. We don't
-		// want to query the value if it is a CSS custom property
-		// since they are user-defined.
-		if ( !isCustomProp ) {
-			name = finalPropName( origName );
-		}
-
-		// Gets hook for the prefixed version, then unprefixed version
-		hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
-
-		// Check if we're setting a value
-		if ( value !== undefined ) {
-			type = typeof value;
-
-			// Convert "+=" or "-=" to relative numbers (#7345)
-			if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) {
-				value = adjustCSS( elem, name, ret );
-
-				// Fixes bug #9237
-				type = "number";
-			}
-
-			// Make sure that null and NaN values aren't set (#7116)
-			if ( value == null || value !== value ) {
-				return;
-			}
-
-			// If a number was passed in, add the unit (except for certain CSS properties)
-			// The isCustomProp check can be removed in jQuery 4.0 when we only auto-append
-			// "px" to a few hardcoded values.
-			if ( type === "number" && !isCustomProp ) {
-				value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" );
-			}
-
-			// background-* props affect original clone's values
-			if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) {
-				style[ name ] = "inherit";
-			}
-
-			// If a hook was provided, use that value, otherwise just set the specified value
-			if ( !hooks || !( "set" in hooks ) ||
-				( value = hooks.set( elem, value, extra ) ) !== undefined ) {
-
-				if ( isCustomProp ) {
-					style.setProperty( name, value );
-				} else {
-					style[ name ] = value;
-				}
-			}
-
-		} else {
-
-			// If a hook was provided get the non-computed value from there
-			if ( hooks && "get" in hooks &&
-				( ret = hooks.get( elem, false, extra ) ) !== undefined ) {
-
-				return ret;
-			}
-
-			// Otherwise just get the value from the style object
-			return style[ name ];
-		}
-	},
-
-	css: function( elem, name, extra, styles ) {
-		var val, num, hooks,
-			origName = camelCase( name ),
-			isCustomProp = rcustomProp.test( name );
-
-		// Make sure that we're working with the right name. We don't
-		// want to modify the value if it is a CSS custom property
-		// since they are user-defined.
-		if ( !isCustomProp ) {
-			name = finalPropName( origName );
-		}
-
-		// Try prefixed name followed by the unprefixed name
-		hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
-
-		// If a hook was provided get the computed value from there
-		if ( hooks && "get" in hooks ) {
-			val = hooks.get( elem, true, extra );
-		}
-
-		// Otherwise, if a way to get the computed value exists, use that
-		if ( val === undefined ) {
-			val = curCSS( elem, name, styles );
-		}
-
-		// Convert "normal" to computed value
-		if ( val === "normal" && name in cssNormalTransform ) {
-			val = cssNormalTransform[ name ];
-		}
-
-		// Make numeric if forced or a qualifier was provided and val looks numeric
-		if ( extra === "" || extra ) {
-			num = parseFloat( val );
-			return extra === true || isFinite( num ) ? num || 0 : val;
-		}
-
-		return val;
-	}
-} );
-
-jQuery.each( [ "height", "width" ], function( _i, dimension ) {
-	jQuery.cssHooks[ dimension ] = {
-		get: function( elem, computed, extra ) {
-			if ( computed ) {
-
-				// Certain elements can have dimension info if we invisibly show them
-				// but it must have a current display style that would benefit
-				return rdisplayswap.test( jQuery.css( elem, "display" ) ) &&
-
-					// Support: Safari 8+
-					// Table columns in Safari have non-zero offsetWidth & zero
-					// getBoundingClientRect().width unless display is changed.
-					// Support: IE <=11 only
-					// Running getBoundingClientRect on a disconnected node
-					// in IE throws an error.
-					( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ?
-						swap( elem, cssShow, function() {
-							return getWidthOrHeight( elem, dimension, extra );
-						} ) :
-						getWidthOrHeight( elem, dimension, extra );
-			}
-		},
-
-		set: function( elem, value, extra ) {
-			var matches,
-				styles = getStyles( elem ),
-
-				// Only read styles.position if the test has a chance to fail
-				// to avoid forcing a reflow.
-				scrollboxSizeBuggy = !support.scrollboxSize() &&
-					styles.position === "absolute",
-
-				// To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991)
-				boxSizingNeeded = scrollboxSizeBuggy || extra,
-				isBorderBox = boxSizingNeeded &&
-					jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
-				subtract = extra ?
-					boxModelAdjustment(
-						elem,
-						dimension,
-						extra,
-						isBorderBox,
-						styles
-					) :
-					0;
-
-			// Account for unreliable border-box dimensions by comparing offset* to computed and
-			// faking a content-box to get border and padding (gh-3699)
-			if ( isBorderBox && scrollboxSizeBuggy ) {
-				subtract -= Math.ceil(
-					elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
-					parseFloat( styles[ dimension ] ) -
-					boxModelAdjustment( elem, dimension, "border", false, styles ) -
-					0.5
-				);
-			}
-
-			// Convert to pixels if value adjustment is needed
-			if ( subtract && ( matches = rcssNum.exec( value ) ) &&
-				( matches[ 3 ] || "px" ) !== "px" ) {
-
-				elem.style[ dimension ] = value;
-				value = jQuery.css( elem, dimension );
-			}
-
-			return setPositiveNumber( elem, value, subtract );
-		}
-	};
-} );
-
-jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft,
-	function( elem, computed ) {
-		if ( computed ) {
-			return ( parseFloat( curCSS( elem, "marginLeft" ) ) ||
-				elem.getBoundingClientRect().left -
-					swap( elem, { marginLeft: 0 }, function() {
-						return elem.getBoundingClientRect().left;
-					} )
-				) + "px";
-		}
-	}
-);
-
-// These hooks are used by animate to expand properties
-jQuery.each( {
-	margin: "",
-	padding: "",
-	border: "Width"
-}, function( prefix, suffix ) {
-	jQuery.cssHooks[ prefix + suffix ] = {
-		expand: function( value ) {
-			var i = 0,
-				expanded = {},
-
-				// Assumes a single number if not a string
-				parts = typeof value === "string" ? value.split( " " ) : [ value ];
-
-			for ( ; i < 4; i++ ) {
-				expanded[ prefix + cssExpand[ i ] + suffix ] =
-					parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
-			}
-
-			return expanded;
-		}
-	};
-
-	if ( prefix !== "margin" ) {
-		jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
-	}
-} );
-
-jQuery.fn.extend( {
-	css: function( name, value ) {
-		return access( this, function( elem, name, value ) {
-			var styles, len,
-				map = {},
-				i = 0;
-
-			if ( Array.isArray( name ) ) {
-				styles = getStyles( elem );
-				len = name.length;
-
-				for ( ; i < len; i++ ) {
-					map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles );
-				}
-
-				return map;
-			}
-
-			return value !== undefined ?
-				jQuery.style( elem, name, value ) :
-				jQuery.css( elem, name );
-		}, name, value, arguments.length > 1 );
-	}
-} );
-
-
-function Tween( elem, options, prop, end, easing ) {
-	return new Tween.prototype.init( elem, options, prop, end, easing );
-}
-jQuery.Tween = Tween;
-
-Tween.prototype = {
-	constructor: Tween,
-	init: function( elem, options, prop, end, easing, unit ) {
-		this.elem = elem;
-		this.prop = prop;
-		this.easing = easing || jQuery.easing._default;
-		this.options = options;
-		this.start = this.now = this.cur();
-		this.end = end;
-		this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
-	},
-	cur: function() {
-		var hooks = Tween.propHooks[ this.prop ];
-
-		return hooks && hooks.get ?
-			hooks.get( this ) :
-			Tween.propHooks._default.get( this );
-	},
-	run: function( percent ) {
-		var eased,
-			hooks = Tween.propHooks[ this.prop ];
-
-		if ( this.options.duration ) {
-			this.pos = eased = jQuery.easing[ this.easing ](
-				percent, this.options.duration * percent, 0, 1, this.options.duration
-			);
-		} else {
-			this.pos = eased = percent;
-		}
-		this.now = ( this.end - this.start ) * eased + this.start;
-
-		if ( this.options.step ) {
-			this.options.step.call( this.elem, this.now, this );
-		}
-
-		if ( hooks && hooks.set ) {
-			hooks.set( this );
-		} else {
-			Tween.propHooks._default.set( this );
-		}
-		return this;
-	}
-};
-
-Tween.prototype.init.prototype = Tween.prototype;
-
-Tween.propHooks = {
-	_default: {
-		get: function( tween ) {
-			var result;
-
-			// Use a property on the element directly when it is not a DOM element,
-			// or when there is no matching style property that exists.
-			if ( tween.elem.nodeType !== 1 ||
-				tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) {
-				return tween.elem[ tween.prop ];
-			}
-
-			// Passing an empty string as a 3rd parameter to .css will automatically
-			// attempt a parseFloat and fallback to a string if the parse fails.
-			// Simple values such as "10px" are parsed to Float;
-			// complex values such as "rotate(1rad)" are returned as-is.
-			result = jQuery.css( tween.elem, tween.prop, "" );
-
-			// Empty strings, null, undefined and "auto" are converted to 0.
-			return !result || result === "auto" ? 0 : result;
-		},
-		set: function( tween ) {
-
-			// Use step hook for back compat.
-			// Use cssHook if it's there.
-			// Use .style if available and use plain properties where available.
-			if ( jQuery.fx.step[ tween.prop ] ) {
-				jQuery.fx.step[ tween.prop ]( tween );
-			} else if ( tween.elem.nodeType === 1 && (
-					jQuery.cssHooks[ tween.prop ] ||
-					tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) {
-				jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
-			} else {
-				tween.elem[ tween.prop ] = tween.now;
-			}
-		}
-	}
-};
-
-// Support: IE <=9 only
-// Panic-based approach to setting things on disconnected nodes
-Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
-	set: function( tween ) {
-		if ( tween.elem.nodeType && tween.elem.parentNode ) {
-			tween.elem[ tween.prop ] = tween.now;
-		}
-	}
-};
-
-jQuery.easing = {
-	linear: function( p ) {
-		return p;
-	},
-	swing: function( p ) {
-		return 0.5 - Math.cos( p * Math.PI ) / 2;
-	},
-	_default: "swing"
-};
-
-jQuery.fx = Tween.prototype.init;
-
-// Back compat <1.8 extension point
-jQuery.fx.step = {};
-
-
-
-
-var
-	fxNow, inProgress,
-	rfxtypes = /^(?:toggle|show|hide)$/,
-	rrun = /queueHooks$/;
-
-function schedule() {
-	if ( inProgress ) {
-		if ( document.hidden === false && window.requestAnimationFrame ) {
-			window.requestAnimationFrame( schedule );
-		} else {
-			window.setTimeout( schedule, jQuery.fx.interval );
-		}
-
-		jQuery.fx.tick();
-	}
-}
-
-// Animations created synchronously will run synchronously
-function createFxNow() {
-	window.setTimeout( function() {
-		fxNow = undefined;
-	} );
-	return ( fxNow = Date.now() );
-}
-
-// Generate parameters to create a standard animation
-function genFx( type, includeWidth ) {
-	var which,
-		i = 0,
-		attrs = { height: type };
-
-	// If we include width, step value is 1 to do all cssExpand values,
-	// otherwise step value is 2 to skip over Left and Right
-	includeWidth = includeWidth ? 1 : 0;
-	for ( ; i < 4; i += 2 - includeWidth ) {
-		which = cssExpand[ i ];
-		attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
-	}
-
-	if ( includeWidth ) {
-		attrs.opacity = attrs.width = type;
-	}
-
-	return attrs;
-}
-
-function createTween( value, prop, animation ) {
-	var tween,
-		collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ),
-		index = 0,
-		length = collection.length;
-	for ( ; index < length; index++ ) {
-		if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) {
-
-			// We're done with this property
-			return tween;
-		}
-	}
-}
-
-function defaultPrefilter( elem, props, opts ) {
-	var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display,
-		isBox = "width" in props || "height" in props,
-		anim = this,
-		orig = {},
-		style = elem.style,
-		hidden = elem.nodeType && isHiddenWithinTree( elem ),
-		dataShow = dataPriv.get( elem, "fxshow" );
-
-	// Queue-skipping animations hijack the fx hooks
-	if ( !opts.queue ) {
-		hooks = jQuery._queueHooks( elem, "fx" );
-		if ( hooks.unqueued == null ) {
-			hooks.unqueued = 0;
-			oldfire = hooks.empty.fire;
-			hooks.empty.fire = function() {
-				if ( !hooks.unqueued ) {
-					oldfire();
-				}
-			};
-		}
-		hooks.unqueued++;
-
-		anim.always( function() {
-
-			// Ensure the complete handler is called before this completes
-			anim.always( function() {
-				hooks.unqueued--;
-				if ( !jQuery.queue( elem, "fx" ).length ) {
-					hooks.empty.fire();
-				}
-			} );
-		} );
-	}
-
-	// Detect show/hide animations
-	for ( prop in props ) {
-		value = props[ prop ];
-		if ( rfxtypes.test( value ) ) {
-			delete props[ prop ];
-			toggle = toggle || value === "toggle";
-			if ( value === ( hidden ? "hide" : "show" ) ) {
-
-				// Pretend to be hidden if this is a "show" and
-				// there is still data from a stopped show/hide
-				if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) {
-					hidden = true;
-
-				// Ignore all other no-op show/hide data
-				} else {
-					continue;
-				}
-			}
-			orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop );
-		}
-	}
-
-	// Bail out if this is a no-op like .hide().hide()
-	propTween = !jQuery.isEmptyObject( props );
-	if ( !propTween && jQuery.isEmptyObject( orig ) ) {
-		return;
-	}
-
-	// Restrict "overflow" and "display" styles during box animations
-	if ( isBox && elem.nodeType === 1 ) {
-
-		// Support: IE <=9 - 11, Edge 12 - 15
-		// Record all 3 overflow attributes because IE does not infer the shorthand
-		// from identically-valued overflowX and overflowY and Edge just mirrors
-		// the overflowX value there.
-		opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
-
-		// Identify a display type, preferring old show/hide data over the CSS cascade
-		restoreDisplay = dataShow && dataShow.display;
-		if ( restoreDisplay == null ) {
-			restoreDisplay = dataPriv.get( elem, "display" );
-		}
-		display = jQuery.css( elem, "display" );
-		if ( display === "none" ) {
-			if ( restoreDisplay ) {
-				display = restoreDisplay;
-			} else {
-
-				// Get nonempty value(s) by temporarily forcing visibility
-				showHide( [ elem ], true );
-				restoreDisplay = elem.style.display || restoreDisplay;
-				display = jQuery.css( elem, "display" );
-				showHide( [ elem ] );
-			}
-		}
-
-		// Animate inline elements as inline-block
-		if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) {
-			if ( jQuery.css( elem, "float" ) === "none" ) {
-
-				// Restore the original display value at the end of pure show/hide animations
-				if ( !propTween ) {
-					anim.done( function() {
-						style.display = restoreDisplay;
-					} );
-					if ( restoreDisplay == null ) {
-						display = style.display;
-						restoreDisplay = display === "none" ? "" : display;
-					}
-				}
-				style.display = "inline-block";
-			}
-		}
-	}
-
-	if ( opts.overflow ) {
-		style.overflow = "hidden";
-		anim.always( function() {
-			style.overflow = opts.overflow[ 0 ];
-			style.overflowX = opts.overflow[ 1 ];
-			style.overflowY = opts.overflow[ 2 ];
-		} );
-	}
-
-	// Implement show/hide animations
-	propTween = false;
-	for ( prop in orig ) {
-
-		// General show/hide setup for this element animation
-		if ( !propTween ) {
-			if ( dataShow ) {
-				if ( "hidden" in dataShow ) {
-					hidden = dataShow.hidden;
-				}
-			} else {
-				dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } );
-			}
-
-			// Store hidden/visible for toggle so `.stop().toggle()` "reverses"
-			if ( toggle ) {
-				dataShow.hidden = !hidden;
-			}
-
-			// Show elements before animating them
-			if ( hidden ) {
-				showHide( [ elem ], true );
-			}
-
-			/* eslint-disable no-loop-func */
-
-			anim.done( function() {
-
-			/* eslint-enable no-loop-func */
-
-				// The final step of a "hide" animation is actually hiding the element
-				if ( !hidden ) {
-					showHide( [ elem ] );
-				}
-				dataPriv.remove( elem, "fxshow" );
-				for ( prop in orig ) {
-					jQuery.style( elem, prop, orig[ prop ] );
-				}
-			} );
-		}
-
-		// Per-property setup
-		propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim );
-		if ( !( prop in dataShow ) ) {
-			dataShow[ prop ] = propTween.start;
-			if ( hidden ) {
-				propTween.end = propTween.start;
-				propTween.start = 0;
-			}
-		}
-	}
-}
-
-function propFilter( props, specialEasing ) {
-	var index, name, easing, value, hooks;
-
-	// camelCase, specialEasing and expand cssHook pass
-	for ( index in props ) {
-		name = camelCase( index );
-		easing = specialEasing[ name ];
-		value = props[ index ];
-		if ( Array.isArray( value ) ) {
-			easing = value[ 1 ];
-			value = props[ index ] = value[ 0 ];
-		}
-
-		if ( index !== name ) {
-			props[ name ] = value;
-			delete props[ index ];
-		}
-
-		hooks = jQuery.cssHooks[ name ];
-		if ( hooks && "expand" in hooks ) {
-			value = hooks.expand( value );
-			delete props[ name ];
-
-			// Not quite $.extend, this won't overwrite existing keys.
-			// Reusing 'index' because we have the correct "name"
-			for ( index in value ) {
-				if ( !( index in props ) ) {
-					props[ index ] = value[ index ];
-					specialEasing[ index ] = easing;
-				}
-			}
-		} else {
-			specialEasing[ name ] = easing;
-		}
-	}
-}
-
-function Animation( elem, properties, options ) {
-	var result,
-		stopped,
-		index = 0,
-		length = Animation.prefilters.length,
-		deferred = jQuery.Deferred().always( function() {
-
-			// Don't match elem in the :animated selector
-			delete tick.elem;
-		} ),
-		tick = function() {
-			if ( stopped ) {
-				return false;
-			}
-			var currentTime = fxNow || createFxNow(),
-				remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
-
-				// Support: Android 2.3 only
-				// Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497)
-				temp = remaining / animation.duration || 0,
-				percent = 1 - temp,
-				index = 0,
-				length = animation.tweens.length;
-
-			for ( ; index < length; index++ ) {
-				animation.tweens[ index ].run( percent );
-			}
-
-			deferred.notifyWith( elem, [ animation, percent, remaining ] );
-
-			// If there's more to do, yield
-			if ( percent < 1 && length ) {
-				return remaining;
-			}
-
-			// If this was an empty animation, synthesize a final progress notification
-			if ( !length ) {
-				deferred.notifyWith( elem, [ animation, 1, 0 ] );
-			}
-
-			// Resolve the animation and report its conclusion
-			deferred.resolveWith( elem, [ animation ] );
-			return false;
-		},
-		animation = deferred.promise( {
-			elem: elem,
-			props: jQuery.extend( {}, properties ),
-			opts: jQuery.extend( true, {
-				specialEasing: {},
-				easing: jQuery.easing._default
-			}, options ),
-			originalProperties: properties,
-			originalOptions: options,
-			startTime: fxNow || createFxNow(),
-			duration: options.duration,
-			tweens: [],
-			createTween: function( prop, end ) {
-				var tween = jQuery.Tween( elem, animation.opts, prop, end,
-						animation.opts.specialEasing[ prop ] || animation.opts.easing );
-				animation.tweens.push( tween );
-				return tween;
-			},
-			stop: function( gotoEnd ) {
-				var index = 0,
-
-					// If we are going to the end, we want to run all the tweens
-					// otherwise we skip this part
-					length = gotoEnd ? animation.tweens.length : 0;
-				if ( stopped ) {
-					return this;
-				}
-				stopped = true;
-				for ( ; index < length; index++ ) {
-					animation.tweens[ index ].run( 1 );
-				}
-
-				// Resolve when we played the last frame; otherwise, reject
-				if ( gotoEnd ) {
-					deferred.notifyWith( elem, [ animation, 1, 0 ] );
-					deferred.resolveWith( elem, [ animation, gotoEnd ] );
-				} else {
-					deferred.rejectWith( elem, [ animation, gotoEnd ] );
-				}
-				return this;
-			}
-		} ),
-		props = animation.props;
-
-	propFilter( props, animation.opts.specialEasing );
-
-	for ( ; index < length; index++ ) {
-		result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts );
-		if ( result ) {
-			if ( isFunction( result.stop ) ) {
-				jQuery._queueHooks( animation.elem, animation.opts.queue ).stop =
-					result.stop.bind( result );
-			}
-			return result;
-		}
-	}
-
-	jQuery.map( props, createTween, animation );
-
-	if ( isFunction( animation.opts.start ) ) {
-		animation.opts.start.call( elem, animation );
-	}
-
-	// Attach callbacks from options
-	animation
-		.progress( animation.opts.progress )
-		.done( animation.opts.done, animation.opts.complete )
-		.fail( animation.opts.fail )
-		.always( animation.opts.always );
-
-	jQuery.fx.timer(
-		jQuery.extend( tick, {
-			elem: elem,
-			anim: animation,
-			queue: animation.opts.queue
-		} )
-	);
-
-	return animation;
-}
-
-jQuery.Animation = jQuery.extend( Animation, {
-
-	tweeners: {
-		"*": [ function( prop, value ) {
-			var tween = this.createTween( prop, value );
-			adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween );
-			return tween;
-		} ]
-	},
-
-	tweener: function( props, callback ) {
-		if ( isFunction( props ) ) {
-			callback = props;
-			props = [ "*" ];
-		} else {
-			props = props.match( rnothtmlwhite );
-		}
-
-		var prop,
-			index = 0,
-			length = props.length;
-
-		for ( ; index < length; index++ ) {
-			prop = props[ index ];
-			Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || [];
-			Animation.tweeners[ prop ].unshift( callback );
-		}
-	},
-
-	prefilters: [ defaultPrefilter ],
-
-	prefilter: function( callback, prepend ) {
-		if ( prepend ) {
-			Animation.prefilters.unshift( callback );
-		} else {
-			Animation.prefilters.push( callback );
-		}
-	}
-} );
-
-jQuery.speed = function( speed, easing, fn ) {
-	var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
-		complete: fn || !fn && easing ||
-			isFunction( speed ) && speed,
-		duration: speed,
-		easing: fn && easing || easing && !isFunction( easing ) && easing
-	};
-
-	// Go to the end state if fx are off
-	if ( jQuery.fx.off ) {
-		opt.duration = 0;
-
-	} else {
-		if ( typeof opt.duration !== "number" ) {
-			if ( opt.duration in jQuery.fx.speeds ) {
-				opt.duration = jQuery.fx.speeds[ opt.duration ];
-
-			} else {
-				opt.duration = jQuery.fx.speeds._default;
-			}
-		}
-	}
-
-	// Normalize opt.queue - true/undefined/null -> "fx"
-	if ( opt.queue == null || opt.queue === true ) {
-		opt.queue = "fx";
-	}
-
-	// Queueing
-	opt.old = opt.complete;
-
-	opt.complete = function() {
-		if ( isFunction( opt.old ) ) {
-			opt.old.call( this );
-		}
-
-		if ( opt.queue ) {
-			jQuery.dequeue( this, opt.queue );
-		}
-	};
-
-	return opt;
-};
-
-jQuery.fn.extend( {
-	fadeTo: function( speed, to, easing, callback ) {
-
-		// Show any hidden elements after setting opacity to 0
-		return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show()
-
-			// Animate to the value specified
-			.end().animate( { opacity: to }, speed, easing, callback );
-	},
-	animate: function( prop, speed, easing, callback ) {
-		var empty = jQuery.isEmptyObject( prop ),
-			optall = jQuery.speed( speed, easing, callback ),
-			doAnimation = function() {
-
-				// Operate on a copy of prop so per-property easing won't be lost
-				var anim = Animation( this, jQuery.extend( {}, prop ), optall );
-
-				// Empty animations, or finishing resolves immediately
-				if ( empty || dataPriv.get( this, "finish" ) ) {
-					anim.stop( true );
-				}
-			};
-			doAnimation.finish = doAnimation;
-
-		return empty || optall.queue === false ?
-			this.each( doAnimation ) :
-			this.queue( optall.queue, doAnimation );
-	},
-	stop: function( type, clearQueue, gotoEnd ) {
-		var stopQueue = function( hooks ) {
-			var stop = hooks.stop;
-			delete hooks.stop;
-			stop( gotoEnd );
-		};
-
-		if ( typeof type !== "string" ) {
-			gotoEnd = clearQueue;
-			clearQueue = type;
-			type = undefined;
-		}
-		if ( clearQueue ) {
-			this.queue( type || "fx", [] );
-		}
-
-		return this.each( function() {
-			var dequeue = true,
-				index = type != null && type + "queueHooks",
-				timers = jQuery.timers,
-				data = dataPriv.get( this );
-
-			if ( index ) {
-				if ( data[ index ] && data[ index ].stop ) {
-					stopQueue( data[ index ] );
-				}
-			} else {
-				for ( index in data ) {
-					if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
-						stopQueue( data[ index ] );
-					}
-				}
-			}
-
-			for ( index = timers.length; index--; ) {
-				if ( timers[ index ].elem === this &&
-					( type == null || timers[ index ].queue === type ) ) {
-
-					timers[ index ].anim.stop( gotoEnd );
-					dequeue = false;
-					timers.splice( index, 1 );
-				}
-			}
-
-			// Start the next in the queue if the last step wasn't forced.
-			// Timers currently will call their complete callbacks, which
-			// will dequeue but only if they were gotoEnd.
-			if ( dequeue || !gotoEnd ) {
-				jQuery.dequeue( this, type );
-			}
-		} );
-	},
-	finish: function( type ) {
-		if ( type !== false ) {
-			type = type || "fx";
-		}
-		return this.each( function() {
-			var index,
-				data = dataPriv.get( this ),
-				queue = data[ type + "queue" ],
-				hooks = data[ type + "queueHooks" ],
-				timers = jQuery.timers,
-				length = queue ? queue.length : 0;
-
-			// Enable finishing flag on private data
-			data.finish = true;
-
-			// Empty the queue first
-			jQuery.queue( this, type, [] );
-
-			if ( hooks && hooks.stop ) {
-				hooks.stop.call( this, true );
-			}
-
-			// Look for any active animations, and finish them
-			for ( index = timers.length; index--; ) {
-				if ( timers[ index ].elem === this && timers[ index ].queue === type ) {
-					timers[ index ].anim.stop( true );
-					timers.splice( index, 1 );
-				}
-			}
-
-			// Look for any animations in the old queue and finish them
-			for ( index = 0; index < length; index++ ) {
-				if ( queue[ index ] && queue[ index ].finish ) {
-					queue[ index ].finish.call( this );
-				}
-			}
-
-			// Turn off finishing flag
-			delete data.finish;
-		} );
-	}
-} );
-
-jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) {
-	var cssFn = jQuery.fn[ name ];
-	jQuery.fn[ name ] = function( speed, easing, callback ) {
-		return speed == null || typeof speed === "boolean" ?
-			cssFn.apply( this, arguments ) :
-			this.animate( genFx( name, true ), speed, easing, callback );
-	};
-} );
-
-// Generate shortcuts for custom animations
-jQuery.each( {
-	slideDown: genFx( "show" ),
-	slideUp: genFx( "hide" ),
-	slideToggle: genFx( "toggle" ),
-	fadeIn: { opacity: "show" },
-	fadeOut: { opacity: "hide" },
-	fadeToggle: { opacity: "toggle" }
-}, function( name, props ) {
-	jQuery.fn[ name ] = function( speed, easing, callback ) {
-		return this.animate( props, speed, easing, callback );
-	};
-} );
-
-jQuery.timers = [];
-jQuery.fx.tick = function() {
-	var timer,
-		i = 0,
-		timers = jQuery.timers;
-
-	fxNow = Date.now();
-
-	for ( ; i < timers.length; i++ ) {
-		timer = timers[ i ];
-
-		// Run the timer and safely remove it when done (allowing for external removal)
-		if ( !timer() && timers[ i ] === timer ) {
-			timers.splice( i--, 1 );
-		}
-	}
-
-	if ( !timers.length ) {
-		jQuery.fx.stop();
-	}
-	fxNow = undefined;
-};
-
-jQuery.fx.timer = function( timer ) {
-	jQuery.timers.push( timer );
-	jQuery.fx.start();
-};
-
-jQuery.fx.interval = 13;
-jQuery.fx.start = function() {
-	if ( inProgress ) {
-		return;
-	}
-
-	inProgress = true;
-	schedule();
-};
-
-jQuery.fx.stop = function() {
-	inProgress = null;
-};
-
-jQuery.fx.speeds = {
-	slow: 600,
-	fast: 200,
-
-	// Default speed
-	_default: 400
-};
-
-
-// Based off of the plugin by Clint Helfers, with permission.
-// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/
-jQuery.fn.delay = function( time, type ) {
-	time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
-	type = type || "fx";
-
-	return this.queue( type, function( next, hooks ) {
-		var timeout = window.setTimeout( next, time );
-		hooks.stop = function() {
-			window.clearTimeout( timeout );
-		};
-	} );
-};
-
-
-( function() {
-	var input = document.createElement( "input" ),
-		select = document.createElement( "select" ),
-		opt = select.appendChild( document.createElement( "option" ) );
-
-	input.type = "checkbox";
-
-	// Support: Android <=4.3 only
-	// Default value for a checkbox should be "on"
-	support.checkOn = input.value !== "";
-
-	// Support: IE <=11 only
-	// Must access selectedIndex to make the default option selected
-	support.optSelected = opt.selected;
-
-	// Support: IE <=11 only
-	// An input loses its value after becoming a radio
-	input = document.createElement( "input" );
-	input.value = "t";
-	input.type = "radio";
-	support.radioValue = input.value === "t";
-} )();
-
-
-var boolHook,
-	attrHandle = jQuery.expr.attrHandle;
-
-jQuery.fn.extend( {
-	attr: function( name, value ) {
-		return access( this, jQuery.attr, name, value, arguments.length > 1 );
-	},
-
-	removeAttr: function( name ) {
-		return this.each( function() {
-			jQuery.removeAttr( this, name );
-		} );
-	}
-} );
-
-jQuery.extend( {
-	attr: function( elem, name, value ) {
-		var ret, hooks,
-			nType = elem.nodeType;
-
-		// Don't get/set attributes on text, comment and attribute nodes
-		if ( nType === 3 || nType === 8 || nType === 2 ) {
-			return;
-		}
-
-		// Fallback to prop when attributes are not supported
-		if ( typeof elem.getAttribute === "undefined" ) {
-			return jQuery.prop( elem, name, value );
-		}
-
-		// Attribute hooks are determined by the lowercase version
-		// Grab necessary hook if one is defined
-		if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
-			hooks = jQuery.attrHooks[ name.toLowerCase() ] ||
-				( jQuery.expr.match.bool.test( name ) ? boolHook : undefined );
-		}
-
-		if ( value !== undefined ) {
-			if ( value === null ) {
-				jQuery.removeAttr( elem, name );
-				return;
-			}
-
-			if ( hooks && "set" in hooks &&
-				( ret = hooks.set( elem, value, name ) ) !== undefined ) {
-				return ret;
-			}
-
-			elem.setAttribute( name, value + "" );
-			return value;
-		}
-
-		if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
-			return ret;
-		}
-
-		ret = jQuery.find.attr( elem, name );
-
-		// Non-existent attributes return null, we normalize to undefined
-		return ret == null ? undefined : ret;
-	},
-
-	attrHooks: {
-		type: {
-			set: function( elem, value ) {
-				if ( !support.radioValue && value === "radio" &&
-					nodeName( elem, "input" ) ) {
-					var val = elem.value;
-					elem.setAttribute( "type", value );
-					if ( val ) {
-						elem.value = val;
-					}
-					return value;
-				}
-			}
-		}
-	},
-
-	removeAttr: function( elem, value ) {
-		var name,
-			i = 0,
-
-			// Attribute names can contain non-HTML whitespace characters
-			// https://html.spec.whatwg.org/multipage/syntax.html#attributes-2
-			attrNames = value && value.match( rnothtmlwhite );
-
-		if ( attrNames && elem.nodeType === 1 ) {
-			while ( ( name = attrNames[ i++ ] ) ) {
-				elem.removeAttribute( name );
-			}
-		}
-	}
-} );
-
-// Hooks for boolean attributes
-boolHook = {
-	set: function( elem, value, name ) {
-		if ( value === false ) {
-
-			// Remove boolean attributes when set to false
-			jQuery.removeAttr( elem, name );
-		} else {
-			elem.setAttribute( name, name );
-		}
-		return name;
-	}
-};
-
-jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) {
-	var getter = attrHandle[ name ] || jQuery.find.attr;
-
-	attrHandle[ name ] = function( elem, name, isXML ) {
-		var ret, handle,
-			lowercaseName = name.toLowerCase();
-
-		if ( !isXML ) {
-
-			// Avoid an infinite loop by temporarily removing this function from the getter
-			handle = attrHandle[ lowercaseName ];
-			attrHandle[ lowercaseName ] = ret;
-			ret = getter( elem, name, isXML ) != null ?
-				lowercaseName :
-				null;
-			attrHandle[ lowercaseName ] = handle;
-		}
-		return ret;
-	};
-} );
-
-
-
-
-var rfocusable = /^(?:input|select|textarea|button)$/i,
-	rclickable = /^(?:a|area)$/i;
-
-jQuery.fn.extend( {
-	prop: function( name, value ) {
-		return access( this, jQuery.prop, name, value, arguments.length > 1 );
-	},
-
-	removeProp: function( name ) {
-		return this.each( function() {
-			delete this[ jQuery.propFix[ name ] || name ];
-		} );
-	}
-} );
-
-jQuery.extend( {
-	prop: function( elem, name, value ) {
-		var ret, hooks,
-			nType = elem.nodeType;
-
-		// Don't get/set properties on text, comment and attribute nodes
-		if ( nType === 3 || nType === 8 || nType === 2 ) {
-			return;
-		}
-
-		if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
-
-			// Fix name and attach hooks
-			name = jQuery.propFix[ name ] || name;
-			hooks = jQuery.propHooks[ name ];
-		}
-
-		if ( value !== undefined ) {
-			if ( hooks && "set" in hooks &&
-				( ret = hooks.set( elem, value, name ) ) !== undefined ) {
-				return ret;
-			}
-
-			return ( elem[ name ] = value );
-		}
-
-		if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
-			return ret;
-		}
-
-		return elem[ name ];
-	},
-
-	propHooks: {
-		tabIndex: {
-			get: function( elem ) {
-
-				// Support: IE <=9 - 11 only
-				// elem.tabIndex doesn't always return the
-				// correct value when it hasn't been explicitly set
-				// https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
-				// Use proper attribute retrieval (#12072)
-				var tabindex = jQuery.find.attr( elem, "tabindex" );
-
-				if ( tabindex ) {
-					return parseInt( tabindex, 10 );
-				}
-
-				if (
-					rfocusable.test( elem.nodeName ) ||
-					rclickable.test( elem.nodeName ) &&
-					elem.href
-				) {
-					return 0;
-				}
-
-				return -1;
-			}
-		}
-	},
-
-	propFix: {
-		"for": "htmlFor",
-		"class": "className"
-	}
-} );
-
-// Support: IE <=11 only
-// Accessing the selectedIndex property
-// forces the browser to respect setting selected
-// on the option
-// The getter ensures a default option is selected
-// when in an optgroup
-// eslint rule "no-unused-expressions" is disabled for this code
-// since it considers such property accesses no-ops
-if ( !support.optSelected ) {
-	jQuery.propHooks.selected = {
-		get: function( elem ) {
-
-			/* eslint no-unused-expressions: "off" */
-
-			var parent = elem.parentNode;
-			if ( parent && parent.parentNode ) {
-				parent.parentNode.selectedIndex;
-			}
-			return null;
-		},
-		set: function( elem ) {
-
-			/* eslint no-unused-expressions: "off" */
-
-			var parent = elem.parentNode;
-			if ( parent ) {
-				parent.selectedIndex;
-
-				if ( parent.parentNode ) {
-					parent.parentNode.selectedIndex;
-				}
-			}
-		}
-	};
-}
-
-jQuery.each( [
-	"tabIndex",
-	"readOnly",
-	"maxLength",
-	"cellSpacing",
-	"cellPadding",
-	"rowSpan",
-	"colSpan",
-	"useMap",
-	"frameBorder",
-	"contentEditable"
-], function() {
-	jQuery.propFix[ this.toLowerCase() ] = this;
-} );
-
-
-
-
-	// Strip and collapse whitespace according to HTML spec
-	// https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace
-	function stripAndCollapse( value ) {
-		var tokens = value.match( rnothtmlwhite ) || [];
-		return tokens.join( " " );
-	}
-
-
-function getClass( elem ) {
-	return elem.getAttribute && elem.getAttribute( "class" ) || "";
-}
-
-function classesToArray( value ) {
-	if ( Array.isArray( value ) ) {
-		return value;
-	}
-	if ( typeof value === "string" ) {
-		return value.match( rnothtmlwhite ) || [];
-	}
-	return [];
-}
-
-jQuery.fn.extend( {
-	addClass: function( value ) {
-		var classes, elem, cur, curValue, clazz, j, finalValue,
-			i = 0;
-
-		if ( isFunction( value ) ) {
-			return this.each( function( j ) {
-				jQuery( this ).addClass( value.call( this, j, getClass( this ) ) );
-			} );
-		}
-
-		classes = classesToArray( value );
-
-		if ( classes.length ) {
-			while ( ( elem = this[ i++ ] ) ) {
-				curValue = getClass( elem );
-				cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
-
-				if ( cur ) {
-					j = 0;
-					while ( ( clazz = classes[ j++ ] ) ) {
-						if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
-							cur += clazz + " ";
-						}
-					}
-
-					// Only assign if different to avoid unneeded rendering.
-					finalValue = stripAndCollapse( cur );
-					if ( curValue !== finalValue ) {
-						elem.setAttribute( "class", finalValue );
-					}
-				}
-			}
-		}
-
-		return this;
-	},
-
-	removeClass: function( value ) {
-		var classes, elem, cur, curValue, clazz, j, finalValue,
-			i = 0;
-
-		if ( isFunction( value ) ) {
-			return this.each( function( j ) {
-				jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) );
-			} );
-		}
-
-		if ( !arguments.length ) {
-			return this.attr( "class", "" );
-		}
-
-		classes = classesToArray( value );
-
-		if ( classes.length ) {
-			while ( ( elem = this[ i++ ] ) ) {
-				curValue = getClass( elem );
-
-				// This expression is here for better compressibility (see addClass)
-				cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
-
-				if ( cur ) {
-					j = 0;
-					while ( ( clazz = classes[ j++ ] ) ) {
-
-						// Remove *all* instances
-						while ( cur.indexOf( " " + clazz + " " ) > -1 ) {
-							cur = cur.replace( " " + clazz + " ", " " );
-						}
-					}
-
-					// Only assign if different to avoid unneeded rendering.
-					finalValue = stripAndCollapse( cur );
-					if ( curValue !== finalValue ) {
-						elem.setAttribute( "class", finalValue );
-					}
-				}
-			}
-		}
-
-		return this;
-	},
-
-	toggleClass: function( value, stateVal ) {
-		var type = typeof value,
-			isValidValue = type === "string" || Array.isArray( value );
-
-		if ( typeof stateVal === "boolean" && isValidValue ) {
-			return stateVal ? this.addClass( value ) : this.removeClass( value );
-		}
-
-		if ( isFunction( value ) ) {
-			return this.each( function( i ) {
-				jQuery( this ).toggleClass(
-					value.call( this, i, getClass( this ), stateVal ),
-					stateVal
-				);
-			} );
-		}
-
-		return this.each( function() {
-			var className, i, self, classNames;
-
-			if ( isValidValue ) {
-
-				// Toggle individual class names
-				i = 0;
-				self = jQuery( this );
-				classNames = classesToArray( value );
-
-				while ( ( className = classNames[ i++ ] ) ) {
-
-					// Check each className given, space separated list
-					if ( self.hasClass( className ) ) {
-						self.removeClass( className );
-					} else {
-						self.addClass( className );
-					}
-				}
-
-			// Toggle whole class name
-			} else if ( value === undefined || type === "boolean" ) {
-				className = getClass( this );
-				if ( className ) {
-
-					// Store className if set
-					dataPriv.set( this, "__className__", className );
-				}
-
-				// If the element has a class name or if we're passed `false`,
-				// then remove the whole classname (if there was one, the above saved it).
-				// Otherwise bring back whatever was previously saved (if anything),
-				// falling back to the empty string if nothing was stored.
-				if ( this.setAttribute ) {
-					this.setAttribute( "class",
-						className || value === false ?
-						"" :
-						dataPriv.get( this, "__className__" ) || ""
-					);
-				}
-			}
-		} );
-	},
-
-	hasClass: function( selector ) {
-		var className, elem,
-			i = 0;
-
-		className = " " + selector + " ";
-		while ( ( elem = this[ i++ ] ) ) {
-			if ( elem.nodeType === 1 &&
-				( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) {
-					return true;
-			}
-		}
-
-		return false;
-	}
-} );
-
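As a quick orientation to the class-manipulation API deleted above, a minimal usage sketch (the `#box` element and class names are illustrative, not taken from this file):

	// <div id="box" class="a  b"></div>
	jQuery( "#box" ).addClass( "c d" );               // class becomes "a b c d" (whitespace collapsed)
	jQuery( "#box" ).removeClass( [ "a", "d" ] );     // arrays of class names are accepted too
	jQuery( "#box" ).toggleClass( "active", true );   // a boolean second argument forces add/remove
	jQuery( "#box" ).addClass( function( i, current ) {
		// function values receive the index and the current class string
		return current.indexOf( "b" ) > -1 ? "has-b" : "";
	} );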
-
-
-
-var rreturn = /\r/g;
-
-jQuery.fn.extend( {
-	val: function( value ) {
-		var hooks, ret, valueIsFunction,
-			elem = this[ 0 ];
-
-		if ( !arguments.length ) {
-			if ( elem ) {
-				hooks = jQuery.valHooks[ elem.type ] ||
-					jQuery.valHooks[ elem.nodeName.toLowerCase() ];
-
-				if ( hooks &&
-					"get" in hooks &&
-					( ret = hooks.get( elem, "value" ) ) !== undefined
-				) {
-					return ret;
-				}
-
-				ret = elem.value;
-
-				// Handle most common string cases
-				if ( typeof ret === "string" ) {
-					return ret.replace( rreturn, "" );
-				}
-
-				// Handle cases where value is null/undef or number
-				return ret == null ? "" : ret;
-			}
-
-			return;
-		}
-
-		valueIsFunction = isFunction( value );
-
-		return this.each( function( i ) {
-			var val;
-
-			if ( this.nodeType !== 1 ) {
-				return;
-			}
-
-			if ( valueIsFunction ) {
-				val = value.call( this, i, jQuery( this ).val() );
-			} else {
-				val = value;
-			}
-
-			// Treat null/undefined as ""; convert numbers to string
-			if ( val == null ) {
-				val = "";
-
-			} else if ( typeof val === "number" ) {
-				val += "";
-
-			} else if ( Array.isArray( val ) ) {
-				val = jQuery.map( val, function( value ) {
-					return value == null ? "" : value + "";
-				} );
-			}
-
-			hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
-
-			// If set returns undefined, fall back to normal setting
-			if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) {
-				this.value = val;
-			}
-		} );
-	}
-} );
-
-jQuery.extend( {
-	valHooks: {
-		option: {
-			get: function( elem ) {
-
-				var val = jQuery.find.attr( elem, "value" );
-				return val != null ?
-					val :
-
-					// Support: IE <=10 - 11 only
-					// option.text throws exceptions (#14686, #14858)
-					// Strip and collapse whitespace
-					// https://html.spec.whatwg.org/#strip-and-collapse-whitespace
-					stripAndCollapse( jQuery.text( elem ) );
-			}
-		},
-		select: {
-			get: function( elem ) {
-				var value, option, i,
-					options = elem.options,
-					index = elem.selectedIndex,
-					one = elem.type === "select-one",
-					values = one ? null : [],
-					max = one ? index + 1 : options.length;
-
-				if ( index < 0 ) {
-					i = max;
-
-				} else {
-					i = one ? index : 0;
-				}
-
-				// Loop through all the selected options
-				for ( ; i < max; i++ ) {
-					option = options[ i ];
-
-					// Support: IE <=9 only
-					// IE8-9 doesn't update selected after form reset (#2551)
-					if ( ( option.selected || i === index ) &&
-
-							// Don't return options that are disabled or in a disabled optgroup
-							!option.disabled &&
-							( !option.parentNode.disabled ||
-								!nodeName( option.parentNode, "optgroup" ) ) ) {
-
-						// Get the specific value for the option
-						value = jQuery( option ).val();
-
-						// We don't need an array for select-one elements
-						if ( one ) {
-							return value;
-						}
-
-						// Multi-Selects return an array
-						values.push( value );
-					}
-				}
-
-				return values;
-			},
-
-			set: function( elem, value ) {
-				var optionSet, option,
-					options = elem.options,
-					values = jQuery.makeArray( value ),
-					i = options.length;
-
-				while ( i-- ) {
-					option = options[ i ];
-
-					/* eslint-disable no-cond-assign */
-
-					if ( option.selected =
-						jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1
-					) {
-						optionSet = true;
-					}
-
-					/* eslint-enable no-cond-assign */
-				}
-
-				// Force browsers to behave consistently when non-matching value is set
-				if ( !optionSet ) {
-					elem.selectedIndex = -1;
-				}
-				return values;
-			}
-		}
-	}
-} );
-
-// Radios and checkboxes getter/setter
-jQuery.each( [ "radio", "checkbox" ], function() {
-	jQuery.valHooks[ this ] = {
-		set: function( elem, value ) {
-			if ( Array.isArray( value ) ) {
-				return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 );
-			}
-		}
-	};
-	if ( !support.checkOn ) {
-		jQuery.valHooks[ this ].get = function( elem ) {
-			return elem.getAttribute( "value" ) === null ? "on" : elem.value;
-		};
-	}
-} );
-
-
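And a short sketch of the `.val()` behavior implemented above (element IDs and option values are illustrative):

	// <input id="name" value="ada">, <select id="langs" multiple>...</select>
	jQuery( "#name" ).val();                    // "ada" (any \r characters are stripped)
	jQuery( "#name" ).val( 42 );                // numbers are converted to the string "42"
	jQuery( "#langs" ).val();                   // multi-selects return an array of selected values
	jQuery( "#langs" ).val( [ "js", "py" ] );   // selects exactly the options whose values match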
-
-
-// Return jQuery for attributes-only inclusion
-
-
-support.focusin = "onfocusin" in window;
-
-
-var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
-	stopPropagationCallback = function( e ) {
-		e.stopPropagation();
-	};
-
-jQuery.extend( jQuery.event, {
-
-	trigger: function( event, data, elem, onlyHandlers ) {
-
-		var i, cur, tmp, bubbleType, ontype, handle, special, lastElement,
-			eventPath = [ elem || document ],
-			type = hasOwn.call( event, "type" ) ? event.type : event,
-			namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : [];
-
-		cur = lastElement = tmp = elem = elem || document;
-
-		// Don't do events on text and comment nodes
-		if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
-			return;
-		}
-
-		// focus/blur morphs to focusin/out; ensure we're not firing them right now
-		if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
-			return;
-		}
-
-		if ( type.indexOf( "." ) > -1 ) {
-
-			// Namespaced trigger; create a regexp to match event type in handle()
-			namespaces = type.split( "." );
-			type = namespaces.shift();
-			namespaces.sort();
-		}
-		ontype = type.indexOf( ":" ) < 0 && "on" + type;
-
-		// Caller can pass in a jQuery.Event object, Object, or just an event type string
-		event = event[ jQuery.expando ] ?
-			event :
-			new jQuery.Event( type, typeof event === "object" && event );
-
-		// Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true)
-		event.isTrigger = onlyHandlers ? 2 : 3;
-		event.namespace = namespaces.join( "." );
-		event.rnamespace = event.namespace ?
-			new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) :
-			null;
-
-		// Clean up the event in case it is being reused
-		event.result = undefined;
-		if ( !event.target ) {
-			event.target = elem;
-		}
-
-		// Clone any incoming data and prepend the event, creating the handler arg list
-		data = data == null ?
-			[ event ] :
-			jQuery.makeArray( data, [ event ] );
-
-		// Allow special events to draw outside the lines
-		special = jQuery.event.special[ type ] || {};
-		if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) {
-			return;
-		}
-
-		// Determine event propagation path in advance, per W3C events spec (#9951)
-		// Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
-		if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) {
-
-			bubbleType = special.delegateType || type;
-			if ( !rfocusMorph.test( bubbleType + type ) ) {
-				cur = cur.parentNode;
-			}
-			for ( ; cur; cur = cur.parentNode ) {
-				eventPath.push( cur );
-				tmp = cur;
-			}
-
-			// Only add window if we got to document (e.g., not plain obj or detached DOM)
-			if ( tmp === ( elem.ownerDocument || document ) ) {
-				eventPath.push( tmp.defaultView || tmp.parentWindow || window );
-			}
-		}
-
-		// Fire handlers on the event path
-		i = 0;
-		while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) {
-			lastElement = cur;
-			event.type = i > 1 ?
-				bubbleType :
-				special.bindType || type;
-
-			// jQuery handler
-			handle = (
-					dataPriv.get( cur, "events" ) || Object.create( null )
-				)[ event.type ] &&
-				dataPriv.get( cur, "handle" );
-			if ( handle ) {
-				handle.apply( cur, data );
-			}
-
-			// Native handler
-			handle = ontype && cur[ ontype ];
-			if ( handle && handle.apply && acceptData( cur ) ) {
-				event.result = handle.apply( cur, data );
-				if ( event.result === false ) {
-					event.preventDefault();
-				}
-			}
-		}
-		event.type = type;
-
-		// If nobody prevented the default action, do it now
-		if ( !onlyHandlers && !event.isDefaultPrevented() ) {
-
-			if ( ( !special._default ||
-				special._default.apply( eventPath.pop(), data ) === false ) &&
-				acceptData( elem ) ) {
-
-				// Call a native DOM method on the target with the same name as the event.
-				// Don't do default actions on window, that's where global variables be (#6170)
-				if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) {
-
-					// Don't re-trigger an onFOO event when we call its FOO() method
-					tmp = elem[ ontype ];
-
-					if ( tmp ) {
-						elem[ ontype ] = null;
-					}
-
-					// Prevent re-triggering of the same event, since we already bubbled it above
-					jQuery.event.triggered = type;
-
-					if ( event.isPropagationStopped() ) {
-						lastElement.addEventListener( type, stopPropagationCallback );
-					}
-
-					elem[ type ]();
-
-					if ( event.isPropagationStopped() ) {
-						lastElement.removeEventListener( type, stopPropagationCallback );
-					}
-
-					jQuery.event.triggered = undefined;
-
-					if ( tmp ) {
-						elem[ ontype ] = tmp;
-					}
-				}
-			}
-		}
-
-		return event.result;
-	},
-
-	// Piggyback on a donor event to simulate a different one
-	// Used only for `focus(in | out)` events
-	simulate: function( type, elem, event ) {
-		var e = jQuery.extend(
-			new jQuery.Event(),
-			event,
-			{
-				type: type,
-				isSimulated: true
-			}
-		);
-
-		jQuery.event.trigger( e, null, elem );
-	}
-
-} );
-
-jQuery.fn.extend( {
-
-	trigger: function( type, data ) {
-		return this.each( function() {
-			jQuery.event.trigger( type, data, this );
-		} );
-	},
-	triggerHandler: function( type, data ) {
-		var elem = this[ 0 ];
-		if ( elem ) {
-			return jQuery.event.trigger( type, data, elem, true );
-		}
-	}
-} );
-
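The difference between the two wrappers above, sketched (element, handler, and event names are illustrative):

	jQuery( "#save" ).on( "myplugin:done", function() {
		return "handled";
	} );

	jQuery( "#save" ).trigger( "myplugin:done" );        // fires on every element, bubbles, runs any default action
	jQuery( "#save" ).triggerHandler( "myplugin:done" ); // first element only, no bubbling; returns "handled"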
-
-// Support: Firefox <=44
-// Firefox doesn't have focus(in | out) events
-// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787
-//
-// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1
-// focus(in | out) events fire after focus & blur events,
-// which is a spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order
-// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857
-if ( !support.focusin ) {
-	jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) {
-
-		// Attach a single capturing handler on the document while someone wants focusin/focusout
-		var handler = function( event ) {
-			jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) );
-		};
-
-		jQuery.event.special[ fix ] = {
-			setup: function() {
-
-				// Handle: regular nodes (via `this.ownerDocument`), window
-				// (via `this.document`) & document (via `this`).
-				var doc = this.ownerDocument || this.document || this,
-					attaches = dataPriv.access( doc, fix );
-
-				if ( !attaches ) {
-					doc.addEventListener( orig, handler, true );
-				}
-				dataPriv.access( doc, fix, ( attaches || 0 ) + 1 );
-			},
-			teardown: function() {
-				var doc = this.ownerDocument || this.document || this,
-					attaches = dataPriv.access( doc, fix ) - 1;
-
-				if ( !attaches ) {
-					doc.removeEventListener( orig, handler, true );
-					dataPriv.remove( doc, fix );
-
-				} else {
-					dataPriv.access( doc, fix, attaches );
-				}
-			}
-		};
-	} );
-}
-var location = window.location;
-
-var nonce = { guid: Date.now() };
-
-var rquery = ( /\?/ );
-
-
-
-// Cross-browser xml parsing
-jQuery.parseXML = function( data ) {
-	var xml;
-	if ( !data || typeof data !== "string" ) {
-		return null;
-	}
-
-	// Support: IE 9 - 11 only
-	// IE throws on parseFromString with invalid input.
-	try {
-		xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" );
-	} catch ( e ) {
-		xml = undefined;
-	}
-
-	if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) {
-		jQuery.error( "Invalid XML: " + data );
-	}
-	return xml;
-};
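
A minimal sketch of the parser above:

	var doc = jQuery.parseXML( "<root><item id=\"1\" /></root>" );
	jQuery( doc ).find( "item" ).attr( "id" );   // "1"

	jQuery.parseXML( "" );                       // null for falsy or non-string input
	jQuery.parseXML( "<broken" );                // throws "Invalid XML: <broken"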
-
-
-var
-	rbracket = /\[\]$/,
-	rCRLF = /\r?\n/g,
-	rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i,
-	rsubmittable = /^(?:input|select|textarea|keygen)/i;
-
-function buildParams( prefix, obj, traditional, add ) {
-	var name;
-
-	if ( Array.isArray( obj ) ) {
-
-		// Serialize array item.
-		jQuery.each( obj, function( i, v ) {
-			if ( traditional || rbracket.test( prefix ) ) {
-
-				// Treat each array item as a scalar.
-				add( prefix, v );
-
-			} else {
-
-				// Item is non-scalar (array or object), encode its numeric index.
-				buildParams(
-					prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]",
-					v,
-					traditional,
-					add
-				);
-			}
-		} );
-
-	} else if ( !traditional && toType( obj ) === "object" ) {
-
-		// Serialize object item.
-		for ( name in obj ) {
-			buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
-		}
-
-	} else {
-
-		// Serialize scalar item.
-		add( prefix, obj );
-	}
-}
-
-// Serialize an array of form elements or a set of
-// key/values into a query string
-jQuery.param = function( a, traditional ) {
-	var prefix,
-		s = [],
-		add = function( key, valueOrFunction ) {
-
-			// If value is a function, invoke it and use its return value
-			var value = isFunction( valueOrFunction ) ?
-				valueOrFunction() :
-				valueOrFunction;
-
-			s[ s.length ] = encodeURIComponent( key ) + "=" +
-				encodeURIComponent( value == null ? "" : value );
-		};
-
-	if ( a == null ) {
-		return "";
-	}
-
-	// If an array was passed in, assume that it is an array of form elements.
-	if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
-
-		// Serialize the form elements
-		jQuery.each( a, function() {
-			add( this.name, this.value );
-		} );
-
-	} else {
-
-		// If traditional, encode the "old" way (the way 1.3.2 or older
-		// did it), otherwise encode params recursively.
-		for ( prefix in a ) {
-			buildParams( prefix, a[ prefix ], traditional, add );
-		}
-	}
-
-	// Return the resulting serialization
-	return s.join( "&" );
-};
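
A worked example of the serialization above, contrasting recursive and traditional encoding (decoded forms shown in the comments):

	jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );
	// "a%5B%5D=1&a%5B%5D=2&b%5Bc%5D=3"   i.e. a[]=1&a[]=2&b[c]=3

	jQuery.param( { a: [ 1, 2 ], b: { c: 3 } }, true );
	// "a=1&a=2&b=%5Bobject%20Object%5D"  traditional mode does not recurse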
-
-jQuery.fn.extend( {
-	serialize: function() {
-		return jQuery.param( this.serializeArray() );
-	},
-	serializeArray: function() {
-		return this.map( function() {
-
-			// Can add propHook for "elements" to filter or add form elements
-			var elements = jQuery.prop( this, "elements" );
-			return elements ? jQuery.makeArray( elements ) : this;
-		} )
-		.filter( function() {
-			var type = this.type;
-
-			// Use .is( ":disabled" ) so that fieldset[disabled] works
-			return this.name && !jQuery( this ).is( ":disabled" ) &&
-				rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) &&
-				( this.checked || !rcheckableType.test( type ) );
-		} )
-		.map( function( _i, elem ) {
-			var val = jQuery( this ).val();
-
-			if ( val == null ) {
-				return null;
-			}
-
-			if ( Array.isArray( val ) ) {
-				return jQuery.map( val, function( val ) {
-					return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
-				} );
-			}
-
-			return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
-		} ).get();
-	}
-} );
-
-
-var
-	r20 = /%20/g,
-	rhash = /#.*$/,
-	rantiCache = /([?&])_=[^&]*/,
-	rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg,
-
-	// #7653, #8125, #8152: local protocol detection
-	rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/,
-	rnoContent = /^(?:GET|HEAD)$/,
-	rprotocol = /^\/\//,
-
-	/* Prefilters
-	 * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
-	 * 2) These are called:
-	 *    - BEFORE asking for a transport
-	 *    - AFTER param serialization (s.data is a string if s.processData is true)
-	 * 3) key is the dataType
-	 * 4) the catchall symbol "*" can be used
-	 * 5) execution will start with transport dataType and THEN continue down to "*" if needed
-	 */
-	prefilters = {},
-
-	/* Transports bindings
-	 * 1) key is the dataType
-	 * 2) the catchall symbol "*" can be used
-	 * 3) selection will start with transport dataType and THEN go to "*" if needed
-	 */
-	transports = {},
-
-	// Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
-	allTypes = "*/".concat( "*" ),
-
-	// Anchor tag for parsing the document origin
-	originAnchor = document.createElement( "a" );
-	originAnchor.href = location.href;
-
-// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
-function addToPrefiltersOrTransports( structure ) {
-
-	// dataTypeExpression is optional and defaults to "*"
-	return function( dataTypeExpression, func ) {
-
-		if ( typeof dataTypeExpression !== "string" ) {
-			func = dataTypeExpression;
-			dataTypeExpression = "*";
-		}
-
-		var dataType,
-			i = 0,
-			dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || [];
-
-		if ( isFunction( func ) ) {
-
-			// For each dataType in the dataTypeExpression
-			while ( ( dataType = dataTypes[ i++ ] ) ) {
-
-				// Prepend if requested
-				if ( dataType[ 0 ] === "+" ) {
-					dataType = dataType.slice( 1 ) || "*";
-					( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func );
-
-				// Otherwise append
-				} else {
-					( structure[ dataType ] = structure[ dataType ] || [] ).push( func );
-				}
-			}
-		}
-	};
-}
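
Both `jQuery.ajaxPrefilter` and `jQuery.ajaxTransport` (assigned further below) are built from this factory. A registration sketch, with an illustrative handler body:

	jQuery.ajaxPrefilter( "json", function( options, originalOptions, jqXHR ) {
		// Runs after param serialization but before a transport is chosen
		if ( options.crossDomain ) {
			options.timeout = 5000;
		}
	} );

	// A "+" prefix prepends the handler so it runs before previously registered ones
	jQuery.ajaxPrefilter( "+json", function() {} );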
-
-// Base inspection function for prefilters and transports
-function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) {
-
-	var inspected = {},
-		seekingTransport = ( structure === transports );
-
-	function inspect( dataType ) {
-		var selected;
-		inspected[ dataType ] = true;
-		jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) {
-			var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR );
-			if ( typeof dataTypeOrTransport === "string" &&
-				!seekingTransport && !inspected[ dataTypeOrTransport ] ) {
-
-				options.dataTypes.unshift( dataTypeOrTransport );
-				inspect( dataTypeOrTransport );
-				return false;
-			} else if ( seekingTransport ) {
-				return !( selected = dataTypeOrTransport );
-			}
-		} );
-		return selected;
-	}
-
-	return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" );
-}
-
-// A special extend for ajax options
-// that takes "flat" options (not to be deep extended)
-// Fixes #9887
-function ajaxExtend( target, src ) {
-	var key, deep,
-		flatOptions = jQuery.ajaxSettings.flatOptions || {};
-
-	for ( key in src ) {
-		if ( src[ key ] !== undefined ) {
-			( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
-		}
-	}
-	if ( deep ) {
-		jQuery.extend( true, target, deep );
-	}
-
-	return target;
-}
-
-/* Handles responses to an ajax request:
- * - finds the right dataType (mediates between content-type and expected dataType)
- * - returns the corresponding response
- */
-function ajaxHandleResponses( s, jqXHR, responses ) {
-
-	var ct, type, finalDataType, firstDataType,
-		contents = s.contents,
-		dataTypes = s.dataTypes;
-
-	// Remove auto dataType and get content-type in the process
-	while ( dataTypes[ 0 ] === "*" ) {
-		dataTypes.shift();
-		if ( ct === undefined ) {
-			ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" );
-		}
-	}
-
-	// Check if we're dealing with a known content-type
-	if ( ct ) {
-		for ( type in contents ) {
-			if ( contents[ type ] && contents[ type ].test( ct ) ) {
-				dataTypes.unshift( type );
-				break;
-			}
-		}
-	}
-
-	// Check to see if we have a response for the expected dataType
-	if ( dataTypes[ 0 ] in responses ) {
-		finalDataType = dataTypes[ 0 ];
-	} else {
-
-		// Try convertible dataTypes
-		for ( type in responses ) {
-			if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) {
-				finalDataType = type;
-				break;
-			}
-			if ( !firstDataType ) {
-				firstDataType = type;
-			}
-		}
-
-		// Or just use first one
-		finalDataType = finalDataType || firstDataType;
-	}
-
-	// If we found a dataType
-	// We add the dataType to the list if needed
-	// and return the corresponding response
-	if ( finalDataType ) {
-		if ( finalDataType !== dataTypes[ 0 ] ) {
-			dataTypes.unshift( finalDataType );
-		}
-		return responses[ finalDataType ];
-	}
-}
-
-/* Chain conversions given the request and the original response
- * Also sets the responseXXX fields on the jqXHR instance
- */
-function ajaxConvert( s, response, jqXHR, isSuccess ) {
-	var conv2, current, conv, tmp, prev,
-		converters = {},
-
-		// Work with a copy of dataTypes in case we need to modify it for conversion
-		dataTypes = s.dataTypes.slice();
-
-	// Create converters map with lowercased keys
-	if ( dataTypes[ 1 ] ) {
-		for ( conv in s.converters ) {
-			converters[ conv.toLowerCase() ] = s.converters[ conv ];
-		}
-	}
-
-	current = dataTypes.shift();
-
-	// Convert to each sequential dataType
-	while ( current ) {
-
-		if ( s.responseFields[ current ] ) {
-			jqXHR[ s.responseFields[ current ] ] = response;
-		}
-
-		// Apply the dataFilter if provided
-		if ( !prev && isSuccess && s.dataFilter ) {
-			response = s.dataFilter( response, s.dataType );
-		}
-
-		prev = current;
-		current = dataTypes.shift();
-
-		if ( current ) {
-
-			// There's only work to do if current dataType is non-auto
-			if ( current === "*" ) {
-
-				current = prev;
-
-			// Convert response if prev dataType is non-auto and differs from current
-			} else if ( prev !== "*" && prev !== current ) {
-
-				// Seek a direct converter
-				conv = converters[ prev + " " + current ] || converters[ "* " + current ];
-
-				// If none found, seek a pair
-				if ( !conv ) {
-					for ( conv2 in converters ) {
-
-						// If conv2 outputs current
-						tmp = conv2.split( " " );
-						if ( tmp[ 1 ] === current ) {
-
-							// If prev can be converted to accepted input
-							conv = converters[ prev + " " + tmp[ 0 ] ] ||
-								converters[ "* " + tmp[ 0 ] ];
-							if ( conv ) {
-
-								// Condense equivalence converters
-								if ( conv === true ) {
-									conv = converters[ conv2 ];
-
-								// Otherwise, insert the intermediate dataType
-								} else if ( converters[ conv2 ] !== true ) {
-									current = tmp[ 0 ];
-									dataTypes.unshift( tmp[ 1 ] );
-								}
-								break;
-							}
-						}
-					}
-				}
-
-				// Apply converter (if not an equivalence)
-				if ( conv !== true ) {
-
-					// Unless errors are allowed to bubble, catch and return them
-					if ( conv && s.throws ) {
-						response = conv( response );
-					} else {
-						try {
-							response = conv( response );
-						} catch ( e ) {
-							return {
-								state: "parsererror",
-								error: conv ? e : "No conversion from " + prev + " to " + current
-							};
-						}
-					}
-				}
-			}
-		}
-	}
-
-	return { state: "success", data: response };
-}
-
-jQuery.extend( {
-
-	// Counter for holding the number of active queries
-	active: 0,
-
-	// Last-Modified header cache for next request
-	lastModified: {},
-	etag: {},
-
-	ajaxSettings: {
-		url: location.href,
-		type: "GET",
-		isLocal: rlocalProtocol.test( location.protocol ),
-		global: true,
-		processData: true,
-		async: true,
-		contentType: "application/x-www-form-urlencoded; charset=UTF-8",
-
-		/*
-		timeout: 0,
-		data: null,
-		dataType: null,
-		username: null,
-		password: null,
-		cache: null,
-		throws: false,
-		traditional: false,
-		headers: {},
-		*/
-
-		accepts: {
-			"*": allTypes,
-			text: "text/plain",
-			html: "text/html",
-			xml: "application/xml, text/xml",
-			json: "application/json, text/javascript"
-		},
-
-		contents: {
-			xml: /\bxml\b/,
-			html: /\bhtml/,
-			json: /\bjson\b/
-		},
-
-		responseFields: {
-			xml: "responseXML",
-			text: "responseText",
-			json: "responseJSON"
-		},
-
-		// Data converters
-		// Keys separate source (or catchall "*") and destination types with a single space
-		converters: {
-
-			// Convert anything to text
-			"* text": String,
-
-			// Text to html (true = no transformation)
-			"text html": true,
-
-			// Evaluate text as a json expression
-			"text json": JSON.parse,
-
-			// Parse text as xml
-			"text xml": jQuery.parseXML
-		},
-
-		// For options that shouldn't be deep extended:
-		// you can add your own custom options here if
-		// and when you create one that shouldn't be
-		// deep extended (see ajaxExtend)
-		flatOptions: {
-			url: true,
-			context: true
-		}
-	},
-
-	// Creates a full fledged settings object into target
-	// with both ajaxSettings and settings fields.
-	// If target is omitted, writes into ajaxSettings.
-	ajaxSetup: function( target, settings ) {
-		return settings ?
-
-			// Building a settings object
-			ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) :
-
-			// Extending ajaxSettings
-			ajaxExtend( jQuery.ajaxSettings, target );
-	},
-
-	ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
-	ajaxTransport: addToPrefiltersOrTransports( transports ),
-
-	// Main method
-	ajax: function( url, options ) {
-
-		// If url is an object, simulate pre-1.5 signature
-		if ( typeof url === "object" ) {
-			options = url;
-			url = undefined;
-		}
-
-		// Force options to be an object
-		options = options || {};
-
-		var transport,
-
-			// URL without anti-cache param
-			cacheURL,
-
-			// Response headers
-			responseHeadersString,
-			responseHeaders,
-
-			// timeout handle
-			timeoutTimer,
-
-			// Url cleanup var
-			urlAnchor,
-
-			// Request state (becomes false upon send and true upon completion)
-			completed,
-
-			// To know if global events are to be dispatched
-			fireGlobals,
-
-			// Loop variable
-			i,
-
-			// uncached part of the url
-			uncached,
-
-			// Create the final options object
-			s = jQuery.ajaxSetup( {}, options ),
-
-			// Callbacks context
-			callbackContext = s.context || s,
-
-			// Context for global events is callbackContext if it is a DOM node or jQuery collection
-			globalEventContext = s.context &&
-				( callbackContext.nodeType || callbackContext.jquery ) ?
-					jQuery( callbackContext ) :
-					jQuery.event,
-
-			// Deferreds
-			deferred = jQuery.Deferred(),
-			completeDeferred = jQuery.Callbacks( "once memory" ),
-
-			// Status-dependent callbacks
-			statusCode = s.statusCode || {},
-
-			// Headers (they are sent all at once)
-			requestHeaders = {},
-			requestHeadersNames = {},
-
-			// Default abort message
-			strAbort = "canceled",
-
-			// Fake xhr
-			jqXHR = {
-				readyState: 0,
-
-				// Builds headers hashtable if needed
-				getResponseHeader: function( key ) {
-					var match;
-					if ( completed ) {
-						if ( !responseHeaders ) {
-							responseHeaders = {};
-							while ( ( match = rheaders.exec( responseHeadersString ) ) ) {
-								responseHeaders[ match[ 1 ].toLowerCase() + " " ] =
-									( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] )
-										.concat( match[ 2 ] );
-							}
-						}
-						match = responseHeaders[ key.toLowerCase() + " " ];
-					}
-					return match == null ? null : match.join( ", " );
-				},
-
-				// Raw string
-				getAllResponseHeaders: function() {
-					return completed ? responseHeadersString : null;
-				},
-
-				// Caches the header
-				setRequestHeader: function( name, value ) {
-					if ( completed == null ) {
-						name = requestHeadersNames[ name.toLowerCase() ] =
-							requestHeadersNames[ name.toLowerCase() ] || name;
-						requestHeaders[ name ] = value;
-					}
-					return this;
-				},
-
-				// Overrides response content-type header
-				overrideMimeType: function( type ) {
-					if ( completed == null ) {
-						s.mimeType = type;
-					}
-					return this;
-				},
-
-				// Status-dependent callbacks
-				statusCode: function( map ) {
-					var code;
-					if ( map ) {
-						if ( completed ) {
-
-							// Execute the appropriate callbacks
-							jqXHR.always( map[ jqXHR.status ] );
-						} else {
-
-							// Lazy-add the new callbacks in a way that preserves old ones
-							for ( code in map ) {
-								statusCode[ code ] = [ statusCode[ code ], map[ code ] ];
-							}
-						}
-					}
-					return this;
-				},
-
-				// Cancel the request
-				abort: function( statusText ) {
-					var finalText = statusText || strAbort;
-					if ( transport ) {
-						transport.abort( finalText );
-					}
-					done( 0, finalText );
-					return this;
-				}
-			};
-
-		// Attach deferreds
-		deferred.promise( jqXHR );
-
-		// Add protocol if not provided (prefilters might expect it)
-		// Handle falsy url in the settings object (#10093: consistency with old signature)
-		// We also use the url parameter if available
-		s.url = ( ( url || s.url || location.href ) + "" )
-			.replace( rprotocol, location.protocol + "//" );
-
-		// Alias method option to type as per ticket #12004
-		s.type = options.method || options.type || s.method || s.type;
-
-		// Extract dataTypes list
-		s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ];
-
-		// A cross-domain request is in order when the origin doesn't match the current origin.
-		if ( s.crossDomain == null ) {
-			urlAnchor = document.createElement( "a" );
-
-			// Support: IE <=8 - 11, Edge 12 - 15
-			// IE throws exception on accessing the href property if url is malformed,
-			// e.g. http://example.com:80x/
-			try {
-				urlAnchor.href = s.url;
-
-				// Support: IE <=8 - 11 only
-				// Anchor's host property isn't correctly set when s.url is relative
-				urlAnchor.href = urlAnchor.href;
-				s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !==
-					urlAnchor.protocol + "//" + urlAnchor.host;
-			} catch ( e ) {
-
-				// If there is an error parsing the URL, assume it is crossDomain,
-				// it can be rejected by the transport if it is invalid
-				s.crossDomain = true;
-			}
-		}
-
-		// Convert data if not already a string
-		if ( s.data && s.processData && typeof s.data !== "string" ) {
-			s.data = jQuery.param( s.data, s.traditional );
-		}
-
-		// Apply prefilters
-		inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
-
-		// If request was aborted inside a prefilter, stop there
-		if ( completed ) {
-			return jqXHR;
-		}
-
-		// We can fire global events as of now if asked to
-		// Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118)
-		fireGlobals = jQuery.event && s.global;
-
-		// Watch for a new set of requests
-		if ( fireGlobals && jQuery.active++ === 0 ) {
-			jQuery.event.trigger( "ajaxStart" );
-		}
-
-		// Uppercase the type
-		s.type = s.type.toUpperCase();
-
-		// Determine if request has content
-		s.hasContent = !rnoContent.test( s.type );
-
-		// Save the URL in case we're toying with the If-Modified-Since
-		// and/or If-None-Match header later on
-		// Remove hash to simplify url manipulation
-		cacheURL = s.url.replace( rhash, "" );
-
-		// More options handling for requests with no content
-		if ( !s.hasContent ) {
-
-			// Remember the hash so we can put it back
-			uncached = s.url.slice( cacheURL.length );
-
-			// If data is available and should be processed, append data to url
-			if ( s.data && ( s.processData || typeof s.data === "string" ) ) {
-				cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data;
-
-				// #9682: remove data so that it's not used in an eventual retry
-				delete s.data;
-			}
-
-			// Add or update anti-cache param if needed
-			if ( s.cache === false ) {
-				cacheURL = cacheURL.replace( rantiCache, "$1" );
-				uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) +
-					uncached;
-			}
-
-			// Put hash and anti-cache on the URL that will be requested (gh-1732)
-			s.url = cacheURL + uncached;
-
-		// Change '%20' to '+' if this is encoded form body content (gh-2658)
-		} else if ( s.data && s.processData &&
-			( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) {
-			s.data = s.data.replace( r20, "+" );
-		}
-
-		// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
-		if ( s.ifModified ) {
-			if ( jQuery.lastModified[ cacheURL ] ) {
-				jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] );
-			}
-			if ( jQuery.etag[ cacheURL ] ) {
-				jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] );
-			}
-		}
-
-		// Set the correct header, if data is being sent
-		if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
-			jqXHR.setRequestHeader( "Content-Type", s.contentType );
-		}
-
-		// Set the Accepts header for the server, depending on the dataType
-		jqXHR.setRequestHeader(
-			"Accept",
-			s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ?
-				s.accepts[ s.dataTypes[ 0 ] ] +
-					( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
-				s.accepts[ "*" ]
-		);
-
-		// Check for headers option
-		for ( i in s.headers ) {
-			jqXHR.setRequestHeader( i, s.headers[ i ] );
-		}
-
-		// Allow custom headers/mimetypes and early abort
-		if ( s.beforeSend &&
-			( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) {
-
-			// Abort if not done already and return
-			return jqXHR.abort();
-		}
-
-		// Aborting is no longer a cancellation
-		strAbort = "abort";
-
-		// Install callbacks on deferreds
-		completeDeferred.add( s.complete );
-		jqXHR.done( s.success );
-		jqXHR.fail( s.error );
-
-		// Get transport
-		transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
-
-		// If no transport, we auto-abort
-		if ( !transport ) {
-			done( -1, "No Transport" );
-		} else {
-			jqXHR.readyState = 1;
-
-			// Send global event
-			if ( fireGlobals ) {
-				globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
-			}
-
-			// If request was aborted inside ajaxSend, stop there
-			if ( completed ) {
-				return jqXHR;
-			}
-
-			// Timeout
-			if ( s.async && s.timeout > 0 ) {
-				timeoutTimer = window.setTimeout( function() {
-					jqXHR.abort( "timeout" );
-				}, s.timeout );
-			}
-
-			try {
-				completed = false;
-				transport.send( requestHeaders, done );
-			} catch ( e ) {
-
-				// Rethrow post-completion exceptions
-				if ( completed ) {
-					throw e;
-				}
-
-				// Propagate others as results
-				done( -1, e );
-			}
-		}
-
-		// Callback for when everything is done
-		function done( status, nativeStatusText, responses, headers ) {
-			var isSuccess, success, error, response, modified,
-				statusText = nativeStatusText;
-
-			// Ignore repeat invocations
-			if ( completed ) {
-				return;
-			}
-
-			completed = true;
-
-			// Clear timeout if it exists
-			if ( timeoutTimer ) {
-				window.clearTimeout( timeoutTimer );
-			}
-
-			// Dereference transport for early garbage collection
-			// (no matter how long the jqXHR object will be used)
-			transport = undefined;
-
-			// Cache response headers
-			responseHeadersString = headers || "";
-
-			// Set readyState
-			jqXHR.readyState = status > 0 ? 4 : 0;
-
-			// Determine if successful
-			isSuccess = status >= 200 && status < 300 || status === 304;
-
-			// Get response data
-			if ( responses ) {
-				response = ajaxHandleResponses( s, jqXHR, responses );
-			}
-
-			// Use a noop converter for missing script
-			if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) {
-				s.converters[ "text script" ] = function() {};
-			}
-
-			// Convert no matter what (that way responseXXX fields are always set)
-			response = ajaxConvert( s, response, jqXHR, isSuccess );
-
-			// If successful, handle type chaining
-			if ( isSuccess ) {
-
-				// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
-				if ( s.ifModified ) {
-					modified = jqXHR.getResponseHeader( "Last-Modified" );
-					if ( modified ) {
-						jQuery.lastModified[ cacheURL ] = modified;
-					}
-					modified = jqXHR.getResponseHeader( "etag" );
-					if ( modified ) {
-						jQuery.etag[ cacheURL ] = modified;
-					}
-				}
-
-				// if no content
-				if ( status === 204 || s.type === "HEAD" ) {
-					statusText = "nocontent";
-
-				// if not modified
-				} else if ( status === 304 ) {
-					statusText = "notmodified";
-
-				// If we have data, let's convert it
-				} else {
-					statusText = response.state;
-					success = response.data;
-					error = response.error;
-					isSuccess = !error;
-				}
-			} else {
-
-				// Extract error from statusText and normalize for non-aborts
-				error = statusText;
-				if ( status || !statusText ) {
-					statusText = "error";
-					if ( status < 0 ) {
-						status = 0;
-					}
-				}
-			}
-
-			// Set data for the fake xhr object
-			jqXHR.status = status;
-			jqXHR.statusText = ( nativeStatusText || statusText ) + "";
-
-			// Success/Error
-			if ( isSuccess ) {
-				deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
-			} else {
-				deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
-			}
-
-			// Status-dependent callbacks
-			jqXHR.statusCode( statusCode );
-			statusCode = undefined;
-
-			if ( fireGlobals ) {
-				globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError",
-					[ jqXHR, s, isSuccess ? success : error ] );
-			}
-
-			// Complete
-			completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
-
-			if ( fireGlobals ) {
-				globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
-
-				// Handle the global AJAX counter
-				if ( !( --jQuery.active ) ) {
-					jQuery.event.trigger( "ajaxStop" );
-				}
-			}
-		}
-
-		return jqXHR;
-	},
-
-	getJSON: function( url, data, callback ) {
-		return jQuery.get( url, data, callback, "json" );
-	},
-
-	getScript: function( url, callback ) {
-		return jQuery.get( url, undefined, callback, "script" );
-	}
-} );
-
-jQuery.each( [ "get", "post" ], function( _i, method ) {
-	jQuery[ method ] = function( url, data, callback, type ) {
-
-		// Shift arguments if data argument was omitted
-		if ( isFunction( data ) ) {
-			type = type || callback;
-			callback = data;
-			data = undefined;
-		}
-
-		// The url can be an options object (which then must have .url)
-		return jQuery.ajax( jQuery.extend( {
-			url: url,
-			type: method,
-			dataType: type,
-			data: data,
-			success: callback
-		}, jQuery.isPlainObject( url ) && url ) );
-	};
-} );
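
The argument shifting above allows several call shapes (URLs are illustrative):

	jQuery.get( "/api/items", { page: 2 }, function( data ) {}, "json" );
	jQuery.get( "/api/items", function( data ) {} );            // data omitted, callback shifts left
	jQuery.post( { url: "/api/items", data: { name: "x" } } );  // options-object form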
-
-jQuery.ajaxPrefilter( function( s ) {
-	var i;
-	for ( i in s.headers ) {
-		if ( i.toLowerCase() === "content-type" ) {
-			s.contentType = s.headers[ i ] || "";
-		}
-	}
-} );
-
-
-jQuery._evalUrl = function( url, options, doc ) {
-	return jQuery.ajax( {
-		url: url,
-
-		// Make this explicit, since user can override this through ajaxSetup (#11264)
-		type: "GET",
-		dataType: "script",
-		cache: true,
-		async: false,
-		global: false,
-
-		// Only evaluate the response if it is successful (gh-4126)
-		// dataFilter is not invoked for failure responses, so using it instead
-		// of the default converter is kludgy but it works.
-		converters: {
-			"text script": function() {}
-		},
-		dataFilter: function( response ) {
-			jQuery.globalEval( response, options, doc );
-		}
-	} );
-};
-
-
-jQuery.fn.extend( {
-	wrapAll: function( html ) {
-		var wrap;
-
-		if ( this[ 0 ] ) {
-			if ( isFunction( html ) ) {
-				html = html.call( this[ 0 ] );
-			}
-
-			// The elements to wrap the target around
-			wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true );
-
-			if ( this[ 0 ].parentNode ) {
-				wrap.insertBefore( this[ 0 ] );
-			}
-
-			wrap.map( function() {
-				var elem = this;
-
-				while ( elem.firstElementChild ) {
-					elem = elem.firstElementChild;
-				}
-
-				return elem;
-			} ).append( this );
-		}
-
-		return this;
-	},
-
-	wrapInner: function( html ) {
-		if ( isFunction( html ) ) {
-			return this.each( function( i ) {
-				jQuery( this ).wrapInner( html.call( this, i ) );
-			} );
-		}
-
-		return this.each( function() {
-			var self = jQuery( this ),
-				contents = self.contents();
-
-			if ( contents.length ) {
-				contents.wrapAll( html );
-
-			} else {
-				self.append( html );
-			}
-		} );
-	},
-
-	wrap: function( html ) {
-		var htmlIsFunction = isFunction( html );
-
-		return this.each( function( i ) {
-			jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html );
-		} );
-	},
-
-	unwrap: function( selector ) {
-		this.parent( selector ).not( "body" ).each( function() {
-			jQuery( this ).replaceWith( this.childNodes );
-		} );
-		return this;
-	}
-} );
-
-
-jQuery.expr.pseudos.hidden = function( elem ) {
-	return !jQuery.expr.pseudos.visible( elem );
-};
-jQuery.expr.pseudos.visible = function( elem ) {
-	return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length );
-};
-
-
-
-
-jQuery.ajaxSettings.xhr = function() {
-	try {
-		return new window.XMLHttpRequest();
-	} catch ( e ) {}
-};
-
-var xhrSuccessStatus = {
-
-		// File protocol always yields status code 0, assume 200
-		0: 200,
-
-		// Support: IE <=9 only
-		// #1450: sometimes IE returns 1223 when it should be 204
-		1223: 204
-	},
-	xhrSupported = jQuery.ajaxSettings.xhr();
-
-support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported );
-support.ajax = xhrSupported = !!xhrSupported;
-
-jQuery.ajaxTransport( function( options ) {
-	var callback, errorCallback;
-
-	// Cross domain only allowed if supported through XMLHttpRequest
-	if ( support.cors || xhrSupported && !options.crossDomain ) {
-		return {
-			send: function( headers, complete ) {
-				var i,
-					xhr = options.xhr();
-
-				xhr.open(
-					options.type,
-					options.url,
-					options.async,
-					options.username,
-					options.password
-				);
-
-				// Apply custom fields if provided
-				if ( options.xhrFields ) {
-					for ( i in options.xhrFields ) {
-						xhr[ i ] = options.xhrFields[ i ];
-					}
-				}
-
-				// Override mime type if needed
-				if ( options.mimeType && xhr.overrideMimeType ) {
-					xhr.overrideMimeType( options.mimeType );
-				}
-
-				// X-Requested-With header
-				// For cross-domain requests, seeing as conditions for a preflight are
-				// akin to a jigsaw puzzle, we simply never set it to be sure.
-				// (it can always be set on a per-request basis or even using ajaxSetup)
-				// For same-domain requests, won't change header if already provided.
-				if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) {
-					headers[ "X-Requested-With" ] = "XMLHttpRequest";
-				}
-
-				// Set headers
-				for ( i in headers ) {
-					xhr.setRequestHeader( i, headers[ i ] );
-				}
-
-				// Callback
-				callback = function( type ) {
-					return function() {
-						if ( callback ) {
-							callback = errorCallback = xhr.onload =
-								xhr.onerror = xhr.onabort = xhr.ontimeout =
-									xhr.onreadystatechange = null;
-
-							if ( type === "abort" ) {
-								xhr.abort();
-							} else if ( type === "error" ) {
-
-								// Support: IE <=9 only
-								// On a manual native abort, IE9 throws
-								// errors on any property access that is not readyState
-								if ( typeof xhr.status !== "number" ) {
-									complete( 0, "error" );
-								} else {
-									complete(
-
-										// File: protocol always yields status 0; see #8605, #14207
-										xhr.status,
-										xhr.statusText
-									);
-								}
-							} else {
-								complete(
-									xhrSuccessStatus[ xhr.status ] || xhr.status,
-									xhr.statusText,
-
-									// Support: IE <=9 only
-									// IE9 has no XHR2 but throws on binary (trac-11426)
-									// For XHR2 non-text, let the caller handle it (gh-2498)
-									( xhr.responseType || "text" ) !== "text" ||
-									typeof xhr.responseText !== "string" ?
-										{ binary: xhr.response } :
-										{ text: xhr.responseText },
-									xhr.getAllResponseHeaders()
-								);
-							}
-						}
-					};
-				};
-
-				// Listen to events
-				xhr.onload = callback();
-				errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" );
-
-				// Support: IE 9 only
-				// Use onreadystatechange to replace onabort
-				// to handle uncaught aborts
-				if ( xhr.onabort !== undefined ) {
-					xhr.onabort = errorCallback;
-				} else {
-					xhr.onreadystatechange = function() {
-
-						// Check readyState before timeout as it changes
-						if ( xhr.readyState === 4 ) {
-
-							// Allow onerror to be called first,
-							// but that will not handle a native abort
-							// Also, save errorCallback to a variable
-							// as xhr.onerror cannot be accessed
-							window.setTimeout( function() {
-								if ( callback ) {
-									errorCallback();
-								}
-							} );
-						}
-					};
-				}
-
-				// Create the abort callback
-				callback = callback( "abort" );
-
-				try {
-
-					// Do send the request (this may raise an exception)
-					xhr.send( options.hasContent && options.data || null );
-				} catch ( e ) {
-
-					// #14683: Only rethrow if this hasn't been notified as an error yet
-					if ( callback ) {
-						throw e;
-					}
-				}
-			},
-
-			abort: function() {
-				if ( callback ) {
-					callback();
-				}
-			}
-		};
-	}
-} );
-
-
-
-
-// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432)
-jQuery.ajaxPrefilter( function( s ) {
-	if ( s.crossDomain ) {
-		s.contents.script = false;
-	}
-} );
-
-// Install script dataType
-jQuery.ajaxSetup( {
-	accepts: {
-		script: "text/javascript, application/javascript, " +
-			"application/ecmascript, application/x-ecmascript"
-	},
-	contents: {
-		script: /\b(?:java|ecma)script\b/
-	},
-	converters: {
-		"text script": function( text ) {
-			jQuery.globalEval( text );
-			return text;
-		}
-	}
-} );
-
-// Handle cache's special case and crossDomain
-jQuery.ajaxPrefilter( "script", function( s ) {
-	if ( s.cache === undefined ) {
-		s.cache = false;
-	}
-	if ( s.crossDomain ) {
-		s.type = "GET";
-	}
-} );
-
-// Bind script tag hack transport
-jQuery.ajaxTransport( "script", function( s ) {
-
-	// This transport only deals with cross domain or forced-by-attrs requests
-	if ( s.crossDomain || s.scriptAttrs ) {
-		var script, callback;
-		return {
-			send: function( _, complete ) {
-				script = jQuery( "<script>" )
-					.attr( s.scriptAttrs || {} )
-					.prop( { charset: s.scriptCharset, src: s.url } )
-					.on( "load error", callback = function( evt ) {
-						script.remove();
-						callback = null;
-						if ( evt ) {
-							complete( evt.type === "error" ? 404 : 200, evt.type );
-						}
-					} );
-
-				// Use native DOM manipulation to avoid our domManip AJAX trickery
-				document.head.appendChild( script[ 0 ] );
-			},
-			abort: function() {
-				if ( callback ) {
-					callback();
-				}
-			}
-		};
-	}
-} );
-
-
-
-
-var oldCallbacks = [],
-	rjsonp = /(=)\?(?=&|$)|\?\?/;
-
-// Default jsonp settings
-jQuery.ajaxSetup( {
-	jsonp: "callback",
-	jsonpCallback: function() {
-		var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce.guid++ ) );
-		this[ callback ] = true;
-		return callback;
-	}
-} );
-
-// Detect, normalize options and install callbacks for jsonp requests
-jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
-
-	var callbackName, overwritten, responseContainer,
-		jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ?
-			"url" :
-			typeof s.data === "string" &&
-				( s.contentType || "" )
-					.indexOf( "application/x-www-form-urlencoded" ) === 0 &&
-				rjsonp.test( s.data ) && "data"
-		);
-
-	// Handle iff the expected data type is "jsonp" or we have a parameter to set
-	if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) {
-
-		// Get callback name, remembering preexisting value associated with it
-		callbackName = s.jsonpCallback = isFunction( s.jsonpCallback ) ?
-			s.jsonpCallback() :
-			s.jsonpCallback;
-
-		// Insert callback into url or form data
-		if ( jsonProp ) {
-			s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName );
-		} else if ( s.jsonp !== false ) {
-			s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
-		}
-
-		// Use data converter to retrieve json after script execution
-		s.converters[ "script json" ] = function() {
-			if ( !responseContainer ) {
-				jQuery.error( callbackName + " was not called" );
-			}
-			return responseContainer[ 0 ];
-		};
-
-		// Force json dataType
-		s.dataTypes[ 0 ] = "json";
-
-		// Install callback
-		overwritten = window[ callbackName ];
-		window[ callbackName ] = function() {
-			responseContainer = arguments;
-		};
-
-		// Clean-up function (fires after converters)
-		jqXHR.always( function() {
-
-			// If previous value didn't exist - remove it
-			if ( overwritten === undefined ) {
-				jQuery( window ).removeProp( callbackName );
-
-			// Otherwise restore preexisting value
-			} else {
-				window[ callbackName ] = overwritten;
-			}
-
-			// Save back as free
-			if ( s[ callbackName ] ) {
-
-				// Make sure that re-using the options doesn't screw things around
-				s.jsonpCallback = originalSettings.jsonpCallback;
-
-				// Save the callback name for future use
-				oldCallbacks.push( callbackName );
-			}
-
-			// Call if it was a function and we have a response
-			if ( responseContainer && isFunction( overwritten ) ) {
-				overwritten( responseContainer[ 0 ] );
-			}
-
-			responseContainer = overwritten = undefined;
-		} );
-
-		// Delegate to script
-		return "script";
-	}
-} );
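
A request using the prefilter above might look like this (the endpoint is illustrative); the server must reply with a script that invokes the generated callback:

	jQuery.ajax( {
		url: "https://api.example.com/items?callback=?",  // "=?" is replaced by a generated callback name
		dataType: "jsonp"
	} ).done( function( data ) {
		// data is the object the server passed to <callbackName>( ... )
	} );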
-
-
-
-
-// Support: Safari 8 only
-// In Safari 8 documents created via document.implementation.createHTMLDocument
-// collapse sibling forms: the second one becomes a child of the first one.
-// Because of that, this security measure has to be disabled in Safari 8.
-// https://bugs.webkit.org/show_bug.cgi?id=137337
-support.createHTMLDocument = ( function() {
-	var body = document.implementation.createHTMLDocument( "" ).body;
-	body.innerHTML = "<form></form><form></form>";
-	return body.childNodes.length === 2;
-} )();
-
-
-// Argument "data" should be string of html
-// context (optional): If specified, the fragment will be created in this context,
-// defaults to document
-// keepScripts (optional): If true, will include scripts passed in the html string
-jQuery.parseHTML = function( data, context, keepScripts ) {
-	if ( typeof data !== "string" ) {
-		return [];
-	}
-	if ( typeof context === "boolean" ) {
-		keepScripts = context;
-		context = false;
-	}
-
-	var base, parsed, scripts;
-
-	if ( !context ) {
-
-		// Stop scripts or inline event handlers from being executed immediately
-		// by using document.implementation
-		if ( support.createHTMLDocument ) {
-			context = document.implementation.createHTMLDocument( "" );
-
-			// Set the base href for the created document
-			// so any parsed elements with URLs
-			// are based on the document's URL (gh-2965)
-			base = context.createElement( "base" );
-			base.href = document.location.href;
-			context.head.appendChild( base );
-		} else {
-			context = document;
-		}
-	}
-
-	parsed = rsingleTag.exec( data );
-	scripts = !keepScripts && [];
-
-	// Single tag
-	if ( parsed ) {
-		return [ context.createElement( parsed[ 1 ] ) ];
-	}
-
-	parsed = buildFragment( [ data ], context, scripts );
-
-	if ( scripts && scripts.length ) {
-		jQuery( scripts ).remove();
-	}
-
-	return jQuery.merge( [], parsed.childNodes );
-};
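
A sketch of the parsing entry point above:

	jQuery.parseHTML( "<p>hi</p><script>boom()</script>" );
	// => [ <p> ]            scripts are stripped by default

	jQuery.parseHTML( "<p>hi</p><script>boom()</script>", document, true );
	// => [ <p>, <script> ]  keepScripts retains (but does not run) the script node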
-
-
-/**
- * Load a url into a page
- */
-jQuery.fn.load = function( url, params, callback ) {
-	var selector, type, response,
-		self = this,
-		off = url.indexOf( " " );
-
-	if ( off > -1 ) {
-		selector = stripAndCollapse( url.slice( off ) );
-		url = url.slice( 0, off );
-	}
-
-	// If it's a function
-	if ( isFunction( params ) ) {
-
-		// We assume that it's the callback
-		callback = params;
-		params = undefined;
-
-	// Otherwise, build a param string
-	} else if ( params && typeof params === "object" ) {
-		type = "POST";
-	}
-
-	// If we have elements to modify, make the request
-	if ( self.length > 0 ) {
-		jQuery.ajax( {
-			url: url,
-
-			// If "type" variable is undefined, then "GET" method will be used.
-			// Make value of this field explicit since
-			// user can override it through ajaxSetup method
-			type: type || "GET",
-			dataType: "html",
-			data: params
-		} ).done( function( responseText ) {
-
-			// Save response for use in complete callback
-			response = arguments;
-
-			self.html( selector ?
-
-				// If a selector was specified, locate the right elements in a dummy div
-				// Exclude scripts to avoid IE 'Permission Denied' errors
-				jQuery( "<div>" ).append( jQuery.parseHTML( responseText ) ).find( selector ) :
-
-				// Otherwise use the full result
-				responseText );
-
-		// If the request succeeds, this function gets "data", "status", "jqXHR"
-		// but they are ignored because response was set above.
-		// If it fails, this function gets "jqXHR", "status", "error"
-		} ).always( callback && function( jqXHR, status ) {
-			self.each( function() {
-				callback.apply( this, response || [ jqXHR.responseText, status, jqXHR ] );
-			} );
-		} );
-	}
-
-	return this;
-};
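
A typical use of the fragment syntax handled above (the URL and selectors are illustrative):

	// Fetch /help.html and insert only the part matching "#main" into #panel
	jQuery( "#panel" ).load( "/help.html #main", function( responseText, status, jqXHR ) {
		// Invoked once per element in the set, after the HTML has been inserted
	} );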
-
-
-
-
-jQuery.expr.pseudos.animated = function( elem ) {
-	return jQuery.grep( jQuery.timers, function( fn ) {
-		return elem === fn.elem;
-	} ).length;
-};
-
-
-
-
-jQuery.offset = {
-	setOffset: function( elem, options, i ) {
-		var curPosition, curLeft, curCSSTop, curTop, curOffset, curCSSLeft, calculatePosition,
-			position = jQuery.css( elem, "position" ),
-			curElem = jQuery( elem ),
-			props = {};
-
-		// Set position first, in case top/left are set even on static elem
-		if ( position === "static" ) {
-			elem.style.position = "relative";
-		}
-
-		curOffset = curElem.offset();
-		curCSSTop = jQuery.css( elem, "top" );
-		curCSSLeft = jQuery.css( elem, "left" );
-		calculatePosition = ( position === "absolute" || position === "fixed" ) &&
-			( curCSSTop + curCSSLeft ).indexOf( "auto" ) > -1;
-
-		// Need to be able to calculate position if either
-		// top or left is auto and position is either absolute or fixed
-		if ( calculatePosition ) {
-			curPosition = curElem.position();
-			curTop = curPosition.top;
-			curLeft = curPosition.left;
-
-		} else {
-			curTop = parseFloat( curCSSTop ) || 0;
-			curLeft = parseFloat( curCSSLeft ) || 0;
-		}
-
-		if ( isFunction( options ) ) {
-
-			// Use jQuery.extend here to allow modification of coordinates argument (gh-1848)
-			options = options.call( elem, i, jQuery.extend( {}, curOffset ) );
-		}
-
-		if ( options.top != null ) {
-			props.top = ( options.top - curOffset.top ) + curTop;
-		}
-		if ( options.left != null ) {
-			props.left = ( options.left - curOffset.left ) + curLeft;
-		}
-
-		if ( "using" in options ) {
-			options.using.call( elem, props );
-
-		} else {
-			if ( typeof props.top === "number" ) {
-				props.top += "px";
-			}
-			if ( typeof props.left === "number" ) {
-				props.left += "px";
-			}
-			curElem.css( props );
-		}
-	}
-};
-
-jQuery.fn.extend( {
-
-	// offset() relates an element's border box to the document origin
-	offset: function( options ) {
-
-		// Preserve chaining for setter
-		if ( arguments.length ) {
-			return options === undefined ?
-				this :
-				this.each( function( i ) {
-					jQuery.offset.setOffset( this, options, i );
-				} );
-		}
-
-		var rect, win,
-			elem = this[ 0 ];
-
-		if ( !elem ) {
-			return;
-		}
-
-		// Return zeros for disconnected and hidden (display: none) elements (gh-2310)
-		// Support: IE <=11 only
-		// Running getBoundingClientRect on a
-		// disconnected node in IE throws an error
-		if ( !elem.getClientRects().length ) {
-			return { top: 0, left: 0 };
-		}
-
-		// Get document-relative position by adding viewport scroll to viewport-relative gBCR
-		rect = elem.getBoundingClientRect();
-		win = elem.ownerDocument.defaultView;
-		return {
-			top: rect.top + win.pageYOffset,
-			left: rect.left + win.pageXOffset
-		};
-	},
-
-	// position() relates an element's margin box to its offset parent's padding box
-	// This corresponds to the behavior of CSS absolute positioning
-	position: function() {
-		if ( !this[ 0 ] ) {
-			return;
-		}
-
-		var offsetParent, offset, doc,
-			elem = this[ 0 ],
-			parentOffset = { top: 0, left: 0 };
-
-		// position:fixed elements are offset from the viewport, which itself always has zero offset
-		if ( jQuery.css( elem, "position" ) === "fixed" ) {
-
-			// Assume position:fixed implies availability of getBoundingClientRect
-			offset = elem.getBoundingClientRect();
-
-		} else {
-			offset = this.offset();
-
-			// Account for the *real* offset parent, which can be the document or its root element
-			// when a statically positioned element is identified
-			doc = elem.ownerDocument;
-			offsetParent = elem.offsetParent || doc.documentElement;
-			while ( offsetParent &&
-				( offsetParent === doc.body || offsetParent === doc.documentElement ) &&
-				jQuery.css( offsetParent, "position" ) === "static" ) {
-
-				offsetParent = offsetParent.parentNode;
-			}
-			if ( offsetParent && offsetParent !== elem && offsetParent.nodeType === 1 ) {
-
-				// Incorporate borders into its offset, since they are outside its content origin
-				parentOffset = jQuery( offsetParent ).offset();
-				parentOffset.top += jQuery.css( offsetParent, "borderTopWidth", true );
-				parentOffset.left += jQuery.css( offsetParent, "borderLeftWidth", true );
-			}
-		}
-
-		// Subtract parent offsets and element margins
-		return {
-			top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ),
-			left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true )
-		};
-	},
-
-	// This method will return documentElement in the following cases:
-	// 1) For the element inside the iframe without offsetParent, this method will return
-	//    documentElement of the parent window
-	// 2) For the hidden or detached element
-	// 3) For body or html element, i.e. in case of the html node - it will return itself
-	//
-	// but those exceptions were never presented as real-life use cases,
-	// and the values returned in them may even be the preferable results.
-	//
-	// This logic, however, is not guaranteed and can change at any point in the future
-	offsetParent: function() {
-		return this.map( function() {
-			var offsetParent = this.offsetParent;
-
-			while ( offsetParent && jQuery.css( offsetParent, "position" ) === "static" ) {
-				offsetParent = offsetParent.offsetParent;
-			}
-
-			return offsetParent || documentElement;
-		} );
-	}
-} );
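// Illustrative sketch of the distinction drawn above (hypothetical element):
var box = $( "#box" );
box.offset();                            // e.g. { top: 120, left: 48 }, document-relative
box.position();                          // e.g. { top: 20, left: 8 }, offset-parent-relative
box.offset( { top: 200, left: 100 } );   // setter form repositions the element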
-
-// Create scrollLeft and scrollTop methods
-jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( method, prop ) {
-	var top = "pageYOffset" === prop;
-
-	jQuery.fn[ method ] = function( val ) {
-		return access( this, function( elem, method, val ) {
-
-			// Coalesce documents and windows
-			var win;
-			if ( isWindow( elem ) ) {
-				win = elem;
-			} else if ( elem.nodeType === 9 ) {
-				win = elem.defaultView;
-			}
-
-			if ( val === undefined ) {
-				return win ? win[ prop ] : elem[ method ];
-			}
-
-			if ( win ) {
-				win.scrollTo(
-					!top ? val : win.pageXOffset,
-					top ? val : win.pageYOffset
-				);
-
-			} else {
-				elem[ method ] = val;
-			}
-		}, method, val, arguments.length );
-	};
-} );
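// Illustrative sketch of the generated scroll accessors (hypothetical element):
$( window ).scrollTop();      // read the window's vertical scroll position
$( "#log" ).scrollTop( 0 );   // scroll an overflowing element back to its top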
-
-// Support: Safari <=7 - 9.1, Chrome <=37 - 49
-// Add the top/left cssHooks using jQuery.fn.position
-// Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
-// Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347
-// getComputedStyle returns percent when specified for top/left/bottom/right;
-// rather than make the css module depend on the offset module, just check for it here
-jQuery.each( [ "top", "left" ], function( _i, prop ) {
-	jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition,
-		function( elem, computed ) {
-			if ( computed ) {
-				computed = curCSS( elem, prop );
-
-				// If curCSS returns percentage, fallback to offset
-				return rnumnonpx.test( computed ) ?
-					jQuery( elem ).position()[ prop ] + "px" :
-					computed;
-			}
-		}
-	);
-} );
-
-
-// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods
-jQuery.each( { Height: "height", Width: "width" }, function( name, type ) {
-	jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name },
-		function( defaultExtra, funcName ) {
-
-		// Margin is only for outerHeight, outerWidth
-		jQuery.fn[ funcName ] = function( margin, value ) {
-			var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ),
-				extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" );
-
-			return access( this, function( elem, type, value ) {
-				var doc;
-
-				if ( isWindow( elem ) ) {
-
-					// $( window ).outerWidth/Height return w/h including scrollbars (gh-1729)
-					return funcName.indexOf( "outer" ) === 0 ?
-						elem[ "inner" + name ] :
-						elem.document.documentElement[ "client" + name ];
-				}
-
-				// Get document width or height
-				if ( elem.nodeType === 9 ) {
-					doc = elem.documentElement;
-
-					// Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height],
-					// whichever is greatest
-					return Math.max(
-						elem.body[ "scroll" + name ], doc[ "scroll" + name ],
-						elem.body[ "offset" + name ], doc[ "offset" + name ],
-						doc[ "client" + name ]
-					);
-				}
-
-				return value === undefined ?
-
-					// Get width or height on the element, requesting but not forcing parseFloat
-					jQuery.css( elem, type, extra ) :
-
-					// Set width or height on the element
-					jQuery.style( elem, type, value, extra );
-			}, type, chainable ? margin : undefined, chainable );
-		};
-	} );
-} );
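// Illustrative sketch of the generated box-model accessors (hypothetical element):
var panel = $( "#panel" );
panel.width();              // content box only
panel.innerWidth();         // content + padding
panel.outerWidth();         // content + padding + border
panel.outerWidth( true );   // content + padding + border + margin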
-
-
-jQuery.each( [
-	"ajaxStart",
-	"ajaxStop",
-	"ajaxComplete",
-	"ajaxError",
-	"ajaxSuccess",
-	"ajaxSend"
-], function( _i, type ) {
-	jQuery.fn[ type ] = function( fn ) {
-		return this.on( type, fn );
-	};
-} );
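// Illustrative sketch: each shorthand above simply delegates to .on();
// global ajax events fire on document (hypothetical handler).
$( document ).ajaxError( function( event, jqXHR, settings, error ) {
	console.warn( "ajax request failed:", settings.url, error );
} );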
-
-
-
-
-jQuery.fn.extend( {
-
-	bind: function( types, data, fn ) {
-		return this.on( types, null, data, fn );
-	},
-	unbind: function( types, fn ) {
-		return this.off( types, null, fn );
-	},
-
-	delegate: function( selector, types, data, fn ) {
-		return this.on( types, selector, data, fn );
-	},
-	undelegate: function( selector, types, fn ) {
-
-		// ( namespace ) or ( selector, types [, fn] )
-		return arguments.length === 1 ?
-			this.off( selector, "**" ) :
-			this.off( types, selector || "**", fn );
-	},
-
-	hover: function( fnOver, fnOut ) {
-		return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
-	}
-} );
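// Illustrative sketch: the legacy shorthands above map onto .on()/.off()
// (hypothetical list and handler; note the swapped argument order).
var onItemClick = function() {
	console.log( "clicked:", this );
};
$( "#list" ).delegate( "li", "click", onItemClick );   // legacy form
$( "#list" ).on( "click", "li", onItemClick );         // equivalent modern form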
-
-jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " +
-	"mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
-	"change select submit keydown keypress keyup contextmenu" ).split( " " ),
-	function( _i, name ) {
-
-		// Handle event binding
-		jQuery.fn[ name ] = function( data, fn ) {
-			return arguments.length > 0 ?
-				this.on( name, null, data, fn ) :
-				this.trigger( name );
-		};
-	} );
-
-
-
-
-// Support: Android <=4.0 only
-// Make sure we trim BOM and NBSP
-var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;
-
-// Bind a function to a context, optionally partially applying any
-// arguments.
-// jQuery.proxy is deprecated to promote standards (specifically Function#bind)
-// However, it is not slated for removal any time soon
-jQuery.proxy = function( fn, context ) {
-	var tmp, args, proxy;
-
-	if ( typeof context === "string" ) {
-		tmp = fn[ context ];
-		context = fn;
-		fn = tmp;
-	}
-
-	// Quick check to determine if target is callable, in the spec
-	// this throws a TypeError, but we will just return undefined.
-	if ( !isFunction( fn ) ) {
-		return undefined;
-	}
-
-	// Simulated bind
-	args = slice.call( arguments, 2 );
-	proxy = function() {
-		return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
-	};
-
-	// Set the guid of unique handler to the same of original handler, so it can be removed
-	proxy.guid = fn.guid = fn.guid || jQuery.guid++;
-
-	return proxy;
-};
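// Illustrative sketch of the equivalence noted above (hypothetical object):
var counter = { n: 0, inc: function() { this.n++; } };
$( "#btn" ).on( "click", jQuery.proxy( counter, "inc" ) );   // deprecated helper
$( "#btn" ).on( "click", counter.inc.bind( counter ) );      // preferred standard form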
-
-jQuery.holdReady = function( hold ) {
-	if ( hold ) {
-		jQuery.readyWait++;
-	} else {
-		jQuery.ready( true );
-	}
-};
-jQuery.isArray = Array.isArray;
-jQuery.parseJSON = JSON.parse;
-jQuery.nodeName = nodeName;
-jQuery.isFunction = isFunction;
-jQuery.isWindow = isWindow;
-jQuery.camelCase = camelCase;
-jQuery.type = toType;
-
-jQuery.now = Date.now;
-
-jQuery.isNumeric = function( obj ) {
-
-	// As of jQuery 3.0, isNumeric is limited to
-	// strings and numbers (primitives or objects)
-	// that can be coerced to finite numbers (gh-2662)
-	var type = jQuery.type( obj );
-	return ( type === "number" || type === "string" ) &&
-
-		// parseFloat NaNs numeric-cast false positives ("")
-		// ...but misinterprets leading-number strings, particularly hex literals ("0x...")
-		// subtraction forces infinities to NaN
-		!isNaN( obj - parseFloat( obj ) );
-};
-
-jQuery.trim = function( text ) {
-	return text == null ?
-		"" :
-		( text + "" ).replace( rtrim, "" );
-};
-
-
-
-// Register as a named AMD module, since jQuery can be concatenated with other
-// files that may use define, but not via a proper concatenation script that
-// understands anonymous AMD modules. A named AMD is safest and most robust
-// way to register. Lowercase jquery is used because AMD module names are
-// derived from file names, and jQuery is normally delivered in a lowercase
-// file name. Do this after creating the global so that if an AMD module wants
-// to call noConflict to hide this version of jQuery, it will work.
-
-// Note that for maximum portability, libraries that are not jQuery should
-// declare themselves as anonymous modules, and avoid setting a global if an
-// AMD loader is present. jQuery is a special case. For more information, see
-// https://github.com/jrburke/requirejs/wiki/Updating-existing-libraries#wiki-anon
-
-if ( typeof define === "function" && define.amd ) {
-	define( "jquery", [], function() {
-		return jQuery;
-	} );
-}
-
-
-
-
-var
-
-	// Map over jQuery in case of overwrite
-	_jQuery = window.jQuery,
-
-	// Map over the $ in case of overwrite
-	_$ = window.$;
-
-jQuery.noConflict = function( deep ) {
-	if ( window.$ === jQuery ) {
-		window.$ = _$;
-	}
-
-	if ( deep && window.jQuery === jQuery ) {
-		window.jQuery = _jQuery;
-	}
-
-	return jQuery;
-};
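// Illustrative usage sketch (hypothetical setup with another "$" library):
var jq = jQuery.noConflict( true );   // restore both window.$ and window.jQuery
jq( "#box" ).hide();                  // this jQuery remains usable via the new name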
-
-// Expose jQuery and $ identifiers, even in AMD
-// (#7102#comment:10, https://github.com/jquery/jquery/pull/557)
-// and CommonJS for browser emulators (#13566)
-if ( typeof noGlobal === "undefined" ) {
-	window.jQuery = window.$ = jQuery;
-}
-
-
-
-
-return jQuery;
-} );
diff --git a/docs/build/html/_static/jquery.js b/docs/build/html/_static/jquery.js
deleted file mode 100644
index b0614034ad3a95e4ae9f53c2b015eeb3e8d68bde..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/jquery.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.5.1 | (c) JS Foundation and other contributors | jquery.org/license */
-[minified jQuery 3.5.1 module body elided: a single machine-generated line, hard-wrapped here by extraction]
e=C.getComputedStyle(l);n="1%"!==e.top,s=12===t(e.marginLeft),l.style.right="60%",o=36===t(e.right),r=36===t(e.width),l.style.position="absolute",i=12===t(l.offsetWidth/3),re.removeChild(u),l=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s,u=E.createElement("div"),l=E.createElement("div");l.style&&(l.style.backgroundClip="content-box",l.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===l.style.backgroundClip,S.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),s},scrollboxSize:function(){return e(),i},reliableTrDimensions:function(){var e,t,n,r;return null==a&&(e=E.createElement("table"),t=E.createElement("tr"),n=E.createElement("div"),e.style.cssText="position:absolute;left:-11111px",t.style.height="1px",n.style.height="9px",re.appendChild(e).appendChild(t).appendChild(n),r=C.getComputedStyle(t),a=3<parseInt(r.height),re.removeChild(e)),a}}))}();var _e=["Webkit","Moz","ms"],ze=E.createElement("div").style,Ue={};function Xe(e){var t=S.cssProps[e]||Ue[e];return t||(e in ze?e:Ue[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=_e.length;while(n--)if((e=_e[n]+t)in ze)return e}(e)||e)}var Ve=/^(none|table(?!-c[ea]).+)/,Ge=/^--/,Ye={position:"absolute",visibility:"hidden",display:"block"},Qe={letterSpacing:"0",fontWeight:"400"};function Je(e,t,n){var r=te.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ke(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=S.css(e,n+ne[a],!0,i)),r?("content"===n&&(u-=S.css(e,"padding"+ne[a],!0,i)),"margin"!==n&&(u-=S.css(e,"border"+ne[a]+"Width",!0,i))):(u+=S.css(e,"padding"+ne[a],!0,i),"padding"!==n?u+=S.css(e,"border"+ne[a]+"Width",!0,i):s+=S.css(e,"border"+ne[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function Ze(e,t,n){var r=Ie(e),i=(!y.boxSizingReliable()||n)&&"border-box"===S.css(e,"boxSizing",!1,r),o=i,a=Be(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if(Me.test(a)){if(!n)return a;a="auto"}return(!y.boxSizingReliable()&&i||!y.reliableTrDimensions()&&A(e,"tr")||"auto"===a||!parseFloat(a)&&"inline"===S.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===S.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+Ke(e,t,n||(i?"border":"content"),o,r,a)+"px"}function et(e,t,n,r,i){return new et.prototype.init(e,t,n,r,i)}S.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Be(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=X(t),u=Ge.test(t),l=e.style;if(u||(t=Xe(s)),a=S.cssHooks[t]||S.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=te.exec(n))&&i[1]&&(n=se(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(S.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=X(t);return 
Ge.test(t)||(t=Xe(s)),(a=S.cssHooks[t]||S.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Be(e,t,r)),"normal"===i&&t in Qe&&(i=Qe[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),S.each(["height","width"],function(e,u){S.cssHooks[u]={get:function(e,t,n){if(t)return!Ve.test(S.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?Ze(e,u,n):We(e,Ye,function(){return Ze(e,u,n)})},set:function(e,t,n){var r,i=Ie(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===S.css(e,"boxSizing",!1,i),s=n?Ke(e,u,n,a,i):0;return a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-Ke(e,u,"border",!1,i)-.5)),s&&(r=te.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=S.css(e,u)),Je(0,t,s)}}}),S.cssHooks.marginLeft=$e(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Be(e,"marginLeft"))||e.getBoundingClientRect().left-We(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),S.each({margin:"",padding:"",border:"Width"},function(i,o){S.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+ne[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(S.cssHooks[i+o].set=Je)}),S.fn.extend({css:function(e,t){return $(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Ie(e),i=t.length;a<i;a++)o[t[a]]=S.css(e,t[a],!1,r);return o}return void 0!==n?S.style(e,t,n):S.css(e,t)},e,t,1<arguments.length)}}),((S.Tween=et).prototype={constructor:et,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||S.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(S.cssNumber[n]?"":"px")},cur:function(){var e=et.propHooks[this.prop];return e&&e.get?e.get(this):et.propHooks._default.get(this)},run:function(e){var t,n=et.propHooks[this.prop];return this.options.duration?this.pos=t=S.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):et.propHooks._default.set(this),this}}).init.prototype=et.prototype,(et.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=S.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){S.fx.step[e.prop]?S.fx.step[e.prop](e):1!==e.elem.nodeType||!S.cssHooks[e.prop]&&null==e.elem.style[Xe(e.prop)]?e.elem[e.prop]=e.now:S.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=et.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},S.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},S.fx=et.prototype.init,S.fx.step={};var tt,nt,rt,it,ot=/^(?:toggle|show|hide)$/,at=/queueHooks$/;function st(){nt&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(st):C.setTimeout(st,S.fx.interval),S.fx.tick())}function ut(){return C.setTimeout(function(){tt=void 0}),tt=Date.now()}function lt(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=ne[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function ct(e,t,n){for(var r,i=(ft.tweeners[t]||[]).concat(ft.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ft(o,e,t){var n,a,r=0,i=ft.prefilters.length,s=S.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var 
e=tt||ut(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:S.extend({},e),opts:S.extend(!0,{specialEasing:{},easing:S.easing._default},t),originalProperties:e,originalOptions:t,startTime:tt||ut(),duration:t.duration,tweens:[],createTween:function(e,t){var n=S.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var n,r,i,o,a;for(n in e)if(i=t[r=X(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=S.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=ft.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(S._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return S.map(c,ct,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),S.fx.timer(S.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}S.Animation=S.extend(ft,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return se(n.elem,e,te.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(P);for(var n,r=0,i=e.length;r<i;r++)n=e[r],ft.tweeners[n]=ft.tweeners[n]||[],ft.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),v=Y.get(e,"fxshow");for(r in n.queue||(null==(a=S._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,S.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],ot.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||S.style(e,r)}if((u=!S.isEmptyObject(t))||!S.isEmptyObject(d))for(r in f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Y.get(e,"display")),"none"===(c=S.css(e,"display"))&&(l?c=l:(le([e],!0),l=e.style.display||l,c=S.css(e,"display"),le([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===S.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Y.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&le([e],!0),p.done(function(){for(r in g||le([e]),Y.remove(e,"fxshow"),d)S.style(e,r,d[r])})),u=ct(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?ft.prefilters.unshift(e):ft.prefilters.push(e)}}),S.speed=function(e,t,n){var r=e&&"object"==typeof e?S.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return S.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in S.fx.speeds?r.duration=S.fx.speeds[r.duration]:r.duration=S.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&S.dequeue(this,r.queue)},r},S.fn.extend({fadeTo:function(e,t,n,r){return 
this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=S.isEmptyObject(t),o=S.speed(e,n,r),a=function(){var e=ft(this,S.extend({},t),o);(i||Y.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 0),e&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=S.timers,r=Y.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in r)r[t]&&r[t].stop&&at.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||S.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Y.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=S.timers,o=n?n.length:0;for(t.finish=!0,S.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),S.each(["toggle","show","hide"],function(e,r){var i=S.fn[r];S.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(lt(r,!0),e,t,n)}}),S.each({slideDown:lt("show"),slideUp:lt("hide"),slideToggle:lt("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){S.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),S.timers=[],S.fx.tick=function(){var e,t=0,n=S.timers;for(tt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||S.fx.stop(),tt=void 0},S.fx.timer=function(e){S.timers.push(e),S.fx.start()},S.fx.interval=13,S.fx.start=function(){nt||(nt=!0,st())},S.fx.stop=function(){nt=null},S.fx.speeds={slow:600,fast:200,_default:400},S.fn.delay=function(r,e){return r=S.fx&&S.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},rt=E.createElement("input"),it=E.createElement("select").appendChild(E.createElement("option")),rt.type="checkbox",y.checkOn=""!==rt.value,y.optSelected=it.selected,(rt=E.createElement("input")).value="t",rt.type="radio",y.radioValue="t"===rt.value;var pt,dt=S.expr.attrHandle;S.fn.extend({attr:function(e,t){return $(this,S.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){S.removeAttr(this,e)})}}),S.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?S.prop(e,t,n):(1===o&&S.isXMLDoc(e)||(i=S.attrHooks[t.toLowerCase()]||(S.expr.match.bool.test(t)?pt:void 0)),void 0!==n?null===n?void S.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=S.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(P);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),pt={set:function(e,t,n){return!1===t?S.removeAttr(e,n):e.setAttribute(n,n),n}},S.each(S.expr.match.bool.source.match(/\w+/g),function(e,t){var a=dt[t]||S.find.attr;dt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=dt[o],dt[o]=r,r=null!=a(e,t,n)?o:null,dt[o]=i),r}});var ht=/^(?:input|select|textarea|button)$/i,gt=/^(?:a|area)$/i;function vt(e){return(e.match(P)||[]).join(" ")}function yt(e){return 
e.getAttribute&&e.getAttribute("class")||""}function mt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(P)||[]}S.fn.extend({prop:function(e,t){return $(this,S.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[S.propFix[e]||e]})}}),S.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&S.isXMLDoc(e)||(t=S.propFix[t]||t,i=S.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=S.find.attr(e,"tabindex");return t?parseInt(t,10):ht.test(e.nodeName)||gt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(S.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),S.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){S.propFix[this.toLowerCase()]=this}),S.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){S(this).addClass(t.call(this,e,yt(this)))});if((e=mt(t)).length)while(n=this[u++])if(i=yt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){S(this).removeClass(t.call(this,e,yt(this)))});if(!arguments.length)return this.attr("class","");if((e=mt(t)).length)while(n=this[u++])if(i=yt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){S(this).toggleClass(i.call(this,e,yt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=S(this),r=mt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=yt(this))&&Y.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Y.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+vt(yt(n))+" ").indexOf(t))return!0;return!1}});var xt=/\r/g;S.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,S(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=S.map(t,function(e){return null==e?"":e+""})),(r=S.valHooks[this.type]||S.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 0!==r.set(this,t,"value")||(this.value=t))})):t?(r=S.valHooks[t.type]||S.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(xt,""):null==e?"":e:void 0}}),S.extend({valHooks:{option:{get:function(e){var t=S.find.attr(e,"value");return null!=t?t:vt(S.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=S(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var 
n,r,i=e.options,o=S.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<S.inArray(S.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),S.each(["radio","checkbox"],function(){S.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<S.inArray(S(e).val(),t)}},y.checkOn||(S.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var bt=/^(?:focusinfocus|focusoutblur)$/,wt=function(e){e.stopPropagation()};S.extend(S.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!bt.test(d+S.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[S.expando]?e:new S.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 0,e.target||(e.target=n),t=null==t?[e]:S.makeArray(t,[e]),c=S.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,bt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Y.get(o,"events")||Object.create(null))[e.type]&&Y.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&V(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!V(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),S.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,wt),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,wt),S.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=S.extend(new S.Event,n,{type:e,isSimulated:!0});S.event.trigger(r,null,t)}}),S.fn.extend({trigger:function(e,t){return this.each(function(){S.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return S.event.trigger(e,t,n,!0)}}),y.focusin||S.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){S.event.simulate(r,e.target,S.event.fix(e))};S.event.special[r]={setup:function(){var e=this.ownerDocument||this.document||this,t=Y.access(e,r);t||e.addEventListener(n,i,!0),Y.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this.document||this,t=Y.access(e,r)-1;t?Y.access(e,r,t):(e.removeEventListener(n,i,!0),Y.remove(e,r))}}});var Tt=C.location,Ct={guid:Date.now()},Et=/\?/;S.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||S.error("Invalid XML: "+e),t};var St=/\[\]$/,kt=/\r?\n/g,At=/^(?:submit|button|image|reset|file)$/i,Nt=/^(?:input|select|textarea|keygen)/i;function Dt(n,e,r,i){var t;if(Array.isArray(e))S.each(e,function(e,t){r||St.test(n)?i(n,t):Dt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)Dt(n+"["+t+"]",e[t],r,i)}S.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!S.isPlainObject(e))S.each(e,function(){i(this.name,this.value)});else for(n in e)Dt(n,e[n],t,i);return 
r.join("&")},S.fn.extend({serialize:function(){return S.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=S.prop(this,"elements");return e?S.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!S(this).is(":disabled")&&Nt.test(this.nodeName)&&!At.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=S(this).val();return null==n?null:Array.isArray(n)?S.map(n,function(e){return{name:t.name,value:e.replace(kt,"\r\n")}}):{name:t.name,value:n.replace(kt,"\r\n")}}).get()}});var jt=/%20/g,qt=/#.*$/,Lt=/([?&])_=[^&]*/,Ht=/^(.*?):[ \t]*([^\r\n]*)$/gm,Ot=/^(?:GET|HEAD)$/,Pt=/^\/\//,Rt={},Mt={},It="*/".concat("*"),Wt=E.createElement("a");function Ft(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(P)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function Bt(t,i,o,a){var s={},u=t===Mt;function l(e){var r;return s[e]=!0,S.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function $t(e,t){var n,r,i=S.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&S.extend(!0,e,r),e}Wt.href=Tt.href,S.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Tt.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Tt.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":It,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":S.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?$t($t(e,S.ajaxSettings),t):$t(S.ajaxSettings,e)},ajaxPrefilter:Ft(Rt),ajaxTransport:Ft(Mt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=S.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?S(y):S.event,x=S.Deferred(),b=S.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Ht.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Tt.href)+"").replace(Pt,Tt.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(P)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Wt.protocol+"//"+Wt.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=S.param(v.data,v.traditional)),Bt(Rt,v,t,T),h)return T;for(i 
in(g=S.event&&v.global)&&0==S.active++&&S.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Ot.test(v.type),f=v.url.replace(qt,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(jt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(Et.test(f)?"&":"?")+v.data,delete v.data),!1===v.cache&&(f=f.replace(Lt,"$1"),o=(Et.test(f)?"&":"?")+"_="+Ct.guid+++o),v.url=f+o),v.ifModified&&(S.lastModified[f]&&T.setRequestHeader("If-Modified-Since",S.lastModified[f]),S.etag[f]&&T.setRequestHeader("If-None-Match",S.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+It+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=Bt(Mt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),!i&&-1<S.inArray("script",v.dataTypes)&&(v.converters["text script"]=function(){}),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(S.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(S.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--S.active||S.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return S.get(e,t,n,"json")},getScript:function(e,t){return S.get(e,void 0,t,"script")}}),S.each(["get","post"],function(e,i){S[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),S.ajax(S.extend({url:e,type:i,dataType:r,data:t,success:n},S.isPlainObject(e)&&e))}}),S.ajaxPrefilter(function(e){var t;for(t in e.headers)"content-type"===t.toLowerCase()&&(e.contentType=e.headers[t]||"")}),S._evalUrl=function(e,t,n){return 
S.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){S.globalEval(e,t,n)}})},S.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=S(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){S(this).wrapInner(n.call(this,e))}):this.each(function(){var e=S(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return this.each(function(e){S(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){S(this).replaceWith(this.childNodes)}),this}}),S.expr.pseudos.hidden=function(e){return!S.expr.pseudos.visible(e)},S.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},S.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var _t={0:200,1223:204},zt=S.ajaxSettings.xhr();y.cors=!!zt&&"withCredentials"in zt,y.ajax=zt=!!zt,S.ajaxTransport(function(i){var o,a;if(y.cors||zt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(_t[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),S.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),S.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return S.globalEval(e),e}}}),S.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),S.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=S("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Ut,Xt=[],Vt=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Xt.pop()||S.expando+"_"+Ct.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Vt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Vt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Vt,"$1"+r):!1!==e.jsonp&&(e.url+=(Et.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script 
json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Xt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Ut=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Ut.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=vt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&S.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?S("<div>").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=$e(y.pixelPosition,function(e,t){if(t)return t=Be(e,n),Me.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return 
x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}});var Gt=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;S.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||S.guid++,i},S.holdReady=function(e){e?S.readyWait++:S.ready(!0)},S.isArray=Array.isArray,S.parseJSON=JSON.parse,S.nodeName=A,S.isFunction=m,S.isWindow=x,S.camelCase=X,S.type=w,S.now=Date.now,S.isNumeric=function(e){var t=S.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},S.trim=function(e){return null==e?"":(e+"").replace(Gt,"")},"function"==typeof define&&define.amd&&define("jquery",[],function(){return S});var Yt=C.jQuery,Qt=C.$;return S.noConflict=function(e){return C.$===S&&(C.$=Qt),e&&C.jQuery===S&&(C.jQuery=Yt),S},"undefined"==typeof e&&(C.jQuery=C.$=S),S});
diff --git a/docs/build/html/_static/language_data.js b/docs/build/html/_static/language_data.js
deleted file mode 100644
index ebe2f03bf03b7f72481f8f483039ef9b7013f062..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/language_data.js
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * language_data.js
- * ~~~~~~~~~~~~~~~~
- *
- * This script contains the language-specific data used by searchtools.js,
- * namely the list of stopwords, stemmer, scorer and splitter.
- *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
-
-
-/* Non-minified version is copied as a separate JS file, is available */
-
-/**
- * Porter Stemmer
- */
-var Stemmer = function() {
-
-  var step2list = {
-    ational: 'ate',
-    tional: 'tion',
-    enci: 'ence',
-    anci: 'ance',
-    izer: 'ize',
-    bli: 'ble',
-    alli: 'al',
-    entli: 'ent',
-    eli: 'e',
-    ousli: 'ous',
-    ization: 'ize',
-    ation: 'ate',
-    ator: 'ate',
-    alism: 'al',
-    iveness: 'ive',
-    fulness: 'ful',
-    ousness: 'ous',
-    aliti: 'al',
-    iviti: 'ive',
-    biliti: 'ble',
-    logi: 'log'
-  };
-
-  var step3list = {
-    icate: 'ic',
-    ative: '',
-    alize: 'al',
-    iciti: 'ic',
-    ical: 'ic',
-    ful: '',
-    ness: ''
-  };
-
-  var c = "[^aeiou]";          // consonant
-  var v = "[aeiouy]";          // vowel
-  var C = c + "[^aeiouy]*";    // consonant sequence
-  var V = v + "[aeiou]*";      // vowel sequence
-
-  var mgr0 = "^(" + C + ")?" + V + C;                      // [C]VC... is m>0
-  var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$";    // [C]VC[V] is m=1
-  var mgr1 = "^(" + C + ")?" + V + C + V + C;              // [C]VCVC... is m>1
-  var s_v   = "^(" + C + ")?" + v;                         // vowel in stem
-
-  this.stemWord = function (w) {
-    var stem;
-    var suffix;
-    var firstch;
-    var origword = w;
-
-    if (w.length < 3)
-      return w;
-
-    var re;
-    var re2;
-    var re3;
-    var re4;
-
-    firstch = w.substr(0,1);
-    if (firstch == "y")
-      w = firstch.toUpperCase() + w.substr(1);
-
-    // Step 1a
-    re = /^(.+?)(ss|i)es$/;
-    re2 = /^(.+?)([^s])s$/;
-
-    if (re.test(w))
-      w = w.replace(re,"$1$2");
-    else if (re2.test(w))
-      w = w.replace(re2,"$1$2");
-
-    // Step 1b
-    re = /^(.+?)eed$/;
-    re2 = /^(.+?)(ed|ing)$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      re = new RegExp(mgr0);
-      if (re.test(fp[1])) {
-        re = /.$/;
-        w = w.replace(re,"");
-      }
-    }
-    else if (re2.test(w)) {
-      var fp = re2.exec(w);
-      stem = fp[1];
-      re2 = new RegExp(s_v);
-      if (re2.test(stem)) {
-        w = stem;
-        re2 = /(at|bl|iz)$/;
-        re3 = new RegExp("([^aeiouylsz])\\1$");
-        re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
-        if (re2.test(w))
-          w = w + "e";
-        else if (re3.test(w)) {
-          re = /.$/;
-          w = w.replace(re,"");
-        }
-        else if (re4.test(w))
-          w = w + "e";
-      }
-    }
-
-    // Step 1c
-    re = /^(.+?)y$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      stem = fp[1];
-      re = new RegExp(s_v);
-      if (re.test(stem))
-        w = stem + "i";
-    }
-
-    // Step 2
-    re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      stem = fp[1];
-      suffix = fp[2];
-      re = new RegExp(mgr0);
-      if (re.test(stem))
-        w = stem + step2list[suffix];
-    }
-
-    // Step 3
-    re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      stem = fp[1];
-      suffix = fp[2];
-      re = new RegExp(mgr0);
-      if (re.test(stem))
-        w = stem + step3list[suffix];
-    }
-
-    // Step 4
-    re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
-    re2 = /^(.+?)(s|t)(ion)$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      stem = fp[1];
-      re = new RegExp(mgr1);
-      if (re.test(stem))
-        w = stem;
-    }
-    else if (re2.test(w)) {
-      var fp = re2.exec(w);
-      stem = fp[1] + fp[2];
-      re2 = new RegExp(mgr1);
-      if (re2.test(stem))
-        w = stem;
-    }
-
-    // Step 5
-    re = /^(.+?)e$/;
-    if (re.test(w)) {
-      var fp = re.exec(w);
-      stem = fp[1];
-      re = new RegExp(mgr1);
-      re2 = new RegExp(meq1);
-      re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
-      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
-        w = stem;
-    }
-    re = /ll$/;
-    re2 = new RegExp(mgr1);
-    if (re.test(w) && re2.test(w)) {
-      re = /.$/;
-      w = w.replace(re,"");
-    }
-
-    // and turn initial Y back to y
-    if (firstch == "y")
-      w = firstch.toLowerCase() + w.substr(1);
-    return w;
-  }
-}
-
-
-
-
-var splitChars = (function() {
-    var result = {};
-    var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
-         1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
-         2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
-         2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
-         3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
-         3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
-         4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
-         8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
-         11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
-         43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
-    var i, j, start, end;
-    for (i = 0; i < singles.length; i++) {
-        result[singles[i]] = true;
-    }
-    var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
-         [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
-         [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
-         [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
-         [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
-         [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
-         [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
-         [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
-         [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
-         [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
-         [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
-         [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
-         [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
-         [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
-         [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
-         [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
-         [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
-         [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
-         [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
-         [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
-         [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
-         [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
-         [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
-         [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
-         [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
-         [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
-         [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
-         [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
-         [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
-         [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
-         [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
-         [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
-         [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
-         [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
-         [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
-         [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
-         [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
-         [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
-         [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
-         [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
-         [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
-         [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
-         [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
-         [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
-         [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
-         [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
-         [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
-         [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
-         [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
-    for (i = 0; i < ranges.length; i++) {
-        start = ranges[i][0];
-        end = ranges[i][1];
-        for (j = start; j <= end; j++) {
-            result[j] = true;
-        }
-    }
-    return result;
-})();
-
-function splitQuery(query) {
-    var result = [];
-    var start = -1;
-    for (var i = 0; i < query.length; i++) {
-        if (splitChars[query.charCodeAt(i)]) {
-            if (start !== -1) {
-                result.push(query.slice(start, i));
-                start = -1;
-            }
-        } else if (start === -1) {
-            start = i;
-        }
-    }
-    if (start !== -1) {
-        result.push(query.slice(start));
-    }
-    return result;
-}
-
-
diff --git a/docs/build/html/_static/pygments.css b/docs/build/html/_static/pygments.css
deleted file mode 100644
index 87f8bd121b2352751b730a3d04fff0e1ecd946b2..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/pygments.css
+++ /dev/null
@@ -1,82 +0,0 @@
-pre { line-height: 125%; }
-td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
-span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
-td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
-span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
-.highlight .hll { background-color: #ffffcc }
-.highlight { background: #f8f8f8; }
-.highlight .c { color: #8f5902; font-style: italic } /* Comment */
-.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */
-.highlight .g { color: #000000 } /* Generic */
-.highlight .k { color: #004461; font-weight: bold } /* Keyword */
-.highlight .l { color: #000000 } /* Literal */
-.highlight .n { color: #000000 } /* Name */
-.highlight .o { color: #582800 } /* Operator */
-.highlight .x { color: #000000 } /* Other */
-.highlight .p { color: #000000; font-weight: bold } /* Punctuation */
-.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */
-.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */
-.highlight .cp { color: #8f5902 } /* Comment.Preproc */
-.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */
-.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */
-.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */
-.highlight .gd { color: #a40000 } /* Generic.Deleted */
-.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */
-.highlight .gr { color: #ef2929 } /* Generic.Error */
-.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
-.highlight .gi { color: #00A000 } /* Generic.Inserted */
-.highlight .go { color: #888888 } /* Generic.Output */
-.highlight .gp { color: #745334 } /* Generic.Prompt */
-.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */
-.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
-.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */
-.highlight .kc { color: #004461; font-weight: bold } /* Keyword.Constant */
-.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */
-.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */
-.highlight .kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */
-.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */
-.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */
-.highlight .ld { color: #000000 } /* Literal.Date */
-.highlight .m { color: #990000 } /* Literal.Number */
-.highlight .s { color: #4e9a06 } /* Literal.String */
-.highlight .na { color: #c4a000 } /* Name.Attribute */
-.highlight .nb { color: #004461 } /* Name.Builtin */
-.highlight .nc { color: #000000 } /* Name.Class */
-.highlight .no { color: #000000 } /* Name.Constant */
-.highlight .nd { color: #888888 } /* Name.Decorator */
-.highlight .ni { color: #ce5c00 } /* Name.Entity */
-.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */
-.highlight .nf { color: #000000 } /* Name.Function */
-.highlight .nl { color: #f57900 } /* Name.Label */
-.highlight .nn { color: #000000 } /* Name.Namespace */
-.highlight .nx { color: #000000 } /* Name.Other */
-.highlight .py { color: #000000 } /* Name.Property */
-.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */
-.highlight .nv { color: #000000 } /* Name.Variable */
-.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */
-.highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */
-.highlight .mb { color: #990000 } /* Literal.Number.Bin */
-.highlight .mf { color: #990000 } /* Literal.Number.Float */
-.highlight .mh { color: #990000 } /* Literal.Number.Hex */
-.highlight .mi { color: #990000 } /* Literal.Number.Integer */
-.highlight .mo { color: #990000 } /* Literal.Number.Oct */
-.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */
-.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */
-.highlight .sc { color: #4e9a06 } /* Literal.String.Char */
-.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */
-.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */
-.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */
-.highlight .se { color: #4e9a06 } /* Literal.String.Escape */
-.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */
-.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */
-.highlight .sx { color: #4e9a06 } /* Literal.String.Other */
-.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */
-.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */
-.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */
-.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */
-.highlight .fm { color: #000000 } /* Name.Function.Magic */
-.highlight .vc { color: #000000 } /* Name.Variable.Class */
-.highlight .vg { color: #000000 } /* Name.Variable.Global */
-.highlight .vi { color: #000000 } /* Name.Variable.Instance */
-.highlight .vm { color: #000000 } /* Name.Variable.Magic */
-.highlight .il { color: #990000 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/docs/build/html/_static/searchtools.js b/docs/build/html/_static/searchtools.js
deleted file mode 100644
index 2d7785937bb75d4c04aca720525940817bf5f92e..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/searchtools.js
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * searchtools.js
- * ~~~~~~~~~~~~~~~~
- *
- * Sphinx JavaScript utilities for the full-text search.
- *
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
-if (!Scorer) {
-  /**
-   * Simple result scoring code.
-   */
-  var Scorer = {
-    // Implement the following function to further tweak the score for each result
-    // The function takes a result array [filename, title, anchor, descr, score]
-    // and returns the new score.
-    /*
-    score: function(result) {
-      return result[4];
-    },
-    */
-
-    // query matches the full name of an object
-    objNameMatch: 11,
-    // or matches in the last dotted part of the object name
-    objPartialMatch: 6,
-    // Additive scores depending on the priority of the object
-    objPrio: {0:  15,   // used to be importantResults
-              1:  5,   // used to be objectResults
-              2: -5},  // used to be unimportantResults
-    //  Used when the priority is not in the mapping.
-    objPrioDefault: 0,
-
-    // query found in title
-    title: 15,
-    partialTitle: 7,
-    // query found in terms
-    term: 5,
-    partialTerm: 2
-  };
-}
-
-if (!splitQuery) {
-  function splitQuery(query) {
-    return query.split(/\s+/);
-  }
-}
-
-/**
- * Search Module
- */
-var Search = {
-
-  _index : null,
-  _queued_query : null,
-  _pulse_status : -1,
-
-  htmlToText : function(htmlString) {
-      var virtualDocument = document.implementation.createHTMLDocument('virtual');
-      var htmlElement = $(htmlString, virtualDocument);
-      htmlElement.find('.headerlink').remove();
-      docContent = htmlElement.find('[role=main]')[0];
-      if(docContent === undefined) {
-          console.warn("Content block not found. Sphinx search tries to obtain it " +
-                       "via '[role=main]'. Could you check your theme or template.");
-          return "";
-      }
-      return docContent.textContent || docContent.innerText;
-  },
-
-  init : function() {
-      var params = $.getQueryParameters();
-      if (params.q) {
-          var query = params.q[0];
-          $('input[name="q"]')[0].value = query;
-          this.performSearch(query);
-      }
-  },
-
-  loadIndex : function(url) {
-    $.ajax({type: "GET", url: url, data: null,
-            dataType: "script", cache: true,
-            complete: function(jqxhr, textstatus) {
-              if (textstatus != "success") {
-                document.getElementById("searchindexloader").src = url;
-              }
-            }});
-  },
-
-  setIndex : function(index) {
-    var q;
-    this._index = index;
-    if ((q = this._queued_query) !== null) {
-      this._queued_query = null;
-      Search.query(q);
-    }
-  },
-
-  hasIndex : function() {
-      return this._index !== null;
-  },
-
-  deferQuery : function(query) {
-      this._queued_query = query;
-  },
-
-  stopPulse : function() {
-      this._pulse_status = 0;
-  },
-
-  startPulse : function() {
-    if (this._pulse_status >= 0)
-        return;
-    function pulse() {
-      var i;
-      Search._pulse_status = (Search._pulse_status + 1) % 4;
-      var dotString = '';
-      for (i = 0; i < Search._pulse_status; i++)
-        dotString += '.';
-      Search.dots.text(dotString);
-      if (Search._pulse_status > -1)
-        window.setTimeout(pulse, 500);
-    }
-    pulse();
-  },
-
-  /**
-   * perform a search for something (or wait until index is loaded)
-   */
-  performSearch : function(query) {
-    // create the required interface elements
-    this.out = $('#search-results');
-    this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
-    this.dots = $('<span></span>').appendTo(this.title);
-    this.status = $('<p class="search-summary">&nbsp;</p>').appendTo(this.out);
-    this.output = $('<ul class="search"/>').appendTo(this.out);
-
-    $('#search-progress').text(_('Preparing search...'));
-    this.startPulse();
-
-    // index already loaded, the browser was quick!
-    if (this.hasIndex())
-      this.query(query);
-    else
-      this.deferQuery(query);
-  },
-
-  /**
-   * execute search (requires search index to be loaded)
-   */
-  query : function(query) {
-    var i;
-
-    // stem the searchterms and add them to the correct list
-    var stemmer = new Stemmer();
-    var searchterms = [];
-    var excluded = [];
-    var hlterms = [];
-    var tmp = splitQuery(query);
-    var objectterms = [];
-    for (i = 0; i < tmp.length; i++) {
-      if (tmp[i] !== "") {
-          objectterms.push(tmp[i].toLowerCase());
-      }
-
-      if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i] === "") {
-        // skip this "word"
-        continue;
-      }
-      // stem the word
-      var word = stemmer.stemWord(tmp[i].toLowerCase());
-      // prevent stemmer from cutting word smaller than two chars
-      if(word.length < 3 && tmp[i].length >= 3) {
-        word = tmp[i];
-      }
-      var toAppend;
-      // select the correct list
-      if (word[0] == '-') {
-        toAppend = excluded;
-        word = word.substr(1);
-      }
-      else {
-        toAppend = searchterms;
-        hlterms.push(tmp[i].toLowerCase());
-      }
-      // only add if not already in the list
-      if (!$u.contains(toAppend, word))
-        toAppend.push(word);
-    }
-    var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
-
-    // console.debug('SEARCH: searching for:');
-    // console.info('required: ', searchterms);
-    // console.info('excluded: ', excluded);
-
-    // prepare search
-    var terms = this._index.terms;
-    var titleterms = this._index.titleterms;
-
-    // array of [filename, title, anchor, descr, score]
-    var results = [];
-    $('#search-progress').empty();
-
-    // lookup as object
-    for (i = 0; i < objectterms.length; i++) {
-      var others = [].concat(objectterms.slice(0, i),
-                             objectterms.slice(i+1, objectterms.length));
-      results = results.concat(this.performObjectSearch(objectterms[i], others));
-    }
-
-    // lookup as search terms in fulltext
-    results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
-
-    // let the scorer override scores with a custom scoring function
-    if (Scorer.score) {
-      for (i = 0; i < results.length; i++)
-        results[i][4] = Scorer.score(results[i]);
-    }
-
-    // now sort the results by score (in opposite order of appearance, since the
-    // display function below uses pop() to retrieve items) and then
-    // alphabetically
-    results.sort(function(a, b) {
-      var left = a[4];
-      var right = b[4];
-      if (left > right) {
-        return 1;
-      } else if (left < right) {
-        return -1;
-      } else {
-        // same score: sort alphabetically
-        left = a[1].toLowerCase();
-        right = b[1].toLowerCase();
-        return (left > right) ? -1 : ((left < right) ? 1 : 0);
-      }
-    });
-
-    // for debugging
-    //Search.lastresults = results.slice();  // a copy
-    //console.info('search results:', Search.lastresults);
-
-    // print the results
-    var resultCount = results.length;
-    function displayNextItem() {
-      // results left, load the summary and display it
-      if (results.length) {
-        var item = results.pop();
-        var listItem = $('<li></li>');
-        var requestUrl = "";
-        var linkUrl = "";
-        if (DOCUMENTATION_OPTIONS.BUILDER === 'dirhtml') {
-          // dirhtml builder
-          var dirname = item[0] + '/';
-          if (dirname.match(/\/index\/$/)) {
-            dirname = dirname.substring(0, dirname.length-6);
-          } else if (dirname == 'index/') {
-            dirname = '';
-          }
-          requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname;
-          linkUrl = requestUrl;
-
-        } else {
-          // normal html builders
-          requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX;
-          linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX;
-        }
-        listItem.append($('<a/>').attr('href',
-            linkUrl +
-            highlightstring + item[2]).html(item[1]));
-        if (item[3]) {
-          listItem.append($('<span> (' + item[3] + ')</span>'));
-          Search.output.append(listItem);
-          setTimeout(function() {
-            displayNextItem();
-          }, 5);
-        } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
-          $.ajax({url: requestUrl,
-                  dataType: "text",
-                  complete: function(jqxhr, textstatus) {
-                    var data = jqxhr.responseText;
-                    if (data !== '' && data !== undefined) {
-                      var summary = Search.makeSearchSummary(data, searchterms, hlterms);
-                      if (summary) {
-                        listItem.append(summary);
-                      }
-                    }
-                    Search.output.append(listItem);
-                    setTimeout(function() {
-                      displayNextItem();
-                    }, 5);
-                  }});
-        } else {
-          // no source available, just display title
-          Search.output.append(listItem);
-          setTimeout(function() {
-            displayNextItem();
-          }, 5);
-        }
-      }
-      // search finished, update title and status message
-      else {
-        Search.stopPulse();
-        Search.title.text(_('Search Results'));
-        if (!resultCount)
-          Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
-        else
-            Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
-        Search.status.fadeIn(500);
-      }
-    }
-    displayNextItem();
-  },
-
-  /**
-   * search for object names
-   */
-  performObjectSearch : function(object, otherterms) {
-    var filenames = this._index.filenames;
-    var docnames = this._index.docnames;
-    var objects = this._index.objects;
-    var objnames = this._index.objnames;
-    var titles = this._index.titles;
-
-    var i;
-    var results = [];
-
-    for (var prefix in objects) {
-      for (var iMatch = 0; iMatch != objects[prefix].length; ++iMatch) {
-        var match = objects[prefix][iMatch];
-        var name = match[4];
-        var fullname = (prefix ? prefix + '.' : '') + name;
-        var fullnameLower = fullname.toLowerCase()
-        if (fullnameLower.indexOf(object) > -1) {
-          var score = 0;
-          var parts = fullnameLower.split('.');
-          // check for different match types: exact matches of full name or
-          // "last name" (i.e. last dotted part)
-          if (fullnameLower == object || parts[parts.length - 1] == object) {
-            score += Scorer.objNameMatch;
-          // matches in last name
-          } else if (parts[parts.length - 1].indexOf(object) > -1) {
-            score += Scorer.objPartialMatch;
-          }
-          var objname = objnames[match[1]][2];
-          var title = titles[match[0]];
-          // If more than one term searched for, we require other words to be
-          // found in the name/title/description
-          if (otherterms.length > 0) {
-            var haystack = (prefix + ' ' + name + ' ' +
-                            objname + ' ' + title).toLowerCase();
-            var allfound = true;
-            for (i = 0; i < otherterms.length; i++) {
-              if (haystack.indexOf(otherterms[i]) == -1) {
-                allfound = false;
-                break;
-              }
-            }
-            if (!allfound) {
-              continue;
-            }
-          }
-          var descr = objname + _(', in ') + title;
-
-          var anchor = match[3];
-          if (anchor === '')
-            anchor = fullname;
-          else if (anchor == '-')
-            anchor = objnames[match[1]][1] + '-' + fullname;
-          // add custom score for some objects according to scorer
-          if (Scorer.objPrio.hasOwnProperty(match[2])) {
-            score += Scorer.objPrio[match[2]];
-          } else {
-            score += Scorer.objPrioDefault;
-          }
-          results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
-        }
-      }
-    }
-
-    return results;
-  },
-
-  /**
-   * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
-   */
-  escapeRegExp : function(string) {
-    return string.replace(/[.*+\-?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
-  },
-
-  /**
-   * search for full-text terms in the index
-   */
-  performTermsSearch : function(searchterms, excluded, terms, titleterms) {
-    var docnames = this._index.docnames;
-    var filenames = this._index.filenames;
-    var titles = this._index.titles;
-
-    var i, j, file;
-    var fileMap = {};
-    var scoreMap = {};
-    var results = [];
-
-    // perform the search on the required terms
-    for (i = 0; i < searchterms.length; i++) {
-      var word = searchterms[i];
-      var files = [];
-      var _o = [
-        {files: terms[word], score: Scorer.term},
-        {files: titleterms[word], score: Scorer.title}
-      ];
-      // add support for partial matches
-      if (word.length > 2) {
-        var word_regex = this.escapeRegExp(word);
-        for (var w in terms) {
-          if (w.match(word_regex) && !terms[word]) {
-            _o.push({files: terms[w], score: Scorer.partialTerm})
-          }
-        }
-        for (var w in titleterms) {
-          if (w.match(word_regex) && !titleterms[word]) {
-              _o.push({files: titleterms[w], score: Scorer.partialTitle})
-          }
-        }
-      }
-
-      // no match but word was a required one
-      if ($u.every(_o, function(o){return o.files === undefined;})) {
-        break;
-      }
-      // found search word in contents
-      $u.each(_o, function(o) {
-        var _files = o.files;
-        if (_files === undefined)
-          return
-
-        if (_files.length === undefined)
-          _files = [_files];
-        files = files.concat(_files);
-
-        // set score for the word in each file to Scorer.term
-        for (j = 0; j < _files.length; j++) {
-          file = _files[j];
-          if (!(file in scoreMap))
-            scoreMap[file] = {};
-          scoreMap[file][word] = o.score;
-        }
-      });
-
-      // create the mapping
-      for (j = 0; j < files.length; j++) {
-        file = files[j];
-        if (file in fileMap && fileMap[file].indexOf(word) === -1)
-          fileMap[file].push(word);
-        else
-          fileMap[file] = [word];
-      }
-    }
-
-    // now check if the files don't contain excluded terms
-    for (file in fileMap) {
-      var valid = true;
-
-      // check if all requirements are matched
-      var filteredTermCount = // as search terms with length < 3 are discarded: ignore
-        searchterms.filter(function(term){return term.length > 2}).length
-      if (
-        fileMap[file].length != searchterms.length &&
-        fileMap[file].length != filteredTermCount
-      ) continue;
-
-      // ensure that none of the excluded terms is in the search result
-      for (i = 0; i < excluded.length; i++) {
-        if (terms[excluded[i]] == file ||
-            titleterms[excluded[i]] == file ||
-            $u.contains(terms[excluded[i]] || [], file) ||
-            $u.contains(titleterms[excluded[i]] || [], file)) {
-          valid = false;
-          break;
-        }
-      }
-
-      // if we have still a valid result we can add it to the result list
-      if (valid) {
-        // select one (max) score for the file.
-        // for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
-        var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
-        results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
-      }
-    }
-    return results;
-  },
-
-  /**
-   * helper function to return a node containing the
-   * search summary for a given text. keywords is a list
-   * of stemmed words, hlwords is the list of normal, unstemmed
-   * words. the first one is used to find the occurrence, the
-   * latter for highlighting it.
-   */
-  makeSearchSummary : function(htmlText, keywords, hlwords) {
-    var text = Search.htmlToText(htmlText);
-    if (text == "") {
-      return null;
-    }
-    var textLower = text.toLowerCase();
-    var start = 0;
-    $.each(keywords, function() {
-      var i = textLower.indexOf(this.toLowerCase());
-      if (i > -1)
-        start = i;
-    });
-    start = Math.max(start - 120, 0);
-    var excerpt = ((start > 0) ? '...' : '') +
-      $.trim(text.substr(start, 240)) +
-      ((start + 240 - text.length) ? '...' : '');
-    var rv = $('<p class="context"></p>').text(excerpt);
-    $.each(hlwords, function() {
-      rv = rv.highlightText(this, 'highlighted');
-    });
-    return rv;
-  }
-};
-
-$(document).ready(function() {
-  Search.init();
-});
diff --git a/docs/build/html/_static/underscore-1.13.1.js b/docs/build/html/_static/underscore-1.13.1.js
deleted file mode 100644
index ffd77af9648a47d389f2d6976d4aa1c44d7ce7ce..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/underscore-1.13.1.js
+++ /dev/null
@@ -1,2042 +0,0 @@
-(function (global, factory) {
-  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
-  typeof define === 'function' && define.amd ? define('underscore', factory) :
-  (global = typeof globalThis !== 'undefined' ? globalThis : global || self, (function () {
-    var current = global._;
-    var exports = global._ = factory();
-    exports.noConflict = function () { global._ = current; return exports; };
-  }()));
-}(this, (function () {
-  //     Underscore.js 1.13.1
-  //     https://underscorejs.org
-  //     (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors
-  //     Underscore may be freely distributed under the MIT license.
-
-  // Current version.
-  var VERSION = '1.13.1';
-
-  // Establish the root object, `window` (`self`) in the browser, `global`
-  // on the server, or `this` in some virtual machines. We use `self`
-  // instead of `window` for `WebWorker` support.
-  var root = typeof self == 'object' && self.self === self && self ||
-            typeof global == 'object' && global.global === global && global ||
-            Function('return this')() ||
-            {};
-
-  // Save bytes in the minified (but not gzipped) version:
-  var ArrayProto = Array.prototype, ObjProto = Object.prototype;
-  var SymbolProto = typeof Symbol !== 'undefined' ? Symbol.prototype : null;
-
-  // Create quick reference variables for speed access to core prototypes.
-  var push = ArrayProto.push,
-      slice = ArrayProto.slice,
-      toString = ObjProto.toString,
-      hasOwnProperty = ObjProto.hasOwnProperty;
-
-  // Modern feature detection.
-  var supportsArrayBuffer = typeof ArrayBuffer !== 'undefined',
-      supportsDataView = typeof DataView !== 'undefined';
-
-  // All **ECMAScript 5+** native function implementations that we hope to use
-  // are declared here.
-  var nativeIsArray = Array.isArray,
-      nativeKeys = Object.keys,
-      nativeCreate = Object.create,
-      nativeIsView = supportsArrayBuffer && ArrayBuffer.isView;
-
-  // Create references to these builtin functions because we override them.
-  var _isNaN = isNaN,
-      _isFinite = isFinite;
-
-  // Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed.
-  var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString');
-  var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString',
-    'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString'];
-
-  // The largest integer that can be represented exactly.
-  var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1;
-
-  // Some functions take a variable number of arguments, or a few expected
-  // arguments at the beginning and then a variable number of values to operate
-  // on. This helper accumulates all remaining arguments past the function’s
-  // argument length (or an explicit `startIndex`), into an array that becomes
-  // the last argument. Similar to ES6’s "rest parameter".
-  function restArguments(func, startIndex) {
-    startIndex = startIndex == null ? func.length - 1 : +startIndex;
-    return function() {
-      var length = Math.max(arguments.length - startIndex, 0),
-          rest = Array(length),
-          index = 0;
-      for (; index < length; index++) {
-        rest[index] = arguments[index + startIndex];
-      }
-      switch (startIndex) {
-        case 0: return func.call(this, rest);
-        case 1: return func.call(this, arguments[0], rest);
-        case 2: return func.call(this, arguments[0], arguments[1], rest);
-      }
-      var args = Array(startIndex + 1);
-      for (index = 0; index < startIndex; index++) {
-        args[index] = arguments[index];
-      }
-      args[startIndex] = rest;
-      return func.apply(this, args);
-    };
-  }
-
-  // Is a given variable an object?
-  function isObject(obj) {
-    var type = typeof obj;
-    return type === 'function' || type === 'object' && !!obj;
-  }
-
-  // Is a given value equal to null?
-  function isNull(obj) {
-    return obj === null;
-  }
-
-  // Is a given variable undefined?
-  function isUndefined(obj) {
-    return obj === void 0;
-  }
-
-  // Is a given value a boolean?
-  function isBoolean(obj) {
-    return obj === true || obj === false || toString.call(obj) === '[object Boolean]';
-  }
-
-  // Is a given value a DOM element?
-  function isElement(obj) {
-    return !!(obj && obj.nodeType === 1);
-  }
-
-  // Internal function for creating a `toString`-based type tester.
-  function tagTester(name) {
-    var tag = '[object ' + name + ']';
-    return function(obj) {
-      return toString.call(obj) === tag;
-    };
-  }
-
-  var isString = tagTester('String');
-
-  var isNumber = tagTester('Number');
-
-  var isDate = tagTester('Date');
-
-  var isRegExp = tagTester('RegExp');
-
-  var isError = tagTester('Error');
-
-  var isSymbol = tagTester('Symbol');
-
-  var isArrayBuffer = tagTester('ArrayBuffer');
-
-  var isFunction = tagTester('Function');
-
-  // Optimize `isFunction` if appropriate. Work around some `typeof` bugs in old
-  // v8, IE 11 (#1621), Safari 8 (#1929), and PhantomJS (#2236).
-  var nodelist = root.document && root.document.childNodes;
-  if (typeof /./ != 'function' && typeof Int8Array != 'object' && typeof nodelist != 'function') {
-    isFunction = function(obj) {
-      return typeof obj == 'function' || false;
-    };
-  }
-
-  var isFunction$1 = isFunction;
-
-  var hasObjectTag = tagTester('Object');
-
-  // In IE 10 - Edge 13, `DataView` has string tag `'[object Object]'`.
-  // In IE 11, the most common among them, this problem also applies to
-  // `Map`, `WeakMap` and `Set`.
-  var hasStringTagBug = (
-        supportsDataView && hasObjectTag(new DataView(new ArrayBuffer(8)))
-      ),
-      isIE11 = (typeof Map !== 'undefined' && hasObjectTag(new Map));
-
-  var isDataView = tagTester('DataView');
-
-  // In IE 10 - Edge 13, we need a different heuristic
-  // to determine whether an object is a `DataView`.
-  function ie10IsDataView(obj) {
-    return obj != null && isFunction$1(obj.getInt8) && isArrayBuffer(obj.buffer);
-  }
-
-  var isDataView$1 = (hasStringTagBug ? ie10IsDataView : isDataView);
-
-  // Is a given value an array?
-  // Delegates to ECMA5's native `Array.isArray`.
-  var isArray = nativeIsArray || tagTester('Array');
-
-  // Internal function to check whether `key` is an own property name of `obj`.
-  function has$1(obj, key) {
-    return obj != null && hasOwnProperty.call(obj, key);
-  }
-
-  var isArguments = tagTester('Arguments');
-
-  // Define a fallback version of the method in browsers (ahem, IE < 9), where
-  // there isn't any inspectable "Arguments" type.
-  (function() {
-    if (!isArguments(arguments)) {
-      isArguments = function(obj) {
-        return has$1(obj, 'callee');
-      };
-    }
-  }());
-
-  var isArguments$1 = isArguments;
-
-  // Is a given object a finite number?
-  function isFinite$1(obj) {
-    return !isSymbol(obj) && _isFinite(obj) && !isNaN(parseFloat(obj));
-  }
-
-  // Is the given value `NaN`?
-  function isNaN$1(obj) {
-    return isNumber(obj) && _isNaN(obj);
-  }
-
-  // Predicate-generating function. Often useful outside of Underscore.
-  function constant(value) {
-    return function() {
-      return value;
-    };
-  }
-
-  // Common internal logic for `isArrayLike` and `isBufferLike`.
-  function createSizePropertyCheck(getSizeProperty) {
-    return function(collection) {
-      var sizeProperty = getSizeProperty(collection);
-      return typeof sizeProperty == 'number' && sizeProperty >= 0 && sizeProperty <= MAX_ARRAY_INDEX;
-    }
-  }
-
-  // Internal helper to generate a function to obtain property `key` from `obj`.
-  function shallowProperty(key) {
-    return function(obj) {
-      return obj == null ? void 0 : obj[key];
-    };
-  }
-
-  // Internal helper to obtain the `byteLength` property of an object.
-  var getByteLength = shallowProperty('byteLength');
-
-  // Internal helper to determine whether we should spend extensive checks against
-  // `ArrayBuffer` et al.
-  var isBufferLike = createSizePropertyCheck(getByteLength);
-
-  // Is a given value a typed array?
-  var typedArrayPattern = /\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/;
-  function isTypedArray(obj) {
-    // `ArrayBuffer.isView` is the most future-proof, so use it when available.
-    // Otherwise, fall back on the above regular expression.
-    return nativeIsView ? (nativeIsView(obj) && !isDataView$1(obj)) :
-                  isBufferLike(obj) && typedArrayPattern.test(toString.call(obj));
-  }
-
-  var isTypedArray$1 = supportsArrayBuffer ? isTypedArray : constant(false);
-
-  // Internal helper to obtain the `length` property of an object.
-  var getLength = shallowProperty('length');
-
-  // Internal helper to create a simple lookup structure.
-  // `collectNonEnumProps` used to depend on `_.contains`, but this led to
-  // circular imports. `emulatedSet` is a one-off solution that only works for
-  // arrays of strings.
-  function emulatedSet(keys) {
-    var hash = {};
-    for (var l = keys.length, i = 0; i < l; ++i) hash[keys[i]] = true;
-    return {
-      contains: function(key) { return hash[key]; },
-      push: function(key) {
-        hash[key] = true;
-        return keys.push(key);
-      }
-    };
-  }
-
-  // Internal helper. Checks `keys` for the presence of keys in IE < 9 that won't
-  // be iterated by `for key in ...` and thus missed. Extends `keys` in place if
-  // needed.
-  function collectNonEnumProps(obj, keys) {
-    keys = emulatedSet(keys);
-    var nonEnumIdx = nonEnumerableProps.length;
-    var constructor = obj.constructor;
-    var proto = isFunction$1(constructor) && constructor.prototype || ObjProto;
-
-    // Constructor is a special case.
-    var prop = 'constructor';
-    if (has$1(obj, prop) && !keys.contains(prop)) keys.push(prop);
-
-    while (nonEnumIdx--) {
-      prop = nonEnumerableProps[nonEnumIdx];
-      if (prop in obj && obj[prop] !== proto[prop] && !keys.contains(prop)) {
-        keys.push(prop);
-      }
-    }
-  }
-
-  // Retrieve the names of an object's own properties.
-  // Delegates to **ECMAScript 5**'s native `Object.keys`.
-  function keys(obj) {
-    if (!isObject(obj)) return [];
-    if (nativeKeys) return nativeKeys(obj);
-    var keys = [];
-    for (var key in obj) if (has$1(obj, key)) keys.push(key);
-    // Ahem, IE < 9.
-    if (hasEnumBug) collectNonEnumProps(obj, keys);
-    return keys;
-  }
-
-  // Is a given array, string, or object empty?
-  // An "empty" object has no enumerable own-properties.
-  function isEmpty(obj) {
-    if (obj == null) return true;
-    // Skip the more expensive `toString`-based type checks if `obj` has no
-    // `.length`.
-    var length = getLength(obj);
-    if (typeof length == 'number' && (
-      isArray(obj) || isString(obj) || isArguments$1(obj)
-    )) return length === 0;
-    return getLength(keys(obj)) === 0;
-  }
-
-  // Returns whether an object has a given set of `key:value` pairs.
-  function isMatch(object, attrs) {
-    var _keys = keys(attrs), length = _keys.length;
-    if (object == null) return !length;
-    var obj = Object(object);
-    for (var i = 0; i < length; i++) {
-      var key = _keys[i];
-      if (attrs[key] !== obj[key] || !(key in obj)) return false;
-    }
-    return true;
-  }
-
-  // If Underscore is called as a function, it returns a wrapped object that can
-  // be used OO-style. This wrapper holds altered versions of all functions added
-  // through `_.mixin`. Wrapped objects may be chained.
-  function _$1(obj) {
-    if (obj instanceof _$1) return obj;
-    if (!(this instanceof _$1)) return new _$1(obj);
-    this._wrapped = obj;
-  }
-
-  _$1.VERSION = VERSION;
-
-  // Extracts the result from a wrapped and chained object.
-  _$1.prototype.value = function() {
-    return this._wrapped;
-  };
-
-  // Provide unwrapping proxies for some methods used in engine operations
-  // such as arithmetic and JSON stringification.
-  _$1.prototype.valueOf = _$1.prototype.toJSON = _$1.prototype.value;
-
-  _$1.prototype.toString = function() {
-    return String(this._wrapped);
-  };
-
-  // Internal function to wrap or shallow-copy an ArrayBuffer,
-  // typed array or DataView to a new view, reusing the buffer.
-  function toBufferView(bufferSource) {
-    return new Uint8Array(
-      bufferSource.buffer || bufferSource,
-      bufferSource.byteOffset || 0,
-      getByteLength(bufferSource)
-    );
-  }
-
-  // We use this string twice, so give it a name for minification.
-  var tagDataView = '[object DataView]';
-
-  // Internal recursive comparison function for `_.isEqual`.
-  function eq(a, b, aStack, bStack) {
-    // Identical objects are equal. `0 === -0`, but they aren't identical.
-    // See the [Harmony `egal` proposal](https://wiki.ecmascript.org/doku.php?id=harmony:egal).
-    if (a === b) return a !== 0 || 1 / a === 1 / b;
-    // `null` or `undefined` only equal to itself (strict comparison).
-    if (a == null || b == null) return false;
-    // `NaN`s are equivalent, but non-reflexive.
-    if (a !== a) return b !== b;
-    // Exhaust primitive checks
-    var type = typeof a;
-    if (type !== 'function' && type !== 'object' && typeof b != 'object') return false;
-    return deepEq(a, b, aStack, bStack);
-  }
-
-  // Internal recursive comparison function for `_.isEqual`.
-  function deepEq(a, b, aStack, bStack) {
-    // Unwrap any wrapped objects.
-    if (a instanceof _$1) a = a._wrapped;
-    if (b instanceof _$1) b = b._wrapped;
-    // Compare `[[Class]]` names.
-    var className = toString.call(a);
-    if (className !== toString.call(b)) return false;
-    // Work around a bug in IE 10 - Edge 13.
-    if (hasStringTagBug && className == '[object Object]' && isDataView$1(a)) {
-      if (!isDataView$1(b)) return false;
-      className = tagDataView;
-    }
-    switch (className) {
-      // These types are compared by value.
-      case '[object RegExp]':
-        // RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i')
-      case '[object String]':
-        // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
-        // equivalent to `new String("5")`.
-        return '' + a === '' + b;
-      case '[object Number]':
-        // `NaN`s are equivalent, but non-reflexive.
-        // Object(NaN) is equivalent to NaN.
-        if (+a !== +a) return +b !== +b;
-        // An `egal` comparison is performed for other numeric values.
-        return +a === 0 ? 1 / +a === 1 / b : +a === +b;
-      case '[object Date]':
-      case '[object Boolean]':
-        // Coerce dates and booleans to numeric primitive values. Dates are compared by their
-        // millisecond representations. Note that invalid dates with millisecond representations
-        // of `NaN` are not equivalent.
-        return +a === +b;
-      case '[object Symbol]':
-        return SymbolProto.valueOf.call(a) === SymbolProto.valueOf.call(b);
-      case '[object ArrayBuffer]':
-      case tagDataView:
-        // Coerce to typed array so we can fall through.
-        return deepEq(toBufferView(a), toBufferView(b), aStack, bStack);
-    }
-
-    var areArrays = className === '[object Array]';
-    if (!areArrays && isTypedArray$1(a)) {
-        var byteLength = getByteLength(a);
-        if (byteLength !== getByteLength(b)) return false;
-        if (a.buffer === b.buffer && a.byteOffset === b.byteOffset) return true;
-        areArrays = true;
-    }
-    if (!areArrays) {
-      if (typeof a != 'object' || typeof b != 'object') return false;
-
-      // Objects with different constructors are not equivalent, but `Object`s or `Array`s
-      // from different frames are.
-      var aCtor = a.constructor, bCtor = b.constructor;
-      if (aCtor !== bCtor && !(isFunction$1(aCtor) && aCtor instanceof aCtor &&
-                               isFunction$1(bCtor) && bCtor instanceof bCtor)
-                          && ('constructor' in a && 'constructor' in b)) {
-        return false;
-      }
-    }
-    // Assume equality for cyclic structures. The algorithm for detecting cyclic
-    // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
-
-    // Initializing stack of traversed objects.
-    // It's done here since we only need them for objects and arrays comparison.
-    aStack = aStack || [];
-    bStack = bStack || [];
-    var length = aStack.length;
-    while (length--) {
-      // Linear search. Performance is inversely proportional to the number of
-      // unique nested structures.
-      if (aStack[length] === a) return bStack[length] === b;
-    }
-
-    // Add the first object to the stack of traversed objects.
-    aStack.push(a);
-    bStack.push(b);
-
-    // Recursively compare objects and arrays.
-    if (areArrays) {
-      // Compare array lengths to determine if a deep comparison is necessary.
-      length = a.length;
-      if (length !== b.length) return false;
-      // Deep compare the contents, ignoring non-numeric properties.
-      while (length--) {
-        if (!eq(a[length], b[length], aStack, bStack)) return false;
-      }
-    } else {
-      // Deep compare objects.
-      var _keys = keys(a), key;
-      length = _keys.length;
-      // Ensure that both objects contain the same number of properties before comparing deep equality.
-      if (keys(b).length !== length) return false;
-      while (length--) {
-        // Deep compare each member
-        key = _keys[length];
-        if (!(has$1(b, key) && eq(a[key], b[key], aStack, bStack))) return false;
-      }
-    }
-    // Remove the first object from the stack of traversed objects.
-    aStack.pop();
-    bStack.pop();
-    return true;
-  }
-
-  // Perform a deep comparison to check if two objects are equal.
-  function isEqual(a, b) {
-    return eq(a, b);
-  }
-
-  // Retrieve all the enumerable property names of an object.
-  function allKeys(obj) {
-    if (!isObject(obj)) return [];
-    var keys = [];
-    for (var key in obj) keys.push(key);
-    // Ahem, IE < 9.
-    if (hasEnumBug) collectNonEnumProps(obj, keys);
-    return keys;
-  }
-
-  // Since the regular `Object.prototype.toString` type tests don't work for
-  // some types in IE 11, we use a fingerprinting heuristic instead, based
-  // on the methods. It's not great, but it's the best we got.
-  // The fingerprint method lists are defined below.
-  function ie11fingerprint(methods) {
-    var length = getLength(methods);
-    return function(obj) {
-      if (obj == null) return false;
-      // `Map`, `WeakMap` and `Set` have no enumerable keys.
-      var keys = allKeys(obj);
-      if (getLength(keys)) return false;
-      for (var i = 0; i < length; i++) {
-        if (!isFunction$1(obj[methods[i]])) return false;
-      }
-      // If we are testing against `WeakMap`, we need to ensure that
-      // `obj` doesn't have a `forEach` method in order to distinguish
-      // it from a regular `Map`.
-      return methods !== weakMapMethods || !isFunction$1(obj[forEachName]);
-    };
-  }
-
-  // In the interest of compact minification, we write
-  // each string in the fingerprints only once.
-  var forEachName = 'forEach',
-      hasName = 'has',
-      commonInit = ['clear', 'delete'],
-      mapTail = ['get', hasName, 'set'];
-
-  // `Map`, `WeakMap` and `Set` each have slightly different
-  // combinations of the above sublists.
-  var mapMethods = commonInit.concat(forEachName, mapTail),
-      weakMapMethods = commonInit.concat(mapTail),
-      setMethods = ['add'].concat(commonInit, forEachName, hasName);
-
-  var isMap = isIE11 ? ie11fingerprint(mapMethods) : tagTester('Map');
-
-  var isWeakMap = isIE11 ? ie11fingerprint(weakMapMethods) : tagTester('WeakMap');
-
-  var isSet = isIE11 ? ie11fingerprint(setMethods) : tagTester('Set');
-
-  var isWeakSet = tagTester('WeakSet');
-
-  // Retrieve the values of an object's properties.
-  function values(obj) {
-    var _keys = keys(obj);
-    var length = _keys.length;
-    var values = Array(length);
-    for (var i = 0; i < length; i++) {
-      values[i] = obj[_keys[i]];
-    }
-    return values;
-  }
-
-  // Convert an object into a list of `[key, value]` pairs.
-  // The opposite of `_.object` with one argument.
-  function pairs(obj) {
-    var _keys = keys(obj);
-    var length = _keys.length;
-    var pairs = Array(length);
-    for (var i = 0; i < length; i++) {
-      pairs[i] = [_keys[i], obj[_keys[i]]];
-    }
-    return pairs;
-  }
-
-  // Invert the keys and values of an object. The values must be serializable.
-  function invert(obj) {
-    var result = {};
-    var _keys = keys(obj);
-    for (var i = 0, length = _keys.length; i < length; i++) {
-      result[obj[_keys[i]]] = _keys[i];
-    }
-    return result;
-  }
-
-  // Return a sorted list of the function names available on the object.
-  function functions(obj) {
-    var names = [];
-    for (var key in obj) {
-      if (isFunction$1(obj[key])) names.push(key);
-    }
-    return names.sort();
-  }
-
-  // An internal function for creating assigner functions.
-  function createAssigner(keysFunc, defaults) {
-    return function(obj) {
-      var length = arguments.length;
-      if (defaults) obj = Object(obj);
-      if (length < 2 || obj == null) return obj;
-      for (var index = 1; index < length; index++) {
-        var source = arguments[index],
-            keys = keysFunc(source),
-            l = keys.length;
-        for (var i = 0; i < l; i++) {
-          var key = keys[i];
-          if (!defaults || obj[key] === void 0) obj[key] = source[key];
-        }
-      }
-      return obj;
-    };
-  }
-
-  // Extend a given object with all the properties in passed-in object(s).
-  var extend = createAssigner(allKeys);
-
-  // Assigns a given object with all the own properties in the passed-in
-  // object(s).
-  // (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
-  var extendOwn = createAssigner(keys);
-
-  // Fill in a given object with default properties.
-  var defaults = createAssigner(allKeys, true);
-
-  // Create a naked function reference for surrogate-prototype-swapping.
-  function ctor() {
-    return function(){};
-  }
-
-  // An internal function for creating a new object that inherits from another.
-  function baseCreate(prototype) {
-    if (!isObject(prototype)) return {};
-    if (nativeCreate) return nativeCreate(prototype);
-    var Ctor = ctor();
-    Ctor.prototype = prototype;
-    var result = new Ctor;
-    Ctor.prototype = null;
-    return result;
-  }
-
-  // Creates an object that inherits from the given prototype object.
-  // If additional properties are provided then they will be added to the
-  // created object.
-  function create(prototype, props) {
-    var result = baseCreate(prototype);
-    if (props) extendOwn(result, props);
-    return result;
-  }
-
-  // Create a (shallow-cloned) duplicate of an object.
-  function clone(obj) {
-    if (!isObject(obj)) return obj;
-    return isArray(obj) ? obj.slice() : extend({}, obj);
-  }
-
-  // Invokes `interceptor` with the `obj` and then returns `obj`.
-  // The primary purpose of this method is to "tap into" a method chain, in
-  // order to perform operations on intermediate results within the chain.
-  function tap(obj, interceptor) {
-    interceptor(obj);
-    return obj;
-  }
-
-  // Normalize a (deep) property `path` to array.
-  // Like `_.iteratee`, this function can be customized.
-  function toPath$1(path) {
-    return isArray(path) ? path : [path];
-  }
-  _$1.toPath = toPath$1;
-
-  // Internal wrapper for `_.toPath` to enable minification.
-  // Similar to `cb` for `_.iteratee`.
-  function toPath(path) {
-    return _$1.toPath(path);
-  }
-
-  // Internal function to obtain a nested property in `obj` along `path`.
-  function deepGet(obj, path) {
-    var length = path.length;
-    for (var i = 0; i < length; i++) {
-      if (obj == null) return void 0;
-      obj = obj[path[i]];
-    }
-    return length ? obj : void 0;
-  }
-
-  // Get the value of the (deep) property on `path` from `object`.
-  // If any property in `path` does not exist or if the value is
-  // `undefined`, return `defaultValue` instead.
-  // The `path` is normalized through `_.toPath`.
-  function get(object, path, defaultValue) {
-    var value = deepGet(object, toPath(path));
-    return isUndefined(value) ? defaultValue : value;
-  }
-
-  // Shortcut function for checking if an object has a given property directly on
-  // itself (in other words, not on a prototype). Unlike the internal `has`
-  // function, this public version can also traverse nested properties.
-  function has(obj, path) {
-    path = toPath(path);
-    var length = path.length;
-    for (var i = 0; i < length; i++) {
-      var key = path[i];
-      if (!has$1(obj, key)) return false;
-      obj = obj[key];
-    }
-    return !!length;
-  }
-
-  // Keep the identity function around for default iteratees.
-  function identity(value) {
-    return value;
-  }
-
-  // Returns a predicate for checking whether an object has a given set of
-  // `key:value` pairs.
-  function matcher(attrs) {
-    attrs = extendOwn({}, attrs);
-    return function(obj) {
-      return isMatch(obj, attrs);
-    };
-  }
-
-  // Creates a function that, when passed an object, will traverse that object’s
-  // properties down the given `path`, specified as an array of keys or indices.
-  function property(path) {
-    path = toPath(path);
-    return function(obj) {
-      return deepGet(obj, path);
-    };
-  }
-
-  // Internal function that returns an efficient (for current engines) version
-  // of the passed-in callback, to be repeatedly applied in other Underscore
-  // functions.
-  function optimizeCb(func, context, argCount) {
-    if (context === void 0) return func;
-    switch (argCount == null ? 3 : argCount) {
-      case 1: return function(value) {
-        return func.call(context, value);
-      };
-      // The 2-argument case is omitted because we’re not using it.
-      case 3: return function(value, index, collection) {
-        return func.call(context, value, index, collection);
-      };
-      case 4: return function(accumulator, value, index, collection) {
-        return func.call(context, accumulator, value, index, collection);
-      };
-    }
-    return function() {
-      return func.apply(context, arguments);
-    };
-  }
-
-  // An internal function to generate callbacks that can be applied to each
-  // element in a collection, returning the desired result — either `_.identity`,
-  // an arbitrary callback, a property matcher, or a property accessor.
-  function baseIteratee(value, context, argCount) {
-    if (value == null) return identity;
-    if (isFunction$1(value)) return optimizeCb(value, context, argCount);
-    if (isObject(value) && !isArray(value)) return matcher(value);
-    return property(value);
-  }
-
-  // External wrapper for our callback generator. Users may customize
-  // `_.iteratee` if they want additional predicate/iteratee shorthand styles.
-  // This abstraction hides the internal-only `argCount` argument.
-  function iteratee(value, context) {
-    return baseIteratee(value, context, Infinity);
-  }
-  _$1.iteratee = iteratee;
-
-  // The function we call internally to generate a callback. It invokes
-  // `_.iteratee` if overridden, otherwise `baseIteratee`.
-  function cb(value, context, argCount) {
-    if (_$1.iteratee !== iteratee) return _$1.iteratee(value, context);
-    return baseIteratee(value, context, argCount);
-  }
-
-  // Returns the results of applying the `iteratee` to each element of `obj`.
-  // In contrast to `_.map` it returns an object.
-  function mapObject(obj, iteratee, context) {
-    iteratee = cb(iteratee, context);
-    var _keys = keys(obj),
-        length = _keys.length,
-        results = {};
-    for (var index = 0; index < length; index++) {
-      var currentKey = _keys[index];
-      results[currentKey] = iteratee(obj[currentKey], currentKey, obj);
-    }
-    return results;
-  }
-
-  // Predicate-generating function. Often useful outside of Underscore.
-  function noop(){}
-
-  // Generates a function for a given object that returns a given property.
-  function propertyOf(obj) {
-    if (obj == null) return noop;
-    return function(path) {
-      return get(obj, path);
-    };
-  }
-
-  // Run a function **n** times.
-  function times(n, iteratee, context) {
-    var accum = Array(Math.max(0, n));
-    iteratee = optimizeCb(iteratee, context, 1);
-    for (var i = 0; i < n; i++) accum[i] = iteratee(i);
-    return accum;
-  }
-
-  // Return a random integer between `min` and `max` (inclusive).
-  function random(min, max) {
-    if (max == null) {
-      max = min;
-      min = 0;
-    }
-    return min + Math.floor(Math.random() * (max - min + 1));
-  }
-
-  // A (possibly faster) way to get the current timestamp as an integer.
-  var now = Date.now || function() {
-    return new Date().getTime();
-  };
-
-  // Internal helper to generate functions for escaping and unescaping strings
-  // to/from HTML interpolation.
-  function createEscaper(map) {
-    var escaper = function(match) {
-      return map[match];
-    };
-    // Regexes for identifying a key that needs to be escaped.
-    var source = '(?:' + keys(map).join('|') + ')';
-    var testRegexp = RegExp(source);
-    var replaceRegexp = RegExp(source, 'g');
-    return function(string) {
-      string = string == null ? '' : '' + string;
-      return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
-    };
-  }
-
-  // Internal list of HTML entities for escaping.
-  var escapeMap = {
-    '&': '&amp;',
-    '<': '&lt;',
-    '>': '&gt;',
-    '"': '&quot;',
-    "'": '&#x27;',
-    '`': '&#x60;'
-  };
-
-  // Function for escaping strings to HTML interpolation.
-  var _escape = createEscaper(escapeMap);
-
-  // Internal list of HTML entities for unescaping.
-  var unescapeMap = invert(escapeMap);
-
-  // Function for unescaping strings from HTML interpolation.
-  var _unescape = createEscaper(unescapeMap);
-
-  // By default, Underscore uses ERB-style template delimiters. Change the
-  // following template settings to use alternative delimiters.
-  var templateSettings = _$1.templateSettings = {
-    evaluate: /<%([\s\S]+?)%>/g,
-    interpolate: /<%=([\s\S]+?)%>/g,
-    escape: /<%-([\s\S]+?)%>/g
-  };
-
-  // When customizing `_.templateSettings`, if you don't want to define an
-  // interpolation, evaluation or escaping regex, we need one that is
-  // guaranteed not to match.
-  var noMatch = /(.)^/;
-
-  // Certain characters need to be escaped so that they can be put into a
-  // string literal.
-  var escapes = {
-    "'": "'",
-    '\\': '\\',
-    '\r': 'r',
-    '\n': 'n',
-    '\u2028': 'u2028',
-    '\u2029': 'u2029'
-  };
-
-  var escapeRegExp = /\\|'|\r|\n|\u2028|\u2029/g;
-
-  function escapeChar(match) {
-    return '\\' + escapes[match];
-  }
-
-  // In order to prevent third-party code injection through
-  // `_.templateSettings.variable`, we test it against the following regular
-  // expression. It is intentionally a bit more liberal than just matching valid
-  // identifiers, but still prevents possible loopholes through defaults or
-  // destructuring assignment.
-  var bareIdentifier = /^\s*(\w|\$)+\s*$/;
-
-  // JavaScript micro-templating, similar to John Resig's implementation.
-  // Underscore templating handles arbitrary delimiters, preserves whitespace,
-  // and correctly escapes quotes within interpolated code.
-  // NB: `oldSettings` only exists for backwards compatibility.
-  function template(text, settings, oldSettings) {
-    if (!settings && oldSettings) settings = oldSettings;
-    settings = defaults({}, settings, _$1.templateSettings);
-
-    // Combine delimiters into one regular expression via alternation.
-    var matcher = RegExp([
-      (settings.escape || noMatch).source,
-      (settings.interpolate || noMatch).source,
-      (settings.evaluate || noMatch).source
-    ].join('|') + '|$', 'g');
-
-    // Compile the template source, escaping string literals appropriately.
-    var index = 0;
-    var source = "__p+='";
-    text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
-      source += text.slice(index, offset).replace(escapeRegExp, escapeChar);
-      index = offset + match.length;
-
-      if (escape) {
-        source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
-      } else if (interpolate) {
-        source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
-      } else if (evaluate) {
-        source += "';\n" + evaluate + "\n__p+='";
-      }
-
-      // Adobe VMs need the match returned to produce the correct offset.
-      return match;
-    });
-    source += "';\n";
-
-    var argument = settings.variable;
-    if (argument) {
-      // Insure against third-party code injection. (CVE-2021-23358)
-      if (!bareIdentifier.test(argument)) throw new Error(
-        'variable is not a bare identifier: ' + argument
-      );
-    } else {
-      // If a variable is not specified, place data values in local scope.
-      source = 'with(obj||{}){\n' + source + '}\n';
-      argument = 'obj';
-    }
-
-    source = "var __t,__p='',__j=Array.prototype.join," +
-      "print=function(){__p+=__j.call(arguments,'');};\n" +
-      source + 'return __p;\n';
-
-    var render;
-    try {
-      render = new Function(argument, '_', source);
-    } catch (e) {
-      e.source = source;
-      throw e;
-    }
-
-    var template = function(data) {
-      return render.call(this, data, _$1);
-    };
-
-    // Provide the compiled source as a convenience for precompilation.
-    template.source = 'function(' + argument + '){\n' + source + '}';
-
-    return template;
-  }
-
-  // Traverses the children of `obj` along `path`. If a child is a function, it
-  // is invoked with its parent as context. Returns the value of the final
-  // child, or `fallback` if any child is undefined.
-  function result(obj, path, fallback) {
-    path = toPath(path);
-    var length = path.length;
-    if (!length) {
-      return isFunction$1(fallback) ? fallback.call(obj) : fallback;
-    }
-    for (var i = 0; i < length; i++) {
-      var prop = obj == null ? void 0 : obj[path[i]];
-      if (prop === void 0) {
-        prop = fallback;
-        i = length; // Ensure we don't continue iterating.
-      }
-      obj = isFunction$1(prop) ? prop.call(obj) : prop;
-    }
-    return obj;
-  }
-
-  // Generate a unique integer id (unique within the entire client session).
-  // Useful for temporary DOM ids.
-  var idCounter = 0;
-  function uniqueId(prefix) {
-    var id = ++idCounter + '';
-    return prefix ? prefix + id : id;
-  }
-
-  // Start chaining a wrapped Underscore object.
-  function chain(obj) {
-    var instance = _$1(obj);
-    instance._chain = true;
-    return instance;
-  }
-
-  // Internal function to execute `sourceFunc` bound to `context` with optional
-  // `args`. Determines whether to execute a function as a constructor or as a
-  // normal function.
-  function executeBound(sourceFunc, boundFunc, context, callingContext, args) {
-    if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args);
-    var self = baseCreate(sourceFunc.prototype);
-    var result = sourceFunc.apply(self, args);
-    if (isObject(result)) return result;
-    return self;
-  }
-
-  // Partially apply a function by creating a version that has had some of its
-  // arguments pre-filled, without changing its dynamic `this` context. `_` acts
-  // as a placeholder by default, allowing any combination of arguments to be
-  // pre-filled. Set `_.partial.placeholder` for a custom placeholder argument.
-  var partial = restArguments(function(func, boundArgs) {
-    var placeholder = partial.placeholder;
-    var bound = function() {
-      var position = 0, length = boundArgs.length;
-      var args = Array(length);
-      for (var i = 0; i < length; i++) {
-        args[i] = boundArgs[i] === placeholder ? arguments[position++] : boundArgs[i];
-      }
-      while (position < arguments.length) args.push(arguments[position++]);
-      return executeBound(func, bound, this, this, args);
-    };
-    return bound;
-  });
-
-  partial.placeholder = _$1;
-
-  // Create a function bound to a given object (assigning `this`, and arguments,
-  // optionally).
-  var bind = restArguments(function(func, context, args) {
-    if (!isFunction$1(func)) throw new TypeError('Bind must be called on a function');
-    var bound = restArguments(function(callArgs) {
-      return executeBound(func, bound, context, this, args.concat(callArgs));
-    });
-    return bound;
-  });
-
-  // Internal helper for collection methods to determine whether a collection
-  // should be iterated as an array or as an object.
-  // Related: https://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength
-  // Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094
-  var isArrayLike = createSizePropertyCheck(getLength);
-
-  // Internal implementation of a recursive `flatten` function.
-  function flatten$1(input, depth, strict, output) {
-    output = output || [];
-    if (!depth && depth !== 0) {
-      depth = Infinity;
-    } else if (depth <= 0) {
-      return output.concat(input);
-    }
-    var idx = output.length;
-    for (var i = 0, length = getLength(input); i < length; i++) {
-      var value = input[i];
-      if (isArrayLike(value) && (isArray(value) || isArguments$1(value))) {
-        // Flatten current level of array or arguments object.
-        if (depth > 1) {
-          flatten$1(value, depth - 1, strict, output);
-          idx = output.length;
-        } else {
-          var j = 0, len = value.length;
-          while (j < len) output[idx++] = value[j++];
-        }
-      } else if (!strict) {
-        output[idx++] = value;
-      }
-    }
-    return output;
-  }
-
-  // Bind a number of an object's methods to that object. Remaining arguments
-  // are the method names to be bound. Useful for ensuring that all callbacks
-  // defined on an object belong to it.
-  var bindAll = restArguments(function(obj, keys) {
-    keys = flatten$1(keys, false, false);
-    var index = keys.length;
-    if (index < 1) throw new Error('bindAll must be passed function names');
-    while (index--) {
-      var key = keys[index];
-      obj[key] = bind(obj[key], obj);
-    }
-    return obj;
-  });
-
-  // Memoize an expensive function by storing its results.
-  function memoize(func, hasher) {
-    var memoize = function(key) {
-      var cache = memoize.cache;
-      var address = '' + (hasher ? hasher.apply(this, arguments) : key);
-      if (!has$1(cache, address)) cache[address] = func.apply(this, arguments);
-      return cache[address];
-    };
-    memoize.cache = {};
-    return memoize;
-  }
-
-  // Delays a function for the given number of milliseconds, and then calls
-  // it with the arguments supplied.
-  var delay = restArguments(function(func, wait, args) {
-    return setTimeout(function() {
-      return func.apply(null, args);
-    }, wait);
-  });
-
-  // Defers a function, scheduling it to run after the current call stack has
-  // cleared.
-  var defer = partial(delay, _$1, 1);
-
-  // Returns a function, that, when invoked, will only be triggered at most once
-  // during a given window of time. Normally, the throttled function will run
-  // as much as it can, without ever going more than once per `wait` duration;
-  // but if you'd like to disable the execution on the leading edge, pass
-  // `{leading: false}`. To disable execution on the trailing edge, ditto.
-  function throttle(func, wait, options) {
-    var timeout, context, args, result;
-    var previous = 0;
-    if (!options) options = {};
-
-    var later = function() {
-      previous = options.leading === false ? 0 : now();
-      timeout = null;
-      result = func.apply(context, args);
-      if (!timeout) context = args = null;
-    };
-
-    var throttled = function() {
-      var _now = now();
-      if (!previous && options.leading === false) previous = _now;
-      var remaining = wait - (_now - previous);
-      context = this;
-      args = arguments;
-      if (remaining <= 0 || remaining > wait) {
-        if (timeout) {
-          clearTimeout(timeout);
-          timeout = null;
-        }
-        previous = _now;
-        result = func.apply(context, args);
-        if (!timeout) context = args = null;
-      } else if (!timeout && options.trailing !== false) {
-        timeout = setTimeout(later, remaining);
-      }
-      return result;
-    };
-
-    throttled.cancel = function() {
-      clearTimeout(timeout);
-      previous = 0;
-      timeout = context = args = null;
-    };
-
-    return throttled;
-  }
-
-  // When a sequence of calls of the returned function ends, the argument
-  // function is triggered. The end of a sequence is defined by the `wait`
-  // parameter. If `immediate` is passed, the argument function will be
-  // triggered at the beginning of the sequence instead of at the end.
-  function debounce(func, wait, immediate) {
-    var timeout, previous, args, result, context;
-
-    var later = function() {
-      var passed = now() - previous;
-      if (wait > passed) {
-        timeout = setTimeout(later, wait - passed);
-      } else {
-        timeout = null;
-        if (!immediate) result = func.apply(context, args);
-        // This check is needed because `func` can recursively invoke `debounced`.
-        if (!timeout) args = context = null;
-      }
-    };
-
-    var debounced = restArguments(function(_args) {
-      context = this;
-      args = _args;
-      previous = now();
-      if (!timeout) {
-        timeout = setTimeout(later, wait);
-        if (immediate) result = func.apply(context, args);
-      }
-      return result;
-    });
-
-    debounced.cancel = function() {
-      clearTimeout(timeout);
-      timeout = args = context = null;
-    };
-
-    return debounced;
-  }
-
-  // Returns the first function passed as an argument to the second,
-  // allowing you to adjust arguments, run code before and after, and
-  // conditionally execute the original function.
-  function wrap(func, wrapper) {
-    return partial(wrapper, func);
-  }
-
-  // Returns a negated version of the passed-in predicate.
-  function negate(predicate) {
-    return function() {
-      return !predicate.apply(this, arguments);
-    };
-  }
-
-  // Returns a function that is the composition of a list of functions, each
-  // consuming the return value of the function that follows.
-  function compose() {
-    var args = arguments;
-    var start = args.length - 1;
-    return function() {
-      var i = start;
-      var result = args[start].apply(this, arguments);
-      while (i--) result = args[i].call(this, result);
-      return result;
-    };
-  }
-
-  // Returns a function that will only be executed on and after the Nth call.
-  function after(times, func) {
-    return function() {
-      if (--times < 1) {
-        return func.apply(this, arguments);
-      }
-    };
-  }
-
-  // Returns a function that will only be executed up to (but not including) the
-  // Nth call.
-  function before(times, func) {
-    var memo;
-    return function() {
-      if (--times > 0) {
-        memo = func.apply(this, arguments);
-      }
-      if (times <= 1) func = null;
-      return memo;
-    };
-  }
-
-  // Returns a function that will be executed at most one time, no matter how
-  // often you call it. Useful for lazy initialization.
-  var once = partial(before, 2);
-
-  // Returns the first key on an object that passes a truth test.
-  function findKey(obj, predicate, context) {
-    predicate = cb(predicate, context);
-    var _keys = keys(obj), key;
-    for (var i = 0, length = _keys.length; i < length; i++) {
-      key = _keys[i];
-      if (predicate(obj[key], key, obj)) return key;
-    }
-  }
-
-  // Internal function to generate `_.findIndex` and `_.findLastIndex`.
-  function createPredicateIndexFinder(dir) {
-    return function(array, predicate, context) {
-      predicate = cb(predicate, context);
-      var length = getLength(array);
-      var index = dir > 0 ? 0 : length - 1;
-      for (; index >= 0 && index < length; index += dir) {
-        if (predicate(array[index], index, array)) return index;
-      }
-      return -1;
-    };
-  }
-
-  // Returns the first index on an array-like that passes a truth test.
-  var findIndex = createPredicateIndexFinder(1);
-
-  // Returns the last index on an array-like that passes a truth test.
-  var findLastIndex = createPredicateIndexFinder(-1);
-
-  // Use a comparator function to figure out the smallest index at which
-  // an object should be inserted so as to maintain order. Uses binary search.
-  function sortedIndex(array, obj, iteratee, context) {
-    iteratee = cb(iteratee, context, 1);
-    var value = iteratee(obj);
-    var low = 0, high = getLength(array);
-    while (low < high) {
-      var mid = Math.floor((low + high) / 2);
-      if (iteratee(array[mid]) < value) low = mid + 1; else high = mid;
-    }
-    return low;
-  }
-
-  // Internal function to generate the `_.indexOf` and `_.lastIndexOf` functions.
-  function createIndexFinder(dir, predicateFind, sortedIndex) {
-    return function(array, item, idx) {
-      var i = 0, length = getLength(array);
-      if (typeof idx == 'number') {
-        if (dir > 0) {
-          i = idx >= 0 ? idx : Math.max(idx + length, i);
-        } else {
-          length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1;
-        }
-      } else if (sortedIndex && idx && length) {
-        idx = sortedIndex(array, item);
-        return array[idx] === item ? idx : -1;
-      }
-      if (item !== item) {
-        idx = predicateFind(slice.call(array, i, length), isNaN$1);
-        return idx >= 0 ? idx + i : -1;
-      }
-      for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) {
-        if (array[idx] === item) return idx;
-      }
-      return -1;
-    };
-  }
-
-  // Return the position of the first occurrence of an item in an array,
-  // or -1 if the item is not included in the array.
-  // If the array is large and already in sort order, pass `true`
-  // for **isSorted** to use binary search.
-  var indexOf = createIndexFinder(1, findIndex, sortedIndex);
-
-  // Return the position of the last occurrence of an item in an array,
-  // or -1 if the item is not included in the array.
-  var lastIndexOf = createIndexFinder(-1, findLastIndex);
-
-  // Return the first value which passes a truth test.
-  function find(obj, predicate, context) {
-    var keyFinder = isArrayLike(obj) ? findIndex : findKey;
-    var key = keyFinder(obj, predicate, context);
-    if (key !== void 0 && key !== -1) return obj[key];
-  }
-
-  // Convenience version of a common use case of `_.find`: getting the first
-  // object containing specific `key:value` pairs.
-  function findWhere(obj, attrs) {
-    return find(obj, matcher(attrs));
-  }
-
-  // The cornerstone for collection functions, an `each`
-  // implementation, aka `forEach`.
-  // Handles raw objects in addition to array-likes. Treats all
-  // sparse array-likes as if they were dense.
-  function each(obj, iteratee, context) {
-    iteratee = optimizeCb(iteratee, context);
-    var i, length;
-    if (isArrayLike(obj)) {
-      for (i = 0, length = obj.length; i < length; i++) {
-        iteratee(obj[i], i, obj);
-      }
-    } else {
-      var _keys = keys(obj);
-      for (i = 0, length = _keys.length; i < length; i++) {
-        iteratee(obj[_keys[i]], _keys[i], obj);
-      }
-    }
-    return obj;
-  }
-
-  // Return the results of applying the iteratee to each element.
-  function map(obj, iteratee, context) {
-    iteratee = cb(iteratee, context);
-    var _keys = !isArrayLike(obj) && keys(obj),
-        length = (_keys || obj).length,
-        results = Array(length);
-    for (var index = 0; index < length; index++) {
-      var currentKey = _keys ? _keys[index] : index;
-      results[index] = iteratee(obj[currentKey], currentKey, obj);
-    }
-    return results;
-  }
-
-  // Internal helper to create a reducing function, iterating left or right.
-  function createReduce(dir) {
-    // Wrap code that reassigns argument variables in a separate function than
-    // the one that accesses `arguments.length` to avoid a perf hit. (#1991)
-    var reducer = function(obj, iteratee, memo, initial) {
-      var _keys = !isArrayLike(obj) && keys(obj),
-          length = (_keys || obj).length,
-          index = dir > 0 ? 0 : length - 1;
-      if (!initial) {
-        memo = obj[_keys ? _keys[index] : index];
-        index += dir;
-      }
-      for (; index >= 0 && index < length; index += dir) {
-        var currentKey = _keys ? _keys[index] : index;
-        memo = iteratee(memo, obj[currentKey], currentKey, obj);
-      }
-      return memo;
-    };
-
-    return function(obj, iteratee, memo, context) {
-      var initial = arguments.length >= 3;
-      return reducer(obj, optimizeCb(iteratee, context, 4), memo, initial);
-    };
-  }
-
-  // **Reduce** builds up a single result from a list of values, aka `inject`,
-  // or `foldl`.
-  var reduce = createReduce(1);
-
-  // The right-associative version of reduce, also known as `foldr`.
-  var reduceRight = createReduce(-1);
-
-  // Return all the elements that pass a truth test.
-  function filter(obj, predicate, context) {
-    var results = [];
-    predicate = cb(predicate, context);
-    each(obj, function(value, index, list) {
-      if (predicate(value, index, list)) results.push(value);
-    });
-    return results;
-  }
-
-  // Return all the elements for which a truth test fails.
-  function reject(obj, predicate, context) {
-    return filter(obj, negate(cb(predicate)), context);
-  }
-
-  // Determine whether all of the elements pass a truth test.
-  function every(obj, predicate, context) {
-    predicate = cb(predicate, context);
-    var _keys = !isArrayLike(obj) && keys(obj),
-        length = (_keys || obj).length;
-    for (var index = 0; index < length; index++) {
-      var currentKey = _keys ? _keys[index] : index;
-      if (!predicate(obj[currentKey], currentKey, obj)) return false;
-    }
-    return true;
-  }
-
-  // Determine if at least one element in the object passes a truth test.
-  function some(obj, predicate, context) {
-    predicate = cb(predicate, context);
-    var _keys = !isArrayLike(obj) && keys(obj),
-        length = (_keys || obj).length;
-    for (var index = 0; index < length; index++) {
-      var currentKey = _keys ? _keys[index] : index;
-      if (predicate(obj[currentKey], currentKey, obj)) return true;
-    }
-    return false;
-  }
-
-  // Determine if the array or object contains a given item (using `===`).
-  function contains(obj, item, fromIndex, guard) {
-    if (!isArrayLike(obj)) obj = values(obj);
-    if (typeof fromIndex != 'number' || guard) fromIndex = 0;
-    return indexOf(obj, item, fromIndex) >= 0;
-  }
-
-  // Invoke a method (with arguments) on every item in a collection.
-  var invoke = restArguments(function(obj, path, args) {
-    var contextPath, func;
-    if (isFunction$1(path)) {
-      func = path;
-    } else {
-      path = toPath(path);
-      contextPath = path.slice(0, -1);
-      path = path[path.length - 1];
-    }
-    return map(obj, function(context) {
-      var method = func;
-      if (!method) {
-        if (contextPath && contextPath.length) {
-          context = deepGet(context, contextPath);
-        }
-        if (context == null) return void 0;
-        method = context[path];
-      }
-      return method == null ? method : method.apply(context, args);
-    });
-  });
-
-  // Convenience version of a common use case of `_.map`: fetching a property.
-  function pluck(obj, key) {
-    return map(obj, property(key));
-  }
-
-  // Convenience version of a common use case of `_.filter`: selecting only
-  // objects containing specific `key:value` pairs.
-  function where(obj, attrs) {
-    return filter(obj, matcher(attrs));
-  }
-
-  // Return the maximum element (or element-based computation).
-  function max(obj, iteratee, context) {
-    var result = -Infinity, lastComputed = -Infinity,
-        value, computed;
-    if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
-      obj = isArrayLike(obj) ? obj : values(obj);
-      for (var i = 0, length = obj.length; i < length; i++) {
-        value = obj[i];
-        if (value != null && value > result) {
-          result = value;
-        }
-      }
-    } else {
-      iteratee = cb(iteratee, context);
-      each(obj, function(v, index, list) {
-        computed = iteratee(v, index, list);
-        if (computed > lastComputed || computed === -Infinity && result === -Infinity) {
-          result = v;
-          lastComputed = computed;
-        }
-      });
-    }
-    return result;
-  }
-
-  // Return the minimum element (or element-based computation).
-  function min(obj, iteratee, context) {
-    var result = Infinity, lastComputed = Infinity,
-        value, computed;
-    if (iteratee == null || typeof iteratee == 'number' && typeof obj[0] != 'object' && obj != null) {
-      obj = isArrayLike(obj) ? obj : values(obj);
-      for (var i = 0, length = obj.length; i < length; i++) {
-        value = obj[i];
-        if (value != null && value < result) {
-          result = value;
-        }
-      }
-    } else {
-      iteratee = cb(iteratee, context);
-      each(obj, function(v, index, list) {
-        computed = iteratee(v, index, list);
-        if (computed < lastComputed || computed === Infinity && result === Infinity) {
-          result = v;
-          lastComputed = computed;
-        }
-      });
-    }
-    return result;
-  }
-
-  // Sample **n** random values from a collection using the modern version of the
-  // [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher–Yates_shuffle).
-  // If **n** is not specified, returns a single random element.
-  // The internal `guard` argument allows it to work with `_.map`.
-  function sample(obj, n, guard) {
-    if (n == null || guard) {
-      if (!isArrayLike(obj)) obj = values(obj);
-      return obj[random(obj.length - 1)];
-    }
-    var sample = isArrayLike(obj) ? clone(obj) : values(obj);
-    var length = getLength(sample);
-    n = Math.max(Math.min(n, length), 0);
-    var last = length - 1;
-    for (var index = 0; index < n; index++) {
-      var rand = random(index, last);
-      var temp = sample[index];
-      sample[index] = sample[rand];
-      sample[rand] = temp;
-    }
-    return sample.slice(0, n);
-  }
-
-  // Shuffle a collection.
-  function shuffle(obj) {
-    return sample(obj, Infinity);
-  }
-
-  // Sort the object's values by a criterion produced by an iteratee.
-  function sortBy(obj, iteratee, context) {
-    var index = 0;
-    iteratee = cb(iteratee, context);
-    return pluck(map(obj, function(value, key, list) {
-      return {
-        value: value,
-        index: index++,
-        criteria: iteratee(value, key, list)
-      };
-    }).sort(function(left, right) {
-      var a = left.criteria;
-      var b = right.criteria;
-      if (a !== b) {
-        if (a > b || a === void 0) return 1;
-        if (a < b || b === void 0) return -1;
-      }
-      return left.index - right.index;
-    }), 'value');
-  }
-
-  // An internal function used for aggregate "group by" operations.
-  function group(behavior, partition) {
-    return function(obj, iteratee, context) {
-      var result = partition ? [[], []] : {};
-      iteratee = cb(iteratee, context);
-      each(obj, function(value, index) {
-        var key = iteratee(value, index, obj);
-        behavior(result, value, key);
-      });
-      return result;
-    };
-  }
-
-  // Groups the object's values by a criterion. Pass either a string attribute
-  // to group by, or a function that returns the criterion.
-  var groupBy = group(function(result, value, key) {
-    if (has$1(result, key)) result[key].push(value); else result[key] = [value];
-  });
-
-  // Indexes the object's values by a criterion, similar to `_.groupBy`, but for
-  // when you know that your index values will be unique.
-  var indexBy = group(function(result, value, key) {
-    result[key] = value;
-  });
-
-  // Counts instances of an object that group by a certain criterion. Pass
-  // either a string attribute to count by, or a function that returns the
-  // criterion.
-  var countBy = group(function(result, value, key) {
-    if (has$1(result, key)) result[key]++; else result[key] = 1;
-  });
-
-  // Split a collection into two arrays: one whose elements all pass the given
-  // truth test, and one whose elements all do not pass the truth test.
-  var partition = group(function(result, value, pass) {
-    result[pass ? 0 : 1].push(value);
-  }, true);
-
-  // Safely create a real, live array from anything iterable.
-  var reStrSymbol = /[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;
-  function toArray(obj) {
-    if (!obj) return [];
-    if (isArray(obj)) return slice.call(obj);
-    if (isString(obj)) {
-      // Keep surrogate pair characters together.
-      return obj.match(reStrSymbol);
-    }
-    if (isArrayLike(obj)) return map(obj, identity);
-    return values(obj);
-  }
-
-  // Return the number of elements in a collection.
-  function size(obj) {
-    if (obj == null) return 0;
-    return isArrayLike(obj) ? obj.length : keys(obj).length;
-  }
-
-  // Internal `_.pick` helper function to determine whether `key` is an enumerable
-  // property name of `obj`.
-  function keyInObj(value, key, obj) {
-    return key in obj;
-  }
-
-  // Return a copy of the object only containing the allowed properties.
-  var pick = restArguments(function(obj, keys) {
-    var result = {}, iteratee = keys[0];
-    if (obj == null) return result;
-    if (isFunction$1(iteratee)) {
-      if (keys.length > 1) iteratee = optimizeCb(iteratee, keys[1]);
-      keys = allKeys(obj);
-    } else {
-      iteratee = keyInObj;
-      keys = flatten$1(keys, false, false);
-      obj = Object(obj);
-    }
-    for (var i = 0, length = keys.length; i < length; i++) {
-      var key = keys[i];
-      var value = obj[key];
-      if (iteratee(value, key, obj)) result[key] = value;
-    }
-    return result;
-  });
-
-  // Return a copy of the object without the disallowed properties.
-  var omit = restArguments(function(obj, keys) {
-    var iteratee = keys[0], context;
-    if (isFunction$1(iteratee)) {
-      iteratee = negate(iteratee);
-      if (keys.length > 1) context = keys[1];
-    } else {
-      keys = map(flatten$1(keys, false, false), String);
-      iteratee = function(value, key) {
-        return !contains(keys, key);
-      };
-    }
-    return pick(obj, iteratee, context);
-  });
-
-  // Returns everything but the last entry of the array. Especially useful on
-  // the arguments object. Passing **n** will return all the values in
-  // the array, excluding the last N.
-  function initial(array, n, guard) {
-    return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n)));
-  }
-
-  // Get the first element of an array. Passing **n** will return the first N
-  // values in the array. The **guard** check allows it to work with `_.map`.
-  function first(array, n, guard) {
-    if (array == null || array.length < 1) return n == null || guard ? void 0 : [];
-    if (n == null || guard) return array[0];
-    return initial(array, array.length - n);
-  }
-
-  // Returns everything but the first entry of the `array`. Especially useful on
-  // the `arguments` object. Passing an **n** will return the rest N values in the
-  // `array`.
-  function rest(array, n, guard) {
-    return slice.call(array, n == null || guard ? 1 : n);
-  }
-
-  // Get the last element of an array. Passing **n** will return the last N
-  // values in the array.
-  function last(array, n, guard) {
-    if (array == null || array.length < 1) return n == null || guard ? void 0 : [];
-    if (n == null || guard) return array[array.length - 1];
-    return rest(array, Math.max(0, array.length - n));
-  }
-
-  // Trim out all falsy values from an array.
-  function compact(array) {
-    return filter(array, Boolean);
-  }
-
-  // Flatten out an array, either recursively (by default), or up to `depth`.
-  // Passing `true` or `false` as `depth` means `1` or `Infinity`, respectively.
-  function flatten(array, depth) {
-    return flatten$1(array, depth, false);
-  }
-
-  // Take the difference between one array and a number of other arrays.
-  // Only the elements present in just the first array will remain.
-  var difference = restArguments(function(array, rest) {
-    rest = flatten$1(rest, true, true);
-    return filter(array, function(value){
-      return !contains(rest, value);
-    });
-  });
-
-  // Return a version of the array that does not contain the specified value(s).
-  var without = restArguments(function(array, otherArrays) {
-    return difference(array, otherArrays);
-  });
-
-  // Produce a duplicate-free version of the array. If the array has already
-  // been sorted, you have the option of using a faster algorithm.
-  // The faster algorithm will not work with an iteratee if the iteratee
-  // is not a one-to-one function, so providing an iteratee will disable
-  // the faster algorithm.
-  function uniq(array, isSorted, iteratee, context) {
-    if (!isBoolean(isSorted)) {
-      context = iteratee;
-      iteratee = isSorted;
-      isSorted = false;
-    }
-    if (iteratee != null) iteratee = cb(iteratee, context);
-    var result = [];
-    var seen = [];
-    for (var i = 0, length = getLength(array); i < length; i++) {
-      var value = array[i],
-          computed = iteratee ? iteratee(value, i, array) : value;
-      if (isSorted && !iteratee) {
-        if (!i || seen !== computed) result.push(value);
-        seen = computed;
-      } else if (iteratee) {
-        if (!contains(seen, computed)) {
-          seen.push(computed);
-          result.push(value);
-        }
-      } else if (!contains(result, value)) {
-        result.push(value);
-      }
-    }
-    return result;
-  }
-
-  // Produce an array that contains the union: each distinct element from all of
-  // the passed-in arrays.
-  var union = restArguments(function(arrays) {
-    return uniq(flatten$1(arrays, true, true));
-  });
-
-  // Produce an array that contains every item shared between all the
-  // passed-in arrays.
-  function intersection(array) {
-    var result = [];
-    var argsLength = arguments.length;
-    for (var i = 0, length = getLength(array); i < length; i++) {
-      var item = array[i];
-      if (contains(result, item)) continue;
-      var j;
-      for (j = 1; j < argsLength; j++) {
-        if (!contains(arguments[j], item)) break;
-      }
-      if (j === argsLength) result.push(item);
-    }
-    return result;
-  }
-
-  // Complement of zip. Unzip accepts an array of arrays and groups
-  // each array's elements on shared indices.
-  function unzip(array) {
-    var length = array && max(array, getLength).length || 0;
-    var result = Array(length);
-
-    for (var index = 0; index < length; index++) {
-      result[index] = pluck(array, index);
-    }
-    return result;
-  }
-
-  // Zip together multiple lists into a single array -- elements that share
-  // an index go together.
-  var zip = restArguments(unzip);
-
-  // Converts lists into objects. Pass either a single array of `[key, value]`
-  // pairs, or two parallel arrays of the same length -- one of keys, and one of
-  // the corresponding values. Passing by pairs is the reverse of `_.pairs`.
-  function object(list, values) {
-    var result = {};
-    for (var i = 0, length = getLength(list); i < length; i++) {
-      if (values) {
-        result[list[i]] = values[i];
-      } else {
-        result[list[i][0]] = list[i][1];
-      }
-    }
-    return result;
-  }
-
-  // Generate an integer Array containing an arithmetic progression. A port of
-  // the native Python `range()` function. See
-  // [the Python documentation](https://docs.python.org/library/functions.html#range).
-  function range(start, stop, step) {
-    if (stop == null) {
-      stop = start || 0;
-      start = 0;
-    }
-    if (!step) {
-      step = stop < start ? -1 : 1;
-    }
-
-    var length = Math.max(Math.ceil((stop - start) / step), 0);
-    var range = Array(length);
-
-    for (var idx = 0; idx < length; idx++, start += step) {
-      range[idx] = start;
-    }
-
-    return range;
-  }
-
-  // Chunk a single array into multiple arrays, each containing `count` or fewer
-  // items.
-  function chunk(array, count) {
-    if (count == null || count < 1) return [];
-    var result = [];
-    var i = 0, length = array.length;
-    while (i < length) {
-      result.push(slice.call(array, i, i += count));
-    }
-    return result;
-  }
-
-  // Helper function to continue chaining intermediate results.
-  function chainResult(instance, obj) {
-    return instance._chain ? _$1(obj).chain() : obj;
-  }
-
-  // Add your own custom functions to the Underscore object.
-  function mixin(obj) {
-    each(functions(obj), function(name) {
-      var func = _$1[name] = obj[name];
-      _$1.prototype[name] = function() {
-        var args = [this._wrapped];
-        push.apply(args, arguments);
-        return chainResult(this, func.apply(_$1, args));
-      };
-    });
-    return _$1;
-  }
-
-  // Add all mutator `Array` functions to the wrapper.
-  each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
-    var method = ArrayProto[name];
-    _$1.prototype[name] = function() {
-      var obj = this._wrapped;
-      if (obj != null) {
-        method.apply(obj, arguments);
-        if ((name === 'shift' || name === 'splice') && obj.length === 0) {
-          delete obj[0];
-        }
-      }
-      return chainResult(this, obj);
-    };
-  });
-
-  // Add all accessor `Array` functions to the wrapper.
-  each(['concat', 'join', 'slice'], function(name) {
-    var method = ArrayProto[name];
-    _$1.prototype[name] = function() {
-      var obj = this._wrapped;
-      if (obj != null) obj = method.apply(obj, arguments);
-      return chainResult(this, obj);
-    };
-  });
-
-  // Named Exports
-
-  var allExports = {
-    __proto__: null,
-    VERSION: VERSION,
-    restArguments: restArguments,
-    isObject: isObject,
-    isNull: isNull,
-    isUndefined: isUndefined,
-    isBoolean: isBoolean,
-    isElement: isElement,
-    isString: isString,
-    isNumber: isNumber,
-    isDate: isDate,
-    isRegExp: isRegExp,
-    isError: isError,
-    isSymbol: isSymbol,
-    isArrayBuffer: isArrayBuffer,
-    isDataView: isDataView$1,
-    isArray: isArray,
-    isFunction: isFunction$1,
-    isArguments: isArguments$1,
-    isFinite: isFinite$1,
-    isNaN: isNaN$1,
-    isTypedArray: isTypedArray$1,
-    isEmpty: isEmpty,
-    isMatch: isMatch,
-    isEqual: isEqual,
-    isMap: isMap,
-    isWeakMap: isWeakMap,
-    isSet: isSet,
-    isWeakSet: isWeakSet,
-    keys: keys,
-    allKeys: allKeys,
-    values: values,
-    pairs: pairs,
-    invert: invert,
-    functions: functions,
-    methods: functions,
-    extend: extend,
-    extendOwn: extendOwn,
-    assign: extendOwn,
-    defaults: defaults,
-    create: create,
-    clone: clone,
-    tap: tap,
-    get: get,
-    has: has,
-    mapObject: mapObject,
-    identity: identity,
-    constant: constant,
-    noop: noop,
-    toPath: toPath$1,
-    property: property,
-    propertyOf: propertyOf,
-    matcher: matcher,
-    matches: matcher,
-    times: times,
-    random: random,
-    now: now,
-    escape: _escape,
-    unescape: _unescape,
-    templateSettings: templateSettings,
-    template: template,
-    result: result,
-    uniqueId: uniqueId,
-    chain: chain,
-    iteratee: iteratee,
-    partial: partial,
-    bind: bind,
-    bindAll: bindAll,
-    memoize: memoize,
-    delay: delay,
-    defer: defer,
-    throttle: throttle,
-    debounce: debounce,
-    wrap: wrap,
-    negate: negate,
-    compose: compose,
-    after: after,
-    before: before,
-    once: once,
-    findKey: findKey,
-    findIndex: findIndex,
-    findLastIndex: findLastIndex,
-    sortedIndex: sortedIndex,
-    indexOf: indexOf,
-    lastIndexOf: lastIndexOf,
-    find: find,
-    detect: find,
-    findWhere: findWhere,
-    each: each,
-    forEach: each,
-    map: map,
-    collect: map,
-    reduce: reduce,
-    foldl: reduce,
-    inject: reduce,
-    reduceRight: reduceRight,
-    foldr: reduceRight,
-    filter: filter,
-    select: filter,
-    reject: reject,
-    every: every,
-    all: every,
-    some: some,
-    any: some,
-    contains: contains,
-    includes: contains,
-    include: contains,
-    invoke: invoke,
-    pluck: pluck,
-    where: where,
-    max: max,
-    min: min,
-    shuffle: shuffle,
-    sample: sample,
-    sortBy: sortBy,
-    groupBy: groupBy,
-    indexBy: indexBy,
-    countBy: countBy,
-    partition: partition,
-    toArray: toArray,
-    size: size,
-    pick: pick,
-    omit: omit,
-    first: first,
-    head: first,
-    take: first,
-    initial: initial,
-    last: last,
-    rest: rest,
-    tail: rest,
-    drop: rest,
-    compact: compact,
-    flatten: flatten,
-    without: without,
-    uniq: uniq,
-    unique: uniq,
-    union: union,
-    intersection: intersection,
-    difference: difference,
-    unzip: unzip,
-    transpose: unzip,
-    zip: zip,
-    object: object,
-    range: range,
-    chunk: chunk,
-    mixin: mixin,
-    'default': _$1
-  };
-
-  // Default Export
-
-  // Add all of the Underscore functions to the wrapper object.
-  var _ = mixin(allExports);
-  // Legacy Node.js API.
-  _._ = _;
-
-  return _;
-
-})));
-//# sourceMappingURL=underscore-umd.js.map
diff --git a/docs/build/html/_static/underscore.js b/docs/build/html/_static/underscore.js
deleted file mode 100644
index cf177d4285ab55fbc16406a5ec827b80e7eecd53..0000000000000000000000000000000000000000
--- a/docs/build/html/_static/underscore.js
+++ /dev/null
@@ -1,6 +0,0 @@
-!function(n,r){"object"==typeof exports&&"undefined"!=typeof module?module.exports=r():"function"==typeof define&&define.amd?define("underscore",r):(n="undefined"!=typeof globalThis?globalThis:n||self,function(){var t=n._,e=n._=r();e.noConflict=function(){return n._=t,e}}())}(this,(function(){
-//     Underscore.js 1.13.1
-//     https://underscorejs.org
-//     (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors
-//     Underscore may be freely distributed under the MIT license.
-var n="1.13.1",r="object"==typeof self&&self.self===self&&self||"object"==typeof global&&global.global===global&&global||Function("return this")()||{},t=Array.prototype,e=Object.prototype,u="undefined"!=typeof Symbol?Symbol.prototype:null,o=t.push,i=t.slice,a=e.toString,f=e.hasOwnProperty,c="undefined"!=typeof ArrayBuffer,l="undefined"!=typeof DataView,s=Array.isArray,p=Object.keys,v=Object.create,h=c&&ArrayBuffer.isView,y=isNaN,d=isFinite,g=!{toString:null}.propertyIsEnumerable("toString"),b=["valueOf","isPrototypeOf","toString","propertyIsEnumerable","hasOwnProperty","toLocaleString"],m=Math.pow(2,53)-1;function j(n,r){return r=null==r?n.length-1:+r,function(){for(var t=Math.max(arguments.length-r,0),e=Array(t),u=0;u<t;u++)e[u]=arguments[u+r];switch(r){case 0:return n.call(this,e);case 1:return n.call(this,arguments[0],e);case 2:return n.call(this,arguments[0],arguments[1],e)}var o=Array(r+1);for(u=0;u<r;u++)o[u]=arguments[u];return o[r]=e,n.apply(this,o)}}function _(n){var r=typeof n;return"function"===r||"object"===r&&!!n}function w(n){return void 0===n}function A(n){return!0===n||!1===n||"[object Boolean]"===a.call(n)}function x(n){var r="[object "+n+"]";return function(n){return a.call(n)===r}}var S=x("String"),O=x("Number"),M=x("Date"),E=x("RegExp"),B=x("Error"),N=x("Symbol"),I=x("ArrayBuffer"),T=x("Function"),k=r.document&&r.document.childNodes;"function"!=typeof/./&&"object"!=typeof Int8Array&&"function"!=typeof k&&(T=function(n){return"function"==typeof n||!1});var D=T,R=x("Object"),F=l&&R(new DataView(new ArrayBuffer(8))),V="undefined"!=typeof Map&&R(new Map),P=x("DataView");var q=F?function(n){return null!=n&&D(n.getInt8)&&I(n.buffer)}:P,U=s||x("Array");function W(n,r){return null!=n&&f.call(n,r)}var z=x("Arguments");!function(){z(arguments)||(z=function(n){return W(n,"callee")})}();var L=z;function $(n){return O(n)&&y(n)}function C(n){return function(){return n}}function K(n){return function(r){var t=n(r);return"number"==typeof t&&t>=0&&t<=m}}function J(n){return function(r){return null==r?void 0:r[n]}}var G=J("byteLength"),H=K(G),Q=/\[object ((I|Ui)nt(8|16|32)|Float(32|64)|Uint8Clamped|Big(I|Ui)nt64)Array\]/;var X=c?function(n){return h?h(n)&&!q(n):H(n)&&Q.test(a.call(n))}:C(!1),Y=J("length");function Z(n,r){r=function(n){for(var r={},t=n.length,e=0;e<t;++e)r[n[e]]=!0;return{contains:function(n){return r[n]},push:function(t){return r[t]=!0,n.push(t)}}}(r);var t=b.length,u=n.constructor,o=D(u)&&u.prototype||e,i="constructor";for(W(n,i)&&!r.contains(i)&&r.push(i);t--;)(i=b[t])in n&&n[i]!==o[i]&&!r.contains(i)&&r.push(i)}function nn(n){if(!_(n))return[];if(p)return p(n);var r=[];for(var t in n)W(n,t)&&r.push(t);return g&&Z(n,r),r}function rn(n,r){var t=nn(r),e=t.length;if(null==n)return!e;for(var u=Object(n),o=0;o<e;o++){var i=t[o];if(r[i]!==u[i]||!(i in u))return!1}return!0}function tn(n){return n instanceof tn?n:this instanceof tn?void(this._wrapped=n):new tn(n)}function en(n){return new Uint8Array(n.buffer||n,n.byteOffset||0,G(n))}tn.VERSION=n,tn.prototype.value=function(){return this._wrapped},tn.prototype.valueOf=tn.prototype.toJSON=tn.prototype.value,tn.prototype.toString=function(){return String(this._wrapped)};var un="[object DataView]";function on(n,r,t,e){if(n===r)return 0!==n||1/n==1/r;if(null==n||null==r)return!1;if(n!=n)return r!=r;var o=typeof n;return("function"===o||"object"===o||"object"==typeof r)&&function n(r,t,e,o){r instanceof tn&&(r=r._wrapped);t instanceof tn&&(t=t._wrapped);var i=a.call(r);if(i!==a.call(t))return!1;if(F&&"[object 
Object]"==i&&q(r)){if(!q(t))return!1;i=un}switch(i){case"[object RegExp]":case"[object String]":return""+r==""+t;case"[object Number]":return+r!=+r?+t!=+t:0==+r?1/+r==1/t:+r==+t;case"[object Date]":case"[object Boolean]":return+r==+t;case"[object Symbol]":return u.valueOf.call(r)===u.valueOf.call(t);case"[object ArrayBuffer]":case un:return n(en(r),en(t),e,o)}var f="[object Array]"===i;if(!f&&X(r)){if(G(r)!==G(t))return!1;if(r.buffer===t.buffer&&r.byteOffset===t.byteOffset)return!0;f=!0}if(!f){if("object"!=typeof r||"object"!=typeof t)return!1;var c=r.constructor,l=t.constructor;if(c!==l&&!(D(c)&&c instanceof c&&D(l)&&l instanceof l)&&"constructor"in r&&"constructor"in t)return!1}o=o||[];var s=(e=e||[]).length;for(;s--;)if(e[s]===r)return o[s]===t;if(e.push(r),o.push(t),f){if((s=r.length)!==t.length)return!1;for(;s--;)if(!on(r[s],t[s],e,o))return!1}else{var p,v=nn(r);if(s=v.length,nn(t).length!==s)return!1;for(;s--;)if(p=v[s],!W(t,p)||!on(r[p],t[p],e,o))return!1}return e.pop(),o.pop(),!0}(n,r,t,e)}function an(n){if(!_(n))return[];var r=[];for(var t in n)r.push(t);return g&&Z(n,r),r}function fn(n){var r=Y(n);return function(t){if(null==t)return!1;var e=an(t);if(Y(e))return!1;for(var u=0;u<r;u++)if(!D(t[n[u]]))return!1;return n!==hn||!D(t[cn])}}var cn="forEach",ln="has",sn=["clear","delete"],pn=["get",ln,"set"],vn=sn.concat(cn,pn),hn=sn.concat(pn),yn=["add"].concat(sn,cn,ln),dn=V?fn(vn):x("Map"),gn=V?fn(hn):x("WeakMap"),bn=V?fn(yn):x("Set"),mn=x("WeakSet");function jn(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=n[r[u]];return e}function _n(n){for(var r={},t=nn(n),e=0,u=t.length;e<u;e++)r[n[t[e]]]=t[e];return r}function wn(n){var r=[];for(var t in n)D(n[t])&&r.push(t);return r.sort()}function An(n,r){return function(t){var e=arguments.length;if(r&&(t=Object(t)),e<2||null==t)return t;for(var u=1;u<e;u++)for(var o=arguments[u],i=n(o),a=i.length,f=0;f<a;f++){var c=i[f];r&&void 0!==t[c]||(t[c]=o[c])}return t}}var xn=An(an),Sn=An(nn),On=An(an,!0);function Mn(n){if(!_(n))return{};if(v)return v(n);var r=function(){};r.prototype=n;var t=new r;return r.prototype=null,t}function En(n){return _(n)?U(n)?n.slice():xn({},n):n}function Bn(n){return U(n)?n:[n]}function Nn(n){return tn.toPath(n)}function In(n,r){for(var t=r.length,e=0;e<t;e++){if(null==n)return;n=n[r[e]]}return t?n:void 0}function Tn(n,r,t){var e=In(n,Nn(r));return w(e)?t:e}function kn(n){return n}function Dn(n){return n=Sn({},n),function(r){return rn(r,n)}}function Rn(n){return n=Nn(n),function(r){return In(r,n)}}function Fn(n,r,t){if(void 0===r)return n;switch(null==t?3:t){case 1:return function(t){return n.call(r,t)};case 3:return function(t,e,u){return n.call(r,t,e,u)};case 4:return function(t,e,u,o){return n.call(r,t,e,u,o)}}return function(){return n.apply(r,arguments)}}function Vn(n,r,t){return null==n?kn:D(n)?Fn(n,r,t):_(n)&&!U(n)?Dn(n):Rn(n)}function Pn(n,r){return Vn(n,r,1/0)}function qn(n,r,t){return tn.iteratee!==Pn?tn.iteratee(n,r):Vn(n,r,t)}function Un(){}function Wn(n,r){return null==r&&(r=n,n=0),n+Math.floor(Math.random()*(r-n+1))}tn.toPath=Bn,tn.iteratee=Pn;var zn=Date.now||function(){return(new Date).getTime()};function Ln(n){var r=function(r){return n[r]},t="(?:"+nn(n).join("|")+")",e=RegExp(t),u=RegExp(t,"g");return function(n){return n=null==n?"":""+n,e.test(n)?n.replace(u,r):n}}var 
$n={"&":"&amp;","<":"&lt;",">":"&gt;",'"':"&quot;","'":"&#x27;","`":"&#x60;"},Cn=Ln($n),Kn=Ln(_n($n)),Jn=tn.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g},Gn=/(.)^/,Hn={"'":"'","\\":"\\","\r":"r","\n":"n","\u2028":"u2028","\u2029":"u2029"},Qn=/\\|'|\r|\n|\u2028|\u2029/g;function Xn(n){return"\\"+Hn[n]}var Yn=/^\s*(\w|\$)+\s*$/;var Zn=0;function nr(n,r,t,e,u){if(!(e instanceof r))return n.apply(t,u);var o=Mn(n.prototype),i=n.apply(o,u);return _(i)?i:o}var rr=j((function(n,r){var t=rr.placeholder,e=function(){for(var u=0,o=r.length,i=Array(o),a=0;a<o;a++)i[a]=r[a]===t?arguments[u++]:r[a];for(;u<arguments.length;)i.push(arguments[u++]);return nr(n,e,this,this,i)};return e}));rr.placeholder=tn;var tr=j((function(n,r,t){if(!D(n))throw new TypeError("Bind must be called on a function");var e=j((function(u){return nr(n,e,r,this,t.concat(u))}));return e})),er=K(Y);function ur(n,r,t,e){if(e=e||[],r||0===r){if(r<=0)return e.concat(n)}else r=1/0;for(var u=e.length,o=0,i=Y(n);o<i;o++){var a=n[o];if(er(a)&&(U(a)||L(a)))if(r>1)ur(a,r-1,t,e),u=e.length;else for(var f=0,c=a.length;f<c;)e[u++]=a[f++];else t||(e[u++]=a)}return e}var or=j((function(n,r){var t=(r=ur(r,!1,!1)).length;if(t<1)throw new Error("bindAll must be passed function names");for(;t--;){var e=r[t];n[e]=tr(n[e],n)}return n}));var ir=j((function(n,r,t){return setTimeout((function(){return n.apply(null,t)}),r)})),ar=rr(ir,tn,1);function fr(n){return function(){return!n.apply(this,arguments)}}function cr(n,r){var t;return function(){return--n>0&&(t=r.apply(this,arguments)),n<=1&&(r=null),t}}var lr=rr(cr,2);function sr(n,r,t){r=qn(r,t);for(var e,u=nn(n),o=0,i=u.length;o<i;o++)if(r(n[e=u[o]],e,n))return e}function pr(n){return function(r,t,e){t=qn(t,e);for(var u=Y(r),o=n>0?0:u-1;o>=0&&o<u;o+=n)if(t(r[o],o,r))return o;return-1}}var vr=pr(1),hr=pr(-1);function yr(n,r,t,e){for(var u=(t=qn(t,e,1))(r),o=0,i=Y(n);o<i;){var a=Math.floor((o+i)/2);t(n[a])<u?o=a+1:i=a}return o}function dr(n,r,t){return function(e,u,o){var a=0,f=Y(e);if("number"==typeof o)n>0?a=o>=0?o:Math.max(o+f,a):f=o>=0?Math.min(o+1,f):o+f+1;else if(t&&o&&f)return e[o=t(e,u)]===u?o:-1;if(u!=u)return(o=r(i.call(e,a,f),$))>=0?o+a:-1;for(o=n>0?a:f-1;o>=0&&o<f;o+=n)if(e[o]===u)return o;return-1}}var gr=dr(1,vr,yr),br=dr(-1,hr);function mr(n,r,t){var e=(er(n)?vr:sr)(n,r,t);if(void 0!==e&&-1!==e)return n[e]}function jr(n,r,t){var e,u;if(r=Fn(r,t),er(n))for(e=0,u=n.length;e<u;e++)r(n[e],e,n);else{var o=nn(n);for(e=0,u=o.length;e<u;e++)r(n[o[e]],o[e],n)}return n}function _r(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=Array(u),i=0;i<u;i++){var a=e?e[i]:i;o[i]=r(n[a],a,n)}return o}function wr(n){var r=function(r,t,e,u){var o=!er(r)&&nn(r),i=(o||r).length,a=n>0?0:i-1;for(u||(e=r[o?o[a]:a],a+=n);a>=0&&a<i;a+=n){var f=o?o[a]:a;e=t(e,r[f],f,r)}return e};return function(n,t,e,u){var o=arguments.length>=3;return r(n,Fn(t,u,4),e,o)}}var Ar=wr(1),xr=wr(-1);function Sr(n,r,t){var e=[];return r=qn(r,t),jr(n,(function(n,t,u){r(n,t,u)&&e.push(n)})),e}function Or(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(!r(n[i],i,n))return!1}return!0}function Mr(n,r,t){r=qn(r,t);for(var e=!er(n)&&nn(n),u=(e||n).length,o=0;o<u;o++){var i=e?e[o]:o;if(r(n[i],i,n))return!0}return!1}function Er(n,r,t,e){return er(n)||(n=jn(n)),("number"!=typeof t||e)&&(t=0),gr(n,r,t)>=0}var Br=j((function(n,r,t){var e,u;return D(r)?u=r:(r=Nn(r),e=r.slice(0,-1),r=r[r.length-1]),_r(n,(function(n){var 
o=u;if(!o){if(e&&e.length&&(n=In(n,e)),null==n)return;o=n[r]}return null==o?o:o.apply(n,t)}))}));function Nr(n,r){return _r(n,Rn(r))}function Ir(n,r,t){var e,u,o=-1/0,i=-1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e>o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))>i||u===-1/0&&o===-1/0)&&(o=n,i=u)}));return o}function Tr(n,r,t){if(null==r||t)return er(n)||(n=jn(n)),n[Wn(n.length-1)];var e=er(n)?En(n):jn(n),u=Y(e);r=Math.max(Math.min(r,u),0);for(var o=u-1,i=0;i<r;i++){var a=Wn(i,o),f=e[i];e[i]=e[a],e[a]=f}return e.slice(0,r)}function kr(n,r){return function(t,e,u){var o=r?[[],[]]:{};return e=qn(e,u),jr(t,(function(r,u){var i=e(r,u,t);n(o,r,i)})),o}}var Dr=kr((function(n,r,t){W(n,t)?n[t].push(r):n[t]=[r]})),Rr=kr((function(n,r,t){n[t]=r})),Fr=kr((function(n,r,t){W(n,t)?n[t]++:n[t]=1})),Vr=kr((function(n,r,t){n[t?0:1].push(r)}),!0),Pr=/[^\ud800-\udfff]|[\ud800-\udbff][\udc00-\udfff]|[\ud800-\udfff]/g;function qr(n,r,t){return r in t}var Ur=j((function(n,r){var t={},e=r[0];if(null==n)return t;D(e)?(r.length>1&&(e=Fn(e,r[1])),r=an(n)):(e=qr,r=ur(r,!1,!1),n=Object(n));for(var u=0,o=r.length;u<o;u++){var i=r[u],a=n[i];e(a,i,n)&&(t[i]=a)}return t})),Wr=j((function(n,r){var t,e=r[0];return D(e)?(e=fr(e),r.length>1&&(t=r[1])):(r=_r(ur(r,!1,!1),String),e=function(n,t){return!Er(r,t)}),Ur(n,e,t)}));function zr(n,r,t){return i.call(n,0,Math.max(0,n.length-(null==r||t?1:r)))}function Lr(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[0]:zr(n,n.length-r)}function $r(n,r,t){return i.call(n,null==r||t?1:r)}var Cr=j((function(n,r){return r=ur(r,!0,!0),Sr(n,(function(n){return!Er(r,n)}))})),Kr=j((function(n,r){return Cr(n,r)}));function Jr(n,r,t,e){A(r)||(e=t,t=r,r=!1),null!=t&&(t=qn(t,e));for(var u=[],o=[],i=0,a=Y(n);i<a;i++){var f=n[i],c=t?t(f,i,n):f;r&&!t?(i&&o===c||u.push(f),o=c):t?Er(o,c)||(o.push(c),u.push(f)):Er(u,f)||u.push(f)}return u}var Gr=j((function(n){return Jr(ur(n,!0,!0))}));function Hr(n){for(var r=n&&Ir(n,Y).length||0,t=Array(r),e=0;e<r;e++)t[e]=Nr(n,e);return t}var Qr=j(Hr);function Xr(n,r){return n._chain?tn(r).chain():r}function Yr(n){return jr(wn(n),(function(r){var t=tn[r]=n[r];tn.prototype[r]=function(){var n=[this._wrapped];return o.apply(n,arguments),Xr(this,t.apply(tn,n))}})),tn}jr(["pop","push","reverse","shift","sort","splice","unshift"],(function(n){var r=t[n];tn.prototype[n]=function(){var t=this._wrapped;return null!=t&&(r.apply(t,arguments),"shift"!==n&&"splice"!==n||0!==t.length||delete t[0]),Xr(this,t)}})),jr(["concat","join","slice"],(function(n){var r=t[n];tn.prototype[n]=function(){var n=this._wrapped;return null!=n&&(n=r.apply(n,arguments)),Xr(this,n)}}));var Zr=Yr({__proto__:null,VERSION:n,restArguments:j,isObject:_,isNull:function(n){return null===n},isUndefined:w,isBoolean:A,isElement:function(n){return!(!n||1!==n.nodeType)},isString:S,isNumber:O,isDate:M,isRegExp:E,isError:B,isSymbol:N,isArrayBuffer:I,isDataView:q,isArray:U,isFunction:D,isArguments:L,isFinite:function(n){return!N(n)&&d(n)&&!isNaN(parseFloat(n))},isNaN:$,isTypedArray:X,isEmpty:function(n){if(null==n)return!0;var r=Y(n);return"number"==typeof r&&(U(n)||S(n)||L(n))?0===r:0===Y(nn(n))},isMatch:rn,isEqual:function(n,r){return on(n,r)},isMap:dn,isWeakMap:gn,isSet:bn,isWeakSet:mn,keys:nn,allKeys:an,values:jn,pairs:function(n){for(var r=nn(n),t=r.length,e=Array(t),u=0;u<t;u++)e[u]=[r[u],n[r[u]]];return 
e},invert:_n,functions:wn,methods:wn,extend:xn,extendOwn:Sn,assign:Sn,defaults:On,create:function(n,r){var t=Mn(n);return r&&Sn(t,r),t},clone:En,tap:function(n,r){return r(n),n},get:Tn,has:function(n,r){for(var t=(r=Nn(r)).length,e=0;e<t;e++){var u=r[e];if(!W(n,u))return!1;n=n[u]}return!!t},mapObject:function(n,r,t){r=qn(r,t);for(var e=nn(n),u=e.length,o={},i=0;i<u;i++){var a=e[i];o[a]=r(n[a],a,n)}return o},identity:kn,constant:C,noop:Un,toPath:Bn,property:Rn,propertyOf:function(n){return null==n?Un:function(r){return Tn(n,r)}},matcher:Dn,matches:Dn,times:function(n,r,t){var e=Array(Math.max(0,n));r=Fn(r,t,1);for(var u=0;u<n;u++)e[u]=r(u);return e},random:Wn,now:zn,escape:Cn,unescape:Kn,templateSettings:Jn,template:function(n,r,t){!r&&t&&(r=t),r=On({},r,tn.templateSettings);var e=RegExp([(r.escape||Gn).source,(r.interpolate||Gn).source,(r.evaluate||Gn).source].join("|")+"|$","g"),u=0,o="__p+='";n.replace(e,(function(r,t,e,i,a){return o+=n.slice(u,a).replace(Qn,Xn),u=a+r.length,t?o+="'+\n((__t=("+t+"))==null?'':_.escape(__t))+\n'":e?o+="'+\n((__t=("+e+"))==null?'':__t)+\n'":i&&(o+="';\n"+i+"\n__p+='"),r})),o+="';\n";var i,a=r.variable;if(a){if(!Yn.test(a))throw new Error("variable is not a bare identifier: "+a)}else o="with(obj||{}){\n"+o+"}\n",a="obj";o="var __t,__p='',__j=Array.prototype.join,"+"print=function(){__p+=__j.call(arguments,'');};\n"+o+"return __p;\n";try{i=new Function(a,"_",o)}catch(n){throw n.source=o,n}var f=function(n){return i.call(this,n,tn)};return f.source="function("+a+"){\n"+o+"}",f},result:function(n,r,t){var e=(r=Nn(r)).length;if(!e)return D(t)?t.call(n):t;for(var u=0;u<e;u++){var o=null==n?void 0:n[r[u]];void 0===o&&(o=t,u=e),n=D(o)?o.call(n):o}return n},uniqueId:function(n){var r=++Zn+"";return n?n+r:r},chain:function(n){var r=tn(n);return r._chain=!0,r},iteratee:Pn,partial:rr,bind:tr,bindAll:or,memoize:function(n,r){var t=function(e){var u=t.cache,o=""+(r?r.apply(this,arguments):e);return W(u,o)||(u[o]=n.apply(this,arguments)),u[o]};return t.cache={},t},delay:ir,defer:ar,throttle:function(n,r,t){var e,u,o,i,a=0;t||(t={});var f=function(){a=!1===t.leading?0:zn(),e=null,i=n.apply(u,o),e||(u=o=null)},c=function(){var c=zn();a||!1!==t.leading||(a=c);var l=r-(c-a);return u=this,o=arguments,l<=0||l>r?(e&&(clearTimeout(e),e=null),a=c,i=n.apply(u,o),e||(u=o=null)):e||!1===t.trailing||(e=setTimeout(f,l)),i};return c.cancel=function(){clearTimeout(e),a=0,e=u=o=null},c},debounce:function(n,r,t){var e,u,o,i,a,f=function(){var c=zn()-u;r>c?e=setTimeout(f,r-c):(e=null,t||(i=n.apply(a,o)),e||(o=a=null))},c=j((function(c){return a=this,o=c,u=zn(),e||(e=setTimeout(f,r),t&&(i=n.apply(a,o))),i}));return c.cancel=function(){clearTimeout(e),e=o=a=null},c},wrap:function(n,r){return rr(r,n)},negate:fr,compose:function(){var n=arguments,r=n.length-1;return function(){for(var t=r,e=n[r].apply(this,arguments);t--;)e=n[t].call(this,e);return e}},after:function(n,r){return function(){if(--n<1)return r.apply(this,arguments)}},before:cr,once:lr,findKey:sr,findIndex:vr,findLastIndex:hr,sortedIndex:yr,indexOf:gr,lastIndexOf:br,find:mr,detect:mr,findWhere:function(n,r){return mr(n,Dn(r))},each:jr,forEach:jr,map:_r,collect:_r,reduce:Ar,foldl:Ar,inject:Ar,reduceRight:xr,foldr:xr,filter:Sr,select:Sr,reject:function(n,r,t){return Sr(n,fr(qn(r)),t)},every:Or,all:Or,some:Mr,any:Mr,contains:Er,includes:Er,include:Er,invoke:Br,pluck:Nr,where:function(n,r){return Sr(n,Dn(r))},max:Ir,min:function(n,r,t){var e,u,o=1/0,i=1/0;if(null==r||"number"==typeof r&&"object"!=typeof n[0]&&null!=n)for(var 
a=0,f=(n=er(n)?n:jn(n)).length;a<f;a++)null!=(e=n[a])&&e<o&&(o=e);else r=qn(r,t),jr(n,(function(n,t,e){((u=r(n,t,e))<i||u===1/0&&o===1/0)&&(o=n,i=u)}));return o},shuffle:function(n){return Tr(n,1/0)},sample:Tr,sortBy:function(n,r,t){var e=0;return r=qn(r,t),Nr(_r(n,(function(n,t,u){return{value:n,index:e++,criteria:r(n,t,u)}})).sort((function(n,r){var t=n.criteria,e=r.criteria;if(t!==e){if(t>e||void 0===t)return 1;if(t<e||void 0===e)return-1}return n.index-r.index})),"value")},groupBy:Dr,indexBy:Rr,countBy:Fr,partition:Vr,toArray:function(n){return n?U(n)?i.call(n):S(n)?n.match(Pr):er(n)?_r(n,kn):jn(n):[]},size:function(n){return null==n?0:er(n)?n.length:nn(n).length},pick:Ur,omit:Wr,first:Lr,head:Lr,take:Lr,initial:zr,last:function(n,r,t){return null==n||n.length<1?null==r||t?void 0:[]:null==r||t?n[n.length-1]:$r(n,Math.max(0,n.length-r))},rest:$r,tail:$r,drop:$r,compact:function(n){return Sr(n,Boolean)},flatten:function(n,r){return ur(n,r,!1)},without:Kr,uniq:Jr,unique:Jr,union:Gr,intersection:function(n){for(var r=[],t=arguments.length,e=0,u=Y(n);e<u;e++){var o=n[e];if(!Er(r,o)){var i;for(i=1;i<t&&Er(arguments[i],o);i++);i===t&&r.push(o)}}return r},difference:Cr,unzip:Hr,transpose:Hr,zip:Qr,object:function(n,r){for(var t={},e=0,u=Y(n);e<u;e++)r?t[n[e]]=r[e]:t[n[e][0]]=n[e][1];return t},range:function(n,r,t){null==r&&(r=n||0,n=0),t||(t=r<n?-1:1);for(var e=Math.max(Math.ceil((r-n)/t),0),u=Array(e),o=0;o<e;o++,n+=t)u[o]=n;return u},chunk:function(n,r){if(null==r||r<1)return[];for(var t=[],e=0,u=n.length;e<u;)t.push(i.call(n,e,e+=r));return t},mixin:Yr,default:tn});return Zr._=Zr,Zr}));
\ No newline at end of file
diff --git a/docs/build/html/example.html b/docs/build/html/example.html
deleted file mode 100644
index 5e9d170f5f2782113fafe2d734fabfee104f59dc..0000000000000000000000000000000000000000
--- a/docs/build/html/example.html
+++ /dev/null
@@ -1,112 +0,0 @@
-
-<!DOCTYPE html>
-
-<html lang="python">
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />
-
-    <title>My nifty title &#8212; bayesvalidrox 0.0.3 documentation</title>
-    <link rel="stylesheet" type="text/css" href="_static/pygments.css" />
-    <link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
-    <script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
-    <script src="_static/jquery.js"></script>
-    <script src="_static/underscore.js"></script>
-    <script src="_static/doctools.js"></script>
-    <link rel="index" title="Index" href="genindex.html" />
-    <link rel="search" title="Search" href="search.html" />
-    <link rel="prev" title="Welcome to bayesvalidrox&#39;s documentation!" href="index.html" />
-   
-  <link rel="stylesheet" href="_static/custom.css" type="text/css" />
-  
-  
-  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
-
-  </head><body>
-  
-
-    <div class="document">
-      <div class="documentwrapper">
-        <div class="bodywrapper">
-          
-
-          <div class="body" role="main">
-            
-  <section id="my-nifty-title">
-<h1>My nifty title<a class="headerlink" href="#my-nifty-title" title="Permalink to this headline">¶</a></h1>
-<p>Some <strong>text</strong>!</p>
-<div class="warning admonition">
-<p class="admonition-title">Here's my title</p>
-<p>Here's my admonition content</p>
-</div>
-</section>
-
-
-          </div>
-          
-        </div>
-      </div>
-      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
-        <div class="sphinxsidebarwrapper">
-<h1 class="logo"><a href="index.html">bayesvalidrox</a></h1>
-
-
-
-
-
-
-
-
-<h3>Navigation</h3>
-<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
-<ul class="current">
-<li class="toctree-l1 current"><a class="current reference internal" href="#">My nifty title</a></li>
-</ul>
-
-<div class="relations">
-<h3>Related Topics</h3>
-<ul>
-  <li><a href="index.html">Documentation overview</a><ul>
-      <li>Previous: <a href="index.html" title="previous chapter">Welcome to bayesvalidrox's documentation!</a></li>
-  </ul></li>
-</ul>
-</div>
-<div id="searchbox" style="display: none" role="search">
-  <h3 id="searchlabel">Quick search</h3>
-    <div class="searchformwrapper">
-    <form class="search" action="search.html" method="get">
-      <input type="text" name="q" aria-labelledby="searchlabel" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
-      <input type="submit" value="Go" />
-    </form>
-    </div>
-</div>
-<script>$('#searchbox').show(0);</script>
-
-
-
-
-
-
-
-
-        </div>
-      </div>
-      <div class="clearer"></div>
-    </div>
-    <div class="footer">
-      &copy;2022, Farid Mohammadi.
-      
-      |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 4.4.0</a>
-      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
-      
-      |
-      <a href="_sources/example.md.txt"
-          rel="nofollow">Page source</a>
-    </div>
-
-    
-
-    
-  </body>
-</html>
\ No newline at end of file
diff --git a/docs/build/html/genindex.html b/docs/build/html/genindex.html
deleted file mode 100644
index 1ff415690d8027e91ffcf627a659cd70a1c8612a..0000000000000000000000000000000000000000
--- a/docs/build/html/genindex.html
+++ /dev/null
@@ -1,104 +0,0 @@
-
-<!DOCTYPE html>
-
-<html lang="python">
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Index &#8212; bayesvalidrox 0.0.3 documentation</title>
-    <link rel="stylesheet" type="text/css" href="_static/pygments.css" />
-    <link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
-    <script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
-    <script src="_static/jquery.js"></script>
-    <script src="_static/underscore.js"></script>
-    <script src="_static/doctools.js"></script>
-    <link rel="index" title="Index" href="#" />
-    <link rel="search" title="Search" href="search.html" />
-   
-  <link rel="stylesheet" href="_static/custom.css" type="text/css" />
-  
-  
-  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
-
-  </head><body>
-  
-
-    <div class="document">
-      <div class="documentwrapper">
-        <div class="bodywrapper">
-          
-
-          <div class="body" role="main">
-            
-
-<h1 id="index">Index</h1>
-
-<div class="genindex-jumpbox">
- 
-</div>
-
-
-          </div>
-          
-        </div>
-      </div>
-      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
-        <div class="sphinxsidebarwrapper">
-<h1 class="logo"><a href="index.html">bayesvalidrox</a></h1>
-
-
-
-
-
-
-
-
-<h3>Navigation</h3>
-<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
-<ul>
-<li class="toctree-l1"><a class="reference internal" href="example.html">My nifty title</a></li>
-</ul>
-
-<div class="relations">
-<h3>Related Topics</h3>
-<ul>
-  <li><a href="index.html">Documentation overview</a><ul>
-  </ul></li>
-</ul>
-</div>
-<div id="searchbox" style="display: none" role="search">
-  <h3 id="searchlabel">Quick search</h3>
-    <div class="searchformwrapper">
-    <form class="search" action="search.html" method="get">
-      <input type="text" name="q" aria-labelledby="searchlabel" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
-      <input type="submit" value="Go" />
-    </form>
-    </div>
-</div>
-<script>$('#searchbox').show(0);</script>
-
-
-
-
-
-
-
-
-        </div>
-      </div>
-      <div class="clearer"></div>
-    </div>
-    <div class="footer">
-      &copy;2022, Farid Mohammadi.
-      
-      |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 4.4.0</a>
-      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
-      
-    </div>
-
-    
-
-    
-  </body>
-</html>
\ No newline at end of file
diff --git a/docs/build/html/index.html b/docs/build/html/index.html
deleted file mode 100644
index 5995ac4dcf04a9ef84c1d92b9a5906b90049766c..0000000000000000000000000000000000000000
--- a/docs/build/html/index.html
+++ /dev/null
@@ -1,130 +0,0 @@
-
-<!DOCTYPE html>
-
-<html lang="python">
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" /><meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />
-
-    <title>Welcome to bayesvalidrox&#39;s documentation! &#8212; bayesvalidrox 0.0.3 documentation</title>
-    <link rel="stylesheet" type="text/css" href="_static/pygments.css" />
-    <link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
-    <script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
-    <script src="_static/jquery.js"></script>
-    <script src="_static/underscore.js"></script>
-    <script src="_static/doctools.js"></script>
-    <link rel="index" title="Index" href="genindex.html" />
-    <link rel="search" title="Search" href="search.html" />
-    <link rel="next" title="My nifty title" href="example.html" />
-   
-  <link rel="stylesheet" href="_static/custom.css" type="text/css" />
-  
-  
-  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
-
-  </head><body>
-  
-
-    <div class="document">
-      <div class="documentwrapper">
-        <div class="bodywrapper">
-          
-
-          <div class="body" role="main">
-            
-  <section id="welcome-to-bayesvalidrox-s-documentation">
-<h1>Welcome to bayesvalidrox's documentation!<a class="headerlink" href="#welcome-to-bayesvalidrox-s-documentation" title="Permalink to this headline">¶</a></h1>
-<div class="toctree-wrapper compound">
-<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
-<ul>
-<li class="toctree-l1"><a class="reference internal" href="example.html">My nifty title</a></li>
-</ul>
-</div>
-<p>This is a normal text paragraph. The next paragraph is a code sample:</p>
-<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">It</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">processed</span> <span class="ow">in</span> <span class="nb">any</span> <span class="n">way</span><span class="p">,</span> <span class="k">except</span>
-<span class="n">that</span> <span class="n">the</span> <span class="n">indentation</span> <span class="ow">is</span> <span class="n">removed</span><span class="o">.</span>
-
-<span class="n">It</span> <span class="n">can</span> <span class="n">span</span> <span class="n">multiple</span> <span class="n">lines</span><span class="o">.</span>
-</pre></div>
-</div>
-<p>This is a normal text paragraph again.</p>
-</section>
-<section id="indices-and-tables">
-<h1>Indices and tables<a class="headerlink" href="#indices-and-tables" title="Permalink to this headline">¶</a></h1>
-<ul class="simple">
-<li><p><a class="reference internal" href="genindex.html"><span class="std std-ref">Index</span></a></p></li>
-<li><p><a class="reference internal" href="py-modindex.html"><span class="std std-ref">Module Index</span></a></p></li>
-<li><p><a class="reference internal" href="search.html"><span class="std std-ref">Search Page</span></a></p></li>
-<li><p><span class="xref std std-ref">pylink</span></p></li>
-</ul>
-</section>
-
-
-          </div>
-          
-        </div>
-      </div>
-      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
-        <div class="sphinxsidebarwrapper">
-<h1 class="logo"><a href="#">bayesvalidrox</a></h1>
-
-
-
-
-
-
-
-
-<h3>Navigation</h3>
-<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
-<ul>
-<li class="toctree-l1"><a class="reference internal" href="example.html">My nifty title</a></li>
-</ul>
-
-<div class="relations">
-<h3>Related Topics</h3>
-<ul>
-  <li><a href="#">Documentation overview</a><ul>
-      <li>Next: <a href="example.html" title="next chapter">My nifty title</a></li>
-  </ul></li>
-</ul>
-</div>
-<div id="searchbox" style="display: none" role="search">
-  <h3 id="searchlabel">Quick search</h3>
-    <div class="searchformwrapper">
-    <form class="search" action="search.html" method="get">
-      <input type="text" name="q" aria-labelledby="searchlabel" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
-      <input type="submit" value="Go" />
-    </form>
-    </div>
-</div>
-<script>$('#searchbox').show(0);</script>
-
-
-
-
-
-
-
-
-        </div>
-      </div>
-      <div class="clearer"></div>
-    </div>
-    <div class="footer">
-      &copy;2022, Farid Mohammadi.
-      
-      |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 4.4.0</a>
-      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
-      
-      |
-      <a href="_sources/index.rst.txt"
-          rel="nofollow">Page source</a>
-    </div>
-
-    
-
-    
-  </body>
-</html>
\ No newline at end of file
diff --git a/docs/build/html/objects.inv b/docs/build/html/objects.inv
deleted file mode 100644
index d8003617a63c550d09d36d1084476cb88cd223df..0000000000000000000000000000000000000000
--- a/docs/build/html/objects.inv
+++ /dev/null
@@ -1,6 +0,0 @@
-# Sphinx inventory version 2
-# Project: bayesvalidrox
-# Version: 
-# The remainder of this file is compressed using zlib.
-# [binary zlib-compressed payload omitted]
\ No newline at end of file
diff --git a/docs/build/html/pylink.html b/docs/build/html/pylink.html
deleted file mode 100755
index 87ce4f1822ee2789f229e15b7cbd2991df7f85a8..0000000000000000000000000000000000000000
--- a/docs/build/html/pylink.html
+++ /dev/null
@@ -1,1991 +0,0 @@
-<!doctype html>
-<html lang="en">
-<head>
-<meta charset="utf-8">
-<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
-<meta name="generator" content="pdoc 0.10.0" />
-<title>pylink API documentation</title>
-<meta name="description" content="" />
-<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
-<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
-<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
-<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
-<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
-<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
-<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
-<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
-</head>
-<body>
-<main>
-<article id="content">
-<header>
-<h1 class="title">Module <code>pylink</code></h1>
-</header>
-<section id="section-intro">
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-import os
-import shutil
-import h5py
-import numpy as np
-import time
-import zipfile
-import pandas as pd
-import multiprocessing
-import tqdm
-
-
-class PyLinkForwardModel(object):
-    r&#34;&#34;&#34;
-    A forward model binder
-
-    This class serves as a code wrapper. This wrapper allows the execution of
-    a third-party software/solver within the scope of BayesValidRox.
-
-    Attributes
-    ----------
-    link_type : str
-        The type of the wrapper. The default is `&#39;pylink&#39;`. This runs the
-        third-party software or an executable using a shell command with given
-        input files.
-        The second option is `&#39;function&#39;`, which assumes that the model can
-        be run using a function written separately in a Python script.
-    name : str
-        Name of the model.
-    shell_command : str
-        Shell command to be executed for the `&#39;pylink&#39;` wrapper.
-    py_file : str
-        Python file name without `.py` extension to be run for the `&#39;function&#39;`
-        wrapper. Note that the name of the python file and that of the function
-        must be the same. This function must receive the parameters in an array
-        of shape `(n_samples, n_params)` and return a dictionary with the
-        x_values and output arrays for the given output names.
-    input_file : str or list
-        The input file to be passed to the `&#39;pylink&#39;` wrapper.
-    input_template : str or list
-        A template input file to be passed to the `&#39;pylink&#39;` wrapper. This file
-        must be a copy of `input_file` with `&lt;Xi&gt;` placeholders for the input
-        parameters defined using the `inputs` class, with i being the parameter
-        number. The file name ending should include `.tpl` before the actual
-        extension of the input file, for example, `params.tpl.input`.
-    aux_file : str or list
-        The list of auxiliary files needed for the `&#39;pylink&#39;` wrapper.
-    exe_path : str
-        Execution path if you wish to run the model for the `&#39;pylink&#39;` wrapper
-        in another directory. The default is `None`, which corresponds to the
-        current working directory.
-    output_names : list of str
-        List of the model outputs to be used for the analysis.
-    output_file_names : list of str
-        List of the name of the model output text files for the `&#39;pylink&#39;`
-        wrapper.
-    output_parser : str
-        Name of the model parser file (without `.py` extension) that receives
-        the `output_file_names` and returns a 2d-array with the first row being
-        the x_values, e.g. x coordinates or time, and the remaining rows
-        containing the simulation output for each model output defined in
-        `output_names`. Note
-        that again here the name of the file and that of the function must be
-        the same.
-    multi_process: bool
-        Whether the model runs are to be executed in parallel for the `&#39;pylink&#39;`
-        wrapper. The default is `True`.
-    n_cpus: int
-        The number of cpus to be used for the parallel model execution for the
-        `&#39;pylink&#39;` wrapper. The default is `None`, which corresponds to all
-        available cpus.
-    meas_file : str
-        The name of the measurement text-based file. This file must contain
-        x_values as the first column and one column for each model output. The
-        default is `None`. Only needed for the Bayesian Inference.
-    meas_file_valid : str
-        The name of the measurement text-based file for the validation. The
-        default is `None`. Only needed for the validation with Bayesian
-        Inference.
-    mc_ref_file : str
-        The name of the text file for the Monte-Carlo reference (mean and
-        standard deviation) values. It must contain `x_values` as the first
-        column, `mean` as the second column and `std` as the third. It can be
-        used to compare the estimated moments using meta-model in the post-
-        processing step.
-    obs_dict : dict
-        A dictionary containing the measurement data. It must contain
-        `x_values` as the first item and one item for each model output.
-        The default is `{}`. Only needed for the Bayesian Inference.
-    obs_dict_valid : dict
-        A dictionary containing the validation measurement data. It
-        must contain `x_values` as the first item and one item for each model
-        output. The default is `{}`.
-    mc_ref_dict : dict
-        A dictionary containing the Monte-Carlo reference (mean and standard
-        deviation) values. It must contain `x_values` as the first item and
-        `mean` as the second item and `std` as the third. The default is `{}`.
-    &#34;&#34;&#34;
-
-    def __init__(self, link_type=&#39;pylink&#39;, name=None, shell_command=&#39;&#39;,
-                 py_file=None, input_file=None, input_template=None,
-                 aux_file=None, exe_path=&#39;&#39;, output_names=[], output_parser=&#39;&#39;,
-                 output_file_names=[], multi_process=True, n_cpus=None,
-                 meas_file=None, meas_file_valid=None, mc_ref_file=None,
-                 obs_dict={}, obs_dict_valid={}, mc_ref_dict={}):
-        self.link_type = link_type
-        self.name = name
-        self.shell_command = shell_command
-        self.py_file = py_file
-        self.input_file = input_file
-        self.input_template = input_template
-        self.aux_file = aux_file
-        self.exe_path = exe_path
-        self.multi_process = multi_process
-        self.n_cpus = n_cpus
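-        # Caution: the following three assignments set attributes on the
-        # nested Output class itself, so they are shared by all instances
-        # of PyLinkForwardModel rather than being per-instance state.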
-        self.Output.parser = output_parser
-        self.Output.names = output_names
-        self.Output.file_names = output_file_names
-        self.meas_file = meas_file
-        self.meas_file_valid = meas_file_valid
-        self.mc_ref_file = mc_ref_file
-        self.observations = obs_dict
-        self.observations_valid = obs_dict_valid
-        self.mc_reference = mc_ref_dict
-
-    # Nested class
-    class Output:
-        def __init__(self):
-            self.parser = None
-            self.names = None
-            self.file_names = None
-
-    # -------------------------------------------------------------------------
-    def within_range(self, out, minout, maxout):
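-        &#34;&#34;&#34;Checks whether all entries of out lie strictly between minout
-        and maxout.&#34;&#34;&#34;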
-        inside = False
-        if (out &gt; minout).all() and (out &lt; maxout).all():
-            inside = True
-        return inside
-
-    # -------------------------------------------------------------------------
-    def read_observation(self, case=&#39;calib&#39;):
-        &#34;&#34;&#34;
-        Reads/prepares the observation/measurement data for
-        calibration or validation, depending on `case`.
-
-        Returns
-        -------
-        DataFrame
-            A dataframe with the calibration or validation data.
-
-        &#34;&#34;&#34;
-        if case.lower() == &#39;calib&#39;:
-            if bool(self.observations):
-                obs = pd.DataFrame.from_dict(self.observations)
-            elif self.meas_file is not None:
-                file_path = os.path.join(os.getcwd(), self.meas_file)
-                obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-            else:
-                raise Exception(&#34;Please provide the observation data as a &#34;
-                                &#34;dictionary via observations attribute or pass&#34;
-                                &#34; the csv-file path to MeasurementFile &#34;
-                                &#34;attribute&#34;)
-        elif case.lower() == &#39;valid&#39;:
-            if bool(self.observations_valid):
-                obs = pd.DataFrame.from_dict(self.observations_valid)
-            elif self.meas_file_valid is not None:
-                file_path = os.path.join(os.getcwd(), self.meas_file_valid)
-                obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-            else:
-                raise Exception(&#34;Please provide the observation data as a &#34;
-                                &#34;dictionary via Observations attribute or pass&#34;
-                                &#34; the csv-file path to MeasurementFile &#34;
-                                &#34;attribute&#34;)
-
-        # Compute the number of observations
-        n_obs = obs[self.Output.names].notnull().sum().values.sum()
-
-        if case.lower() == &#39;calib&#39;:
-            self.observations = obs
-            self.n_obs = n_obs
-            return self.observations
-        elif case.lower() == &#39;valid&#39;:
-            self.observations_valid = obs
-            self.n_obs_valid = n_obs
-            return self.observations_valid
-
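-    # A minimal usage sketch (not part of the original module): assuming a
-    # model constructed with output_names=[&#39;Z&#39;], the observation data
-    # could be supplied as
-    #
-    #     model.observations = {&#39;x_values&#39;: [0.0, 0.5, 1.0],
-    #                           &#39;Z&#39;: [1.2, 0.9, 0.4]}
-    #     obs_df = model.read_observation(case=&#39;calib&#39;)
-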
-    # -------------------------------------------------------------------------
-    def read_mc_reference(self):
-        &#34;&#34;&#34;
-        Reads the Monte-Carlo reference data, if available, for
-        further in-depth post-processing after meta-model training.
-
-        Returns
-        -------
-        DataFrame or None
-
-        &#34;&#34;&#34;
-        if self.mc_ref_file is None and not hasattr(self, &#39;mc_reference&#39;):
-            return
-        elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
-            self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
-        elif self.mc_ref_file is not None:
-            file_path = os.path.join(os.getcwd(), self.mc_ref_file)
-            self.mc_reference = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-        else:
-            raise Exception(&#34;Please provide the MC reference data as a &#34;
-                            &#34;dictionary via mc_reference attribute or pass the&#34;
-                            &#34; csv-file path to mc_ref_file attribute&#34;)
-        return self.mc_reference
-
-    # -------------------------------------------------------------------------
-    def read_output(self):
-        &#34;&#34;&#34;
-        Imports the output parser and runs it on the model output files.
-        It is required when the model returns the simulation outputs in
-        csv files.
-
-        Returns
-        -------
-        output : array or dict
-            Parsed simulation output.
-
-        &#34;&#34;&#34;
-        output_func_name = self.Output.parser
-
-        output_func = getattr(__import__(output_func_name), output_func_name)
-
-        file_names = []
-        for File in self.Output.file_names:
-            file_names.append(os.path.join(self.exe_path, File))
-        try:
-            output = output_func(self.name, file_names)
-        except TypeError:
-            output = output_func(file_names)
-        return output
-
-    # -------------------------------------------------------------------------
-    def update_input_params(self, new_input_file, param_sets):
-        &#34;&#34;&#34;
-        Finds the &lt;Xi&gt; patterns in the new input files and replaces them
-        with the new values from the array param_sets.
-
-        Parameters
-        ----------
-        new_input_file : list
-            List of the new input file names.
-        param_sets : array of shape (n_params,)
-            Parameter values to be inserted into the input files.
-
-        Returns
-        -------
-        None.
-
-        &#34;&#34;&#34;
-        NofPa = param_sets.shape[0]
-        text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
-
-        for filename in new_input_file:
-            # Read in the file
-            with open(filename, &#39;r&#39;) as file:
-                filedata = file.read()
-
-            # Replace the target string
-            for text_to_search, params in zip(text_to_search_list, param_sets):
-                filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
-
-            # Write the file out again
-            with open(filename, &#39;w&#39;) as file:
-                file.write(filedata)
-
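-    # Illustrative example (the file content is assumed, not taken from the
-    # original module): a template line such as
-    #
-    #     porosity = &lt;X1&gt;
-    #
-    # becomes, after calling update_input_params with
-    # param_sets = np.array([0.25]),
-    #
-    #     porosity = 2.5000e-01
-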
-    # -------------------------------------------------------------------------
-    def run_command(self, command, output_file_names):
-        &#34;&#34;&#34;
-        Runs the execution command given by the user to run the given model.
-        It checks if the output files have been generated. If yes, the job is
-        done and it extracts and returns the requested output(s). Otherwise,
-        it executes the command again.
-
-        Parameters
-        ----------
-        command : string
-            The command to be executed.
-        output_file_names : list
-            Names of the output files.
-
-        Returns
-        -------
-        simulation_outputs : array of shape (n_obs, n_outputs)
-            Simulation outputs.
-
-        &#34;&#34;&#34;
-
-        # Check if simulation is finished
-        while True:
-            time.sleep(3)
-            files = os.listdir(&#34;.&#34;)
-            if all(elem in files for elem in output_file_names):
-                break
-            else:
-                # Run command
-                Process = os.system(f&#39;./../{command}&#39;)
-                if Process != 0:
-                    print(&#39;\nMessage 1:&#39;)
-                    print(f&#39;\tThe command returned the non-zero exit code &#39;
-                          f&#39;{Process}, which may indicate compilation or &#39;
-                          &#39;runtime problems.\n&#39;)
-
-        os.chdir(&#34;..&#34;)
-
-        # Read the output
-        simulation_outputs = self.read_output()
-
-        return simulation_outputs
-
-    # -------------------------------------------------------------------------
-    def run_forwardmodel(self, xx):
-        &#34;&#34;&#34;
-        This function creates a subdirectory for the current run, copies the
-        necessary files to this directory and renames them. Next, it executes
-        the given command.
-        &#34;&#34;&#34;
-        c_points, run_no, key_str = xx
-
-        # Handle the case where only one input file is provided
-        if not isinstance(self.input_template, list):
-            self.input_template = [self.input_template]
-        if not isinstance(self.input_file, list):
-            self.input_file = [self.input_file]
-
-        new_input_file = []
-        # Loop over the InputTemplates:
-        for in_temp in self.input_template:
-            if &#39;/&#39; in in_temp:
-                in_temp = in_temp.split(&#39;/&#39;)[-1]
-            new_input_file.append(in_temp.split(&#39;.tpl&#39;)[0] + key_str +
-                                  f&#34;_{run_no+1}&#34; + in_temp.split(&#39;.tpl&#39;)[1])
-
-        # Create directories
-        newpath = self.name + key_str + f&#39;_{run_no+1}&#39;
-        if not os.path.exists(newpath):
-            os.makedirs(newpath)
-
-        # Copy the necessary files to the directories
-        for in_temp in self.input_template:
-            # Input file(s) of the model
-            shutil.copy2(in_temp, newpath)
-        # Auxiliary file
-        if self.aux_file is not None:
-            shutil.copy2(self.aux_file, newpath)  # Auxiliary file
-
-        # Rename the input file(s) and/or the auxiliary file
-        os.chdir(newpath)
-        for input_tem, input_file in zip(self.input_template, new_input_file):
-            if &#39;/&#39; in input_tem:
-                input_tem = input_tem.split(&#39;/&#39;)[-1]
-            os.rename(input_tem, input_file)
-
-        # Update the parameters in the input file
-        self.update_input_params(new_input_file, c_points)
-
-        # Update the user defined command and the execution path
-        try:
-            new_command = self.shell_command.replace(self.input_file[0],
-                                                     new_input_file[0])
-            new_command = new_command.replace(self.input_file[1],
-                                              new_input_file[1])
-        except IndexError:
-            new_command = self.shell_command.replace(self.input_file[0],
-                                                     new_input_file[0])
-        # Set the exe path if not provided
-        if not bool(self.exe_path):
-            self.exe_path = os.getcwd()
-
-        # Run the model
-        output = self.run_command(new_command, self.Output.file_names)
-
-        return output
-
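-    # run_forwardmodel expects a single packed tuple so that it can be
-    # mapped with multiprocessing.Pool.imap; a hypothetical direct call:
-    #
-    #     output = model.run_forwardmodel((sample, 0, &#39;&#39;))
-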
-    # -------------------------------------------------------------------------
-    def run_model_parallel(self, c_points, prevRun_No=0, key_str=&#39;&#39;,
-                           mp=True):
-        &#34;&#34;&#34;
-        Runs model simulations. If mp is true (default), then the simulations
-        are started in parallel.
-
-        Parameters
-        ----------
-        c_points : array like of shape (n_samples, n_params)
-            Collocation points (training set).
-        prevRun_No : int, optional
-            Previous run number, in case the sequential design is selected.
-            The default is 0.
-        key_str : string, optional
-            A descriptive string for validation runs. The default is &#39;&#39;.
-        mp : bool, optional
-            Multiprocessing. The default is True.
-
-        Returns
-        -------
-        all_outputs : dict
-            A dictionary with x values (time step or point id) and all outputs.
-            Each key contains an array of the shape (n_samples, n_obs).
-        new_c_points : array
-            Updated collocation points (training set). If a simulation is not
-            executed successfully, the parameter set is removed.
-
-        &#34;&#34;&#34;
-
-        # Create hdf5 metadata
-        hdf5file = f&#39;ExpDesign_{self.name}.hdf5&#39;
-        hdf5_exist = os.path.exists(hdf5file)
-        file = h5py.File(hdf5file, &#39;a&#39;)
-
-        # Initialization
-        n_c_points = len(c_points)
-        self.n_outputs = len(self.Output.names)
-        all_outputs = {}
-
-        # Extract the function
-        if self.link_type.lower() == &#39;function&#39;:
-            # Prepare the function
-            Function = getattr(__import__(self.py_file), self.py_file)
-        # ---------------------------------------------------------------
-        # -------------- Multiprocessing with Pool Class ----------------
-        # ---------------------------------------------------------------
-        # Start a pool with the number of CPUs
-        if self.n_cpus is None:
-            n_cpus = multiprocessing.cpu_count()
-        else:
-            n_cpus = self.n_cpus
-
-        # Run forward model either normal or with multiprocessing
-        if not self.multi_process:
-            group_results = list([self.run_forwardmodel((c_points,
-                                                         prevRun_No,
-                                                         key_str))])
-        else:
-            with multiprocessing.Pool(n_cpus) as p:
-                desc = f&#39;Running forward model {key_str}&#39;
-                if self.link_type.lower() == &#39;function&#39;:
-                    imap_var = p.imap(Function, c_points[:, np.newaxis])
-                else:
-                    args = zip(c_points,
-                               [prevRun_No+i for i in range(n_c_points)],
-                               [key_str]*n_c_points)
-                    imap_var = p.imap(self.run_forwardmodel, args)
-
-                group_results = list(tqdm.tqdm(imap_var, total=n_c_points,
-                                               desc=desc))
-
-        # Save time steps or x-values
-        x_values = group_results[0][0]
-        all_outputs[&#34;x_values&#34;] = x_values
-        if not hdf5_exist:
-            if type(x_values) is dict:
-                grp_x_values = file.create_group(&#34;x_values/&#34;)
-                for varIdx, var in enumerate(self.Output.names):
-                    grp_x_values.create_dataset(var, data=x_values[var])
-            else:
-                file.create_dataset(&#34;x_values&#34;, data=x_values)
-
-        # Save each output in its corresponding array
-        NaN_idx = []
-        for varIdx, var in enumerate(self.Output.names):
-
-            if not hdf5_exist:
-                grpY = file.create_group(&#34;EDY/&#34;+var)
-            else:
-                grpY = file.get(&#34;EDY/&#34;+var)
-
-            Outputs = np.asarray([item[varIdx+1] for item in group_results],
-                                 dtype=np.float64)
-
-            if prevRun_No == 0 and key_str == &#39;&#39;:
-                grpY.create_dataset(f&#39;init_{key_str}&#39;, data=Outputs)
-            else:
-                try:
-                    oldEDY = np.array(file[f&#39;EDY/{var}/adaptive_{key_str}&#39;])
-                    del file[f&#39;EDY/{var}/adaptive_{key_str}&#39;]
-                    data = np.vstack((oldEDY, Outputs))
-                except KeyError:
-                    data = Outputs
-                grpY.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-            NaN_idx = np.unique(np.argwhere(np.isnan(Outputs))[:, 0])
-            all_outputs[var] = np.delete(Outputs, NaN_idx, axis=0)
-
-            if prevRun_No == 0 and key_str == &#39;&#39;:
-                grpY.create_dataset(f&#34;New_init_{key_str}&#34;,
-                                    data=all_outputs[var])
-            else:
-                try:
-                    name = f&#39;EDY/{var}/New_adaptive_{key_str}&#39;
-                    oldEDY = np.array(file[name])
-                    del file[f&#39;EDY/{var}/New_adaptive_{key_str}&#39;]
-                    data = np.vstack((oldEDY, all_outputs[var]))
-                except KeyError:
-                    data = all_outputs[var]
-                grpY.create_dataset(f&#39;New_adaptive_{key_str}&#39;, data=data)
-
-        # Print the collocation points whose simulations crashed
-        if len(NaN_idx) != 0:
-            print(&#39;\n&#39;)
-            print(&#39;*&#39;*20)
-            print(&#34;\nThe following parameter sets have been removed:\n&#34;,
-                  c_points[NaN_idx])
-            print(&#34;\n&#34;)
-            print(&#39;*&#39;*20)
-
-        # Pass it to the attribute
-        new_c_points = np.delete(c_points, NaN_idx, axis=0)
-        self.OutputMatrix = all_outputs
-
-        # Save CollocationPoints
-        grpX = file.create_group(&#34;EDX&#34;) if not hdf5_exist else file.get(&#34;EDX&#34;)
-        if prevRun_No == 0 and key_str == &#39;&#39;:
-            grpX.create_dataset(&#34;init_&#34;+key_str, data=c_points)
-            if len(NaN_idx) != 0:
-                grpX.create_dataset(&#34;New_init_&#34;+key_str, data=new_c_points)
-
-        else:
-            try:
-                name = f&#39;EDX/adaptive_{key_str}&#39;
-                oldCollocationPoints = np.array(file[name])
-                del file[f&#39;EDX/adaptive_{key_str}&#39;]
-                data = np.vstack((oldCollocationPoints, new_c_points))
-            except KeyError:
-                data = new_c_points
-            grpX.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-            if len(NaN_idx) != 0:
-                try:
-                    name = f&#39;EDX/New_adaptive_{key_str}&#39;
-                    oldCollocationPoints = np.array(file[name])
-                    del file[f&#39;EDX/New_adaptive_{key_str}&#39;]
-                    data = np.vstack((oldCollocationPoints, new_c_points))
-                except KeyError:
-                    data = new_c_points
-                grpX.create_dataset(&#39;New_adaptive_&#39;+key_str, data=data)
-
-        # Close h5py file
-        file.close()
-
-        return all_outputs, new_c_points
-
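-    # Minimal usage sketch (assumed two-parameter model; names are
-    # illustrative):
-    #
-    #     samples = np.random.uniform(0, 1, size=(10, 2))
-    #     all_outputs, kept_samples = model.run_model_parallel(samples)
-    #
-    # all_outputs[&#39;x_values&#39;] holds the common x-axis, each output name maps
-    # to an array of shape (n_samples, n_obs), and crashed runs are removed
-    # from kept_samples.
-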
-    # -------------------------------------------------------------------------
-    def zip_subdirs(self, dir_name, key):
-        &#34;&#34;&#34;
-        Zips all the files containing the key(word).
-
-        Parameters
-        ----------
-        dir_name : string
-            Directory name.
-        key : string
-            Keyword to search for.
-
-        Returns
-        -------
-        None.
-
-        &#34;&#34;&#34;
-        # setup file paths variable
-        dir_list = []
-        file_paths = []
-
-        # Read all directories, subdirectories and file lists
-        dir_path = os.getcwd()
-
-        for root, directories, files in os.walk(dir_path):
-            for directory in directories:
-                # Create the full filepath by using os module.
-                if key in directory:
-                    folderPath = os.path.join(dir_path, directory)
-                    dir_list.append(folderPath)
-
-        # Loop over the identified directories to store the file paths
-        for direct_name in dir_list:
-            for root, directories, files in os.walk(direct_name):
-                for filename in files:
-                    # Create the full filepath by using os module.
-                    filePath = os.path.join(root, filename)
-                    file_paths.append(&#39;.&#39;+filePath.split(dir_path)[1])
-
-        # writing files to a zipfile
-        if len(file_paths) != 0:
-            zip_file = zipfile.ZipFile(dir_name+&#39;.zip&#39;, &#39;w&#39;)
-            with zip_file:
-                # writing each file one by one
-                for file in file_paths:
-                    zip_file.write(file)
-
-            file_paths = [path for path in os.listdir(&#39;.&#39;) if key in path]
-
-            for path in file_paths:
-                shutil.rmtree(path)
-
-            print(&#34;\n&#34;)
-            print(f&#39;{dir_name}.zip file has been created successfully!\n&#39;)
-
-        return</code></pre>
-</details>
-</section>
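-<section id="section-usage">
-<p>A minimal instantiation sketch. It is not taken from the package itself;
-the shell command, file names and output names below are illustrative
-assumptions based on the attribute documentation above:</p>
-<pre><code class="python">from pylink import PyLinkForwardModel
-
-# Illustrative configuration for the shell-command (&#39;pylink&#39;) wrapper
-model = PyLinkForwardModel(
-    name=&#39;my_model&#39;,
-    shell_command=&#39;./solver my_model.input&#39;,
-    input_file=&#39;my_model.input&#39;,
-    input_template=&#39;my_model.tpl.input&#39;,  # copy of the input file with &lt;X1&gt;, &lt;X2&gt;, ...
-    output_names=[&#39;Z&#39;],
-    output_file_names=[&#39;output.csv&#39;],
-    output_parser=&#39;read_output_csv&#39;  # read_output_csv.py must define read_output_csv()
-)</code></pre>
-</section>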
-<section>
-</section>
-<section>
-</section>
-<section>
-</section>
-<section>
-<h2 class="section-title" id="header-classes">Classes</h2>
-<dl>
-<dt id="pylink.PyLinkForwardModel"><code class="flex name class">
-<span>class <span class="ident">PyLinkForwardModel</span></span>
-<span>(</span><span>link_type='pylink', name=None, shell_command='', py_file=None, input_file=None, input_template=None, aux_file=None, exe_path='', output_names=[], output_parser='', output_file_names=[], multi_process=True, n_cpus=None, meas_file=None, meas_file_valid=None, mc_ref_file=None, obs_dict={}, obs_dict_valid={}, mc_ref_dict={})</span>
-</code></dt>
-<dd>
-<div class="desc"><p>A forward model binder</p>
-<p>This class serves as a code wrapper. This wrapper allows the execution of
-a third-party software/solver within the scope of BayesValidRox.</p>
-<h2 id="attributes">Attributes</h2>
-<dl>
-<dt><strong><code>link_type</code></strong> :&ensp;<code>str</code></dt>
-<dd>The type of the wrapper. The default is <code>'pylink'</code>. This runs the
-third-party software or an executable using a shell command with given
-input files.
-The second option is <code>'function'</code>, which assumes that the model can be
-run using a function written separately in a Python script.</dd>
-<dt><strong><code>name</code></strong> :&ensp;<code>str</code></dt>
-<dd>Name of the model.</dd>
-<dt><strong><code>shell_command</code></strong> :&ensp;<code>str</code></dt>
-<dd>Shell command to be executed for the <code>'pylink'</code> wrapper.</dd>
-<dt><strong><code>py_file</code></strong> :&ensp;<code>str</code></dt>
-<dd>Python file name without <code>.py</code> extension to be run for the <code>'function'</code>
-wrapper. Note that the name of the python file and that of the function
-must be the same. This function must receive the parameters in an array
-of shape <code>(n_samples, n_params)</code> and return a dictionary with the
-x_values and output arrays for the given output names.</dd>
-<dt><strong><code>input_file</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
-<dd>The input file to be passed to the <code>'pylink'</code> wrapper.</dd>
-<dt><strong><code>input_template</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
-<dd>A template input file to be passed to the <code>'pylink'</code> wrapper. This file
-must be a copy of <code>input_file</code> with <code>&lt;Xi&gt;</code> placeholders for the input
-parameters defined using the <code>inputs</code> class, with i being the parameter
-number. The file name ending should include <code>.tpl</code> before the actual
-extension of the input file, for example, <code>params.tpl.input</code>.</dd>
-<dt><strong><code>aux_file</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
-<dd>The list of auxiliary files needed for the <code>'pylink'</code> wrapper.</dd>
-<dt><strong><code>exe_path</code></strong> :&ensp;<code>str</code></dt>
-<dd>Execution path if you wish to run the model for the <code>'pylink'</code> wrapper
-in another directory. The default is <code>None</code>, which corresponds to the
-current working directory.</dd>
-<dt><strong><code>output_names</code></strong> :&ensp;<code>list</code> of <code>str</code></dt>
-<dd>List of the model outputs to be used for the analysis.</dd>
-<dt><strong><code>output_file_names</code></strong> :&ensp;<code>list</code> of <code>str</code></dt>
-<dd>List of the name of the model output text files for the <code>'pylink'</code>
-wrapper.</dd>
-<dt><strong><code>output_parser</code></strong> :&ensp;<code>str</code></dt>
-<dd>Name of the model parser file (without <code>.py</code> extension) that receives
-the <code>output_file_names</code> and returns a 2d-array with the first row being
-the x_values, e.g. x coordinates or time, and the remaining rows containing
-the simulation output for each model output defined in <code>output_names</code>. Note
-that again here the name of the file and that of the function must be
-the same.</dd>
-<dt><strong><code>multi_process</code></strong> :&ensp;<code>bool</code></dt>
-<dd>Whether the model runs are to be executed in parallel for the <code>'pylink'</code>
-wrapper. The default is <code>True</code>.</dd>
-<dt><strong><code>n_cpus</code></strong> :&ensp;<code>int</code></dt>
-<dd>The number of cpus to be used for the parallel model execution for the
-<code>'pylink'</code> wrapper. The default is <code>None</code>, which corresponds to all
-available cpus.</dd>
-<dt><strong><code>meas_file</code></strong> :&ensp;<code>str</code></dt>
-<dd>The name of the measurement text-based file. This file must contain
-x_values as the first column and one column for each model output. The
-default is <code>None</code>. Only needed for the Bayesian Inference.</dd>
-<dt><strong><code>meas_file_valid</code></strong> :&ensp;<code>str</code></dt>
-<dd>The name of the measurement text-based file for the validation. The
-default is <code>None</code>. Only needed for the validation with Bayesian
-Inference.</dd>
-<dt><strong><code>mc_ref_file</code></strong> :&ensp;<code>str</code></dt>
-<dd>The name of the text file for the Monte-Carlo reference (mean and
-standard deviation) values. It must contain <code>x_values</code> as the first
-column, <code>mean</code> as the second column and <code>std</code> as the third. It can be
-used to compare the estimated moments using meta-model in the post-
-processing step.</dd>
-<dt><strong><code>obs_dict</code></strong> :&ensp;<code>dict</code></dt>
-<dd>A dictionary containing the measurement data. It must contain
-<code>x_values</code> as the first item and one item for each model output.
-The default is <code>{}</code>. Only needed for the Bayesian Inference.</dd>
-<dt><strong><code>obs_dict_valid</code></strong> :&ensp;<code>dict</code></dt>
-<dd>A dictionary containing the validation measurement data. It
-must contain <code>x_values</code> as the first item and one item for each model
-output. The default is <code>{}</code>.</dd>
-<dt><strong><code>mc_ref_dict</code></strong> :&ensp;<code>dict</code></dt>
-<dd>A dictionary containing the Monte-Carlo reference (mean and standard
-deviation) values. It must contain <code>x_values</code> as the first item and
-<code>mean</code> as the second item and <code>std</code> as the third. The default is <code>{}</code>.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">class PyLinkForwardModel(object):
-    r&#34;&#34;&#34;
-    A forward model binder
-
-    This class serves as a code wrapper. This wrapper allows the execution of
-    a third-party software/solver within the scope of BayesValidRox.
-
-    Attributes
-    ----------
-    link_type : str
-        The type of the wrapper. The default is `&#39;pylink&#39;`. This runs the
-        third-party software or an executable using a shell command with given
-        input files.
-        The second option is `&#39;function&#39;`, which assumes that the model can
-        be run using a function written separately in a Python script.
-    name : str
-        Name of the model.
-    shell_command : str
-        Shell command to be executed for the `&#39;pylink&#39;` wrapper.
-    py_file : str
-        Python file name without `.py` extension to be run for the `&#39;function&#39;`
-        wrapper. Note that the name of the python file and that of the function
-        must be the same. This function must receive the parameters in an array
-        of shape `(n_samples, n_params)` and return a dictionary with the
-        x_values and output arrays for the given output names.
-    input_file : str or list
-        The input file to be passed to the `&#39;pylink&#39;` wrapper.
-    input_template : str or list
-        A template input file to be passed to the `&#39;pylink&#39;` wrapper. This file
-        must be a copy of `input_file` with `&lt;Xi&gt;` placeholders for the input
-        parameters defined using the `inputs` class, with i being the parameter
-        number. The file name ending should include `.tpl` before the actual
-        extension of the input file, for example, `params.tpl.input`.
-    aux_file : str or list
-        The list of auxiliary files needed for the `&#39;pylink&#39;` wrapper.
-    exe_path : str
-        Execution path if you wish to run the model for the `&#39;pylink&#39;` wrapper
-        in another directory. The default is `None`, which corresponds to the
-        current working directory.
-    output_names : list of str
-        List of the model outputs to be used for the analysis.
-    output_file_names : list of str
-        List of the name of the model output text files for the `&#39;pylink&#39;`
-        wrapper.
-    output_parser : str
-        Name of the model parser file (without `.py` extension) that receives
-        the `output_file_names` and returns a 2d-array with the first row being
-        the x_values, e.g. x coordinates or time, and the remaining rows
-        containing the simulation output for each model output defined in
-        `output_names`. Note
-        that again here the name of the file and that of the function must be
-        the same.
-    multi_process: bool
-        Whether the model runs are to be executed in parallel for the `&#39;pylink&#39;`
-        wrapper. The default is `True`.
-    n_cpus: int
-        The number of cpus to be used for the parallel model execution for the
-        `&#39;pylink&#39;` wrapper. The default is `None`, which corresponds to all
-        available cpus.
-    meas_file : str
-        The name of the measurement text-based file. This file must contain
-        x_values as the first column and one column for each model output. The
-        default is `None`. Only needed for the Bayesian Inference.
-    meas_file_valid : str
-        The name of the measurement text-based file for the validation. The
-        default is `None`. Only needed for the validation with Bayesian
-        Inference.
-    mc_ref_file : str
-        The name of the text file for the Monte-Carlo reference (mean and
-        standard deviation) values. It must contain `x_values` as the first
-        column, `mean` as the second column and `std` as the third. It can be
-        used to compare the estimated moments using meta-model in the post-
-        processing step.
-    obs_dict : dict
-        A dictionary containing the measurement data. It must contain
-        `x_values` as the first item and one item for each model output.
-        The default is `{}`. Only needed for the Bayesian Inference.
-    obs_dict_valid : dict
-        A dictionary containing the validation measurement data. It
-        must contain `x_values` as the first item and one item for each model
-        output. The default is `{}`.
-    mc_ref_dict : dict
-        A dictionary containing the Monte-Carlo reference (mean and standard
-        deviation) values. It must contain `x_values` as the first item and
-        `mean` as the second item and `std` as the third. The default is `{}`.
-    &#34;&#34;&#34;
-
-    def __init__(self, link_type=&#39;pylink&#39;, name=None, shell_command=&#39;&#39;,
-                 py_file=None, input_file=None, input_template=None,
-                 aux_file=None, exe_path=&#39;&#39;, output_names=[], output_parser=&#39;&#39;,
-                 output_file_names=[], multi_process=True, n_cpus=None,
-                 meas_file=None, meas_file_valid=None, mc_ref_file=None,
-                 obs_dict={}, obs_dict_valid={}, mc_ref_dict={}):
-        self.link_type = link_type
-        self.name = name
-        self.shell_command = shell_command
-        self.py_file = py_file
-        self.input_file = input_file
-        self.input_template = input_template
-        self.aux_file = aux_file
-        self.exe_path = exe_path
-        self.multi_process = multi_process
-        self.n_cpus = n_cpus
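-        # Caution: the following three assignments set attributes on the
-        # nested Output class itself, so they are shared by all instances
-        # of PyLinkForwardModel rather than being per-instance state.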
-        self.Output.parser = output_parser
-        self.Output.names = output_names
-        self.Output.file_names = output_file_names
-        self.meas_file = meas_file
-        self.meas_file_valid = meas_file_valid
-        self.mc_ref_file = mc_ref_file
-        self.observations = obs_dict
-        self.observations_valid = obs_dict_valid
-        self.mc_reference = mc_ref_dict
-
-    # Nested class
-    class Output:
-        def __init__(self):
-            self.parser = None
-            self.names = None
-            self.file_names = None
-
-    # -------------------------------------------------------------------------
-    def within_range(self, out, minout, maxout):
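-        &#34;&#34;&#34;Checks whether all entries of out lie strictly between minout
-        and maxout.&#34;&#34;&#34;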
-        inside = False
-        if (out &gt; minout).all() and (out &lt; maxout).all():
-            inside = True
-        return inside
-
-    # -------------------------------------------------------------------------
-    def read_observation(self, case=&#39;calib&#39;):
-        &#34;&#34;&#34;
-        Reads/prepares the observation/measurement data for
-        calibration or validation, depending on `case`.
-
-        Returns
-        -------
-        DataFrame
-            A dataframe with the calibration or validation data.
-
-        &#34;&#34;&#34;
-        if case.lower() == &#39;calib&#39;:
-            if bool(self.observations):
-                obs = pd.DataFrame.from_dict(self.observations)
-            elif self.meas_file is not None:
-                file_path = os.path.join(os.getcwd(), self.meas_file)
-                obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-            else:
-                raise Exception(&#34;Please provide the observation data as a &#34;
-                                &#34;dictionary via observations attribute or pass&#34;
-                                &#34; the csv-file path to MeasurementFile &#34;
-                                &#34;attribute&#34;)
-        elif case.lower() == &#39;valid&#39;:
-            if bool(self.observations_valid):
-                obs = pd.DataFrame.from_dict(self.observations_valid)
-            elif self.meas_file_valid is not None:
-                file_path = os.path.join(os.getcwd(), self.meas_file_valid)
-                obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-            else:
-                raise Exception(&#34;Please provide the observation data as a &#34;
-                                &#34;dictionary via Observations attribute or pass&#34;
-                                &#34; the csv-file path to MeasurementFile &#34;
-                                &#34;attribute&#34;)
-
-        # Compute the number of observations
-        n_obs = obs[self.Output.names].notnull().sum().values.sum()
-
-        if case.lower() == &#39;calib&#39;:
-            self.observations = obs
-            self.n_obs = n_obs
-            return self.observations
-        elif case.lower() == &#39;valid&#39;:
-            self.observations_valid = obs
-            self.n_obs_valid = n_obs
-            return self.observations_valid
-
-    # -------------------------------------------------------------------------
-    def read_mc_reference(self):
-        &#34;&#34;&#34;
-        Reads the Monte-Carlo reference data, if available, for
-        further in-depth post-processing after meta-model training.
-
-        Returns
-        -------
-        DataFrame or None
-            The Monte-Carlo reference data, or None if not available.
-
-        &#34;&#34;&#34;
-        if self.mc_ref_file is None and not hasattr(self, &#39;mc_reference&#39;):
-            return
-        elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
-            self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
-        elif self.mc_ref_file is not None:
-            file_path = os.path.join(os.getcwd(), self.mc_ref_file)
-            self.mc_reference = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-        else:
-            raise Exception(&#34;Please provide the MC reference data as a &#34;
-                            &#34;dictionary via mc_reference attribute or pass the&#34;
-                            &#34; csv-file path to mc_ref_file attribute&#34;)
-        return self.mc_reference
-
-    # -------------------------------------------------------------------------
-    def read_output(self):
-        &#34;&#34;&#34;
-        Imports the user-defined output parser and returns the parsed
-        simulation output. It is required when the model returns its
-        simulation outputs in csv files.
-
-        Returns
-        -------
-        output : tuple
-            The parsed simulation output returned by the parser function.
-
-        &#34;&#34;&#34;
-        output_func_name = self.Output.parser
-
-        output_func = getattr(__import__(output_func_name), output_func_name)
-
-        file_names = []
-        for File in self.Output.file_names:
-            file_names.append(os.path.join(self.exe_path, File))
-        try:
-            output = output_func(self.name, file_names)
-        except TypeError:
-            output = output_func(file_names)
-        return output
-
-    # -------------------------------------------------------------------------
-    def update_input_params(self, new_input_file, param_sets):
-        &#34;&#34;&#34;
-        Finds the patterns &lt;X1&gt;, &lt;X2&gt;, ... in the given input files and
-        replaces them with the corresponding values from the array
-        param_sets.
-
-        Parameters
-        ----------
-        new_input_file : list of strings
-            Names of the input files to be updated.
-        param_sets : array of shape (n_params,)
-            Parameter values to insert into the placeholders.
-
-        Returns
-        -------
-        None.
-
-        &#34;&#34;&#34;
-        NofPa = param_sets.shape[0]
-        text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
-
-        for filename in new_input_file:
-            # Read in the file
-            with open(filename, &#39;r&#39;) as file:
-                filedata = file.read()
-
-            # Replace the target string
-            for text_to_search, params in zip(text_to_search_list, param_sets):
-                filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
-
-            # Write the file out again
-            with open(filename, &#39;w&#39;) as file:
-                file.write(filedata)
-
-    # -------------------------------------------------------------------------
-    def run_command(self, command, output_file_names):
-        &#34;&#34;&#34;
-        Runs the execution command given by the user to run the given model.
-        It checks whether the output files have been generated. If yes, the
-        job is done and it extracts and returns the requested output(s).
-        Otherwise, it executes the command again.
-
-        Parameters
-        ----------
-        command : string
-            The command to be executed.
-        output_file_names : list
-            Names of the expected output files.
-
-        Returns
-        -------
-        simulation_outputs : array of shape (n_obs, n_outputs)
-            Simulation outputs.
-
-        &#34;&#34;&#34;
-
-        # Check if simulation is finished
-        while True:
-            time.sleep(3)
-            files = os.listdir(&#34;.&#34;)
-            if all(elem in files for elem in output_file_names):
-                break
-            else:
-                # Run command
-                Process = os.system(f&#39;./../{command}&#39;)
-                if Process != 0:
-                    print(&#39;\nMessage 1:&#39;)
-                    print(f&#39;\tThe exit code \&#39;{Process}\&#39; is non-zero, &#39;
-                          &#39;which indicates compilation or run problems.\n&#39;)
-
-        os.chdir(&#34;..&#34;)
-
-        # Read the output
-        simulation_outputs = self.read_output()
-
-        return simulation_outputs
-
-    # -------------------------------------------------------------------------
-    def run_forwardmodel(self, xx):
-        &#34;&#34;&#34;
-        This function creates a subdirectory for the current run, copies the
-        necessary files to this directory and renames them. Next, it executes
-        the given command.
-        &#34;&#34;&#34;
-        c_points, run_no, key_str = xx
-
-        # Handle if only one input file is provided
-        if not isinstance(self.input_template, list):
-            self.input_template = [self.input_template]
-        if not isinstance(self.input_file, list):
-            self.input_file = [self.input_file]
-
-        new_input_file = []
-        # Loop over the InputTemplates:
-        for in_temp in self.input_template:
-            if &#39;/&#39; in in_temp:
-                in_temp = in_temp.split(&#39;/&#39;)[-1]
-            new_input_file.append(in_temp.split(&#39;.tpl&#39;)[0] + key_str +
-                                  f&#34;_{run_no+1}&#34; + in_temp.split(&#39;.tpl&#39;)[1])
-
-        # Create directories
-        newpath = self.name + key_str + f&#39;_{run_no+1}&#39;
-        if not os.path.exists(newpath):
-            os.makedirs(newpath)
-
-        # Copy the necessary files to the directories
-        for in_temp in self.input_template:
-            # Input file(s) of the model
-            shutil.copy2(in_temp, newpath)
-        # Auxiliary file
-        if self.aux_file is not None:
-            shutil.copy2(self.aux_file, newpath)  # Auxiliary file
-
-        # Rename the Inputfile and/or auxiliary file
-        os.chdir(newpath)
-        for input_tem, input_file in zip(self.input_template, new_input_file):
-            if &#39;/&#39; in input_tem:
-                input_tem = input_tem.split(&#39;/&#39;)[-1]
-            os.rename(input_tem, input_file)
-
-        # Update the parameters in the input file(s)
-        self.update_input_params(new_input_file, c_points)
-
-        # Update the user defined command and the execution path
-        try:
-            new_command = self.shell_command.replace(self.input_file[0],
-                                                     new_input_file[0])
-            new_command = new_command.replace(self.input_file[1],
-                                              new_input_file[1])
-        except IndexError:  # only one input file was given
-            new_command = self.shell_command.replace(self.input_file[0],
-                                                     new_input_file[0])
-        # Set the exe path if not provided
-        if not bool(self.exe_path):
-            self.exe_path = os.getcwd()
-
-        # Run the model
-        output = self.run_command(new_command, self.Output.file_names)
-
-        return output
-
-    # -------------------------------------------------------------------------
-    def run_model_parallel(self, c_points, prevRun_No=0, key_str=&#39;&#39;,
-                           mp=True):
-        &#34;&#34;&#34;
-        Runs model simulations. If mp is true (default), then the simulations
-         are started in parallel.
-
-        Parameters
-        ----------
-        c_points : array like of shape (n_samples, n_params)
-            Collocation points (training set).
-        prevRun_No : int, optional
-            Previous run number, in case the sequential design is selected.
-            The default is 0.
-        key_str : string, optional
-            A descriptive string for validation runs. The default is &#39;&#39;.
-        mp : bool, optional
-            Multiprocessing. The default is True.
-
-        Returns
-        -------
-        all_outputs : dict
-            A dictionary with x values (time step or point id) and all outputs.
-            Each key contains an array of the shape (n_samples, n_obs).
-        new_c_points : array
-            Updated collocation points (training set). If a simulation does not
-            execute successfully, the parameter set is removed.
-
-        &#34;&#34;&#34;
-
-        # Create hdf5 metadata
-        hdf5file = f&#39;ExpDesign_{self.name}.hdf5&#39;
-        hdf5_exist = os.path.exists(hdf5file)
-        file = h5py.File(hdf5file, &#39;a&#39;)
-
-        # Initialization
-        n_c_points = len(c_points)
-        self.n_outputs = len(self.Output.names)
-        all_outputs = {}
-
-        # Extract the function
-        if self.link_type.lower() == &#39;function&#39;:
-            # Prepare the function
-            Function = getattr(__import__(self.py_file), self.py_file)
-        # ---------------------------------------------------------------
-        # -------------- Multiprocessing with Pool Class ----------------
-        # ---------------------------------------------------------------
-        # Start a pool with the number of CPUs
-        if self.n_cpus is None:
-            n_cpus = multiprocessing.cpu_count()
-        else:
-            n_cpus = self.n_cpus
-
-        # Run the forward model either normally or with multiprocessing
-        if not self.multi_process:
-            group_results = list([self.run_forwardmodel((c_points,
-                                                         prevRun_No,
-                                                         key_str))])
-        else:
-            with multiprocessing.Pool(n_cpus) as p:
-                desc = f&#39;Running forward model {key_str}&#39;
-                if self.link_type.lower() == &#39;function&#39;:
-                    imap_var = p.imap(Function, c_points[:, np.newaxis])
-                else:
-                    args = zip(c_points,
-                               [prevRun_No+i for i in range(n_c_points)],
-                               [key_str]*n_c_points)
-                    imap_var = p.imap(self.run_forwardmodel, args)
-
-                group_results = list(tqdm.tqdm(imap_var, total=n_c_points,
-                                               desc=desc))
-
-        # Save time steps or x-values
-        x_values = group_results[0][0]
-        all_outputs[&#34;x_values&#34;] = x_values
-        if not hdf5_exist:
-            if type(x_values) is dict:
-                grp_x_values = file.create_group(&#34;x_values/&#34;)
-                for varIdx, var in enumerate(self.Output.names):
-                    grp_x_values.create_dataset(var, data=x_values[var])
-            else:
-                file.create_dataset(&#34;x_values&#34;, data=x_values)
-
-        # Save each output in its corresponding array
-        NaN_idx = []
-        for varIdx, var in enumerate(self.Output.names):
-
-            if not hdf5_exist:
-                grpY = file.create_group(&#34;EDY/&#34;+var)
-            else:
-                grpY = file.get(&#34;EDY/&#34;+var)
-
-            Outputs = np.asarray([item[varIdx+1] for item in group_results],
-                                 dtype=np.float64)
-
-            if prevRun_No == 0 and key_str == &#39;&#39;:
-                grpY.create_dataset(f&#39;init_{key_str}&#39;, data=Outputs)
-            else:
-                try:
-                    oldEDY = np.array(file[f&#39;EDY/{var}/adaptive_{key_str}&#39;])
-                    del file[f&#39;EDY/{var}/adaptive_{key_str}&#39;]
-                    data = np.vstack((oldEDY, Outputs))
-                except KeyError:
-                    data = Outputs
-                grpY.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-            NaN_idx = np.unique(np.argwhere(np.isnan(Outputs))[:, 0])
-            all_outputs[var] = np.delete(Outputs, NaN_idx, axis=0)
-
-            if prevRun_No == 0 and key_str == &#39;&#39;:
-                grpY.create_dataset(f&#34;New_init_{key_str}&#34;,
-                                    data=all_outputs[var])
-            else:
-                try:
-                    name = f&#39;EDY/{var}/New_adaptive_{key_str}&#39;
-                    oldEDY = np.array(file[name])
-                    del file[f&#39;EDY/{var}/New_adaptive_{key_str}&#39;]
-                    data = np.vstack((oldEDY, all_outputs[var]))
-                except KeyError:
-                    data = all_outputs[var]
-                grpY.create_dataset(f&#39;New_adaptive_{key_str}&#39;, data=data)
-
-        # Print the collocation points whose simulations crashed
-        if len(NaN_idx) != 0:
-            print(&#39;\n&#39;)
-            print(&#39;*&#39;*20)
-            print(&#34;\nThe following parameter sets have been removed:\n&#34;,
-                  c_points[NaN_idx])
-            print(&#34;\n&#34;)
-            print(&#39;*&#39;*20)
-
-        # Pass it to the attribute
-        new_c_points = np.delete(c_points, NaN_idx, axis=0)
-        self.OutputMatrix = all_outputs
-
-        # Save CollocationPoints
-        grpX = file.create_group(&#34;EDX&#34;) if not hdf5_exist else file.get(&#34;EDX&#34;)
-        if prevRun_No == 0 and key_str == &#39;&#39;:
-            grpX.create_dataset(&#34;init_&#34;+key_str, data=c_points)
-            if len(NaN_idx) != 0:
-                grpX.create_dataset(&#34;New_init_&#34;+key_str, data=new_c_points)
-
-        else:
-            try:
-                name = f&#39;EDX/adaptive_{key_str}&#39;
-                oldCollocationPoints = np.array(file[name])
-                del file[f&#39;EDX/adaptive_{key_str}&#39;]
-                data = np.vstack((oldCollocationPoints, new_c_points))
-            except KeyError:
-                data = new_c_points
-            grpX.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-            if len(NaN_idx) != 0:
-                try:
-                    name = f&#39;EDX/New_adaptive_{key_str}&#39;
-                    oldCollocationPoints = np.array(file[name])
-                    del file[f&#39;EDX/New_adaptive_{key_str}&#39;]
-                    data = np.vstack((oldCollocationPoints, new_c_points))
-                except KeyError:
-                    data = new_c_points
-                grpX.create_dataset(&#39;New_adaptive_&#39;+key_str, data=data)
-
-        # Close h5py file
-        file.close()
-
-        return all_outputs, new_c_points
-
-    # -------------------------------------------------------------------------
-    def zip_subdirs(self, dir_name, key):
-        &#34;&#34;&#34;
-        Zips all files inside directories whose names contain the keyword.
-
-        Parameters
-        ----------
-        dir_name : string
-            Directory name.
-        key : string
-            Keyword to search for.
-
-        Returns
-        -------
-        None.
-
-        &#34;&#34;&#34;
-        # setup file paths variable
-        dir_list = []
-        file_paths = []
-
-        # Read all directories, subdirectories and file lists
-        dir_path = os.getcwd()
-
-        for root, directories, files in os.walk(dir_path):
-            for directory in directories:
-                # Create the full filepath by using os module.
-                if key in directory:
-                    folderPath = os.path.join(dir_path, directory)
-                    dir_list.append(folderPath)
-
-        # Loop over the identified directories to store the file paths
-        for direct_name in dir_list:
-            for root, directories, files in os.walk(direct_name):
-                for filename in files:
-                    # Create the full filepath by using os module.
-                    filePath = os.path.join(root, filename)
-                    file_paths.append(&#39;.&#39;+filePath.split(dir_path)[1])
-
-        # writing files to a zipfile
-        if len(file_paths) != 0:
-            zip_file = zipfile.ZipFile(dir_name+&#39;.zip&#39;, &#39;w&#39;)
-            with zip_file:
-                # writing each file one by one
-                for file in file_paths:
-                    zip_file.write(file)
-
-            file_paths = [path for path in os.listdir(&#39;.&#39;) if key in path]
-
-            for path in file_paths:
-                shutil.rmtree(path)
-
-            print(&#34;\n&#34;)
-            print(f&#39;{dir_name}.zip file has been created successfully!\n&#39;)
-
-        return</code></pre>
-</details>
-<h3>Class variables</h3>
-<dl>
-<dt id="pylink.PyLinkForwardModel.Output"><code class="name">var <span class="ident">Output</span></code></dt>
-<dd>
-<div class="desc"></div>
-</dd>
-</dl>
-<h3>Methods</h3>
-<dl>
-<dt id="pylink.PyLinkForwardModel.read_mc_reference"><code class="name flex">
-<span>def <span class="ident">read_mc_reference</span></span>(<span>self)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Is used, if a Monte-Carlo reference is available for
-further in-depth post-processing after meta-model training.</p>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><code>DataFrame or None</code></dt>
-<dd>The Monte-Carlo reference data, or None if not available.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def read_mc_reference(self):
-    &#34;&#34;&#34;
-    Reads the Monte-Carlo reference data, if available, for
-    further in-depth post-processing after meta-model training.
-
-    Returns
-    -------
-    DataFrame or None
-        The Monte-Carlo reference data, or None if not available.
-
-    &#34;&#34;&#34;
-    if self.mc_ref_file is None and not hasattr(self, &#39;mc_reference&#39;):
-        return
-    elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
-        self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
-    elif self.mc_ref_file is not None:
-        file_path = os.path.join(os.getcwd(), self.mc_ref_file)
-        self.mc_reference = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-    else:
-        raise Exception(&#34;Please provide the MC reference data as a &#34;
-                        &#34;dictionary via mc_reference attribute or pass the&#34;
-                        &#34; csv-file path to mc_ref_file attribute&#34;)
-    return self.mc_reference</code></pre>
-</details>
-</dd>
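A minimal usage sketch for `read_mc_reference` (the keys, values and file path below are illustrative and assume a configured `PyLinkForwardModel` instance named `model`):

```python
# Hypothetical sketch: attach a Monte-Carlo reference, then convert it.
model.mc_reference = {'mean': [0.11, 0.30, 0.27],
                      'std': [0.02, 0.03, 0.02]}
mc_df = model.read_mc_reference()   # the dict is returned as a DataFrame

# Alternatively, point to a comma-separated file instead:
# model.mc_ref_file = 'data/mc_reference.csv'
# mc_df = model.read_mc_reference()
```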
-<dt id="pylink.PyLinkForwardModel.read_observation"><code class="name flex">
-<span>def <span class="ident">read_observation</span></span>(<span>self, case='calib')</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Reads/prepare the observation/measurement data for
-calibration.</p>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><code>DataFrame</code></dt>
-<dd>A dataframe with the calibration data.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def read_observation(self, case=&#39;calib&#39;):
-    &#34;&#34;&#34;
-    Reads/prepares the observation (measurement) data for
-    calibration or validation.
-
-    Returns
-    -------
-    DataFrame
-        A dataframe with the calibration data.
-
-    &#34;&#34;&#34;
-    if case.lower() == &#39;calib&#39;:
-        if bool(self.observations):
-            obs = pd.DataFrame.from_dict(self.observations)
-        elif self.meas_file is not None:
-            file_path = os.path.join(os.getcwd(), self.meas_file)
-            obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-        else:
-            raise Exception(&#34;Please provide the observation data as a &#34;
-                            &#34;dictionary via the observations attribute or&#34;
-                            &#34; pass the csv-file path to the meas_file &#34;
-                            &#34;attribute&#34;)
-    elif case.lower() == &#39;valid&#39;:
-        if bool(self.observations_valid):
-            obs = pd.DataFrame.from_dict(self.observations_valid)
-        elif self.meas_file_valid is not None:
-            file_path = os.path.join(os.getcwd(), self.meas_file_valid)
-            obs = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-        else:
-            raise Exception(&#34;Please provide the observation data as a &#34;
-                            &#34;dictionary via the observations_valid &#34;
-                            &#34;attribute or pass the csv-file path to the &#34;
-                            &#34;meas_file_valid attribute&#34;)
-
-    # Compute the number of observations
-    n_obs = obs[self.Output.names].notnull().sum().values.sum()
-
-    if case.lower() == &#39;calib&#39;:
-        self.observations = obs
-        self.n_obs = n_obs
-        return self.observations
-    elif case.lower() == &#39;valid&#39;:
-        self.observations_valid = obs
-        self.n_obs_valid = n_obs
-        return self.observations_valid</code></pre>
-</details>
-</dd>
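The two ways of supplying calibration data can be sketched as follows (assuming a configured `PyLinkForwardModel` instance named `model` with `Output.names = ['flow']`; all names and numbers are illustrative):

```python
# Option 1: pass the measurements directly as a dictionary.
model.observations = {'x_values': [0.0, 1.0, 2.0],
                      'flow': [0.12, 0.34, 0.29]}
obs_df = model.read_observation(case='calib')   # returns a DataFrame
print(model.n_obs)   # number of non-null entries in the output columns

# Option 2: read the measurements from a csv file instead.
# model.meas_file = 'data/measurements.csv'
# obs_df = model.read_observation(case='calib')
```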
-<dt id="pylink.PyLinkForwardModel.read_output"><code class="name flex">
-<span>def <span class="ident">read_output</span></span>(<span>self)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Reads the the parser output file and returns it as an
-executable function. It is required when the models returns the
-simulation outputs in csv files.</p>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><strong><code>output</code></strong> :&ensp;<code>tuple</code></dt>
-<dd>The parsed simulation output returned by the parser function.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def read_output(self):
-    &#34;&#34;&#34;
-    Imports the user-defined output parser and returns the parsed
-    simulation output. It is required when the model returns its
-    simulation outputs in csv files.
-
-    Returns
-    -------
-    output : tuple
-        The parsed simulation output returned by the parser function.
-
-    &#34;&#34;&#34;
-    output_func_name = self.Output.parser
-
-    output_func = getattr(__import__(output_func_name), output_func_name)
-
-    file_names = []
-    for File in self.Output.file_names:
-        file_names.append(os.path.join(self.exe_path, File))
-    try:
-        output = output_func(self.name, file_names)
-    except TypeError:
-        output = output_func(file_names)
-    return output</code></pre>
-</details>
-</dd>
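To make the parser contract concrete: `read_output` imports a module named after the parser and calls the function of the same name inside it, first with `(name, file_names)` and, on a `TypeError`, with `(file_names)` alone. A hypothetical parser module could therefore look like the sketch below (module name, csv layout and return shape are assumptions):

```python
# read_flow_output.py -- hypothetical parser module. The module and the
# function must share the same name, because read_output() resolves it via
# getattr(__import__(parser_name), parser_name).
import numpy as np
import pandas as pd


def read_flow_output(name, file_names):
    """Return (x_values, output_1, output_2, ...) read from csv files."""
    outputs = [pd.read_csv(fname).values.flatten() for fname in file_names]
    x_values = np.arange(len(outputs[0]))   # one x value per csv row
    return (x_values, *outputs)
```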
-<dt id="pylink.PyLinkForwardModel.run_command"><code class="name flex">
-<span>def <span class="ident">run_command</span></span>(<span>self, command, output_file_names)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Runs the execution command given by the user to run the given model.
-It checks whether the output files have been generated. If yes, the job is
-done and it extracts and returns the requested output(s). Otherwise,
-it executes the command again.</p>
-<h2 id="parameters">Parameters</h2>
-<dl>
-<dt><strong><code>command</code></strong> :&ensp;<code>string</code></dt>
-<dd>The command to be executed.</dd>
-<dt><strong><code>output_file_names</code></strong> :&ensp;<code>list</code></dt>
-<dd>Names of the expected output files.</dd>
-</dl>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><strong><code>simulation_outputs</code></strong> :&ensp;<code>array</code> of <code>shape (n_obs, n_outputs)</code></dt>
-<dd>Simulation outputs.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def run_command(self, command, output_file_names):
-    &#34;&#34;&#34;
-    Runs the execution command given by the user to run the given model.
-    It checks whether the output files have been generated. If yes, the
-    job is done and it extracts and returns the requested output(s).
-    Otherwise, it executes the command again.
-
-    Parameters
-    ----------
-    command : string
-        The command to be executed.
-    output_file_names : list
-        Names of the expected output files.
-
-    Returns
-    -------
-    simulation_outputs : array of shape (n_obs, n_outputs)
-        Simulation outputs.
-
-    &#34;&#34;&#34;
-
-    # Check if simulation is finished
-    while True:
-        time.sleep(3)
-        files = os.listdir(&#34;.&#34;)
-        if all(elem in files for elem in output_file_names):
-            break
-        else:
-            # Run command
-            Process = os.system(f&#39;./../{command}&#39;)
-            if Process != 0:
-                print(&#39;\nMessage 1:&#39;)
-                print(f&#39;\tThe exit code \&#39;{Process}\&#39; is non-zero, &#39;
-                      &#39;which indicates compilation or run problems.\n&#39;)
-
-    os.chdir(&#34;..&#34;)
-
-    # Read the output
-    simulation_outputs = self.read_output()
-
-    return simulation_outputs</code></pre>
-</details>
-</dd>
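The poll-and-retry pattern used above can be isolated into a standalone sketch (the helper name, command and file names are illustrative, not part of the library):

```python
import os
import time


def wait_for_outputs(command, output_file_names, poll_s=3):
    """(Re)launch `command` until all expected output files exist."""
    while not all(f in os.listdir('.') for f in output_file_names):
        exit_code = os.system(command)   # launch the model run
        if exit_code != 0:
            print(f"Exit code {exit_code}; retrying in {poll_s} s")
        time.sleep(poll_s)
```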
-<dt id="pylink.PyLinkForwardModel.run_forwardmodel"><code class="name flex">
-<span>def <span class="ident">run_forwardmodel</span></span>(<span>self, xx)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>This function creates subdirectory for the current run and copies the
-necessary files to this directory and renames them. Next, it executes
-the given command.</p></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def run_forwardmodel(self, xx):
-    &#34;&#34;&#34;
-    This function creates a subdirectory for the current run, copies the
-    necessary files to this directory and renames them. Next, it executes
-    the given command.
-    &#34;&#34;&#34;
-    c_points, run_no, key_str = xx
-
-    # Handle if only one input file is provided
-    if not isinstance(self.input_template, list):
-        self.input_template = [self.input_template]
-    if not isinstance(self.input_file, list):
-        self.input_file = [self.input_file]
-
-    new_input_file = []
-    # Loop over the InputTemplates:
-    for in_temp in self.input_template:
-        if &#39;/&#39; in in_temp:
-            in_temp = in_temp.split(&#39;/&#39;)[-1]
-        new_input_file.append(in_temp.split(&#39;.tpl&#39;)[0] + key_str +
-                              f&#34;_{run_no+1}&#34; + in_temp.split(&#39;.tpl&#39;)[1])
-
-    # Create directories
-    newpath = self.name + key_str + f&#39;_{run_no+1}&#39;
-    if not os.path.exists(newpath):
-        os.makedirs(newpath)
-
-    # Copy the necessary files to the directories
-    for in_temp in self.input_template:
-        # Input file(s) of the model
-        shutil.copy2(in_temp, newpath)
-    # Auxiliary file
-    if self.aux_file is not None:
-        shutil.copy2(self.aux_file, newpath)  # Auxiliary file
-
-    # Rename the Inputfile and/or auxiliary file
-    os.chdir(newpath)
-    for input_tem, input_file in zip(self.input_template, new_input_file):
-        if &#39;/&#39; in input_tem:
-            input_tem = input_tem.split(&#39;/&#39;)[-1]
-        os.rename(input_tem, input_file)
-
-    # Update the parameters in the input file(s)
-    self.update_input_params(new_input_file, c_points)
-
-    # Update the user defined command and the execution path
-    try:
-        new_command = self.shell_command.replace(self.input_file[0],
-                                                 new_input_file[0])
-        new_command = new_command.replace(self.input_file[1],
-                                          new_input_file[1])
-    except IndexError:  # only one input file was given
-        new_command = self.shell_command.replace(self.input_file[0],
-                                                 new_input_file[0])
-    # Set the exe path if not provided
-    if not bool(self.exe_path):
-        self.exe_path = os.getcwd()
-
-    # Run the model
-    output = self.run_command(new_command, self.Output.file_names)
-
-    return output</code></pre>
-</details>
-</dd>
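The per-run naming convention applied above can be traced with a few lines (values are illustrative; note the `.tpl` infix expected in the template name):

```python
# How run_forwardmodel() derives the run directory and input file names.
name, key_str, run_no = 'my_model', '', 0
in_temp = 'input.tpl.inp'   # template file with a '.tpl' infix

run_dir = name + key_str + f'_{run_no+1}'
new_input = (in_temp.split('.tpl')[0] + key_str
             + f'_{run_no+1}' + in_temp.split('.tpl')[1])

print(run_dir)     # my_model_1
print(new_input)   # input_1.inp
```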
-<dt id="pylink.PyLinkForwardModel.run_model_parallel"><code class="name flex">
-<span>def <span class="ident">run_model_parallel</span></span>(<span>self, c_points, prevRun_No=0, key_str='', mp=True)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Runs model simulations. If mp is true (default), then the simulations
-are started in parallel.</p>
-<h2 id="parameters">Parameters</h2>
-<dl>
-<dt><strong><code>c_points</code></strong> :&ensp;<code>array like</code> of <code>shape (n_samples, n_params)</code></dt>
-<dd>Collocation points (training set).</dd>
-<dt><strong><code>prevRun_No</code></strong> :&ensp;<code>int</code>, optional</dt>
-<dd>Previous run number, in case the sequential design is selected.
-The default is 0.</dd>
-<dt><strong><code>key_str</code></strong> :&ensp;<code>string</code>, optional</dt>
-<dd>A descriptive string for validation runs. The default is ''.</dd>
-<dt><strong><code>mp</code></strong> :&ensp;<code>bool</code>, optional</dt>
-<dd>Multiprocessing. The default is True.</dd>
-</dl>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><strong><code>all_outputs</code></strong> :&ensp;<code>dict</code></dt>
-<dd>A dictionary with x values (time step or point id) and all outputs.
-Each key contains an array of the shape (n_samples, n_obs).</dd>
-<dt><strong><code>new_c_points</code></strong> :&ensp;<code>array</code></dt>
-<dd>Updated collocation points (training set). If a simulation does not
-execute successfully, the parameter set is removed.</dd>
-</dl></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def run_model_parallel(self, c_points, prevRun_No=0, key_str=&#39;&#39;,
-                       mp=True):
-    &#34;&#34;&#34;
-    Runs model simulations. If mp is true (default), then the simulations
-     are started in parallel.
-
-    Parameters
-    ----------
-    c_points : array like of shape (n_samples, n_params)
-        Collocation points (training set).
-    prevRun_No : int, optional
-        Previous run number, in case the sequential design is selected.
-        The default is 0.
-    key_str : string, optional
-        A descriptive string for validation runs. The default is &#39;&#39;.
-    mp : bool, optional
-        Multiprocessing. The default is True.
-
-    Returns
-    -------
-    all_outputs : dict
-        A dictionary with x values (time step or point id) and all outputs.
-        Each key contains an array of the shape (n_samples, n_obs).
-    new_c_points : array
-        Updated collocation points (training set). If a simulation does not
-        execute successfully, the parameter set is removed.
-
-    &#34;&#34;&#34;
-
-    # Create hdf5 metadata
-    hdf5file = f&#39;ExpDesign_{self.name}.hdf5&#39;
-    hdf5_exist = os.path.exists(hdf5file)
-    file = h5py.File(hdf5file, &#39;a&#39;)
-
-    # Initialization
-    n_c_points = len(c_points)
-    self.n_outputs = len(self.Output.names)
-    all_outputs = {}
-
-    # Extract the function
-    if self.link_type.lower() == &#39;function&#39;:
-        # Prepare the function
-        Function = getattr(__import__(self.py_file), self.py_file)
-    # ---------------------------------------------------------------
-    # -------------- Multiprocessing with Pool Class ----------------
-    # ---------------------------------------------------------------
-    # Start a pool with the number of CPUs
-    if self.n_cpus is None:
-        n_cpus = multiprocessing.cpu_count()
-    else:
-        n_cpus = self.n_cpus
-
-    # Run the forward model either normally or with multiprocessing
-    if not self.multi_process:
-        group_results = list([self.run_forwardmodel((c_points,
-                                                     prevRun_No,
-                                                     key_str))])
-    else:
-        with multiprocessing.Pool(n_cpus) as p:
-            desc = f&#39;Running forward model {key_str}&#39;
-            if self.link_type.lower() == &#39;function&#39;:
-                imap_var = p.imap(Function, c_points[:, np.newaxis])
-            else:
-                args = zip(c_points,
-                           [prevRun_No+i for i in range(n_c_points)],
-                           [key_str]*n_c_points)
-                imap_var = p.imap(self.run_forwardmodel, args)
-
-            group_results = list(tqdm.tqdm(imap_var, total=n_c_points,
-                                           desc=desc))
-
-    # Save time steps or x-values
-    x_values = group_results[0][0]
-    all_outputs[&#34;x_values&#34;] = x_values
-    if not hdf5_exist:
-        if type(x_values) is dict:
-            grp_x_values = file.create_group(&#34;x_values/&#34;)
-            for varIdx, var in enumerate(self.Output.names):
-                grp_x_values.create_dataset(var, data=x_values[var])
-        else:
-            file.create_dataset(&#34;x_values&#34;, data=x_values)
-
-    # Save each output in its corresponding array
-    NaN_idx = []
-    for varIdx, var in enumerate(self.Output.names):
-
-        if not hdf5_exist:
-            grpY = file.create_group(&#34;EDY/&#34;+var)
-        else:
-            grpY = file.get(&#34;EDY/&#34;+var)
-
-        Outputs = np.asarray([item[varIdx+1] for item in group_results],
-                             dtype=np.float64)
-
-        if prevRun_No == 0 and key_str == &#39;&#39;:
-            grpY.create_dataset(f&#39;init_{key_str}&#39;, data=Outputs)
-        else:
-            try:
-                oldEDY = np.array(file[f&#39;EDY/{var}/adaptive_{key_str}&#39;])
-                del file[f&#39;EDY/{var}/adaptive_{key_str}&#39;]
-                data = np.vstack((oldEDY, Outputs))
-            except KeyError:
-                data = Outputs
-            grpY.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-        NaN_idx = np.unique(np.argwhere(np.isnan(Outputs))[:, 0])
-        all_outputs[var] = np.delete(Outputs, NaN_idx, axis=0)
-
-        if prevRun_No == 0 and key_str == &#39;&#39;:
-            grpY.create_dataset(f&#34;New_init_{key_str}&#34;,
-                                data=all_outputs[var])
-        else:
-            try:
-                name = f&#39;EDY/{var}/New_adaptive_{key_str}&#39;
-                oldEDY = np.array(file[name])
-                del file[f&#39;EDY/{var}/New_adaptive_{key_str}&#39;]
-                data = np.vstack((oldEDY, all_outputs[var]))
-            except KeyError:
-                data = all_outputs[var]
-            grpY.create_dataset(f&#39;New_adaptive_{key_str}&#39;, data=data)
-
-    # Print the collocation points whose simulations crashed
-    if len(NaN_idx) != 0:
-        print(&#39;\n&#39;)
-        print(&#39;*&#39;*20)
-        print(&#34;\nThe following parameter sets have been removed:\n&#34;,
-              c_points[NaN_idx])
-        print(&#34;\n&#34;)
-        print(&#39;*&#39;*20)
-
-    # Pass it to the attribute
-    new_c_points = np.delete(c_points, NaN_idx, axis=0)
-    self.OutputMatrix = all_outputs
-
-    # Save CollocationPoints
-    grpX = file.create_group(&#34;EDX&#34;) if not hdf5_exist else file.get(&#34;EDX&#34;)
-    if prevRun_No == 0 and key_str == &#39;&#39;:
-        grpX.create_dataset(&#34;init_&#34;+key_str, data=c_points)
-        if len(NaN_idx) != 0:
-            grpX.create_dataset(&#34;New_init_&#34;+key_str, data=new_c_points)
-
-    else:
-        try:
-            name = f&#39;EDX/adaptive_{key_str}&#39;
-            oldCollocationPoints = np.array(file[name])
-            del file[f&#39;EDX/adaptive_{key_str}&#39;]
-            data = np.vstack((oldCollocationPoints, new_c_points))
-        except KeyError:
-            data = new_c_points
-        grpX.create_dataset(&#39;adaptive_&#39;+key_str, data=data)
-
-        if len(NaN_idx) != 0:
-            try:
-                name = f&#39;EDX/New_adaptive_{key_str}&#39;
-                oldCollocationPoints = np.array(file[name])
-                del file[f&#39;EDX/New_adaptive_{key_str}&#39;]
-                data = np.vstack((oldCollocationPoints, new_c_points))
-            except KeyError:
-                data = new_c_points
-            grpX.create_dataset(&#39;New_adaptive_&#39;+key_str, data=data)
-
-    # Close h5py file
-    file.close()
-
-    return all_outputs, new_c_points</code></pre>
-</details>
-</dd>
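A hedged usage sketch for `run_model_parallel`, assuming a configured `PyLinkForwardModel` instance named `model` with two parameters (bounds and sample size are illustrative):

```python
import h5py
import numpy as np

c_points = np.random.uniform(low=[0.1, 1e-5], high=[0.5, 1e-3],
                             size=(10, 2))
all_outputs, new_c_points = model.run_model_parallel(c_points)

# Inspect the experimental-design file written during the run; the file
# name pattern follows the source: ExpDesign_<model name>.hdf5.
with h5py.File(f'ExpDesign_{model.name}.hdf5', 'r') as f:
    f.visit(print)   # e.g. EDX/init_, EDY/<var>/init_, x_values
```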
-<dt id="pylink.PyLinkForwardModel.update_input_params"><code class="name flex">
-<span>def <span class="ident">update_input_params</span></span>(<span>self, new_input_file, param_sets)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Finds this pattern with <X1> in the new_input_file and replace it with
-the new value from the array param_sets.</p>
-<h2 id="parameters">Parameters</h2>
-<dl>
-<dt><strong><code>new_input_file</code></strong> :&ensp;<code>list</code> of <code>strings</code></dt>
-<dd>Names of the input files to be updated.</dd>
-<dt><strong><code>param_sets</code></strong> :&ensp;<code>array</code> of <code>shape (n_params,)</code></dt>
-<dd>Parameter values to insert into the placeholders.</dd>
-</dl>
-<h2 id="returns">Returns</h2>
-<p>None.</p></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def update_input_params(self, new_input_file, param_sets):
-    &#34;&#34;&#34;
-    Finds the patterns &lt;X1&gt;, &lt;X2&gt;, ... in the given input files and
-    replaces them with the corresponding values from the array param_sets.
-
-    Parameters
-    ----------
-    new_input_file : list of strings
-        Names of the input files to be updated.
-    param_sets : array of shape (n_params,)
-        Parameter values to insert into the placeholders.
-
-    Returns
-    -------
-    None.
-
-    &#34;&#34;&#34;
-    NofPa = param_sets.shape[0]
-    text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
-
-    for filename in new_input_file:
-        # Read in the file
-        with open(filename, &#39;r&#39;) as file:
-            filedata = file.read()
-
-        # Replace the target string
-        for text_to_search, params in zip(text_to_search_list, param_sets):
-            filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
-
-        # Write the file out again
-        with open(filename, &#39;w&#39;) as file:
-            file.write(filedata)</code></pre>
-</details>
-</dd>
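The placeholder substitution can be reproduced in isolation; this self-contained sketch applies the same `0.4e` formatting the method uses (parameter names and values are illustrative):

```python
import numpy as np

param_sets = np.array([0.35, 1.2e-4])
template = 'porosity = <X1>\nconductivity = <X2>\n'
for i, value in enumerate(param_sets):
    template = template.replace(f'<X{i+1}>', f'{value:0.4e}')
print(template)
# porosity = 3.5000e-01
# conductivity = 1.2000e-04
```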
-<dt id="pylink.PyLinkForwardModel.within_range"><code class="name flex">
-<span>def <span class="ident">within_range</span></span>(<span>self, out, minout, maxout)</span>
-</code></dt>
-<dd>
-<div class="desc"></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def within_range(self, out, minout, maxout):
-    inside = False
-    if (out &gt; minout).all() and (out &lt; maxout).all():
-        inside = True
-    return inside</code></pre>
-</details>
-</dd>
-<dt id="pylink.PyLinkForwardModel.zip_subdirs"><code class="name flex">
-<span>def <span class="ident">zip_subdirs</span></span>(<span>self, dir_name, key)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Zips all the files containing the key(word).</p>
-<h2 id="parameters">Parameters</h2>
-<dl>
-<dt><strong><code>dir_name</code></strong> :&ensp;<code>string</code></dt>
-<dd>Directory name.</dd>
-<dt><strong><code>key</code></strong> :&ensp;<code>string</code></dt>
-<dd>Keyword to search for.</dd>
-</dl>
-<h2 id="returns">Returns</h2>
-<p>None.</p></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def zip_subdirs(self, dir_name, key):
-    &#34;&#34;&#34;
-    Zips all files inside directories whose names contain the keyword.
-
-    Parameters
-    ----------
-    dir_name : string
-        Directory name.
-    key : string
-        Keyword to search for.
-
-    Returns
-    -------
-    None.
-
-    &#34;&#34;&#34;
-    # setup file paths variable
-    dir_list = []
-    file_paths = []
-
-    # Read all directories, subdirectories and file lists
-    dir_path = os.getcwd()
-
-    for root, directories, files in os.walk(dir_path):
-        for directory in directories:
-            # Create the full filepath by using os module.
-            if key in directory:
-                folderPath = os.path.join(dir_path, directory)
-                dir_list.append(folderPath)
-
-    # Loop over the identified directories to store the file paths
-    for direct_name in dir_list:
-        for root, directories, files in os.walk(direct_name):
-            for filename in files:
-                # Create the full filepath by using os module.
-                filePath = os.path.join(root, filename)
-                file_paths.append(&#39;.&#39;+filePath.split(dir_path)[1])
-
-    # writing files to a zipfile
-    if len(file_paths) != 0:
-        zip_file = zipfile.ZipFile(dir_name+&#39;.zip&#39;, &#39;w&#39;)
-        with zip_file:
-            # writing each file one by one
-            for file in file_paths:
-                zip_file.write(file)
-
-        file_paths = [path for path in os.listdir(&#39;.&#39;) if key in path]
-
-        for path in file_paths:
-            shutil.rmtree(path)
-
-        print(&#34;\n&#34;)
-        print(f&#39;{dir_name}.zip file has been created successfully!\n&#39;)
-
-    return</code></pre>
-</details>
-</dd>
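A short usage sketch (directory and keyword names are illustrative, assuming a configured `PyLinkForwardModel` instance named `model`):

```python
# Archive every run directory whose name contains the keyword, then
# remove the originals, as zip_subdirs() does after a successful zip.
model.zip_subdirs('my_model_runs', key='my_model')
# -> creates my_model_runs.zip and deletes the matched directories
```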
-</dl>
-</dd>
-</dl>
-</section>
-</article>
-<nav id="sidebar">
-<h1>Index</h1>
-<div class="toc">
-<ul></ul>
-</div>
-<ul id="index">
-<li><h3><a href="#header-classes">Classes</a></h3>
-<ul>
-<li>
-<h4><code><a title="pylink.PyLinkForwardModel" href="#pylink.PyLinkForwardModel">PyLinkForwardModel</a></code></h4>
-<ul class="two-column">
-<li><code><a title="pylink.PyLinkForwardModel.Output" href="#pylink.PyLinkForwardModel.Output">Output</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.read_mc_reference" href="#pylink.PyLinkForwardModel.read_mc_reference">read_mc_reference</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.read_observation" href="#pylink.PyLinkForwardModel.read_observation">read_observation</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.read_output" href="#pylink.PyLinkForwardModel.read_output">read_output</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.run_command" href="#pylink.PyLinkForwardModel.run_command">run_command</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.run_forwardmodel" href="#pylink.PyLinkForwardModel.run_forwardmodel">run_forwardmodel</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.run_model_parallel" href="#pylink.PyLinkForwardModel.run_model_parallel">run_model_parallel</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.update_input_params" href="#pylink.PyLinkForwardModel.update_input_params">update_input_params</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.within_range" href="#pylink.PyLinkForwardModel.within_range">within_range</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.zip_subdirs" href="#pylink.PyLinkForwardModel.zip_subdirs">zip_subdirs</a></code></li>
-</ul>
-</li>
-</ul>
-</li>
-</ul>
-</nav>
-</main>
-<footer id="footer">
-<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
-</footer>
-</body>
-</html>
\ No newline at end of file
diff --git a/docs/build/html/search.html b/docs/build/html/search.html
deleted file mode 100644
index 946e9ed6b60251af4392ce2fc6645cb4a37f06df..0000000000000000000000000000000000000000
--- a/docs/build/html/search.html
+++ /dev/null
@@ -1,123 +0,0 @@
-
-<!DOCTYPE html>
-
-<html lang="python">
-  <head>
-    <meta charset="utf-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Search &#8212; bayesvalidrox 0.0.3 documentation</title>
-    <link rel="stylesheet" type="text/css" href="_static/pygments.css" />
-    <link rel="stylesheet" type="text/css" href="_static/alabaster.css" />
-    
-    <script data-url_root="./" id="documentation_options" src="_static/documentation_options.js"></script>
-    <script src="_static/jquery.js"></script>
-    <script src="_static/underscore.js"></script>
-    <script src="_static/doctools.js"></script>
-    <script src="_static/searchtools.js"></script>
-    <script src="_static/language_data.js"></script>
-    <link rel="index" title="Index" href="genindex.html" />
-    <link rel="search" title="Search" href="#" />
-  <script src="searchindex.js" defer></script>
-  
-   
-  <link rel="stylesheet" href="_static/custom.css" type="text/css" />
-  
-  
-  <meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9" />
-
-
-  </head><body>
-  
-
-    <div class="document">
-      <div class="documentwrapper">
-        <div class="bodywrapper">
-          
-
-          <div class="body" role="main">
-            
-  <h1 id="search-documentation">Search</h1>
-  
-  <noscript>
-  <div class="admonition warning">
-  <p>
-    Please activate JavaScript to enable the search
-    functionality.
-  </p>
-  </div>
-  </noscript>
-  
-  
-  <p>
-    Searching for multiple words only shows matches that contain
-    all words.
-  </p>
-  
-  
-  <form action="" method="get">
-    <input type="text" name="q" aria-labelledby="search-documentation" value="" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false"/>
-    <input type="submit" value="search" />
-    <span id="search-progress" style="padding-left: 10px"></span>
-  </form>
-  
-  
-  
-  <div id="search-results">
-  
-  </div>
-  
-
-          </div>
-          
-        </div>
-      </div>
-      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
-        <div class="sphinxsidebarwrapper">
-<h1 class="logo"><a href="index.html">bayesvalidrox</a></h1>
-
-
-
-
-
-
-
-
-<h3>Navigation</h3>
-<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
-<ul>
-<li class="toctree-l1"><a class="reference internal" href="example.html">My nifty title</a></li>
-</ul>
-
-<div class="relations">
-<h3>Related Topics</h3>
-<ul>
-  <li><a href="index.html">Documentation overview</a><ul>
-  </ul></li>
-</ul>
-</div>
-
-
-
-
-
-
-
-
-        </div>
-      </div>
-      <div class="clearer"></div>
-    </div>
-    <div class="footer">
-      &copy;2022, Farid Mohammadi.
-      
-      |
-      Powered by <a href="http://sphinx-doc.org/">Sphinx 4.4.0</a>
-      &amp; <a href="https://github.com/bitprophet/alabaster">Alabaster 0.7.12</a>
-      
-    </div>
-
-    
-
-    
-  </body>
-</html>
\ No newline at end of file
diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js
deleted file mode 100644
index 7489ea94ff4815f9bc11b3300692d6115f86139b..0000000000000000000000000000000000000000
--- a/docs/build/html/searchindex.js
+++ /dev/null
@@ -1 +0,0 @@
-Search.setIndex({docnames:["example","index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":4,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":3,"sphinx.domains.rst":2,"sphinx.domains.std":2,sphinx:56},filenames:["example.md","index.rst"],objects:{},objnames:{},objtypes:{},terms:{"09":[],"1":[],"1000":[],"125":[],"2013":[],"2020":[],"2021":[],"3":[],"306":[],"33":[],"42":[],"4d2669d69ddfe1d788318264cdcf0583":[],"61":[],"7":[],"70569":[],"925":[],"\u03b8":[],"default":[],"float":[],"function":[],"int":[],"public":[],"return":[],"true":[],A:[],For:[],If:[],It:1,The:1,admonit:0,affin:[],again:1,an:[],ani:1,approach:[],ar:[],arrai:[],asset:[],astronom:[],author:[],automat:[],b:[],base:[],bay:[],bayesian:[],bayesopt:[],between:[],biasinput:[],blob:[],both:[],bridg:[],burn:[],calcul:[],call:[],can:1,carlo:[],chain:[],code:1,com:[],combin:[],comput:[],content:0,converg:[],creat:[],d:[],data:[],de:[],defin:[],depart:[],descript:[],dict:[],e:[],ed_i:[],edx:[],emce:[],en:[],ensembl:[],environment:[],error:[],error_metamodel:[],ess:[],estim:[],evalu:[],exampl:[],except:1,factor:[],fals:[],farid:[],file:[],foreman:[],fulli:[],gaussian:[],gelman:[],gener:[],gist:[],github:[],given:[],goodman:[],greater:[],ha:[],hammer:[],have:[],hogg:[],html:[],http:[],hydraul:[],hydromechan:[],hydrosystem:[],i:[],indent:1,index:1,indic:[],infer:[],institut:[],invari:[],io:[],iter:[],iw:[],j:[],joergdietrich:[],jun:[],junpenglao:[],jwalton3141:[],lang:[],lazydoc:[],lh2:[],likelihood:[],line:1,log:[],log_lik:[],logp:[],logprior:[],m:[],mackei:[],mail:[],marg_llk:[],margin:[],markov:[],master:[],maximum:[],maxit:[],mean:[],mean_pr:[],meta:[],model:[],modul:1,mohammadi:[],mont:[],multipl:1,multitrac:[],my:1,n_param:[],n_sampl:[],n_step:[],n_walker:[],nburn:[],next:1,nifti:1,none:[],normal:1,nov:[],number:[],ob:[],obj:[],observ:[],obtain:[],one:[],onli:[],option:[],otherwis:[],output:[],p:[],pacif:[],packag:[],page:1,paragraph:1,paramet:[],parameterset:[],perform:[],pfaffenwaldr:[],post:[],posterior:[],potenti:[],predict:[],prior:[],probabl:[],process:1,propos:[],psrf:[],py:[],pylink:1,r_hat:[],ratio:[],readthedoc:[],reduct:[],regress:[],remov:1,reproduc:[],result:[],return_cov:[],return_std:[],return_var:[],robin:[],root:[],row:[],run:[],rwmh:[],s:[],sampl:1,sampler:[],sc:[],scale:[],search:1,set:[],shape:[],should:[],singl:[],societi:[],some:0,sourc:[],span:1,squar:[],stabl:[],std:[],std_pred:[],step:[],stuttgart:[],sum:[],sun:[],system:[],text:[0,1],than:[],theta:[],thi:1,titl:1,total_sigma2:[],train:[],type:[],typic:[],uni:[],univers:[],us:[],valu:[],varianc:[],verbos:[],via:[],w:[],wa:[],wai:1,wed:[],weight:[],well:[],within:[],www:[],x:[],y:[],yet:[]},titles:["My nifty title","Welcome to bayesvalidrox's documentation!"],titleterms:{"class":[],__init__:[],bayesvalidrox:1,bia:[],content:1,discrepancy_gp:[],discrepancy_gp_v1:[],document:1,eval_model:[],fit_bia:[],gelman_rubin:[],here:0,indic:1,kbd:[],log_likelihood:[],log_posterior:[],log_prior:[],marginal_llk_emce:[],mcmc:[],method:[],modul:[],my:0,nifti:0,oldpredict:[],predict:[],run_sampl:[],s:[0,1],tabl:1,titl:0,train_error_model:[],welcom:1}})
\ No newline at end of file
diff --git a/docs/html/bayes_inference.html b/docs/html/bayes_inference.html
new file mode 100644
index 0000000000000000000000000000000000000000..5fd3ac603907ba8b997dc75f0cb0cba2e587e69b
--- /dev/null
+++ b/docs/html/bayes_inference.html
@@ -0,0 +1,3653 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>bayes_inference API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>bayes_inference</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import os
+import copy
+import pandas as pd
+from tqdm import tqdm
+from scipy import stats
+import scipy.linalg as spla
+
+import seaborn as sns
+import corner
+import h5py
+import gc
+from sklearn.metrics import mean_squared_error, r2_score
+from sklearn import preprocessing
+from matplotlib.patches import Patch
+import matplotlib.lines as mlines
+from matplotlib.backends.backend_pdf import PdfPages
+import matplotlib.pylab as plt
+
+from mcmc import MCMC
+
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           &#39;../&#39;, &#39;bayesvalidrox.mplstyle&#39;))
+
+
+class BayesInference:
+    &#34;&#34;&#34;
+    A class to perform Bayesian Analysis.
+
+
+    Attributes
+    ----------
+    MetaModel : obj
+        Meta model object.
+    discrepancy : obj
+        The discrepancy object for the sigma2s, i.e. the diagonal entries
+        of the variance matrix for a multivariate normal likelihood.
+    name : str, optional
+        The type of analysis, either calibration (`Calib`) or validation
+        (`Valid`). The default is `&#39;Calib&#39;`.
+    emulator : bool, optional
+        Analysis with emulator (MetaModel). The default is `True`.
+    bootstrap : bool, optional
+        Bootstrap the analysis. The default is `False`.
+    req_outputs : list, optional
+        The list of requested output to be used for the analysis.
+        The default is `None`. If `None`, all the defined outputs of the
+        model object are used.
+    selected_indices : dict, optional
+        A dictionary with the selected indices of each model output. The
+        default is `None`. If `None`, all measurement points are used in the
+        analysis.
+    samples : array of shape (n_samples, n_params), optional
+        The samples to be used in the analysis. The default is `None`. If
+        `None`, the samples are drawn from the probabilistic input parameter
+        object of the MetaModel object.
+    n_samples : int, optional
+        Number of samples to be used in the analysis. The default is `500000`.
+        If samples is not `None`, this argument will be assigned based on the
+        number of samples given.
+    measured_data : dict, optional
+        A dictionary containing the observation data. The default is `None`.
+        If `None`, the observation defined in the Model object of the
+        MetaModel is used.
+    inference_method : str, optional
+        A method for approximating the posterior distribution in the Bayesian
+        inference step. The default is `&#39;rejection&#39;`, which stands for
+        rejection sampling. A Markov Chain Monte Carlo sampler can be
+        selected by simply passing `&#39;MCMC&#39;`.
+    mcmc_params : dict, optional
+        A dictionary with args required for the Bayesian inference with
+        `MCMC`. The default is `None`.
+
+        Pass the mcmc_params like the following:
+
+            &gt;&gt;&gt; mcmc_params:{
+                &#39;init_samples&#39;: None,  # initial samples
+                &#39;n_walkers&#39;: 100,  # number of walkers (chain)
+                &#39;n_steps&#39;: 100000,  # number of maximum steps
+                &#39;n_burn&#39;: 200,  # number of burn-in steps
+                &#39;moves&#39;: None,  # Moves for the emcee sampler
+                &#39;multiprocessing&#39;: False,  # multiprocessing
+                &#39;verbose&#39;: False # verbosity
+                }
+        The items shown above are the default values. If any parameter is
+        not defined, the default value will be assigned to it.
+    bayes_loocv : bool, optional
+        Bayesian leave-one-out cross-validation. The default is `False`. If
+        `True`, the LOOCV procedure is used to estimate the Bayesian model
+        evidence (BME).
+    n_bootstrap_itrs : int, optional
+        Number of bootstrap iterations. The default is `1`. If bayes_loocv
+        is `True`, this is set equal to the total number of points in the
+        observation data set.
+    perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
+        User defined perturbed data. The default is `[]`.
+    bootstrap_noise : float, optional
+        A noise level to perturb the data set. The default is `0.05`.
+    plot_post_pred : bool, optional
+        Plot posterior predictive plots. The default is `True`.
+    plot_map_pred : bool, optional
+        Plot the model outputs vs the metamodel predictions for the maximum
+        a posteriori (defined as `max_a_posteriori`) parameter set. The
+        default is `False`.
+    max_a_posteriori : str, optional
+        Maximum a posteriori. `&#39;mean&#39;` and `&#39;mode&#39;` are available. The default
+        is `&#39;mean&#39;`.
+    corner_title_fmt : str, optional
+        Title format for the posterior distribution plot with python
+        package `corner`. The default is `&#39;.3e&#39;`.
+
+    &#34;&#34;&#34;
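+
+    # A minimal usage sketch (MetaModel and DiscrepancyOpts are assumed to
+    # be configured beforehand; the names here are illustrative):
+    #
+    #     &gt;&gt;&gt; bayes = BayesInference(MetaModel, discrepancy=DiscrepancyOpts,
+    #     ...                        name=&#39;Calib&#39;, bootstrap=True,
+    #     ...                        n_bootstrap_itrs=100)
+    #     &gt;&gt;&gt; bayes = bayes.create_inference()
+    #     &gt;&gt;&gt; bayes.posterior_df.describe()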
+
+    def __init__(self, MetaModel, discrepancy=None, emulator=True,
+                 name=&#39;Calib&#39;, bootstrap=False, req_outputs=None,
+                 selected_indices=None, samples=None, n_samples=500000,
+                 measured_data=None, inference_method=&#39;rejection&#39;,
+                 mcmc_params=None, bayes_loocv=False, n_bootstrap_itrs=1,
+                 perturbed_data=[], bootstrap_noise=0.05, plot_post_pred=True,
+                 plot_map_pred=False, max_a_posteriori=&#39;mean&#39;,
+                 corner_title_fmt=&#39;.3e&#39;):
+
+        self.MetaModel = MetaModel
+        self.Discrepancy = discrepancy
+        self.emulator = emulator
+        self.name = name
+        self.bootstrap = bootstrap
+        self.req_outputs = req_outputs
+        self.selected_indices = selected_indices
+        self.samples = samples
+        self.n_samples = n_samples
+        self.measured_data = measured_data
+        self.inference_method = inference_method
+        self.mcmc_params = mcmc_params
+        self.perturbed_data = perturbed_data
+        self.bayes_loocv = bayes_loocv
+        self.n_bootstrap_itrs = n_bootstrap_itrs
+        self.bootstrap_noise = bootstrap_noise
+        self.plot_post_pred = plot_post_pred
+        self.plot_map_pred = plot_map_pred
+        self.max_a_posteriori = max_a_posteriori
+        self.corner_title_fmt = corner_title_fmt
+
+    # -------------------------------------------------------------------------
+    def create_inference(self):
+        &#34;&#34;&#34;
+        Starts the inference.
+
+        Returns
+        -------
+        BayesInference : obj
+            The Bayes inference object.
+
+        &#34;&#34;&#34;
+
+        # Set some variables
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+        n_params = MetaModel.n_params
+        output_names = Model.Output.names
+        par_names = MetaModel.ExpDesign.par_names
+
+        # If the prior is set by the user, take it.
+        if self.samples is None:
+            self.samples = MetaModel.ExpDesign.generate_samples(
+                self.n_samples, &#39;random&#39;)
+        else:
+            try:
+                samples = self.samples.values
+            except AttributeError:
+                samples = self.samples
+
+            # Take care of an additional Sigma2s
+            self.samples = samples[:, :n_params]
+
+            # Update number of samples
+            self.n_samples = self.samples.shape[0]
+
+        # ---------- Preparation of observation data ----------
+        # Read observation data and perturb it if requested.
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+        # Convert measured_data to a data frame
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # Extract the total number of measurement points
+        if self.name.lower() == &#39;calib&#39;:
+            self.n_tot_measurement = Model.n_obs
+        else:
+            self.n_tot_measurement = Model.n_obs_valid
+
+        # Find measurement error (if not given) for post predictive plot
+        if not hasattr(self, &#39;measurement_error&#39;):
+            if isinstance(self.Discrepancy, dict):
+                Disc = self.Discrepancy[&#39;known&#39;]
+            else:
+                Disc = self.Discrepancy
+            if isinstance(Disc.parameters, dict):
+                self.measurement_error = {k: np.sqrt(Disc.parameters[k]) for k
+                                          in Disc.parameters.keys()}
+            else:
+                try:
+                    self.measurement_error = np.sqrt(Disc.parameters)
+                except TypeError:
+                    pass
+
+        # ---------- Preparation of variance for covariance matrix ----------
+        # Independent and identically distributed
+        total_sigma2 = dict()
+        opt_sigma_flag = isinstance(self.Discrepancy, dict)
+        opt_sigma = None
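+        # Three cases are distinguished in the loop below:
+        #   A: a dict discrepancy with &#39;known&#39; and &#39;infer&#39; parts, i.e. a
+        #      known error plus an inferred bias term,
+        #   B: sigma2 fully known via Discrepancy.parameters (no bias term),
+        #   C: sigma2 unknown, inferred jointly with the model parameters.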
+        for key_idx, key in enumerate(output_names):
+
+            # Find opt_sigma
+            if opt_sigma_flag and opt_sigma is None:
+                # Option A: known error with unknown bias term
+                opt_sigma = &#39;A&#39;
+                known_discrepancy = self.Discrepancy[&#39;known&#39;]
+                self.Discrepancy = self.Discrepancy[&#39;infer&#39;]
+                sigma2 = np.array(known_discrepancy.parameters[key])
+
+            elif opt_sigma == &#39;A&#39; or self.Discrepancy.parameters is not None:
+                # Option B: The sigma2 is known (no bias term)
+                if opt_sigma == &#39;A&#39;:
+                    sigma2 = np.array(known_discrepancy.parameters[key])
+                else:
+                    opt_sigma = &#39;B&#39;
+                    sigma2 = np.array(self.Discrepancy.parameters[key])
+
+            elif not isinstance(self.Discrepancy.InputDisc, str):
+                # Option C: The sigma2 is unknown (bias term including error)
+                opt_sigma = &#39;C&#39;
+                self.Discrepancy.opt_sigma = opt_sigma
+                n_measurement = self.measured_data[key].values.shape
+                sigma2 = np.zeros((n_measurement[0]))
+
+            total_sigma2[key] = sigma2
+
+            self.Discrepancy.opt_sigma = opt_sigma
+            self.Discrepancy.total_sigma2 = total_sigma2
+
+        # If inferred sigma2s (e.g. obtained from a calibration run) are given
+        try:
+            self.sigma2s = self.Discrepancy.get_sample(self.n_samples)
+        except Exception:
+            pass
+
+        # ---------------- Bootstrap &amp; TOM --------------------
+        if self.bootstrap or self.bayes_loocv:
+            if len(self.perturbed_data) == 0:
+                # Add zero-mean noise to the observation data
+                self.perturbed_data = self._perturb_data(
+                    self.measured_data, output_names
+                    )
+            else:
+                self.n_bootstrap_itrs = len(self.perturbed_data)
+
+            # -------- Model Discrepancy -----------
+            if hasattr(self, &#39;error_model&#39;) and self.error_model \
+               and self.name.lower() != &#39;calib&#39;:
+                # Select posterior mean as MAP
+                MAP_theta = self.samples.mean(axis=0).reshape((1, n_params))
+                # MAP_theta = stats.mode(self.samples,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=MAP_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+            # -----------------------------------------------------
+            # ----- Loop over the perturbed observation data ------
+            # -----------------------------------------------------
+            # Initialize arrays
+            logLikelihoods = np.zeros((self.n_samples, self.n_bootstrap_itrs),
+                                      dtype=np.float16)
+            BME_Corr = np.zeros((self.n_bootstrap_itrs))
+            log_BME = np.zeros((self.n_bootstrap_itrs))
+            KLD = np.zeros((self.n_bootstrap_itrs))
+            inf_entropy = np.zeros((self.n_bootstrap_itrs))
+
+            # Compute the prior predictions
+            # Evaluate the MetaModel
+            if self.emulator:
+                y_hat, y_std = MetaModel.eval_metamodel(samples=self.samples)
+                self.__mean_pce_prior_pred = y_hat
+                self._std_pce_prior_pred = y_std
+
+                # Correct the predictions with Model discrepancy
+                if hasattr(self, &#39;error_model&#39;) and self.error_model:
+                    y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
+                        self.bias_inputs, self.__mean_pce_prior_pred
+                        )
+                    self.__mean_pce_prior_pred = y_hat_corr
+                    self._std_pce_prior_pred = y_std
+
+                # Surrogate model&#39;s error using RMSE of test data
+                if hasattr(MetaModel, &#39;rmse&#39;):
+                    surrError = MetaModel.rmse
+                else:
+                    surrError = None
+
+            else:
+                # No emulator: the surrogate error does not apply
+                surrError = None
+
+                # Evaluate the original model
+                self.__model_prior_pred = self._eval_model(
+                    samples=self.samples, key=&#39;PriorPred&#39;
+                    )
+
+            # Start the likelihood-BME computations for the perturbed data
+            for itr_idx, data in tqdm(
+                    enumerate(self.perturbed_data), ascii=True,
+                    desc=&#34;Bootstrapping the BME calculations&#34;
+                    ):
+
+                # ---------------- Likelihood calculation ----------------
+                if self.emulator:
+                    model_evals = self.__mean_pce_prior_pred
+                else:
+                    model_evals = self.__model_prior_pred
+
+                # Leave one out
+                if self.bayes_loocv:
+                    self.selected_indices = np.nonzero(data)[0]
+
+                # Prepare data dataframe
+                nobs = list(self.measured_data.count().values[1:])
+                numbers = list(map(sum, zip([0] + nobs, nobs)))
+                indices = list(zip([0] + numbers, numbers))
+                data_dict = {
+                    output_names[i]: data[j:k] for i, (j, k) in
+                    enumerate(indices)
+                    }
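+                # e.g. with two outputs of 3 and 2 points: nobs = [3, 2],
+                # numbers = [3, 5], indices = [(0, 3), (3, 5)], so the
+                # perturbed vector is split into data[0:3] and data[3:5].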
+
+                # Unknown sigma2
+                if opt_sigma == &#39;C&#39; or hasattr(self, &#39;sigma2s&#39;):
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        sigma2=self.sigma2s, std=surrError
+                        )
+                else:
+                    # known sigma2
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        std=surrError
+                        )
+
+                # ---------------- BME Calculations ----------------
+                # BME (log)
+                log_BME[itr_idx] = np.log(
+                    np.nanmean(np.exp(logLikelihoods[:, itr_idx],
+                                      dtype=np.float128))
+                    )
+
+                # Rejection Step
+                # Random numbers between 0 and 1
+                unif = np.random.rand(1, self.n_samples)[0]
+
+                # Reject the poorly performed prior
+                Likelihoods = np.exp(logLikelihoods[:, itr_idx],
+                                     dtype=np.float64)
+                accepted = (Likelihoods/np.max(Likelihoods)) &gt;= unif
+                posterior = self.samples[accepted]
+
+                # Posterior-based expectation of likelihoods
+                postExpLikelihoods = np.mean(
+                    logLikelihoods[:, itr_idx][accepted]
+                    )
+
+                # Posterior-based expectation of prior densities
+                postExpPrior = np.mean(
+                    np.log([MetaModel.ExpDesign.JDist.pdf(posterior.T)])
+                    )
+
+                # Calculate Kullback-Leibler Divergence
+                KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
+
+                # Information Entropy based on Entropy paper Eq. 38
+                inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - \
+                    postExpLikelihoods
+
+                # TODO: BME correction when using Emulator
+                # if self.emulator:
+                #     BME_Corr[itr_idx] = self._corr_factor_BME(
+                #         data, total_sigma2, posterior
+                #         )
+
+                # Clear memory
+                gc.collect(generation=2)
+
+            # ---------------- Store BME, Likelihoods for all ----------------
+            # Likelihoods (Size: n_samples, n_bootstrap_itr)
+            self.log_likes = logLikelihoods
+
+            # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
+            self.log_BME = log_BME
+            self.KLD = KLD
+            self.inf_entropy = inf_entropy
+
+            # TODO: BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
+            # if self.emulator: self.BMECorrFactor = BME_Corr
+
+            # BME = BME + BMECorrFactor
+            if self.emulator:
+                self.log_BME = self.log_BME  # + self.BMECorrFactor
+
+        # ---------------- Parameter Bayesian inference ----------------
+        if self.inference_method.lower() == &#39;mcmc&#39;:
+            # Instantiate the MCMC object
+            MCMC_Obj = MCMC(self)
+            self.posterior_df = MCMC_Obj.run_sampler(
+                self.measured_data, total_sigma2
+                )
+
+        elif self.name.lower() == &#39;valid&#39;:
+            # Convert to a dataframe if samples are provided after calibration.
+            self.posterior_df = pd.DataFrame(self.samples, columns=par_names)
+
+        else:
+            # Rejection sampling
+            self.posterior_df = self._rejection_sampling()
+
+        # Provide posterior&#39;s summary
+        print(&#39;\n&#39;)
+        print(&#39;-&#39;*15 + &#39;Posterior summary&#39; + &#39;-&#39;*15)
+        pd.options.display.max_columns = None
+        pd.options.display.max_rows = None
+        print(self.posterior_df.describe())
+        print(&#39;-&#39;*50)
+
+        # -------- Model Discrepancy -----------
+        if hasattr(self, &#39;error_model&#39;) and self.error_model \
+           and self.name.lower() == &#39;calib&#39;:
+            if self.inference_method.lower() == &#39;mcmc&#39;:
+                self.error_MetaModel = MCMC_Obj.error_MetaModel
+            else:
+                # Select posterior mean as MAP
+                if opt_sigma == &#34;B&#34;:
+                    posterior_df = self.posterior_df.values
+                else:
+                    posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+
+                # Select posterior mean as Maximum a posteriori
+                map_theta = posterior_df.mean(axis=0).reshape((1, n_params))
+                # map_theta = stats.mode(Posterior_df,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=map_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+        # -------- Posterior predictive -----------
+        self._posterior_predictive()
+
+        # -----------------------------------------------------
+        # ------------------ Visualization --------------------
+        # -----------------------------------------------------
+        # Create Output directory, if it doesn&#39;t exist already.
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        os.makedirs(out_dir, exist_ok=True)
+
+        # -------- Posterior parameters --------
+        if opt_sigma != &#34;B&#34;:
+            par_names.extend(
+                [self.Discrepancy.InputDisc.Marginals[i].name for i
+                 in range(len(self.Discrepancy.InputDisc.Marginals))]
+                )
+        # Plot with corner
+        figPosterior = corner.corner(self.posterior_df.to_numpy(),
+                                     labels=par_names,
+                                     quantiles=[0.15, 0.5, 0.85],
+                                     show_titles=True,
+                                     title_fmt=self.corner_title_fmt,
+                                     labelpad=0.2,
+                                     use_math_text=True,
+                                     title_kwargs={&#34;fontsize&#34;: 28},
+                                     plot_datapoints=False,
+                                     plot_density=False,
+                                     fill_contours=True,
+                                     smooth=0.5,
+                                     smooth1d=0.5)
+
+        # Loop over axes and set x limits
+        if opt_sigma == &#34;B&#34;:
+            axes = np.array(figPosterior.axes).reshape(
+                (len(par_names), len(par_names))
+                )
+            for yi in range(len(par_names)):
+                ax = axes[yi, yi]
+                ax.set_xlim(MetaModel.bound_tuples[yi])
+                for xi in range(yi):
+                    ax = axes[yi, xi]
+                    ax.set_xlim(MetaModel.bound_tuples[xi])
+
+        # Turn off gridlines
+        for ax in figPosterior.axes:
+            ax.grid(False)
+
+        if self.emulator:
+            plotname = f&#39;/Posterior_Dist_{Model.name}_emulator&#39;
+        else:
+            plotname = f&#39;/Posterior_Dist_{Model.name}&#39;
+
+        figPosterior.set_size_inches((24, 16))
+        figPosterior.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+
+        # -------- Plot MAP --------
+        if self.plot_map_pred:
+            self._plot_max_a_posteriori()
+
+        # -------- Plot log_BME dist --------
+        if self.bootstrap and self.n_bootstrap_itrs &gt; 1:
+            # Computing the TOM performance
+            self.log_BME_tom = stats.chi2.rvs(
+                self.n_tot_measurement, size=self.log_BME.shape[0]
+                )
+
+            fig, ax = plt.subplots()
+            sns.kdeplot(self.log_BME_tom, ax=ax, color=&#34;green&#34;, shade=True)
+            sns.kdeplot(
+                self.log_BME, ax=ax, color=&#34;blue&#34;, shade=True,
+                label=&#39;Model BME&#39;)
+
+            ax.set_xlabel(&#39;log(BME)&#39;)
+            ax.set_ylabel(&#39;Probability density&#39;)
+
+            legend_elements = [
+                Patch(facecolor=&#39;green&#39;, edgecolor=&#39;green&#39;, label=&#39;TOM BME&#39;),
+                Patch(facecolor=&#39;blue&#39;, edgecolor=&#39;blue&#39;, label=&#39;Model BME&#39;)
+                ]
+            ax.legend(handles=legend_elements)
+
+            if self.emulator:
+                plotname = f&#39;/BME_hist_{Model.name}_emulator&#39;
+            else:
+                plotname = f&#39;/BME_hist_{Model.name}&#39;
+
+            plt.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;, bbox_inches=&#39;tight&#39;)
+            plt.show()
+            plt.close()
+
+        # -------- Posterior predictives --------
+        if self.plot_post_pred:
+            # Plot the posterior predictive
+            self._plot_post_predictive()
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def _perturb_data(self, data, output_names):
+        &#34;&#34;&#34;
+        Returns an array with n_bootstrap_itrs rows of perturbed data.
+        The first row includes the original observation data.
+        If `self.bayes_loocv` is True, a 2d-array will be returned with
+        repeated rows and zero diagonal entries.
+
+        Parameters
+        ----------
+        data : pandas DataFrame
+            Observation data.
+        output_names : list
+            List of the output names.
+
+        Returns
+        -------
+        final_data : array
+            Perturbed data set.
+
+        &#34;&#34;&#34;
+        noise_level = self.bootstrap_noise
+        obs_data = data[output_names].values
+        n_measurement, n_outs = obs_data.shape
+        self.n_tot_measurement = obs_data[~np.isnan(obs_data)].shape[0]
+        # Number of bootstrap iterations
+        if self.bayes_loocv:
+            self.n_bootstrap_itrs = self.n_tot_measurement
+
+        # Pass loocv dataset
+        if self.bayes_loocv:
+            obs = obs_data.T[~np.isnan(obs_data.T)]
+            final_data = np.repeat(np.atleast_2d(obs), self.n_bootstrap_itrs,
+                                   axis=0)
+            np.fill_diagonal(final_data, 0)
+            return final_data
+
+        else:
+            final_data = np.zeros(
+                (self.n_bootstrap_itrs, self.n_tot_measurement)
+                )
+            final_data[0] = obs_data.T[~np.isnan(obs_data.T)]
+            for itrIdx in range(1, self.n_bootstrap_itrs):
+                data = np.zeros((n_measurement, n_outs))
+                for idx in range(len(output_names)):
+                    std = np.nanstd(obs_data[:, idx])
+                    if std == 0:
+                        std = 0.001
+                    noise = std * noise_level
+                    data[:, idx] = np.add(
+                        obs_data[:, idx],
+                        np.random.normal(0, 1, obs_data.shape[0]) * noise,
+                    )
+
+                final_data[itrIdx] = data.T[~np.isnan(data.T)]
+
+            return final_data
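+
+    # Shape sketch (toy numbers, not part of the class): with
+    # n_bootstrap_itrs=3 and 5 non-NaN observations the returned array is
+    # (3, 5); row 0 holds the original flattened data and rows 1-2 add
+    # N(0, (bootstrap_noise*std)^2) noise per output column. With
+    # bayes_loocv=True the result is instead a (5, 5) array of repeated
+    # observations with a zeroed diagonal.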
+
+    # -------------------------------------------------------------------------
+    def _logpdf(self, x, mean, cov):
+        &#34;&#34;&#34;
+        Computes the log-likelihood based on a multivariate normal
+        distribution.
+
+        Parameters
+        ----------
+        x : array_like
+            Model predictions at the measurement points.
+        mean : array_like
+            Observation data.
+        cov : 2d array
+            Covariance matrix of the distribution.
+
+        Returns
+        -------
+        log_lik : float
+            Log likelihood.
+
+        &#34;&#34;&#34;
+        n = len(mean)
+        L = spla.cholesky(cov, lower=True)
+        beta = np.sum(np.log(np.diag(L)))
+        dev = x - mean
+        alpha = dev.dot(spla.cho_solve((L, True), dev))
+        log_lik = -0.5 * alpha - beta - n / 2. * np.log(2 * np.pi)
+        return log_lik
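+
+    # The Cholesky route above evaluates the standard multivariate normal
+    # log-density, log N(x; mean, cov) = -0.5*(x-mean)^T cov^{-1} (x-mean)
+    # - 0.5*log|cov| - n/2*log(2*pi), where log|cov| = 2*sum(log(diag(L))).
+    # An illustrative check against scipy (self is unused, so None can
+    # stand in for it):
+    #
+    #     &gt;&gt;&gt; x, m, C = np.zeros(3), np.zeros(3), np.eye(3)
+    #     &gt;&gt;&gt; np.isclose(BayesInference._logpdf(None, x, m, C),
+    #     ...            stats.multivariate_normal.logpdf(x, m, C))
+    #     True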
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key=&#39;MAP&#39;):
+        &#34;&#34;&#34;
+        Evaluates Forward Model.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets. The default is None.
+        key : str, optional
+            Key string to be passed to the run_model_parallel method.
+            The default is &#39;MAP&#39;.
+
+        Returns
+        -------
+        model_outputs : dict
+            The model outputs evaluated at the given samples.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        if samples is None:
+            self.samples = MetaModel.ExpDesign.generate_samples(
+                self.n_samples, &#39;random&#39;)
+        else:
+            self.samples = samples
+            self.n_samples = len(samples)
+
+        model_outputs, _ = Model.run_model_parallel(
+            self.samples, key_str=key+self.name)
+
+        # Clean up
+        # Zip the subdirectories
+        try:
+            dir_name = f&#39;{Model.name}MAP{self.name}&#39;
+            key = dir_name + &#39;_&#39;
+            Model.zip_subdirs(dir_name, key)
+        except Exception:
+            pass
+
+        return model_outputs
+
+    # -------------------------------------------------------------------------
+    def _kernel_rbf(self, X, hyperparameters):
+        &#34;&#34;&#34;
+        Isotropic squared exponential kernel.
+
+        Higher l values lead to smoother functions and therefore to coarser
+        approximations of the training data. Lower l values make functions
+        more wiggly with wide uncertainty regions between training data points.
+
+        sigma_f controls the marginal variance of b(x)
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples_X, n_features)
+
+        hyperparameters : list
+            Lambda: characteristic length scale.
+            sigma_f: controls the marginal variance of b(x).
+            sigma_0: unresolvable error nugget term, interpreted as random
+                    error that cannot be attributed to measurement error.
+        Returns
+        -------
+        var_cov_matrix : ndarray of shape (n_samples_X, n_samples_X)
+            Kernel k(X, X).
+
+        &#34;&#34;&#34;
+        from sklearn.gaussian_process.kernels import RBF
+        min_max_scaler = preprocessing.MinMaxScaler()
+        X_minmax = min_max_scaler.fit_transform(X)
+
+        nparams = len(hyperparameters)
+        # characteristic length (0,1]
+        Lambda = hyperparameters[0]
+        # sigma_f controls the marginal variance of b(x)
+        sigma2_f = hyperparameters[1]
+
+        # cov_matrix = sigma2_f*rbf_kernel(X_minmax, gamma = 1/Lambda**2)
+
+        rbf = RBF(length_scale=Lambda)
+        cov_matrix = sigma2_f * rbf(X_minmax)
+        if nparams &gt; 2:
+            # (unresolvable error) nugget term that is interpreted as random
+            # error that cannot be attributed to measurement error.
+            sigma2_0 = hyperparameters[2:]
+            cov_matrix += np.sum(sigma2_0) * np.eye(cov_matrix.shape[0])
+
+        return cov_matrix
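+
+    # Kernel sketch (illustrative hyperparameter values): X is min-max
+    # scaled per feature, then k(x, x&#39;) = sigma2_f *
+    # exp(-||x - x&#39;||^2 / (2*Lambda^2)), with sum(sigma2_0) added to the
+    # diagonal when a nugget is given:
+    #
+    #     &gt;&gt;&gt; X = np.array([[0.0], [1.0], [2.0]])
+    #     &gt;&gt;&gt; K = BayesInference._kernel_rbf(None, X, [0.5, 1.0, 1e-6])
+    #     &gt;&gt;&gt; K.shape, bool(np.allclose(K, K.T))
+    #     ((3, 3), True)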
+
+    # -------------------------------------------------------------------------
+    def normpdf(self, outputs, obs_data, total_sigma2s, sigma2=None, std=None):
+        &#34;&#34;&#34;
+        Calculates the log-likelihood of simulation outputs given the
+        observation data.
+
+        Parameters
+        ----------
+        outputs : dict
+            A dictionary containing the simulation outputs as array of shape
+            (n_samples, n_measurement) for each model output.
+        obs_data : dict
+            A dictionary/dataframe containing the observation data.
+        total_sigma2s : dict
+            A dictionary with known values of the covariance diagonal entries,
+            a.k.a sigma^2.
+        sigma2 : array, optional
+            An array of the sigma^2 samples, when the covariance diagonal
+            entries are unknown and are being jointly inferred. The default is
+            None.
+        std : dict, optional
+            A dictionary containing the root mean squared error as array of
+            shape (n_samples, n_measurement) for each model output. The default
+            is None.
+
+        Returns
+        -------
+        logLik : array of shape (n_samples)
+            Log-likelihoods.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+        logLik = 0.0
+
+        # Extract the requested model outputs for likelihood calculation
+        if self.req_outputs is None:
+            req_outputs = Model.Output.names
+        else:
+            req_outputs = list(self.req_outputs)
+
+        # Loop over the outputs
+        for idx, out in enumerate(req_outputs):
+
+            # (Meta)Model Output
+            nsamples, nout = outputs[out].shape
+
+            # Prepare data and remove NaN
+            try:
+                data = obs_data[out].values[~np.isnan(obs_data[out])]
+            except AttributeError:
+                data = obs_data[out][~np.isnan(obs_data[out])]
+
+            # Prepare sigma2s
+            tot_sigma2s = total_sigma2s[out][~np.isnan(
+                total_sigma2s[out])][:nout]
+
+            # Add the std of the PCE if the emulator is chosen.
+            if self.emulator:
+                if std is not None:
+                    std_pce = std[out]
+                else:
+                    std_pce = np.mean(
+                        self._std_pce_prior_pred[out], axis=0)
+                # Expected value of variance (Assump: i.i.d stds)
+                tot_sigma2s += std_pce**2
+
+            # If sigma2 is not given, use given total_sigma2s
+            if sigma2 is None:
+                logLik += stats.multivariate_normal.logpdf(
+                    outputs[out], data, np.diag(tot_sigma2s))
+                continue
+
+            # Loop over each run/sample and calculate logLikelihood
+            logliks = np.zeros(nsamples)
+            for s_idx in range(nsamples):
+
+                # Simulation run
+                tot_outputs = outputs[out]
+
+                # Covariance Matrix
+                covMatrix = np.diag(tot_sigma2s)
+
+                if sigma2 is not None:
+                    # Check the type error term
+                    if hasattr(self, &#39;bias_inputs&#39;) and \
+                       not hasattr(self, &#39;error_model&#39;):
+                        # Infer a bias model using Gaussian Process Regression
+                        bias_inputs = np.hstack(
+                            (self.bias_inputs[out],
+                             tot_outputs[s_idx].reshape(-1, 1)))
+
+                        params = sigma2[s_idx, idx*3:(idx+1)*3]
+                        covMatrix = self._kernel_rbf(bias_inputs, params)
+                    else:
+                        # Infer equal sigma2s
+                        try:
+                            sigma_2 = sigma2[s_idx, idx]
+                        except TypeError:
+                            sigma_2 = 0.0
+
+                        covMatrix += sigma_2 * np.eye(nout)
+                        # covMatrix = np.diag(sigma2 * total_sigma2s)
+
+                # Select the data points to compare
+                if self.selected_indices is not None:
+                    indices = self.selected_indices[out]
+                    covMatrix = np.diag(covMatrix[indices, indices])
+                else:
+                    indices = list(range(nout))
+
+                # Compute loglikelihood
+                logliks[s_idx] = self._logpdf(
+                    tot_outputs[s_idx, indices], data[indices], covMatrix
+                    )
+
+            logLik += logliks
+        return logLik
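+
+    # In the fully known-sigma2 case (sigma2 is None) the vectorized branch
+    # above amounts to an independent Gaussian error model per measurement
+    # point. A standalone sketch of that branch (toy numbers):
+    #
+    #     &gt;&gt;&gt; out = np.array([[1.0, 2.0]])       # one sample, two points
+    #     &gt;&gt;&gt; obs = np.array([1.1, 1.9])
+    #     &gt;&gt;&gt; var = np.array([0.04, 0.04])
+    #     &gt;&gt;&gt; ll = stats.multivariate_normal.logpdf(out, obs, np.diag(var))
+    #     &gt;&gt;&gt; ll.shape                           # one value per sample row
+    #     (1,)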
+
+    # -------------------------------------------------------------------------
+    def _corr_factor_BME(self, Data, total_sigma2s, posterior):
+        &#34;&#34;&#34;
+        Calculates the correction factor for BMEs.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        OrigModelOutput = MetaModel.ExpDesign.Y
+        Model = MetaModel.ModelObj
+
+        # Posterior density estimate with a Gaussian KDE
+        postDist = stats.gaussian_kde(posterior.T)
+
+        # Remove NaN
+        Data = Data[~np.isnan(Data)]
+        total_sigma2s = total_sigma2s[~np.isnan(total_sigma2s)]
+
+        # Covariance Matrix
+        covMatrix = np.diag(total_sigma2s[:self.n_tot_measurement])
+
+        # Extract the requested model outputs for likelihood calculation
+        if self.req_outputs is None:
+            OutputType = Model.Output.names
+        else:
+            OutputType = list(self.req_outputs)
+
+        # SampleSize = OrigModelOutput[OutputType[0]].shape[0]
+
+
+        # Flatten the OutputType for OrigModel
+        TotalOutputs = np.concatenate([OrigModelOutput[x] for x in OutputType], 1)
+
+        NrofBayesSamples = self.n_samples
+        # Evaluate MetaModel on the experimental design
+        Samples = MetaModel.ExpDesign.X
+        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=Samples)
+
+        # Reset the NrofSamples to NrofBayesSamples
+        self.n_samples = NrofBayesSamples
+
+        # Flatten the OutputType for MetaModel
+        TotalPCEOutputs = np.concatenate([OutputRS[x] for x in OutputRS], 1)
+        TotalPCEstdOutputRS = np.concatenate([stdOutputRS[x] for x in stdOutputRS], 1)
+
+        logweight = 0
+        for i, sample in enumerate(Samples):
+            # Compute the likelihood of the model output vs the metamodel
+            covMatrix = np.diag(TotalPCEstdOutputRS[i]**2)
+            logLik = self._logpdf(TotalOutputs[i], TotalPCEOutputs[i], covMatrix)
+            # Compute posterior likelihood of the collocation points
+            logpostLik = np.log(postDist.pdf(sample[:,None]))[0]
+            if logpostLik != -np.inf:
+                logweight += logLik + logpostLik
+        return logweight
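+
+    # The factor sums, over the experimental-design points, the
+    # model-vs-metamodel log-likelihood plus the log posterior density of
+    # each design point; points with zero posterior density are skipped.
+    # The commented-out block below is a legacy implementation kept for
+    # reference.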
+#         # Initialization
+#         covMatrix=np.zeros((NofMeasurements, NofMeasurements), float)
+#         BME_RM_Model_Weight = np.zeros((SampleSize))
+#         BME_RM_Data_Weight = np.zeros((SampleSize))
+#         BME_Corr = np.zeros((1))
+
+
+#         # Deviation Computations
+#         RM_Model_Deviation = np.zeros((SampleSize,NofMeasurements))
+#         RM_Data_Deviation = np.zeros((SampleSize,NofMeasurements))
+#         for i in range(SampleSize):
+#             RM_Model_Deviation[i] = TotalOutputs[i][:NofMeasurements] - TotalPCEOutputs[i, :] # Reduce model- Full Model
+#             RM_Data_Deviation[i] = Observations - TotalPCEOutputs[i, :] # Reduce model- Measurement Data
+
+
+#         # Initialization  of Co-Variance Matrix
+#         # For BME_RM_ModelWeight
+#         if NofMeasurements == 1:
+#             RM_Model_Error = np.zeros((NofMeasurements, NofMeasurements), float)
+#             np.fill_diagonal(RM_Model_Error, np.cov(RM_Model_Deviation.T))
+#         else:
+#             RM_Model_Error = np.cov(RM_Model_Deviation.T)
+
+
+#         # Computation of Weight according to the deviations
+#         for i in range(SampleSize):
+#             # For BME_RM_DataWeight
+#             try:
+#                 var = Sigma[i]
+#                 if len(var)==1:
+#                     np.fill_diagonal(covMatrix, var)
+#                 else:
+#                     row,col = np.diag_indices(covMatrix.shape[0])
+#                     covMatrix[row,col] = np.hstack((np.repeat(var[0], NofMeasurements*0.5),np.repeat(var[1], NofMeasurements*0.5)))
+
+#             except:
+#                 var = Sigma
+
+#             np.fill_diagonal(covMatrix,  var)
+
+#             # Add the std of the PCE is emulator is chosen.
+# #            if self.emulator:
+# #                covMatrix_PCE = np.zeros((NofMeasurements, NofMeasurements), float)
+# #                stdPCE = np.empty((SampleSize,0))
+# #                for outputType in OutputType:
+# #                    stdPCE = np.hstack((stdPCE, stdOutputRS[outputType]))
+# #
+# #                stdPCE = np.mean(stdPCE, axis=1)
+# #                np.fill_diagonal(covMatrix_PCE, stdPCE**2)
+# #
+# #                covMatrix = covMatrix + covMatrix_PCE
+
+#             # Calculate the denomitor
+#             denom1 = (np.sqrt(2*np.pi)) ** NofMeasurements
+#             denom2 = (((2*np.pi)**(NofMeasurements/2)) * np.sqrt(np.linalg.det(covMatrix)))
+
+#             BME_RM_Model_Weight[i] =  (np.exp(-0.5 * np.dot(np.dot(RM_Model_Deviation[i], np.linalg.pinv(RM_Model_Error)), RM_Model_Deviation[i])))/denom1
+#             BME_RM_Data_Weight[i] =  (np.exp(-0.5 * np.dot(np.dot(RM_Data_Deviation[i], np.linalg.pinv(covMatrix)), RM_Data_Deviation[i][:,np.newaxis])))/denom2
+
+#         for i in range(SampleSize):
+#             BME_Corr[0] += BME_RM_Model_Weight[i] * BME_RM_Data_Weight[i] / np.nansum(BME_RM_Data_Weight)
+
+#         return np.log(BME_Corr[0])
+
+    # -------------------------------------------------------------------------
+    def _rejection_sampling(self):
+        &#34;&#34;&#34;
+        Performs rejection sampling to update the prior distribution on the
+        input parameters.
+
+        Returns
+        -------
+        posterior : pandas.DataFrame
+            Posterior samples of the input parameters.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except AttributeError:
+            sigma2_prior = None
+
+        # Check if the discrepancy is defined as a distribution:
+        samples = self.samples
+
+        if sigma2_prior is not None:
+            samples = np.hstack((samples, sigma2_prior))
+
+        # Take the first column of Likelihoods (Observation data without noise)
+        likelihoods = np.exp(self.log_likes[:, 0], dtype=np.float128)
+        n_samples = len(likelihoods)
+        norm_likelihoods = likelihoods / np.max(likelihoods)
+
+        # Normalize based on min if all Likelihoods are zero
+        if all(likelihoods == 0.0):
+            likelihoods = self.log_likes[:, 0]
+            norm_likelihoods = likelihoods / np.min(likelihoods)
+
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, n_samples)[0]
+
+        # Reject the poorly performed prior
+        accepted_samples = samples[norm_likelihoods &gt;= unif]
+
+        # Output the Posterior
+        par_names = MetaModel.ExpDesign.par_names
+        if sigma2_prior is not None:
+            for name in self.Discrepancy.name:
+                par_names.append(name)
+
+        return pd.DataFrame(accepted_samples, columns=par_names)
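+
+    # Rejection step in a nutshell: each prior sample theta_i is accepted
+    # with probability L(theta_i)/max_j L(theta_j), implemented by comparing
+    # the normalized likelihood with a U(0, 1) draw. A standalone sketch
+    # (toy log-likelihoods):
+    #
+    #     &gt;&gt;&gt; rng = np.random.default_rng(0)
+    #     &gt;&gt;&gt; log_likes = rng.normal(size=1000)
+    #     &gt;&gt;&gt; norm_lik = np.exp(log_likes - log_likes.max())
+    #     &gt;&gt;&gt; accepted = norm_lik &gt;= rng.uniform(size=1000)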
+
+    # -------------------------------------------------------------------------
+    def _posterior_predictive(self):
+        &#34;&#34;&#34;
+        Stores the prior- and posterior predictive samples, i.e. model
+        evaluations using the samples, into hdf5 files.
+
+        priorPredictive.hdf5 : Prior predictive samples.
+        postPredictive_wo_noise.hdf5 : Posterior predictive samples without
+        the additive noise.
+        postPredictive.hdf5 : Posterior predictive samples with the additive
+        noise.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        # Make a directory to save the prior/posterior predictive
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        os.makedirs(out_dir, exist_ok=True)
+
+        # Read observation data and perturb it if requested
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # X_values
+        x_values = MetaModel.ExpDesign.x_values
+
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except AttributeError:
+            sigma2_prior = None
+
+        # Extract posterior samples
+        posterior_df = self.posterior_df
+
+        # Take care of the sigma2
+        if sigma2_prior is not None:
+            try:
+                sigma2s = posterior_df[self.Discrepancy.name].values
+                posterior_df = posterior_df.drop(
+                    labels=self.Discrepancy.name, axis=1
+                    )
+            except Exception:
+                sigma2s = self.sigma2s
+
+        # Posterior predictive
+        if self.emulator:
+            if self.inference_method == &#39;rejection&#39;:
+                prior_pred = self.__mean_pce_prior_pred
+            if self.name.lower() != &#39;calib&#39;:
+                post_pred = self.__mean_pce_prior_pred
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred, post_pred_std = MetaModel.eval_metamodel(
+                    samples=posterior_df.values
+                    )
+
+        else:
+            if self.inference_method == &#39;rejection&#39;:
+                prior_pred = self.__model_prior_pred
+            if self.name.lower() != &#39;calib&#39;:
+                post_pred = self.__mean_pce_prior_pred
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred = self._eval_model(
+                    samples=posterior_df.values, key=&#39;PostPred&#39;
+                    )
+        # Correct the predictions with Model discrepancy
+        if hasattr(self, &#39;error_model&#39;) and self.error_model:
+            y_hat, y_std = self.error_MetaModel.eval_model_error(
+                self.bias_inputs, post_pred
+                )
+            post_pred, post_pred_std = y_hat, y_std
+
+        # Add the discrepancy from the likelihood samples to the posterior runs
+        total_sigma2 = self.Discrepancy.total_sigma2
+        post_pred_withnoise = copy.deepcopy(post_pred)
+        for varIdx, var in enumerate(Model.Output.names):
+            for i in range(len(post_pred[var])):
+                pred = post_pred[var][i]
+
+                # Known sigma2s
+                clean_sigma2 = total_sigma2[var][~np.isnan(total_sigma2[var])]
+                tot_sigma2 = clean_sigma2[:len(pred)]
+                cov = np.diag(tot_sigma2)
+
+                # Check the type error term
+                if sigma2_prior is not None:
+                    # Inferred sigma2s
+                    if hasattr(self, &#39;bias_inputs&#39;) and \
+                       not hasattr(self, &#39;error_model&#39;):
+                        # TODO: Infer a bias model using GPR
+                        bias_inputs = np.hstack((
+                            self.bias_inputs[var], pred.reshape(-1, 1)))
+                        params = sigma2s[i, varIdx*3:(varIdx+1)*3]
+                        cov = self._kernel_rbf(bias_inputs, params)
+                    else:
+                        # Infer equal sigma2s
+                        try:
+                            sigma2 = sigma2s[i, varIdx]
+                        except TypeError:
+                            sigma2 = 0.0
+
+                        # Convert biasSigma2s to a covMatrix
+                        cov += sigma2 * np.eye(len(pred))
+
+                if self.emulator:
+                    if hasattr(MetaModel, &#39;rmse&#39;) and \
+                       MetaModel.rmse is not None:
+                        stdPCE = MetaModel.rmse[var]
+                    else:
+                        stdPCE = post_pred_std[var][i]
+                    # Expected value of variance (Assump: i.i.d stds)
+                    cov += np.diag(stdPCE**2)
+
+                # Sample a multivariate normal distribution with mean of
+                # prediction and variance of cov
+                post_pred_withnoise[var][i] = np.random.multivariate_normal(
+                    pred, cov, 1
+                    )
+
+        # ----- Prior Predictive -----
+        if self.inference_method.lower() == &#39;rejection&#39;:
+            # Create hdf5 metadata
+            hdf5file = f&#39;{out_dir}/priorPredictive.hdf5&#39;
+            hdf5_exist = os.path.exists(hdf5file)
+            if hdf5_exist:
+                os.remove(hdf5file)
+            file = h5py.File(hdf5file, &#39;a&#39;)
+
+            # Store x_values
+            if isinstance(x_values, dict):
+                grp_x_values = file.create_group(&#34;x_values/&#34;)
+                for varIdx, var in enumerate(Model.Output.names):
+                    grp_x_values.create_dataset(var, data=x_values[var])
+            else:
+                file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+            # Store posterior predictive
+            grpY = file.create_group(&#34;EDY/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grpY.create_dataset(var, data=prior_pred[var])
+
+        # ----- Posterior Predictive only model evaluations -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+&#39;/postPredictive_wo_noise.hdf5&#39;
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, &#39;a&#39;)
+
+        # Store x_values
+        if isinstance(x_values, dict):
+            grp_x_values = file.create_group(&#34;x_values/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group(&#34;EDY/&#34;)
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred[var])
+
+        # ----- Posterior Predictive with noise -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+&#39;/postPredictive.hdf5&#39;
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, &#39;a&#39;)
+
+        # Store x_values
+        if isinstance(x_values, dict):
+            grp_x_values = file.create_group(&#34;x_values/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group(&#34;EDY/&#34;)
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred_withnoise[var])
+
+        return
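+
+    # The stored predictives can be read back with h5py, e.g. (file layout
+    # as written above; var is one of Model.Output.names):
+    #
+    #     &gt;&gt;&gt; with h5py.File(f&#39;{out_dir}/postPredictive.hdf5&#39;, &#39;r&#39;) as f:
+    #     ...     y_post = np.array(f[f&#39;EDY/{var}&#39;])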
+
+    # -------------------------------------------------------------------------
+    def _plot_max_a_posteriori(self):
+        &#34;&#34;&#34;
+        Plots the model output against the metamodel prediction at the
+        maximum a posteriori sample (mean or mode of the posterior).
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        opt_sigma = self.Discrepancy.opt_sigma
+
+        # -------- Find MAP and run MetaModel and origModel --------
+        # Compute the MAP
+        if opt_sigma == &#34;B&#34;:
+            Posterior_df = self.posterior_df.values
+        else:
+            Posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+        if self.max_a_posteriori.lower() == &#39;mean&#39;:
+            map_theta = Posterior_df.mean(axis=0).reshape(
+                (1, MetaModel.n_params))
+        else:
+            map_theta = stats.mode(Posterior_df, axis=0)[0]
+        # Print report
+        print(&#34;\nPoint estimator:\n&#34;, map_theta[0])
+
+        # Run the models for MAP
+        # MetaModel
+        map_metamodel_mean, map_metamodel_std = MetaModel.eval_metamodel(
+            samples=map_theta)
+        self.map_metamodel_mean = map_metamodel_mean
+        self.map_metamodel_std = map_metamodel_std
+
+        # origModel
+        map_orig_model = self._eval_model(samples=map_theta)
+        self.map_orig_model = map_orig_model
+
+        # Extract slicing index
+        x_values = map_orig_model[&#39;x_values&#39;]
+
+        # List of markers and colors
+        Color = [&#39;k&#39;, &#39;b&#39;, &#39;g&#39;, &#39;r&#39;]
+        Marker = &#39;x&#39;
+
+        # Create a PdfPages object
+        pdf = PdfPages(f&#39;./{out_dir}/MAP_PCE_vs_Model_{self.name}.pdf&#39;)
+        fig = plt.figure()
+        for i, key in enumerate(Model.Output.names):
+
+            y_val = map_orig_model[key]
+            y_pce_val = map_metamodel_mean[key]
+            y_pce_val_std = map_metamodel_std[key]
+
+            plt.plot(x_values, y_val, color=Color[i], marker=Marker,
+                     lw=2.0, label=&#39;$Y_{MAP}^{M}$&#39;)
+
+            plt.plot(
+                x_values, y_pce_val[i], color=Color[i], lw=2.0,
+                marker=Marker, linestyle=&#39;--&#39;, label=&#39;$Y_{MAP}^{PCE}$&#39;
+                )
+            # plot the confidence interval
+            plt.fill_between(
+                x_values, y_pce_val[i] - 1.96*y_pce_val_std[i],
+                y_pce_val[i] + 1.96*y_pce_val_std[i],
+                color=Color[i], alpha=0.15
+                )
+
+            # Calculate the adjusted R_squared and RMSE
+            R2 = r2_score(y_pce_val.reshape(-1, 1), y_val.reshape(-1, 1))
+            rmse = np.sqrt(mean_squared_error(y_pce_val, y_val))
+
+            plt.ylabel(key)
+            plt.xlabel(&#34;Time [s]&#34;)
+            plt.title(f&#39;Model vs MetaModel {key}&#39;)
+
+            ax = fig.axes[0]
+            leg = ax.legend(loc=&#39;best&#39;, frameon=True)
+            fig.canvas.draw()
+            p = leg.get_window_extent().transformed(ax.transAxes.inverted())
+            ax.text(
+                p.p0[1]-0.05, p.p1[1]-0.25,
+                f&#39;RMSE = {rmse:.3f}\n$R^2$ = {R2:.3f}&#39;,
+                transform=ax.transAxes, color=&#39;black&#39;,
+                bbox=dict(facecolor=&#39;none&#39;, edgecolor=&#39;black&#39;,
+                          boxstyle=&#39;round,pad=1&#39;))
+
+            plt.show()
+
+            # save the current figure
+            pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+            # Destroy the current plot
+            plt.clf()
+
+        pdf.close()
+
+    # -------------------------------------------------------------------------
+    def _plot_post_predictive(self):
+        &#34;&#34;&#34;
+        Plots the posterior predictives against the observation data.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        Model = self.MetaModel.ModelObj
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        # Plot the posterior predictive
+        for out_idx, out_name in enumerate(Model.Output.names):
+            fig, ax = plt.subplots()
+            with sns.axes_style(&#34;ticks&#34;):
+                x_key = list(self.measured_data)[0]
+
+                # --- Read prior and posterior predictive ---
+                if self.inference_method == &#39;rejection&#39;:
+                    #  --- Prior ---
+                    # Load posterior predictive
+                    f = h5py.File(
+                        f&#39;{out_dir}/priorPredictive.hdf5&#39;, &#39;r+&#39;)
+
+                    try:
+                        x_coords = np.array(f[f&#34;x_values/{out_name}&#34;])
+                    except KeyError:
+                        x_coords = np.array(f[&#34;x_values&#34;])
+
+                    X_values = np.repeat(x_coords, 10000)
+
+                    prior_pred_df = {}
+                    prior_pred_df[x_key] = X_values
+                    prior_pred_df[out_name] = np.array(
+                        f[f&#34;EDY/{out_name}&#34;])[:10000].flatten(&#39;F&#39;)
+                    prior_pred_df = pd.DataFrame(prior_pred_df)
+
+                    tags_post = [&#39;prior&#39;] * len(prior_pred_df)
+                    prior_pred_df.insert(
+                        len(prior_pred_df.columns), &#34;Tags&#34;, tags_post,
+                        True)
+                    f.close()
+
+                    # --- Posterior ---
+                    f = h5py.File(f&#34;{out_dir}/postPredictive.hdf5&#34;, &#39;r+&#39;)
+
+                    X_values = np.repeat(
+                        x_coords, np.array(f[f&#34;EDY/{out_name}&#34;]).shape[0])
+
+                    post_pred_df = {}
+                    post_pred_df[x_key] = X_values
+                    post_pred_df[out_name] = np.array(
+                        f[f&#34;EDY/{out_name}&#34;]).flatten(&#39;F&#39;)
+
+                    post_pred_df = pd.DataFrame(post_pred_df)
+
+                    tags_post = [&#39;posterior&#39;] * len(post_pred_df)
+                    post_pred_df.insert(
+                        len(post_pred_df.columns), &#34;Tags&#34;, tags_post, True)
+                    f.close()
+                    # Concatenate two dataframes based on x_values
+                    frames = [prior_pred_df, post_pred_df]
+                    all_pred_df = pd.concat(frames)
+
+                    # --- Plot posterior predictive ---
+                    sns.violinplot(
+                        x_key, y=out_name, data=all_pred_df, hue=&#34;Tags&#34;,
+                        legend=False, ax=ax, split=True, inner=None,
+                        color=&#34;.8&#34;)
+
+                    # --- Plot Data ---
+                    # Find the x,y coordinates for each point
+                    x_coords = np.arange(x_coords.shape[0])
+                    first_header = list(self.measured_data)[0]
+                    obs_data = self.measured_data.round({first_header: 6})
+                    sns.pointplot(
+                        x=first_header, y=out_name, color=&#39;g&#39;, markers=&#39;x&#39;,
+                        linestyles=&#39;&#39;, capsize=16, data=obs_data, ax=ax)
+
+                    ax.errorbar(
+                        x_coords, obs_data[out_name].values,
+                        yerr=1.96*self.measurement_error[out_name],
+                        ecolor=&#39;g&#39;, fmt=&#39; &#39;, zorder=-1)
+
+                    # Add labels to the legend
+                    handles, labels = ax.get_legend_handles_labels()
+                    labels.append(&#39;Data&#39;)
+
+                    data_marker = mlines.Line2D(
+                        [], [], color=&#39;lime&#39;, marker=&#39;+&#39;, linestyle=&#39;None&#39;,
+                        markersize=10)
+                    handles.append(data_marker)
+
+                    # Add legend
+                    ax.legend(handles=handles, labels=labels, loc=&#39;best&#39;,
+                              fontsize=&#39;large&#39;, frameon=True)
+
+                else:
+                    # Load posterior predictive
+                    f = h5py.File(f&#34;{out_dir}/postPredictive.hdf5&#34;, &#39;r&#39;)
+
+                    try:
+                        x_coords = np.array(f[&#34;x_values&#34;])
+                    except KeyError:
+                        x_coords = np.array(f[f&#34;x_values/{out_name}&#34;])
+
+                    mu = np.mean(np.array(f[f&#34;EDY/{out_name}&#34;]), axis=0)
+                    std = np.std(np.array(f[f&#34;EDY/{out_name}&#34;]), axis=0)
+
+                    # --- Plot posterior predictive ---
+                    plt.plot(
+                        x_coords, mu, marker=&#39;o&#39;, color=&#39;b&#39;,
+                        label=&#39;Mean Post. Predictive&#39;)
+                    plt.fill_between(
+                        x_coords, mu-1.96*std, mu+1.96*std, color=&#39;b&#39;,
+                        alpha=0.15)
+
+                    # --- Plot Data ---
+                    ax.plot(
+                        x_coords, self.measured_data[out_name].values,
+                        &#39;ko&#39;, label=&#39;data&#39;, markeredgecolor=&#39;w&#39;)
+
+                    # --- Plot ExpDesign ---
+                    orig_ED_Y = self.MetaModel.ExpDesign.Y[out_name]
+                    for output in orig_ED_Y:
+                        plt.plot(
+                            x_coords, output, color=&#39;grey&#39;, alpha=0.15
+                            )
+
+                    # Add labels for axes
+                    plt.xlabel(&#39;Time [s]&#39;)
+                    plt.ylabel(out_name)
+
+                    # Add labels to the legend
+                    handles, labels = ax.get_legend_handles_labels()
+
+                    patch = Patch(color=&#39;b&#39;, alpha=0.15)
+                    handles.insert(1, patch)
+                    labels.insert(1, &#39;95 $\\%$ CI&#39;)
+
+                    # Add legend
+                    ax.legend(handles=handles, labels=labels, loc=&#39;best&#39;,
+                              frameon=True)
+
+                # Save figure in pdf format
+                if self.emulator:
+                    plotname = f&#39;/Post_Prior_Pred_{Model.name}_emulator&#39;
+                else:
+                    plotname = f&#39;/Post_Prior_Pred_{Model.name}&#39;
+
+                fig.savefig(f&#39;./{out_dir}{plotname}_{out_name}.pdf&#39;,
+                            bbox_inches=&#39;tight&#39;)</code></pre>
+</details>
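+<p>A minimal sketch of re-opening the HDF5 files used above for custom
+post-processing (the directory name and the output name <code>'Z'</code> are
+assumptions; the <code>x_values</code> and <code>EDY/&lt;output&gt;</code>
+layout follows the code above):</p>
+<pre><code>&gt;&gt;&gt; import h5py, numpy as np
+&gt;&gt;&gt; with h5py.File('Outputs_Bayes_MyModel_Calib/postPredictive.hdf5', 'r') as f:
+...     x = np.array(f['x_values'])  # or f['x_values/Z'] if stored per output
+...     post = np.array(f['EDY/Z'])  # shape (n_posterior_samples, n_points)
+&gt;&gt;&gt; mu, std = post.mean(axis=0), post.std(axis=0)
+</code></pre>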
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="bayes_inference.BayesInference"><code class="flex name class">
+<span>class <span class="ident">BayesInference</span></span>
+<span>(</span><span>MetaModel, discrepancy=None, emulator=True, name='Calib', bootstrap=False, req_outputs=None, selected_indices=None, samples=None, n_samples=500000, measured_data=None, inference_method='rejection', mcmc_params=None, bayes_loocv=False, n_bootstrap_itrs=1, perturbed_data=[], bootstrap_noise=0.05, plot_post_pred=True, plot_map_pred=False, max_a_posteriori='mean', corner_title_fmt='.3e')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>A class to perform Bayesian Analysis.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>MetaModel</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Meta model object.</dd>
+<dt><strong><code>discrepancy</code></strong> :&ensp;<code>obj</code></dt>
+<dd>The discrepancy object for the sigma2s, i.e. the diagonal entries
+of the variance matrix for a multivariate normal likelihood.</dd>
+<dt><strong><code>name</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>The type of analysis, either calibration (<code>Calib</code>) or validation
+(<code>Valid</code>). The default is <code>'Calib'</code>.</dd>
+<dt><strong><code>emulator</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Analysis with emulator (MetaModel). The default is <code>True</code>.</dd>
+<dt><strong><code>bootstrap</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Bootstrap the analysis. The default is <code>False</code>.</dd>
+<dt><strong><code>req_outputs</code></strong> :&ensp;<code>list</code>, optional</dt>
+<dd>The list of requested outputs to be used for the analysis.
+The default is <code>None</code>. If <code>None</code>, all the defined outputs of the model
+object are used.</dd>
+<dt><strong><code>selected_indices</code></strong> :&ensp;<code>dict</code>, optional</dt>
+<dd>A dictionary with the selected indices of each model output. The
+default is <code>None</code>. If <code>None</code>, all measurement points are used in the
+analysis.</dd>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code>, optional</dt>
+<dd>The samples to be used in the analysis. The default is <code>None</code>. If
+<code>None</code>, the samples are drawn from the probabilistic input parameter
+object of the MetaModel object.</dd>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of samples to be used in the analysis. The default is <code>500000</code>.
+If samples is not <code>None</code>, this argument will be assigned based on the
+number of samples given.</dd>
+<dt><strong><code>measured_data</code></strong> :&ensp;<code>dict</code>, optional</dt>
+<dd>A dictionary containing the observation data. The default is <code>None</code>.
+If <code>None</code>, the observation defined in the Model object of the
+MetaModel is used.</dd>
+<dt><strong><code>inference_method</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>A method for approximating the posterior distribution in the Bayesian
+inference step. The default is <code>'rejection'</code>, which stands for
+rejection sampling. A Markov Chain Monte Carlo sampler can be simply
+selected by passing <code>'MCMC'</code>.</dd>
+<dt><strong><code>mcmc_params</code></strong> :&ensp;<code>dict</code>, optional</dt>
+<dd>
+<p>A dictionary with args required for the Bayesian inference with
+<code>MCMC</code>. The default is <code>None</code>.</p>
+<p>Pass the mcmc_params like the following:</p>
+<pre><code>&gt;&gt;&gt; mcmc_params:{
+    'init_samples': None,  # initial samples
+    'n_walkers': 100,  # number of walkers (chain)
+    'n_steps': 100000,  # number of maximum steps
+    'n_burn': 200,  # number of burn-in steps
+    'moves': None,  # Moves for the emcee sampler
+    'multiprocessing': False,  # multiprocessing
+    'verbose': False # verbosity
+    }
+</code></pre>
+<p>The items shown above are the default values. If any parameter is
+not defined, the default value will be assigned to it.</p>
+</dd>
+<dt><strong><code>bayes_loocv</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Bayesian Leave-one-out Cross Validation. The default is <code>False</code>. If
+<code>True</code>, the LOOCV procedure is used to estimate the Bayesian Model
+Evidence (BME).</dd>
+<dt><strong><code>n_bootstrap_itrs</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of bootstrap iterations. The default is <code>1</code>. If bayes_loocv is
+<code>True</code>, this is set equal to the total length of the observation data
+set.</dd>
+<dt><strong><code>perturbed_data</code></strong> :&ensp;<code>array</code> of <code>shape (n_bootstrap_itrs, n_obs)</code>, optional</dt>
+<dd>User defined perturbed data. The default is <code>[]</code>.</dd>
+<dt><strong><code>bootstrap_noise</code></strong> :&ensp;<code>float</code>, optional</dt>
+<dd>A noise level to perturb the data set. The default is <code>0.05</code>.</dd>
+<dt><strong><code>plot_post_pred</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Plot posterior predictive plots. The default is <code>True</code>.</dd>
+<dt><strong><code>plot_map_pred</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Plot the model outputs vs the metamodel predictions for the maximum
+a posteriori (defined as <code>max_a_posteriori</code>) parameter set. The
+default is <code>False</code>.</dd>
+<dt><strong><code>max_a_posteriori</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Maximum a posteriori. <code>'mean'</code> and <code>'mode'</code> are available. The default
+is <code>'mean'</code>.</dd>
+<dt><strong><code>corner_title_fmt</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Title format for the posterior distribution plot with python
+package <code>corner</code>. The default is <code>'.3e'</code>.</dd>
+</dl></div>
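+<p>A minimal usage sketch (illustrative only: <code>meta_model</code> stands
+for an already-trained metamodel with its model object and experimental
+design attached, <code>disc</code> for a configured discrepancy object, and
+the import path is inferred from the repository layout):</p>
+<pre><code>&gt;&gt;&gt; from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
+&gt;&gt;&gt; bayes = BayesInference(
+...     meta_model, discrepancy=disc, inference_method='MCMC',
+...     mcmc_params={'n_walkers': 50, 'n_steps': 5000})
+&gt;&gt;&gt; bayes = bayes.create_inference()  # returns the fitted object
+&gt;&gt;&gt; posterior = bayes.posterior_df  # DataFrame of posterior samples
+</code></pre>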
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class BayesInference:
+    &#34;&#34;&#34;
+    A class to perform Bayesian Analysis.
+
+
+    Attributes
+    ----------
+    MetaModel : obj
+        Meta model object.
+    discrepancy : obj
+        The discrepancy object for the sigma2s, i.e. the diagonal entries
+        of the variance matrix for a multivariate normal likelihood.
+    name : str, optional
+        The type of analysis, either calibration (`Calib`) or validation
+        (`Valid`). The default is `&#39;Calib&#39;`.
+    emulator : bool, optional
+        Analysis with emulator (MetaModel). The default is `True`.
+    bootstrap : bool, optional
+        Bootstrap the analysis. The default is `False`.
+    req_outputs : list, optional
+        The list of requested outputs to be used for the analysis.
+        The default is `None`. If `None`, all the defined outputs of the model
+        object are used.
+    selected_indices : dict, optional
+        A dictionary with the selected indices of each model output. The
+        default is `None`. If `None`, all measurement points are used in the
+        analysis.
+    samples : array of shape (n_samples, n_params), optional
+        The samples to be used in the analysis. The default is `None`. If
+        `None`, the samples are drawn from the probabilistic input parameter
+        object of the MetaModel object.
+    n_samples : int, optional
+        Number of samples to be used in the analysis. The default is `500000`.
+        If samples is not `None`, this argument will be assigned based on the
+        number of samples given.
+    measured_data : dict, optional
+        A dictionary containing the observation data. The default is `None`.
+        If `None`, the observation defined in the Model object of the
+        MetaModel is used.
+    inference_method : str, optional
+        A method for approximating the posterior distribution in the Bayesian
+        inference step. The default is `&#39;rejection&#39;`, which stands for
+        rejection sampling. A Markov Chain Monte Carlo sampler can be simply
+        selected by passing `&#39;MCMC&#39;`.
+    mcmc_params : dict, optional
+        A dictionary with args required for the Bayesian inference with
+        `MCMC`. The default is `None`.
+
+        Pass the mcmc_params like the following:
+
+            &gt;&gt;&gt; mcmc_params:{
+                &#39;init_samples&#39;: None,  # initial samples
+                &#39;n_walkers&#39;: 100,  # number of walkers (chain)
+                &#39;n_steps&#39;: 100000,  # number of maximum steps
+                &#39;n_burn&#39;: 200,  # number of burn-in steps
+                &#39;moves&#39;: None,  # Moves for the emcee sampler
+                &#39;multiprocessing&#39;: False,  # multiprocessing
+                &#39;verbose&#39;: False # verbosity
+                }
+        The items shown above are the default values. If any parameter is
+        not defined, the default value will be assigned to it.
+    bayes_loocv : bool, optional
+        Bayesian Leave-one-out Cross Validation. The default is `False`. If
+        `True`, the LOOCV procedure is used to estimate the Bayesian Model
+        Evidence (BME).
+    n_bootstrap_itrs : int, optional
+        Number of bootstrap iterations. The default is `1`. If bayes_loocv is
+        `True`, this is set equal to the total length of the observation data
+        set.
+    perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
+        User defined perturbed data. The default is `[]`.
+    bootstrap_noise : float, optional
+        A noise level to perturb the data set. The default is `0.05`.
+    plot_post_pred : bool, optional
+        Plot posterior predictive plots. The default is `True`.
+    plot_map_pred : bool, optional
+        Plot the model outputs vs the metamodel predictions for the maximum
+        a posteriori (defined as `max_a_posteriori`) parameter set. The
+        default is `False`.
+    max_a_posteriori : str, optional
+        Maximum a posteriori. `&#39;mean&#39;` and `&#39;mode&#39;` are available. The default
+        is `&#39;mean&#39;`.
+    corner_title_fmt : str, optional
+        Title format for the posterior distribution plot with python
+        package `corner`. The default is `&#39;.3e&#39;`.
+
+    &#34;&#34;&#34;
+
+    def __init__(self, MetaModel, discrepancy=None, emulator=True,
+                 name=&#39;Calib&#39;, bootstrap=False, req_outputs=None,
+                 selected_indices=None, samples=None, n_samples=500000,
+                 measured_data=None, inference_method=&#39;rejection&#39;,
+                 mcmc_params=None, bayes_loocv=False, n_bootstrap_itrs=1,
+                 perturbed_data=[], bootstrap_noise=0.05, plot_post_pred=True,
+                 plot_map_pred=False, max_a_posteriori=&#39;mean&#39;,
+                 corner_title_fmt=&#39;.3e&#39;):
+
+        self.MetaModel = MetaModel
+        self.Discrepancy = discrepancy
+        self.emulator = emulator
+        self.name = name
+        self.bootstrap = bootstrap
+        self.req_outputs = req_outputs
+        self.selected_indices = selected_indices
+        self.samples = samples
+        self.n_samples = n_samples
+        self.measured_data = measured_data
+        self.inference_method = inference_method
+        self.mcmc_params = mcmc_params
+        self.perturbed_data = perturbed_data
+        self.bayes_loocv = bayes_loocv
+        self.n_bootstrap_itrs = n_bootstrap_itrs
+        self.bootstrap_noise = bootstrap_noise
+        self.plot_post_pred = plot_post_pred
+        self.plot_map_pred = plot_map_pred
+        self.max_a_posteriori = max_a_posteriori
+        self.corner_title_fmt = corner_title_fmt
+
+    # -------------------------------------------------------------------------
+    def create_inference(self):
+        &#34;&#34;&#34;
+        Starts the inference.
+
+        Returns
+        -------
+        BayesInference : obj
+            The Bayes inference object.
+
+        &#34;&#34;&#34;
+
+        # Set some variables
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+        n_params = MetaModel.n_params
+        output_names = Model.Output.names
+        par_names = MetaModel.ExpDesign.par_names
+
+        # If the prior is set by the user, take it.
+        if self.samples is None:
+            self.samples = MetaModel.ExpDesign.generate_samples(
+                self.n_samples, &#39;random&#39;)
+        else:
+            try:
+                samples = self.samples.values
+            except AttributeError:
+                samples = self.samples
+
+            # Take care of an additional Sigma2s
+            self.samples = samples[:, :n_params]
+
+            # Update number of samples
+            self.n_samples = self.samples.shape[0]
+
+        # ---------- Preparation of observation data ----------
+        # Read observation data and perturb it if requested.
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+        # Convert measured_data to a data frame
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # Extract the total number of measurement points
+        if self.name.lower() == &#39;calib&#39;:
+            self.n_tot_measurement = Model.n_obs
+        else:
+            self.n_tot_measurement = Model.n_obs_valid
+
+        # Find measurement error (if not given) for post predictive plot
+        if not hasattr(self, &#39;measurement_error&#39;):
+            if isinstance(self.Discrepancy, dict):
+                Disc = self.Discrepancy[&#39;known&#39;]
+            else:
+                Disc = self.Discrepancy
+            if isinstance(Disc.parameters, dict):
+                self.measurement_error = {k: np.sqrt(Disc.parameters[k]) for k
+                                          in Disc.parameters.keys()}
+            else:
+                try:
+                    self.measurement_error = np.sqrt(Disc.parameters)
+                except TypeError:
+                    pass
+
+        # ---------- Preparation of variance for covariance matrix ----------
+        # Independent and identically distributed
+        total_sigma2 = dict()
+        opt_sigma_flag = isinstance(self.Discrepancy, dict)
+        opt_sigma = None
+        for key_idx, key in enumerate(output_names):
+
+            # Find opt_sigma
+            if opt_sigma_flag and opt_sigma is None:
+                # Option A: known error with unknown bias term
+                opt_sigma = &#39;A&#39;
+                known_discrepancy = self.Discrepancy[&#39;known&#39;]
+                self.Discrepancy = self.Discrepancy[&#39;infer&#39;]
+                sigma2 = np.array(known_discrepancy.parameters[key])
+
+            elif opt_sigma == &#39;A&#39; or self.Discrepancy.parameters is not None:
+                # Option B: The sigma2 is known (no bias term)
+                if opt_sigma == &#39;A&#39;:
+                    sigma2 = np.array(known_discrepancy.parameters[key])
+                else:
+                    opt_sigma = &#39;B&#39;
+                    sigma2 = np.array(self.Discrepancy.parameters[key])
+
+            elif not isinstance(self.Discrepancy.InputDisc, str):
+                # Option C: The sigma2 is unknown (bias term including error)
+                opt_sigma = &#39;C&#39;
+                self.Discrepancy.opt_sigma = opt_sigma
+                n_measurement = self.measured_data[key].values.shape
+                sigma2 = np.zeros((n_measurement[0]))
+
+            total_sigma2[key] = sigma2
+
+            self.Discrepancy.opt_sigma = opt_sigma
+            self.Discrepancy.total_sigma2 = total_sigma2
+
+        # If inferred sigma2s obtained from e.g. calibration are given
+        try:
+            self.sigma2s = self.Discrepancy.get_sample(self.n_samples)
+        except AttributeError:
+            pass
+
+        # ---------------- Bootstrap &amp; TOM --------------------
+        if self.bootstrap or self.bayes_loocv:
+            if len(self.perturbed_data) == 0:
+                # Add zero-mean noise to the observation data
+                self.perturbed_data = self._perturb_data(
+                    self.measured_data, output_names
+                    )
+            else:
+                self.n_bootstrap_itrs = len(self.perturbed_data)
+
+            # -------- Model Discrepancy -----------
+            if hasattr(self, &#39;error_model&#39;) and self.error_model \
+               and self.name.lower() != &#39;calib&#39;:
+                # Select posterior mean as MAP
+                MAP_theta = self.samples.mean(axis=0).reshape((1, n_params))
+                # MAP_theta = stats.mode(self.samples,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=MAP_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+            # -----------------------------------------------------
+            # ----- Loop over the perturbed observation data ------
+            # -----------------------------------------------------
+            # Initialize arrays
+            logLikelihoods = np.zeros((self.n_samples, self.n_bootstrap_itrs),
+                                      dtype=np.float16)
+            BME_Corr = np.zeros((self.n_bootstrap_itrs))
+            log_BME = np.zeros((self.n_bootstrap_itrs))
+            KLD = np.zeros((self.n_bootstrap_itrs))
+            inf_entropy = np.zeros((self.n_bootstrap_itrs))
+
+            # Compute the prior predictions
+            # Evaluate the MetaModel
+            if self.emulator:
+                y_hat, y_std = MetaModel.eval_metamodel(samples=self.samples)
+                self.__mean_pce_prior_pred = y_hat
+                self._std_pce_prior_pred = y_std
+
+                # Correct the predictions with Model discrepancy
+                if hasattr(self, &#39;error_model&#39;) and self.error_model:
+                    y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
+                        self.bias_inputs, self.__mean_pce_prior_pred
+                        )
+                    self.__mean_pce_prior_pred = y_hat_corr
+                    self._std_pce_prior_pred = y_std
+
+                # Surrogate model&#39;s error using RMSE of test data
+                if hasattr(MetaModel, &#39;rmse&#39;):
+                    surrError = MetaModel.rmse
+                else:
+                    surrError = None
+
+            else:
+                # Evaluate the original model
+                self.__model_prior_pred = self._eval_model(
+                    samples=self.samples, key=&#39;PriorPred&#39;
+                    )
+                surrError = None
+
+            # Start the likelihood-BME computations for the perturbed data
+            for itr_idx, data in tqdm(
+                    enumerate(self.perturbed_data), ascii=True,
+                    desc=&#34;Bootstrapping the BME calculations&#34;
+                    ):
+
+                # ---------------- Likelihood calculation ----------------
+                if self.emulator:
+                    model_evals = self.__mean_pce_prior_pred
+                else:
+                    model_evals = self.__model_prior_pred
+
+                # Leave one out
+                if self.bayes_loocv:
+                    self.selected_indices = np.nonzero(data)[0]
+
+                # Prepare data dataframe
+                nobs = list(self.measured_data.count().values[1:])
+                numbers = list(map(sum, zip([0] + nobs, nobs)))
+                indices = list(zip([0] + numbers, numbers))
+                data_dict = {
+                    output_names[i]: data[j:k] for i, (j, k) in
+                    enumerate(indices)
+                    }
+
+                # Unknown sigma2
+                if opt_sigma == &#39;C&#39; or hasattr(self, &#39;sigma2s&#39;):
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        sigma2=self.sigma2s, std=surrError
+                        )
+                else:
+                    # known sigma2
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        std=surrError
+                        )
+
+                # ---------------- BME Calculations ----------------
+                # BME (log)
+                log_BME[itr_idx] = np.log(
+                    np.nanmean(np.exp(logLikelihoods[:, itr_idx],
+                                      dtype=np.float128))
+                    )
+
+                # Rejection Step
+                # Random numbers between 0 and 1
+                unif = np.random.rand(1, self.n_samples)[0]
+
+                # Reject the poorly performed prior
+                Likelihoods = np.exp(logLikelihoods[:, itr_idx],
+                                     dtype=np.float64)
+                accepted = (Likelihoods/np.max(Likelihoods)) &gt;= unif
+                posterior = self.samples[accepted]
+
+                # Posterior-based expectation of likelihoods
+                postExpLikelihoods = np.mean(
+                    logLikelihoods[:, itr_idx][accepted]
+                    )
+
+                # Posterior-based expectation of prior densities
+                postExpPrior = np.mean(
+                    np.log([MetaModel.ExpDesign.JDist.pdf(posterior.T)])
+                    )
+
+                # Calculate Kullback-Leibler Divergence
+                KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
+
+                # Information Entropy based on Entropy paper Eq. 38
+                inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - \
+                    postExpLikelihoods
+
+                # TODO: BME correction when using Emulator
+                # if self.emulator:
+                #     BME_Corr[itr_idx] = self._corr_factor_BME(
+                #         data, total_sigma2, posterior
+                #         )
+
+                # Clear memory
+                gc.collect(generation=2)
+
+            # ---------------- Store BME, Likelihoods for all ----------------
+            # Likelihoods (Size: n_samples, n_bootstrap_itr)
+            self.log_likes = logLikelihoods
+
+            # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
+            self.log_BME = log_BME
+            self.KLD = KLD
+            self.inf_entropy = inf_entropy
+
+            # TODO: BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
+            # if self.emulator: self.BMECorrFactor = BME_Corr
+
+            # BME = BME + BMECorrFactor
+            if self.emulator:
+                self.log_BME = self.log_BME  # + self.BMECorrFactor
+
+        # ---------------- Parameter Bayesian inference ----------------
+        if self.inference_method.lower() == &#39;mcmc&#39;:
+            # Instantiate the MCMC object
+            MCMC_Obj = MCMC(self)
+            self.posterior_df = MCMC_Obj.run_sampler(
+                self.measured_data, total_sigma2
+                )
+
+        elif self.name.lower() == &#39;valid&#39;:
+            # Convert to a dataframe if samples are provided after calibration.
+            self.posterior_df = pd.DataFrame(self.samples, columns=par_names)
+
+        else:
+            # Rejection sampling
+            self.posterior_df = self._rejection_sampling()
+
+        # Provide posterior&#39;s summary
+        print(&#39;\n&#39;)
+        print(&#39;-&#39;*15 + &#39;Posterior summary&#39; + &#39;-&#39;*15)
+        pd.options.display.max_columns = None
+        pd.options.display.max_rows = None
+        print(self.posterior_df.describe())
+        print(&#39;-&#39;*50)
+
+        # -------- Model Discrepancy -----------
+        if hasattr(self, &#39;error_model&#39;) and self.error_model \
+           and self.name.lower() == &#39;calib&#39;:
+            if self.inference_method.lower() == &#39;mcmc&#39;:
+                self.error_MetaModel = MCMC_Obj.error_MetaModel
+            else:
+                # Drop inferred sigma2 columns from the posterior, if present
+                if opt_sigma == &#34;B&#34;:
+                    posterior_df = self.posterior_df.values
+                else:
+                    posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+
+                # Select posterior mean as Maximum a posteriori
+                map_theta = posterior_df.mean(axis=0).reshape((1, n_params))
+                # map_theta = stats.mode(Posterior_df,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=map_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+        # -------- Posterior predictive -----------
+        self._posterior_predictive()
+
+        # -----------------------------------------------------
+        # ------------------ Visualization --------------------
+        # -----------------------------------------------------
+        # Create Output directory, if it doesn&#39;t exist already.
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        os.makedirs(out_dir, exist_ok=True)
+
+        # -------- Posterior parameters --------
+        if opt_sigma != &#34;B&#34;:
+            par_names.extend(
+                [self.Discrepancy.InputDisc.Marginals[i].name for i
+                 in range(len(self.Discrepancy.InputDisc.Marginals))]
+                )
+        # Plot with corner
+        figPosterior = corner.corner(self.posterior_df.to_numpy(),
+                                     labels=par_names,
+                                     quantiles=[0.15, 0.5, 0.85],
+                                     show_titles=True,
+                                     title_fmt=self.corner_title_fmt,
+                                     labelpad=0.2,
+                                     use_math_text=True,
+                                     title_kwargs={&#34;fontsize&#34;: 28},
+                                     plot_datapoints=False,
+                                     plot_density=False,
+                                     fill_contours=True,
+                                     smooth=0.5,
+                                     smooth1d=0.5)
+
+        # Loop over axes and set x limits
+        if opt_sigma == &#34;B&#34;:
+            axes = np.array(figPosterior.axes).reshape(
+                (len(par_names), len(par_names))
+                )
+            for yi in range(len(par_names)):
+                ax = axes[yi, yi]
+                ax.set_xlim(MetaModel.bound_tuples[yi])
+                for xi in range(yi):
+                    ax = axes[yi, xi]
+                    ax.set_xlim(MetaModel.bound_tuples[xi])
+
+        # Turn off gridlines
+        for ax in figPosterior.axes:
+            ax.grid(False)
+
+        if self.emulator:
+            plotname = f&#39;/Posterior_Dist_{Model.name}_emulator&#39;
+        else:
+            plotname = f&#39;/Posterior_Dist_{Model.name}&#39;
+
+        figPosterior.set_size_inches((24, 16))
+        figPosterior.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+
+        # -------- Plot MAP --------
+        if self.plot_map_pred:
+            self._plot_max_a_posteriori()
+
+        # -------- Plot log_BME dist --------
+        if self.bootstrap and self.n_bootstrap_itrs &gt; 1:
+            # Computing the TOM performance
+            self.log_BME_tom = stats.chi2.rvs(
+                self.n_tot_measurement, size=self.log_BME.shape[0]
+                )
+
+            fig, ax = plt.subplots()
+            sns.kdeplot(self.log_BME_tom, ax=ax, color=&#34;green&#34;, shade=True)
+            sns.kdeplot(
+                self.log_BME, ax=ax, color=&#34;blue&#34;, shade=True,
+                label=&#39;Model BME&#39;)
+
+            ax.set_xlabel(&#39;log(BME)&#39;)
+            ax.set_ylabel(&#39;Probability density&#39;)
+
+            legend_elements = [
+                Patch(facecolor=&#39;green&#39;, edgecolor=&#39;green&#39;, label=&#39;TOM BME&#39;),
+                Patch(facecolor=&#39;blue&#39;, edgecolor=&#39;blue&#39;, label=&#39;Model BME&#39;)
+                ]
+            ax.legend(handles=legend_elements)
+
+            if self.emulator:
+                plotname = f&#39;/BME_hist_{Model.name}_emulator&#39;
+            else:
+                plotname = f&#39;/BME_hist_{Model.name}&#39;
+
+            plt.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;, bbox_inches=&#39;tight&#39;)
+            plt.show()
+            plt.close()
+
+        # -------- Posterior predictives --------
+        if self.plot_post_pred:
+            # Plot the posterior predictive
+            self._plot_post_predictive()
+
+        return self
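+    # After create_inference() the object exposes posterior_df (posterior
+    # samples) and, when bootstrapping or bayes_loocv is active, log_likes,
+    # log_BME, KLD and inf_entropy (one entry per bootstrap iteration).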
+
+    # -------------------------------------------------------------------------
+    def _perturb_data(self, data, output_names):
+        &#34;&#34;&#34;
+        Returns an array with n_bootstrap_itrs rows of perturbed data.
+        The first row includes the original observation data.
+        If `self.bayes_loocv` is True, a 2d-array will be returned with
+        repeated rows and zero diagonal entries.
+
+        Parameters
+        ----------
+        data : pandas DataFrame
+            Observation data.
+        output_names : list
+            List of the output names.
+
+        Returns
+        -------
+        final_data : array
+            Perturbed data set.
+
+        &#34;&#34;&#34;
+        noise_level = self.bootstrap_noise
+        obs_data = data[output_names].values
+        n_measurement, n_outs = obs_data.shape
+        self.n_tot_measurement = obs_data[~np.isnan(obs_data)].shape[0]
+        # Number of bootstrap iterations
+        if self.bayes_loocv:
+            self.n_bootstrap_itrs = self.n_tot_measurement
+
+        # Pass loocv dataset
+        if self.bayes_loocv:
+            obs = obs_data.T[~np.isnan(obs_data.T)]
+            final_data = np.repeat(np.atleast_2d(obs), self.n_bootstrap_itrs,
+                                   axis=0)
+            np.fill_diagonal(final_data, 0)
+            return final_data
+
+        else:
+            final_data = np.zeros(
+                (self.n_bootstrap_itrs, self.n_tot_measurement)
+                )
+            final_data[0] = obs_data.T[~np.isnan(obs_data.T)]
+            for itrIdx in range(1, self.n_bootstrap_itrs):
+                data = np.zeros((n_measurement, n_outs))
+                for idx in range(len(output_names)):
+                    std = np.nanstd(obs_data[:, idx])
+                    if std == 0:
+                        std = 0.001
+                    noise = std * noise_level
+                    data[:, idx] = np.add(
+                        obs_data[:, idx],
+                        np.random.normal(0, 1, obs_data.shape[0]) * noise,
+                    )
+
+                final_data[itrIdx] = data.T[~np.isnan(data.T)]
+
+            return final_data
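+    # Illustrative shapes: with n_bootstrap_itrs=3 and four scalar
+    # observations, this returns a (3, 4) array whose first row is the
+    # unperturbed data; with bayes_loocv=True it instead returns a (4, 4)
+    # array with zeros on the diagonal.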
+
+    # -------------------------------------------------------------------------
+    def _logpdf(self, x, mean, cov):
+        &#34;&#34;&#34;
+        Computes the log-likelihood based on a multivariate normal distribution.
+
+        Parameters
+        ----------
+        x : array_like
+            Point(s) at which the log-density is evaluated.
+        mean : array_like
+            Observation data.
+        cov : 2d array
+            Covariance matrix of the distribution.
+
+        Returns
+        -------
+        log_lik : float
+            Log likelihood.
+
+        &#34;&#34;&#34;
+        n = len(mean)
+        L = spla.cholesky(cov, lower=True)
+        beta = np.sum(np.log(np.diag(L)))
+        dev = x - mean
+        alpha = dev.dot(spla.cho_solve((L, True), dev))
+        log_lik = -0.5 * alpha - beta - n / 2. * np.log(2 * np.pi)
+        return log_lik
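+    # Note (illustrative): for a valid covariance matrix this matches
+    # scipy.stats.multivariate_normal.logpdf(x, mean, cov), but the explicit
+    # Cholesky factorization is typically cheaper when the density is
+    # evaluated many times.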
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key=&#39;MAP&#39;):
+        &#34;&#34;&#34;
+        Evaluates the forward model.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets. The default is None.
+        key : str, optional
+            Key string to be passed to the run_model_parallel method.
+            The default is &#39;MAP&#39;.
+
+        Returns
+        -------
+        model_outputs : dict
+            A dictionary containing the model outputs.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        if samples is None:
+            self.samples = MetaModel.ExpDesign.generate_samples(
+                self.n_samples, &#39;random&#39;)
+        else:
+            self.samples = samples
+            self.n_samples = len(samples)
+
+        model_outputs, _ = Model.run_model_parallel(
+            self.samples, key_str=key+self.name)
+
+        # Clean up
+        # Zip the subdirectories
+        try:
+            dir_name = f&#39;{Model.name}MAP{self.name}&#39;
+            key = dir_name + &#39;_&#39;
+            Model.zip_subdirs(dir_name, key)
+        except:
+            pass
+
+        return model_outputs
+
+    # -------------------------------------------------------------------------
+    def _kernel_rbf(self, X, hyperparameters):
+        &#34;&#34;&#34;
+        Isotropic squared exponential kernel.
+
+        Higher l values lead to smoother functions and therefore to coarser
+        approximations of the training data. Lower l values make functions
+        more wiggly with wide uncertainty regions between training data points.
+
+        sigma_f controls the marginal variance of b(x)
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples_X, n_features)
+
+        hyperparameters : Dict
+            Lambda characteristic length
+            sigma_f controls the marginal variance of b(x)
+            sigma_0 unresolvable error nugget term, interpreted as random
+                    error that cannot be attributed to measurement error.
+        Returns
+        -------
+        var_cov_matrix : ndarray of shape (n_samples_X,n_samples_X)
+            Kernel k(X, X).
+
+        &#34;&#34;&#34;
+        from sklearn.gaussian_process.kernels import RBF
+        min_max_scaler = preprocessing.MinMaxScaler()
+        X_minmax = min_max_scaler.fit_transform(X)
+
+        nparams = len(hyperparameters)
+        # characteristic length (0,1]
+        Lambda = hyperparameters[0]
+        # sigma_f controls the marginal variance of b(x)
+        sigma2_f = hyperparameters[1]
+
+        # cov_matrix = sigma2_f*rbf_kernel(X_minmax, gamma = 1/Lambda**2)
+
+        rbf = RBF(length_scale=Lambda)
+        cov_matrix = sigma2_f * rbf(X_minmax)
+        if nparams &gt; 2:
+            # (unresolvable error) nugget term that is interpreted as random
+            # error that cannot be attributed to measurement error.
+            sigma2_0 = hyperparameters[2:]
+            for i, j in np.ndindex(cov_matrix.shape):
+                cov_matrix[i, j] += np.sum(sigma2_0) if i == j else 0
+
+        return cov_matrix
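+    # Illustrative: with hyperparameters = [Lambda, sigma2_f], entry (i, j)
+    # of the returned matrix is
+    # sigma2_f * exp(-||x_i - x_j||**2 / (2 * Lambda**2)) on the min-max
+    # scaled inputs; any further entries are summed into a diagonal
+    # sigma2_0 nugget.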
+
+    # -------------------------------------------------------------------------
+    def normpdf(self, outputs, obs_data, total_sigma2s, sigma2=None, std=None):
+        &#34;&#34;&#34;
+        Calculates the likelihood of simulation outputs compared with
+        observation data.
+
+        Parameters
+        ----------
+        outputs : dict
+            A dictionary containing the simulation outputs as array of shape
+            (n_samples, n_measurement) for each model output.
+        obs_data : dict
+            A dictionary/dataframe containing the observation data.
+        total_sigma2s : dict
+            A dictionary with known values of the covariance diagonal entries,
+            a.k.a sigma^2.
+        sigma2 : array, optional
+            An array of the sigma^2 samples, when the covariance diagonal
+            entries are unknown and are being jointly inferred. The default is
+            None.
+        std : dict, optional
+            A dictionary containing the root mean squared error as array of
+            shape (n_samples, n_measurement) for each model output. The default
+            is None.
+
+        Returns
+        -------
+        logLik : array of shape (n_samples)
+            Likelihoods.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+        logLik = 0.0
+
+        # Extract the requested model outputs for likelihood calculation
+        if self.req_outputs is None:
+            req_outputs = Model.Output.names
+        else:
+            req_outputs = list(self.req_outputs)
+
+        # Loop over the outputs
+        for idx, out in enumerate(req_outputs):
+
+            # (Meta)Model Output
+            nsamples, nout = outputs[out].shape
+
+            # Prepare data and remove NaN
+            try:
+                data = obs_data[out].values[~np.isnan(obs_data[out])]
+            except AttributeError:
+                data = obs_data[out][~np.isnan(obs_data[out])]
+
+            # Prepare sigma2s
+            tot_sigma2s = total_sigma2s[out][~np.isnan(
+                total_sigma2s[out])][:nout]
+
+            # Add the std of the PCE if the emulator is chosen.
+            if self.emulator:
+                if std is not None:
+                    std_pce = std[out]
+                else:
+                    std_pce = np.mean(
+                        self._std_pce_prior_pred[out], axis=0)
+                # Expected value of variance (Assump: i.i.d stds)
+                tot_sigma2s += std_pce**2
+
+            # If sigma2 is not given, use given total_sigma2s
+            if sigma2 is None:
+                logLik += stats.multivariate_normal.logpdf(
+                    outputs[out], data, np.diag(tot_sigma2s))
+                continue
+
+            # Loop over each run/sample and calculate logLikelihood
+            logliks = np.zeros(nsamples)
+            for s_idx in range(nsamples):
+
+                # Simulation run
+                tot_outputs = outputs[out]
+
+                # Covariance Matrix
+                covMatrix = np.diag(tot_sigma2s)
+
+                if sigma2 is not None:
+                    # Check the type error term
+                    if hasattr(self, &#39;bias_inputs&#39;) and \
+                       not hasattr(self, &#39;error_model&#39;):
+                        # Infer a bias model using Gaussian Process Regression
+                        bias_inputs = np.hstack(
+                            (self.bias_inputs[out],
+                             tot_outputs[s_idx].reshape(-1, 1)))
+
+                        params = sigma2[s_idx, idx*3:(idx+1)*3]
+                        covMatrix = self._kernel_rbf(bias_inputs, params)
+                    else:
+                        # Infer equal sigma2s
+                        try:
+                            sigma_2 = sigma2[s_idx, idx]
+                        except TypeError:
+                            sigma_2 = 0.0
+
+                        covMatrix += sigma_2 * np.eye(nout)
+                        # covMatrix = np.diag(sigma2 * total_sigma2s)
+
+                # Select the data points to compare
+                if self.selected_indices is not None:
+                    indices = self.selected_indices[out]
+                    covMatrix = np.diag(covMatrix[indices, indices])
+                else:
+                    indices = list(range(nout))
+
+                # Compute loglikelihood
+                logliks[s_idx] = self._logpdf(
+                    tot_outputs[s_idx, indices], data[indices], covMatrix
+                    )
+
+            logLik += logliks
+        return logLik
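+    # Illustrative call (output name &#39;Z&#39; assumed): with outputs[&#39;Z&#39;] of
+    # shape (n_samples, n_obs) and matching obs_data/total_sigma2s entries,
+    # normpdf(outputs, obs_data, total_sigma2s) returns one log-likelihood
+    # value per prior sample.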
+
+    # -------------------------------------------------------------------------
+    def _corr_factor_BME(self, Data, total_sigma2s, posterior):
+        &#34;&#34;&#34;
+        Calculates the correction factor for BMEs.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        OrigModelOutput = MetaModel.ExpDesign.Y
+        Model = MetaModel.ModelObj
+
+        # Posterior with Gaussian likelihood
+        postDist = stats.gaussian_kde(posterior.T)
+
+        # Remove NaN
+        Data = Data[~np.isnan(Data)]
+        total_sigma2s = total_sigma2s[~np.isnan(total_sigma2s)]
+
+        # Covariance Matrix
+        covMatrix = np.diag(total_sigma2s[:self.n_tot_measurement])
+
+        # Extract the requested model outputs for likelihood calculation
+        if self.req_outputs is None:
+            OutputType = Model.Output.names
+        else:
+            OutputType = list(self.req_outputs)
+
+        # SampleSize = OrigModelOutput[OutputType[0]].shape[0]
+
+
+        # Flatten the OutputType for OrigModel
+        TotalOutputs = np.concatenate([OrigModelOutput[x] for x in OutputType], 1)
+
+        NrofBayesSamples = self.n_samples
+        # Evaluate MetaModel on the experimental design
+        Samples = MetaModel.ExpDesign.X
+        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=Samples)
+
+        # Reset the NrofSamples to NrofBayesSamples
+        self.n_samples = NrofBayesSamples
+
+        # Flatten the OutputType for MetaModel
+        TotalPCEOutputs = np.concatenate([OutputRS[x] for x in OutputRS], 1)
+        TotalPCEstdOutputRS = np.concatenate([stdOutputRS[x] for x in stdOutputRS], 1)
+
+        logweight = 0
+        for i, sample in enumerate(Samples):
+            # Compute likelihood of model output vs response surface
+            covMatrix = np.diag(TotalPCEstdOutputRS[i]**2)
+            logLik = self._logpdf(TotalOutputs[i], TotalPCEOutputs[i], covMatrix)
+            # Compute posterior likelihood of the collocation points
+            logpostLik = np.log(postDist.pdf(sample[:, None]))[0]
+            if logpostLik != -np.inf:
+                logweight += logLik + logpostLik
+        return logweight
+#         # Initialization
+#         covMatrix=np.zeros((NofMeasurements, NofMeasurements), float)
+#         BME_RM_Model_Weight = np.zeros((SampleSize))
+#         BME_RM_Data_Weight = np.zeros((SampleSize))
+#         BME_Corr = np.zeros((1))
+
+
+#         # Deviation Computations
+#         RM_Model_Deviation = np.zeros((SampleSize,NofMeasurements))
+#         RM_Data_Deviation = np.zeros((SampleSize,NofMeasurements))
+#         for i in range(SampleSize):
+#             RM_Model_Deviation[i] = TotalOutputs[i][:NofMeasurements] - TotalPCEOutputs[i, :] # Reduce model- Full Model
+#             RM_Data_Deviation[i] = Observations - TotalPCEOutputs[i, :] # Reduce model- Measurement Data
+
+
+#         # Initialization  of Co-Variance Matrix
+#         # For BME_RM_ModelWeight
+#         if NofMeasurements == 1:
+#             RM_Model_Error = np.zeros((NofMeasurements, NofMeasurements), float)
+#             np.fill_diagonal(RM_Model_Error, np.cov(RM_Model_Deviation.T))
+#         else:
+#             RM_Model_Error = np.cov(RM_Model_Deviation.T)
+
+
+#         # Computation of Weight according to the deviations
+#         for i in range(SampleSize):
+#             # For BME_RM_DataWeight
+#             try:
+#                 var = Sigma[i]
+#                 if len(var)==1:
+#                     np.fill_diagonal(covMatrix, var)
+#                 else:
+#                     row,col = np.diag_indices(covMatrix.shape[0])
+#                     covMatrix[row,col] = np.hstack((np.repeat(var[0], NofMeasurements*0.5),np.repeat(var[1], NofMeasurements*0.5)))
+
+#             except:
+#                 var = Sigma
+
+#             np.fill_diagonal(covMatrix,  var)
+
+#             # Add the std of the PCE is emulator is chosen.
+# #            if self.emulator:
+# #                covMatrix_PCE = np.zeros((NofMeasurements, NofMeasurements), float)
+# #                stdPCE = np.empty((SampleSize,0))
+# #                for outputType in OutputType:
+# #                    stdPCE = np.hstack((stdPCE, stdOutputRS[outputType]))
+# #
+# #                stdPCE = np.mean(stdPCE, axis=1)
+# #                np.fill_diagonal(covMatrix_PCE, stdPCE**2)
+# #
+# #                covMatrix = covMatrix + covMatrix_PCE
+
+#             # Calculate the denominator
+#             denom1 = (np.sqrt(2*np.pi)) ** NofMeasurements
+#             denom2 = (((2*np.pi)**(NofMeasurements/2)) * np.sqrt(np.linalg.det(covMatrix)))
+
+#             BME_RM_Model_Weight[i] =  (np.exp(-0.5 * np.dot(np.dot(RM_Model_Deviation[i], np.linalg.pinv(RM_Model_Error)), RM_Model_Deviation[i])))/denom1
+#             BME_RM_Data_Weight[i] =  (np.exp(-0.5 * np.dot(np.dot(RM_Data_Deviation[i], np.linalg.pinv(covMatrix)), RM_Data_Deviation[i][:,np.newaxis])))/denom2
+
+#         for i in range(SampleSize):
+#             BME_Corr[0] += BME_RM_Model_Weight[i] * BME_RM_Data_Weight[i] / np.nansum(BME_RM_Data_Weight)
+
+#         return np.log(BME_Corr[0])
+
+    # -------------------------------------------------------------------------
+    def _rejection_sampling(self):
+        &#34;&#34;&#34;
+        Performs rejection sampling to update the prior distribution on the
+        input parameters.
+
+        Returns
+        -------
+        posterior : pandas.DataFrame
+            Posterior samples of the input parameters.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except AttributeError:
+            sigma2_prior = None
+
+        # Check if the discrepancy is defined as a distribution:
+        samples = self.samples
+
+        if sigma2_prior is not None:
+            samples = np.hstack((samples, sigma2_prior))
+
+        # Take the first column of Likelihoods (Observation data without noise)
+        likelihoods = np.exp(self.log_likes[:, 0], dtype=np.float128)
+        n_samples = len(likelihoods)
+        norm_likelihoods = likelihoods / np.max(likelihoods)
+
+        # Normalize based on min if all Likelihoods are zero
+        if all(likelihoods == 0.0):
+            likelihoods = self.log_likes[:, 0]
+            norm_likelihoods = likelihoods / np.min(likelihoods)
+
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, n_samples)[0]
+
+        # Reject the poorly performed prior
+        accepted_samples = samples[norm_likelihoods &gt;= unif]
+
+        # Output the Posterior
+        par_names = MetaModel.ExpDesign.par_names
+        if sigma2_prior is not None:
+            for name in self.Discrepancy.name:
+                par_names.append(name)
+
+        return pd.DataFrame(accepted_samples, columns=par_names)
+
+    # -------------------------------------------------------------------------
+    def _posterior_predictive(self):
+        &#34;&#34;&#34;
+        Stores the prior- and posterior predictive samples, i.e. model
+        evaluations using the samples, into hdf5 files.
+
+        priorPredictive.hdf5 : Prior predictive samples.
+        postPredictive_wo_noise.hdf5 : Posterior predictive samples without
+        the additive noise.
+        postPredictive.hdf5 : Posterior predictive samples with the additive
+        noise.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        # Make a directory to save the prior/posterior predictive
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        os.makedirs(out_dir, exist_ok=True)
+
+        # Read observation data and perturb it if requested
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # X_values
+        x_values = MetaModel.ExpDesign.x_values
+
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except AttributeError:
+            sigma2_prior = None
+
+        # Extract posterior samples
+        posterior_df = self.posterior_df
+
+        # Take care of the sigma2
+        if sigma2_prior is not None:
+            try:
+                sigma2s = posterior_df[self.Discrepancy.name].values
+                posterior_df = posterior_df.drop(
+                    labels=self.Discrepancy.name, axis=1
+                    )
+            except KeyError:
+                sigma2s = self.sigma2s
+
+        # Posterior predictive
+        if self.emulator:
+            if self.inference_method == &#39;rejection&#39;:
+                prior_pred = self.__mean_pce_prior_pred
+            if self.name.lower() != &#39;calib&#39;:
+                post_pred = self.__mean_pce_prior_pred
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred, post_pred_std = MetaModel.eval_metamodel(
+                    samples=posterior_df.values
+                    )
+
+        else:
+            if self.inference_method == &#39;rejection&#39;:
+                prior_pred = self.__model_prior_pred
+            if self.name.lower() != &#39;calib&#39;:
+                post_pred = self.__mean_pce_prior_pred
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred = self._eval_model(
+                    samples=posterior_df.values, key=&#39;PostPred&#39;
+                    )
+        # Correct the predictions with Model discrepancy
+        if hasattr(self, &#39;error_model&#39;) and self.error_model:
+            y_hat, y_std = self.error_MetaModel.eval_model_error(
+                self.bias_inputs, post_pred
+                )
+            post_pred, post_pred_std = y_hat, y_std
+
+        # Add discrepancy from likelihood samples to the current posterior runs
+        total_sigma2 = self.Discrepancy.total_sigma2
+        post_pred_withnoise = copy.deepcopy(post_pred)
+        for varIdx, var in enumerate(Model.Output.names):
+            for i in range(len(post_pred[var])):
+                pred = post_pred[var][i]
+
+                # Known sigma2s
+                clean_sigma2 = total_sigma2[var][~np.isnan(total_sigma2[var])]
+                tot_sigma2 = clean_sigma2[:len(pred)]
+                cov = np.diag(tot_sigma2)
+
+                # Check the type error term
+                if sigma2_prior is not None:
+                    # Inferred sigma2s
+                    if hasattr(self, &#39;bias_inputs&#39;) and \
+                       not hasattr(self, &#39;error_model&#39;):
+                        # TODO: Infer a bias model using GPR
+                        bias_inputs = np.hstack((
+                            self.bias_inputs[var], pred.reshape(-1, 1)))
+                        params = sigma2s[i, varIdx*3:(varIdx+1)*3]
+                        cov = self._kernel_rbf(bias_inputs, params)
+                    else:
+                        # Infer equal sigma2s
+                        try:
+                            sigma2 = sigma2s[i, varIdx]
+                        except TypeError:
+                            sigma2 = 0.0
+
+                        # Convert biasSigma2s to a covMatrix
+                        cov += sigma2 * np.eye(len(pred))
+
+                if self.emulator:
+                    if hasattr(MetaModel, &#39;rmse&#39;) and \
+                       MetaModel.rmse is not None:
+                        stdPCE = MetaModel.rmse[var]
+                    else:
+                        stdPCE = post_pred_std[var][i]
+                    # Expected value of variance (Assump: i.i.d stds)
+                    cov += np.diag(stdPCE**2)
+
+                # Sample a multivariate normal distribution with mean of
+                # prediction and variance of cov
+                post_pred_withnoise[var][i] = np.random.multivariate_normal(
+                    pred, cov, 1
+                    )
+
+        # ----- Prior Predictive -----
+        if self.inference_method.lower() == &#39;rejection&#39;:
+            # Create hdf5 metadata
+            hdf5file = f&#39;{out_dir}/priorPredictive.hdf5&#39;
+            hdf5_exist = os.path.exists(hdf5file)
+            if hdf5_exist:
+                os.remove(hdf5file)
+            file = h5py.File(hdf5file, &#39;a&#39;)
+
+            # Store x_values
+            if type(x_values) is dict:
+                grp_x_values = file.create_group(&#34;x_values/&#34;)
+                for varIdx, var in enumerate(Model.Output.names):
+                    grp_x_values.create_dataset(var, data=x_values[var])
+            else:
+                file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+            # Store posterior predictive
+            grpY = file.create_group(&#34;EDY/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grpY.create_dataset(var, data=prior_pred[var])
+
+        # ----- Posterior Predictive only model evaluations -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+&#39;/postPredictive_wo_noise.hdf5&#39;
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, &#39;a&#39;)
+
+        # Store x_values
+        if type(x_values) is dict:
+            grp_x_values = file.create_group(&#34;x_values/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group(&#34;EDY/&#34;)
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred[var])
+
+        # ----- Posterior Predictive with noise -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+&#39;/postPredictive.hdf5&#39;
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, &#39;a&#39;)
+
+        # Store x_values
+        if type(x_values) is dict:
+            grp_x_values = file.create_group(&#34;x_values/&#34;)
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset(&#34;x_values&#34;, data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group(&#34;EDY/&#34;)
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred_withnoise[var])
+
+        return
+
+    # -------------------------------------------------------------------------
+    def _plot_max_a_posteriori(self):
+        &#34;&#34;&#34;
+        Plots the response of the model output against that of the metamodel at
+        the maximum a posteriori sample (mean or mode of posterior.)
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        opt_sigma = self.Discrepancy.opt_sigma
+
+        # -------- Find MAP and run MetaModel and origModel --------
+        # Compute the MAP
+        # Strip the inferred sigma2 columns unless sigma2 is known (Option B)
+        if opt_sigma == &#34;B&#34;:
+            Posterior_df = self.posterior_df.values
+        else:
+            Posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+
+        if self.max_a_posteriori.lower() == &#39;mean&#39;:
+            map_theta = Posterior_df.mean(axis=0).reshape(
+                (1, MetaModel.n_params))
+        else:
+            map_theta = stats.mode(Posterior_df, axis=0)[0]
+        # Print report
+        print(&#34;\nPoint estimator:\n&#34;, map_theta[0])
+
+        # Run the models for MAP
+        # MetaModel
+        map_metamodel_mean, map_metamodel_std = MetaModel.eval_metamodel(
+            samples=map_theta)
+        self.map_metamodel_mean = map_metamodel_mean
+        self.map_metamodel_std = map_metamodel_std
+
+        # origModel
+        map_orig_model = self._eval_model(samples=map_theta)
+        self.map_orig_model = map_orig_model
+
+        # Extract slicing index
+        x_values = map_orig_model[&#39;x_values&#39;]
+
+        # List of markers and colors
+        Color = [&#39;k&#39;, &#39;b&#39;, &#39;g&#39;, &#39;r&#39;]
+        Marker = &#39;x&#39;
+
+        # Create a PdfPages object
+        pdf = PdfPages(f&#39;./{out_dir}/MAP_PCE_vs_Model_{self.name}.pdf&#39;)
+        fig = plt.figure()
+        for i, key in enumerate(Model.Output.names):
+
+            y_val = map_orig_model[key]
+            y_pce_val = map_metamodel_mean[key]
+            y_pce_val_std = map_metamodel_std[key]
+
+            plt.plot(x_values, y_val, color=Color[i], marker=Marker,
+                     lw=2.0, label=&#39;$Y_{MAP}^{M}$&#39;)
+
+            plt.plot(
+                x_values, y_pce_val[i], color=Color[i], lw=2.0,
+                marker=Marker, linestyle=&#39;--&#39;, label=&#39;$Y_{MAP}^{PCE}$&#39;
+                )
+            # plot the confidence interval
+            plt.fill_between(
+                x_values, y_pce_val[i] - 1.96*y_pce_val_std[i],
+                y_pce_val[i] + 1.96*y_pce_val_std[i],
+                color=Color[i], alpha=0.15
+                )
+
+            # Calculate the adjusted R_squared and RMSE
+            R2 = r2_score(y_pce_val.reshape(-1, 1), y_val.reshape(-1, 1))
+            rmse = np.sqrt(mean_squared_error(y_pce_val, y_val))
+
+            plt.ylabel(key)
+            plt.xlabel(&#34;Time [s]&#34;)
+            plt.title(f&#39;Model vs MetaModel {key}&#39;)
+
+            ax = fig.axes[0]
+            leg = ax.legend(loc=&#39;best&#39;, frameon=True)
+            fig.canvas.draw()
+            p = leg.get_window_extent().transformed(ax.transAxes.inverted())
+            ax.text(
+                p.p0[1]-0.05, p.p1[1]-0.25,
+                f&#39;RMSE = {rmse:.3f}\n$R^2$ = {R2:.3f}&#39;,
+                transform=ax.transAxes, color=&#39;black&#39;,
+                bbox=dict(facecolor=&#39;none&#39;, edgecolor=&#39;black&#39;,
+                          boxstyle=&#39;round,pad=1&#39;))
+
+            plt.show()
+
+            # save the current figure
+            pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+            # Destroy the current plot
+            plt.clf()
+
+        pdf.close()
+
+    # -------------------------------------------------------------------------
+    def _plot_post_predictive(self):
+        &#34;&#34;&#34;
+        Plots the posterior predictives against the observation data.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        Model = self.MetaModel.ModelObj
+        out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+        # Plot the posterior predictive
+        for out_idx, out_name in enumerate(Model.Output.names):
+            fig, ax = plt.subplots()
+            with sns.axes_style(&#34;ticks&#34;):
+                x_key = list(self.measured_data)[0]
+
+                # --- Read prior and posterior predictive ---
+                if self.inference_method == &#39;rejection&#39;:
+                    #  --- Prior ---
+                    # Load posterior predictive
+                    f = h5py.File(
+                        f&#39;{out_dir}/priorPredictive.hdf5&#39;, &#39;r+&#39;)
+
+                    try:
+                        x_coords = np.array(f[f&#34;x_values/{out_name}&#34;])
+                    except KeyError:
+                        x_coords = np.array(f[&#34;x_values&#34;])
+
+                    X_values = np.repeat(x_coords, 10000)
+
+                    prior_pred_df = {}
+                    prior_pred_df[x_key] = X_values
+                    prior_pred_df[out_name] = np.array(
+                        f[f&#34;EDY/{out_name}&#34;])[:10000].flatten(&#39;F&#39;)
+                    prior_pred_df = pd.DataFrame(prior_pred_df)
+
+                    tags_post = [&#39;prior&#39;] * len(prior_pred_df)
+                    prior_pred_df.insert(
+                        len(prior_pred_df.columns), &#34;Tags&#34;, tags_post,
+                        True)
+                    f.close()
+
+                    # --- Posterior ---
+                    f = h5py.File(f&#34;{out_dir}/postPredictive.hdf5&#34;, &#39;r+&#39;)
+
+                    X_values = np.repeat(
+                        x_coords, np.array(f[f&#34;EDY/{out_name}&#34;]).shape[0])
+
+                    post_pred_df = {}
+                    post_pred_df[x_key] = X_values
+                    post_pred_df[out_name] = np.array(
+                        f[f&#34;EDY/{out_name}&#34;]).flatten(&#39;F&#39;)
+
+                    post_pred_df = pd.DataFrame(post_pred_df)
+
+                    tags_post = [&#39;posterior&#39;] * len(post_pred_df)
+                    post_pred_df.insert(
+                        len(post_pred_df.columns), &#34;Tags&#34;, tags_post, True)
+                    f.close()
+                    # Concatenate two dataframes based on x_values
+                    frames = [prior_pred_df, post_pred_df]
+                    all_pred_df = pd.concat(frames)
+
+                    # --- Plot posterior predictive ---
+                    sns.violinplot(
+                        x=x_key, y=out_name, data=all_pred_df, hue=&#34;Tags&#34;,
+                        legend=False, ax=ax, split=True, inner=None,
+                        color=&#34;.8&#34;)
+
+                    # --- Plot Data ---
+                    # Find the x,y coordinates for each point
+                    x_coords = np.arange(x_coords.shape[0])
+                    first_header = list(self.measured_data)[0]
+                    obs_data = self.measured_data.round({first_header: 6})
+                    sns.pointplot(
+                        x=first_header, y=out_name, color=&#39;g&#39;, markers=&#39;x&#39;,
+                        linestyles=&#39;&#39;, capsize=16, data=obs_data, ax=ax)
+
+                    ax.errorbar(
+                        x_coords, obs_data[out_name].values,
+                        yerr=1.96*self.measurement_error[out_name],
+                        ecolor=&#39;g&#39;, fmt=&#39; &#39;, zorder=-1)
+
+                    # Add labels to the legend
+                    handles, labels = ax.get_legend_handles_labels()
+                    labels.append(&#39;Data&#39;)
+
+                    data_marker = mlines.Line2D(
+                        [], [], color=&#39;lime&#39;, marker=&#39;+&#39;, linestyle=&#39;None&#39;,
+                        markersize=10)
+                    handles.append(data_marker)
+
+                    # Add legend
+                    ax.legend(handles=handles, labels=labels, loc=&#39;best&#39;,
+                              fontsize=&#39;large&#39;, frameon=True)
+
+                else:
+                    # Load posterior predictive
+                    f = h5py.File(f&#34;{out_dir}/postPredictive.hdf5&#34;, &#39;r+&#39;)
+
+                    try:
+                        x_coords = np.array(f[&#34;x_values&#34;])
+                    except KeyError:
+                        x_coords = np.array(f[f&#34;x_values/{out_name}&#34;])
+
+                    mu = np.mean(np.array(f[f&#34;EDY/{out_name}&#34;]), axis=0)
+                    std = np.std(np.array(f[f&#34;EDY/{out_name}&#34;]), axis=0)
+
+                    # --- Plot posterior predictive ---
+                    plt.plot(
+                        x_coords, mu, marker=&#39;o&#39;, color=&#39;b&#39;,
+                        label=&#39;Mean Post. Predictive&#39;)
+                    plt.fill_between(
+                        x_coords, mu-1.96*std, mu+1.96*std, color=&#39;b&#39;,
+                        alpha=0.15)
+
+                    # --- Plot Data ---
+                    ax.plot(
+                        x_coords, self.measured_data[out_name].values,
+                        &#39;ko&#39;, label=&#39;data&#39;, markeredgecolor=&#39;w&#39;)
+
+                    # --- Plot ExpDesign ---
+                    orig_ED_Y = self.MetaModel.ExpDesign.Y[out_name]
+                    for output in orig_ED_Y:
+                        plt.plot(
+                            x_coords, output, color=&#39;grey&#39;, alpha=0.15
+                            )
+
+                    # Add labels for axes
+                    plt.xlabel(&#39;Time [s]&#39;)
+                    plt.ylabel(out_name)
+
+                    # Add labels to the legend
+                    handles, labels = ax.get_legend_handles_labels()
+
+                    patch = Patch(color=&#39;b&#39;, alpha=0.15)
+                    handles.insert(1, patch)
+                    labels.insert(1, &#39;95 $\\%$ CI&#39;)
+
+                    # Add legend
+                    ax.legend(handles=handles, labels=labels, loc=&#39;best&#39;,
+                              frameon=True)
+
+                # Save figure in pdf format
+                if self.emulator:
+                    plotname = f&#39;/Post_Prior_Pred_{Model.name}_emulator&#39;
+                else:
+                    plotname = f&#39;/Post_Prior_Pred_{Model.name}&#39;
+
+                fig.savefig(f&#39;./{out_dir}{plotname}_{out_name}.pdf&#39;,
+                            bbox_inches=&#39;tight&#39;)</code></pre>
+</details>
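+<p>The private helper <code>_posterior_predictive</code> shown above stores its
+results in HDF5 files with one dataset per model output under the
+<code>EDY/</code> group. A minimal sketch of reading those predictions back,
+assuming a hypothetical model named <code>model</code>, an inference name
+<code>Calib</code>, and a single output group <code>Z</code> (all
+placeholders):</p>
+<pre><code class="python">import h5py
+import numpy as np
+
+# Directory name follows the pattern used above:
+# Outputs_Bayes_{Model.name}_{self.name}
+with h5py.File(&#39;Outputs_Bayes_model_Calib/postPredictive.hdf5&#39;, &#39;r&#39;) as f:
+    # x_values is either a flat dataset or one dataset per output group
+    x_values = np.array(f[&#39;x_values&#39;])
+    post_pred = np.array(f[&#39;EDY/Z&#39;])  # (n_posterior_samples, n_points)
+
+# Point-wise summary of the posterior predictive distribution
+mu, std = post_pred.mean(axis=0), post_pred.std(axis=0)</code></pre>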
+<h3>Methods</h3>
+<dl>
+<dt id="bayes_inference.BayesInference.create_inference"><code class="name flex">
+<span>def <span class="ident">create_inference</span></span>(<span>self)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Starts the inference.</p>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>BayesInference</code></strong> :&ensp;<code>obj</code></dt>
+<dd>The Bayes inference object.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def create_inference(self):
+    &#34;&#34;&#34;
+    Starts the inference.
+
+    Returns
+    -------
+    BayesInference : obj
+        The Bayes inference object.
+
+    &#34;&#34;&#34;
+
+    # Set some variables
+    MetaModel = self.MetaModel
+    Model = MetaModel.ModelObj
+    n_params = MetaModel.n_params
+    output_names = Model.Output.names
+    par_names = MetaModel.ExpDesign.par_names
+
+    # If the prior is set by the user, take it.
+    if self.samples is None:
+        self.samples = MetaModel.ExpDesign.generate_samples(
+            self.n_samples, &#39;random&#39;)
+    else:
+        try:
+            samples = self.samples.values
+        except AttributeError:
+            samples = self.samples
+
+        # Take care of an additional Sigma2s
+        self.samples = samples[:, :n_params]
+
+        # Update number of samples
+        self.n_samples = self.samples.shape[0]
+
+    # ---------- Preparation of observation data ----------
+    # Read observation data and perturb it if requested.
+    if self.measured_data is None:
+        self.measured_data = Model.read_observation(case=self.name)
+    # Convert measured_data to a data frame
+    if not isinstance(self.measured_data, pd.DataFrame):
+        self.measured_data = pd.DataFrame(self.measured_data)
+
+    # Extract the total number of measurement points
+    if self.name.lower() == &#39;calib&#39;:
+        self.n_tot_measurement = Model.n_obs
+    else:
+        self.n_tot_measurement = Model.n_obs_valid
+
+    # Find measurement error (if not given) for post predictive plot
+    if not hasattr(self, &#39;measurement_error&#39;):
+        if isinstance(self.Discrepancy, dict):
+            Disc = self.Discrepancy[&#39;known&#39;]
+        else:
+            Disc = self.Discrepancy
+        if isinstance(Disc.parameters, dict):
+            self.measurement_error = {k: np.sqrt(Disc.parameters[k]) for k
+                                      in Disc.parameters.keys()}
+        else:
+            try:
+                self.measurement_error = np.sqrt(Disc.parameters)
+            except TypeError:
+                pass
+
+    # ---------- Preparation of variance for covariance matrix ----------
+    # Independent and identically distributed
+    total_sigma2 = dict()
+    opt_sigma_flag = isinstance(self.Discrepancy, dict)
+    opt_sigma = None
+    for key_idx, key in enumerate(output_names):
+
+        # Find opt_sigma
+        if opt_sigma_flag and opt_sigma is None:
+            # Option A: known error with unknown bias term
+            opt_sigma = &#39;A&#39;
+            known_discrepancy = self.Discrepancy[&#39;known&#39;]
+            self.Discrepancy = self.Discrepancy[&#39;infer&#39;]
+            sigma2 = np.array(known_discrepancy.parameters[key])
+
+        elif opt_sigma == &#39;A&#39; or self.Discrepancy.parameters is not None:
+            # Option B: The sigma2 is known (no bias term)
+            if opt_sigma == &#39;A&#39;:
+                sigma2 = np.array(known_discrepancy.parameters[key])
+            else:
+                opt_sigma = &#39;B&#39;
+                sigma2 = np.array(self.Discrepancy.parameters[key])
+
+        elif not isinstance(self.Discrepancy.InputDisc, str):
+            # Option C: The sigma2 is unknown (bias term including error)
+            opt_sigma = &#39;C&#39;
+            self.Discrepancy.opt_sigma = opt_sigma
+            n_measurement = self.measured_data[key].values.shape
+            sigma2 = np.zeros((n_measurement[0]))
+
+        total_sigma2[key] = sigma2
+
+        self.Discrepancy.opt_sigma = opt_sigma
+        self.Discrepancy.total_sigma2 = total_sigma2
+
+    # If inferred sigma2s obtained from e.g. calibration are given
+    try:
+        self.sigma2s = self.Discrepancy.get_sample(self.n_samples)
+    except Exception:
+        pass
+
+    # ---------------- Bootstrap &amp; TOM --------------------
+    if self.bootstrap or self.bayes_loocv:
+        if len(self.perturbed_data) == 0:
+            # Add zero-mean noise to perturb the observation data
+            self.perturbed_data = self._perturb_data(
+                self.measured_data, output_names
+                )
+        else:
+            self.n_bootstrap_itrs = len(self.perturbed_data)
+
+        # -------- Model Discrepancy -----------
+        if hasattr(self, &#39;error_model&#39;) and self.error_model \
+           and self.name.lower() != &#39;calib&#39;:
+            # Select posterior mean as MAP
+            MAP_theta = self.samples.mean(axis=0).reshape((1, n_params))
+            # MAP_theta = stats.mode(self.samples,axis=0)[0]
+
+            # Evaluate the (meta-)model at the MAP
+            y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=MAP_theta)
+
+            # Train a GPR meta-model using MAP
+            self.error_MetaModel = MetaModel.create_model_error(
+                self.bias_inputs, y_MAP, Name=self.name
+                )
+
+        # -----------------------------------------------------
+        # ----- Loop over the perturbed observation data ------
+        # -----------------------------------------------------
+        # Initialize arrays
+        logLikelihoods = np.zeros((self.n_samples, self.n_bootstrap_itrs),
+                                  dtype=np.float16)
+        BME_Corr = np.zeros((self.n_bootstrap_itrs))
+        log_BME = np.zeros((self.n_bootstrap_itrs))
+        KLD = np.zeros((self.n_bootstrap_itrs))
+        inf_entropy = np.zeros((self.n_bootstrap_itrs))
+
+        # Compute the prior predictions
+        # Evaluate the MetaModel
+        if self.emulator:
+            y_hat, y_std = MetaModel.eval_metamodel(samples=self.samples)
+            self.__mean_pce_prior_pred = y_hat
+            self._std_pce_prior_pred = y_std
+
+            # Correct the predictions with Model discrepancy
+            if hasattr(self, &#39;error_model&#39;) and self.error_model:
+                y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
+                    self.bias_inputs, self.__mean_pce_prior_pred
+                    )
+                self.__mean_pce_prior_pred = y_hat_corr
+                self._std_pce_prior_pred = y_std
+
+            # Surrogate model&#39;s error using RMSE of test data
+            if hasattr(MetaModel, &#39;rmse&#39;):
+                surrError = MetaModel.rmse
+            else:
+                surrError = None
+
+        else:
+            # No surrogate error estimate when the original model is run
+            surrError = None
+
+            # Evaluate the original model
+            self.__model_prior_pred = self._eval_model(
+                samples=self.samples, key=&#39;PriorPred&#39;
+                )
+
+        # Start the likelihood-BME computations for the perturbed data
+        for itr_idx, data in tqdm(
+                enumerate(self.perturbed_data), ascii=True,
+                desc=&#34;Bootstrapping the BME calculations&#34;
+                ):
+
+            # ---------------- Likelihood calculation ----------------
+            if self.emulator:
+                model_evals = self.__mean_pce_prior_pred
+            else:
+                model_evals = self.__model_prior_pred
+
+            # Leave one out
+            if self.bayes_loocv:
+                self.selected_indices = np.nonzero(data)[0]
+
+            # Prepare data dataframe
+            nobs = list(self.measured_data.count().values[1:])
+            numbers = list(map(sum, zip([0] + nobs, nobs)))
+            indices = list(zip([0] + numbers, numbers))
+            data_dict = {
+                output_names[i]: data[j:k] for i, (j, k) in
+                enumerate(indices)
+                }
+
+            # Unknown sigma2
+            if opt_sigma == &#39;C&#39; or hasattr(self, &#39;sigma2s&#39;):
+                logLikelihoods[:, itr_idx] = self.normpdf(
+                    model_evals, data_dict, total_sigma2,
+                    sigma2=self.sigma2s, std=surrError
+                    )
+            else:
+                # known sigma2
+                logLikelihoods[:, itr_idx] = self.normpdf(
+                    model_evals, data_dict, total_sigma2,
+                    std=surrError
+                    )
+
+            # ---------------- BME Calculations ----------------
+            # BME (log)
+            log_BME[itr_idx] = np.log(
+                np.nanmean(np.exp(logLikelihoods[:, itr_idx],
+                                  dtype=np.float128))
+                )
+
+            # Rejection Step
+            # Random numbers between 0 and 1
+            unif = np.random.rand(1, self.n_samples)[0]
+
+            # Reject the poorly performed prior
+            Likelihoods = np.exp(logLikelihoods[:, itr_idx],
+                                 dtype=np.float64)
+            accepted = (Likelihoods/np.max(Likelihoods)) &gt;= unif
+            posterior = self.samples[accepted]
+
+            # Posterior-based expectation of likelihoods
+            postExpLikelihoods = np.mean(
+                logLikelihoods[:, itr_idx][accepted]
+                )
+
+            # Posterior-based expectation of prior densities
+            postExpPrior = np.mean(
+                np.log([MetaModel.ExpDesign.JDist.pdf(posterior.T)])
+                )
+
+            # Calculate Kullback-Leibler Divergence
+            KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
+
+            # Information Entropy based on Entropy paper Eq. 38
+            inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - \
+                postExpLikelihoods
+
+            # TODO: BME correction when using Emulator
+            # if self.emulator:
+            #     BME_Corr[itr_idx] = self._corr_factor_BME(
+            #         data, total_sigma2, posterior
+            #         )
+
+            # Clear memory
+            gc.collect(generation=2)
+
+        # ---------------- Store BME, Likelihoods for all ----------------
+        # Likelihoods (Size: n_samples, n_bootstrap_itr)
+        self.log_likes = logLikelihoods
+
+        # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
+        self.log_BME = log_BME
+        self.KLD = KLD
+        self.inf_entropy = inf_entropy
+
+        # TODO: BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
+        # if self.emulator: self.BMECorrFactor = BME_Corr
+
+        # BME = BME + BMECorrFactor
+        if self.emulator:
+            self.log_BME = self.log_BME  # + self.BMECorrFactor
+
+    # ---------------- Parameter Bayesian inference ----------------
+    if self.inference_method.lower() == &#39;mcmc&#39;:
+        # Instantiate the MCMC object
+        MCMC_Obj = MCMC(self)
+        self.posterior_df = MCMC_Obj.run_sampler(
+            self.measured_data, total_sigma2
+            )
+
+    elif self.name.lower() == &#39;valid&#39;:
+        # Convert to a dataframe if samples are provided after calibration.
+        self.posterior_df = pd.DataFrame(self.samples, columns=par_names)
+
+    else:
+        # Rejection sampling
+        self.posterior_df = self._rejection_sampling()
+
+    # Provide posterior&#39;s summary
+    print(&#39;\n&#39;)
+    print(&#39;-&#39;*15 + &#39;Posterior summary&#39; + &#39;-&#39;*15)
+    pd.options.display.max_columns = None
+    pd.options.display.max_rows = None
+    print(self.posterior_df.describe())
+    print(&#39;-&#39;*50)
+
+    # -------- Model Discrepancy -----------
+    if hasattr(self, &#39;error_model&#39;) and self.error_model \
+       and self.name.lower() == &#39;calib&#39;:
+        if self.inference_method.lower() == &#39;mcmc&#39;:
+            self.error_MetaModel = MCMC_Obj.error_MetaModel
+        else:
+            # Select posterior mean as MAP
+            if opt_sigma == &#34;B&#34;:
+                posterior_df = self.posterior_df.values
+            else:
+                posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+
+            # Select posterior mean as Maximum a posteriori
+            map_theta = posterior_df.mean(axis=0).reshape((1, n_params))
+            # map_theta = stats.mode(Posterior_df,axis=0)[0]
+
+            # Evaluate the (meta-)model at the MAP
+            y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=map_theta)
+
+            # Train a GPR meta-model using MAP
+            self.error_MetaModel = MetaModel.create_model_error(
+                self.bias_inputs, y_MAP, Name=self.name
+                )
+
+    # -------- Posterior predictive -----------
+    self._posterior_predictive()
+
+    # -----------------------------------------------------
+    # ------------------ Visualization --------------------
+    # -----------------------------------------------------
+    # Create Output directory, if it doesn&#39;t exist already.
+    out_dir = f&#39;Outputs_Bayes_{Model.name}_{self.name}&#39;
+    os.makedirs(out_dir, exist_ok=True)
+
+    # -------- Posterior parameters --------
+    if opt_sigma != &#34;B&#34;:
+        par_names.extend(
+            [self.Discrepancy.InputDisc.Marginals[i].name for i
+             in range(len(self.Discrepancy.InputDisc.Marginals))]
+            )
+    # Plot with corner
+    figPosterior = corner.corner(self.posterior_df.to_numpy(),
+                                 labels=par_names,
+                                 quantiles=[0.15, 0.5, 0.85],
+                                 show_titles=True,
+                                 title_fmt=self.corner_title_fmt,
+                                 labelpad=0.2,
+                                 use_math_text=True,
+                                 title_kwargs={&#34;fontsize&#34;: 28},
+                                 plot_datapoints=False,
+                                 plot_density=False,
+                                 fill_contours=True,
+                                 smooth=0.5,
+                                 smooth1d=0.5)
+
+    # Loop over axes and set x limits
+    if opt_sigma == &#34;B&#34;:
+        axes = np.array(figPosterior.axes).reshape(
+            (len(par_names), len(par_names))
+            )
+        for yi in range(len(par_names)):
+            ax = axes[yi, yi]
+            ax.set_xlim(MetaModel.bound_tuples[yi])
+            for xi in range(yi):
+                ax = axes[yi, xi]
+                ax.set_xlim(MetaModel.bound_tuples[xi])
+
+    # Turn off gridlines
+    for ax in figPosterior.axes:
+        ax.grid(False)
+
+    if self.emulator:
+        plotname = f&#39;/Posterior_Dist_{Model.name}_emulator&#39;
+    else:
+        plotname = f&#39;/Posterior_Dist_{Model.name}&#39;
+
+    figPosterior.set_size_inches((24, 16))
+    figPosterior.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;,
+                         bbox_inches=&#39;tight&#39;)
+
+    # -------- Plot MAP --------
+    if self.plot_map_pred:
+        self._plot_max_a_posteriori()
+
+    # -------- Plot log_BME dist --------
+    if self.bootstrap and self.n_bootstrap_itrs &gt; 1:
+        # Computing the TOM performance
+        self.log_BME_tom = stats.chi2.rvs(
+            self.n_tot_measurement, size=self.log_BME.shape[0]
+            )
+
+        fig, ax = plt.subplots()
+        sns.kdeplot(self.log_BME_tom, ax=ax, color=&#34;green&#34;, shade=True)
+        sns.kdeplot(
+            self.log_BME, ax=ax, color=&#34;blue&#34;, shade=True,
+            label=&#39;Model BME&#39;)
+
+        ax.set_xlabel(&#39;log$_{10}$(BME)&#39;)
+        ax.set_ylabel(&#39;Probability density&#39;)
+
+        legend_elements = [
+            Patch(facecolor=&#39;green&#39;, edgecolor=&#39;green&#39;, label=&#39;TOM BME&#39;),
+            Patch(facecolor=&#39;blue&#39;, edgecolor=&#39;blue&#39;, label=&#39;Model BME&#39;)
+            ]
+        ax.legend(handles=legend_elements)
+
+        if self.emulator:
+            plotname = f&#39;/BME_hist_{Model.name}_emulator&#39;
+        else:
+            plotname = f&#39;/BME_hist_{Model.name}&#39;
+
+        plt.savefig(f&#39;./{out_dir}{plotname}.pdf&#39;, bbox_inches=&#39;tight&#39;)
+        plt.show()
+        plt.close()
+
+    # -------- Posterior predictives --------
+    if self.plot_post_pred:
+        # Plot the posterior predictive
+        self._plot_post_predictive()
+
+    return self</code></pre>
+</details>
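+<p>A minimal usage sketch, assuming a fully configured
+<code>BayesInference</code> instance <code>bayes_obj</code> (a placeholder
+name) whose metamodel and discrepancy options have been set beforehand:</p>
+<pre><code class="python"># Runs the whole chain: likelihoods, posterior, predictives and plots
+bayes_obj = bayes_obj.create_inference()
+
+# Posterior parameter samples are afterwards available as a dataframe
+print(bayes_obj.posterior_df.describe())</code></pre>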
+</dd>
+<dt id="bayes_inference.BayesInference.normpdf"><code class="name flex">
+<span>def <span class="ident">normpdf</span></span>(<span>self, outputs, obs_data, total_sigma2s, sigma2=None, std=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Calculates the likelihood of simulation outputs compared with
+observation data.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>outputs</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary containing the simulation outputs as array of shape
+(n_samples, n_measurement) for each model output.</dd>
+<dt><strong><code>obs_data</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary/dataframe containing the observation data.</dd>
+<dt><strong><code>total_sigma2s</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary with known values of the covariance diagonal entries,
+a.k.a sigma^2.</dd>
+<dt><strong><code>sigma2</code></strong> :&ensp;<code>array</code>, optional</dt>
+<dd>An array of the sigma^2 samples, when the covariance diagonal
+entries are unknown and are being jointly inferred. The default is
+None.</dd>
+<dt><strong><code>std</code></strong> :&ensp;<code>dict</code>, optional</dt>
+<dd>A dictionary containing the root mean squared error as array of
+shape (n_samples, n_measurement) for each model output. The default
+is None.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>logLik</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples)</code></dt>
+<dd>Log-likelihoods.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def normpdf(self, outputs, obs_data, total_sigma2s, sigma2=None, std=None):
+    &#34;&#34;&#34;
+    Calculates the likelihood of simulation outputs compared with
+    observation data.
+
+    Parameters
+    ----------
+    outputs : dict
+        A dictionary containing the simulation outputs as array of shape
+        (n_samples, n_measurement) for each model output.
+    obs_data : dict
+        A dictionary/dataframe containing the observation data.
+    total_sigma2s : dict
+        A dictionary with known values of the covariance diagonal entries,
+        a.k.a sigma^2.
+    sigma2 : array, optional
+        An array of the sigma^2 samples, when the covariance diagonal
+        entries are unknown and are being jointly inferred. The default is
+        None.
+    std : dict, optional
+        A dictionary containing the root mean squared error as array of
+        shape (n_samples, n_measurement) for each model output. The default
+        is None.
+
+    Returns
+    -------
+    logLik : array of shape (n_samples)
+        Log-likelihoods.
+
+    &#34;&#34;&#34;
+    Model = self.MetaModel.ModelObj
+    logLik = 0.0
+
+    # Extract the requested model outputs for likelihood calculation
+    if self.req_outputs is None:
+        req_outputs = Model.Output.names
+    else:
+        req_outputs = list(self.req_outputs)
+
+    # Loop over the outputs
+    for idx, out in enumerate(req_outputs):
+
+        # (Meta)Model Output
+        nsamples, nout = outputs[out].shape
+
+        # Prepare data and remove NaN
+        try:
+            data = obs_data[out].values[~np.isnan(obs_data[out])]
+        except AttributeError:
+            data = obs_data[out][~np.isnan(obs_data[out])]
+
+        # Prepare sigma2s
+        tot_sigma2s = total_sigma2s[out][~np.isnan(
+            total_sigma2s[out])][:nout]
+
+        # Add the std of the PCE if it is chosen as the emulator.
+        if self.emulator:
+            if std is not None:
+                std_pce = std[out]
+            else:
+                std_pce = np.mean(
+                    self._std_pce_prior_pred[out], axis=0)
+            # Expected value of variance (Assump: i.i.d stds)
+            tot_sigma2s += std_pce**2
+
+        # If sigma2 is not given, use given total_sigma2s
+        if sigma2 is None:
+            logLik += stats.multivariate_normal.logpdf(
+                outputs[out], data, np.diag(tot_sigma2s))
+            continue
+
+        # Loop over each run/sample and calculate logLikelihood
+        logliks = np.zeros(nsamples)
+        for s_idx in range(nsamples):
+
+            # Simulation run
+            tot_outputs = outputs[out]
+
+            # Covariance Matrix
+            covMatrix = np.diag(tot_sigma2s)
+
+            if sigma2 is not None:
+                # Check the type of the error term
+                if hasattr(self, &#39;bias_inputs&#39;) and \
+                   not hasattr(self, &#39;error_model&#39;):
+                    # Infer a Bias model using Gaussian Process Regression
+                    bias_inputs = np.hstack(
+                        (self.bias_inputs[out],
+                         tot_outputs[s_idx].reshape(-1, 1)))
+
+                    params = sigma2[s_idx, idx*3:(idx+1)*3]
+                    covMatrix = self._kernel_rbf(bias_inputs, params)
+                else:
+                    # Infer equal sigma2s
+                    try:
+                        sigma_2 = sigma2[s_idx, idx]
+                    except TypeError:
+                        sigma_2 = 0.0
+
+                    covMatrix += sigma_2 * np.eye(nout)
+                    # covMatrix = np.diag(sigma2 * total_sigma2s)
+
+            # Select the data points to compare
+            if self.selected_indices is not None:
+                indices = self.selected_indices[out]
+                covMatrix = np.diag(covMatrix[indices, indices])
+            else:
+                indices = list(range(nout))
+
+            # Compute loglikelihood
+            logliks[s_idx] = self._logpdf(
+                tot_outputs[s_idx, indices], data[indices], covMatrix
+                )
+
+        logLik += logliks
+    return logLik</code></pre>
+</details>
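+<p>A sketch of calling <code>normpdf</code> directly with known variances and
+placeholder arrays; the dictionaries mirror the shapes described in the
+parameter list above, and <code>bayes_obj</code> is again a placeholder
+instance:</p>
+<pre><code class="python">import numpy as np
+
+n_samples, n_points = 100, 10
+outputs = {&#39;Z&#39;: np.random.rand(n_samples, n_points)}  # model evaluations
+obs_data = {&#39;Z&#39;: np.random.rand(n_points)}            # measurements
+total_sigma2s = {&#39;Z&#39;: 0.01 * np.ones(n_points)}       # known variances
+
+log_lik = bayes_obj.normpdf(outputs, obs_data, total_sigma2s)
+# log_lik has shape (n_samples,): one log-likelihood per parameter set</code></pre>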
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="bayes_inference.BayesInference" href="#bayes_inference.BayesInference">BayesInference</a></code></h4>
+<ul class="">
+<li><code><a title="bayes_inference.BayesInference.create_inference" href="#bayes_inference.BayesInference.create_inference">create_inference</a></code></li>
+<li><code><a title="bayes_inference.BayesInference.normpdf" href="#bayes_inference.BayesInference.normpdf">normpdf</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/html/discrepancy.html b/docs/html/discrepancy.html
new file mode 100644
index 0000000000000000000000000000000000000000..f77ed739da71d0cdf02b98ca7406a0cafba482da
--- /dev/null
+++ b/docs/html/discrepancy.html
@@ -0,0 +1,375 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>discrepancy API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>discrepancy</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import scipy.stats as stats
+# Needed by get_sample below; the import path is assumed from the package name
+from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
+
+
+class Discrepancy:
+    &#34;&#34;&#34;
+    Discrepancy class for Bayesian inference method.
+    We define the reference or reality to be equal to what we can model and a
+    discrepancy term \\( \\epsilon \\). We consider the following format:
+
+    $$\\textbf{y}_{\\text{reality}} = \\mathcal{M}(\\theta) + \\epsilon,$$
+
+    where \\( \\epsilon \\in R^{N_{out}} \\) represents the effects of
+    measurement error and model inaccuracy. For simplicity, it can be defined
+    as an additive Gaussian discrepancy with zero mean and given covariance
+    matrix \\( \\Sigma \\):
+
+    $$\\epsilon \\sim \\mathcal{N}(\\epsilon|0, \\Sigma). $$
+
+    In the context of model inversion or calibration, an observation point
+    \\( \\textbf{y}_i \\in \\mathcal{y} \\) is a realization of a Gaussian
+    distribution with mean value of \\(\\mathcal{M}(\\theta) \\) and covariance
+    matrix of \\( \\Sigma \\).
+
+    $$ p(\\textbf{y}|\\theta) = \\mathcal{N}(\\textbf{y}|\\mathcal{M}
+                                             (\\theta))$$
+
+    The following options are available:
+
+    * Option A: With known residual covariance matrix \\(\\Sigma\\) for
+    independent measurements.
+
+    * Option B: With unknown residual covariance matrix \\(\\Sigma\\),
+    parametrized as \\(\\Sigma(\\theta_{\\epsilon})=\\sigma^2 \\textbf{I}_
+    {N_{out}}\\) with unknown residual variances \\(\\sigma^2\\).
+    This term will be inferred jointly with the uncertain input parameters. For
+    the inversion, you need to define a prior marginal via `Input` class. Note
+    that \\(\\sigma^2\\) is only a single scalar multiplier for the diagonal
+    entries of the covariance matrix \\(\\Sigma\\).
+
+    Attributes
+    ----------
+    InputDisc : obj
+        Input object. When the \\(\\sigma^2\\) is expected to be inferred
+        jointly with the parameters (`Option B`). If multiple output groups are
+        defined by `Model.Output.names`, each model output needs to have
+        a prior marginal using the `Input` class. The default is `&#39;&#39;`.
+    disc_type : str
+        Type of the noise definition. Only `&#39;Gaussian&#39;` is supported so far.
+    parameters : dict or pandas.DataFrame
+        Known residual variance \\(\\sigma^2\\), i.e. diagonal entry of the
+        covariance matrix of the multivariate normal likelihood in case of
+        `Option A`.
+
+    &#34;&#34;&#34;
+
+    def __init__(self, InputDisc=&#39;&#39;, disc_type=&#39;Gaussian&#39;, parameters=None):
+        self.InputDisc = InputDisc
+        self.disc_type = disc_type
+        self.parameters = parameters
+
+    # -------------------------------------------------------------------------
+    def get_sample(self, n_samples):
+        &#34;&#34;&#34;
+        Generate samples for the \\(\\sigma^2\\), i.e. the diagonal entries of
+        the variance-covariance matrix in the multivariate normal distribution.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of samples (parameter sets).
+
+        Returns
+        -------
+        sigma2_prior: array of shape (n_samples, n_params)
+            \\(\\sigma^2\\) samples.
+
+        &#34;&#34;&#34;
+        self.n_samples = n_samples
+        ExpDesign = ExpDesigns(self.InputDisc)
+        self.sigma2_prior = ExpDesign.generate_ED(
+            n_samples, sampling_method=&#39;random&#39;, max_pce_deg=1
+            )
+        # Store BoundTuples
+        self.ExpDesign = ExpDesign
+
+        # Naive approach: Fit a Gaussian kernel to the provided data
+        self.ExpDesign.JDist = stats.gaussian_kde(ExpDesign.raw_data)
+
+        # Save the names of sigmas
+        if len(self.InputDisc.Marginals) != 0:
+            self.name = []
+            for Marginalidx in range(len(self.InputDisc.Marginals)):
+                self.name.append(self.InputDisc.Marginals[Marginalidx].name)
+
+        return self.sigma2_prior</code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="discrepancy.Discrepancy"><code class="flex name class">
+<span>class <span class="ident">Discrepancy</span></span>
+<span>(</span><span>InputDisc='', disc_type='Gaussian', parameters=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Discrepancy class for Bayesian inference method.
+We define the reference or reality to be equal to what we can model and a
+discrepancy term <span><span class="MathJax_Preview"> \epsilon </span><script type="math/tex"> \epsilon </script></span>. We consider the following format:</p>
+<p><span><span class="MathJax_Preview">\textbf{y}_{\text{reality}} = \mathcal{M}(\theta) + \epsilon,</span><script type="math/tex; mode=display">\textbf{y}_{\text{reality}} = \mathcal{M}(\theta) + \epsilon,</script></span></p>
+<p>where <span><span class="MathJax_Preview"> \epsilon \in R^{N_{out}} </span><script type="math/tex"> \epsilon \in R^{N_{out}} </script></span> represents the effects of
+measurement error and model inaccuracy. For simplicity, it can be defined
+as an additive Gaussian discrepancy with zero mean and given covariance
+matrix <span><span class="MathJax_Preview"> \Sigma </span><script type="math/tex"> \Sigma </script></span>:</p>
+<p><span><span class="MathJax_Preview">\epsilon \sim \mathcal{N}(\epsilon|0, \Sigma). </span><script type="math/tex; mode=display">\epsilon \sim \mathcal{N}(\epsilon|0, \Sigma). </script></span></p>
+<p>In the context of model inversion or calibration, an observation point
+<span><span class="MathJax_Preview"> \textbf{y}_i \in \mathcal{y} </span><script type="math/tex"> \textbf{y}_i \in \mathcal{y} </script></span> is a realization of a Gaussian
+distribution with mean value of <span><span class="MathJax_Preview">\mathcal{M}(\theta) </span><script type="math/tex">\mathcal{M}(\theta) </script></span> and covariance
+matrix of <span><span class="MathJax_Preview"> \Sigma </span><script type="math/tex"> \Sigma </script></span>.</p>
+<p><span><span class="MathJax_Preview"> p(\textbf{y}|\theta) = \mathcal{N}(\textbf{y}|\mathcal{M}
+(\theta))</span><script type="math/tex; mode=display"> p(\textbf{y}|\theta) = \mathcal{N}(\textbf{y}|\mathcal{M}
+(\theta))</script></span></p>
+<p>The following options are available:</p>
+<ul>
+<li>
+<p>Option A: With known residual covariance matrix <span><span class="MathJax_Preview">\Sigma</span><script type="math/tex">\Sigma</script></span> for
+independent measurements.</p>
+</li>
+<li>
+<p>Option B: With unknown residual covariance matrix <span><span class="MathJax_Preview">\Sigma</span><script type="math/tex">\Sigma</script></span>,
+parametrized as <span><span class="MathJax_Preview">\Sigma(\theta_{\epsilon})=\sigma^2 \textbf{I}_
+{N_{out}}</span><script type="math/tex">\Sigma(\theta_{\epsilon})=\sigma^2 \textbf{I}_
+{N_{out}}</script></span> with unknown residual variances <span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span>.
+This term will be inferred jointly with the uncertain input parameters. For
+the inversion, you need to define a prior marginal via <code>Input</code> class. Note
+that <span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span> is only a single scalar multiplier for the diagonal
+entries of the covariance matrix <span><span class="MathJax_Preview">\Sigma</span><script type="math/tex">\Sigma</script></span>.</p>
+</li>
+</ul>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>InputDisc</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Input object. When the <span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span> is expected to be inferred
+jointly with the parameters (<code>Option B</code>). If multiple output groups are
+defined by <code>Model.Output.names</code>, each model output needs to have
+a prior marginal using the <code>Input</code> class. The default is <code>''</code>.</dd>
+<dt><strong><code>disc_type</code></strong> :&ensp;<code>str</code></dt>
+<dd>Type of the noise definition. Only <code>'Gaussian'</code> is supported so far.</dd>
+<dt><strong><code>parameters</code></strong> :&ensp;<code>dict</code> or <code>pandas.DataFrame</code></dt>
+<dd>Known residual variance <span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span>, i.e. diagonal entry of the
+covariance matrix of the multivariate normal likelihood in case of
+<code>Option A</code>.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class Discrepancy:
+    &#34;&#34;&#34;
+    Discrepancy class for Bayesian inference method.
+    We define the reference or reality to be equal to what we can model and a
+    discrepancy term \\( \\epsilon \\). We consider the following format:
+
+    $$\\textbf{y}_{\\text{reality}} = \\mathcal{M}(\\theta) + \\epsilon,$$
+
+    where \\( \\epsilon \\in R^{N_{out}} \\) represents the effects of
+    measurement error and model inaccuracy. For simplicity, it can be defined
+    as an additive Gaussian discrepancy with zero mean and given covariance
+    matrix \\( \\Sigma \\):
+
+    $$\\epsilon \\sim \\mathcal{N}(\\epsilon|0, \\Sigma). $$
+
+    In the context of model inversion or calibration, an observation point
+    \\( \\textbf{y}_i \\in \\mathcal{y} \\) is a realization of a Gaussian
+    distribution with mean value of \\(\\mathcal{M}(\\theta) \\) and covariance
+    matrix of \\( \\Sigma \\).
+
+    $$ p(\\textbf{y}|\\theta) = \\mathcal{N}(\\textbf{y}|\\mathcal{M}
+                                             (\\theta))$$
+
+    The following options are available:
+
+    * Option A: With known residual covariance matrix \\(\\Sigma\\) for
+    independent measurements.
+
+    * Option B: With unknown residual covariance matrix \\(\\Sigma\\),
+    parametrized as \\(\\Sigma(\\theta_{\\epsilon})=\\sigma^2 \\textbf{I}_
+    {N_{out}}\\) with unknown residual variances \\(\\sigma^2\\).
+    This term will be inferred jointly with the uncertain input parameters. For
+    the inversion, you need to define a prior marginal via `Input` class. Note
+    that \\(\\sigma^2\\) is only a single scalar multiplier for the diagonal
+    entries of the covariance matrix \\(\\Sigma\\).
+
+    Attributes
+    ----------
+    InputDisc : obj
+        Input object. When the \\(\\sigma^2\\) is expected to be inferred
+        jointly with the parameters (`Option B`). If multiple output groups are
+        defined by `Model.Output.names`, each model output needs to have
+        a prior marginal using the `Input` class. The default is `&#39;&#39;`.
+    disc_type : str
+        Type of the noise definition. Only `&#39;Gaussian&#39;` is supported so far.
+    parameters : dict or pandas.DataFrame
+        Known residual variance \\(\\sigma^2\\), i.e. diagonal entry of the
+        covariance matrix of the multivariate normal likelihood in case of
+        `Option A`.
+
+    &#34;&#34;&#34;
+
+    def __init__(self, InputDisc=&#39;&#39;, disc_type=&#39;Gaussian&#39;, parameters=None):
+        self.InputDisc = InputDisc
+        self.disc_type = disc_type
+        self.parameters = parameters
+
+    # -------------------------------------------------------------------------
+    def get_sample(self, n_samples):
+        &#34;&#34;&#34;
+        Generate samples for the \\(\\sigma^2\\), i.e. the diagonal entries of
+        the variance-covariance matrix in the multivariate normal distribution.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of samples (parameter sets).
+
+        Returns
+        -------
+        sigma2_prior: array of shape (n_samples, n_params)
+            \\(\\sigma^2\\) samples.
+
+        &#34;&#34;&#34;
+        self.n_samples = n_samples
+        ExpDesign = ExpDesigns(self.InputDisc)
+        self.sigma2_prior = ExpDesign.generate_ED(
+            n_samples, sampling_method=&#39;random&#39;, max_pce_deg=1
+            )
+        # Store BoundTuples
+        self.ExpDesign = ExpDesign
+
+        # Naive approach: Fit a Gaussian kernel to the provided data
+        self.ExpDesign.JDist = stats.gaussian_kde(ExpDesign.raw_data)
+
+        # Save the names of sigmas
+        if len(self.InputDisc.Marginals) != 0:
+            self.name = []
+            for Marginalidx in range(len(self.InputDisc.Marginals)):
+                self.name.append(self.InputDisc.Marginals[Marginalidx].name)
+
+        return self.sigma2_prior</code></pre>
+</details>
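+<p>A short construction sketch for both options; the variance values and the
+<code>disc_input</code> object are illustrative placeholders (the latter is
+assumed to be a configured <code>Input</code> instance):</p>
+<pre><code class="python">import pandas as pd
+
+# Option A: known residual variances, one column per model output
+known_var = pd.DataFrame({&#39;Z&#39;: [0.01] * 10})
+disc_known = Discrepancy(disc_type=&#39;Gaussian&#39;, parameters=known_var)
+
+# Option B: infer sigma^2 jointly with the model parameters; the prior
+# marginal(s) are defined via an Input object
+disc_infer = Discrepancy(InputDisc=disc_input)</code></pre>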
+<h3>Methods</h3>
+<dl>
+<dt id="discrepancy.Discrepancy.get_sample"><code class="name flex">
+<span>def <span class="ident">get_sample</span></span>(<span>self, n_samples)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generate samples for the <span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span>, i.e. the diagonal entries of
+the variance-covariance matrix in the multivariate normal distribution.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of samples (parameter sets).</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>sigma2_prior</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd><span><span class="MathJax_Preview">\sigma^2</span><script type="math/tex">\sigma^2</script></span> samples.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def get_sample(self, n_samples):
+    &#34;&#34;&#34;
+    Generate samples for the \\(\\sigma^2\\), i.e. the diagonal entries of
+    the variance-covariance matrix in the multivariate normal distribution.
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of samples (parameter sets).
+
+    Returns
+    -------
+    sigma2_prior: array of shape (n_samples, n_params)
+        \\(\\sigma^2\\) samples.
+
+    &#34;&#34;&#34;
+    self.n_samples = n_samples
+    ExpDesign = ExpDesigns(self.InputDisc)
+    self.sigma2_prior = ExpDesign.generate_ED(
+        n_samples, sampling_method=&#39;random&#39;, max_pce_deg=1
+        )
+    # Store BoundTuples
+    self.ExpDesign = ExpDesign
+
+    # Naive approach: Fit a Gaussian kernel to the provided data
+    self.ExpDesign.JDist = stats.gaussian_kde(ExpDesign.raw_data)
+
+    # Save the names of sigmas
+    if len(self.InputDisc.Marginals) != 0:
+        self.name = []
+        for Marginalidx in range(len(self.InputDisc.Marginals)):
+            self.name.append(self.InputDisc.Marginals[Marginalidx].name)
+
+    return self.sigma2_prior</code></pre>
+</details>
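+<p>Usage sketch for <code>get_sample</code>, assuming <code>disc_infer</code>
+was constructed with an <code>Input</code> object as in the class example
+above:</p>
+<pre><code class="python"># Draw 1000 sigma^2 candidates from the prior marginal(s)
+sigma2_prior = disc_infer.get_sample(1000)
+# sigma2_prior has shape (1000, number of inferred variance parameters)</code></pre>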
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="discrepancy.Discrepancy" href="#discrepancy.Discrepancy">Discrepancy</a></code></h4>
+<ul class="">
+<li><code><a title="discrepancy.Discrepancy.get_sample" href="#discrepancy.Discrepancy.get_sample">get_sample</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/html/exp_designs.html b/docs/html/exp_designs.html
new file mode 100644
index 0000000000000000000000000000000000000000..6771184ec8c8e5e7c567add4c5e4e032f37724ec
--- /dev/null
+++ b/docs/html/exp_designs.html
@@ -0,0 +1,2245 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>exp_designs API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>exp_designs</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import math
+import itertools
+import chaospy
+import scipy.stats as st
+from tqdm import tqdm
+
+from apoly_construction import apoly_construction
+
+
+class ExpDesigns:
+    &#34;&#34;&#34;
+    This class generates samples from the prescribed marginals for the model
+    parameters using the `Input` object.
+
+    Attributes
+    ----------
+    Input : obj
+        Input object containing the parameter marginals, i.e. name,
+        distribution type and distribution parameters or available raw data.
+    method : str
+        Type of the experimental design. The default is `&#39;normal&#39;`. Other
+        option is `&#39;sequential&#39;`.
+    meta_Model : str
+        Type of the meta_model.
+    sampling_method : str
+        Name of the sampling method for the experimental design. The following
+        sampling methods are supported:
+
+        * random
+        * latin_hypercube
+        * sobol
+        * halton
+        * hammersley
+        * korobov
+        * chebyshev(FT)
+        * grid(FT)
+        * nested_grid(FT)
+        * user
+    hdf5_file : str
+        Name of the hdf5 file that contains the experimental design.
+    n_new_samples : int
+        Number of (initial) training points.
+    n_max_samples : int
+        Number of maximum training points.
+    mod_LOO_threshold : float
+        The modified leave-one-out cross validation threshold where the
+        sequential design stops.
+    tradeoff_scheme : str
+        Trade-off scheme to assign weights to the exploration and exploitation
+        scores in the sequential design.
+    n_canddidate : int
+        Number of candidate training sets to calculate the scores for.
+    explore_method : str
+        Type of the exploration method for the sequential design. The following
+        methods are supported:
+
+        * Voronoi
+        * random
+        * latin_hypercube
+        * LOOCV
+        * dual annealing
+    exploit_method : str
+        Type of the exploitation method for the sequential design. The
+        following methods are supported:
+
+        * BayesOptDesign
+        * BayesActDesign
+        * VarOptDesign
+        * alphabetic
+        * Space-filling
+    util_func : str or list
+        The utility function to be specified for the `exploit_method`. For the
+        available utility functions see Note section.
+    n_cand_groups : int
+        Number of candidate groups. Each group of candidate training sets will
+        be evaluated separately in parallel.
+    n_replication : int
+        Number of replications. Only for comparison. The default is 1.
+    post_snapshot : bool
+        Whether to plot the posterior in the sequential design. The default
+        is `False`.
+    step_snapshot : int
+        The number of steps to plot the posterior in the sequential design. The
+        default is 1.
+    max_a_post : list or array
+        Maximum a posteriori of the posterior distribution, if known. The
+        default is `[]`.
+    adapt_verbose : bool
+        Whether to plot the model response vs that of metamodel for the new
+        training point in the sequential design.
+
+    Note
+    ----------
+    The following utility functions for the **exploitation** methods are
+    supported:
+
+    #### BayesOptDesign (when data is available)
+    - DKL (Kullback-Leibler Divergence)
+    - DPP (D-Posterior-precision)
+    - APP (A-Posterior-precision)
+
+    #### VarBasedOptDesign (when data is not available)
+    - Entropy (Entropy/MMSE/active learning)
+    - EIGF (Expected Improvement for Global fit)
+    - LOOCV (Leave-one-out Cross Validation)
+
+    #### alphabetic
+    - D-Opt (D-Optimality)
+    - A-Opt (A-Optimality)
+    - K-Opt (K-Optimality)
+    &#34;&#34;&#34;
+
+    def __init__(self, Input, method=&#39;normal&#39;, meta_Model=&#39;pce&#39;,
+                 sampling_method=&#39;random&#39;, hdf5_file=None,
+                 n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16,
+                 tradeoff_scheme=None, n_canddidate=1, explore_method=&#39;random&#39;,
+                 exploit_method=&#39;Space-filling&#39;, util_func=&#39;Space-filling&#39;,
+                 n_cand_groups=4, n_replication=1, post_snapshot=False,
+                 step_snapshot=1, max_a_post=[], adapt_verbose=False):
+
+        self.InputObj = Input
+        self.method = method
+        self.meta_Model = meta_Model
+        self.sampling_method = sampling_method
+        self.hdf5_file = hdf5_file
+        self.n_new_samples = n_new_samples
+        self.n_max_samples = n_max_samples
+        self.mod_LOO_threshold = mod_LOO_threshold
+        self.explore_method = explore_method
+        self.exploit_method = exploit_method
+        self.util_func = util_func
+        self.tradeoff_scheme = tradeoff_scheme
+        self.n_canddidate = n_canddidate
+        self.n_cand_groups = n_cand_groups
+        self.n_replication = n_replication
+        self.post_snapshot = post_snapshot
+        self.step_snapshot = step_snapshot
+        self.max_a_post = max_a_post
+        self.adapt_verbose = adapt_verbose
+
+    # -------------------------------------------------------------------------
+    def generate_samples(self, n_samples, sampling_method=&#39;random&#39;,
+                         transform=False):
+        &#34;&#34;&#34;
+        Generates samples with the given sampling method.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+        sampling_method : str, optional
+            Sampling method. The default is `&#39;random&#39;`.
+        transform : bool, optional
+            Transformation via an isoprobabilistic transformation method. The
+            default is `False`.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            Generated samples from defined model input object.
+
+        &#34;&#34;&#34;
+        try:
+            samples = chaospy.generate_samples(
+                int(n_samples), domain=self.JDist, rule=sampling_method
+                )
+        except:
+            samples = self.JDist.resample(int(n_samples))
+
+        # Transform samples to the original space
+        if transform:
+            tr_samples = self.transform(samples.T)
+            return samples.T, tr_samples
+        else:
+            return samples.T
+
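+    # Hypothetical usage sketch (not part of the original module): assuming
+    # JDist has already been built via generate_ED/init_param_space,
+    #
+    #   x = exp_design.generate_samples(500, sampling_method=&#39;sobol&#39;)
+    #   x, tr_x = exp_design.generate_samples(500, transform=True)
+    #
+    # where `exp_design` is an illustrative ExpDesigns instance. With
+    # transform=True a (samples, transformed samples) tuple is returned.
+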
+    # -------------------------------------------------------------------------
+    def generate_ED(self, n_samples, sampling_method=&#39;random&#39;, transform=False,
+                    max_pce_deg=None):
+        &#34;&#34;&#34;
+        Generates experimental designs (training set) with the given method.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested training points.
+        sampling_method : str, optional
+            Sampling method. The default is `&#39;random&#39;`.
+        transform : bool, optional
+            Isoprobabilistic transformation. The default is `False`.
+        max_pce_deg : int, optional
+            Maximum PCE polynomial degree. The default is `None`.
+
+        Returns
+        -------
+        samples : array of shape (n_samples, n_params)
+            Selected training samples.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        self.ndim = len(Inputs.Marginals)
+        if not hasattr(self, &#39;n_init_samples&#39;):
+            self.n_init_samples = self.ndim + 1
+        n_samples = int(n_samples)
+
+        # Check if PCE or aPCE metamodel is selected.
+        if self.meta_Model.lower() == &#39;apce&#39;:
+            self.apce = True
+        else:
+            self.apce = False
+
+        # Check if input is given as dist or input_data.
+        if len(Inputs.Marginals[0].input_data):
+            self.input_data_given = True
+        else:
+            self.input_data_given = False
+
+        # Get the bounds if input_data are directly defined by user:
+        if self.input_data_given:
+            for i in range(self.ndim):
+                low_bound = np.min(Inputs.Marginals[i].input_data)
+                up_bound = np.max(Inputs.Marginals[i].input_data)
+                Inputs.Marginals[i].parameters = [low_bound, up_bound]
+
+        # Generate the samples based on requested method
+        self.raw_data, self.bound_tuples = self.init_param_space(max_pce_deg)
+
+        # Pass user-defined samples as ED
+        if sampling_method == &#39;user&#39;:
+            samples = self.X
+            self.n_samples = len(samples)
+
+        # Sample the distribution of parameters
+        elif self.input_data_given:
+            # Case II: Input values are directly given by the user.
+
+            if sampling_method == &#39;random&#39;:
+                samples = self.random_sampler(n_samples)
+
+            elif sampling_method == &#39;PCM&#39; or \
+                    sampling_method == &#39;LSCM&#39;:
+                samples = self.pcm_sampler(max_pce_deg)
+
+            else:
+                # Create ExpDesign in the actual space using chaospy
+                try:
+                    samples = chaospy.generate_samples(n_samples,
+                                                       domain=self.JDist,
+                                                       rule=sampling_method).T
+                except:
+                    samples = self.JDist.sample(n_samples)
+
+        elif not self.input_data_given:
+            # Case I: User passed known distributions
+            samples = chaospy.generate_samples(n_samples, domain=self.JDist,
+                                               rule=sampling_method).T
+
+        # Transform samples to the original space
+        if transform:
+            tr_samples = self.transform(samples)
+            return samples, tr_samples
+        else:
+            return samples
+
+    # -------------------------------------------------------------------------
+    def init_param_space(self, max_deg=None):
+        &#34;&#34;&#34;
+        Initializes parameter space.
+
+        Parameters
+        ----------
+        max_deg : int, optional
+            Maximum degree. The default is `None`.
+
+        Returns
+        -------
+        raw_data : array of shape (n_params, n_samples)
+            Raw data.
+        bound_tuples : list of tuples
+            A list containing lower and upper bounds of parameters.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        ndim = self.ndim
+        rosenblatt_flag = Inputs.Rosenblatt
+        mc_size = 50000
+
+        # Save parameter names
+        self.par_names = []
+        for parIdx in range(ndim):
+            self.par_names.append(Inputs.Marginals[parIdx].name)
+
+        # Create a multivariate probability distribution
+        if max_deg is not None:
+            JDist, poly_types = self.build_dist(rosenblatt=rosenblatt_flag)
+            self.JDist, self.poly_types = JDist, poly_types
+
+        if self.input_data_given:
+
+            self.MCSize = len(Inputs.Marginals[0].input_data)
+            self.raw_data = np.zeros((ndim, self.MCSize))
+
+            for parIdx in range(ndim):
+                # Save parameter names
+                try:
+                    self.raw_data[parIdx] = np.array(
+                        Inputs.Marginals[parIdx].input_data)
+                except:
+                    self.raw_data[parIdx] = self.JDist[parIdx].sample(mc_size)
+
+        else:
+            # Generate random samples based on parameter distributions
+            self.raw_data = chaospy.generate_samples(mc_size,
+                                                     domain=self.JDist)
+
+        # Create orthogonal polynomial coefficients if necessary
+        if self.apce and max_deg is not None and Inputs.poly_coeffs_flag:
+            self.polycoeffs = {}
+            for parIdx in tqdm(range(ndim), ascii=True,
+                               desc=&#34;Computing orth. polynomial coeffs&#34;):
+                poly_coeffs = apoly_construction(self.raw_data[parIdx],
+                                                 max_deg)
+                self.polycoeffs[f&#39;p_{parIdx+1}&#39;] = poly_coeffs
+
+        # Extract moments
+        for parIdx in range(ndim):
+            mu = np.mean(self.raw_data[parIdx])
+            std = np.std(self.raw_data[parIdx])
+            self.InputObj.Marginals[parIdx].moments = [mu, std]
+
+        # Generate the bounds based on given inputs for marginals
+        bound_tuples = []
+        for i in range(ndim):
+            if Inputs.Marginals[i].dist_type == &#39;unif&#39;:
+                low_bound, up_bound = Inputs.Marginals[i].parameters
+            else:
+                low_bound = np.min(self.raw_data[i])
+                up_bound = np.max(self.raw_data[i])
+
+            bound_tuples.append((low_bound, up_bound))
+
+        self.bound_tuples = tuple(bound_tuples)
+
+        return self.raw_data, self.bound_tuples
+
+    # -------------------------------------------------------------------------
+    def build_dist(self, rosenblatt):
+        &#34;&#34;&#34;
+        Creates the polynomial types to be passed to the univ_basis_vals
+        method of the MetaModel object.
+
+        Parameters
+        ----------
+        rosenblatt : bool
+            Rosenblatt transformation flag.
+
+        Returns
+        -------
+        orig_space_dist : object
+            A chaospy JDist object or a gaussian_kde object.
+        poly_types : list
+            List of polynomial types for the parameters.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        all_data = []
+        all_dist_types = []
+        orig_joints = []
+        poly_types = []
+
+        for parIdx in range(self.ndim):
+
+            if Inputs.Marginals[parIdx].dist_type is None:
+                data = Inputs.Marginals[parIdx].input_data
+                all_data.append(data)
+                dist_type = None
+            else:
+                dist_type = Inputs.Marginals[parIdx].dist_type
+                params = Inputs.Marginals[parIdx].parameters
+
+            if rosenblatt:
+                polytype = &#39;hermite&#39;
+                dist = chaospy.Normal()
+
+            elif dist_type is None:
+                polytype = &#39;arbitrary&#39;
+                dist = None
+
+            elif &#39;unif&#39; in dist_type.lower():
+                polytype = &#39;legendre&#39;
+                dist = chaospy.Uniform(lower=params[0], upper=params[1])
+
+            elif &#39;norm&#39; in dist_type.lower() and \
+                 &#39;log&#39; not in dist_type.lower():
+                polytype = &#39;hermite&#39;
+                dist = chaospy.Normal(mu=params[0], sigma=params[1])
+
+            elif &#39;gamma&#39; in dist_type.lower():
+                polytype = &#39;laguerre&#39;
+                dist = chaospy.Gamma(shape=params[0],
+                                     scale=params[1],
+                                     shift=params[2])
+
+            elif &#39;beta&#39; in dist_type.lower():
+                polytype = &#39;jacobi&#39;
+                dist = chaospy.Beta(alpha=params[0], beta=params[1],
+                                    lower=params[2], upper=params[3])
+
+            elif &#39;lognorm&#39; in dist_type.lower():
+                polytype = &#39;hermite&#39;
+                Mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
+                Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+                dist = chaospy.LogNormal(mu=Mu, sigma=Sigma)
+
+            elif &#39;expon&#39; in dist_type.lower():
+                polytype = &#39;arbitrary&#39;
+                dist = chaospy.Exponential(scale=params[0], shift=params[1])
+
+            elif &#39;weibull&#39; in dist_type.lower():
+                polytype = &#39;arbitrary&#39;
+                dist = chaospy.Weibull(shape=params[0], scale=params[1],
+                                       shift=params[2])
+
+            else:
+                message = (f&#34;DistType {dist_type} for parameter&#34;
+                           f&#34;{parIdx+1} is not available.&#34;)
+                raise ValueError(message)
+
+            if self.input_data_given or self.apce:
+                polytype = &#39;arbitrary&#39;
+
+            # Store dists and poly_types
+            orig_joints.append(dist)
+            poly_types.append(polytype)
+            all_dist_types.append(dist_type)
+
+        # Prepare final output to return
+        if None in all_dist_types:
+            # Naive approach: Fit a gaussian kernel to the provided data
+            Data = np.asarray(all_data)
+            orig_space_dist = st.gaussian_kde(Data)
+            self.prior_space = orig_space_dist
+        else:
+            orig_space_dist = chaospy.J(*orig_joints)
+            self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000))
+
+        return orig_space_dist, poly_types
+
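+    # Hypothetical check (not part of the module): the lognormal branch
+    # above maps the (mean, std) of the lognormal variable itself onto the
+    # (mu, sigma) of the underlying normal distribution, e.g.
+    #
+    #   mean, std = 2.0, 0.5
+    #   mu = np.log(mean**2 / np.sqrt(mean**2 + std**2))
+    #   sigma = np.sqrt(np.log(1 + std**2 / mean**2))
+    #   x = np.exp(mu + sigma * np.random.standard_normal(10**6))
+    #   # x.mean(), x.std() are close to (2.0, 0.5)
+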
+    # -------------------------------------------------------------------------
+    def random_sampler(self, n_samples):
+        &#34;&#34;&#34;
+        Samples the given raw data randomly.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            The sampling locations in the input space.
+
+        &#34;&#34;&#34;
+        samples = np.zeros((n_samples, self.ndim))
+
+        for idxPa in range(self.ndim):
+            # input_data given
+            sample_size = len(self.raw_data[idxPa])
+            randIdx = np.random.randint(0, sample_size, n_samples)
+            samples[:, idxPa] = self.raw_data[idxPa, randIdx]
+
+        return samples
+
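+    # Hypothetical note: for raw_data of shape (n_params, n_data), the loop
+    # above is an independent bootstrap draw per parameter; a vectorized
+    # equivalent would be
+    #
+    #   idx = np.random.randint(0, raw_data.shape[1], (ndim, n_samples))
+    #   samples = raw_data[np.arange(ndim)[:, None], idx].T
+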
+    # -------------------------------------------------------------------------
+    def pcm_sampler(self, max_deg):
+        &#34;&#34;&#34;
+        Generates collocation points based on the roots of the orthogonal
+        polynomials up to the maximum degree.
+
+        Parameters
+        ----------
+        max_deg : int
+            Maximum degree defined by user.
+
+        Returns
+        -------
+        opt_col_points: array of shape (n_samples, n_params)
+            Collocation points.
+
+        &#34;&#34;&#34;
+
+        raw_data = self.raw_data
+
+        # Guess the closest degree to self.n_samples
+        def M_uptoMax(deg):
+            result = []
+            for d in range(1, deg+1):
+                result.append(math.factorial(self.ndim+d) //
+                              (math.factorial(self.ndim) * math.factorial(d)))
+            return np.array(result)
+
+        guess_Deg = np.where(M_uptoMax(max_deg) &gt; self.n_samples)[0][0]
+
+        c_points = np.zeros((guess_Deg+1, self.ndim))
+
+        def PolynomialPa(parIdx):
+            return apoly_construction(self.raw_data[parIdx], max_deg)
+
+        for i in range(self.ndim):
+            poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
+            c_points[:, i] = np.trim_zeros(np.roots(poly_coeffs))
+
+        #  Construction of optimal integration points
+        Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
+        sort_dig_unique_combos = np.array(list(filter(lambda x: x, Prod)))
+
+        # Rank the collocation points by distance to the mean
+        Temp = np.empty(shape=[0, guess_Deg+1])
+        for j in range(self.ndim):
+            s = abs(c_points[:, j]-np.mean(raw_data[j]))
+            Temp = np.append(Temp, [s], axis=0)
+        temp = Temp.T
+
+        index_CP = np.sort(temp, axis=0)
+        sort_cpoints = np.empty((0, guess_Deg+1))
+
+        for j in range(self.ndim):
+            sort_cp = c_points[index_CP[:, j], j]
+            sort_cpoints = np.vstack((sort_cpoints, sort_cp))
+
+        # Mapping of Combination to Cpoint Combination
+        sort_unique_combos = np.empty(shape=[0, self.ndim])
+        for i in range(len(sort_dig_unique_combos)):
+            sort_un_comb = []
+            for j in range(self.ndim):
+                SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j]-1]
+                sort_un_comb.append(SortUC)
+                sort_uni_comb = np.asarray(sort_un_comb)
+            sort_unique_combos = np.vstack((sort_unique_combos, sort_uni_comb))
+
+        # Output the collocation points
+        if self.sampling_method.lower() == &#39;lscm&#39;:
+            opt_col_points = sort_unique_combos
+        else:
+            opt_col_points = sort_unique_combos[0:self.n_samples]
+
+        return opt_col_points
+
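+    # Hypothetical worked example: M_uptoMax(deg) counts the PCE basis
+    # terms (ndim + d)! / (ndim! * d!) for d = 1..deg. With ndim = 2 it
+    # yields [3, 6, 10, ...], so for n_samples = 5 the first entry larger
+    # than 5 sits at index 1, i.e. guess_Deg = 1.
+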
+    # -------------------------------------------------------------------------
+    def transform(self, X, params=None):
+        &#34;&#34;&#34;
+        Transform the samples via either a Rosenblatt or an isoprobabilistic
+        transformation.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples,n_params)
+            Samples to be transformed.
+
+        Returns
+        -------
+        tr_X: array of shape (n_samples,n_params)
+            Transformed samples.
+
+        &#34;&#34;&#34;
+        if self.InputObj.Rosenblatt:
+            self.origJDist, _ = self.build_dist(False)
+            tr_X = self.origJDist.inv(self.JDist.fwd(X.T)).T
+        else:
+            # Transform samples via an isoprobabilistic transformation
+            n_samples, n_params = X.shape
+            Inputs = self.InputObj
+            origJDist = self.JDist
+            poly_types = self.poly_types
+
+            disttypes = []
+            for par_i in range(n_params):
+                disttypes.append(Inputs.Marginals[par_i].dist_type)
+
+            # Pass non-transformed X, if arbitrary PCE is selected.
+            if None in disttypes or self.input_data_given or self.apce:
+                return X
+
+            cdfx = np.zeros((X.shape))
+            tr_X = np.zeros((X.shape))
+
+            for par_i in range(n_params):
+
+                # Extract the parameters of the original space
+                disttype = disttypes[par_i]
+                if disttype is not None:
+                    dist = origJDist[par_i]
+                else:
+                    dist = None
+                polytype = poly_types[par_i]
+                cdf = np.vectorize(lambda x: dist.cdf(x))
+
+                # Extract the parameters of the transformation space based on
+                # polyType
+                if polytype == &#39;legendre&#39; or disttype == &#39;uniform&#39;:
+                    # Generate the target Y distribution on [-1, 1]
+                    params_Y = [-1, 1]
+                    dist_Y = st.uniform(loc=params_Y[0],
+                                        scale=params_Y[1]-params_Y[0])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == &#39;hermite&#39; or disttype == &#39;norm&#39;:
+                    params_Y = [0, 1]
+                    dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == &#39;laguerre&#39; or disttype == &#39;gamma&#39;:
+                    params_Y = [1, params[1]]
+                    dist_Y = st.gamma(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                # Compute CDF_x(X)
+                cdfx[:, par_i] = cdf(X[:, par_i])
+
+                # Compute invCDF_y(cdfx)
+                tr_X[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+        return tr_X
+
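+    # Hypothetical worked example: for a legendre/uniform marginal on
+    # [2, 6], a sample x = 5 is mapped to the standard interval [-1, 1] as
+    #
+    #   u = cdf(x) = (5 - 2) / (6 - 2) = 0.75
+    #   y = inv_cdf(u) = -1 + 2 * 0.75 = 0.5
+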
+    # -------------------------------------------------------------------------
+    def fit_dist(self, y):
+        &#34;&#34;&#34;
+        Fits the known distributions to the data.
+
+        Parameters
+        ----------
+        y : array of shape (n_samples)
+            Data to be fitted.
+
+        Returns
+        -------
+        sel_dist: string
+            Selected distribution type from `lognorm`, `norm`, `uniform` or
+            `expon`.
+        params : list
+            Parameters corresponding to the selected distribution type.
+
+        &#34;&#34;&#34;
+        dist_results = []
+        params = {}
+        dist_names = [&#39;lognorm&#39;, &#39;norm&#39;, &#39;uniform&#39;, &#39;expon&#39;]
+        for dist_name in dist_names:
+            dist = getattr(st, dist_name)
+
+            try:
+                if dist_name != &#39;lognorm&#39;:
+                    param = dist.fit(y)
+                else:
+                    param = dist.fit(np.exp(y), floc=0)
+            except:
+                param = dist.fit(y)
+
+            params[dist_name] = param
+            # Applying the Kolmogorov-Smirnov test
+            D, p = st.kstest(y, dist_name, args=param)
+            dist_results.append((dist_name, D))
+
+        # select the best fitted distribution
+        sel_dist, D = (min(dist_results, key=lambda item: item[1]))
+
+        if sel_dist == &#39;uniform&#39;:
+            params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] +
+                                params[sel_dist][1]]
+        if D &lt; 0.05:
+            return sel_dist, params[sel_dist]
+        else:
+            return None, None</code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="exp_designs.ExpDesigns"><code class="flex name class">
+<span>class <span class="ident">ExpDesigns</span></span>
+<span>(</span><span>Input, method='normal', meta_Model='pce', sampling_method='random', hdf5_file=None, n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16, tradeoff_scheme=None, n_canddidate=1, explore_method='random', exploit_method='Space-filling', util_func='Space-filling', n_cand_groups=4, n_replication=1, post_snapshot=False, step_snapshot=1, max_a_post=[], adapt_verbose=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This class generates samples from the prescribed marginals for the model
+parameters using the <code>Input</code> object.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>Input</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Input object containing the parameter marginals, i.e. name,
+distribution type and distribution parameters or available raw data.</dd>
+<dt><strong><code>method</code></strong> :&ensp;<code>str</code></dt>
+<dd>Type of the experimental design. The default is <code>'normal'</code>. Other
+option is <code>'sequential'</code>.</dd>
+<dt><strong><code>meta_Model</code></strong> :&ensp;<code>str</code></dt>
+<dd>Type of the meta_model.</dd>
+<dt><strong><code>sampling_method</code></strong> :&ensp;<code>str</code></dt>
+<dd>
+<p>Name of the sampling method for the experimental design. The following
+sampling methods are supported:</p>
+<ul>
+<li>random</li>
+<li>latin_hypercube</li>
+<li>sobol</li>
+<li>halton</li>
+<li>hammersley</li>
+<li>korobov</li>
+<li>chebyshev(FT)</li>
+<li>grid(FT)</li>
+<li>nested_grid(FT)</li>
+<li>user</li>
+</ul>
+</dd>
+<dt><strong><code>hdf5_file</code></strong> :&ensp;<code>str</code></dt>
+<dd>Name of the hdf5 file that contains the experimental design.</dd>
+<dt><strong><code>n_new_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of (initial) training points.</dd>
+<dt><strong><code>n_max_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of maximum training points.</dd>
+<dt><strong><code>mod_LOO_threshold</code></strong> :&ensp;<code>float</code></dt>
+<dd>The modified leave-one-out cross validation threshold where the
+sequential design stops.</dd>
+<dt><strong><code>tradeoff_scheme</code></strong> :&ensp;<code>str</code></dt>
+<dd>Trade-off scheme to assign weights to the exploration and exploitation
+scores in the sequential design.</dd>
+<dt><strong><code>n_canddidate</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of candidate training sets to calculate the scores for.</dd>
+<dt><strong><code>explore_method</code></strong> :&ensp;<code>str</code></dt>
+<dd>
+<p>Type of the exploration method for the sequential design. The following
+methods are supported:</p>
+<ul>
+<li>Voronoi</li>
+<li>random</li>
+<li>latin_hypercube</li>
+<li>LOOCV</li>
+<li>dual annealing</li>
+</ul>
+</dd>
+<dt><strong><code>exploit_method</code></strong> :&ensp;<code>str</code></dt>
+<dd>
+<p>Type of the exploitation method for the sequential design. The
+following methods are supported:</p>
+<ul>
+<li>BayesOptDesign</li>
+<li>BayesActDesign</li>
+<li>VarOptDesign</li>
+<li>alphabetic</li>
+<li>Space-filling</li>
+</ul>
+</dd>
+<dt><strong><code>util_func</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
+<dd>The utility function to be specified for the <code>exploit_method</code>. For the
+available utility functions see Note section.</dd>
+<dt><strong><code>n_cand_groups</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of candidate groups. Each group of candidate training sets will
+be evaluated separately in parallel.</dd>
+<dt><strong><code>n_replication</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of replications. Only for comparison. The default is 1.</dd>
+<dt><strong><code>post_snapshot</code></strong> :&ensp;<code>bool</code></dt>
+<dd>Whether to plot the posterior in the sequential design. The default is
+<code>False</code>.</dd>
+<dt><strong><code>step_snapshot</code></strong> :&ensp;<code>int</code></dt>
+<dd>The number of steps to plot the posterior in the sequential design. The
+default is 1.</dd>
+<dt><strong><code>max_a_post</code></strong> :&ensp;<code>list</code> or <code>array</code></dt>
+<dd>Maximum a posteriori of the posterior distribution, if known. The
+default is <code>[]</code>.</dd>
+<dt><strong><code>adapt_verbose</code></strong> :&ensp;<code>bool</code></dt>
+<dd>Whether to plot the model response vs that of metamodel for the new
+training point in the sequential design.</dd>
+</dl>
+<h2 id="note">Note</h2>
+<p>The following utility functions for the <strong>exploitation</strong> methods are
+supported:</p>
+<h4 id="bayesoptdesign-when-data-is-available">BayesOptDesign (when data is available)</h4>
+<ul>
+<li>DKL (Kullback-Leibler Divergence)</li>
+<li>DPP (D-Posterior-precision)</li>
+<li>APP (A-Posterior-precision)</li>
+</ul>
+<h4 id="varbasedoptdesign-when-data-is-not-available">VarBasedOptDesign -&gt; when data is not available</h4>
+<ul>
+<li>Entropy (Entropy/MMSE/active learning)</li>
+<li>EIGF (Expected Improvement for Global fit)</li>
+<li>LOOCV (Leave-one-out Cross Validation)</li>
+</ul>
+<h4 id="alphabetic">alphabetic</h4>
+<ul>
+<li>D-Opt (D-Optimality)</li>
+<li>A-Opt (A-Optimality)</li>
+<li>K-Opt (K-Optimality)</li>
+</ul></div>
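+<p>A minimal end-to-end sketch (hypothetical: the <code>inp</code> object and its
+settings are illustrative and assume the attributes this class reads, e.g.
+<code>Rosenblatt</code> and an empty <code>input_data</code> list):</p>
+<pre><code class="python"># Assumed Input-like object `inp` with one uniform marginal:
+#   inp.Rosenblatt = False
+#   inp.Marginals[0].name = &#39;k&#39;
+#   inp.Marginals[0].dist_type = &#39;unif&#39;
+#   inp.Marginals[0].parameters = [0.0, 1.0]
+#   inp.Marginals[0].input_data = []
+exp_design = ExpDesigns(inp, sampling_method=&#39;latin_hypercube&#39;)
+samples = exp_design.generate_ED(
+    n_samples=50, sampling_method=&#39;latin_hypercube&#39;, max_pce_deg=2
+    )
+print(samples.shape)  # (50, 1)</code></pre>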
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class ExpDesigns:
+    &#34;&#34;&#34;
+    This class generates samples from the prescribed marginals for the model
+    parameters using the `Input` object.
+
+    Attributes
+    ----------
+    Input : obj
+        Input object containing the parameter marginals, i.e. name,
+        distribution type and distribution parameters or available raw data.
+    method : str
+        Type of the experimental design. The default is `&#39;normal&#39;`. Other
+        option is `&#39;sequential&#39;`.
+    meta_Model : str
+        Type of the meta_model.
+    sampling_method : str
+        Name of the sampling method for the experimental design. The following
+        sampling methods are supported:
+
+        * random
+        * latin_hypercube
+        * sobol
+        * halton
+        * hammersley
+        * korobov
+        * chebyshev(FT)
+        * grid(FT)
+        * nested_grid(FT)
+        * user
+    hdf5_file : str
+        Name of the hdf5 file that contains the experimental design.
+    n_new_samples : int
+        Number of (initial) training points.
+    n_max_samples : int
+        Number of maximum training points.
+    mod_LOO_threshold : float
+        The modified leave-one-out cross validation threshold where the
+        sequential design stops.
+    tradeoff_scheme : str
+        Trade-off scheme to assign weights to the exploration and exploitation
+        scores in the sequential design.
+    n_canddidate : int
+        Number of candidate training sets to calculate the scores for.
+    explore_method : str
+        Type of the exploration method for the sequential design. The following
+        methods are supported:
+
+        * Voronoi
+        * random
+        * latin_hypercube
+        * LOOCV
+        * dual annealing
+    exploit_method : str
+        Type of the exploitation method for the sequential design. The
+        following methods are supported:
+
+        * BayesOptDesign
+        * BayesActDesign
+        * VarOptDesign
+        * alphabetic
+        * Space-filling
+    util_func : str or list
+        The utility function to be specified for the `exploit_method`. For the
+        available utility functions see Note section.
+    n_cand_groups : int
+        Number of candidate groups. Each group of candidate training sets will
+        be evaluated separately in parallel.
+    n_replication : int
+        Number of replications. Only for comparison. The default is 1.
+    post_snapshot : bool
+        Whether to plot the posterior in the sequential design. The default
+        is `False`.
+    step_snapshot : int
+        The number of steps to plot the posterior in the sequential design. The
+        default is 1.
+    max_a_post : list or array
+        Maximum a posteriori of the posterior distribution, if known. The
+        default is `[]`.
+    adapt_verbose : bool
+        Whether to plot the model response vs that of metamodel for the new
+        training point in the sequential design.
+
+    Note
+    ----------
+    The following utility functions for the **exploitation** methods are
+    supported:
+
+    #### BayesOptDesign (when data is available)
+    - DKL (Kullback-Leibler Divergence)
+    - DPP (D-Posterior-precision)
+    - APP (A-Posterior-precision)
+
+    #### VarBasedOptDesign (when data is not available)
+    - Entropy (Entropy/MMSE/active learning)
+    - EIGF (Expected Improvement for Global fit)
+    - LOOCV (Leave-one-out Cross Validation)
+
+    #### alphabetic
+    - D-Opt (D-Optimality)
+    - A-Opt (A-Optimality)
+    - K-Opt (K-Optimality)
+    &#34;&#34;&#34;
+
+    def __init__(self, Input, method=&#39;normal&#39;, meta_Model=&#39;pce&#39;,
+                 sampling_method=&#39;random&#39;, hdf5_file=None,
+                 n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16,
+                 tradeoff_scheme=None, n_canddidate=1, explore_method=&#39;random&#39;,
+                 exploit_method=&#39;Space-filling&#39;, util_func=&#39;Space-filling&#39;,
+                 n_cand_groups=4, n_replication=1, post_snapshot=False,
+                 step_snapshot=1, max_a_post=[], adapt_verbose=False):
+
+        self.InputObj = Input
+        self.method = method
+        self.meta_Model = meta_Model
+        self.sampling_method = sampling_method
+        self.hdf5_file = hdf5_file
+        self.n_new_samples = n_new_samples
+        self.n_max_samples = n_max_samples
+        self.mod_LOO_threshold = mod_LOO_threshold
+        self.explore_method = explore_method
+        self.exploit_method = exploit_method
+        self.util_func = util_func
+        self.tradeoff_scheme = tradeoff_scheme
+        self.n_canddidate = n_canddidate
+        self.n_cand_groups = n_cand_groups
+        self.n_replication = n_replication
+        self.post_snapshot = post_snapshot
+        self.step_snapshot = step_snapshot
+        self.max_a_post = max_a_post
+        self.adapt_verbose = adapt_verbose
+
+    # -------------------------------------------------------------------------
+    def generate_samples(self, n_samples, sampling_method=&#39;random&#39;,
+                         transform=False):
+        &#34;&#34;&#34;
+        Generates samples with the given sampling method.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+        sampling_method : str, optional
+            Sampling method. The default is `&#39;random&#39;`.
+        transform : bool, optional
+            Transformation via an isoprobabilistic transformation method. The
+            default is `False`.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            Generated samples from defined model input object.
+
+        &#34;&#34;&#34;
+        try:
+            samples = chaospy.generate_samples(
+                int(n_samples), domain=self.JDist, rule=sampling_method
+                )
+        except:
+            samples = self.JDist.resample(int(n_samples))
+
+        # Transform samples to the original space
+        if transform:
+            tr_samples = self.transform(samples.T)
+            return samples.T, tr_samples
+        else:
+            return samples.T
+
+    # -------------------------------------------------------------------------
+    def generate_ED(self, n_samples, sampling_method=&#39;random&#39;, transform=False,
+                    max_pce_deg=None):
+        &#34;&#34;&#34;
+        Generates experimental designs (training set) with the given method.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested training points.
+        sampling_method : str, optional
+            Sampling method. The default is `&#39;random&#39;`.
+        transform : bool, optional
+            Isoprobabilistic transformation. The default is `False`.
+        max_pce_deg : int, optional
+            Maximum PCE polynomial degree. The default is `None`.
+
+        Returns
+        -------
+        samples : array of shape (n_samples, n_params)
+            Selected training samples.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        self.ndim = len(Inputs.Marginals)
+        if not hasattr(self, &#39;n_init_samples&#39;):
+            self.n_init_samples = self.ndim + 1
+        n_samples = int(n_samples)
+
+        # Check if PCE or aPCE metamodel is selected.
+        if self.meta_Model.lower() == &#39;apce&#39;:
+            self.apce = True
+        else:
+            self.apce = False
+
+        # Check if input is given as dist or input_data.
+        if len(Inputs.Marginals[0].input_data):
+            self.input_data_given = True
+        else:
+            self.input_data_given = False
+
+        # Get the bounds if input_data are directly defined by user:
+        if self.input_data_given:
+            for i in range(self.ndim):
+                low_bound = np.min(Inputs.Marginals[i].input_data)
+                up_bound = np.max(Inputs.Marginals[i].input_data)
+                Inputs.Marginals[i].parameters = [low_bound, up_bound]
+
+        # Generate the samples based on requested method
+        self.raw_data, self.bound_tuples = self.init_param_space(max_pce_deg)
+
+        # Pass user-defined samples as ED
+        if sampling_method == &#39;user&#39;:
+            samples = self.X
+            self.n_samples = len(samples)
+
+        # Sample the distribution of parameters
+        elif self.input_data_given:
+            # Case II: Input values are directly given by the user.
+
+            if sampling_method == &#39;random&#39;:
+                samples = self.random_sampler(n_samples)
+
+            elif sampling_method == &#39;PCM&#39; or \
+                    sampling_method == &#39;LSCM&#39;:
+                samples = self.pcm_sampler(max_pce_deg)
+
+            else:
+                # Create ExpDesign in the actual space using chaospy
+                try:
+                    samples = chaospy.generate_samples(n_samples,
+                                                       domain=self.JDist,
+                                                       rule=sampling_method).T
+                except:
+                    samples = self.JDist.sample(n_samples)
+
+        elif not self.input_data_given:
+            # Case I: User passed known distributions
+            samples = chaospy.generate_samples(n_samples, domain=self.JDist,
+                                               rule=sampling_method).T
+
+        # Transform samples to the original space
+        if transform:
+            tr_samples = self.transform(samples)
+            return samples, tr_samples
+        else:
+            return samples
+
+    # -------------------------------------------------------------------------
+    def init_param_space(self, max_deg=None):
+        &#34;&#34;&#34;
+        Initializes parameter space.
+
+        Parameters
+        ----------
+        max_deg : int, optional
+            Maximum degree. The default is `None`.
+
+        Returns
+        -------
+        raw_data : array of shape (n_params, n_samples)
+            Raw data.
+        bound_tuples : list of tuples
+            A list containing lower and upper bounds of parameters.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        ndim = self.ndim
+        rosenblatt_flag = Inputs.Rosenblatt
+        mc_size = 50000
+
+        # Save parameter names
+        self.par_names = []
+        for parIdx in range(ndim):
+            self.par_names.append(Inputs.Marginals[parIdx].name)
+
+        # Create a multivariate probability distribution
+        if max_deg is not None:
+            JDist, poly_types = self.build_dist(rosenblatt=rosenblatt_flag)
+            self.JDist, self.poly_types = JDist, poly_types
+
+        if self.input_data_given:
+
+            self.MCSize = len(Inputs.Marginals[0].input_data)
+            self.raw_data = np.zeros((ndim, self.MCSize))
+
+            for parIdx in range(ndim):
+                # Save parameter names
+                try:
+                    self.raw_data[parIdx] = np.array(
+                        Inputs.Marginals[parIdx].input_data)
+                except:
+                    self.raw_data[parIdx] = self.JDist[parIdx].sample(mc_size)
+
+        else:
+            # Generate random samples based on parameter distributions
+            self.raw_data = chaospy.generate_samples(mc_size,
+                                                     domain=self.JDist)
+
+        # Create orthogonal polynomial coefficients if necessary
+        if self.apce and max_deg is not None and Inputs.poly_coeffs_flag:
+            self.polycoeffs = {}
+            for parIdx in tqdm(range(ndim), ascii=True,
+                               desc=&#34;Computing orth. polynomial coeffs&#34;):
+                poly_coeffs = apoly_construction(self.raw_data[parIdx],
+                                                 max_deg)
+                self.polycoeffs[f&#39;p_{parIdx+1}&#39;] = poly_coeffs
+
+        # Extract moments
+        for parIdx in range(ndim):
+            mu = np.mean(self.raw_data[parIdx])
+            std = np.std(self.raw_data[parIdx])
+            self.InputObj.Marginals[parIdx].moments = [mu, std]
+
+        # Generate the bounds based on given inputs for marginals
+        bound_tuples = []
+        for i in range(ndim):
+            if Inputs.Marginals[i].dist_type == &#39;unif&#39;:
+                low_bound, up_bound = Inputs.Marginals[i].parameters
+            else:
+                low_bound = np.min(self.raw_data[i])
+                up_bound = np.max(self.raw_data[i])
+
+            bound_tuples.append((low_bound, up_bound))
+
+        self.bound_tuples = tuple(bound_tuples)
+
+        return self.raw_data, self.bound_tuples
+
+    # -------------------------------------------------------------------------
+    def build_dist(self, rosenblatt):
+        &#34;&#34;&#34;
+        Creates the polynomial types to be passed to the univ_basis_vals
+        method of the MetaModel object.
+
+        Parameters
+        ----------
+        rosenblatt : bool
+            Rosenblatt transformation flag.
+
+        Returns
+        -------
+        orig_space_dist : object
+            A chaospy JDist object or a gaussian_kde object.
+        poly_types : list
+            List of polynomial types for the parameters.
+
+        &#34;&#34;&#34;
+        Inputs = self.InputObj
+        all_data = []
+        all_dist_types = []
+        orig_joints = []
+        poly_types = []
+
+        for parIdx in range(self.ndim):
+
+            if Inputs.Marginals[parIdx].dist_type is None:
+                data = Inputs.Marginals[parIdx].input_data
+                all_data.append(data)
+                dist_type = None
+            else:
+                dist_type = Inputs.Marginals[parIdx].dist_type
+                params = Inputs.Marginals[parIdx].parameters
+
+            if rosenblatt:
+                polytype = &#39;hermite&#39;
+                dist = chaospy.Normal()
+
+            elif dist_type is None:
+                polytype = &#39;arbitrary&#39;
+                dist = None
+
+            elif &#39;unif&#39; in dist_type.lower():
+                polytype = &#39;legendre&#39;
+                dist = chaospy.Uniform(lower=params[0], upper=params[1])
+
+            elif &#39;norm&#39; in dist_type.lower() and \
+                 &#39;log&#39; not in dist_type.lower():
+                polytype = &#39;hermite&#39;
+                dist = chaospy.Normal(mu=params[0], sigma=params[1])
+
+            elif &#39;gamma&#39; in dist_type.lower():
+                polytype = &#39;laguerre&#39;
+                dist = chaospy.Gamma(shape=params[0],
+                                     scale=params[1],
+                                     shift=params[2])
+
+            elif &#39;beta&#39; in dist_type.lower():
+                polytype = &#39;jacobi&#39;
+                dist = chaospy.Beta(alpha=params[0], beta=params[1],
+                                    lower=params[2], upper=params[3])
+
+            elif &#39;lognorm&#39; in dist_type.lower():
+                polytype = &#39;hermite&#39;
+                Mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
+                Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+                dist = chaospy.LogNormal(mu=Mu, sigma=Sigma)
+
+            elif &#39;expon&#39; in dist_type.lower():
+                polytype = &#39;arbitrary&#39;
+                dist = chaospy.Exponential(scale=params[0], shift=params[1])
+
+            elif &#39;weibull&#39; in dist_type.lower():
+                polytype = &#39;arbitrary&#39;
+                dist = chaospy.Weibull(shape=params[0], scale=params[1],
+                                       shift=params[2])
+
+            else:
+                message = (f&#34;DistType {dist_type} for parameter&#34;
+                           f&#34;{parIdx+1} is not available.&#34;)
+                raise ValueError(message)
+
+            if self.input_data_given or self.apce:
+                polytype = &#39;arbitrary&#39;
+
+            # Store dists and poly_types
+            orig_joints.append(dist)
+            poly_types.append(polytype)
+            all_dist_types.append(dist_type)
+
+        # Prepare final output to return
+        if None in all_dist_types:
+            # Naive approach: Fit a gaussian kernel to the provided data
+            Data = np.asarray(all_data)
+            orig_space_dist = st.gaussian_kde(Data)
+            self.prior_space = orig_space_dist
+        else:
+            orig_space_dist = chaospy.J(*orig_joints)
+            self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000))
+
+        return orig_space_dist, poly_types
+
+    # -------------------------------------------------------------------------
+    def random_sampler(self, n_samples):
+        &#34;&#34;&#34;
+        Samples the given raw data randomly.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            The sampling locations in the input space.
+
+        &#34;&#34;&#34;
+        samples = np.zeros((n_samples, self.ndim))
+
+        for idxPa in range(self.ndim):
+            # input_data given
+            sample_size = len(self.raw_data[idxPa])
+            randIdx = np.random.randint(0, sample_size, n_samples)
+            samples[:, idxPa] = self.raw_data[idxPa, randIdx]
+
+        return samples
+
+    # -------------------------------------------------------------------------
+    def pcm_sampler(self, max_deg):
+        &#34;&#34;&#34;
+        Generates collocation points based on the roots of the orthogonal
+        polynomials up to the maximum degree.
+
+        Parameters
+        ----------
+        max_deg : int
+            Maximum degree defined by user.
+
+        Returns
+        -------
+        opt_col_points: array of shape (n_samples, n_params)
+            Collocation points.
+
+        &#34;&#34;&#34;
+
+        raw_data = self.raw_data
+
+        # Guess the closest degree to self.n_samples
+        def M_uptoMax(deg):
+            result = []
+            for d in range(1, deg+1):
+                result.append(math.factorial(self.ndim+d) //
+                              (math.factorial(self.ndim) * math.factorial(d)))
+            return np.array(result)
+
+        guess_Deg = np.where(M_uptoMax(max_deg) &gt; self.n_samples)[0][0]
+
+        c_points = np.zeros((guess_Deg+1, self.ndim))
+
+        def PolynomialPa(parIdx):
+            return apoly_construction(self.raw_data[parIdx], max_deg)
+
+        for i in range(self.ndim):
+            poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
+            c_points[:, i] = np.trim_zeros(np.roots(poly_coeffs))
+
+        #  Construction of optimal integration points
+        Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
+        sort_dig_unique_combos = np.array(list(filter(lambda x: x, Prod)))
+
+        # Rank the collocation points by distance to the mean
+        Temp = np.empty(shape=[0, guess_Deg+1])
+        for j in range(self.ndim):
+            s = abs(c_points[:, j]-np.mean(raw_data[j]))
+            Temp = np.append(Temp, [s], axis=0)
+        temp = Temp.T
+
+        index_CP = np.sort(temp, axis=0)
+        sort_cpoints = np.empty((0, guess_Deg+1))
+
+        for j in range(self.ndim):
+            sort_cp = c_points[index_CP[:, j], j]
+            sort_cpoints = np.vstack((sort_cpoints, sort_cp))
+
+        # Mapping of Combination to Cpoint Combination
+        sort_unique_combos = np.empty(shape=[0, self.ndim])
+        for i in range(len(sort_dig_unique_combos)):
+            sort_un_comb = []
+            for j in range(self.ndim):
+                SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j]-1]
+                sort_un_comb.append(SortUC)
+                sort_uni_comb = np.asarray(sort_un_comb)
+            sort_unique_combos = np.vstack((sort_unique_combos, sort_uni_comb))
+
+        # Output the collocation points
+        if self.sampling_method.lower() == &#39;lscm&#39;:
+            opt_col_points = sort_unique_combos
+        else:
+            opt_col_points = sort_unique_combos[0:self.n_samples]
+
+        return opt_col_points
+
+    # -------------------------------------------------------------------------
+    def transform(self, X, params=None):
+        &#34;&#34;&#34;
+        Transform the samples via either a Rosenblatt or an isoprobabilistic
+        transformation.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples,n_params)
+            Samples to be transformed.
+        params : list, optional
+            Distribution parameters for the transformation space. Only used
+            for the gamma (Laguerre) case. The default is `None`.
+
+        Returns
+        -------
+        tr_X: array of shape (n_samples,n_params)
+            Transformed samples.
+
+        &#34;&#34;&#34;
+        if self.InputObj.Rosenblatt:
+            self.origJDist, _ = self.build_dist(False)
+            tr_X = self.origJDist.inv(self.JDist.fwd(X.T)).T
+        else:
+            # Transform samples via an isoprobabilistic transformation
+            n_samples, n_params = X.shape
+            Inputs = self.InputObj
+            origJDist = self.JDist
+            poly_types = self.poly_types
+
+            disttypes = []
+            for par_i in range(n_params):
+                disttypes.append(Inputs.Marginals[par_i].dist_type)
+
+            # Pass non-transformed X, if arbitrary PCE is selected.
+            if None in disttypes or self.input_data_given or self.apce:
+                return X
+
+            cdfx = np.zeros((X.shape))
+            tr_X = np.zeros((X.shape))
+
+            for par_i in range(n_params):
+
+                # Extract the parameters of the original space
+                disttype = disttypes[par_i]
+                if disttype is not None:
+                    dist = origJDist[par_i]
+                else:
+                    dist = None
+                polytype = poly_types[par_i]
+                cdf = np.vectorize(lambda x: dist.cdf(x))
+
+                # Extract the parameters of the transformation space based on
+                # polyType
+                if polytype == &#39;legendre&#39; or disttype == &#39;uniform&#39;:
+                    # Generate Y_Dists based
+                    params_Y = [-1, 1]
+                    dist_Y = st.uniform(loc=params_Y[0],
+                                        scale=params_Y[1]-params_Y[0])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == &#39;hermite&#39; or disttype == &#39;norm&#39;:
+                    params_Y = [0, 1]
+                    dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == &#39;laguerre&#39; or disttype == &#39;gamma&#39;:
+                    params_Y = [1, params[1]]
+                    # st.gamma needs the shape parameter &#39;a&#39;; shape 1
+                    # matches the Laguerre weight exp(-x)
+                    dist_Y = st.gamma(a=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                # Compute CDF_x(X)
+                cdfx[:, par_i] = cdf(X[:, par_i])
+
+                # Compute invCDF_y(cdfx)
+                tr_X[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+        return tr_X
+
+    # -------------------------------------------------------------------------
+    def fit_dist(self, y):
+        &#34;&#34;&#34;
+        Fits the known distributions to the data.
+
+        Parameters
+        ----------
+        y : array of shape (n_samples)
+            Data to be fitted.
+
+        Returns
+        -------
+        sel_dist: string
+            Selected distribution type from `lognorm`, `norm`, `uniform` or
+            `expon`.
+        params : list
+            Parameters corresponding to the selected distribution type.
+
+        &#34;&#34;&#34;
+        dist_results = []
+        params = {}
+        dist_names = [&#39;lognorm&#39;, &#39;norm&#39;, &#39;uniform&#39;, &#39;expon&#39;]
+        for dist_name in dist_names:
+            dist = getattr(st, dist_name)
+
+            try:
+                if dist_name != &#39;lognorm&#39;:
+                    param = dist.fit(y)
+                else:
+                    param = dist.fit(np.exp(y), floc=0)
+            except Exception:
+                param = dist.fit(y)
+
+            params[dist_name] = param
+            # Applying the Kolmogorov-Smirnov test
+            D, p = st.kstest(y, dist_name, args=param)
+            dist_results.append((dist_name, D))
+
+        # select the best fitted distribution
+        sel_dist, D = (min(dist_results, key=lambda item: item[1]))
+
+        if sel_dist == &#39;uniform&#39;:
+            params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] +
+                                params[sel_dist][1]]
+        if D &lt; 0.05:
+            return sel_dist, params[sel_dist]
+        else:
+            return None, None</code></pre>
+</details>
+<h3>Methods</h3>
+<dl>
+<dt id="exp_designs.ExpDesigns.generate_samples"><code class="name flex">
+<span>def <span class="ident">generate_samples</span></span>(<span>self, n_samples, sampling_method='random', transform=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generates samples with given sampling method</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of requested samples.</dd>
+<dt><strong><code>sampling_method</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Sampling method. The default is <code>'random'</code>.</dd>
+<dt><strong><code>transform</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Transformation via an isoprobabilistic transformation method. The
+default is <code>False</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Generated samples from defined model input object.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def generate_samples(self, n_samples, sampling_method=&#39;random&#39;,
+                     transform=False):
+    &#34;&#34;&#34;
+    Generates samples with the given sampling method.
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of requested samples.
+    sampling_method : str, optional
+        Sampling method. The default is `&#39;random&#39;`.
+    transform : bool, optional
+        Transformation via an isoprobabilistic transformation method. The
+        default is `False`.
+
+    Returns
+    -------
+    samples: array of shape (n_samples, n_params)
+        Generated samples from defined model input object.
+
+    &#34;&#34;&#34;
+    try:
+        samples = chaospy.generate_samples(
+            int(n_samples), domain=self.JDist, rule=sampling_method
+            )
+    except Exception:
+        samples = self.JDist.resample(int(n_samples))
+
+    # Transform samples to the original space
+    if transform:
+        tr_samples = self.transform(samples.T)
+        return samples.T, tr_samples
+    else:
+        return samples.T</code></pre>
+</details>
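+<p>A minimal usage sketch (hypothetical names; it assumes <code>exp_design</code> is an
+<code>ExpDesigns</code> instance whose <code>JDist</code> has already been set up, e.g. via
+<code>generate_ED</code>):</p>
+<pre><code class="python"># Draw 200 quasi-random samples; any rule accepted by chaospy works here.
+samples = exp_design.generate_samples(200, sampling_method=&#39;sobol&#39;)
+print(samples.shape)  # (200, n_params)
+
+# With transform=True, the standardized samples and their transformed
+# counterparts are both returned.
+std_samples, tr_samples = exp_design.generate_samples(200, transform=True)</code></pre>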
+</dd>
+<dt id="exp_designs.ExpDesigns.generate_ED"><code class="name flex">
+<span>def <span class="ident">generate_ED</span></span>(<span>self, n_samples, sampling_method='random', transform=False, max_pce_deg=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generates experimental designs (training set) with the given method.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of requested training points.</dd>
+<dt><strong><code>sampling_method</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Sampling method. The default is <code>'random'</code>.</dd>
+<dt><strong><code>transform</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Isoprobabilistic transformation. The default is <code>False</code>.</dd>
+<dt><strong><code>max_pce_deg</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Maximum PCE polynomial degree. The default is <code>None</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Selected training samples.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def generate_ED(self, n_samples, sampling_method=&#39;random&#39;, transform=False,
+                max_pce_deg=None):
+    &#34;&#34;&#34;
+    Generates experimental designs (training set) with the given method.
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of requested training points.
+    sampling_method : str, optional
+        Sampling method. The default is `&#39;random&#39;`.
+    transform : bool, optional
+        Isoprobabilistic transformation. The default is `False`.
+    max_pce_deg : int, optional
+        Maximum PCE polynomial degree. The default is `None`.
+
+    Returns
+    -------
+    samples : array of shape (n_samples, n_params)
+        Selected training samples.
+
+    &#34;&#34;&#34;
+    Inputs = self.InputObj
+    self.ndim = len(Inputs.Marginals)
+    if not hasattr(self, &#39;n_init_samples&#39;):
+        self.n_init_samples = self.ndim + 1
+    n_samples = int(n_samples)
+
+    # Check if PCE or aPCE metamodel is selected.
+    if self.meta_Model.lower() == &#39;apce&#39;:
+        self.apce = True
+    else:
+        self.apce = False
+
+    # Check if input is given as dist or input_data.
+    if len(Inputs.Marginals[0].input_data):
+        self.input_data_given = True
+    else:
+        self.input_data_given = False
+
+    # Get the bounds if input_data are directly defined by user:
+    if self.input_data_given:
+        for i in range(self.ndim):
+            low_bound = np.min(Inputs.Marginals[i].input_data)
+            up_bound = np.max(Inputs.Marginals[i].input_data)
+            Inputs.Marginals[i].parameters = [low_bound, up_bound]
+
+    # Initialize the parameter space (raw data and bounds)
+    self.raw_data, self.bound_tuples = self.init_param_space(max_pce_deg)
+
+    # Pass user-defined samples as ED
+    if sampling_method == &#39;user&#39;:
+        samples = self.X
+        self.n_samples = len(samples)
+
+    # Sample the distribution of parameters
+    elif self.input_data_given:
+        # Case II: Input values are directly given by the user.
+
+        if sampling_method == &#39;random&#39;:
+            samples = self.random_sampler(n_samples)
+
+        elif sampling_method == &#39;PCM&#39; or \
+                sampling_method == &#39;LSCM&#39;:
+            samples = self.pcm_sampler(max_pce_deg)
+
+        else:
+            # Create ExpDesign in the actual space using chaospy
+            try:
+                samples = chaospy.generate_samples(n_samples,
+                                                   domain=self.JDist,
+                                                   rule=sampling_method).T
+            except Exception:
+                samples = self.JDist.sample(n_samples)
+
+    elif not self.input_data_given:
+        # Case I: User passed known distributions
+        samples = chaospy.generate_samples(n_samples, domain=self.JDist,
+                                           rule=sampling_method).T
+
+    # Transform samples to the original space
+    if transform:
+        tr_samples = self.transform(samples)
+        return samples, tr_samples
+    else:
+        return samples</code></pre>
+</details>
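+<p>A hedged example of building a training set (assumed setup; <code>exp_design</code>
+and its input object are placeholders):</p>
+<pre><code class="python"># Latin-hypercube training design of 50 points, limited to PCE degree 3.
+X = exp_design.generate_ED(50, sampling_method=&#39;latin_hypercube&#39;,
+                           max_pce_deg=3)
+
+# Additionally request the isoprobabilistically transformed samples.
+X, X_tr = exp_design.generate_ED(50, sampling_method=&#39;latin_hypercube&#39;,
+                                 transform=True, max_pce_deg=3)</code></pre>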
+</dd>
+<dt id="exp_designs.ExpDesigns.init_param_space"><code class="name flex">
+<span>def <span class="ident">init_param_space</span></span>(<span>self, max_deg=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Initializes parameter space.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>max_deg</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Maximum degree. The default is <code>None</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>raw_data</code></strong> :&ensp;<code>array</code> of <code>shape (n_params, n_samples)</code></dt>
+<dd>Raw data.</dd>
+<dt><strong><code>bound_tuples</code></strong> :&ensp;<code>list</code> of <code>tuples</code></dt>
+<dd>A list containing lower and upper bounds of parameters.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def init_param_space(self, max_deg=None):
+    &#34;&#34;&#34;
+    Initializes parameter space.
+
+    Parameters
+    ----------
+    max_deg : int, optional
+        Maximum degree. The default is `None`.
+
+    Returns
+    -------
+    raw_data : array of shape (n_params, n_samples)
+        Raw data.
+    bound_tuples : list of tuples
+        A list containing lower and upper bounds of parameters.
+
+    &#34;&#34;&#34;
+    Inputs = self.InputObj
+    ndim = self.ndim
+    rosenblatt_flag = Inputs.Rosenblatt
+    mc_size = 50000
+
+    # Save parameter names
+    self.par_names = []
+    for parIdx in range(ndim):
+        self.par_names.append(Inputs.Marginals[parIdx].name)
+
+    # Create a multivariate probability distribution
+    if max_deg is not None:
+        JDist, poly_types = self.build_dist(rosenblatt=rosenblatt_flag)
+        self.JDist, self.poly_types = JDist, poly_types
+
+    if self.input_data_given:
+
+        self.MCSize = len(Inputs.Marginals[0].input_data)
+        self.raw_data = np.zeros((ndim, self.MCSize))
+
+        for parIdx in range(ndim):
+            # Store the given input data; fall back to sampling JDist
+            try:
+                self.raw_data[parIdx] = np.array(
+                    Inputs.Marginals[parIdx].input_data)
+            except Exception:
+                self.raw_data[parIdx] = self.JDist[parIdx].sample(mc_size)
+
+    else:
+        # Generate random samples based on parameter distributions
+        self.raw_data = chaospy.generate_samples(mc_size,
+                                                 domain=self.JDist)
+
+    # Create orthogonal polynomial coefficients if necessary
+    if self.apce and max_deg is not None and Inputs.poly_coeffs_flag:
+        self.polycoeffs = {}
+        for parIdx in tqdm(range(ndim), ascii=True,
+                           desc=&#34;Computing orth. polynomial coeffs&#34;):
+            poly_coeffs = apoly_construction(self.raw_data[parIdx],
+                                             max_deg)
+            self.polycoeffs[f&#39;p_{parIdx+1}&#39;] = poly_coeffs
+
+    # Extract moments
+    for parIdx in range(ndim):
+        mu = np.mean(self.raw_data[parIdx])
+        std = np.std(self.raw_data[parIdx])
+        self.InputObj.Marginals[parIdx].moments = [mu, std]
+
+    # Generate the bounds based on given inputs for marginals
+    bound_tuples = []
+    for i in range(ndim):
+        if Inputs.Marginals[i].dist_type == &#39;unif&#39;:
+            low_bound, up_bound = Inputs.Marginals[i].parameters
+        else:
+            low_bound = np.min(self.raw_data[i])
+            up_bound = np.max(self.raw_data[i])
+
+        bound_tuples.append((low_bound, up_bound))
+
+    self.bound_tuples = tuple(bound_tuples)
+
+    return self.raw_data, self.bound_tuples</code></pre>
+</details>
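+<p>The bound logic above reduces to a per-parameter min/max over the Monte Carlo
+raw data, except for uniform marginals, whose user-given parameters are taken
+directly. A standalone sketch with assumed stand-in data:</p>
+<pre><code class="python">import numpy as np
+
+raw_data = np.random.randn(2, 50000)  # shape (n_params, mc_size), assumed
+bound_tuples = tuple((row.min(), row.max()) for row in raw_data)
+moments = [(row.mean(), row.std()) for row in raw_data]</code></pre>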
+</dd>
+<dt id="exp_designs.ExpDesigns.build_dist"><code class="name flex">
+<span>def <span class="ident">build_dist</span></span>(<span>self, rosenblatt)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Creates the polynomial types to be passed to univ_basis_vals method of
+the MetaModel object.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>rosenblatt</code></strong> :&ensp;<code>bool</code></dt>
+<dd>Rosenblatt transformation flag.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>orig_space_dist</code></strong> :&ensp;<code>object</code></dt>
+<dd>A chaospy JDist object or a gaussian_kde object.</dd>
+<dt><strong><code>poly_types</code></strong> :&ensp;<code>list</code></dt>
+<dd>List of polynomial types for the parameters.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def build_dist(self, rosenblatt):
+    &#34;&#34;&#34;
+    Creates the polynomial types to be passed to univ_basis_vals method of
+    the MetaModel object.
+
+    Parameters
+    ----------
+    rosenblatt : bool
+        Rosenblatt transformation flag.
+
+    Returns
+    -------
+    orig_space_dist : object
+        A chaospy JDist object or a gaussian_kde object.
+    poly_types : list
+        List of polynomial types for the parameters.
+
+    &#34;&#34;&#34;
+    Inputs = self.InputObj
+    all_data = []
+    all_dist_types = []
+    orig_joints = []
+    poly_types = []
+
+    for parIdx in range(self.ndim):
+
+        if Inputs.Marginals[parIdx].dist_type is None:
+            data = Inputs.Marginals[parIdx].input_data
+            all_data.append(data)
+            dist_type = None
+        else:
+            dist_type = Inputs.Marginals[parIdx].dist_type
+            params = Inputs.Marginals[parIdx].parameters
+
+        if rosenblatt:
+            polytype = &#39;hermite&#39;
+            dist = chaospy.Normal()
+
+        elif dist_type is None:
+            polytype = &#39;arbitrary&#39;
+            dist = None
+
+        elif &#39;unif&#39; in dist_type.lower():
+            polytype = &#39;legendre&#39;
+            dist = chaospy.Uniform(lower=params[0], upper=params[1])
+
+        elif &#39;norm&#39; in dist_type.lower() and \
+             &#39;log&#39; not in dist_type.lower():
+            polytype = &#39;hermite&#39;
+            dist = chaospy.Normal(mu=params[0], sigma=params[1])
+
+        elif &#39;gamma&#39; in dist_type.lower():
+            polytype = &#39;laguerre&#39;
+            dist = chaospy.Gamma(shape=params[0],
+                                 scale=params[1],
+                                 shift=params[2])
+
+        elif &#39;beta&#39; in dist_type.lower():
+            polytype = &#39;jacobi&#39;
+            dist = chaospy.Beta(alpha=params[0], beta=params[1],
+                                lower=params[2], upper=params[3])
+
+        elif &#39;lognorm&#39; in dist_type.lower():
+            polytype = &#39;hermite&#39;
+            Mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
+            Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+            dist = chaospy.LogNormal(mu=Mu, sigma=Sigma)
+
+        elif &#39;expon&#39; in dist_type.lower():
+            polytype = &#39;arbitrary&#39;
+            dist = chaospy.Exponential(scale=params[0], shift=params[1])
+
+        elif &#39;weibull&#39; in dist_type.lower():
+            polytype = &#39;arbitrary&#39;
+            dist = chaospy.Weibull(shape=params[0], scale=params[1],
+                                   shift=params[2])
+
+        else:
+            message = (f&#34;DistType {dist_type} for parameter&#34;
+                       f&#34; {parIdx+1} is not available.&#34;)
+            raise ValueError(message)
+
+        if self.input_data_given or self.apce:
+            polytype = &#39;arbitrary&#39;
+
+        # Store dists and poly_types
+        orig_joints.append(dist)
+        poly_types.append(polytype)
+        all_dist_types.append(dist_type)
+
+    # Prepare final output to return
+    if None in all_dist_types:
+        # Naive approach: fit a Gaussian KDE to the provided data
+        Data = np.asarray(all_data)
+        orig_space_dist = st.gaussian_kde(Data)
+        self.prior_space = orig_space_dist
+    else:
+        orig_space_dist = chaospy.J(*orig_joints)
+        self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000))
+
+    return orig_space_dist, poly_types</code></pre>
+</details>
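+<p>For quick reference, the distribution-to-polynomial mapping implemented by the
+<code>elif</code> chain above (the Wiener-Askey scheme, with <code>&#39;arbitrary&#39;</code> as the
+data-driven fallback) can be summarized as follows; under the Rosenblatt
+transformation all parameters use <code>&#39;hermite&#39;</code>:</p>
+<pre><code class="python"># Summary only; build_dist additionally forces &#39;arbitrary&#39; whenever raw
+# input data are given or an aPCE metamodel is selected.
+POLY_TYPE = {
+    &#39;uniform&#39;: &#39;legendre&#39;,
+    &#39;norm&#39;: &#39;hermite&#39;,
+    &#39;gamma&#39;: &#39;laguerre&#39;,
+    &#39;beta&#39;: &#39;jacobi&#39;,
+    &#39;lognorm&#39;: &#39;hermite&#39;,  # via a moment-matched normal
+    &#39;expon&#39;: &#39;arbitrary&#39;,
+    &#39;weibull&#39;: &#39;arbitrary&#39;,
+}</code></pre>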
+</dd>
+<dt id="exp_designs.ExpDesigns.random_sampler"><code class="name flex">
+<span>def <span class="ident">random_sampler</span></span>(<span>self, n_samples)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Samples the given raw data randomly.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of requested samples.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>The sampling locations in the input space.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def random_sampler(self, n_samples):
+    &#34;&#34;&#34;
+    Samples the given raw data randomly.
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of requested samples.
+
+    Returns
+    -------
+    samples: array of shape (n_samples, n_params)
+        The sampling locations in the input space.
+
+    &#34;&#34;&#34;
+    samples = np.zeros((n_samples, self.ndim))
+
+    for idxPa in range(self.ndim):
+        # input_data given
+        sample_size = len(self.raw_data[idxPa])
+        randIdx = np.random.randint(0, sample_size, n_samples)
+        samples[:, idxPa] = self.raw_data[idxPa, randIdx]
+
+    return samples</code></pre>
+</details>
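+<p>Because the bootstrap indices are drawn independently for each parameter, this
+resampling reproduces the marginals of the raw data but not their joint
+correlation. A minimal, self-contained illustration with synthetic data:</p>
+<pre><code class="python">import numpy as np
+
+# Two strongly correlated parameters, shape (n_params, mc_size)
+raw = np.random.multivariate_normal([0, 0], [[1, .9], [.9, 1]], 5000).T
+idx = np.random.randint(0, 5000, (2, 5000))  # independent index draws
+resampled = np.vstack((raw[0, idx[0]], raw[1, idx[1]]))
+print(np.corrcoef(raw)[0, 1])        # ~0.9
+print(np.corrcoef(resampled)[0, 1])  # ~0.0</code></pre>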
+</dd>
+<dt id="exp_designs.ExpDesigns.pcm_sampler"><code class="name flex">
+<span>def <span class="ident">pcm_sampler</span></span>(<span>self, max_deg)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Generates collocation points based on the root of the polynomial
+degrees.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>max_deg</code></strong> :&ensp;<code>int</code></dt>
+<dd>Maximum degree defined by user.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>opt_col_points</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Collocation points.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def pcm_sampler(self, max_deg):
+    &#34;&#34;&#34;
+    Generates collocation points based on the root of the polynomial
+    degrees.
+
+    Parameters
+    ----------
+    max_deg : int
+        Maximum degree defined by user.
+
+    Returns
+    -------
+    opt_col_points: array of shape (n_samples, n_params)
+        Collocation points.
+
+    &#34;&#34;&#34;
+
+    raw_data = self.raw_data
+
+    # Guess the closest degree to self.n_samples
+    def M_uptoMax(deg):
+        result = []
+        for d in range(1, deg+1):
+            result.append(math.factorial(self.ndim+d) //
+                          (math.factorial(self.ndim) * math.factorial(d)))
+        return np.array(result)
+
+    guess_Deg = np.where(M_uptoMax(max_deg) &gt; self.n_samples)[0][0]
+
+    c_points = np.zeros((guess_Deg+1, self.ndim))
+
+    def PolynomialPa(parIdx):
+        return apoly_construction(self.raw_data[parIdx], max_deg)
+
+    for i in range(self.ndim):
+        poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
+        c_points[:, i] = np.trim_zeros(np.roots(poly_coeffs))
+
+    #  Construction of optimal integration points
+    Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
+    sort_dig_unique_combos = np.array(list(filter(lambda x: x, Prod)))
+
+    # Rank the collocation points by their distance from the mean
+    Temp = np.empty(shape=[0, guess_Deg+1])
+    for j in range(self.ndim):
+        s = abs(c_points[:, j]-np.mean(raw_data[j]))
+        Temp = np.append(Temp, [s], axis=0)
+    temp = Temp.T
+
+    # argsort (not sort) is needed here: the result is used as an
+    # integer index into c_points below
+    index_CP = np.argsort(temp, axis=0)
+    sort_cpoints = np.empty((0, guess_Deg+1))
+
+    for j in range(self.ndim):
+        sort_cp = c_points[index_CP[:, j], j]
+        sort_cpoints = np.vstack((sort_cpoints, sort_cp))
+
+    # Mapping of Combination to Cpoint Combination
+    sort_unique_combos = np.empty(shape=[0, self.ndim])
+    for i in range(len(sort_dig_unique_combos)):
+        sort_un_comb = []
+        for j in range(self.ndim):
+            SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j]-1]
+            sort_un_comb.append(SortUC)
+            sort_uni_comb = np.asarray(sort_un_comb)
+        sort_unique_combos = np.vstack((sort_unique_combos, sort_uni_comb))
+
+    # Output the collocation points
+    if self.sampling_method.lower() == &#39;lscm&#39;:
+        opt_col_points = sort_unique_combos
+    else:
+        opt_col_points = sort_unique_combos[0:self.n_samples]
+
+    return opt_col_points</code></pre>
+</details>
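+<p>The helper <code>M_uptoMax</code> above computes the size of a total-degree polynomial
+basis, <code>M(d) = (ndim + d)! / (ndim! * d!)</code>, and the sampler picks the smallest
+degree whose basis size exceeds <code>n_samples</code>. An equivalent sketch:</p>
+<pre><code class="python">from math import comb
+
+ndim, max_deg, n_samples = 3, 5, 20  # assumed example values
+sizes = [comb(ndim + d, d) for d in range(1, max_deg + 1)]
+guess_deg = next(i for i, m in enumerate(sizes) if m &gt; n_samples)</code></pre>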
+</dd>
+<dt id="exp_designs.ExpDesigns.transform"><code class="name flex">
+<span>def <span class="ident">transform</span></span>(<span>self, X, params=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Transform the samples via either a Rosenblatt or an isoprobabilistic
+transformation.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>X</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,n_params)</code></dt>
+<dd>Samples to be transformed.</dd>
+<dt><strong><code>params</code></strong> :&ensp;<code>list</code>, optional</dt>
+<dd>Distribution parameters for the transformation space; only used for the
+gamma (Laguerre) case. The default is <code>None</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>tr_X</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,n_params)</code></dt>
+<dd>Transformed samples.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def transform(self, X, params=None):
+    &#34;&#34;&#34;
+    Transform the samples via either a Rosenblatt or an isoprobabilistic
+    transformation.
+
+    Parameters
+    ----------
+    X : array of shape (n_samples,n_params)
+        Samples to be transformed.
+    params : list, optional
+        Distribution parameters for the transformation space. Only used
+        for the gamma (Laguerre) case. The default is `None`.
+
+    Returns
+    -------
+    tr_X: array of shape (n_samples,n_params)
+        Transformed samples.
+
+    &#34;&#34;&#34;
+    if self.InputObj.Rosenblatt:
+        self.origJDist, _ = self.build_dist(False)
+        tr_X = self.origJDist.inv(self.JDist.fwd(X.T)).T
+    else:
+        # Transform samples via an isoprobabilistic transformation
+        n_samples, n_params = X.shape
+        Inputs = self.InputObj
+        origJDist = self.JDist
+        poly_types = self.poly_types
+
+        disttypes = []
+        for par_i in range(n_params):
+            disttypes.append(Inputs.Marginals[par_i].dist_type)
+
+        # Pass non-transformed X, if arbitrary PCE is selected.
+        if None in disttypes or self.input_data_given or self.apce:
+            return X
+
+        cdfx = np.zeros((X.shape))
+        tr_X = np.zeros((X.shape))
+
+        for par_i in range(n_params):
+
+            # Extract the parameters of the original space
+            disttype = disttypes[par_i]
+            if disttype is not None:
+                dist = origJDist[par_i]
+            else:
+                dist = None
+            polytype = poly_types[par_i]
+            cdf = np.vectorize(lambda x: dist.cdf(x))
+
+            # Extract the parameters of the transformation space based on
+            # polyType
+            if polytype == &#39;legendre&#39; or disttype == &#39;uniform&#39;:
+                # Generate Y_Dists based
+                params_Y = [-1, 1]
+                dist_Y = st.uniform(loc=params_Y[0],
+                                    scale=params_Y[1]-params_Y[0])
+                inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+            elif polytype == &#39;hermite&#39; or disttype == &#39;norm&#39;:
+                params_Y = [0, 1]
+                dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+                inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+            elif polytype == &#39;laguerre&#39; or disttype == &#39;gamma&#39;:
+                params_Y = [1, params[1]]
+                # st.gamma needs the shape parameter &#39;a&#39;; shape 1
+                # matches the Laguerre weight exp(-x)
+                dist_Y = st.gamma(a=params_Y[0], scale=params_Y[1])
+                inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+            # Compute CDF_x(X)
+            cdfx[:, par_i] = cdf(X[:, par_i])
+
+            # Compute invCDF_y(cdfx)
+            tr_X[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+    return tr_X</code></pre>
+</details>
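+<p>In the non-Rosenblatt branch, the per-parameter map is the classic
+isoprobabilistic transform <code>u = invCDF_Y(CDF_X(x))</code>. A minimal sketch for a
+normal marginal mapped to the standard-normal (Hermite) space:</p>
+<pre><code class="python">import scipy.stats as st
+
+x = st.norm(loc=5, scale=2).rvs(1000)        # original-space samples
+u = st.norm(0, 1).ppf(st.norm(5, 2).cdf(x))  # invCDF_Y(CDF_X(x))
+# u is (approximately) standard normal</code></pre>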
+</dd>
+<dt id="exp_designs.ExpDesigns.fit_dist"><code class="name flex">
+<span>def <span class="ident">fit_dist</span></span>(<span>self, y)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Fits the known distributions to the data.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>y</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples)</code></dt>
+<dd>Data to be fitted.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>sel_dist</code></strong> :&ensp;<code>string</code></dt>
+<dd>Selected distribution type from <code>lognorm</code>, <code>norm</code>, <code>uniform</code> or
+<code>expon</code>.</dd>
+<dt><strong><code>params</code></strong> :&ensp;<code>list</code></dt>
+<dd>Parameters corresponding to the selected distribution type.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def fit_dist(self, y):
+    &#34;&#34;&#34;
+    Fits the known distributions to the data.
+
+    Parameters
+    ----------
+    y : array of shape (n_samples)
+        Data to be fitted.
+
+    Returns
+    -------
+    sel_dist: string
+        Selected distribution type from `lognorm`, `norm`, `uniform` or
+        `expon`.
+    params : list
+        Parameters corresponding to the selected distribution type.
+
+    &#34;&#34;&#34;
+    dist_results = []
+    params = {}
+    dist_names = [&#39;lognorm&#39;, &#39;norm&#39;, &#39;uniform&#39;, &#39;expon&#39;]
+    for dist_name in dist_names:
+        dist = getattr(st, dist_name)
+
+        try:
+            if dist_name != &#39;lognorm&#39;:
+                param = dist.fit(y)
+            else:
+                param = dist.fit(np.exp(y), floc=0)
+        except Exception:
+            param = dist.fit(y)
+
+        params[dist_name] = param
+        # Applying the Kolmogorov-Smirnov test
+        D, p = st.kstest(y, dist_name, args=param)
+        dist_results.append((dist_name, D))
+
+    # select the best fitted distribution
+    sel_dist, D = (min(dist_results, key=lambda item: item[1]))
+
+    if sel_dist == &#39;uniform&#39;:
+        params[sel_dist] = [params[sel_dist][0], params[sel_dist][0] +
+                            params[sel_dist][1]]
+    if D &lt; 0.05:
+        return sel_dist, params[sel_dist]
+    else:
+        return None, None</code></pre>
+</details>
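+<p>Note that the <code>D &lt; 0.05</code> acceptance check is applied to the
+Kolmogorov-Smirnov statistic itself, not to the p-value. A hedged usage sketch
+with synthetic data (<code>exp_design</code> is an assumed instance):</p>
+<pre><code class="python">import scipy.stats as st
+
+y = st.norm(0, 1).rvs(500)
+# Expected outcome: sel_dist == &#39;norm&#39; with its fitted parameters, or
+# (None, None) if no candidate passes the threshold.
+sel_dist, params = exp_design.fit_dist(y)</code></pre>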
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="exp_designs.ExpDesigns" href="#exp_designs.ExpDesigns">ExpDesigns</a></code></h4>
+<ul class="two-column">
+<li><code><a title="exp_designs.ExpDesigns.generate_samples" href="#exp_designs.ExpDesigns.generate_samples">generate_samples</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.generate_ED" href="#exp_designs.ExpDesigns.generate_ED">generate_ED</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.init_param_space" href="#exp_designs.ExpDesigns.init_param_space">init_param_space</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.build_dist" href="#exp_designs.ExpDesigns.build_dist">build_dist</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.random_sampler" href="#exp_designs.ExpDesigns.random_sampler">random_sampler</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.pcm_sampler" href="#exp_designs.ExpDesigns.pcm_sampler">pcm_sampler</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.transform" href="#exp_designs.ExpDesigns.transform">transform</a></code></li>
+<li><code><a title="exp_designs.ExpDesigns.fit_dist" href="#exp_designs.ExpDesigns.fit_dist">fit_dist</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/html/exploration.html b/docs/html/exploration.html
new file mode 100644
index 0000000000000000000000000000000000000000..73fb78861b052750714019b14e11c06fd250022d
--- /dev/null
+++ b/docs/html/exploration.html
@@ -0,0 +1,1323 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>exploration API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>exploration</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+from scipy.spatial import distance
+
+# ExpDesigns is needed by Exploration.approximate_voronoi below
+from .exp_designs import ExpDesigns
+
+
+class Exploration:
+    &#34;&#34;&#34;
+    Created based on the Surrogate Modeling Toolbox (SUMO) [1].
+
+    [1] Gorissen, D., Couckuyt, I., Demeester, P., Dhaene, T. and Crombecq, K.,
+        2010. A surrogate modeling and adaptive sampling toolbox for computer
+        based design. Journal of machine learning research.-Cambridge, Mass.,
+        11, pp.2051-2055. sumo@sumo.intec.ugent.be - http://sumo.intec.ugent.be
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object.
+    n_candidate : int
+        Number of candidate samples.
+    mc_criterion : str
+        Selection criterion. The default is `&#39;mc-intersite-proj-th&#39;`. Another
+        option is `&#39;mc-intersite-proj&#39;`.
+    w : int
+        Number of random points in the domain for each sample of the
+        training set.
+    &#34;&#34;&#34;
+
+    def __init__(self, MetaModel, n_candidate,
+                 mc_criterion=&#39;mc-intersite-proj-th&#39;):
+        self.MetaModel = MetaModel
+        self.Marginals = []
+        self.n_candidate = n_candidate
+        self.mc_criterion = mc_criterion
+        self.w = 100
+
+    def get_exploration_samples(self):
+        &#34;&#34;&#34;
+        Generates candidates to be selected as new design points, along with
+        their associated exploration scores.
+
+        Returns
+        -------
+        all_candidates : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+
+        print(&#34;\n&#34;)
+        print(f&#39; The {explore_method}-Method is selected as the exploration &#39;
+              &#39;method.&#39;)
+        print(&#34;\n&#34;)
+
+        if explore_method == &#39;Voronoi&#39;:
+            # Generate samples using the Voronoi method
+            all_candidates, exploration_scores = self.get_vornoi_samples()
+        else:
+            # Generate samples using the MC method
+            all_candidates, exploration_scores = self.get_mc_samples()
+
+        return all_candidates, exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_vornoi_samples(self):
+        &#34;&#34;&#34;
+        Generates samples based on Voronoi cells and their corresponding
+        scores.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
+        &#34;&#34;&#34;
+
+        mc_criterion = self.mc_criterion
+        n_candidate = self.n_candidate
+        # Get the old experimental design samples
+        old_ED_X = self.MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
+        # Estimate the Voronoi cell areas; these serve as exploration errors
+        error_voronoi, all_candidates = self.approximate_voronoi(
+            self.w, old_ED_X
+            )
+
+        # Pick the best candidate point in the voronoi cell
+        # for each best sample
+        selected_samples = np.empty((0, ndim))
+        bad_samples = []
+
+        for index in range(len(error_voronoi)):
+
+            # get candidate new samples from the Voronoi tessellation
+            candidates = self.closestPoints[index]
+
+            # get total number of candidates
+            n_new_samples = candidates.shape[0]
+
+            # still no candidate samples around this one, skip it!
+            if n_new_samples == 0:
+                print(&#39;The following sample has been skipped because there &#39;
+                      &#39;were no candidate samples around it...&#39;)
+                print(old_ED_X[index])
+                bad_samples.append(index)
+                continue
+
+            # find candidate that is farthest away from any existing sample
+            max_min_distance = 0
+            best_candidate = 0
+            min_intersite_dist = np.zeros((n_new_samples))
+            min_projected_dist = np.zeros((n_new_samples))
+
+            for j in range(n_new_samples):
+
+                new_samples = np.vstack((old_ED_X, selected_samples))
+
+                # find the min intersite (Euclidean) distance from all other samples
+                euclidean_dist = self._build_dist_matrix_point(
+                    new_samples, candidates[j], do_sqrt=True)
+                min_euclidean_dist = np.min(euclidean_dist)
+                min_intersite_dist[j] = min_euclidean_dist
+
+                # Check if this is the maximum minimum distance from all other
+                # samples
+                if min_euclidean_dist &gt;= max_min_distance:
+                    max_min_distance = min_euclidean_dist
+                    best_candidate = j
+
+                # Projected distance
+                projected_dist = distance.cdist(
+                    new_samples, [candidates[j]], &#39;chebyshev&#39;)
+                min_projected_dist[j] = np.min(projected_dist)
+
+            if mc_criterion == &#39;mc-intersite-proj&#39;:
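+                # The two weights below follow the space-filling criterion of
+                # Crombecq et al. (2011): the intersite (Euclidean) term is
+                # scaled by ((n+1)**(1/ndim) - 1)/2 and the projected
+                # (Chebyshev) term by (n+1)/2.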
+                weight_euclidean_dist = 0.5 * ((n_new_samples+1)**(1/ndim) - 1)
+                weight_projected_dist = 0.5 * (n_new_samples+1)
+                total_dist_scores = weight_euclidean_dist * min_intersite_dist
+                total_dist_scores += weight_projected_dist * min_projected_dist
+
+            elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+                alpha = 0.5  # chosen (tradeoff)
+                d_min = 2 * alpha / n_new_samples
+                if any(min_projected_dist &lt; d_min):
+                    candidates = np.delete(
+                        candidates, [min_projected_dist &lt; d_min], axis=0
+                        )
+                    total_dist_scores = np.delete(
+                        min_intersite_dist, [min_projected_dist &lt; d_min],
+                        axis=0
+                        )
+                else:
+                    total_dist_scores = min_intersite_dist
+            else:
+                raise NameError(
+                    &#39;The MC-Criterion you requested is not available.&#39;
+                    )
+
+            # Add the best-ranked candidates to the list of new samples
+            best_candidate = np.argsort(total_dist_scores)[::-1][:n_candidate]
+            selected_samples = np.vstack(
+                (selected_samples, candidates[best_candidate])
+                )
+
+        self.new_samples = selected_samples
+        self.exploration_scores = np.delete(error_voronoi, bad_samples, axis=0)
+
+        return self.new_samples, self.exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_mc_samples(self, all_candidates=None):
+        &#34;&#34;&#34;
+        Generates random samples and their corresponding scores using global
+        Monte Carlo methods, following [1].
+
+        [1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
+            space-filling and non-collapsing sequential design strategies for
+            simulation-based modeling. European Journal of Operational Research
+            , 214(3), pp.683-696.
+            DOI: https://doi.org/10.1016/j.ejor.2011.05.032
+
+        Implemented methods to compute scores:
+            1) mc-intersite-proj
+            2) mc-intersite-proj-th
+
+        Parameters
+        ----------
+        all_candidates : array, optional
+            Samples to compute the scores for. The default is `None`. In this
+            case, samples will be generated by defined model input marginals.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+        mc_criterion = self.mc_criterion
+        if all_candidates is None:
+            n_candidate = self.n_candidate
+        else:
+            n_candidate = all_candidates.shape[0]
+
+        # Get the old experimental design samples
+        old_ED_X = MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
+        # ----- Compute the number of random points -----
+        if all_candidates is None:
+            # Generate MC Samples
+            all_candidates = MetaModel.ExpDesign.generate_samples(
+                self.n_candidate, explore_method
+                )
+        self.all_candidates = all_candidates
+
+        # initialization
+        new_samples = np.empty((0, ndim))
+        min_intersite_dist = np.zeros((n_candidate))
+        min_projected_dist = np.zeros((n_candidate))
+
+        for i, candidate in enumerate(all_candidates):
+
+            # find candidate that is farthest away from any existing sample
+            maxMinDistance = 0
+
+            new_samples = np.vstack((old_ED_X, new_samples))
+            # find the min intersite (Euclidean) distance from all other samples
+            euclidean_dist = self._build_dist_matrix_point(
+                new_samples, candidate, do_sqrt=True
+                )
+            min_euclidean_dist = np.min(euclidean_dist)
+            min_intersite_dist[i] = min_euclidean_dist
+
+            # Check if this is the maximum minimum distance from all other
+            # samples
+            if min_euclidean_dist &gt;= maxMinDistance:
+                maxMinDistance = min_euclidean_dist
+
+            # Projected distance
+            projected_dist = distance.cdist(
+                new_samples, [candidate], &#39;chebyshev&#39;
+                )
+            min_projected_dist[i] = np.min(projected_dist)
+
+        if mc_criterion == &#39;mc-intersite-proj&#39;:
+            weight_euclidean_dist = ((n_candidate+1)**(1/ndim) - 1) * 0.5
+            weight_projected_dist = (n_candidate+1) * 0.5
+            total_dist_scores = weight_euclidean_dist * min_intersite_dist
+            total_dist_scores += weight_projected_dist * min_projected_dist
+
+        elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+            alpha = 0.5  # chosen (tradeoff)
+            d_min = 2 * alpha / n_candidate
+            if any(min_projected_dist &lt; d_min):
+                all_candidates = np.delete(
+                    all_candidates, [min_projected_dist &lt; d_min], axis=0
+                    )
+                total_dist_scores = np.delete(
+                    min_intersite_dist, [min_projected_dist &lt; d_min], axis=0
+                    )
+            else:
+                total_dist_scores = min_intersite_dist
+        else:
+            raise NameError(&#39;The MC-Criterion you requested is not available.&#39;)
+
+        self.new_samples = all_candidates
+        self.exploration_scores = total_dist_scores
+        self.exploration_scores /= np.nansum(total_dist_scores)
+
+        return self.new_samples, self.exploration_scores
+
+    # -------------------------------------------------------------------------
+    def approximate_voronoi(self, w, samples):
+        &#34;&#34;&#34;
+        An approximate (Monte Carlo) version of Matlab&#39;s voronoi command.
+
+        Parameters
+        ----------
+        w : int
+            Number of random points generated per design sample.
+        samples : array
+            Old experimental design to be used as center points for Voronoi
+            cells.
+
+        Returns
+        -------
+        areas : array
+            An approximation of the Voronoi cells&#39; areas.
+        all_candidates : list of arrays
+            A list of samples in each voronoi cell.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+
+        n_samples = samples.shape[0]
+        ndim = samples.shape[1]
+
+        # Compute the number of random points
+        n_points = w * samples.shape[0]
+        # Generate w random points in the domain for each sample
+        ExpDesign = ExpDesigns(MetaModel.Inputs)
+        points = ExpDesign.generate_samples(n_points, &#39;random&#39;)
+        self.all_candidates = points
+
+        # Calculate the nearest sample to each point
+        self.areas = np.zeros((n_samples))
+        self.closestPoints = [np.empty((0, ndim)) for i in range(n_samples)]
+
+        # Compute the minimum distance from all the samples of old_ED_X for
+        # each test point
+        for idx in range(n_points):
+            # calculate the minimum distance
+            distances = self._build_dist_matrix_point(
+                samples, points[idx], do_sqrt=True
+                )
+            closest_sample = np.argmin(distances)
+
+            # Add to the voronoi list of the closest sample
+            self.areas[closest_sample] = self.areas[closest_sample] + 1
+            prevclosestPoints = self.closestPoints[closest_sample]
+            self.closestPoints[closest_sample] = np.vstack(
+                (prevclosestPoints, points[idx])
+                )
+
+        # Divide by the number of points to get the estimated volume of each
+        # voronoi cell
+        self.areas = self.areas / n_points
+
+        self.perc = np.max(self.areas * 100)
+
+        self.errors = self.areas
+
+        return self.areas, self.all_candidates
+
+    # -------------------------------------------------------------------------
+    def _build_dist_matrix_point(self, samples, point, do_sqrt=False):
+        &#34;&#34;&#34;
+        Calculates the intersite distance of all points in samples from point.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            The old experimental design.
+        point : array
+            A candidate point.
+        do_sqrt : bool, optional
+            If `True`, return Euclidean distances; otherwise return squared
+            distances. The default is `False`.
+
+        Returns
+        -------
+        distances : array
+            Distances.
+
+        &#34;&#34;&#34;
+        distances = distance.cdist(samples, np.array([point]), &#39;euclidean&#39;)
+
+        # do square root?
+        if do_sqrt:
+            return distances
+        else:
+            return distances**2
+
+#if __name__ == &#34;__main__&#34;:
+#    import scipy.stats as stats
+#    import matplotlib.pyplot as plt
+#    import matplotlib as mpl
+#    import matplotlib.cm as cm
+#    plt.rc(&#39;font&#39;, family=&#39;sans-serif&#39;, serif=&#39;Arial&#39;)
+#    plt.rc(&#39;figure&#39;, figsize = (12, 8))
+#    
+#    def plotter(old_ED_X, all_candidates, exploration_scores):
+#        global Bounds
+#        
+#        from scipy.spatial import Voronoi, voronoi_plot_2d
+#        vor = Voronoi(old_ED_X)
+#        
+#        fig = voronoi_plot_2d(vor)
+#        
+#        # find min/max values for normalization
+##        minima = min(exploration_scores)
+##        maxima = max(exploration_scores)
+##        
+##        # normalize chosen colormap
+##        norm = mpl.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
+##        mapper = cm.ScalarMappable(norm=norm, cmap=cm.Blues_r)
+##        
+##        for r in range(len(vor.point_region)):
+##            region = vor.regions[vor.point_region[r]]
+##            if not -1 in region:
+##                polygon = [vor.vertices[i] for i in region]
+##                plt.fill(*zip(*polygon), color=mapper.to_rgba(exploration_scores[r]))
+#        
+#        
+#        ax1 = fig.add_subplot(111)
+#        
+#        ax1.scatter(old_ED_X[:,0], old_ED_X[:,1], s=10, c=&#39;r&#39;, marker=&#34;s&#34;, label=&#39;Old Design Points&#39;)
+#        for i in range(old_ED_X.shape[0]):
+#            txt = &#39;p&#39;+str(i+1)
+#            ax1.annotate(txt, (old_ED_X[i,0],old_ED_X[i,1]))
+#            
+##        for i in range(NrofCandGroups):
+##            Candidates = all_candidates[&#39;group_&#39;+str(i+1)]
+##            ax1.scatter(Candidates[:,0],Candidates[:,1], s=10, c=&#39;b&#39;, marker=&#34;o&#34;, label=&#39;Design candidates&#39;)
+#        ax1.scatter(all_candidates[:,0],all_candidates[:,1], s=10, c=&#39;b&#39;, marker=&#34;o&#34;, label=&#39;Design candidates&#39;)
+#        
+#        ax1.set_xlim(Bounds[0][0], Bounds[0][1])
+#        ax1.set_ylim(Bounds[1][0], Bounds[1][1])
+#        
+#        plt.legend(loc=&#39;best&#39;);
+#        plt.show()
+#        
+#    def voronoi_volumes(points):
+#        from scipy.spatial import Voronoi, ConvexHull
+#        v = Voronoi(points)
+#        vol = np.zeros(v.npoints)
+#        
+#        for i, reg_num in enumerate(v.point_region):
+#            indices = v.regions[reg_num]
+#            if -1 in indices: # some regions can be opened
+#                vol[i] = np.inf
+#            else:
+#                
+#                #print(&#34;reg_num={0: 3.3f} X1={1: 3.3f} X2={2: 3.3f}&#34;.format(reg_num, v.points[reg_num-1, 0], v.points[reg_num-1, 1]))
+#                vol[i] = ConvexHull(v.vertices[indices]).volume
+#        
+#        print(&#39;-&#39;*40)
+#        for i in range(nrofSamples):
+#            print(&#34;idx={0:d} X1={1: 3.3f} X2={2: 3.3f} Volume={3: 3.3f}&#34;.format(i+1, v.points[i, 0], v.points[i, 1], vol[i]))
+#        
+#        return vol    
+#    
+#    NofPa = 2
+#    
+#    Bounds = ((-5,10), (0,15))
+#    
+#    nrofSamples = 10
+#    old_ED_X = np.zeros((nrofSamples, NofPa))
+#    for idx in range(NofPa):
+#        Loc = Bounds[idx][0]
+#        Scale = Bounds[idx][1] - Bounds[idx][0]
+#        old_ED_X[:,idx] = stats.uniform(loc=Loc, scale=Scale).rvs(size=nrofSamples)
+#    
+#    
+#    nNewCandidate = 40
+#    
+#    # New Function
+#    volumes = voronoi_volumes(old_ED_X)
+#    
+#    
+#    # SUMO
+#    Exploration = Exploration(Bounds, old_ED_X, nNewCandidate)
+#    
+#    #all_candidates, Score = Exploration.get_vornoi_samples()
+#    all_candidates, Score = Exploration.get_mc_samples()
+#    
+#    print(&#39;-&#39;*40)
+##    for i in range(nrofSamples):
+##        print(&#34;idx={0:d} X1={1: 3.3f} X2={2: 3.3f} Volume={3: 3.3f}&#34;.format(i+1, old_ED_X[i,0], old_ED_X[i,1], vornoi.areas[i]))
+#        
+#    plotter(old_ED_X, all_candidates, volumes)
+    </code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="exploration.Exploration"><code class="flex name class">
+<span>class <span class="ident">Exploration</span></span>
+<span>(</span><span>MetaModel, n_candidate, mc_criterion='mc-intersite-proj-th')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Created based on the Surrogate Modeling Toolbox (SUMO) [1].</p>
+<p>[1] Gorissen, D., Couckuyt, I., Demeester, P., Dhaene, T. and Crombecq, K.,
+2010. A surrogate modeling and adaptive sampling toolbox for computer
+based design. Journal of machine learning research.-Cambridge, Mass.,
+11, pp.2051-2055. sumo@sumo.intec.ugent.be - <a href="http://sumo.intec.ugent.be">http://sumo.intec.ugent.be</a></p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>MetaModel</code></strong> :&ensp;<code>obj</code></dt>
+<dd>MetaModel object.</dd>
+<dt><strong><code>n_candidate</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of candidate samples.</dd>
+<dt><strong><code>mc_criterion</code></strong> :&ensp;<code>str</code></dt>
+<dd>Selection criterion. The default is <code>'mc-intersite-proj-th'</code>. Another
+option is <code>'mc-intersite-proj'</code>.</dd>
+<dt><strong><code>w</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of random points in the domain for each sample of the
+training set.</dd>
+</dl></div>
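+<p>A minimal usage sketch (here <code>meta_model</code> stands for any configured
+object exposing <code>ExpDesign.X</code>, <code>ExpDesign.explore_method</code> and
+<code>ExpDesign.generate_samples</code>, as assumed throughout this class; it is
+not defined in this module):</p>
+<pre><code class="python">import numpy as np
+
+# meta_model: hypothetical, fully configured MetaModel object (see above)
+explore = Exploration(meta_model, n_candidate=200)
+candidates, scores = explore.get_exploration_samples()
+
+# the MC scores are normalized to sum to one, so the most space-filling
+# candidates are simply the highest-scoring ones
+best = candidates[np.argsort(scores)[::-1][:10]]</code></pre>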
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class Exploration:
+    &#34;&#34;&#34;
+    Exploration strategies adapted from the Surrogate Modeling Toolbox (SUMO) [1].
+
+    [1] Gorissen, D., Couckuyt, I., Demeester, P., Dhaene, T. and Crombecq, K.,
+        2010. A surrogate modeling and adaptive sampling toolbox for computer
+        based design. Journal of machine learning research.-Cambridge, Mass.,
+        11, pp.2051-2055. sumo@sumo.intec.ugent.be - http://sumo.intec.ugent.be
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object.
+    n_candidate : int
+        Number of candidate samples.
+    mc_criterion : str
+        Selection criterion. The default is `&#39;mc-intersite-proj-th&#39;`. Another
+        option is `&#39;mc-intersite-proj&#39;`.
+    w : int
+        Number of random points in the domain for each sample of the
+        training set.
+    &#34;&#34;&#34;
+
+    def __init__(self, MetaModel, n_candidate,
+                 mc_criterion=&#39;mc-intersite-proj-th&#39;):
+        self.MetaModel = MetaModel
+        self.Marginals = []
+        self.n_candidate = n_candidate
+        self.mc_criterion = mc_criterion
+        self.w = 100
+
+    def get_exploration_samples(self):
+        &#34;&#34;&#34;
+        Generates candidate points for the new experimental design and
+        their associated exploration scores.
+
+        Returns
+        -------
+        all_candidates : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate,)
+            Exploration scores.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+
+        print(&#34;\n&#34;)
+        print(f&#39; The {explore_method}-Method is selected as the exploration &#39;
+              &#39;method.&#39;)
+        print(&#34;\n&#34;)
+
+        if explore_method == &#39;Voronoi&#39;:
+            # Generate samples using the Voronoi method
+            all_candidates, exploration_scores = self.get_vornoi_samples()
+        else:
+            # Generate samples using the MC method
+            all_candidates, exploration_scores = self.get_mc_samples()
+
+        return all_candidates, exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_vornoi_samples(self):
+        &#34;&#34;&#34;
+        Generates samples based on Voronoi cells and their corresponding
+        scores.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate,)
+            Exploration scores.
+        &#34;&#34;&#34;
+
+        mc_criterion = self.mc_criterion
+        n_candidate = self.n_candidate
+        # Get the Old ExpDesign #samples
+        old_ED_X = self.MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
+        # calculate error #averageErrors
+        error_voronoi, all_candidates = self.approximate_voronoi(
+            self.w, old_ED_X
+            )
+
+        # Pick the best candidate point in the voronoi cell
+        # for each best sample
+        selected_samples = np.empty((0, ndim))
+        bad_samples = []
+
+        for index in range(len(error_voronoi)):
+
+            # get new candidate samples from the Voronoi tessellation
+            candidates = self.closestPoints[index]
+
+            # get total number of candidates
+            n_new_samples = candidates.shape[0]
+
+            # still no candidate samples around this one, skip it!
+            if n_new_samples == 0:
+                print(&#39;The following sample has been skipped because there &#39;
+                      &#39;were no candidate samples around it...&#39;)
+                print(old_ED_X[index])
+                bad_samples.append(index)
+                continue
+
+            # distance bookkeeping for all candidates in this cell
+            min_intersite_dist = np.zeros(n_new_samples)
+            min_projected_dist = np.zeros(n_new_samples)
+
+            for j in range(n_new_samples):
+
+                new_samples = np.vstack((old_ED_X, selected_samples))
+
+                # minimum Euclidean (intersite) distance to all samples
+                euclidean_dist = self._build_dist_matrix_point(
+                    new_samples, candidates[j], do_sqrt=True)
+                min_euclidean_dist = np.min(euclidean_dist)
+                min_intersite_dist[j] = min_euclidean_dist
+
+                # Projected distance
+                projected_dist = distance.cdist(
+                    new_samples, [candidates[j]], &#39;chebyshev&#39;)
+                min_projected_dist[j] = np.min(projected_dist)
+
+            if mc_criterion == &#39;mc-intersite-proj&#39;:
+                weight_euclidean_dist = 0.5 * ((n_new_samples+1)**(1/ndim) - 1)
+                weight_projected_dist = 0.5 * (n_new_samples+1)
+                total_dist_scores = weight_euclidean_dist * min_intersite_dist
+                total_dist_scores += weight_projected_dist * min_projected_dist
+
+            elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+                alpha = 0.5  # exploration/exploitation tradeoff
+                d_min = 2 * alpha / n_new_samples
+                # discard candidates that nearly collapse onto an existing
+                # sample in any single dimension
+                too_close = min_projected_dist &lt; d_min
+                if np.any(too_close):
+                    candidates = np.delete(candidates, too_close, axis=0)
+                    total_dist_scores = np.delete(
+                        min_intersite_dist, too_close, axis=0
+                        )
+                else:
+                    total_dist_scores = min_intersite_dist
+            else:
+                raise ValueError(
+                    &#39;The MC criterion you requested is not available.&#39;
+                    )
+
+            # Add the best candidate to the list of new samples
+            best_candidate = np.argsort(total_dist_scores)[::-1][:n_candidate]
+            selected_samples = np.vstack(
+                (selected_samples, candidates[best_candidate])
+                )
+
+        self.new_samples = selected_samples
+        self.exploration_scores = np.delete(error_voronoi, bad_samples, axis=0)
+
+        return self.new_samples, self.exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_mc_samples(self, all_candidates=None):
+        &#34;&#34;&#34;
+        Generates random samples and their corresponding scores using
+        global Monte Carlo methods, following [1].
+
+        [1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
+            space-filling and non-collapsing sequential design strategies for
+            simulation-based modeling. European Journal of Operational Research
+            , 214(3), pp.683-696.
+            DOI: https://doi.org/10.1016/j.ejor.2011.05.032
+
+        Implemented methods to compute scores:
+            1) mc-intersite-proj
+            2) mc-intersite-proj-th
+
+        Arguments
+        ---------
+        all_candidates : array, optional
+            Samples to compute the scores for. The default is `None`. In this
+            case, samples are generated from the defined model input marginals.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate,)
+            Exploration scores.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+        mc_criterion = self.mc_criterion
+        if all_candidates is None:
+            n_candidate = self.n_candidate
+        else:
+            n_candidate = all_candidates.shape[0]
+
+        # Get the Old ExpDesign #samples
+        old_ED_X = MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
+        # ----- Compute the number of random points -----
+        if all_candidates is None:
+            # Generate MC Samples
+            all_candidates = MetaModel.ExpDesign.generate_samples(
+                self.n_candidate, explore_method
+                )
+        self.all_candidates = all_candidates
+
+        # initialization
+        min_intersite_dist = np.zeros(n_candidate)
+        min_projected_dist = np.zeros(n_candidate)
+
+        for i, candidate in enumerate(all_candidates):
+
+            # minimum Euclidean (intersite) distance from this candidate
+            # to all existing design samples
+            euclidean_dist = self._build_dist_matrix_point(
+                old_ED_X, candidate, do_sqrt=True
+                )
+            min_intersite_dist[i] = np.min(euclidean_dist)
+
+            # minimum projected (Chebyshev) distance from this candidate
+            # to all existing design samples
+            projected_dist = distance.cdist(
+                old_ED_X, [candidate], &#39;chebyshev&#39;
+                )
+            min_projected_dist[i] = np.min(projected_dist)
+
+        if mc_criterion == &#39;mc-intersite-proj&#39;:
+            weight_euclidean_dist = ((n_candidate+1)**(1/ndim) - 1) * 0.5
+            weight_projected_dist = (n_candidate+1) * 0.5
+            total_dist_scores = weight_euclidean_dist * min_intersite_dist
+            total_dist_scores += weight_projected_dist * min_projected_dist
+
+        elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+            alpha = 0.5  # exploration/exploitation tradeoff
+            d_min = 2 * alpha / n_candidate
+            # discard candidates that nearly collapse onto an existing
+            # sample in any single dimension
+            too_close = min_projected_dist &lt; d_min
+            if np.any(too_close):
+                all_candidates = np.delete(all_candidates, too_close, axis=0)
+                total_dist_scores = np.delete(
+                    min_intersite_dist, too_close, axis=0
+                    )
+            else:
+                total_dist_scores = min_intersite_dist
+        else:
+            raise ValueError(&#39;The MC criterion you requested is not available.&#39;)
+
+        self.new_samples = all_candidates
+        self.exploration_scores = total_dist_scores
+        self.exploration_scores /= np.nansum(total_dist_scores)
+
+        return self.new_samples, self.exploration_scores
+
+    # -------------------------------------------------------------------------
+    def approximate_voronoi(self, w, samples):
+        &#34;&#34;&#34;
+        An approximate (Monte Carlo) version of MATLAB&#39;s voronoi command.
+
+        Arguments
+        ---------
+        w : int
+            Number of random points in the domain per design sample.
+        samples : array
+            Old experimental design to be used as center points for Voronoi
+            cells.
+
+        Returns
+        -------
+        areas : array
+            An approximation of the Voronoi cells&#39; areas.
+        all_candidates : list of arrays
+            A list of samples in each Voronoi cell.
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+
+        n_samples = samples.shape[0]
+        ndim = samples.shape[1]
+
+        # Compute the number of random points
+        n_points = w * samples.shape[0]
+        # Generate w random points in the domain for each sample
+        ExpDesign = ExpDesigns(MetaModel.Inputs)
+        points = ExpDesign.generate_samples(n_points, &#39;random&#39;)
+        self.all_candidates = points
+
+        # Calculate the nearest sample to each point
+        self.areas = np.zeros((n_samples))
+        self.closestPoints = [np.empty((0, ndim)) for i in range(n_samples)]
+
+        # Compute the minimum distance from all the samples of old_ED_X for
+        # each test point
+        for idx in range(n_points):
+            # calculate the minimum distance
+            distances = self._build_dist_matrix_point(
+                samples, points[idx], do_sqrt=True
+                )
+            closest_sample = np.argmin(distances)
+
+            # Add to the Voronoi list of the closest sample
+            self.areas[closest_sample] = self.areas[closest_sample] + 1
+            prevclosestPoints = self.closestPoints[closest_sample]
+            self.closestPoints[closest_sample] = np.vstack(
+                (prevclosestPoints, points[idx])
+                )
+
+        # Divide by the number of points to get the estimated volume of each
+        # Voronoi cell
+        self.areas = self.areas / n_points
+
+        self.perc = np.max(self.areas * 100)
+
+        self.errors = self.areas
+
+        return self.areas, self.all_candidates
+
+    # -------------------------------------------------------------------------
+    def _build_dist_matrix_point(self, samples, point, do_sqrt=False):
+        &#34;&#34;&#34;
+        Calculates the intersite distance of all points in samples from point.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            The old experimental design.
+        point : array
+            A candidate point.
+        do_sqrt : bool, optional
+            If `True`, return Euclidean distances; otherwise return squared
+            distances. The default is `False`.
+
+        Returns
+        -------
+        distances : array
+            (Squared) Euclidean distances of `samples` to `point`.
+
+        &#34;&#34;&#34;
+        distances = distance.cdist(samples, np.array([point]), &#39;euclidean&#39;)
+
+        # do square root?
+        if do_sqrt:
+            return distances
+        else:
+            return distances**2</code></pre>
+</details>
+<h3>Methods</h3>
+<dl>
+<dt id="exploration.Exploration.get_exploration_samples"><code class="name flex">
+<span>def <span class="ident">get_exploration_samples</span></span>(<span>self)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This function generates candidates to be selected as new design and
+their associated exploration scores.</p>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>all_candidates</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate, n_params)</code></dt>
+<dd>A list of samples.</dd>
+<dt><strong><code>exploration_scores</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate,)</code></dt>
+<dd>Exploration scores.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def get_exploration_samples(self):
+    &#34;&#34;&#34;
+    Generates candidate points for the new experimental design and
+    their associated exploration scores.
+
+    Returns
+    -------
+    all_candidates : array of shape (n_candidate, n_params)
+        A list of samples.
+    exploration_scores : array of shape (n_candidate,)
+        Exploration scores.
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+    explore_method = MetaModel.ExpDesign.explore_method
+
+    print(&#34;\n&#34;)
+    print(f&#39; The {explore_method}-Method is selected as the exploration &#39;
+          &#39;method.&#39;)
+    print(&#34;\n&#34;)
+
+    if explore_method == &#39;Voronoi&#39;:
+        # Generate samples using the Voronoi method
+        all_candidates, exploration_scores = self.get_vornoi_samples()
+    else:
+        # Generate samples using the MC method
+        all_candidates, exploration_scores = self.get_mc_samples()
+
+    return all_candidates, exploration_scores</code></pre>
+</details>
+</dd>
+<dt id="exploration.Exploration.get_vornoi_samples"><code class="name flex">
+<span>def <span class="ident">get_vornoi_samples</span></span>(<span>self)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This function generates samples based on voronoi cells and their
+corresponding scores</p>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>new_samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate, n_params)</code></dt>
+<dd>A list of samples.</dd>
+<dt><strong><code>exploration_scores</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate,)</code></dt>
+<dd>Exploration scores.</dd>
+</dl></div>
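+<p>A standalone sketch of the per-cell <code>'mc-intersite-proj-th'</code> filter
+used above (the helper name and inputs are illustrative, not part of this
+module):</p>
+<pre><code class="python">from scipy.spatial import distance
+
+def intersite_proj_th(existing, candidates, alpha=0.5):
+    &#34;&#34;&#34;Drop candidates whose projected (Chebyshev) distance to the
+    existing design falls below d_min, then score the survivors by their
+    minimum Euclidean distance to the design.&#34;&#34;&#34;
+    d_proj = distance.cdist(candidates, existing, &#39;chebyshev&#39;).min(axis=1)
+    d_eucl = distance.cdist(candidates, existing, &#39;euclidean&#39;).min(axis=1)
+    d_min = 2 * alpha / len(candidates)
+    keep = d_proj &gt;= d_min
+    return candidates[keep], d_eucl[keep]</code></pre>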
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def get_vornoi_samples(self):
+    &#34;&#34;&#34;
+    Generates samples based on Voronoi cells and their corresponding
+    scores.
+
+    Returns
+    -------
+    new_samples : array of shape (n_candidate, n_params)
+        A list of samples.
+    exploration_scores : array of shape (n_candidate,)
+        Exploration scores.
+    &#34;&#34;&#34;
+
+    mc_criterion = self.mc_criterion
+    n_candidate = self.n_candidate
+    # Get the Old ExpDesign #samples
+    old_ED_X = self.MetaModel.ExpDesign.X
+    ndim = old_ED_X.shape[1]
+
+    # calculate error #averageErrors
+    error_voronoi, all_candidates = self.approximate_voronoi(
+        self.w, old_ED_X
+        )
+
+    # Pick the best candidate point in the voronoi cell
+    # for each best sample
+    selected_samples = np.empty((0, ndim))
+    bad_samples = []
+
+    for index in range(len(error_voronoi)):
+
+        # get new candidate samples from the Voronoi tessellation
+        candidates = self.closestPoints[index]
+
+        # get total number of candidates
+        n_new_samples = candidates.shape[0]
+
+        # still no candidate samples around this one, skip it!
+        if n_new_samples == 0:
+            print(&#39;The following sample has been skipped because there &#39;
+                  &#39;were no candidate samples around it...&#39;)
+            print(old_ED_X[index])
+            bad_samples.append(index)
+            continue
+
+        # distance bookkeeping for all candidates in this cell
+        min_intersite_dist = np.zeros(n_new_samples)
+        min_projected_dist = np.zeros(n_new_samples)
+
+        for j in range(n_new_samples):
+
+            new_samples = np.vstack((old_ED_X, selected_samples))
+
+            # minimum Euclidean (intersite) distance to all samples
+            euclidean_dist = self._build_dist_matrix_point(
+                new_samples, candidates[j], do_sqrt=True)
+            min_euclidean_dist = np.min(euclidean_dist)
+            min_intersite_dist[j] = min_euclidean_dist
+
+            # Projected distance
+            projected_dist = distance.cdist(
+                new_samples, [candidates[j]], &#39;chebyshev&#39;)
+            min_projected_dist[j] = np.min(projected_dist)
+
+        if mc_criterion == &#39;mc-intersite-proj&#39;:
+            weight_euclidean_dist = 0.5 * ((n_new_samples+1)**(1/ndim) - 1)
+            weight_projected_dist = 0.5 * (n_new_samples+1)
+            total_dist_scores = weight_euclidean_dist * min_intersite_dist
+            total_dist_scores += weight_projected_dist * min_projected_dist
+
+        elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+            alpha = 0.5  # exploration/exploitation tradeoff
+            d_min = 2 * alpha / n_new_samples
+            # discard candidates that nearly collapse onto an existing
+            # sample in any single dimension
+            too_close = min_projected_dist &lt; d_min
+            if np.any(too_close):
+                candidates = np.delete(candidates, too_close, axis=0)
+                total_dist_scores = np.delete(
+                    min_intersite_dist, too_close, axis=0
+                    )
+            else:
+                total_dist_scores = min_intersite_dist
+        else:
+            raise ValueError(
+                &#39;The MC criterion you requested is not available.&#39;
+                )
+
+        # Add the best candidate to the list of new samples
+        best_candidate = np.argsort(total_dist_scores)[::-1][:n_candidate]
+        selected_samples = np.vstack(
+            (selected_samples, candidates[best_candidate])
+            )
+
+    self.new_samples = selected_samples
+    self.exploration_scores = np.delete(error_voronoi, bad_samples, axis=0)
+
+    return self.new_samples, self.exploration_scores</code></pre>
+</details>
+</dd>
+<dt id="exploration.Exploration.get_mc_samples"><code class="name flex">
+<span>def <span class="ident">get_mc_samples</span></span>(<span>self, all_candidates=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This function generates random samples based on Global Monte Carlo
+methods and their corresponding scores, based on [1].</p>
+<p>[1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
+space-filling and non-collapsing sequential design strategies for
+simulation-based modeling. European Journal of Operational Research
+, 214(3), pp.683-696.
+DOI: <a href="https://doi.org/10.1016/j.ejor.2011.05.032">https://doi.org/10.1016/j.ejor.2011.05.032</a></p>
+<p>Implemented methods to compute scores:
+1) mc-intersite-proj
+2) mc-intersite-proj-th</p>
+<h2 id="arguments">Arguments</h2>
+<dl>
+<dt><strong><code>all_candidates</code></strong> :&ensp;<code>array</code>, optional</dt>
+<dd>Samples to compute the scores for. The default is <code>None</code>. In this
+case, samples are generated from the defined model input marginals.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>new_samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate, n_params)</code></dt>
+<dd>A list of samples.</dd>
+<dt><strong><code>exploration_scores</code></strong> :&ensp;<code>array</code> of <code>shape (n_candidate,)</code></dt>
+<dd>Exploration scores.</dd>
+</dl></div>
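+<p>A self-contained sketch of the <code>'mc-intersite-proj'</code> score with the
+weights from Crombecq et al. (2011) used above (the helper name is
+illustrative only):</p>
+<pre><code class="python">from scipy.spatial import distance
+
+def mc_intersite_proj(existing, candidates):
+    &#34;&#34;&#34;Weighted sum of each candidate&#39;s minimum Euclidean (intersite) and
+    minimum projected (Chebyshev) distance to the existing design.&#34;&#34;&#34;
+    n, ndim = candidates.shape
+    d_eucl = distance.cdist(candidates, existing, &#39;euclidean&#39;).min(axis=1)
+    d_proj = distance.cdist(candidates, existing, &#39;chebyshev&#39;).min(axis=1)
+    w_eucl = 0.5 * ((n + 1)**(1 / ndim) - 1)
+    w_proj = 0.5 * (n + 1)
+    return w_eucl * d_eucl + w_proj * d_proj</code></pre>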
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def get_mc_samples(self, all_candidates=None):
+    &#34;&#34;&#34;
+    Generates random samples and their corresponding scores using
+    global Monte Carlo methods, following [1].
+
+    [1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
+        space-filling and non-collapsing sequential design strategies for
+        simulation-based modeling. European Journal of Operational Research
+        , 214(3), pp.683-696.
+        DOI: https://doi.org/10.1016/j.ejor.2011.05.032
+
+    Implemented methods to compute scores:
+        1) mc-intersite-proj
+        2) mc-intersite-proj-th
+
+    Arguments
+    ---------
+    all_candidates : array, optional
+        Samples to compute the scores for. The default is `None`. In this
+        case, samples are generated from the defined model input marginals.
+
+    Returns
+    -------
+    new_samples : array of shape (n_candidate, n_params)
+        A list of samples.
+    exploration_scores : array of shape (n_candidate,)
+        Exploration scores.
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+    explore_method = MetaModel.ExpDesign.explore_method
+    mc_criterion = self.mc_criterion
+    if all_candidates is None:
+        n_candidate = self.n_candidate
+    else:
+        n_candidate = all_candidates.shape[0]
+
+    # Get the Old ExpDesign #samples
+    old_ED_X = MetaModel.ExpDesign.X
+    ndim = old_ED_X.shape[1]
+
+    # ----- Compute the number of random points -----
+    if all_candidates is None:
+        # Generate MC Samples
+        all_candidates = MetaModel.ExpDesign.generate_samples(
+            self.n_candidate, explore_method
+            )
+    self.all_candidates = all_candidates
+
+    # initialization
+    min_intersite_dist = np.zeros(n_candidate)
+    min_projected_dist = np.zeros(n_candidate)
+
+    for i, candidate in enumerate(all_candidates):
+
+        # minimum Euclidean (intersite) distance from this candidate
+        # to all existing design samples
+        euclidean_dist = self._build_dist_matrix_point(
+            old_ED_X, candidate, do_sqrt=True
+            )
+        min_intersite_dist[i] = np.min(euclidean_dist)
+
+        # minimum projected (Chebyshev) distance from this candidate
+        # to all existing design samples
+        projected_dist = distance.cdist(
+            old_ED_X, [candidate], &#39;chebyshev&#39;
+            )
+        min_projected_dist[i] = np.min(projected_dist)
+
+    if mc_criterion == &#39;mc-intersite-proj&#39;:
+        weight_euclidean_dist = ((n_candidate+1)**(1/ndim) - 1) * 0.5
+        weight_projected_dist = (n_candidate+1) * 0.5
+        total_dist_scores = weight_euclidean_dist * min_intersite_dist
+        total_dist_scores += weight_projected_dist * min_projected_dist
+
+    elif mc_criterion == &#39;mc-intersite-proj-th&#39;:
+        alpha = 0.5  # exploration/exploitation tradeoff
+        d_min = 2 * alpha / n_candidate
+        # discard candidates that nearly collapse onto an existing
+        # sample in any single dimension
+        too_close = min_projected_dist &lt; d_min
+        if np.any(too_close):
+            all_candidates = np.delete(all_candidates, too_close, axis=0)
+            total_dist_scores = np.delete(
+                min_intersite_dist, too_close, axis=0
+                )
+        else:
+            total_dist_scores = min_intersite_dist
+    else:
+        raise ValueError(&#39;The MC criterion you requested is not available.&#39;)
+
+    self.new_samples = all_candidates
+    self.exploration_scores = total_dist_scores
+    self.exploration_scores /= np.nansum(total_dist_scores)
+
+    return self.new_samples, self.exploration_scores</code></pre>
+</details>
+</dd>
+<dt id="exploration.Exploration.approximate_voronoi"><code class="name flex">
+<span>def <span class="ident">approximate_voronoi</span></span>(<span>self, w, samples)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>An approximate (monte carlo) version of Matlab's voronoi command.</p>
+<h2 id="arguments">Arguments</h2>
+<dl>
+<dt><strong><code>w</code></strong> :&ensp;<code>int</code></dt>
+<dd>Number of random points in the domain per design sample.</dd>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code></dt>
+<dd>Old experimental design to be used as center points for Voronoi
+cells.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>areas</code></strong> :&ensp;<code>array</code></dt>
+<dd>An approximation of the Voronoi cells' areas.</dd>
+<dt><strong><code>all_candidates</code></strong> :&ensp;<code>list</code> of <code>arrays</code></dt>
+<dd>A list of samples in each Voronoi cell.</dd>
+</dl></div>
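+<p>The idea behind this Monte Carlo approximation, as a standalone sketch
+(the unit-hypercube sampling here stands in for the input marginals used
+by this class):</p>
+<pre><code class="python">import numpy as np
+from scipy.spatial import distance
+
+def voronoi_volumes_mc(samples, n_points=10_000, rng=None):
+    &#34;&#34;&#34;Estimate relative Voronoi cell volumes: the share of random
+    points whose nearest design sample is i approximates the relative
+    volume of cell i.&#34;&#34;&#34;
+    rng = np.random.default_rng() if rng is None else rng
+    points = rng.random((n_points, samples.shape[1]))
+    nearest = distance.cdist(points, samples).argmin(axis=1)
+    return np.bincount(nearest, minlength=len(samples)) / n_points</code></pre>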
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def approximate_voronoi(self, w, samples):
+    &#34;&#34;&#34;
+    An approximate (Monte Carlo) version of MATLAB&#39;s voronoi command.
+
+    Arguments
+    ---------
+    w : int
+        Number of random points in the domain per design sample.
+    samples : array
+        Old experimental design to be used as center points for Voronoi
+        cells.
+
+    Returns
+    -------
+    areas : array
+        An approximation of the Voronoi cells&#39; areas.
+    all_candidates : list of arrays
+        A list of samples in each Voronoi cell.
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+
+    n_samples = samples.shape[0]
+    ndim = samples.shape[1]
+
+    # Compute the number of random points
+    n_points = w * samples.shape[0]
+    # Generate w random points in the domain for each sample
+    ExpDesign = ExpDesigns(MetaModel.Inputs)
+    points = ExpDesign.generate_samples(n_points, &#39;random&#39;)
+    self.all_candidates = points
+
+    # Calculate the nearest sample to each point
+    self.areas = np.zeros((n_samples))
+    self.closestPoints = [np.empty((0, ndim)) for i in range(n_samples)]
+
+    # Compute the minimum distance from all the samples of old_ED_X for
+    # each test point
+    for idx in range(n_points):
+        # calculate the minimum distance
+        distances = self._build_dist_matrix_point(
+            samples, points[idx], do_sqrt=True
+            )
+        closest_sample = np.argmin(distances)
+
+        # Add to the Voronoi list of the closest sample
+        self.areas[closest_sample] = self.areas[closest_sample] + 1
+        prevclosestPoints = self.closestPoints[closest_sample]
+        self.closestPoints[closest_sample] = np.vstack(
+            (prevclosestPoints, points[idx])
+            )
+
+    # Divide by the number of points to get the estimated volume of each
+    # Voronoi cell
+    self.areas = self.areas / n_points
+
+    self.perc = np.max(self.areas * 100)
+
+    self.errors = self.areas
+
+    return self.areas, self.all_candidates</code></pre>
+</details>
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="exploration.Exploration" href="#exploration.Exploration">Exploration</a></code></h4>
+<ul class="">
+<li><code><a title="exploration.Exploration.get_exploration_samples" href="#exploration.Exploration.get_exploration_samples">get_exploration_samples</a></code></li>
+<li><code><a title="exploration.Exploration.get_vornoi_samples" href="#exploration.Exploration.get_vornoi_samples">get_vornoi_samples</a></code></li>
+<li><code><a title="exploration.Exploration.get_mc_samples" href="#exploration.Exploration.get_mc_samples">get_mc_samples</a></code></li>
+<li><code><a title="exploration.Exploration.approximate_voronoi" href="#exploration.Exploration.approximate_voronoi">approximate_voronoi</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/build/html/inputs.html b/docs/html/inputs.html
similarity index 90%
rename from docs/build/html/inputs.html
rename to docs/html/inputs.html
index f97e314db0a107d73a0aeefa4280a649096d3248..e518ad65394a1acbebbac4ec35254a61ae13c753 100644
--- a/docs/build/html/inputs.html
+++ b/docs/html/inputs.html
@@ -12,6 +12,7 @@
 <style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
 <style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
 <style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
 <script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
 <script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
 </head>
@@ -57,10 +58,10 @@ class Input:
     &gt;&gt;&gt; Inputs.Marginals[0].name = &#39;X_1&#39;
     &gt;&gt;&gt; Inputs.Marginals[0].input_data = input_data
     &#34;&#34;&#34;
+    poly_coeffs_flag = True
 
     def __init__(self):
         self.Marginals = []
-        self.__poly_coeffs_flag = True
         self.Rosenblatt = False
 
     def add_marginals(self):
@@ -84,14 +85,14 @@ class Marginal:
     Attributes
     ----------
     name : string
-        Name of the parameter. The default is &#39;$x_1$&#39;.
+        Name of the parameter. The default is `&#39;$x_1$&#39;`.
     dist_type : string
-        Name of the distribution. The default is None.
+        Name of the distribution. The default is `None`.
     parameters : list
         List of the parameters corresponding to the distribution type. The
-        default is None.
-    input_data : array_like
-        The available data corresponding to the parameters. The default is [].
+        default is `None`.
+    input_data : array
+        Available input data. The default is `[]`.
     moments : list
         List of the moments.
     &#34;&#34;&#34;
@@ -171,10 +172,10 @@ as following:</p>
     &gt;&gt;&gt; Inputs.Marginals[0].name = &#39;X_1&#39;
     &gt;&gt;&gt; Inputs.Marginals[0].input_data = input_data
     &#34;&#34;&#34;
+    poly_coeffs_flag = True
 
     def __init__(self):
         self.Marginals = []
-        self.__poly_coeffs_flag = True
         self.Rosenblatt = False
 
     def add_marginals(self):
@@ -188,6 +189,13 @@ as following:</p>
         &#34;&#34;&#34;
         self.Marginals.append(Marginal())</code></pre>
 </details>
+<h3>Class variables</h3>
+<dl>
+<dt id="inputs.Input.poly_coeffs_flag"><code class="name">var <span class="ident">poly_coeffs_flag</span></code></dt>
+<dd>
+<div class="desc"></div>
+</dd>
+</dl>
 <h3>Methods</h3>
 <dl>
 <dt id="inputs.Input.add_marginals"><code class="name flex">
@@ -224,14 +232,14 @@ parameter.</p>
 <h2 id="attributes">Attributes</h2>
 <dl>
 <dt><strong><code>name</code></strong> :&ensp;<code>string</code></dt>
-<dd>Name of the parameter. The default is '$x_1$'.</dd>
+<dd>Name of the parameter. The default is <code>'$x_1$'</code>.</dd>
 <dt><strong><code>dist_type</code></strong> :&ensp;<code>string</code></dt>
-<dd>Name of the distribution. The default is None.</dd>
+<dd>Name of the distribution. The default is <code>None</code>.</dd>
 <dt><strong><code>parameters</code></strong> :&ensp;<code>list</code></dt>
 <dd>List of the parameters corresponding to the distribution type. The
-default is None.</dd>
-<dt><strong><code>input_data</code></strong> :&ensp;<code>array_like</code></dt>
-<dd>The available data corresponding to the parameters. The default is [].</dd>
+default is <code>None</code>.</dd>
+<dt><strong><code>input_data</code></strong> :&ensp;<code>array</code></dt>
+<dd>Available input data. The default is <code>[]</code>.</dd>
 <dt><strong><code>moments</code></strong> :&ensp;<code>list</code></dt>
 <dd>List of the moments.</dd>
 </dl></div>
@@ -247,14 +255,14 @@ default is None.</dd>
     Attributes
     ----------
     name : string
-        Name of the parameter. The default is &#39;$x_1$&#39;.
+        Name of the parameter. The default is `&#39;$x_1$&#39;`.
     dist_type : string
-        Name of the distribution. The default is None.
+        Name of the distribution. The default is `None`.
     parameters : list
         List of the parameters corresponding to the distribution type. The
-        default is None.
-    input_data : array_like
-        The available data corresponding to the parameters. The default is [].
+        default is `None`.
+    input_data : array
+        Available input data. The default is `[]`.
     moments : list
         List of the moments.
     &#34;&#34;&#34;
@@ -282,6 +290,7 @@ default is None.</dd>
 <h4><code><a title="inputs.Input" href="#inputs.Input">Input</a></code></h4>
 <ul class="">
 <li><code><a title="inputs.Input.add_marginals" href="#inputs.Input.add_marginals">add_marginals</a></code></li>
+<li><code><a title="inputs.Input.poly_coeffs_flag" href="#inputs.Input.poly_coeffs_flag">poly_coeffs_flag</a></code></li>
 </ul>
 </li>
 <li>
diff --git a/docs/html/mcmc.html b/docs/html/mcmc.html
index 6f71c6c1614999e949660a8e357e49bbee5aad3e..9b025da0e76601eb20291b23eb2bf35af8ff558b 100644
--- a/docs/html/mcmc.html
+++ b/docs/html/mcmc.html
@@ -5,14 +5,14 @@
 <meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
 <meta name="generator" content="pdoc 0.10.0" />
 <title>mcmc API documentation</title>
-<meta name="description" content="MCMC class for Bayes inference with emcee package using an Affine Invariant
-Markov chain Monte Carlo (MCMC) Ensemble sampler [1] …" />
+<meta name="description" content="" />
 <link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
 <link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
 <link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
 <style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
 <style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
 <style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
 <script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
 <script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
 </head>
@@ -23,45 +23,12 @@ Markov chain Monte Carlo (MCMC) Ensemble sampler [1] …" />
 <h1 class="title">Module <code>mcmc</code></h1>
 </header>
 <section id="section-intro">
-<p>MCMC class for Bayes inference with emcee package using an Affine Invariant
-Markov chain Monte Carlo (MCMC) Ensemble sampler [1].</p>
-<ol>
-<li>Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee: the
-MCMC hammer. Publications of the Astronomical Society of the Pacific, 125(925),
-p.306. <a href="https://emcee.readthedocs.io/en/stable/">https://emcee.readthedocs.io/en/stable/</a></li>
-</ol>
-<p>Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart</p>
-<p>Created on Wed Jun 3 2020</p>
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
 <pre><code class="python">#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-&#34;&#34;&#34;
-MCMC class for Bayes inference with emcee package using an Affine Invariant
-Markov chain Monte Carlo (MCMC) Ensemble sampler [1].
-
-1. Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee: the
-MCMC hammer. Publications of the Astronomical Society of the Pacific, 125(925),
-p.306. https://emcee.readthedocs.io/en/stable/
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Wed Jun 3 2020
-&#34;&#34;&#34;
 
 import os
 import numpy as np
@@ -79,7 +46,27 @@ os.environ[&#34;OMP_NUM_THREADS&#34;] = &#34;1&#34;
 
 class MCMC:
     &#34;&#34;&#34;
-    A class for bayesian inference using a Markov-Chain Monte-Carlo Sampler.
+    A class for Bayesian inference via a Markov chain Monte Carlo (MCMC)
+    sampler to approximate the posterior distribution of Bayes&#39; theorem:
+    $$p(\\theta|\\mathcal{Y}) = \\frac{p(\\mathcal{Y}|\\theta) p(\\theta)}
+                                         {p(\\mathcal{Y})}.$$
+
+    This class performs inference with the emcee package [1] using an Affine
+    Invariant Ensemble Sampler (AIES) [2].
+
+    [1] Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee:
+        the MCMC hammer. Publications of the Astronomical Society of the
+        Pacific, 125(925), p.306. https://emcee.readthedocs.io/en/stable/
+
+    [2] Goodman, J. and Weare, J., 2010. Ensemble samplers with affine
+        invariance. Communications in applied mathematics and computational
+        science, 5(1), pp.65-80.
+
+
+    Attributes
+    ----------
+    BayesOpts : obj
+        Bayes object.
     &#34;&#34;&#34;
 
     def __init__(self, BayesOpts):
@@ -424,8 +411,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_prior(self, theta):
         &#34;&#34;&#34;
-        Calculates the log prior likelihood for the given parameter set(s)
-        theta.
+        Calculates the log of the prior density \\( p(\\theta) \\) for the
+        given parameter set(s) \\( \\theta \\).
 
         Parameters
         ----------
@@ -489,8 +476,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_likelihood(self, theta):
         &#34;&#34;&#34;
-        Computes likelihood p(y|theta, obs) of the performance of the
-        (meta-)model in reproducing the observation data.
+        Computes the likelihood \\( p(\\mathcal{Y}|\\theta) \\) of the
+        (meta-)model&#39;s performance in reproducing the observation data.
 
         Parameters
         ----------
@@ -538,8 +525,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_posterior(self, theta):
         &#34;&#34;&#34;
-        Computes the posterior likelihood p(theta| obs) for the given
-        parameterset.
+        Computes the log of the posterior \\( p(\\theta|\\mathcal{Y}) \\) for
+        the given parameter set.
 
         Parameters
         ----------
@@ -696,11 +683,11 @@ class MCMC:
         The potential scale reduction factor (PSRF) defined by the variance
         within one chain, W, with the variance between chains B.
         Both variances are combined in a weighted sum to obtain an estimate of
-        the variance of a parameter θ.The square root of the ratio of this
-        estimates variance to the within chain variance is called the potential
-        scale reduction.
+        the variance of a parameter \\( \\theta \\). The square root of the
+        ratio of this estimated variance to the within-chain variance is
+        called the potential scale reduction.
         For a well converged chain it should approach 1. Values greater than
-        typically 1.1 indicate that the chains have not yet fully converged.
+        1.1 typically indicate that the chains have not yet fully converged.
 
         Source: http://joergdietrich.github.io/emcee-convergence.html
 
@@ -709,7 +696,7 @@ class MCMC:
         Parameters
         ----------
         chain : array (n_walkers, n_steps, n_params)
-            DESCRIPTION.
+            The emcee ensemble samples.
 
         Returns
         -------
@@ -849,36 +836,6 @@ class MCMC:
         Iterative scheme as proposed in Meng and Wong (1996) to estimate the
         marginal likelihood
 
-        Parameters
-        ----------
-        N1 : TYPE
-            DESCRIPTION.
-        N2 : TYPE
-            DESCRIPTION.
-        q11 : TYPE
-            DESCRIPTION.
-        q12 : TYPE
-            DESCRIPTION.
-        q21 : TYPE
-            DESCRIPTION.
-        q22 : TYPE
-            DESCRIPTION.
-        r0 : TYPE
-            DESCRIPTION.
-        neff : TYPE
-            DESCRIPTION.
-        tol : TYPE
-            DESCRIPTION.
-        maxiter : TYPE
-            DESCRIPTION.
-        criterion : TYPE
-            DESCRIPTION.
-
-        Returns
-        -------
-        TYPE
-            DESCRIPTION.
-
         &#34;&#34;&#34;
         l1 = q11 - q12
         l2 = q21 - q22
@@ -1003,14 +960,51 @@ class MCMC:
 <span>(</span><span>BayesOpts)</span>
 </code></dt>
 <dd>
-<div class="desc"><p>A class for bayesian inference using a Markov-Chain Monte-Carlo Sampler.</p></div>
+<div class="desc"><p>A class for bayesian inference via a Markov-Chain Monte-Carlo (MCMC)
+Sampler to approximate the posterior distribution of the Bayes theorem:
+<span><span class="MathJax_Preview">p(\theta|\mathcal{y}) = \frac{p(\mathcal{y}|\theta) p(\theta)}
+{p(\mathcal{y})}.</span><script type="math/tex; mode=display">p(\theta|\mathcal{y}) = \frac{p(\mathcal{y}|\theta) p(\theta)}
+{p(\mathcal{y})}.</script></span></p>
+<p>This class make inference with emcee package [1] using an Affine Invariant
+Ensemble sampler (AIES) [2].</p>
+<p>[1] Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee:
+the MCMC hammer. Publications of the Astronomical Society of the
+Pacific, 125(925), p.306. <a href="https://emcee.readthedocs.io/en/stable/">https://emcee.readthedocs.io/en/stable/</a></p>
+<p>[2] Goodman, J. and Weare, J., 2010. Ensemble samplers with affine
+invariance. Communications in applied mathematics and computational
+science, 5(1), pp.65-80.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>BayesOpts</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Bayes object.</dd>
+</dl></div>
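+<p>A rough standalone sketch of the affine-invariant sampling this class
+wraps (the toy <code>log_posterior</code> and all numbers below are placeholders,
+not this class's API):</p>
+<pre><code class="python">import numpy as np
+import emcee
+
+def log_posterior(theta):
+    # toy standard-normal posterior; a real run would combine the
+    # log-prior and log-likelihood as described above
+    return -0.5 * np.sum(theta**2)
+
+ndim, nwalkers = 2, 16
+p0 = np.random.randn(nwalkers, ndim)  # initial walker positions
+sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)
+sampler.run_mcmc(p0, 1000)
+posterior_samples = sampler.get_chain(discard=200, flat=True)</code></pre>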
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
 <pre><code class="python">class MCMC:
     &#34;&#34;&#34;
-    A class for bayesian inference using a Markov-Chain Monte-Carlo Sampler.
+    A class for Bayesian inference via a Markov chain Monte Carlo (MCMC)
+    sampler to approximate the posterior distribution of Bayes&#39; theorem:
+    $$p(\\theta|\\mathcal{Y}) = \\frac{p(\\mathcal{Y}|\\theta) p(\\theta)}
+                                         {p(\\mathcal{Y})}.$$
+
+    This class performs inference with the emcee package [1] using an Affine
+    Invariant Ensemble Sampler (AIES) [2].
+
+    [1] Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee:
+        the MCMC hammer. Publications of the Astronomical Society of the
+        Pacific, 125(925), p.306. https://emcee.readthedocs.io/en/stable/
+
+    [2] Goodman, J. and Weare, J., 2010. Ensemble samplers with affine
+        invariance. Communications in applied mathematics and computational
+        science, 5(1), pp.65-80.
+
+
+    Attributes
+    ----------
+    BayesOpts : obj
+        Bayes object.
     &#34;&#34;&#34;
 
     def __init__(self, BayesOpts):
@@ -1355,8 +1349,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_prior(self, theta):
         &#34;&#34;&#34;
-        Calculates the log prior likelihood for the given parameter set(s)
-        theta.
+        Calculates the log of the prior density \\( p(\\theta) \\) for the
+        given parameter set(s) \\( \\theta \\).
 
         Parameters
         ----------
@@ -1420,8 +1414,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_likelihood(self, theta):
         &#34;&#34;&#34;
-        Computes likelihood p(y|theta, obs) of the performance of the
-        (meta-)model in reproducing the observation data.
+        Computes the likelihood \\( p(\\mathcal{Y}|\\theta)\\) of the
+        (meta-)model reproducing the observation data.
 
         Parameters
         ----------
@@ -1469,8 +1463,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_posterior(self, theta):
         &#34;&#34;&#34;
-        Computes the posterior likelihood p(theta| obs) for the given
-        parameterset.
+        Computes the posterior likelihood \\(p(\\theta| \\mathcal{Y})\\) for
+        the given parameter set(s).
 
         Parameters
         ----------
@@ -1627,11 +1621,11 @@ class MCMC:
         The potential scale reduction factor (PSRF) is defined by the variance
         within one chain, W, and the variance between chains, B.
         Both variances are combined in a weighted sum to obtain an estimate of
-        the variance of a parameter θ.The square root of the ratio of this
-        estimates variance to the within chain variance is called the potential
-        scale reduction.
+        the variance of a parameter \\( \\theta \\). The square root of the
+        ratio of this estimated variance to the within-chain variance is
+        called the potential scale reduction.
         For a well converged chain it should approach 1. Values greater than
-        typically 1.1 indicate that the chains have not yet fully converged.
+        1.1 typically indicate that the chains have not yet fully converged.
 
         Source: http://joergdietrich.github.io/emcee-convergence.html
 
@@ -1640,7 +1634,7 @@ class MCMC:
         Parameters
         ----------
         chain : array (n_walkers, n_steps, n_params)
-            DESCRIPTION.
+            The emcee ensemble samples.
 
         Returns
         -------
@@ -1780,36 +1774,6 @@ class MCMC:
         Iterative scheme as proposed in Meng and Wong (1996) to estimate the
         marginal likelihood
 
-        Parameters
-        ----------
-        N1 : TYPE
-            DESCRIPTION.
-        N2 : TYPE
-            DESCRIPTION.
-        q11 : TYPE
-            DESCRIPTION.
-        q12 : TYPE
-            DESCRIPTION.
-        q21 : TYPE
-            DESCRIPTION.
-        q22 : TYPE
-            DESCRIPTION.
-        r0 : TYPE
-            DESCRIPTION.
-        neff : TYPE
-            DESCRIPTION.
-        tol : TYPE
-            DESCRIPTION.
-        maxiter : TYPE
-            DESCRIPTION.
-        criterion : TYPE
-            DESCRIPTION.
-
-        Returns
-        -------
-        TYPE
-            DESCRIPTION.
-
         &#34;&#34;&#34;
         l1 = q11 - q12
         l2 = q21 - q22
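 
         # The remainder of this routine iterates the Meng and Wong (1996)
         # fixed point (a sketch of the standard bridge-sampling recursion;
         # the exact update used here may differ in detail):
         #   r_new = mean_j[ exp(l2_j) / (s1*exp(l2_j) + s2*r) ]
         #         / mean_i[ 1 / (s1*exp(l1_i) + s2*r) ]
         # with s1 = neff/(neff + N2) and s2 = N2/(neff + N2), repeated until
         # the relative change in r falls below tol or maxiter is reached.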
@@ -2010,17 +1974,17 @@ class MCMC:
 <div class="desc"><p>The potential scale reduction factor (PSRF) defined by the variance
 within one chain, W, with the variance between chains B.
 Both variances are combined in a weighted sum to obtain an estimate of
-the variance of a parameter θ.The square root of the ratio of this
-estimates variance to the within chain variance is called the potential
-scale reduction.
+the variance of a parameter <span><span class="MathJax_Preview"> \theta </span><script type="math/tex"> \theta </script></span>. The square root of the
+ratio of this estimated variance to the within-chain variance is called
+the potential scale reduction.
 For a well converged chain it should approach 1. Values greater than
-typically 1.1 indicate that the chains have not yet fully converged.</p>
+1.1 typically indicate that the chains have not yet fully converged.</p>
 <p>Source: <a href="http://joergdietrich.github.io/emcee-convergence.html">http://joergdietrich.github.io/emcee-convergence.html</a></p>
 <p><a href="https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py">https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py</a></p>
 <h2 id="parameters">Parameters</h2>
 <dl>
 <dt><strong><code>chain</code></strong> :&ensp;<code>array (n_walkers, n_steps, n_params)</code></dt>
-<dd>DESCRIPTION.</dd>
+<dd>The emcee ensemble samples.</dd>
 </dl>
 <h2 id="returns">Returns</h2>
 <dl>
@@ -2036,11 +2000,11 @@ typically 1.1 indicate that the chains have not yet fully converged.</p>
     The potential scale reduction factor (PSRF) is defined by the variance
     within one chain, W, and the variance between chains, B.
     Both variances are combined in a weighted sum to obtain an estimate of
-    the variance of a parameter θ.The square root of the ratio of this
-    estimates variance to the within chain variance is called the potential
-    scale reduction.
+    the variance of a parameter \\( \\theta \\). The square root of the
+    ratio of this estimated variance to the within-chain variance is
+    called the potential scale reduction.
     For a well converged chain it should approach 1. Values greater than
-    typically 1.1 indicate that the chains have not yet fully converged.
+    1.1 typically indicate that the chains have not yet fully converged.
 
     Source: http://joergdietrich.github.io/emcee-convergence.html
 
@@ -2049,7 +2013,7 @@ typically 1.1 indicate that the chains have not yet fully converged.</p>
     Parameters
     ----------
     chain : array (n_walkers, n_steps, n_params)
-        DESCRIPTION.
+        The emcee ensemble samples.
 
     Returns
     -------
@@ -2085,8 +2049,8 @@ typically 1.1 indicate that the chains have not yet fully converged.</p>
 <span>def <span class="ident">log_likelihood</span></span>(<span>self, theta)</span>
 </code></dt>
 <dd>
-<div class="desc"><p>Computes likelihood p(y|theta, obs) of the performance of the
-(meta-)model in reproducing the observation data.</p>
+<div class="desc"><p>Computes likelihood <span><span class="MathJax_Preview"> p(\mathcal{Y}|\theta)</span><script type="math/tex"> p(\mathcal{Y}|\theta)</script></span> of the performance
+of the (meta-)model in reproducing the observation data.</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
 <dt><strong><code>theta</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
@@ -2103,8 +2067,8 @@ typically 1.1 indicate that the chains have not yet fully converged.</p>
 </summary>
 <pre><code class="python">def log_likelihood(self, theta):
     &#34;&#34;&#34;
-    Computes likelihood p(y|theta, obs) of the performance of the
-    (meta-)model in reproducing the observation data.
+    Computes the likelihood \\( p(\\mathcal{Y}|\\theta)\\) of the
+    (meta-)model reproducing the observation data.
 
     Parameters
     ----------
@@ -2154,8 +2118,8 @@ typically 1.1 indicate that the chains have not yet fully converged.</p>
 <span>def <span class="ident">log_posterior</span></span>(<span>self, theta)</span>
 </code></dt>
 <dd>
-<div class="desc"><p>Computes the posterior likelihood p(theta| obs) for the given
-parameterset.</p>
+<div class="desc"><p>Computes the posterior likelihood <span><span class="MathJax_Preview">p(\theta| \mathcal{Y})</span><script type="math/tex">p(\theta| \mathcal{Y})</script></span> for
+the given parameterset.</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
 <dt><strong><code>theta</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
@@ -2172,8 +2136,8 @@ parameterset.</p>
 </summary>
 <pre><code class="python">def log_posterior(self, theta):
     &#34;&#34;&#34;
-    Computes the posterior likelihood p(theta| obs) for the given
-    parameterset.
+    Computes the posterior likelihood \\(p(\\theta| \\mathcal{Y})\\) for
+    the given parameter set(s).
 
     Parameters
     ----------
@@ -2222,8 +2186,8 @@ parameterset.</p>
 <span>def <span class="ident">log_prior</span></span>(<span>self, theta)</span>
 </code></dt>
 <dd>
-<div class="desc"><p>Calculates the log prior likelihood for the given parameter set(s)
-theta.</p>
+<div class="desc"><p>Calculates the log prior likelihood <span><span class="MathJax_Preview"> p(\theta)</span><script type="math/tex"> p(\theta)</script></span> for the given
+parameter set(s) <span><span class="MathJax_Preview"> \theta </span><script type="math/tex"> \theta </script></span>.</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
 <dt><strong><code>theta</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
@@ -2241,8 +2205,8 @@ returned otherwise an array.</dd>
 </summary>
 <pre><code class="python">def log_prior(self, theta):
     &#34;&#34;&#34;
-    Calculates the log prior likelihood for the given parameter set(s)
-    theta.
+    Calculates the log of the prior \\( p(\\theta)\\) for the given
+    parameter set(s) \\( \\theta \\).
 
     Parameters
     ----------
diff --git a/docs/html/post_processing.html b/docs/html/post_processing.html
new file mode 100644
index 0000000000000000000000000000000000000000..af165b3d4f984bbc1353f474ddc8abf600b237d9
--- /dev/null
+++ b/docs/html/post_processing.html
@@ -0,0 +1,3859 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>post_processing API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>post_processing</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import math
+import os
+from itertools import combinations, cycle
+import pandas as pd
+import scipy.stats as stats
+from sklearn.linear_model import LinearRegression
+from sklearn.metrics import mean_squared_error, r2_score
+from statsmodels.graphics.gofplots import qqplot
+from matplotlib.backends.backend_pdf import PdfPages
+import matplotlib.pyplot as plt
+import matplotlib.ticker as ticker
+from matplotlib.offsetbox import AnchoredText
+from matplotlib.patches import Patch
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           &#39;../&#39;, &#39;bayesvalidrox.mplstyle&#39;))
+
+
+class PostProcessing:
+    &#34;&#34;&#34;
+    This class provides many helper functions to post-process the trained
+    meta-model.
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object to do postprocessing on.
+    name : str
+        Type of the analysis. The default is `&#39;calib&#39;`. If a validation is
+        expected to be performed, change this to `&#39;valid&#39;`.
+    &#34;&#34;&#34;
+
+    def __init__(self, MetaModel, name=&#39;calib&#39;):
+        self.MetaModel = MetaModel
+        self.name = name
+
+    # -------------------------------------------------------------------------
+    def plot_moments(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the moments in PDF format in the directory
+        `Outputs_PostProcessing`.
+
+        Parameters
+        ----------
+        xlabel : str, optional
+            String to be displayed as x-label. The default is `&#39;Time [s]&#39;`.
+        plot_type : str, optional
+            Options: bar or line. The default is `None`.
+        save_fig : bool, optional
+            Save figure or not. The default is `True`.
+
+        Returns
+        -------
+        pce_means: dict
+            Mean of the model outputs.
+        pce_stds: dict
+            Standard deviation of the model outputs.
+
+        &#34;&#34;&#34;
+
+        bar_plot = (plot_type == &#39;bar&#39;)
+        meta_model_type = self.MetaModel.meta_model_type
+        Model = self.MetaModel.ModelObj
+
+        # Read Monte-Carlo reference
+        self.mc_reference = Model.read_mc_reference()
+        print(self.mc_reference)
+
+        # Set the x values
+        x_values_orig = self.MetaModel.ExpDesign.x_values
+
+        # Compute the moments with the PCEModel object
+        self._compute_pce_moments()
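+        # (For an orthonormal PCE basis, the mean is the zeroth coefficient
+        # and the variance is the sum of the squared remaining coefficients;
+        # see _compute_pce_moments.)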
+
+        # Get the variables
+        out_names = Model.Output.names
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}Mean_Std_PCE.pdf&#39;)
+
+        # Plot the best fit line, set the linewidth (lw), color and
+        # transparency (alpha) of the line
+        for idx, key in enumerate(out_names):
+            fig, ax = plt.subplots(nrows=1, ncols=2)
+
+            # Extract mean and std
+            mean_data = self.pce_means[key]
+            std_data = self.pce_stds[key]
+
+            # Extract a list of x values
+            if type(x_values_orig) is dict:
+                x = x_values_orig[key]
+            else:
+                x = x_values_orig
+
+            # Plot: bar plot or line plot
+            if bar_plot:
+                ax[0].bar(list(map(str, x)), mean_data, color=&#39;b&#39;,
+                          width=0.25)
+                ax[1].bar(list(map(str, x)), std_data, color=&#39;b&#39;,
+                          width=0.25)
+                ax[0].legend(labels=[meta_model_type])
+                ax[1].legend(labels=[meta_model_type])
+            else:
+                ax[0].plot(x, mean_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                           label=meta_model_type)
+                ax[1].plot(x, std_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                           label=meta_model_type)
+
+            if self.mc_reference is not None:
+                if bar_plot:
+                    ax[0].bar(list(map(str, x)), self.mc_reference[&#39;mean&#39;],
+                              color=&#39;r&#39;, width=0.25)
+                    ax[1].bar(list(map(str, x)), self.mc_reference[&#39;std&#39;],
+                              color=&#39;r&#39;, width=0.25)
+                    ax[0].legend(labels=[meta_model_type])
+                    ax[1].legend(labels=[meta_model_type])
+                else:
+                    ax[0].plot(x, self.mc_reference[&#39;mean&#39;], lw=3, marker=&#39;x&#39;,
+                               color=&#39;r&#39;, label=&#39;Ref.&#39;)
+                    ax[1].plot(x, self.mc_reference[&#39;std&#39;], lw=3, marker=&#39;x&#39;,
+                               color=&#39;r&#39;, label=&#39;Ref.&#39;)
+
+            # Label the axes and provide a title
+            ax[0].set_xlabel(xlabel)
+            ax[1].set_xlabel(xlabel)
+            ax[0].set_ylabel(key)
+            ax[1].set_ylabel(key)
+
+            # Provide a title
+            ax[0].set_title(&#39;Mean of &#39; + key)
+            ax[1].set_title(&#39;Std of &#39; + key)
+
+            if not bar_plot:
+                ax[0].legend(loc=&#39;best&#39;)
+                ax[1].legend(loc=&#39;best&#39;)
+
+            plt.tight_layout()
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                # Destroy the current plot
+                plt.clf()
+
+        pdf.close()
+
+        return self.pce_means, self.pce_stds
+
+    # -------------------------------------------------------------------------
+    def valid_metamodel(self, n_samples=1, samples=None, x_axis=&#39;Time [s]&#39;):
+        &#34;&#34;&#34;
+        Evaluates and plots the metamodel and the model outputs for the given
+        number of samples or the given samples.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples to be evaluated. The default is 1.
+        samples : array of shape (n_samples, n_params), optional
+            Samples to be evaluated. The default is None.
+        x_axis : str, optional
+            Label of x axis. The default is `&#39;Time [s]&#39;`.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Extract x_values
+        x_values = MetaModel.ExpDesign.x_values
+
+        self.model_out_dict = self._eval_model(samples, key_str=&#39;valid&#39;)
+        self.pce_out_mean, self.pce_out_std = MetaModel.eval_metamodel(samples)
+
+        try:
+            key = Model.Output.names[1]
+        except IndexError:
+            key = Model.Output.names[0]
+
+        n_obs = self.model_out_dict[key].shape[1]
+
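+        # A single observation point per output gets the scatter-type
+        # validation plot; longer series are plotted against x_values.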
+        if n_obs == 1:
+            self._plot_validation()
+        else:
+            self._plot_validation_multi(x_values=x_values, x_axis=x_axis)
+
+    # -------------------------------------------------------------------------
+    def check_accuracy(self, n_samples=None, samples=None, outputs=None):
+        &#34;&#34;&#34;
+        Checks accuracy of the metamodel by computing the root mean square
+        error and validation error for all outputs.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples. The default is None.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to be checked. The default is None.
+        outputs : dict, optional
+            Output dictionary with model outputs for all given output types in
+            `Model.Output.names`. The default is None.
+
+        Raises
+        ------
+        Exception
+            When neither n_samples nor samples are provided.
+
+        Returns
+        -------
+        rmse: dict
+            Root mean squared error for each output.
+        valid_error : dict
+            Validation error for each output.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        # Set the number of samples
+        if n_samples:
+            self.n_samples = n_samples
+        elif samples is not None:
+            self.n_samples = samples.shape[0]
+        else:
+            raise Exception(&#34;Please provide either samples or the number &#34;
+                            &#34;of samples!&#34;)
+
+        # Generate random samples if necessary
+        Samples = self._get_sample() if samples is None else samples
+
+        # Run the original model with the generated samples
+        if outputs is None:
+            outputs = self._eval_model(Samples, key_str=&#39;validSet&#39;)
+
+        # Run the PCE model with the generated samples
+        pce_outputs, _ = MetaModel.eval_metamodel(samples=Samples)
+
+        self.rmse = {}
+        self.valid_error = {}
+        # Loop over the keys and compute RMSE error.
+        for key in Model.Output.names:
+            # Root mean square error
+            self.rmse[key] = mean_squared_error(outputs[key], pce_outputs[key],
+                                                squared=False,
+                                                multioutput=&#39;raw_values&#39;)
+            # Validation error
+            self.valid_error[key] = (self.rmse[key]**2 / self.n_samples) / \
+                np.var(outputs[key], ddof=1, axis=0)
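+            # (This normalises the squared RMSE, i.e. the mean squared
+            # error, by the number of samples and the sample variance of
+            # the model output.)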
+
+            # Print a report table
+            print(&#34;\n&gt;&gt;&gt;&gt;&gt; Errors of {} &lt;&lt;&lt;&lt;&lt;&#34;.format(key))
+            print(&#34;\nIndex  |  RMSE   |  Validation Error&#34;)
+            print(&#39;-&#39;*35)
+            print(&#39;\n&#39;.join(f&#39;{i+1}  |  {k:.3e}  |  {j:.3e}&#39; for i, (k, j)
+                            in enumerate(zip(self.rmse[key],
+                                             self.valid_error[key]))))
+        # Save error dicts in PCEModel object
+        self.MetaModel.rmse = self.rmse
+        self.MetaModel.valid_error = self.valid_error
+
+        return self.rmse, self.valid_error
+
+    # -------------------------------------------------------------------------
+    def plot_seq_design_diagnostics(self, ref_BME_KLD=None, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
+        (KLD) for the sequential design.
+
+        Parameters
+        ----------
+        ref_BME_KLD : array, optional
+            Reference BME and KLD. The default is `None`.
+        save_fig : bool, optional
+            Whether to save the figures. The default is `True`.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        PCEModel = self.MetaModel
+        n_init_samples = PCEModel.ExpDesign.n_init_samples
+        n_total_samples = PCEModel.ExpDesign.X.shape[0]
+
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}/seqPCEModelDiagnostics.pdf&#39;)
+
+        plotList = [&#39;Modified LOO error&#39;, &#39;Validation error&#39;, &#39;KLD&#39;, &#39;BME&#39;,
+                    &#39;RMSEMean&#39;, &#39;RMSEStd&#39;, &#39;Hellinger distance&#39;]
+        seqList = [PCEModel.SeqModifiedLOO, PCEModel.seqValidError,
+                   PCEModel.SeqKLD, PCEModel.SeqBME, PCEModel.seqRMSEMean,
+                   PCEModel.seqRMSEStd, PCEModel.SeqDistHellinger]
+
+        markers = (&#39;x&#39;, &#39;o&#39;, &#39;d&#39;, &#39;*&#39;, &#39;+&#39;)
+        colors = (&#39;k&#39;, &#39;darkgreen&#39;, &#39;b&#39;, &#39;navy&#39;, &#39;darkred&#39;)
+
+        # Plot the evolution of the diagnostic criteria of the
+        # Sequential Experimental Design.
+        for plotidx, plot in enumerate(plotList):
+            fig, ax = plt.subplots()
+            seq_dict = seqList[plotidx]
+            name_util = list(seq_dict.keys())
+
+            if len(name_util) == 0:
+                continue
+
+            # Box plot when Replications have been detected.
+            if any(int(name.split(&#34;rep_&#34;, 1)[1]) &gt; 1 for name in name_util):
+                # Extract the values from dict
+                sorted_seq_opt = {}
+                # Number of replications
+                n_reps = PCEModel.ExpDesign.nReprications
+
+                # Get the list of utility function names
+                # Handle if only one UtilityFunction is provided
+                if not isinstance(PCEModel.ExpDesign.UtilityFunction, list):
+                    util_funcs = [PCEModel.ExpDesign.UtilityFunction]
+                else:
+                    util_funcs = PCEModel.ExpDesign.UtilityFunction
+
+                for util in util_funcs:
+                    sortedSeq = {}
+                    # min number of runs available from reps
+                    n_runs = min([seq_dict[f&#39;{util}_rep_{i+1}&#39;].shape[0]
+                                 for i in range(n_reps)])
+
+                    for runIdx in range(n_runs):
+                        values = []
+                        for key in seq_dict.keys():
+                            if util in key:
+                                values.append(seq_dict[key][runIdx].mean())
+                        sortedSeq[&#39;SeqItr_&#39;+str(runIdx)] = np.array(values)
+                    sorted_seq_opt[util] = sortedSeq
+
+                # BoxPlot
+                def draw_plot(data, labels, edge_color, fill_color, idx):
+                    pos = labels - (idx-1)
+                    bp = plt.boxplot(data, positions=pos, labels=labels,
+                                     patch_artist=True, sym=&#39;&#39;, widths=0.75)
+                    elements = [&#39;boxes&#39;, &#39;whiskers&#39;, &#39;fliers&#39;, &#39;means&#39;,
+                                &#39;medians&#39;, &#39;caps&#39;]
+                    for element in elements:
+                        plt.setp(bp[element], color=edge_color[idx])
+
+                    for patch in bp[&#39;boxes&#39;]:
+                        patch.set(facecolor=fill_color[idx])
+
+                if PCEModel.ExpDesign.n_new_samples != 1:
+                    step1 = PCEModel.ExpDesign.n_new_samples
+                    step2 = 1
+                else:
+                    step1 = 5
+                    step2 = 5
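+                # When samples were added one at a time, thin the box plots
+                # to every fifth design iteration to keep the x-axis readable.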
+                edge_color = [&#39;red&#39;, &#39;blue&#39;, &#39;green&#39;]
+                fill_color = [&#39;tan&#39;, &#39;cyan&#39;, &#39;lightgreen&#39;]
+                plot_label = plot
+                # Plot for different Utility Functions
+                for idx, util in enumerate(util_funcs):
+                    all_errors = np.empty((n_reps, 0))
+
+                    for key in list(sorted_seq_opt[util].keys()):
+                        errors = sorted_seq_opt.get(util, {}).get(key)[:, None]
+                        all_errors = np.hstack((all_errors, errors))
+
+                    # Special cases for BME and KLD
+                    if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                        # BME convergence if refBME is provided
+                        if ref_BME_KLD is not None:
+                            if plot == &#39;BME&#39;:
+                                refValue = ref_BME_KLD[0]
+                                plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                            if plot == &#39;KLD&#39;:
+                                refValue = ref_BME_KLD[1]
+                                plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                    r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                    r&#39;p(\theta)]$&#39;
+
+                            # Difference between BME/KLD and the ref. values
+                            all_errors = np.divide(all_errors,
+                                                   np.full((all_errors.shape),
+                                                           refValue))
+
+                            # Plot baseline for zero, i.e. no difference
+                            plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                        ls=&#39;--&#39;, lw=2)
+
+                    # Plot each UtilFuncs
+                    labels = np.arange(n_init_samples, n_total_samples+1, step1)
+                    draw_plot(all_errors[:, ::step2], labels, edge_color,
+                              fill_color, idx)
+
+                plt.xticks(labels, labels)
+                # Set the major and minor locators
+                ax.xaxis.set_major_locator(ticker.AutoLocator())
+                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+                ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+                ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+                # Legend
+                legend_elements = []
+                for idx, util in enumerate(util_funcs):
+                    legend_elements.append(Patch(facecolor=fill_color[idx],
+                                                 edgecolor=edge_color[idx],
+                                                 label=util))
+                plt.legend(handles=legend_elements[::-1], loc=&#39;best&#39;)
+
+                if plot != &#39;BME&#39; and plot != &#39;KLD&#39;:
+                    plt.yscale(&#39;log&#39;)
+                plt.autoscale(True)
+                plt.xlabel(&#39;\\# of training samples&#39;)
+                plt.ylabel(plot_label)
+                plt.title(plot)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                    # Destroy the current plot
+                    plt.clf()
+                    # Save arrays into files
+                    f = open(f&#39;./{newpath}/Seq{plot}.txt&#39;, &#39;w&#39;)
+                    f.write(str(sorted_seq_opt))
+                    f.close()
+            else:
+                for idx, name in enumerate(name_util):
+                    seq_values = seq_dict[name]
+                    if PCEModel.ExpDesign.n_new_samples != 1:
+                        step = PCEModel.ExpDesign.n_new_samples
+                    else:
+                        step = 1
+                    x_idx = np.arange(n_init_samples, n_total_samples+1, step)
+                    if n_total_samples not in x_idx:
+                        x_idx = np.hstack((x_idx, n_total_samples))
+
+                    if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                        # BME convergence if refBME is provided
+                        if ref_BME_KLD is not None:
+                            if plot == &#39;BME&#39;:
+                                refValue = ref_BME_KLD[0]
+                                plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                            if plot == &#39;KLD&#39;:
+                                refValue = ref_BME_KLD[1]
+                                plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                    r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                    r&#39;p(\theta)]$&#39;
+
+                            # Difference between BME/KLD and the ref. values
+                            values = np.divide(seq_values,
+                                               np.full((seq_values.shape),
+                                                       refValue))
+
+                            # Plot baseline for zero, i.e. no difference
+                            plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                        ls=&#39;--&#39;, lw=2)
+
+                            # Set the limits
+                            plt.ylim([1e-1, 1e1])
+
+                            # Create the plots
+                            plt.semilogy(x_idx, values, marker=markers[idx],
+                                         color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                         label=name.split(&#34;_rep&#34;, 1)[0])
+                        else:
+                            plot_label = plot
+
+                            # Create the plots
+                            plt.plot(x_idx, seq_values, marker=markers[idx],
+                                     color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                     label=name.split(&#34;_rep&#34;, 1)[0])
+
+                    else:
+                        plot_label = plot
+                        seq_values = np.nan_to_num(seq_values)
+
+                        # Plot the error evolution for each output
+                        for i in range(seq_values.shape[1]):
+                            plt.semilogy(x_idx, seq_values[:, i], ls=&#39;--&#39;,
+                                         lw=2, marker=markers[idx],
+                                         color=colors[idx], alpha=0.15)
+
+                        plt.semilogy(x_idx, seq_values, marker=markers[idx],
+                                     ls=&#39;--&#39;, lw=2, color=colors[idx],
+                                     label=name.split(&#34;_rep&#34;, 1)[0])
+
+                # Set the major and minor locators
+                ax.xaxis.set_major_locator(ticker.AutoLocator())
+                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+                ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+                ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+                ax.tick_params(axis=&#39;both&#39;, which=&#39;major&#39;, direction=&#39;in&#39;,
+                               width=3, length=10)
+                ax.tick_params(axis=&#39;both&#39;, which=&#39;minor&#39;, direction=&#39;in&#39;,
+                               width=2, length=8)
+                plt.xlabel(&#39;Number of runs&#39;)
+                plt.ylabel(plot_label)
+                plt.title(plot)
+                plt.legend(frameon=True)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                    # Destroy the current plot
+                    plt.clf()
+
+                    # ---------------- Saving arrays into files ---------------
+                    np.save(f&#39;./{newpath}/Seq{plot}.npy&#39;, seq_values)
+
+        # Close the pdf
+        pdf.close()
+        return
+
+    # -------------------------------------------------------------------------
+    def sobol_indices(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+        &#34;&#34;&#34;
+        Provides Sobol indices as a sensitivity measure to infer the importance
+        of the input parameters. See Eq. 27 in [1] for more details. For the
+        case with Principal component analysis refer to [2].
+
+        [1] Global sensitivity analysis: A flexible and efficient framework
+        with an example from stochastic hydrogeology S. Oladyshkin, F.P.
+        de Barros, W. Nowak  https://doi.org/10.1016/j.advwatres.2011.11.001
+
+        [2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+        component analysis and sparse polynomial chaos expansions for global
+        sensitivity analysis and model calibration: Application to urban
+        drainage simulation. Reliability Engineering &amp; System Safety, 195,
+        p.106737.
+
+        Parameters
+        ----------
+        xlabel : str, optional
+            Label of the x-axis. The default is `&#39;Time [s]&#39;`.
+        plot_type : str, optional
+            Plot type. The default is `None`. This corresponds to line plot.
+            Bar chart can be selected by `bar`.
+        save_fig : bool, optional
+            Whether to save the figures. The default is `True`.
+
+        Returns
+        -------
+        sobol_cell: dict
+            Sobol indices.
+        total_sobol: dict
+            Total Sobol indices.
+
+        &#34;&#34;&#34;
+        # Extract the necessary variables
+        PCEModel = self.MetaModel
+        basis_dict = PCEModel.basis_dict
+        coeffs_dict = PCEModel.coeffs_dict
+        n_params = PCEModel.n_params
+        max_order = np.max(PCEModel.pce_deg)
+        self.sobol_cell = {}
+        self.total_sobol = {}
+
+        for Output in PCEModel.ModelObj.Output.names:
+
+            n_meas_points = len(coeffs_dict[Output])
+
+            # Initialize the (cell) array containing the (total) Sobol indices.
+            sobol_array = dict.fromkeys(range(1, max_order+1), [])
+            sobol_cell_array = dict.fromkeys(range(1, max_order+1), [])
+
+            for i_order in range(1, max_order+1):
+                n_comb = math.comb(n_params, i_order)
+
+                sobol_cell_array[i_order] = np.zeros((n_comb, n_meas_points))
+
+            total_sobol_array = np.zeros((n_params, n_meas_points))
+
+            # Initialize the cell to store the names of the variables
+            TotalVariance = np.zeros((n_meas_points))
+
+            # Loop over all measurement points and calculate sobol indices
+            for pIdx in range(n_meas_points):
+
+                # Extract the basis indices (alpha) and coefficients
+                Basis = basis_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+                try:
+                    clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+1}&#39;]
+                    PCECoeffs = clf_poly.coef_
+                except:
+                    PCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+                # Compute total variance
+                TotalVariance[pIdx] = np.sum(np.square(PCECoeffs[1:]))
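+                # For an orthonormal PCE, the output variance is the sum of
+                # the squared coefficients of all non-constant terms, and a
+                # Sobol index is the analogous sum over the basis terms that
+                # involve exactly the considered variables, divided by the
+                # total variance (cf. Eq. 27 in [1]).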
+
+                nzidx = np.where(PCECoeffs != 0)[0]
+                # Set all the Sobol indices equal to zero in the presence of a
+                # null output.
+                if len(nzidx) == 0:
+                    # This is buggy.
+                    for i_order in range(1, max_order+1):
+                        sobol_cell_array[i_order][:, pIdx] = 0
+
+                # Otherwise compute them by summing well-chosen coefficients
+                else:
+                    nz_basis = Basis[nzidx]
+                    for i_order in range(1, max_order+1):
+                        idx = np.where(np.sum(nz_basis &gt; 0, axis=1) == i_order)
+                        subbasis = nz_basis[idx]
+                        Z = np.array(list(combinations(range(n_params), i_order)))
+
+                        for q in range(Z.shape[0]):
+                            Zq = Z[q]
+                            subsubbasis = subbasis[:, Zq]
+                            subidx = np.prod(subsubbasis, axis=1) &gt; 0
+                            sum_ind = nzidx[idx[0][subidx]]
+                            if TotalVariance[pIdx] == 0.0:
+                                sobol_cell_array[i_order][q, pIdx] = 0.0
+                            else:
+                                sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                                sobol /= TotalVariance[pIdx]
+                                sobol_cell_array[i_order][q, pIdx] = sobol
+
+                    # Compute the TOTAL Sobol indices.
+                    for ParIdx in range(n_params):
+                        idx = nz_basis[:, ParIdx] &gt; 0
+                        sum_ind = nzidx[idx]
+
+                        if TotalVariance[pIdx] == 0.0:
+                            total_sobol_array[ParIdx, pIdx] = 0.0
+                        else:
+                            sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                            sobol /= TotalVariance[pIdx]
+                            total_sobol_array[ParIdx, pIdx] = sobol
+
+                # ----- if PCA selected: Compute covariance -----
+                if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+                    cov_Z_p_q = np.zeros((n_params))
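+                    # Covariance between the PCE coefficients of consecutive
+                    # principal components, needed for the back-transformed
+                    # Sobol indices of Ref. [2].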
+                    # Extract the basis indices (alpha) and coefficients for 
+                    # next component
+                    if pIdx &lt; n_meas_points-1:
+                        nextBasis = basis_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                        try:
+                            clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+2}&#39;]
+                            nextPCECoeffs = clf_poly.coef_
+                        except:
+                            nextPCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                        # Choose the common non-zero basis
+                        mask = (Basis[:, None] == nextBasis).all(-1).any(-1)
+                        similar_basis = Basis[mask]
+                        # Compute the TOTAL Sobol indices.
+                        for ParIdx in range(n_params):
+                            idx = similar_basis[:, ParIdx] &gt; 0
+                            try:
+                                sum_is = nzidx[idx]
+                                cov_Z_p_q[ParIdx] = np.sum(PCECoeffs[sum_ind] *
+                                                           nextPCECoeffs[sum_is])
+                            except:
+                                cov_Z_p_q[ParIdx] = 0.0
+
+            # Compute the sobol indices according to Ref. 2
+            if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+                n_c_points = PCEModel.ExpDesign.Y[Output].shape[1]
+                PCA = PCEModel.pca[Output]
+                compPCA = PCA.components_
+                nComp = compPCA.shape[0]
+                var_Z_p = PCA.explained_variance_
+
+                # Extract the sobol index of the components
+                for i_order in range(1, max_order+1):
+                    n_comb = math.comb(n_params, i_order)
+                    sobol_array[i_order] = np.zeros((n_comb, n_c_points))
+                    Z = np.array(list(combinations(range(n_params), i_order)))
+
+                    for q in range(Z.shape[0]):
+                        S_Z_i = sobol_cell_array[i_order][q]
+
+                        for tIdx in range(n_c_points):
+                            var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                            if var_Y_t == 0.0:
+                                term1, term2 = 0.0, 0.0
+                            else:
+                                term1 = np.sum([S_Z_i[i]*(var_Z_p[i]*(compPCA[i, tIdx]**2)/var_Y_t) for i in range(nComp)])
+
+                                # Term 2
+                                # cov_Z_p_q = np.ones((nComp))# TODO: from coeffs
+                                Phi_t_p = compPCA[:nComp-1]
+                                Phi_t_q = compPCA
+                                term2 = 2 * np.sum([cov_Z_p_q[ParIdx] * Phi_t_p[i,tIdx] * Phi_t_q[i,tIdx]/var_Y_t for i in range(nComp-1)])
+
+                            sobol_array[i_order][q, tIdx] = term1 #+ term2
+
+                # Compute the TOTAL Sobol indices.
+                total_sobol = np.zeros((n_params, n_c_points))
+                for ParIdx in range(n_params):
+                    S_Z_i = total_sobol_array[ParIdx]
+
+                    for tIdx in range(n_c_points):
+                        var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                        if var_Y_t == 0.0:
+                            term1, term2 = 0.0, 0.0
+                        else:
+                            term1 = 0
+                            for i in range(nComp):
+                                term1 += S_Z_i[i] * var_Z_p[i] * \
+                                    (compPCA[i, tIdx]**2) / var_Y_t
+
+                            # Term 2
+                            # cov_Z_p_q = np.ones((nComp))# TODO: from coeffs
+                            Phi_t_p = compPCA[:nComp-1]
+                            Phi_t_q = compPCA
+                            term2 = 0
+                            for i in range(nComp-1):
+                                term2 += cov_Z_p_q[ParIdx] * Phi_t_p[i, tIdx] \
+                                    * Phi_t_q[i, tIdx] / var_Y_t
+                            term2 *= 2
+
+                        total_sobol[ParIdx, tIdx] = term1 + term2
+
+                self.sobol_cell[Output] = sobol_array
+                self.total_sobol[Output] = total_sobol
+            else:
+                self.sobol_cell[Output] = sobol_cell_array
+                self.total_sobol[Output] = total_sobol_array
+
+        # ---------------- Plot -----------------------
+        par_names = PCEModel.ExpDesign.par_names
+        x_values_orig = PCEModel.ExpDesign.x_values
+
+        cases = [&#39;&#39;]
+
+        for case in cases:
+            newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            if save_fig:
+                # create a PdfPages object
+                name = case+&#39;_&#39; if &#39;Valid&#39; in cases else &#39;&#39;
+                pdf = PdfPages(&#39;./&#39;+newpath+name+&#39;Sobol_indices.pdf&#39;)
+
+            fig = plt.figure()
+
+            for outIdx, Output in enumerate(PCEModel.ModelObj.Output.names):
+
+                # Extract total Sobol indices
+                total_sobol = self.total_sobol[Output]
+
+                # Extract a list of x values
+                if type(x_values_orig) is dict:
+                    x = x_values_orig[Output]
+                else:
+                    x = x_values_orig
+
+                if plot_type == &#39;bar&#39;:
+                    ax = fig.add_axes([0, 0, 1, 1])
+                    dict1 = {xlabel: x}
+                    dict2 = {param: sobolIndices for param, sobolIndices
+                             in zip(par_names, total_sobol)}
+
+                    df = pd.DataFrame({**dict1, **dict2})
+                    df.plot(x=xlabel, y=par_names, kind=&#34;bar&#34;, ax=ax, rot=0,
+                            colormap=&#39;Dark2&#39;)
+                    ax.set_ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+
+                else:
+                    for i, sobolIndices in enumerate(total_sobol):
+                        plt.plot(x, sobolIndices, label=par_names[i],
+                                 marker=&#39;x&#39;, lw=2.5)
+
+                    plt.ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+                    plt.xlabel(xlabel)
+
+                plt.title(f&#39;Sensitivity analysis of {Output}&#39;)
+                if plot_type != &#39;bar&#39;:
+                    plt.legend(loc=&#39;best&#39;, frameon=True)
+
+                # Save indices
+                np.savetxt(f&#39;./{newpath}{name}totalsobol_&#39; +
+                           Output.replace(&#39;/&#39;, &#39;_&#39;) + &#39;.csv&#39;,
+                           total_sobol.T, delimiter=&#39;,&#39;,
+                           header=&#39;,&#39;.join(par_names), comments=&#39;&#39;)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                    # Destroy the current plot
+                    plt.clf()
+
+            pdf.close()
+
+        return self.sobol_cell, self.total_sobol
+
+    # -------------------------------------------------------------------------
+    def check_reg_quality(self, n_samples=1000, samples=None, save_fig=True):
+        &#34;&#34;&#34;
+        Checks the quality of the metamodel for single output models based on:
+        https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
+
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of parameter sets to use for the check. The default is 1000.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to use for the check. The default is None.
+        save_fig : bool, optional
+            Whether to save the figures. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Evaluate the original and the surrogate model
+        y_val = self._eval_model(samples, key_str=&#39;valid&#39;)
+        y_pce_val, _ = MetaModel.eval_metamodel(samples=samples)
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = (r&#39;Outputs_PostProcessing_{0}/&#39;.format(self.name))
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+
+            # ------ Residuals vs. predicting variables ------
+            # Check the assumptions of linearity and independence
+            fig1 = plt.figure()
+            plt.title(key+&#34;: Residuals vs. Predicting variables&#34;)
+            residuals = y_val_ - y_pce_val_
+            plt.scatter(x=y_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+            plt.grid(True)
+            xmin, xmax = min(y_val_), max(y_val_)
+            plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                       linestyle=&#39;--&#39;)
+            plt.xlabel(key)
+            plt.ylabel(&#39;Residuals&#39;)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig1.savefig(f&#39;./{newpath}/Residuals_vs_PredVariables.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Fitted vs. residuals ------
+            # Check the assumptions of linearity and independence
+            fig2 = plt.figure()
+            plt.title(key+&#34;: Fitted values vs. residuals&#34;)
+            plt.scatter(x=y_pce_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+            plt.grid(True)
+            xmin, xmax = min(y_val_), max(y_val_)
+            plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                       linestyle=&#39;--&#39;)
+            plt.xlabel(key)
+            plt.ylabel(&#39;Residuals&#39;)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig2.savefig(f&#39;./{newpath}/Fitted_vs_Residuals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Histogram of normalized residuals ------
+            fig3 = plt.figure()
+            resid_pearson = residuals / (max(residuals)-min(residuals))
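+            # (These are range-scaled residuals, i.e. residuals divided by
+            # their overall range, rather than true Pearson residuals.)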
+            plt.hist(resid_pearson, bins=20, edgecolor=&#39;k&#39;)
+            plt.ylabel(&#39;Count&#39;)
+            plt.xlabel(&#39;Normalized residuals&#39;)
+            plt.title(f&#34;{key}: Histogram of normalized residuals&#34;)
+
+            # Normality (Shapiro-Wilk) test of the residuals
+            ax = plt.gca()
+            _, p = stats.shapiro(residuals)
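+            # Shapiro-Wilk tests the null hypothesis that the residuals are
+            # normally distributed; a small p-value rejects normality.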
+            if p &lt; 0.01:
+                annText = &#34;The normality assumption may not hold.&#34;
+            else:
+                annText = &#34;The residuals seem to come from a Gaussian distribution.&#34;
+            at = AnchoredText(annText, prop=dict(size=30), frameon=True,
+                              loc=&#39;upper left&#39;)
+            at.patch.set_boxstyle(&#34;round,pad=0.,rounding_size=0.2&#34;)
+            ax.add_artist(at)
+
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig3.savefig(f&#39;./{newpath}/Hist_NormResiduals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Q-Q plot of the normalized residuals ------
+            plt.figure()
+            fig4 = qqplot(resid_pearson, line=&#39;45&#39;, fit=True)
+            plt.xticks()
+            plt.yticks()
+            plt.xlabel(&#34;Theoretical quantiles&#34;)
+            plt.ylabel(&#34;Sample quantiles&#34;)
+            plt.title(key+&#34;: Q-Q plot of normalized residuals&#34;)
+            plt.grid(True)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig4.savefig(f&#39;./{newpath}/QQPlot_NormResiduals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+    # -------------------------------------------------------------------------
+    def eval_pce_model_3d(self, save_fig=True):
+
+        self.n_samples = 1000
+
+        PCEModel = self.MetaModel
+        Model = self.MetaModel.ModelObj
+        n_samples = self.n_samples
+
+        # Create 3D-Grid
+        # TODO: Make it general
+        x = np.linspace(-5, 10, n_samples)
+        y = np.linspace(0, 15, n_samples)
+
+        X, Y = np.meshgrid(x, y)
+        PCE_Z = np.zeros((self.n_samples, self.n_samples))
+        Model_Z = np.zeros((self.n_samples, self.n_samples))
+
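+        # Note on the grid layout: with meshgrid defaults, column idxMesh of
+        # X is constant in x while column idxMesh of Y sweeps over all y
+        # values, so each loop iteration below evaluates one grid line of
+        # the surface.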
+        for idxMesh in range(self.n_samples):
+            sample_mesh = np.vstack((X[:, idxMesh], Y[:, idxMesh])).T
+
+            univ_p_val = PCEModel.univ_basis_vals(sample_mesh)
+
+            for Outkey, ValuesDict in PCEModel.coeffs_dict.items():
+
+                pce_out_mean = np.zeros((len(sample_mesh), len(ValuesDict)))
+                pce_out_std = np.zeros((len(sample_mesh), len(ValuesDict)))
+                model_outs = np.zeros((len(sample_mesh), len(ValuesDict)))
+
+                for Inkey, InIdxValues in ValuesDict.items():
+                    idx = int(Inkey.split(&#39;_&#39;)[1]) - 1
+                    basis_deg_ind = PCEModel.basis_dict[Outkey][Inkey]
+                    clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+
+                    PSI_Val = PCEModel.create_psi(basis_deg_ind, univ_p_val)
+
+                    # Prediction with error bars
+                    y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+
+                    pce_out_mean[:, idx] = y_mean
+                    pce_out_std[:, idx] = y_std
+
+                    # Model evaluation
+                    model_out_dict, _ = Model.run_model_parallel(sample_mesh,
+                                                                 key_str=&#39;Valid3D&#39;)
+                    model_outs[:, idx] = model_out_dict[Outkey].T
+
+                PCE_Z[:, idxMesh] = pce_out_mean[:, 0]
+                Model_Z[:, idxMesh] = model_outs[:, 0]
+
+        # ---------------- 3D plot for PCEModel -----------------------
+        fig_PCE = plt.figure()
+        ax = plt.axes(projection=&#39;3d&#39;)
+        ax.plot_surface(X, Y, PCE_Z, rstride=1, cstride=1,
+                        cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+        ax.set_title(&#39;PCEModel&#39;)
+        ax.set_xlabel(&#39;$x_1$&#39;)
+        ax.set_ylabel(&#39;$x_2$&#39;)
+        ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+        plt.grid()
+        plt.show()
+
+        if save_fig:
+            #  Saving the figure
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # save the figure to file
+            fig_PCE.savefig(f&#39;./{newpath}/3DPlot_PCEModel.pdf&#39;, format=&#34;pdf&#34;,
+                            bbox_inches=&#39;tight&#39;)
+            plt.close(fig_PCE)
+
+        # ---------------- 3D plot for Model -----------------------
+        fig_Model = plt.figure()
+        ax = plt.axes(projection=&#39;3d&#39;)
+        ax.plot_surface(X, Y, Model_Z, rstride=1, cstride=1,
+                        cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+        ax.set_title(&#39;Model&#39;)
+        ax.set_xlabel(&#39;$x_1$&#39;)
+        ax.set_ylabel(&#39;$x_2$&#39;)
+        ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+        plt.grid()
+        plt.show()
+
+        if save_fig:
+            # Save the figure
+            fig_Model.savefig(f&#39;./{newpath}/3DPlot_Model.pdf&#39;, format=&#34;pdf&#34;,
+                              bbox_inches=&#39;tight&#39;)
+            plt.close(fig_Model)
+
+        return
+
+    # -------------------------------------------------------------------------
+    def _compute_pce_moments(self):
+        &#34;&#34;&#34;
+        Computes the first two moments (mean and standard deviation) of the
+        outputs using the PCE-based meta-model and stores them in
+        `self.pce_means` and `self.pce_stds`.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        self.pce_means = {}
+        self.pce_stds = {}
+
+        for Outkey, ValuesDict in MetaModel.coeffs_dict.items():
+
+            pce_mean = np.zeros((len(ValuesDict)))
+            pce_var = np.zeros((len(ValuesDict)))
+
+            for Inkey, InIdxValues in ValuesDict.items():
+                idx = int(Inkey.split(&#39;_&#39;)[1]) - 1
+                coeffs = MetaModel.coeffs_dict[Outkey][Inkey]
+
+                # Mean = c_0
+                if coeffs[0] != 0:
+                    pce_mean[idx] = coeffs[0]
+                else:
+                    pce_mean[idx] = MetaModel.clf_poly[Outkey][Inkey].intercept_
+
+                # Var = sum(coeffs[1:]**2)
+                pce_var[idx] = np.sum(np.square(coeffs[1:]))
+
+            # Back transformation if PCA is selected.
+            if MetaModel.dim_red_method.lower() == &#39;pca&#39;:
+                PCA = MetaModel.pca[Outkey]
+                self.pce_means[Outkey] = PCA.mean_
+                self.pce_means[Outkey] += np.dot(pce_mean, PCA.components_)
+                self.pce_stds[Outkey] = np.sqrt(np.dot(pce_var,
+                                                       PCA.components_**2))
+            else:
+                self.pce_means[Outkey] = pce_mean
+                self.pce_stds[Outkey] = np.sqrt(pce_var)
+
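+            # Worked sketch (illustrative numbers only): for an output with
+            # coeffs = [2.0, 0.5, -0.3] the PCE mean is c_0 = 2.0 and the
+            # PCE std is sqrt(0.5**2 + (-0.3)**2) ~= 0.583.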
+            # Print a report table
+            print(&#34;\n&gt;&gt;&gt;&gt;&gt; Moments of {} &lt;&lt;&lt;&lt;&lt;&#34;.format(Outkey))
+            print(&#34;\nIndex  |  Mean   |  Std. deviation&#34;)
+            print(&#39;-&#39;*35)
+            print(&#39;\n&#39;.join(f&#39;{i+1}  |  {k:.3e}  |  {j:.3e}&#39; for i, (k, j)
+                            in enumerate(zip(self.pce_means[Outkey],
+                                             self.pce_stds[Outkey]))))
+        print(&#39;-&#39;*40)
+
+    # -------------------------------------------------------------------------
+    def _get_sample(self, n_samples=None):
+        &#34;&#34;&#34;
+        Generates random samples from the input parameter space.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples to generate. If `None`, `self.n_samples` is
+            used. The default is `None`.
+
+        Returns
+        -------
+        samples : array of shape (n_samples, n_params)
+            Generated samples.
+
+        &#34;&#34;&#34;
+        if n_samples is None:
+            n_samples = self.n_samples
+        PCEModel = self.MetaModel
+        self.samples = PCEModel.ExpDesign.generate_samples(n_samples, &#39;random&#39;)
+        return self.samples
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key_str=&#39;Valid&#39;):
+        &#34;&#34;&#34;
+        Evaluates the forward model for the given samples or, if no samples
+        are provided, for `self.n_samples` randomly generated samples.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate the model at. The default is None.
+        key_str : str, optional
+            Key string passed to the model. The default is &#39;Valid&#39;.
+
+        Returns
+        -------
+        model_outs : dict
+            Dictionary of results.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+
+        if samples is None:
+            samples = self._get_sample()
+            self.samples = samples
+        else:
+            self.n_samples = len(samples)
+
+        model_outs, _ = Model.run_model_parallel(samples, key_str=key_str)
+
+        return model_outs
+
+    # -------------------------------------------------------------------------
+    def _plot_validation(self, save_fig=True):
+        &#34;&#34;&#34;
+        Plots outputs for visual comparison of the metamodel outputs with
+        those of the (full) original model.
+
+        Parameters
+        ----------
+        save_fig : bool, optional
+            Save the plots. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        PCEModel = self.MetaModel
+
+        # get the samples
+        x_val = self.samples
+        y_pce_val = self.pce_out_mean
+        y_val = self.model_out_dict
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf1 = PdfPages(f&#39;./{newpath}/Model_vs_PCEModel.pdf&#39;)
+
+        fig = plt.figure()
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+
+            regression_model = LinearRegression()
+            regression_model.fit(y_pce_val_, y_val_)
+
+            # Predict
+            x_new = np.linspace(np.min(y_pce_val_), np.max(y_pce_val_), 100)
+            y_predicted = regression_model.predict(x_new[:, np.newaxis])
+
+            plt.scatter(y_pce_val_, y_val_, color=&#39;gold&#39;, linewidth=2)
+            plt.plot(x_new, y_predicted, color=&#39;k&#39;)
+
+            # Calculate the adjusted R_squared and RMSE
+            # the total number of explanatory variables in the model
+            # (not including the constant term)
+            length_list = []
+            for in_key, value in PCEModel.coeffs_dict[key].items():
+                length_list.append(len(value))
+            n_predictors = min(length_list)
+            n_samples = x_val.shape[0]
+
+            R2 = r2_score(y_val_, y_pce_val_)
+            AdjR2 = 1 - (1 - R2) * (n_samples - 1) / \
+                (n_samples - n_predictors - 1)
+            rmse = mean_squared_error(y_pce_val_, y_val_, squared=False)
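+            # Worked sketch (illustrative numbers only): with R2 = 0.95,
+            # n_samples = 100 and n_predictors = 10, the adjusted R-squared
+            # is 1 - 0.05 * 99 / 89 ~= 0.944.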
+
+            plt.annotate(f&#39;RMSE = {rmse:.3f}\n Adjusted $R^2$ = {AdjR2:.3f}&#39;,
+                         xy=(0.05, 0.85), xycoords=&#39;axes fraction&#39;)
+
+            plt.ylabel(&#34;Original Model&#34;)
+            plt.xlabel(&#34;PCE Model&#34;)
+            plt.grid()
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                pdf1.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                # Destroy the current plot
+                plt.clf()
+
+        # Close the pdf
+        if save_fig:
+            pdf1.close()
+
+    # -------------------------------------------------------------------------
+    def _plot_validation_multi(self, x_values=[], x_axis=&#34;x [m]&#34;, save_fig=True):
+        &#34;&#34;&#34;
+        Plots outputs for visual comparison of the metamodel outputs with
+        those of the (full) multi-output original model.
+
+        Parameters
+        ----------
+        x_values : list or array, optional
+            List of x values. The default is [].
+        x_axis : str, optional
+            Label of the x axis. The default is &#34;x [m]&#34;.
+        save_fig : bool, optional
+            Whether to save the figures. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}/Model_vs_PCEModel.pdf&#39;)
+
+        # List of markers and colors
+        color = cycle(([&#39;b&#39;, &#39;g&#39;, &#39;r&#39;, &#39;y&#39;, &#39;k&#39;]))
+        marker = cycle((&#39;x&#39;, &#39;d&#39;, &#39;+&#39;, &#39;o&#39;, &#39;*&#39;))
+
+        fig = plt.figure()
+        # Plot the model vs PCE model
+        for keyIdx, key in enumerate(Model.Output.names):
+
+            y_pce_val = self.pce_out_mean[key]
+            y_pce_val_std = self.pce_out_std[key]
+            y_val = self.model_out_dict[key]
+            try:
+                x = self.model_out_dict[&#39;x_values&#39;][key]
+            except (KeyError, IndexError):
+                x = x_values
+
+            for idx in range(y_val.shape[0]):
+                Color = next(color)
+                Marker = next(marker)
+
+                plt.plot(x, y_val[idx], color=Color, marker=Marker,
+                         label=&#39;$Y_{%s}^M$&#39;%(idx+1))
+
+                plt.plot(x, y_pce_val[idx], color=Color, marker=Marker,
+                         linestyle=&#39;--&#39;,
+                         label=&#39;$Y_{%s}^{PCE}$&#39;%(idx+1))
+                plt.fill_between(x, y_pce_val[idx]-1.96*y_pce_val_std[idx],
+                                 y_pce_val[idx]+1.96*y_pce_val_std[idx],
+                                 color=Color, alpha=0.15)
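+                # The shaded band is mean +/- 1.96 * std, i.e. a pointwise
+                # 95% interval under a Gaussian assumption on the PCE
+                # prediction error.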
+
+            # Calculate the RMSE
+            rmse = mean_squared_error(y_pce_val, y_val, squared=False)
+            R2 = r2_score(y_val[idx].reshape(-1, 1),
+                          y_pce_val[idx].reshape(-1, 1))
+
+            plt.annotate(f&#39;RMSE = {rmse:.3f}\n $R^2$ = {R2:.3f}&#39;,
+                         xy=(0.2, 0.75), xycoords=&#39;axes fraction&#39;)
+
+            plt.ylabel(key)
+            plt.xlabel(x_axis)
+            plt.legend(loc=&#39;best&#39;)
+            plt.grid()
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+        if save_fig:
+            pdf.close()
+
+        # Zip the subdirectories
+        Model.zip_subdirs(f&#39;{Model.name}valid&#39;, f&#39;{Model.name}valid_&#39;)</code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="post_processing.PostProcessing"><code class="flex name class">
+<span>class <span class="ident">PostProcessing</span></span>
+<span>(</span><span>MetaModel, name='calib')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This class provides many helper functions to post-process the trained
+meta-model.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>MetaModel</code></strong> :&ensp;<code>obj</code></dt>
+<dd>MetaModel object to do postprocessing on.</dd>
+<dt><strong><code>name</code></strong> :&ensp;<code>str</code></dt>
+<dd>Type of the analysis. The default is <code>'calib'</code>. If a validation
+is expected to be performed, change this to <code>'valid'</code>.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class PostProcessing:
+    &#34;&#34;&#34;
+    This class provides many helper functions to post-process the trained
+    meta-model.
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object to do postprocessing on.
+    name : str
+        Type of the analysis. The default is `&#39;calib&#39;`. If a validation is
+        expected to be performed, change this to `&#39;valid&#39;`.
+    &#34;&#34;&#34;
+
+    def __init__(self, MetaModel, name=&#39;calib&#39;):
+        self.MetaModel = MetaModel
+        self.name = name
+
+    # -------------------------------------------------------------------------
+    def plot_moments(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the mean and standard deviation of the outputs in PDF format
+        in the directory `Outputs_PostProcessing_&lt;name&gt;`.
+
+        Parameters
+        ----------
+        xlabel : str, optional
+            String to be displayed as x-label. The default is `&#39;Time [s]&#39;`.
+        plot_type : str, optional
+            Plot type: `&#39;bar&#39;` for bar charts or `None` for line plots.
+            The default is `None`.
+        save_fig : bool, optional
+            Save figure or not. The default is `True`.
+
+        Returns
+        -------
+        pce_means: dict
+            Means of the model outputs.
+        pce_stds: dict
+            Standard deviations of the model outputs.
+
+        &#34;&#34;&#34;
+
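+        # Usage sketch (hypothetical object names, for illustration only):
+        #   post = PostProcessing(meta_model)
+        #   means, stds = post.plot_moments(plot_type=&#39;bar&#39;)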
+        bar_plot = plot_type == &#39;bar&#39;
+        meta_model_type = self.MetaModel.meta_model_type
+        Model = self.MetaModel.ModelObj
+
+        # Read the Monte-Carlo reference (if available)
+        self.mc_reference = Model.read_mc_reference()
+
+        # Set the x values
+        x_values_orig = self.MetaModel.ExpDesign.x_values
+
+        # Compute the moments with the PCEModel object
+        self._compute_pce_moments()
+
+        # Get the variables
+        out_names = Model.Output.names
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}Mean_Std_PCE.pdf&#39;)
+
+        # Plot the best fit line, set the linewidth (lw), color and
+        # transparency (alpha) of the line
+        for idx, key in enumerate(out_names):
+            fig, ax = plt.subplots(nrows=1, ncols=2)
+
+            # Extract mean and std
+            mean_data = self.pce_means[key]
+            std_data = self.pce_stds[key]
+
+            # Extract a list of x values
+            if isinstance(x_values_orig, dict):
+                x = x_values_orig[key]
+            else:
+                x = x_values_orig
+
+            # Plot: bar plot or line plot
+            if bar_plot:
+                ax[0].bar(list(map(str, x)), mean_data, color=&#39;b&#39;,
+                          width=0.25)
+                ax[1].bar(list(map(str, x)), std_data, color=&#39;b&#39;,
+                          width=0.25)
+                ax[0].legend(labels=[meta_model_type])
+                ax[1].legend(labels=[meta_model_type])
+            else:
+                ax[0].plot(x, mean_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                           label=meta_model_type)
+                ax[1].plot(x, std_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                           label=meta_model_type)
+
+            if self.mc_reference is not None:
+                if bar_plot:
+                    ax[0].bar(list(map(str, x)), self.mc_reference[&#39;mean&#39;],
+                              color=&#39;r&#39;, width=0.25)
+                    ax[1].bar(list(map(str, x)), self.mc_reference[&#39;std&#39;],
+                              color=&#39;r&#39;, width=0.25)
+                    ax[0].legend(labels=[meta_model_type])
+                    ax[1].legend(labels=[meta_model_type])
+                else:
+                    ax[0].plot(x, self.mc_reference[&#39;mean&#39;], lw=3, marker=&#39;x&#39;,
+                               color=&#39;r&#39;, label=&#39;Ref.&#39;)
+                    ax[1].plot(x, self.mc_reference[&#39;std&#39;], lw=3, marker=&#39;x&#39;,
+                               color=&#39;r&#39;, label=&#39;Ref.&#39;)
+
+            # Label the axes and provide a title
+            ax[0].set_xlabel(xlabel)
+            ax[1].set_xlabel(xlabel)
+            ax[0].set_ylabel(key)
+            ax[1].set_ylabel(key)
+
+            # Provide a title
+            ax[0].set_title(&#39;Mean of &#39; + key)
+            ax[1].set_title(&#39;Std of &#39; + key)
+
+            if not bar_plot:
+                ax[0].legend(loc=&#39;best&#39;)
+                ax[1].legend(loc=&#39;best&#39;)
+
+            plt.tight_layout()
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                # Destroy the current plot
+                plt.clf()
+
+        if save_fig:
+            pdf.close()
+
+        return self.pce_means, self.pce_stds
+
+    # -------------------------------------------------------------------------
+    def valid_metamodel(self, n_samples=1, samples=None, x_axis=&#39;Time [s]&#39;):
+        &#34;&#34;&#34;
+        Evaluates and plots the metamodel outputs alongside the original
+        model outputs for the given number of samples or the given samples.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples to be evaluated. The default is 1.
+        samples : array of shape (n_samples, n_params), optional
+            Samples to be evaluated. The default is None.
+        x_axis : str, optional
+            Label of x axis. The default is `&#39;Time [s]&#39;`.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Extract x_values
+        x_values = MetaModel.ExpDesign.x_values
+
+        self.model_out_dict = self._eval_model(samples, key_str=&#39;valid&#39;)
+        self.pce_out_mean, self.pce_out_std = MetaModel.eval_metamodel(samples)
+
+        try:
+            key = Model.Output.names[1]
+        except IndexError:
+            key = Model.Output.names[0]
+
+        n_obs = self.model_out_dict[key].shape[1]
+
+        if n_obs == 1:
+            self._plot_validation()
+        else:
+            self._plot_validation_multi(x_values=x_values, x_axis=x_axis)
+
+    # -------------------------------------------------------------------------
+    def check_accuracy(self, n_samples=None, samples=None, outputs=None):
+        &#34;&#34;&#34;
+        Checks accuracy of the metamodel by computing the root mean square
+        error and validation error for all outputs.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples. The default is None.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to be checked. The default is None.
+        outputs : dict, optional
+            Output dictionary with model outputs for all given output types in
+            `Model.Output.names`. The default is None.
+
+        Raises
+        ------
+        Exception
+            When neither n_samples nor samples are provided.
+
+        Returns
+        -------
+        rmse: dict
+            Root mean squared error for each output.
+        valid_error : dict
+            Validation error for each output.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+        Model = MetaModel.ModelObj
+
+        # Set the number of samples
+        if n_samples:
+            self.n_samples = n_samples
+        elif samples is not None:
+            self.n_samples = samples.shape[0]
+        else:
+            raise Exception(&#34;Please provide either samples or pass number of &#34;
+                            &#34;samples!&#34;)
+
+        # Generate random samples if necessary
+        Samples = self._get_sample() if samples is None else samples
+
+        # Run the original model with the generated samples
+        if outputs is None:
+            outputs = self._eval_model(Samples, key_str=&#39;validSet&#39;)
+
+        # Run the PCE model with the generated samples
+        pce_outputs, _ = MetaModel.eval_metamodel(samples=Samples)
+
+        self.rmse = {}
+        self.valid_error = {}
+        # Loop over the keys and compute RMSE error.
+        for key in Model.Output.names:
+            # Root mean square error
+            self.rmse[key] = mean_squared_error(outputs[key], pce_outputs[key],
+                                                squared=False,
+                                                multioutput=&#39;raw_values&#39;)
+            # Validation error
+            self.valid_error[key] = (self.rmse[key]**2 / self.n_samples) / \
+                np.var(outputs[key], ddof=1, axis=0)
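+            # Worked sketch (illustrative numbers only): rmse = 0.1 with
+            # n_samples = 100 and an output variance of 4.0 gives a
+            # validation error of (0.01 / 100) / 4.0 = 2.5e-05.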
+
+            # Print a report table
+            print(&#34;\n&gt;&gt;&gt;&gt;&gt; Errors of {} &lt;&lt;&lt;&lt;&lt;&#34;.format(key))
+            print(&#34;\nIndex  |  RMSE   |  Validation Error&#34;)
+            print(&#39;-&#39;*35)
+            print(&#39;\n&#39;.join(f&#39;{i+1}  |  {k:.3e}  |  {j:.3e}&#39; for i, (k, j)
+                            in enumerate(zip(self.rmse[key],
+                                             self.valid_error[key]))))
+        # Save error dicts in PCEModel object
+        self.MetaModel.rmse = self.rmse
+        self.MetaModel.valid_error = self.valid_error
+
+        return self.rmse, self.valid_error
+
+    # -------------------------------------------------------------------------
+    def plot_seq_design_diagnostics(self, ref_BME_KLD=None, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
+        (KLD) for the sequential design.
+
+        Parameters
+        ----------
+        ref_BME_KLD : array, optional
+            Reference BME and KLD. The default is `None`.
+        save_fig : bool, optional
+            Whether to save the figures. The default is `True`.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        PCEModel = self.MetaModel
+        n_init_samples = PCEModel.ExpDesign.n_init_samples
+        n_total_samples = PCEModel.ExpDesign.X.shape[0]
+
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}/seqPCEModelDiagnostics.pdf&#39;)
+
+        plotList = [&#39;Modified LOO error&#39;, &#39;Validation error&#39;, &#39;KLD&#39;, &#39;BME&#39;,
+                    &#39;RMSEMean&#39;, &#39;RMSEStd&#39;, &#39;Hellinger distance&#39;]
+        seqList = [PCEModel.SeqModifiedLOO, PCEModel.seqValidError,
+                   PCEModel.SeqKLD, PCEModel.SeqBME, PCEModel.seqRMSEMean,
+                   PCEModel.seqRMSEStd, PCEModel.SeqDistHellinger]
+
+        markers = (&#39;x&#39;, &#39;o&#39;, &#39;d&#39;, &#39;*&#39;, &#39;+&#39;)
+        colors = (&#39;k&#39;, &#39;darkgreen&#39;, &#39;b&#39;, &#39;navy&#39;, &#39;darkred&#39;)
+
+        # Plot the evolution of the diagnostic criteria of the
+        # Sequential Experimental Design.
+        for plotidx, plot in enumerate(plotList):
+            fig, ax = plt.subplots()
+            seq_dict = seqList[plotidx]
+            name_util = list(seq_dict.keys())
+
+            if len(name_util) == 0:
+                continue
+
+            # Box plot when Replications have been detected.
+            if any(int(name.split(&#34;rep_&#34;, 1)[1]) &gt; 1 for name in name_util):
+                # Extract the values from dict
+                sorted_seq_opt = {}
+                # Number of replications
+                n_reps = PCEModel.ExpDesign.nReprications
+
+                # Get the list of utility function names
+                # Handle if only one UtilityFunction is provided
+                if not isinstance(PCEModel.ExpDesign.UtilityFunction, list):
+                    util_funcs = [PCEModel.ExpDesign.UtilityFunction]
+                else:
+                    util_funcs = PCEModel.ExpDesign.UtilityFunction
+
+                for util in util_funcs:
+                    sortedSeq = {}
+                    # min number of runs available from reps
+                    n_runs = min([seq_dict[f&#39;{util}_rep_{i+1}&#39;].shape[0]
+                                 for i in range(n_reps)])
+
+                    for runIdx in range(n_runs):
+                        values = []
+                        for key in seq_dict.keys():
+                            if util in key:
+                                values.append(seq_dict[key][runIdx].mean())
+                        sortedSeq[&#39;SeqItr_&#39;+str(runIdx)] = np.array(values)
+                    sorted_seq_opt[util] = sortedSeq
+
+                # BoxPlot
+                def draw_plot(data, labels, edge_color, fill_color, idx):
+                    pos = labels - (idx-1)
+                    bp = plt.boxplot(data, positions=pos, labels=labels,
+                                     patch_artist=True, sym=&#39;&#39;, widths=0.75)
+                    elements = [&#39;boxes&#39;, &#39;whiskers&#39;, &#39;fliers&#39;, &#39;means&#39;,
+                                &#39;medians&#39;, &#39;caps&#39;]
+                    for element in elements:
+                        plt.setp(bp[element], color=edge_color[idx])
+
+                    for patch in bp[&#39;boxes&#39;]:
+                        patch.set(facecolor=fill_color[idx])
+
+                if PCEModel.ExpDesign.n_new_samples != 1:
+                    step1 = PCEModel.ExpDesign.n_new_samples
+                    step2 = 1
+                else:
+                    step1 = 5
+                    step2 = 5
+                edge_color = [&#39;red&#39;, &#39;blue&#39;, &#39;green&#39;]
+                fill_color = [&#39;tan&#39;, &#39;cyan&#39;, &#39;lightgreen&#39;]
+                plot_label = plot
+                # Plot for different Utility Functions
+                for idx, util in enumerate(util_funcs):
+                    all_errors = np.empty((n_reps, 0))
+
+                    for key in list(sorted_seq_opt[util].keys()):
+                        errors = sorted_seq_opt.get(util, {}).get(key)[:, None]
+                        all_errors = np.hstack((all_errors, errors))
+
+                    # Special cases for BME and KLD
+                    if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                        # BME convergence if refBME is provided
+                        if ref_BME_KLD is not None:
+                            if plot == &#39;BME&#39;:
+                                refValue = ref_BME_KLD[0]
+                                plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                            if plot == &#39;KLD&#39;:
+                                refValue = ref_BME_KLD[1]
+                                plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                    r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                    r&#39;p(\theta)]$&#39;
+
+                            # Difference between BME/KLD and the ref. values
+                            all_errors = np.divide(all_errors,
+                                                   np.full((all_errors.shape),
+                                                           refValue))
+
+                            # Plot baseline for zero, i.e. no difference
+                            plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                        ls=&#39;--&#39;, lw=2)
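+                            # A ratio of 1.0 on this baseline means the
+                            # estimate matches the reference BME/KLD value
+                            # exactly; e.g. ref_BME_KLD could be a pair like
+                            # [bme_ref, kld_ref] (hypothetical names).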
+
+                    # Plot each UtilFuncs
+                    labels = np.arange(n_init_samples, n_total_samples+1, step1)
+                    draw_plot(all_errors[:, ::step2], labels, edge_color,
+                              fill_color, idx)
+
+                plt.xticks(labels, labels)
+                # Set the major and minor locators
+                ax.xaxis.set_major_locator(ticker.AutoLocator())
+                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+                ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+                ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+                # Legend
+                legend_elements = []
+                for idx, util in enumerate(util_funcs):
+                    legend_elements.append(Patch(facecolor=fill_color[idx],
+                                                 edgecolor=edge_color[idx],
+                                                 label=util))
+                plt.legend(handles=legend_elements[::-1], loc=&#39;best&#39;)
+
+                if plot != &#39;BME&#39; and plot != &#39;KLD&#39;:
+                    plt.yscale(&#39;log&#39;)
+                plt.autoscale(True)
+                plt.xlabel(&#39;\\# of training samples&#39;)
+                plt.ylabel(plot_label)
+                plt.title(plot)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                    # Destroy the current plot
+                    plt.clf()
+                    # Save arrays into files
+                    with open(f&#39;./{newpath}/Seq{plot}.txt&#39;, &#39;w&#39;) as f:
+                        f.write(str(sorted_seq_opt))
+            else:
+                for idx, name in enumerate(name_util):
+                    seq_values = seq_dict[name]
+                    if PCEModel.ExpDesign.n_new_samples != 1:
+                        step = PCEModel.ExpDesign.n_new_samples
+                    else:
+                        step = 1
+                    x_idx = np.arange(n_init_samples, n_total_samples+1, step)
+                    if n_total_samples not in x_idx:
+                        x_idx = np.hstack((x_idx, n_total_samples))
+
+                    if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                        # BME convergence if refBME is provided
+                        if ref_BME_KLD is not None:
+                            if plot == &#39;BME&#39;:
+                                refValue = ref_BME_KLD[0]
+                                plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                            if plot == &#39;KLD&#39;:
+                                refValue = ref_BME_KLD[1]
+                                plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                    r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                    r&#39;p(\theta)]$&#39;
+
+                            # Difference between BME/KLD and the ref. values
+                            values = np.divide(seq_values,
+                                               np.full((seq_values.shape),
+                                                       refValue))
+
+                            # Plot baseline for zero, i.e. no difference
+                            plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                        ls=&#39;--&#39;, lw=2)
+
+                            # Set the limits
+                            plt.ylim([1e-1, 1e1])
+
+                            # Create the plots
+                            plt.semilogy(x_idx, values, marker=markers[idx],
+                                         color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                         label=name.split(&#34;_rep&#34;, 1)[0])
+                        else:
+                            plot_label = plot
+
+                            # Create the plots
+                            plt.plot(x_idx, seq_values, marker=markers[idx],
+                                     color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                     label=name.split(&#34;_rep&#34;, 1)[0])
+
+                    else:
+                        plot_label = plot
+                        seq_values = np.nan_to_num(seq_values)
+
+                        # Plot the error evolution for each output
+                        for i in range(seq_values.shape[1]):
+                            plt.semilogy(x_idx, seq_values[:, i], ls=&#39;--&#39;,
+                                         lw=2, marker=markers[idx],
+                                         color=colors[idx], alpha=0.15)
+
+                        plt.semilogy(x_idx, seq_values, marker=markers[idx],
+                                     ls=&#39;--&#39;, lw=2, color=colors[idx],
+                                     label=name.split(&#34;_rep&#34;, 1)[0])
+
+                # Set the major and minor locators
+                ax.xaxis.set_major_locator(ticker.AutoLocator())
+                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+                ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+                ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+                ax.tick_params(axis=&#39;both&#39;, which=&#39;major&#39;, direction=&#39;in&#39;,
+                               width=3, length=10)
+                ax.tick_params(axis=&#39;both&#39;, which=&#39;minor&#39;, direction=&#39;in&#39;,
+                               width=2, length=8)
+                plt.xlabel(&#39;Number of runs&#39;)
+                plt.ylabel(plot_label)
+                plt.title(plot)
+                plt.legend(frameon=True)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                    # Destroy the current plot
+                    plt.clf()
+
+                    # ---------------- Saving arrays into files ---------------
+                    np.save(f&#39;./{newpath}/Seq{plot}.npy&#39;, seq_values)
+
+        # Close the pdf
+        if save_fig:
+            pdf.close()
+        return
+
+    # -------------------------------------------------------------------------
+    def sobol_indices(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+        &#34;&#34;&#34;
+        Provides Sobol indices as a sensitivity measure to infer the importance
+        of the input parameters. See Eq. 27 in [1] for more details. For the
+        case with principal component analysis, refer to [2].
+
+        [1] Global sensitivity analysis: A flexible and efficient framework
+        with an example from stochastic hydrogeology S. Oladyshkin, F.P.
+        de Barros, W. Nowak  https://doi.org/10.1016/j.advwatres.2011.11.001
+
+        [2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+        component analysis and sparse polynomial chaos expansions for global
+        sensitivity analysis and model calibration: Application to urban
+        drainage simulation. Reliability Engineering &amp; System Safety, 195,
+        p.106737.
+
+        Parameters
+        ----------
+        xlabel : str, optional
+            Label of the x-axis. The default is `&#39;Time [s]&#39;`.
+        plot_type : str, optional
+            Plot type. The default is `None`. This corresponds to line plot.
+            Bar chart can be selected by `bar`.
+        save_fig : bool, optional
+            Whether to save the figures. The default is `True`.
+
+        Returns
+        -------
+        sobol_cell: dict
+            Sobol indices.
+        total_sobol: dict
+            Total Sobol indices.
+
+        &#34;&#34;&#34;
+        # Extract the necessary variables
+        PCEModel = self.MetaModel
+        basis_dict = PCEModel.basis_dict
+        coeffs_dict = PCEModel.coeffs_dict
+        n_params = PCEModel.n_params
+        max_order = np.max(PCEModel.pce_deg)
+        self.sobol_cell = {}
+        self.total_sobol = {}
+
+        for Output in PCEModel.ModelObj.Output.names:
+
+            n_meas_points = len(coeffs_dict[Output])
+
+            # Initialize the (cell) array containing the (total) Sobol indices.
+            sobol_array = {order: [] for order in range(1, max_order+1)}
+            sobol_cell_array = {order: [] for order in range(1, max_order+1)}
+
+            for i_order in range(1, max_order+1):
+                n_comb = math.comb(n_params, i_order)
+
+                sobol_cell_array[i_order] = np.zeros((n_comb, n_meas_points))
+
+            total_sobol_array = np.zeros((n_params, n_meas_points))
+
+            # Initialize the cell to store the names of the variables
+            TotalVariance = np.zeros((n_meas_points))
+
+            # Loop over all measurement points and calculate sobol indices
+            for pIdx in range(n_meas_points):
+
+                # Extract the basis indices (alpha) and coefficients
+                Basis = basis_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+                try:
+                    clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+1}&#39;]
+                    PCECoeffs = clf_poly.coef_
+                except (AttributeError, KeyError, TypeError):
+                    PCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+                # Compute total variance
+                TotalVariance[pIdx] = np.sum(np.square(PCECoeffs[1:]))
+
+                nzidx = np.where(PCECoeffs != 0)[0]
+                # Set all the Sobol indices equal to zero in the presence of a
+                # null output.
+                if len(nzidx) == 0:
+                    # This is buggy.
+                    for i_order in range(1, max_order+1):
+                        sobol_cell_array[i_order][:, pIdx] = 0
+
+                # Otherwise compute them by summing well-chosen coefficients
+                else:
+                    nz_basis = Basis[nzidx]
+                    for i_order in range(1, max_order+1):
+                        idx = np.where(np.sum(nz_basis &gt; 0, axis=1) == i_order)
+                        subbasis = nz_basis[idx]
+                        Z = np.array(list(combinations(range(n_params), i_order)))
+
+                        for q in range(Z.shape[0]):
+                            Zq = Z[q]
+                            subsubbasis = subbasis[:, Zq]
+                            subidx = np.prod(subsubbasis, axis=1) &gt; 0
+                            sum_ind = nzidx[idx[0][subidx]]
+                            if TotalVariance[pIdx] == 0.0:
+                                sobol_cell_array[i_order][q, pIdx] = 0.0
+                            else:
+                                sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                                sobol /= TotalVariance[pIdx]
+                                sobol_cell_array[i_order][q, pIdx] = sobol
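+                                # Sketch (illustrative): with two parameters,
+                                # nonzero basis rows [[1, 0], [0, 1], [1, 1]]
+                                # and coeffs c1, c2, c3, the second-order
+                                # index of the pair (x1, x2) is
+                                # c3**2 / (c1**2 + c2**2 + c3**2).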
+
+                    # Compute the TOTAL Sobol indices.
+                    for ParIdx in range(n_params):
+                        idx = nz_basis[:, ParIdx] &gt; 0
+                        sum_ind = nzidx[idx]
+
+                        if TotalVariance[pIdx] == 0.0:
+                            total_sobol_array[ParIdx, pIdx] = 0.0
+                        else:
+                            sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                            sobol /= TotalVariance[pIdx]
+                            total_sobol_array[ParIdx, pIdx] = sobol
+
+                # ----- if PCA selected: Compute covariance -----
+                if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+                    cov_Z_p_q = np.zeros((n_params))
+                    # Extract the basis indices (alpha) and coefficients for 
+                    # next component
+                    if pIdx &lt; n_meas_points-1:
+                        nextBasis = basis_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                        try:
+                            clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+2}&#39;]
+                            nextPCECoeffs = clf_poly.coef_
+                        except (AttributeError, KeyError, TypeError):
+                            nextPCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                        # Choose the common non-zero basis
+                        mask = (Basis[:, None] == nextBasis).all(-1).any(-1)
+                        similar_basis = Basis[mask]
+                        # Compute the TOTAL Sobol indices.
+                        for ParIdx in range(n_params):
+                            idx = similar_basis[:, ParIdx] &gt; 0
+                            try:
+                                sum_is = nzidx[idx]
+                                cov_Z_p_q[ParIdx] = np.sum(PCECoeffs[sum_ind] *
+                                                           nextPCECoeffs[sum_is])
+                            except (IndexError, ValueError):
+                                cov_Z_p_q[ParIdx] = 0.0
+
+            # Compute the sobol indices according to Ref. 2
+            if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+                n_c_points = PCEModel.ExpDesign.Y[Output].shape[1]
+                PCA = PCEModel.pca[Output]
+                compPCA = PCA.components_
+                nComp = compPCA.shape[0]
+                var_Z_p = PCA.explained_variance_
+
+                # Extract the sobol index of the components
+                for i_order in range(1, max_order+1):
+                    n_comb = math.comb(n_params, i_order)
+                    sobol_array[i_order] = np.zeros((n_comb, n_c_points))
+                    Z = np.array(list(combinations(range(n_params), i_order)))
+
+                    for q in range(Z.shape[0]):
+                        S_Z_i = sobol_cell_array[i_order][q]
+
+                        for tIdx in range(n_c_points):
+                            var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                            if var_Y_t == 0.0:
+                                term1, term2 = 0.0, 0.0
+                            else:
+                                term1 = np.sum(
+                                    [S_Z_i[i] * var_Z_p[i] *
+                                     (compPCA[i, tIdx]**2) / var_Y_t
+                                     for i in range(nComp)])
+
+                                # Term 2
+                                # cov_Z_p_q = np.ones((nComp))  # TODO: from coeffs
+                                Phi_t_p = compPCA[:nComp-1]
+                                Phi_t_q = compPCA
+                                term2 = 2 * np.sum(
+                                    [cov_Z_p_q[ParIdx] * Phi_t_p[i, tIdx] *
+                                     Phi_t_q[i, tIdx] / var_Y_t
+                                     for i in range(nComp-1)])
+
+                            sobol_array[i_order][q, tIdx] = term1  # + term2
+
+                # Compute the TOTAL Sobol indices.
+                total_sobol = np.zeros((n_params, n_c_points))
+                for ParIdx in range(n_params):
+                    S_Z_i = total_sobol_array[ParIdx]
+
+                    for tIdx in range(n_c_points):
+                        var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                        if var_Y_t == 0.0:
+                            term1, term2 = 0.0, 0.0
+                        else:
+                            term1 = 0
+                            for i in range(nComp):
+                                term1 += S_Z_i[i] * var_Z_p[i] * \
+                                    (compPCA[i, tIdx]**2) / var_Y_t
+
+                            # Term 2
+                            # cov_Z_p_q = np.ones((nComp))# TODO: from coeffs
+                            Phi_t_p = compPCA[:nComp-1]
+                            Phi_t_q = compPCA
+                            term2 = 0
+                            for i in range(nComp-1):
+                                term2 += cov_Z_p_q[ParIdx] * Phi_t_p[i, tIdx] \
+                                    * Phi_t_q[i, tIdx] / var_Y_t
+                            term2 *= 2
+
+                        total_sobol[ParIdx, tIdx] = term1 + term2
+
+                self.sobol_cell[Output] = sobol_array
+                self.total_sobol[Output] = total_sobol
+            else:
+                self.sobol_cell[Output] = sobol_cell_array
+                self.total_sobol[Output] = total_sobol_array
+
+        # ---------------- Plot -----------------------
+        par_names = PCEModel.ExpDesign.par_names
+        x_values_orig = PCEModel.ExpDesign.x_values
+
+        cases = [&#39;&#39;]
+
+        for case in cases:
+            newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # Prefix for the output file names
+            name = case + &#39;_&#39; if &#39;Valid&#39; in cases else &#39;&#39;
+
+            if save_fig:
+                # create a PdfPages object
+                pdf = PdfPages(&#39;./&#39; + newpath + name + &#39;Sobol_indices.pdf&#39;)
+
+            fig = plt.figure()
+
+            for outIdx, Output in enumerate(PCEModel.ModelObj.Output.names):
+
+                # Extract total Sobol indices
+                total_sobol = self.total_sobol[Output]
+
+                # Extract a list of x values
+                if isinstance(x_values_orig, dict):
+                    x = x_values_orig[Output]
+                else:
+                    x = x_values_orig
+
+                if plot_type == &#39;bar&#39;:
+                    ax = fig.add_axes([0, 0, 1, 1])
+                    dict1 = {xlabel: x}
+                    dict2 = {param: sobolIndices for param, sobolIndices
+                             in zip(par_names, total_sobol)}
+
+                    df = pd.DataFrame({**dict1, **dict2})
+                    df.plot(x=xlabel, y=par_names, kind=&#34;bar&#34;, ax=ax, rot=0,
+                            colormap=&#39;Dark2&#39;)
+                    ax.set_ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+
+                else:
+                    for i, sobolIndices in enumerate(total_sobol):
+                        plt.plot(x, sobolIndices, label=par_names[i],
+                                 marker=&#39;x&#39;, lw=2.5)
+
+                    plt.ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+                    plt.xlabel(xlabel)
+
+                plt.title(f&#39;Sensitivity analysis of {Output}&#39;)
+                if plot_type != &#39;bar&#39;:
+                    plt.legend(loc=&#39;best&#39;, frameon=True)
+
+                # Save indices
+                np.savetxt(f&#39;./{newpath}{name}totalsobol_&#39; +
+                           Output.replace(&#39;/&#39;, &#39;_&#39;) + &#39;.csv&#39;,
+                           total_sobol.T, delimiter=&#39;,&#39;,
+                           header=&#39;,&#39;.join(par_names), comments=&#39;&#39;)
+
+                if save_fig:
+                    # save the current figure
+                    pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                    # Destroy the current plot
+                    plt.clf()
+
+            if save_fig:
+                pdf.close()
+
+        return self.sobol_cell, self.total_sobol
+
+    # -------------------------------------------------------------------------
+    def check_reg_quality(self, n_samples=1000, samples=None, save_fig=True):
+        &#34;&#34;&#34;
+        Checks the quality of the metamodel for single-output models based on:
+        https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
+
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of parameter sets to use for the check. The default is 1000.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to use for the check. The default is None.
+        save_fig : bool, optional
+            Whether to save the figures. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        MetaModel = self.MetaModel
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Evaluate the original and the surrogate model
+        y_val = self._eval_model(samples, key_str=&#39;valid&#39;)
+        y_pce_val, _ = MetaModel.eval_metamodel(samples=samples)
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+
+            # ------ Residuals vs. predicting variables ------
+            # Check the assumptions of linearity and independence
+            fig1 = plt.figure()
+            plt.title(key+&#34;: Residuals vs. predicting variables&#34;)
+            residuals = y_val_ - y_pce_val_
+            plt.scatter(x=y_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+            plt.grid(True)
+            xmin, xmax = min(y_val_), max(y_val_)
+            plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                       linestyle=&#39;--&#39;)
+            plt.xlabel(key)
+            plt.ylabel(&#39;Residuals&#39;)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig1.savefig(f&#39;./{newpath}/Residuals_vs_PredVariables.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Fitted vs. residuals ------
+            # Check the assumptions of linearity and independence
+            fig2 = plt.figure()
+            plt.title(key+&#34;: Fitted values vs. residuals&#34;)
+            plt.scatter(x=y_pce_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+            plt.grid(True)
+            xmin, xmax = min(y_pce_val_), max(y_pce_val_)
+            plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                       linestyle=&#39;--&#39;)
+            plt.xlabel(key)
+            plt.ylabel(&#39;Residuals&#39;)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig2.savefig(f&#39;./{newpath}/Fitted_vs_Residuals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Histogram of normalized residuals ------
+            fig3 = plt.figure()
+            resid_pearson = residuals / (max(residuals)-min(residuals))
+            plt.hist(resid_pearson, bins=20, edgecolor=&#39;k&#39;)
+            plt.ylabel(&#39;Count&#39;)
+            plt.xlabel(&#39;Normalized residuals&#39;)
+            plt.title(f&#34;{key}: Histogram of normalized residuals&#34;)
+
+            # Normality (Shapiro-Wilk) test of the residuals
+            ax = plt.gca()
+            _, p = stats.shapiro(residuals)
+            if p &lt; 0.01:
+                annText = &#34;The normality assumption may not hold.&#34;
+            else:
+                annText = &#34;The residuals seem to be normally distributed.&#34;
+            at = AnchoredText(annText, prop=dict(size=30), frameon=True,
+                              loc=&#39;upper left&#39;)
+            at.patch.set_boxstyle(&#34;round,pad=0.,rounding_size=0.2&#34;)
+            ax.add_artist(at)
+
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig3.savefig(f&#39;./{newpath}/Hist_NormResiduals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Q-Q plot of the normalized residuals ------
+            plt.figure()
+            fig4 = qqplot(resid_pearson, line=&#39;45&#39;, fit=True)
+            plt.xticks()
+            plt.yticks()
+            plt.xlabel(&#34;Theoretical quantiles&#34;)
+            plt.ylabel(&#34;Sample quantiles&#34;)
+            plt.title(key+&#34;: Q-Q plot of normalized residuals&#34;)
+            plt.grid(True)
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                fig4.savefig(f&#39;./{newpath}/QQPlot_NormResiduals.pdf&#39;,
+                             bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+    # -------------------------------------------------------------------------
+    def eval_pce_model_3d(self, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the metamodel and the model responses over a 2D grid of the
+        parameter space as 3D surface plots. By default the figures are
+        saved (`save_fig=True`).
+        &#34;&#34;&#34;
+        self.n_samples = 1000
+
+        PCEModel = self.MetaModel
+        Model = self.MetaModel.ModelObj
+        n_samples = self.n_samples
+
+        # Create 3D-Grid
+        # TODO: Make it general
+        x = np.linspace(-5, 10, n_samples)
+        y = np.linspace(0, 15, n_samples)
+
+        X, Y = np.meshgrid(x, y)
+        PCE_Z = np.zeros((self.n_samples, self.n_samples))
+        Model_Z = np.zeros((self.n_samples, self.n_samples))
+
+        for idxMesh in range(self.n_samples):
+            sample_mesh = np.vstack((X[:, idxMesh], Y[:, idxMesh])).T
+
+            univ_p_val = PCEModel.univ_basis_vals(sample_mesh)
+
+            for Outkey, ValuesDict in PCEModel.coeffs_dict.items():
+
+                pce_out_mean = np.zeros((len(sample_mesh), len(ValuesDict)))
+                pce_out_std = np.zeros((len(sample_mesh), len(ValuesDict)))
+                model_outs = np.zeros((len(sample_mesh), len(ValuesDict)))
+
+                for Inkey, InIdxValues in ValuesDict.items():
+                    idx = int(Inkey.split(&#39;_&#39;)[1]) - 1
+                    basis_deg_ind = PCEModel.basis_dict[Outkey][Inkey]
+                    clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+
+                    PSI_Val = PCEModel.create_psi(basis_deg_ind, univ_p_val)
+
+                    # Prediction with error bars
+                    y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+
+                    pce_out_mean[:, idx] = y_mean
+                    pce_out_std[:, idx] = y_std
+
+                    # Model evaluation
+                    model_out_dict, _ = Model.run_model_parallel(sample_mesh,
+                                                                 key_str=&#39;Valid3D&#39;)
+                    model_outs[:, idx] = model_out_dict[Outkey].T
+
+                PCE_Z[:, idxMesh] = pce_out_mean[:, 0]
+                Model_Z[:, idxMesh] = model_outs[:, 0]
+
+        # ---------------- 3D plot for PCEModel -----------------------
+        fig_PCE = plt.figure()
+        ax = plt.axes(projection=&#39;3d&#39;)
+        ax.plot_surface(X, Y, PCE_Z, rstride=1, cstride=1,
+                        cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+        ax.set_title(&#39;PCEModel&#39;)
+        ax.set_xlabel(&#39;$x_1$&#39;)
+        ax.set_ylabel(&#39;$x_2$&#39;)
+        ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+        plt.grid()
+        plt.show()
+
+        if save_fig:
+            #  Saving the figure
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # save the figure to file
+            fig_PCE.savefig(f&#39;./{newpath}/3DPlot_PCEModel.pdf&#39;, format=&#34;pdf&#34;,
+                            bbox_inches=&#39;tight&#39;)
+            plt.close(fig_PCE)
+
+        # ---------------- 3D plot for Model -----------------------
+        fig_Model = plt.figure()
+        ax = plt.axes(projection=&#39;3d&#39;)
+        ax.plot_surface(X, Y, Model_Z, rstride=1, cstride=1,
+                        cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+        ax.set_title(&#39;Model&#39;)
+        ax.set_xlabel(&#39;$x_1$&#39;)
+        ax.set_ylabel(&#39;$x_2$&#39;)
+        ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+        plt.grid()
+        plt.show()
+
+        if save_fig:
+            # Save the figure
+            fig_Model.savefig(f&#39;./{newpath}/3DPlot_Model.pdf&#39;, format=&#34;pdf&#34;,
+                              bbox_inches=&#39;tight&#39;)
+            plt.close(fig_Model)
+
+        return
+
+    # -------------------------------------------------------------------------
+    def _compute_pce_moments(self):
+        &#34;&#34;&#34;
+        Computes the first two moments using the PCE-based meta-model.
+
+        Returns
+        -------
+        None. The computed moments are stored in `self.pce_means` and
+        `self.pce_stds`.
+
+        &#34;&#34;&#34;
+
+        MetaModel = self.MetaModel
+        self.pce_means = {}
+        self.pce_stds = {}
+
+        for Outkey, ValuesDict in MetaModel.coeffs_dict.items():
+
+            pce_mean = np.zeros((len(ValuesDict)))
+            pce_var = np.zeros((len(ValuesDict)))
+
+            for Inkey, InIdxValues in ValuesDict.items():
+                idx = int(Inkey.split(&#39;_&#39;)[1]) - 1
+                coeffs = MetaModel.coeffs_dict[Outkey][Inkey]
+
+                # Mean = c_0
+                if coeffs[0] != 0:
+                    pce_mean[idx] = coeffs[0]
+                else:
+                    pce_mean[idx] = MetaModel.clf_poly[Outkey][Inkey].intercept_
+
+                # Var = sum(coeffs[1:]**2)
+                pce_var[idx] = np.sum(np.square(coeffs[1:]))
+
+            # Back transformation if PCA is selected.
+            if MetaModel.dim_red_method.lower() == &#39;pca&#39;:
+                PCA = MetaModel.pca[Outkey]
+                self.pce_means[Outkey] = PCA.mean_
+                self.pce_means[Outkey] += np.dot(pce_mean, PCA.components_)
+                self.pce_stds[Outkey] = np.sqrt(np.dot(pce_var,
+                                                       PCA.components_**2))
+            else:
+                self.pce_means[Outkey] = pce_mean
+                self.pce_stds[Outkey] = np.sqrt(pce_var)
+
+            # Print a report table
+            print(&#34;\n&gt;&gt;&gt;&gt;&gt; Moments of {} &lt;&lt;&lt;&lt;&lt;&#34;.format(Outkey))
+            print(&#34;\nIndex  |  Mean   |  Std. deviation&#34;)
+            print(&#39;-&#39;*35)
+            print(&#39;\n&#39;.join(f&#39;{i+1}  |  {k:.3e}  |  {j:.3e}&#39; for i, (k, j)
+                            in enumerate(zip(self.pce_means[Outkey],
+                                             self.pce_stds[Outkey]))))
+        print(&#39;-&#39;*40)
+
+    # -------------------------------------------------------------------------
+    def _get_sample(self, n_samples=None):
+        &#34;&#34;&#34;
+        Generates random samples drawn from the input parameter space.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples to generate. The default is `self.n_samples`.
+
+        Returns
+        -------
+        samples : array of shape (n_samples, n_params)
+            Generated samples.
+
+        &#34;&#34;&#34;
+        if n_samples is None:
+            n_samples = self.n_samples
+        PCEModel = self.MetaModel
+        self.samples = PCEModel.ExpDesign.generate_samples(n_samples, &#39;random&#39;)
+        return self.samples
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key_str=&#39;Valid&#39;):
+        &#34;&#34;&#34;
+        Evaluates the forward model for the given samples, or for
+        `self.n_samples` randomly drawn samples if none are given.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate the model at. The default is None.
+        key_str : str, optional
+            Key string passed to the model. The default is &#39;Valid&#39;.
+
+        Returns
+        -------
+        model_outs : dict
+            Dictionary of results.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+
+        if samples is None:
+            samples = self._get_sample()
+            self.samples = samples
+        else:
+            self.n_samples = len(samples)
+
+        model_outs, _ = Model.run_model_parallel(samples, key_str=key_str)
+
+        return model_outs
+
+    # -------------------------------------------------------------------------
+    def _plot_validation(self, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the metamodel outputs against those of the (full) original
+        model for visual comparison.
+
+        Parameters
+        ----------
+        save_fig : bool, optional
+            Save the plots. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        PCEModel = self.MetaModel
+
+        # get the samples
+        x_val = self.samples
+        y_pce_val = self.pce_out_mean
+        y_val = self.model_out_dict
+
+        # Open a pdf for the plots
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf1 = PdfPages(f&#39;./{newpath}/Model_vs_PCEModel.pdf&#39;)
+
+        fig = plt.figure()
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+
+            regression_model = LinearRegression()
+            regression_model.fit(y_pce_val_, y_val_)
+
+            # Predict
+            x_new = np.linspace(np.min(y_pce_val_), np.max(y_pce_val_), 100)
+            y_predicted = regression_model.predict(x_new[:, np.newaxis])
+
+            plt.scatter(y_pce_val_, y_val_, color=&#39;gold&#39;, linewidth=2)
+            plt.plot(x_new, y_predicted, color=&#39;k&#39;)
+
+            # Calculate the adjusted R_squared and RMSE
+            # the total number of explanatory variables in the model
+            # (not including the constant term)
+            length_list = [len(value) for value
+                           in PCEModel.coeffs_dict[key].values()]
+            n_predictors = min(length_list)
+            n_samples = x_val.shape[0]
+
+            R2 = r2_score(y_val_, y_pce_val_)
+            AdjR2 = 1 - (1 - R2) * (n_samples - 1) / \
+                (n_samples - n_predictors - 1)
+            rmse = mean_squared_error(y_val_, y_pce_val_, squared=False)
+
+            plt.annotate(f&#39;RMSE = {rmse:.3f}\n Adjusted $R^2$ = {AdjR2:.3f}&#39;,
+                         xy=(0.05, 0.85), xycoords=&#39;axes fraction&#39;)
+
+            plt.ylabel(&#34;Original Model&#34;)
+            plt.xlabel(&#34;PCE Model&#34;)
+            plt.grid()
+            plt.show()
+
+            if save_fig:
+                # save the current figure
+                pdf1.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                # Destroy the current plot
+                plt.clf()
+
+        # Close the pdf
+        if save_fig:
+            pdf1.close()
+
+    # -------------------------------------------------------------------------
+    def _plot_validation_multi(self, x_values=[], x_axis=&#34;x [m]&#34;, save_fig=True):
+        &#34;&#34;&#34;
+        Plots the metamodel outputs against those of the (full) multi-output
+        original model for visual comparison.
+
+        Parameters
+        ----------
+        x_values : list or array, optional
+            List of x values. The default is [].
+        x_axis : str, optional
+            Label of the x axis. The default is &#34;x [m]&#34;.
+        save_fig : bool, optional
+            Whether to save the figures. The default is True.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        Model = self.MetaModel.ModelObj
+
+        if save_fig:
+            newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+
+            # create a PdfPages object
+            pdf = PdfPages(f&#39;./{newpath}/Model_vs_PCEModel.pdf&#39;)
+
+        # List of markers and colors
+        color = cycle(([&#39;b&#39;, &#39;g&#39;, &#39;r&#39;, &#39;y&#39;, &#39;k&#39;]))
+        marker = cycle((&#39;x&#39;, &#39;d&#39;, &#39;+&#39;, &#39;o&#39;, &#39;*&#39;))
+
+        fig = plt.figure()
+        # Plot the model vs PCE model
+        for keyIdx, key in enumerate(Model.Output.names):
+
+            y_pce_val = self.pce_out_mean[key]
+            y_pce_val_std = self.pce_out_std[key]
+            y_val = self.model_out_dict[key]
+            try:
+                x = self.model_out_dict[&#39;x_values&#39;][key]
+            except (KeyError, IndexError):
+                x = x_values
+
+            for idx in range(y_val.shape[0]):
+                Color = next(color)
+                Marker = next(marker)
+
+                plt.plot(x, y_val[idx], color=Color, marker=Marker,
+                         label=&#39;$Y_{%s}^M$&#39;%(idx+1))
+
+                plt.plot(x, y_pce_val[idx], color=Color, marker=Marker,
+                         linestyle=&#39;--&#39;,
+                         label=&#39;$Y_{%s}^{PCE}$&#39;%(idx+1))
+                plt.fill_between(x, y_pce_val[idx]-1.96*y_pce_val_std[idx],
+                                 y_pce_val[idx]+1.96*y_pce_val_std[idx],
+                                 color=Color, alpha=0.15)
+
+            # Calculate the RMSE over all series and R^2 of the last one
+            rmse = mean_squared_error(y_val, y_pce_val, squared=False)
+            R2 = r2_score(y_val[idx].reshape(-1, 1),
+                          y_pce_val[idx].reshape(-1, 1))
+
+            plt.annotate(f&#39;RMSE = {rmse:.3f}\n $R^2$ = {R2:.3f}&#39;,
+                         xy=(0.2, 0.75), xycoords=&#39;axes fraction&#39;)
+
+            plt.ylabel(key)
+            plt.xlabel(x_axis)
+            plt.legend(loc=&#39;best&#39;)
+            plt.grid()
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+        if save_fig:
+            pdf.close()
+
+        # Zip the subdirectories
+        Model.zip_subdirs(f&#39;{Model.name}valid&#39;, f&#39;{Model.name}valid_&#39;)</code></pre>
+</details>
+<h3>Methods</h3>
+<dl>
+<dt id="post_processing.PostProcessing.plot_moments"><code class="name flex">
+<span>def <span class="ident">plot_moments</span></span>(<span>self, xlabel='Time [s]', plot_type=None, save_fig=True)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Plots the moments in a pdf format in the directory
+<code>Outputs_PostProcessing</code>.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>xlabel</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>String to be displayed as x-label. The default is <code>'Time [s]'</code>.</dd>
+<dt><strong><code>plot_type</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Options: bar or line. The default is <code>None</code>.</dd>
+<dt><strong><code>save_fig</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Save figure or not. The default is <code>True</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>pce_means</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Mean of the model outputs.</dd>
+<dt><strong><code>pce_stds</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Standard deviation of the model outputs.</dd>
+</dl></div>
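+<p>The moments follow directly from the PCE coefficients: the mean is the
+constant term <code>c_0</code> and the variance is the sum of the squared
+higher-order coefficients. A minimal stand-alone sketch of this relation
+(plain NumPy with a hypothetical coefficient vector, not part of this
+module):</p>
+<pre><code class="python">import numpy as np
+
+coeffs = np.array([2.0, 0.5, -0.3, 0.1])  # hypothetical PCE coefficients
+pce_mean = coeffs[0]                       # mean = c_0
+pce_std = np.sqrt(np.sum(coeffs[1:]**2))   # std = sqrt(sum of c_i^2, i &gt; 0)
+print(pce_mean, pce_std)                   # 2.0 0.5916...</code></pre>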
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def plot_moments(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+    &#34;&#34;&#34;
+    Plots the moments in a pdf format in the directory
+    `Outputs_PostProcessing`.
+
+    Parameters
+    ----------
+    xlabel : str, optional
+        String to be displayed as x-label. The default is `&#39;Time [s]&#39;`.
+    plot_type : str, optional
+        Options: bar or line. The default is `None`.
+    save_fig : bool, optional
+        Save figure or not. The default is `True`.
+
+    Returns
+    -------
+    pce_means : dict
+        Mean of the model outputs.
+    pce_stds : dict
+        Standard deviation of the model outputs.
+
+    &#34;&#34;&#34;
+
+    bar_plot = plot_type == &#39;bar&#39;
+    meta_model_type = self.MetaModel.meta_model_type
+    Model = self.MetaModel.ModelObj
+
+    # Read Monte-Carlo reference
+    self.mc_reference = Model.read_mc_reference()
+
+    # Set the x values
+    x_values_orig = self.MetaModel.ExpDesign.x_values
+
+    # Compute the moments with the PCEModel object
+    self._compute_pce_moments()
+
+    # Get the variables
+    out_names = Model.Output.names
+
+    # Open a pdf for the plots
+    if save_fig:
+        newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # create a PdfPages object
+        pdf = PdfPages(f&#39;./{newpath}Mean_Std_PCE.pdf&#39;)
+
+    # Plot the best fit line, set the linewidth (lw), color and
+    # transparency (alpha) of the line
+    for idx, key in enumerate(out_names):
+        fig, ax = plt.subplots(nrows=1, ncols=2)
+
+        # Extract mean and std
+        mean_data = self.pce_means[key]
+        std_data = self.pce_stds[key]
+
+        # Extract a list of x values
+        if type(x_values_orig) is dict:
+            x = x_values_orig[key]
+        else:
+            x = x_values_orig
+
+        # Plot: bar plot or line plot
+        if bar_plot:
+            ax[0].bar(list(map(str, x)), mean_data, color=&#39;b&#39;,
+                      width=0.25)
+            ax[1].bar(list(map(str, x)), std_data, color=&#39;b&#39;,
+                      width=0.25)
+            ax[0].legend(labels=[meta_model_type])
+            ax[1].legend(labels=[meta_model_type])
+        else:
+            ax[0].plot(x, mean_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                       label=meta_model_type)
+            ax[1].plot(x, std_data, lw=3, color=&#39;k&#39;, marker=&#39;x&#39;,
+                       label=meta_model_type)
+
+        if self.mc_reference is not None:
+            if bar_plot:
+                ax[0].bar(list(map(str, x)), self.mc_reference[&#39;mean&#39;],
+                          color=&#39;r&#39;, width=0.25)
+                ax[1].bar(list(map(str, x)), self.mc_reference[&#39;std&#39;],
+                          color=&#39;r&#39;, width=0.25)
+                ax[0].legend(labels=[meta_model_type, &#39;Ref.&#39;])
+                ax[1].legend(labels=[meta_model_type, &#39;Ref.&#39;])
+            else:
+                ax[0].plot(x, self.mc_reference[&#39;mean&#39;], lw=3, marker=&#39;x&#39;,
+                           color=&#39;r&#39;, label=&#39;Ref.&#39;)
+                ax[1].plot(x, self.mc_reference[&#39;std&#39;], lw=3, marker=&#39;x&#39;,
+                           color=&#39;r&#39;, label=&#39;Ref.&#39;)
+
+        # Label the axes and provide a title
+        ax[0].set_xlabel(xlabel)
+        ax[1].set_xlabel(xlabel)
+        ax[0].set_ylabel(key)
+        ax[1].set_ylabel(key)
+
+        # Provide a title
+        ax[0].set_title(&#39;Mean of &#39; + key)
+        ax[1].set_title(&#39;Std of &#39; + key)
+
+        if not bar_plot:
+            ax[0].legend(loc=&#39;best&#39;)
+            ax[1].legend(loc=&#39;best&#39;)
+
+        plt.tight_layout()
+
+        if save_fig:
+            # save the current figure
+            pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+            # Destroy the current plot
+            plt.clf()
+
+    if save_fig:
+        pdf.close()
+
+    return self.pce_means, self.pce_stds</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.valid_metamodel"><code class="name flex">
+<span>def <span class="ident">valid_metamodel</span></span>(<span>self, n_samples=1, samples=None, x_axis='Time [s]')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Evaluates and plots the meta model and the PCEModel outputs for the
+given number of samples or the given samples.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of samples to be evaluated. The default is 1.</dd>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code>, optional</dt>
+<dd>Samples to be evaluated. The default is None.</dd>
+<dt><strong><code>x_axis</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Label of x axis. The default is <code>'Time [s]'</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<p>None.</p></div>
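+<p>A minimal usage sketch (assuming <code>post</code> is a
+<code>PostProcessing</code> instance whose <code>MetaModel</code> attribute
+holds a trained surrogate; the sample array is hypothetical):</p>
+<pre><code class="python">import numpy as np
+
+# Validate at three hypothetical parameter sets (two parameters each)
+samples = np.array([[0.1, 0.5],
+                    [0.4, 0.9],
+                    [0.7, 0.2]])
+post.valid_metamodel(samples=samples, x_axis=&#39;Time [s]&#39;)
+
+# Or let the method draw random validation samples itself
+post.valid_metamodel(n_samples=10)</code></pre>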
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def valid_metamodel(self, n_samples=1, samples=None, x_axis=&#39;Time [s]&#39;):
+    &#34;&#34;&#34;
+    Evaluates and plots the metamodel and the original model outputs for the
+    given number of samples or the given samples.
+
+    Parameters
+    ----------
+    n_samples : int, optional
+        Number of samples to be evaluated. The default is 1.
+    samples : array of shape (n_samples, n_params), optional
+        Samples to be evaluated. The default is None.
+    x_axis : str, optional
+        Label of x axis. The default is `&#39;Time [s]&#39;`.
+
+    Returns
+    -------
+    None.
+
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+    Model = MetaModel.ModelObj
+
+    if samples is None:
+        self.n_samples = n_samples
+        samples = self._get_sample()
+    else:
+        self.n_samples = samples.shape[0]
+
+    # Extract x_values
+    x_values = MetaModel.ExpDesign.x_values
+
+    self.model_out_dict = self._eval_model(samples, key_str=&#39;valid&#39;)
+    self.pce_out_mean, self.pce_out_std = MetaModel.eval_metamodel(samples)
+
+    try:
+        key = Model.Output.names[1]
+    except IndexError:
+        key = Model.Output.names[0]
+
+    n_obs = self.model_out_dict[key].shape[1]
+
+    if n_obs == 1:
+        self._plot_validation()
+    else:
+        self._plot_validation_multi(x_values=x_values, x_axis=x_axis)</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.check_accuracy"><code class="name flex">
+<span>def <span class="ident">check_accuracy</span></span>(<span>self, n_samples=None, samples=None, outputs=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Checks accuracy of the metamodel by computing the root mean square
+error and validation error for all outputs.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of samples. The default is None.</dd>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code>, optional</dt>
+<dd>Parameter sets to be checked. The default is None.</dd>
+<dt><strong><code>outputs</code></strong> :&ensp;<code>dict</code>, optional</dt>
+<dd>Output dictionary with model outputs for all given output types in
+<code>Model.Output.names</code>. The default is None.</dd>
+</dl>
+<h2 id="raises">Raises</h2>
+<dl>
+<dt><code>Exception</code></dt>
+<dd>When neither n_samples nor samples are provided.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>rmse</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Root mean squared error for each output.</dd>
+<dt><strong><code>valid_error</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Validation error for each output.</dd>
+</dl></div>
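+<p>The validation error reported below normalizes the squared RMSE by the
+number of samples and the output variance, i.e.
+<code>(rmse**2 / n_samples) / var(y)</code>. A usage sketch (assuming a
+<code>PostProcessing</code> instance <code>post</code> with a trained
+surrogate):</p>
+<pre><code class="python"># Draw 200 random validation samples, run both models and compare
+rmse, valid_error = post.check_accuracy(n_samples=200)
+
+# Each entry holds one value per measurement point of that output
+for key, err in rmse.items():
+    print(key, err.mean())</code></pre>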
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def check_accuracy(self, n_samples=None, samples=None, outputs=None):
+    &#34;&#34;&#34;
+    Checks accuracy of the metamodel by computing the root mean square
+    error and validation error for all outputs.
+
+    Parameters
+    ----------
+    n_samples : int, optional
+        Number of samples. The default is None.
+    samples : array of shape (n_samples, n_params), optional
+        Parameter sets to be checked. The default is None.
+    outputs : dict, optional
+        Output dictionary with model outputs for all given output types in
+        `Model.Output.names`. The default is None.
+
+    Raises
+    ------
+    Exception
+        When neither n_samples nor samples are provided.
+
+    Returns
+    -------
+    rmse : dict
+        Root mean squared error for each output.
+    valid_error : dict
+        Validation error for each output.
+
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+    Model = MetaModel.ModelObj
+
+    # Set the number of samples
+    if n_samples:
+        self.n_samples = n_samples
+    elif samples is not None:
+        self.n_samples = samples.shape[0]
+    else:
+        raise Exception(&#34;Please provide either samples or the number of &#34;
+                        &#34;samples!&#34;)
+
+    # Generate random samples if necessary
+    Samples = self._get_sample() if samples is None else samples
+
+    # Run the original model with the generated samples
+    if outputs is None:
+        outputs = self._eval_model(Samples, key_str=&#39;validSet&#39;)
+
+    # Run the PCE model with the generated samples
+    pce_outputs, _ = MetaModel.eval_metamodel(samples=Samples)
+
+    self.rmse = {}
+    self.valid_error = {}
+    # Loop over the keys and compute RMSE error.
+    for key in Model.Output.names:
+        # Root mean square error
+        self.rmse[key] = mean_squared_error(outputs[key], pce_outputs[key],
+                                            squared=False,
+                                            multioutput=&#39;raw_values&#39;)
+        # Validation error
+        self.valid_error[key] = (self.rmse[key]**2 / self.n_samples) / \
+            np.var(outputs[key], ddof=1, axis=0)
+
+        # Print a report table
+        print(&#34;\n&gt;&gt;&gt;&gt;&gt; Errors of {} &lt;&lt;&lt;&lt;&lt;&#34;.format(key))
+        print(&#34;\nIndex  |  RMSE   |  Validation Error&#34;)
+        print(&#39;-&#39;*35)
+        print(&#39;\n&#39;.join(f&#39;{i+1}  |  {k:.3e}  |  {j:.3e}&#39; for i, (k, j)
+                        in enumerate(zip(self.rmse[key],
+                                         self.valid_error[key]))))
+    # Save error dicts in PCEModel object
+    self.MetaModel.rmse = self.rmse
+    self.MetaModel.valid_error = self.valid_error
+
+    return self.rmse, self.valid_error</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.plot_seq_design_diagnostics"><code class="name flex">
+<span>def <span class="ident">plot_seq_design_diagnostics</span></span>(<span>self, ref_BME_KLD=None, save_fig=True)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
+(KLD) for the sequential design.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>ref_BME_KLD</code></strong> :&ensp;<code>array</code>, optional</dt>
+<dd>Reference BME and KLD. The default is <code>None</code>.</dd>
+<dt><strong><code>save_fig</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Whether to save the figures. The default is <code>True</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<p>None.</p></div>
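+<p>A usage sketch (assuming the experimental design was built sequentially;
+the reference values below are hypothetical):</p>
+<pre><code class="python">import numpy as np
+
+# Reference [BME, KLD] values, e.g. from a brute-force MC reference run
+ref_BME_KLD = np.array([1.2e-3, 0.45])  # hypothetical numbers
+post.plot_seq_design_diagnostics(ref_BME_KLD=ref_BME_KLD)
+
+# Without a reference, the raw criteria are plotted on a log scale
+post.plot_seq_design_diagnostics()</code></pre>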
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def plot_seq_design_diagnostics(self, ref_BME_KLD=None, save_fig=True):
+    &#34;&#34;&#34;
+    Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
+    (KLD) for the sequential design.
+
+    Parameters
+    ----------
+    ref_BME_KLD : array, optional
+        Reference BME and KLD. The default is `None`.
+    save_fig : bool, optional
+        Whether to save the figures. The default is `True`.
+
+    Returns
+    -------
+    None.
+
+    &#34;&#34;&#34;
+    PCEModel = self.MetaModel
+    n_init_samples = PCEModel.ExpDesign.n_init_samples
+    n_total_samples = PCEModel.ExpDesign.X.shape[0]
+
+    if save_fig:
+        newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # create a PdfPages object
+        pdf = PdfPages(f&#39;./{newpath}/seqPCEModelDiagnostics.pdf&#39;)
+
+    plotList = [&#39;Modified LOO error&#39;, &#39;Validation error&#39;, &#39;KLD&#39;, &#39;BME&#39;,
+                &#39;RMSEMean&#39;, &#39;RMSEStd&#39;, &#39;Hellinger distance&#39;]
+    seqList = [PCEModel.SeqModifiedLOO, PCEModel.seqValidError,
+               PCEModel.SeqKLD, PCEModel.SeqBME, PCEModel.seqRMSEMean,
+               PCEModel.seqRMSEStd, PCEModel.SeqDistHellinger]
+
+    markers = (&#39;x&#39;, &#39;o&#39;, &#39;d&#39;, &#39;*&#39;, &#39;+&#39;)
+    colors = (&#39;k&#39;, &#39;darkgreen&#39;, &#39;b&#39;, &#39;navy&#39;, &#39;darkred&#39;)
+
+    # Plot the evolution of the diagnostic criteria of the
+    # Sequential Experimental Design.
+    for plotidx, plot in enumerate(plotList):
+        fig, ax = plt.subplots()
+        seq_dict = seqList[plotidx]
+        name_util = list(seq_dict.keys())
+
+        if len(name_util) == 0:
+            continue
+
+        # Box plot when Replications have been detected.
+        if any(int(name.split(&#34;rep_&#34;, 1)[1]) &gt; 1 for name in name_util):
+            # Extract the values from dict
+            sorted_seq_opt = {}
+            # Number of replications
+            n_reps = PCEModel.ExpDesign.nReprications
+
+            # Get the list of utility function names
+            # Handle if only one UtilityFunction is provided
+            if not isinstance(PCEModel.ExpDesign.UtilityFunction, list):
+                util_funcs = [PCEModel.ExpDesign.UtilityFunction]
+            else:
+                util_funcs = PCEModel.ExpDesign.UtilityFunction
+
+            for util in util_funcs:
+                sortedSeq = {}
+                # min number of runs available from reps
+                n_runs = min([seq_dict[f&#39;{util}_rep_{i+1}&#39;].shape[0]
+                             for i in range(n_reps)])
+
+                for runIdx in range(n_runs):
+                    values = []
+                    for key in seq_dict.keys():
+                        if util in key:
+                            values.append(seq_dict[key][runIdx].mean())
+                    sortedSeq[&#39;SeqItr_&#39;+str(runIdx)] = np.array(values)
+                sorted_seq_opt[util] = sortedSeq
+
+            # BoxPlot
+            def draw_plot(data, labels, edge_color, fill_color, idx):
+                pos = labels - (idx-1)
+                bp = plt.boxplot(data, positions=pos, labels=labels,
+                                 patch_artist=True, sym=&#39;&#39;, widths=0.75)
+                elements = [&#39;boxes&#39;, &#39;whiskers&#39;, &#39;fliers&#39;, &#39;means&#39;,
+                            &#39;medians&#39;, &#39;caps&#39;]
+                for element in elements:
+                    plt.setp(bp[element], color=edge_color[idx])
+
+                for patch in bp[&#39;boxes&#39;]:
+                    patch.set(facecolor=fill_color[idx])
+
+            if PCEModel.ExpDesign.n_new_samples != 1:
+                step1 = PCEModel.ExpDesign.n_new_samples
+                step2 = 1
+            else:
+                step1 = 5
+                step2 = 5
+            edge_color = [&#39;red&#39;, &#39;blue&#39;, &#39;green&#39;]
+            fill_color = [&#39;tan&#39;, &#39;cyan&#39;, &#39;lightgreen&#39;]
+            plot_label = plot
+            # Plot for different Utility Functions
+            for idx, util in enumerate(util_funcs):
+                all_errors = np.empty((n_reps, 0))
+
+                for key in list(sorted_seq_opt[util].keys()):
+                    errors = sorted_seq_opt.get(util, {}).get(key)[:, None]
+                    all_errors = np.hstack((all_errors, errors))
+
+                # Special cases for BME and KLD
+                if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                    # BME convergence if refBME is provided
+                    if ref_BME_KLD is not None:
+                        if plot == &#39;BME&#39;:
+                            refValue = ref_BME_KLD[0]
+                            plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                        if plot == &#39;KLD&#39;:
+                            refValue = ref_BME_KLD[1]
+                            # Raw strings keep the LaTeX backslashes intact
+                            plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                r&#39;p(\theta)]$&#39;
+
+                        # Difference between BME/KLD and the ref. values
+                        all_errors = np.divide(all_errors,
+                                               np.full((all_errors.shape),
+                                                       refValue))
+
+                        # Plot baseline at one, i.e. no difference to the
+                        # reference
+                        plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                    ls=&#39;--&#39;, lw=2)
+
+                # Plot each UtilFuncs
+                labels = np.arange(n_init_samples, n_total_samples+1, step1)
+                draw_plot(all_errors[:, ::step2], labels, edge_color,
+                          fill_color, idx)
+
+            plt.xticks(labels, labels)
+            # Set the major and minor locators
+            ax.xaxis.set_major_locator(ticker.AutoLocator())
+            ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+            ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+            ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+            # Legend
+            legend_elements = []
+            for idx, util in enumerate(util_funcs):
+                legend_elements.append(Patch(facecolor=fill_color[idx],
+                                             edgecolor=edge_color[idx],
+                                             label=util))
+            plt.legend(handles=legend_elements[::-1], loc=&#39;best&#39;)
+
+            if plot != &#39;BME&#39; and plot != &#39;KLD&#39;:
+                plt.yscale(&#39;log&#39;)
+            plt.autoscale(True)
+            plt.xlabel(&#39;\\# of training samples&#39;)
+            plt.ylabel(plot_label)
+            plt.title(plot)
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+                # Save arrays into files
+                with open(f&#39;./{newpath}/Seq{plot}.txt&#39;, &#39;w&#39;) as f:
+                    f.write(str(sorted_seq_opt))
+        else:
+            for idx, name in enumerate(name_util):
+                seq_values = seq_dict[name]
+                if PCEModel.ExpDesign.n_new_samples != 1:
+                    step = PCEModel.ExpDesign.n_new_samples
+                else:
+                    step = 1
+                x_idx = np.arange(n_init_samples, n_total_samples+1, step)
+                if n_total_samples not in x_idx:
+                    x_idx = np.hstack((x_idx, n_total_samples))
+
+                if plot == &#39;KLD&#39; or plot == &#39;BME&#39;:
+                    # BME convergence if refBME is provided
+                    if ref_BME_KLD is not None:
+                        if plot == &#39;BME&#39;:
+                            refValue = ref_BME_KLD[0]
+                            plot_label = r&#39;$BME/BME^{Ref.}$&#39;
+                        if plot == &#39;KLD&#39;:
+                            refValue = ref_BME_KLD[1]
+                            plot_label = r&#39;$D_{KL}[p(\theta|y_*),p(\theta)]&#39;\
+                                r&#39; / D_{KL}^{Ref.}[p(\theta|y_*), &#39;\
+                                r&#39;p(\theta)]$&#39;
+
+                        # Difference between BME/KLD and the ref. values
+                        values = np.divide(seq_values,
+                                           np.full((seq_values.shape),
+                                                   refValue))
+
+                        # Plot baseline at one, i.e. no difference to the
+                        # reference
+                        plt.axhline(y=1.0, xmin=0, xmax=1, c=&#39;green&#39;,
+                                    ls=&#39;--&#39;, lw=2)
+
+                        # Set the limits
+                        plt.ylim([1e-1, 1e1])
+
+                        # Create the plots
+                        plt.semilogy(x_idx, values, marker=markers[idx],
+                                     color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                     label=name.split(&#34;_rep&#34;, 1)[0])
+                    else:
+                        plot_label = plot
+
+                        # Create the plots
+                        plt.plot(x_idx, seq_values, marker=markers[idx],
+                                 color=colors[idx], ls=&#39;--&#39;, lw=2,
+                                 label=name.split(&#34;_rep&#34;, 1)[0])
+
+                else:
+                    plot_label = plot
+                    seq_values = np.nan_to_num(seq_values)
+
+                    # Plot the error evolution for each output
+                    for i in range(seq_values.shape[1]):
+                        plt.semilogy(x_idx, seq_values[:, i], ls=&#39;--&#39;,
+                                     lw=2, marker=markers[idx],
+                                     color=colors[idx], alpha=0.15)
+
+                    plt.semilogy(x_idx, seq_values, marker=markers[idx],
+                                 ls=&#39;--&#39;, lw=2, color=colors[idx],
+                                 label=name.split(&#34;_rep&#34;, 1)[0])
+
+            # Set the major and minor locators
+            ax.xaxis.set_major_locator(ticker.AutoLocator())
+            ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
+            ax.xaxis.grid(True, which=&#39;major&#39;, linestyle=&#39;-&#39;)
+            ax.xaxis.grid(True, which=&#39;minor&#39;, linestyle=&#39;--&#39;)
+
+            ax.tick_params(axis=&#39;both&#39;, which=&#39;major&#39;, direction=&#39;in&#39;,
+                           width=3, length=10)
+            ax.tick_params(axis=&#39;both&#39;, which=&#39;minor&#39;, direction=&#39;in&#39;,
+                           width=2, length=8)
+            plt.xlabel(&#39;Number of runs&#39;)
+            plt.ylabel(plot_label)
+            plt.title(plot)
+            plt.legend(frameon=True)
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+                # Destroy the current plot
+                plt.clf()
+
+                # ---------------- Saving arrays into files ---------------
+                np.save(f&#39;./{newpath}/Seq{plot}.npy&#39;, seq_values)
+
+    # Close the pdf
+    if save_fig:
+        pdf.close()
+    return</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.sobol_indices"><code class="name flex">
+<span>def <span class="ident">sobol_indices</span></span>(<span>self, xlabel='Time [s]', plot_type=None, save_fig=True)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Provides Sobol indices as a sensitivity measure to infer the importance
+of the input parameters. See Eq. 27 in [1] for more details. For the
+case with principal component analysis, refer to [2].</p>
+<p>[1] Global sensitivity analysis: A flexible and efficient framework
+with an example from stochastic hydrogeology S. Oladyshkin, F.P.
+de Barros, W. Nowak
+<a href="https://doi.org/10.1016/j.advwatres.2011.11.001">https://doi.org/10.1016/j.advwatres.2011.11.001</a></p>
+<p>[2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+component analysis and sparse polynomial chaos expansions for global
+sensitivity analysis and model calibration: Application to urban
+drainage simulation. Reliability Engineering &amp; System Safety, 195,
+p.106737.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>xlabel</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Label of the x-axis. The default is <code>'Time [s]'</code>.</dd>
+<dt><strong><code>plot_type</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Plot type. The default is <code>None</code>. This corresponds to line plot.
+Bar chart can be selected by <code>bar</code>.</dd>
+<dt><strong><code>save_fig</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Whether to save the figures. The default is <code>True</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>sobol_cell</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Sobol indices.</dd>
+<dt><strong><code>total_sobol</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Total Sobol indices.</dd>
+</dl></div>
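+<p>For a PCE, a total Sobol index is the variance contributed by all basis
+terms involving a given parameter, divided by the total variance. A minimal
+stand-alone sketch with a hypothetical two-parameter basis (plain NumPy,
+mirroring the summation in the source below):</p>
+<pre><code class="python">import numpy as np
+
+# Hypothetical multi-indices (one row per basis term) and coefficients
+basis = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
+coeffs = np.array([1.0, 0.6, 0.3, 0.2])
+
+total_var = np.sum(coeffs[1:]**2)  # 0.36 + 0.09 + 0.04 = 0.49
+for par in range(basis.shape[1]):
+    mask = basis[:, par] &gt; 0       # all terms involving this parameter
+    s_total = np.sum(coeffs[mask]**2) / total_var
+    print(f&#39;S_T of parameter {par+1}: {s_total:.3f}&#39;)
+# S_T of parameter 1: 0.816, S_T of parameter 2: 0.265</code></pre>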
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def sobol_indices(self, xlabel=&#39;Time [s]&#39;, plot_type=None, save_fig=True):
+    &#34;&#34;&#34;
+    Provides Sobol indices as a sensitivity measure to infer the importance
+    of the input parameters. See Eq. 27 in [1] for more details. For the
+    case with principal component analysis, refer to [2].
+
+    [1] Global sensitivity analysis: A flexible and efficient framework
+    with an example from stochastic hydrogeology S. Oladyshkin, F.P.
+    de Barros, W. Nowak  https://doi.org/10.1016/j.advwatres.2011.11.001
+
+    [2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+    component analysis and sparse polynomial chaos expansions for global
+    sensitivity analysis and model calibration: Application to urban
+    drainage simulation. Reliability Engineering &amp; System Safety, 195,
+    p.106737.
+
+    Parameters
+    ----------
+    xlabel : str, optional
+        Label of the x-axis. The default is `&#39;Time [s]&#39;`.
+    plot_type : str, optional
+        Plot type. The default is `None`. This corresponds to line plot.
+        Bar chart can be selected by `bar`.
+    save_fig : bool, optional
+        Whether to save the figures. The default is `True`.
+
+    Returns
+    -------
+    sobol_cell : dict
+        Sobol indices.
+    total_sobol : dict
+        Total Sobol indices.
+
+    &#34;&#34;&#34;
+    # Extract the necessary variables
+    PCEModel = self.MetaModel
+    basis_dict = PCEModel.basis_dict
+    coeffs_dict = PCEModel.coeffs_dict
+    n_params = PCEModel.n_params
+    max_order = np.max(PCEModel.pce_deg)
+    self.sobol_cell = {}
+    self.total_sobol = {}
+
+    for Output in PCEModel.ModelObj.Output.names:
+
+        n_meas_points = len(coeffs_dict[Output])
+
+        # Initialize the (cell) array containing the (total) Sobol indices.
+        sobol_array = dict.fromkeys(range(1, max_order+1), [])
+        sobol_cell_array = dict.fromkeys(range(1, max_order+1), [])
+
+        for i_order in range(1, max_order+1):
+            n_comb = math.comb(n_params, i_order)
+
+            sobol_cell_array[i_order] = np.zeros((n_comb, n_meas_points))
+
+        total_sobol_array = np.zeros((n_params, n_meas_points))
+
+        # Initialize the cell to store the names of the variables
+        TotalVariance = np.zeros((n_meas_points))
+
+        # Loop over all measurement points and calculate sobol indices
+        for pIdx in range(n_meas_points):
+
+            # Extract the basis indices (alpha) and coefficients
+            Basis = basis_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+            try:
+                clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+1}&#39;]
+                PCECoeffs = clf_poly.coef_
+            except (KeyError, AttributeError, TypeError):
+                PCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+1}&#39;]
+
+            # Compute total variance
+            TotalVariance[pIdx] = np.sum(np.square(PCECoeffs[1:]))
+
+            nzidx = np.where(PCECoeffs != 0)[0]
+            # Set all the Sobol indices equal to zero in the presence of a
+            # null output.
+            if len(nzidx) == 0:
+                # This is buggy.
+                for i_order in range(1, max_order+1):
+                    sobol_cell_array[i_order][:, pIdx] = 0
+
+            # Otherwise compute them by summing well-chosen coefficients
+            else:
+                nz_basis = Basis[nzidx]
+                for i_order in range(1, max_order+1):
+                    idx = np.where(np.sum(nz_basis &gt; 0, axis=1) == i_order)
+                    subbasis = nz_basis[idx]
+                    Z = np.array(list(combinations(range(n_params), i_order)))
+
+                    for q in range(Z.shape[0]):
+                        Zq = Z[q]
+                        subsubbasis = subbasis[:, Zq]
+                        subidx = np.prod(subsubbasis, axis=1) &gt; 0
+                        sum_ind = nzidx[idx[0][subidx]]
+                        if TotalVariance[pIdx] == 0.0:
+                            sobol_cell_array[i_order][q, pIdx] = 0.0
+                        else:
+                            sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                            sobol /= TotalVariance[pIdx]
+                            sobol_cell_array[i_order][q, pIdx] = sobol
+
+                # Compute the TOTAL Sobol indices.
+                for ParIdx in range(n_params):
+                    idx = nz_basis[:, ParIdx] &gt; 0
+                    sum_ind = nzidx[idx]
+
+                    if TotalVariance[pIdx] == 0.0:
+                        total_sobol_array[ParIdx, pIdx] = 0.0
+                    else:
+                        sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                        sobol /= TotalVariance[pIdx]
+                        total_sobol_array[ParIdx, pIdx] = sobol
+
+            # ----- if PCA selected: Compute covariance -----
+            if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+                cov_Z_p_q = np.zeros((n_params))
+                # Extract the basis indices (alpha) and coefficients for 
+                # next component
+                if pIdx &lt; n_meas_points-1:
+                    nextBasis = basis_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                    try:
+                        clf_poly = PCEModel.clf_poly[Output][f&#39;y_{pIdx+2}&#39;]
+                        nextPCECoeffs = clf_poly.coef_
+                    except (KeyError, AttributeError, TypeError):
+                        nextPCECoeffs = coeffs_dict[Output][f&#39;y_{pIdx+2}&#39;]
+
+                    # Choose the common non-zero basis
+                    mask = (Basis[:, None] == nextBasis).all(-1).any(-1)
+                    similar_basis = Basis[mask]
+                    # Compute the TOTAL Sobol indices.
+                    for ParIdx in range(n_params):
+                        idx = similar_basis[:, ParIdx] &gt; 0
+                        try:
+                            sum_is = nzidx[idx]
+                            cov_Z_p_q[ParIdx] = np.sum(PCECoeffs[sum_ind] *
+                                                       nextPCECoeffs[sum_is])
+                        except (IndexError, ValueError):
+                            cov_Z_p_q[ParIdx] = 0.0
+
+        # Compute the sobol indices according to Ref. 2
+        if PCEModel.dim_red_method.lower() == &#39;pca&#39;:
+            n_c_points = PCEModel.ExpDesign.Y[Output].shape[1]
+            PCA = PCEModel.pca[Output]
+            compPCA = PCA.components_
+            nComp = compPCA.shape[0]
+            var_Z_p = PCA.explained_variance_
+
+            # Extract the sobol index of the components
+            for i_order in range(1, max_order+1):
+                n_comb = math.comb(n_params, i_order)
+                sobol_array[i_order] = np.zeros((n_comb, n_c_points))
+                Z = np.array(list(combinations(range(n_params), i_order)))
+
+                for q in range(Z.shape[0]):
+                    S_Z_i = sobol_cell_array[i_order][q]
+
+                    for tIdx in range(n_c_points):
+                        var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                        if var_Y_t == 0.0:
+                            term1, term2 = 0.0, 0.0
+                        else:
+                            term1 = np.sum([S_Z_i[i]*(var_Z_p[i]*(compPCA[i, tIdx]**2)/var_Y_t) for i in range(nComp)])
+
+                            # Term 2
+                            # cov_Z_p_q = np.ones((nComp))# TODO: from coeffs
+                            Phi_t_p = compPCA[:nComp-1]
+                            Phi_t_q = compPCA
+                            term2 = 2 * np.sum([cov_Z_p_q[ParIdx] * Phi_t_p[i,tIdx] * Phi_t_q[i,tIdx]/var_Y_t for i in range(nComp-1)])
+
+                        sobol_array[i_order][q, tIdx] = term1 #+ term2
+
+            # Compute the TOTAL Sobol indices.
+            total_sobol = np.zeros((n_params, n_c_points))
+            for ParIdx in range(n_params):
+                S_Z_i = total_sobol_array[ParIdx]
+
+                for tIdx in range(n_c_points):
+                    var_Y_t = np.var(PCEModel.ExpDesign.Y[Output][:, tIdx])
+                    if var_Y_t == 0.0:
+                        term1, term2 = 0.0, 0.0
+                    else:
+                        term1 = 0
+                        for i in range(nComp):
+                            term1 += S_Z_i[i] * var_Z_p[i] * \
+                                (compPCA[i, tIdx]**2) / var_Y_t
+
+                        # Term 2
+                        # cov_Z_p_q = np.ones((nComp))# TODO: from coeffs
+                        Phi_t_p = compPCA[:nComp-1]
+                        Phi_t_q = compPCA
+                        term2 = 0
+                        for i in range(nComp-1):
+                            term2 += cov_Z_p_q[ParIdx] * Phi_t_p[i, tIdx] \
+                                * Phi_t_q[i, tIdx] / var_Y_t
+                        term2 *= 2
+
+                    total_sobol[ParIdx, tIdx] = term1 + term2
+
+            self.sobol_cell[Output] = sobol_array
+            self.total_sobol[Output] = total_sobol
+        else:
+            self.sobol_cell[Output] = sobol_cell_array
+            self.total_sobol[Output] = total_sobol_array
+
+    # ---------------- Plot -----------------------
+    par_names = PCEModel.ExpDesign.par_names
+    x_values_orig = PCEModel.ExpDesign.x_values
+
+    cases = [&#39;&#39;]
+
+    for case in cases:
+        newpath = (f&#39;Outputs_PostProcessing_{self.name}/&#39;)
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # File-name prefix (used for the saved figures and CSV files)
+        name = case+&#39;_&#39; if &#39;Valid&#39; in cases else &#39;&#39;
+
+        if save_fig:
+            # create a PdfPages object
+            pdf = PdfPages(&#39;./&#39;+newpath+name+&#39;Sobol_indices.pdf&#39;)
+
+        fig = plt.figure()
+
+        for outIdx, Output in enumerate(PCEModel.ModelObj.Output.names):
+
+            # Extract total Sobol indices
+            total_sobol = self.total_sobol[Output]
+
+            # Extract a list of x values
+            if type(x_values_orig) is dict:
+                x = x_values_orig[Output]
+            else:
+                x = x_values_orig
+
+            if plot_type == &#39;bar&#39;:
+                ax = fig.add_axes([0, 0, 1, 1])
+                dict1 = {xlabel: x}
+                dict2 = {param: sobolIndices for param, sobolIndices
+                         in zip(par_names, total_sobol)}
+
+                df = pd.DataFrame({**dict1, **dict2})
+                df.plot(x=xlabel, y=par_names, kind=&#34;bar&#34;, ax=ax, rot=0,
+                        colormap=&#39;Dark2&#39;)
+                ax.set_ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+
+            else:
+                for i, sobolIndices in enumerate(total_sobol):
+                    plt.plot(x, sobolIndices, label=par_names[i],
+                             marker=&#39;x&#39;, lw=2.5)
+
+                plt.ylabel(&#39;Total Sobol indices, $S^T$&#39;)
+                plt.xlabel(xlabel)
+
+            plt.title(f&#39;Sensitivity analysis of {Output}&#39;)
+            if plot_type != &#39;bar&#39;:
+                plt.legend(loc=&#39;best&#39;, frameon=True)
+
+            # Save indices
+            np.savetxt(f&#39;./{newpath}{name}totalsobol_&#39; +
+                       Output.replace(&#39;/&#39;, &#39;_&#39;) + &#39;.csv&#39;,
+                       total_sobol.T, delimiter=&#39;,&#39;,
+                       header=&#39;,&#39;.join(par_names), comments=&#39;&#39;)
+
+            if save_fig:
+                # save the current figure
+                pdf.savefig(fig, bbox_inches=&#39;tight&#39;)
+
+                # Destroy the current plot
+                plt.clf()
+
+        if save_fig:
+            pdf.close()
+
+    return self.sobol_cell, self.total_sobol</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.check_reg_quality"><code class="name flex">
+<span>def <span class="ident">check_reg_quality</span></span>(<span>self, n_samples=1000, samples=None, save_fig=True)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Checks the quality of the metamodel for single output models based on:
+<a href="https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685">https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685</a></p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>n_samples</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of parameter sets to use for the check. The default is 1000.</dd>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code>, optional</dt>
+<dd>Parameter sets to use for the check. The default is None.</dd>
+<dt><strong><code>save_fig</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Whether to save the figures. The default is True.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<p>None.</p></div>
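+<p>A usage sketch (assuming a <code>PostProcessing</code> instance
+<code>post</code> wrapping a single-output model; the fixed sample array is
+hypothetical):</p>
+<pre><code class="python">import numpy as np
+
+# Diagnostic plots from 500 random validation samples
+post.check_reg_quality(n_samples=500)
+
+# Or reuse a fixed set of parameter sets
+samples = np.random.uniform(0, 1, size=(500, 2))
+post.check_reg_quality(samples=samples)</code></pre>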
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def check_reg_quality(self, n_samples=1000, samples=None, save_fig=True):
+    &#34;&#34;&#34;
+    Checks the quality of the metamodel for single output models based on:
+    https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
+
+
+    Parameters
+    ----------
+    n_samples : int, optional
+        Number of parameter sets to use for the check. The default is 1000.
+    samples : array of shape (n_samples, n_params), optional
+        Parameter sets to use for the check. The default is None.
+    save_fig : bool, optional
+        Whether to save the figures. The default is True.
+
+    Returns
+    -------
+    None.
+
+    &#34;&#34;&#34;
+    MetaModel = self.MetaModel
+
+    if samples is None:
+        self.n_samples = n_samples
+        samples = self._get_sample()
+    else:
+        self.n_samples = samples.shape[0]
+
+    # Evaluate the original and the surrogate model
+    y_val = self._eval_model(samples, key_str=&#39;valid&#39;)
+    y_pce_val, _ = MetaModel.eval_metamodel(samples=samples)
+
+    # Open a pdf for the plots
+    if save_fig:
+        newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+    # Fit the data(train the model)
+    for key in y_pce_val.keys():
+
+        y_pce_val_ = y_pce_val[key]
+        y_val_ = y_val[key]
+
+        # ------ Residuals vs. predicting variables ------
+        # Check the assumptions of linearity and independence
+        fig1 = plt.figure()
+        plt.title(key+&#34;: Residuals vs. Predicting variables&#34;)
+        residuals = y_val_ - y_pce_val_
+        plt.scatter(x=y_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+        plt.grid(True)
+        xmin, xmax = min(y_val_), max(y_val_)
+        plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                   linestyle=&#39;--&#39;)
+        plt.xlabel(key)
+        plt.ylabel(&#39;Residuals&#39;)
+        plt.show()
+
+        if save_fig:
+            # save the current figure
+            fig1.savefig(f&#39;./{newpath}/Residuals_vs_PredVariables.pdf&#39;,
+                         bbox_inches=&#39;tight&#39;)
+            # Destroy the current plot
+            plt.clf()
+
+        # ------ Fitted vs. residuals ------
+        # Check the homoscedasticity (constant variance) assumption
+        fig2 = plt.figure()
+        plt.title(key+&#34;: Fitted values vs. residuals&#34;)
+        plt.scatter(x=y_pce_val_, y=residuals, color=&#39;blue&#39;, edgecolor=&#39;k&#39;)
+        plt.grid(True)
+        xmin, xmax = min(y_pce_val_), max(y_pce_val_)
+        plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color=&#39;red&#39;, lw=3,
+                   linestyle=&#39;--&#39;)
+        plt.xlabel(key)
+        plt.ylabel(&#39;Residuals&#39;)
+        plt.show()
+
+        if save_fig:
+            # save the current figure
+            fig2.savefig(f&#39;./{newpath}/Fitted_vs_Residuals.pdf&#39;,
+                         bbox_inches=&#39;tight&#39;)
+            # Destroy the current plot
+            plt.clf()
+
+        # ------ Histogram of normalized residuals ------
+        fig3 = plt.figure()
+        resid_pearson = residuals / (max(residuals)-min(residuals))
+        plt.hist(resid_pearson, bins=20, edgecolor=&#39;k&#39;)
+        plt.ylabel(&#39;Count&#39;)
+        plt.xlabel(&#39;Normalized residuals&#39;)
+        plt.title(f&#34;{key}: Histogram of normalized residuals&#34;)
+
+        # Normality (Shapiro-Wilk) test of the residuals
+        ax = plt.gca()
+        _, p = stats.shapiro(residuals)
+        if p &gt;= 0.01:
+            annText = &#34;The residuals seem to come from a Gaussian distribution.&#34;
+        else:
+            annText = &#34;The normality assumption may not hold.&#34;
+        at = AnchoredText(annText, prop=dict(size=30), frameon=True,
+                          loc=&#39;upper left&#39;)
+        at.patch.set_boxstyle(&#34;round,pad=0.,rounding_size=0.2&#34;)
+        ax.add_artist(at)
+
+        plt.show()
+
+        if save_fig:
+            # save the current figure
+            fig3.savefig(f&#39;./{newpath}/Hist_NormResiduals.pdf&#39;,
+                         bbox_inches=&#39;tight&#39;)
+            # Destroy the current plot
+            plt.clf()
+
+        # ------ Q-Q plot of the normalized residuals ------
+        plt.figure()
+        fig4 = qqplot(resid_pearson, line=&#39;45&#39;, fit=True)
+        plt.xticks()
+        plt.yticks()
+        plt.xlabel(&#34;Theoretical quantiles&#34;)
+        plt.ylabel(&#34;Sample quantiles&#34;)
+        plt.title(key+&#34;: Q-Q plot of normalized residuals&#34;)
+        plt.grid(True)
+        plt.show()
+
+        if save_fig:
+            # save the current figure
+            fig4.savefig(f&#39;./{newpath}/QQPlot_NormResiduals.pdf&#39;,
+                         bbox_inches=&#39;tight&#39;)
+            # Destroy the current plot
+            plt.clf()</code></pre>
+</details>
+</dd>
+<dt id="post_processing.PostProcessing.eval_pce_model_3d"><code class="name flex">
+<span>def <span class="ident">eval_pce_model_3d</span></span>(<span>self, save_fig=True)</span>
+</code></dt>
+<dd>
+<div class="desc"></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def eval_pce_model_3d(self, save_fig=True):
+
+    self.n_samples = 1000
+
+    PCEModel = self.MetaModel
+    Model = self.MetaModel.ModelObj
+    n_samples = self.n_samples
+
+    # Create 3D-Grid
+    # TODO: Make it general
+    x = np.linspace(-5, 10, n_samples)
+    y = np.linspace(0, 15, n_samples)
+
+    X, Y = np.meshgrid(x, y)
+    PCE_Z = np.zeros((self.n_samples, self.n_samples))
+    Model_Z = np.zeros((self.n_samples, self.n_samples))
+
+    for idxMesh in range(self.n_samples):
+        sample_mesh = np.vstack((X[:, idxMesh], Y[:, idxMesh])).T
+
+        univ_p_val = PCEModel.univ_basis_vals(sample_mesh)
+
+        for Outkey, ValuesDict in PCEModel.coeffs_dict.items():
+
+            pce_out_mean = np.zeros((len(sample_mesh), len(ValuesDict)))
+            pce_out_std = np.zeros((len(sample_mesh), len(ValuesDict)))
+            model_outs = np.zeros((len(sample_mesh), len(ValuesDict)))
+
+            for Inkey, InIdxValues in ValuesDict.items():
+                idx = int(Inkey.split(&#39;_&#39;)[1]) - 1
+                basis_deg_ind = PCEModel.basis_dict[Outkey][Inkey]
+                clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+
+                PSI_Val = PCEModel.create_psi(basis_deg_ind, univ_p_val)
+
+                # Prediction with error bars
+                y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+
+                pce_out_mean[:, idx] = y_mean
+                pce_out_std[:, idx] = y_std
+
+                # Model evaluation
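+                # Note: this re-runs the full model for every output key;
+                # hoisting the call out of this loop would avoid redundant runs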
+                model_out_dict, _ = Model.run_model_parallel(sample_mesh,
+                                                             key_str=&#39;Valid3D&#39;)
+                model_outs[:, idx] = model_out_dict[Outkey].T
+
+            PCE_Z[:, idxMesh] = pce_out_mean[:, 0]
+            Model_Z[:, idxMesh] = model_outs[:, 0]
+
+    # ---------------- 3D plot for PCEModel -----------------------
+    fig_PCE = plt.figure()
+    ax = plt.axes(projection=&#39;3d&#39;)
+    ax.plot_surface(X, Y, PCE_Z, rstride=1, cstride=1,
+                    cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+    ax.set_title(&#39;PCEModel&#39;)
+    ax.set_xlabel(&#39;$x_1$&#39;)
+    ax.set_ylabel(&#39;$x_2$&#39;)
+    ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+    plt.grid()
+    plt.show()
+
+    if save_fig:
+        #  Saving the figure
+        newpath = f&#39;Outputs_PostProcessing_{self.name}/&#39;
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # save the figure to file
+        fig_PCE.savefig(f&#39;./{newpath}/3DPlot_PCEModel.pdf&#39;, format=&#34;pdf&#34;,
+                        bbox_inches=&#39;tight&#39;)
+        plt.close(fig_PCE)
+
+    # ---------------- 3D plot for Model -----------------------
+    fig_Model = plt.figure()
+    ax = plt.axes(projection=&#39;3d&#39;)
+    ax.plot_surface(X, Y, Model_Z, rstride=1, cstride=1,
+                    cmap=&#39;viridis&#39;, edgecolor=&#39;none&#39;)
+    ax.set_title(&#39;Model&#39;)
+    ax.set_xlabel(&#39;$x_1$&#39;)
+    ax.set_ylabel(&#39;$x_2$&#39;)
+    ax.set_zlabel(&#39;$f(x_1,x_2)$&#39;)
+
+    plt.grid()
+    plt.show()
+
+    if save_fig:
+        # Save the figure
+        fig_Model.savefig(f&#39;./{newpath}/3DPlot_Model.pdf&#39;, format=&#34;pdf&#34;,
+                          bbox_inches=&#39;tight&#39;)
+        plt.close(fig_Model)
+
+    return</code></pre>
+</details>
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="post_processing.PostProcessing" href="#post_processing.PostProcessing">PostProcessing</a></code></h4>
+<ul class="">
+<li><code><a title="post_processing.PostProcessing.plot_moments" href="#post_processing.PostProcessing.plot_moments">plot_moments</a></code></li>
+<li><code><a title="post_processing.PostProcessing.valid_metamodel" href="#post_processing.PostProcessing.valid_metamodel">valid_metamodel</a></code></li>
+<li><code><a title="post_processing.PostProcessing.check_accuracy" href="#post_processing.PostProcessing.check_accuracy">check_accuracy</a></code></li>
+<li><code><a title="post_processing.PostProcessing.plot_seq_design_diagnostics" href="#post_processing.PostProcessing.plot_seq_design_diagnostics">plot_seq_design_diagnostics</a></code></li>
+<li><code><a title="post_processing.PostProcessing.sobol_indices" href="#post_processing.PostProcessing.sobol_indices">sobol_indices</a></code></li>
+<li><code><a title="post_processing.PostProcessing.check_reg_quality" href="#post_processing.PostProcessing.check_reg_quality">check_reg_quality</a></code></li>
+<li><code><a title="post_processing.PostProcessing.eval_pce_model_3d" href="#post_processing.PostProcessing.eval_pce_model_3d">eval_pce_model_3d</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/html/pylink.html b/docs/html/pylink.html
old mode 100644
new mode 100755
index 78bb5bf01ecf808e383142550a4c0eb8a1d2a73f..4131ebbf8e2d5874c48e210f3eb80992c4a3d4dc
--- a/docs/html/pylink.html
+++ b/docs/html/pylink.html
@@ -5,13 +5,14 @@
 <meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
 <meta name="generator" content="pdoc 0.10.0" />
 <title>pylink API documentation</title>
-<meta name="description" content="This program runs the model defined by the user …" />
+<meta name="description" content="" />
 <link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
 <link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
 <link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
 <style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
 <style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
 <style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
 <script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
 <script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
 </head>
@@ -22,37 +23,12 @@
 <h1 class="title">Module <code>pylink</code></h1>
 </header>
 <section id="section-intro">
-<p>This program runs the model defined by the user.</p>
-<p>Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart</p>
-<p>Created in July 2019</p>
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
 <pre><code class="python">#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-
-&#34;&#34;&#34;
-This program runs the model defined by the user.
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created in July 2019
-
-&#34;&#34;&#34;
-
 import os
 import shutil
 import h5py
@@ -60,33 +36,104 @@ import numpy as np
 import time
 import zipfile
 import pandas as pd
-from functools import reduce
 import multiprocessing
 import tqdm
 
 
 class PyLinkForwardModel(object):
-    &#34;&#34;&#34;A forward model binder
+    &#34;&#34;&#34;
+    A forward model binder
+
     This calss serves as a code wrapper. This wrapper allows the execution of
-        a third-party software/solver within the scope of BayesValidRox.
-    The wrapper provides two options:
-        1) link_type=&#39;PyLink&#39;:
-            Runs the third-party software using a sell command with given input
-            files.
-        2) link_type=&#39;function&#39;:
-            For this case, it is assumed that model can be run using a function
-            written separately in a Python script. This function recieves the
-            parameters in an array of shape (n_samples, n_params) and returns
-            a dictionary with the x_values and output arrays for given output
-            names.
+    a third-party software/solver within the scope of BayesValidRox.
+
+    Attributes
+    ----------
+    link_type : str
+        The type of the wrapper. The default is `&#39;pylink&#39;`. This runs the
+        third-party software or an executable using a shell command with given
+        input files.
+        The second option is `&#39;function&#39;`, which assumes that the model can be
+        run using a function written separately in a Python script.
+    name : str
+        Name of the model.
+    py_file : str
+        Python file name without `.py` extension to be run for the `&#39;function&#39;`
+        wrapper. Note that the name of the python file and that of the function
+        must be the same. This function must receive the parameters in an array
+        of shape `(n_samples, n_params)` and return a dictionary with the
+        x_values and output arrays for the given output names.
+    shell_command : str
+        Shell command to be executed for the `&#39;pylink&#39;` wrapper.
+    input_file : str or list
+        The input file to be passed to the `&#39;pylink&#39;` wrapper.
+    input_template : str or list
+        A template input file to be passed to the `&#39;pylink&#39;` wrapper. This file
+        must be a copy of `input_file` with `&lt;Xi&gt;` placeholders for the input
+        parameters defined using the `inputs` class, with i being the parameter
+        number. The file name should include `.tpl` before the actual extension
+        of the input file, for example, `params.tpl.input`.
+    aux_file : str or list
+        The list of auxiliary files needed for the `&#39;pylink&#39;` wrapper.
+    exe_path : str
+        Execution path if you wish to run the model for the `&#39;pylink&#39;` wrapper
+        in another directory. The default is `None`, which corresponds to the
+        current working directory.
+    output_file_names : list of str
+        List of the name of the model output text files for the `&#39;pylink&#39;`
+        wrapper.
+    output_names : list of str
+        List of the model outputs to be used for the analysis.
+    output_parser : str
+        Name of the model parser file (without `.py` extension) that receives
+        the `output_file_names` and returns a 2d-array with the first row being
+        the x_values, e.g. x coordinates or time, and the remaining rows the
+        simulation output for each model output defined in `output_names`. Note
+        that again here the name of the file and that of the function must be
+        the same.
+    multi_process: bool
+        Whether the model runs are to be executed in parallel for the `&#39;pylink&#39;`
+        wrapper. The default is `True`.
+    n_cpus: int
+        The number of cpus to be used for the parallel model execution for the
+        `&#39;pylink&#39;` wrapper. The default is `None`, which corresponds to all
+        available cpus.
+    meas_file : str
+        The name of the measurement text-based file. This file must contain
+        x_values as the first column and one column for each model output. The
+        default is `None`. Only needed for the Bayesian Inference.
+    meas_file_valid : str
+        The name of the measurement text-based file for the validation. The
+        default is `None`. Only needed for the validation with Bayesian
+        Inference.
+    mc_ref_file : str
+        The name of the text file for the Monte-Carlo reference (mean and
+        standard deviation) values. It must contain `x_values` as the first
+        column, `mean` as the second column and `std` as the third. It can be
+        used to compare the estimated moments using meta-model in the post-
+        processing step. This is only available for one output.
+    obs_dict : dict
+        A dictionary containing the measurement text-based file. It must
+        contain `x_values` as the first item and one item for each model
+        output. The default is `{}`. Only needed for the Bayesian Inference.
+    obs_dict_valid : dict
+        A dictionary containing the validation measurement text-based file. It
+        must contain `x_values` as the first item and one item for each model
+        output. The default is `{}`.
+    mc_ref_dict : dict
+        A dictionary containing the Monte-Carlo reference (mean and standard
+        deviation) values. It must contain `x_values` as the first item and
+        `mean` as the second item and `std` as the third. The default is `{}`.
+        This is only available for one output.
     &#34;&#34;&#34;
 
-    def __init__(self, link_type=&#39;PyLink&#39;, name=None, shell_command=&#39;&#39;,
-                 py_file=None, input_file=None, input_template=None,
-                 aux_file=None, exe_path=&#39;&#39;, multi_process=True, n_cpus=None,
-                 output_parser=&#39;&#39;, output_names=[], output_file_names=[],
-                 meas_file=None, meas_file_valid=None, mc_ref_file=None,
-                 obs_dict={}, obs_dict_valid={}, mc_ref_dict={}):
+    def __init__(self, link_type=&#39;pylink&#39;, name=None, py_file=None,
+                 shell_command=&#39;&#39;, input_file=None, input_template=None,
+                 aux_file=None, exe_path=&#39;&#39;, output_file_names=[],
+                 output_names=[], output_parser=&#39;&#39;, multi_process=True,
+                 n_cpus=None, meas_file=None, meas_file_valid=None,
+                 mc_ref_file=None, obs_dict={}, obs_dict_valid={},
+                 mc_ref_dict={}):
         self.link_type = link_type
         self.name = name
         self.shell_command = shell_command
@@ -219,24 +266,24 @@ class PyLinkForwardModel(object):
         return output
 
     # -------------------------------------------------------------------------
-    def update_input_params(self, new_input_file, param_sets):
+    def update_input_params(self, new_input_file, param_set):
         &#34;&#34;&#34;
         Finds this pattern with &lt;X1&gt; in the new_input_file and replace it with
          the new value from the array param_sets.
 
         Parameters
         ----------
-        new_input_file : TYPE
-            DESCRIPTION.
-        param_sets : TYPE
-            DESCRIPTION.
+        new_input_file : list
+            List of the input files with the adapted names.
+        param_set : array of shape (n_params)
+            Parameter set.
 
         Returns
         -------
         None.
 
         &#34;&#34;&#34;
-        NofPa = param_sets.shape[0]
+        NofPa = param_set.shape[0]
         text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
 
         for filename in new_input_file:
@@ -245,7 +292,7 @@ class PyLinkForwardModel(object):
                 filedata = file.read()
 
             # Replace the target string
-            for text_to_search, params in zip(text_to_search_list, param_sets):
+            for text_to_search, params in zip(text_to_search_list, param_set):
                 filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
 
             # Write the file out again
@@ -257,13 +304,13 @@ class PyLinkForwardModel(object):
         &#34;&#34;&#34;
         Runs the execution command given by the user to run the given model.
         It checks if the output files have been generated. If yes, the jobe is
-         done and it extracts and returns the requested output(s). Otherwise,
-         it executes the command again.
+        done and it extracts and returns the requested output(s). Otherwise,
+        it executes the command again.
 
         Parameters
         ----------
-        command : string
-            The command to be executed.
+        command : str
+            The shell command to be executed.
         output_file_names : list
             Name of the output file names.
 
@@ -301,6 +348,19 @@ class PyLinkForwardModel(object):
         This function creates subdirectory for the current run and copies the
         necessary files to this directory and renames them. Next, it executes
         the given command.
+
+        Parameters
+        ----------
+        xx : tuple
+            A tuple including parameter set, simulation number and key string.
+
+        Returns
+        -------
+        output : array of shape (n_outputs+1, n_obs)
+            An array returned by the output parser containing the x_values as
+            the first row and the simulation results stored in the rest of
+            the array.
+
         &#34;&#34;&#34;
         c_points, run_no, key_str = xx
 
@@ -368,21 +428,21 @@ class PyLinkForwardModel(object):
 
         Parameters
         ----------
-        c_points : array like of shape (n_samples, n_params)
+        c_points : array of shape (n_samples, n_params)
             Collocation points (training set).
         prevRun_No : int, optional
             Previous run number, in case the sequential design is selected.
-            The default is 0.
-        key_str : string, optional
-            A descriptive string for validation runs. The default is &#39;&#39;.
+            The default is `0`.
+        key_str : str, optional
+            A descriptive string for validation runs. The default is `&#39;&#39;`.
         mp : bool, optional
-            Multiprocessing. The default is True.
+            Multiprocessing. The default is `True`.
 
         Returns
         -------
         all_outputs : dict
             A dictionary with x values (time step or point id) and all outputs.
-            Each key contains an array of the shape (n_samples, n_obs).
+            Each key contains an array of the shape `(n_samples, n_obs)`.
         new_c_points : array
             Updated collocation points (training set). If a simulation does not
             executed successfully, the parameter set is removed.
@@ -533,9 +593,9 @@ class PyLinkForwardModel(object):
 
         Parameters
         ----------
-        dir_name : string
+        dir_name : str
             Directory name.
-        key : string
+        key : str
             Keyword to search for.
 
         Returns
@@ -595,48 +655,189 @@ class PyLinkForwardModel(object):
 <dl>
 <dt id="pylink.PyLinkForwardModel"><code class="flex name class">
 <span>class <span class="ident">PyLinkForwardModel</span></span>
-<span>(</span><span>link_type='PyLink', name=None, shell_command='', py_file=None, input_file=None, input_template=None, aux_file=None, exe_path='', multi_process=True, n_cpus=None, output_parser='', output_names=[], output_file_names=[], meas_file=None, meas_file_valid=None, mc_ref_file=None, obs_dict={}, obs_dict_valid={}, mc_ref_dict={})</span>
+<span>(</span><span>link_type='pylink', name=None, py_file=None, shell_command='', input_file=None, input_template=None, aux_file=None, exe_path='', output_file_names=[], output_names=[], output_parser='', multi_process=True, n_cpus=None, meas_file=None, meas_file_valid=None, mc_ref_file=None, obs_dict={}, obs_dict_valid={}, mc_ref_dict={})</span>
 </code></dt>
 <dd>
-<div class="desc"><p>A forward model binder
-This calss serves as a code wrapper. This wrapper allows the execution of
-a third-party software/solver within the scope of BayesValidRox.
-The wrapper provides two options:
-1) link_type='PyLink':
-Runs the third-party software using a sell command with given input
-files.
-2) link_type='function':
-For this case, it is assumed that model can be run using a function
-written separately in a Python script. This function recieves the
-parameters in an array of shape (n_samples, n_params) and returns
-a dictionary with the x_values and output arrays for given output
-names.</p></div>
+<div class="desc"><p>A forward model binder</p>
+<p>This class serves as a code wrapper. This wrapper allows the execution of
+a third-party software/solver within the scope of BayesValidRox.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>link_type</code></strong> :&ensp;<code>str</code></dt>
+<dd>The type of the wrapper. The default is <code>'pylink'</code>. This runs the
+third-party software or an executable using a shell command with given
+input files.
+The second option is <code>'function'</code>, which assumes that the model can be
+run using a function written separately in a Python script.</dd>
+<dt><strong><code>name</code></strong> :&ensp;<code>str</code></dt>
+<dd>Name of the model.</dd>
+<dt><strong><code>py_file</code></strong> :&ensp;<code>str</code></dt>
+<dd>Python file name without <code>.py</code> extension to be run for the <code>'function'</code>
+wrapper. Note that the name of the python file and that of the function
+must be the same. This function must receive the parameters in an array
+of shape <code>(n_samples, n_params)</code> and return a dictionary with the
+x_values and output arrays for the given output names.</dd>
+<dt><strong><code>shell_command</code></strong> :&ensp;<code>str</code></dt>
+<dd>Shell command to be executed for the <code>'pylink'</code> wrapper.</dd>
+<dt><strong><code>input_file</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
+<dd>The input file to be passed to the <code>'pylink'</code> wrapper.</dd>
+<dt><strong><code>input_template</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
+<dd>A template input file to be passed to the <code>'pylink'</code> wrapper. This file
+must be a copy of <code>input_file</code> with <code>&lt;Xi&gt;</code> placeholders for the input
+parameters defined using the <code>inputs</code> class, with i being the parameter
+number. The file name should include <code>.tpl</code> before the actual
+extension of the input file, for example, <code>params.tpl.input</code>.</dd>
+<dt><strong><code>aux_file</code></strong> :&ensp;<code>str</code> or <code>list</code></dt>
+<dd>The list of auxiliary files needed for the <code>'pylink'</code> wrapper.</dd>
+<dt><strong><code>exe_path</code></strong> :&ensp;<code>str</code></dt>
+<dd>Execution path if you wish to run the model for the <code>'pylink'</code> wrapper
+in another directory. The default is <code>None</code>, which corresponds to the
+current working directory.</dd>
+<dt><strong><code>output_file_names</code></strong> :&ensp;<code>list</code> of <code>str</code></dt>
+<dd>List of the name of the model output text files for the <code>'pylink'</code>
+wrapper.</dd>
+<dt><strong><code>output_names</code></strong> :&ensp;<code>list</code> of <code>str</code></dt>
+<dd>List of the model outputs to be used for the analysis.</dd>
+<dt><strong><code>output_parser</code></strong> :&ensp;<code>str</code></dt>
+<dd>Name of the model parser file (without <code>.py</code> extension) that receives
+the <code>output_file_names</code> and returns a 2d-array with the first row being
+the x_values, e.g. x coordinates or time, and the remaining rows containing the
+simulation output for each model output defined in <code>output_names</code>. Note
+that again here the name of the file and that of the function must be
+the same.</dd>
+<dt><strong><code>multi_process</code></strong> :&ensp;<code>bool</code></dt>
+<dd>Whether the model runs are to be executed in parallel for the <code>'pylink'</code>
+wrapper. The default is <code>True</code>.</dd>
+<dt><strong><code>n_cpus</code></strong> :&ensp;<code>int</code></dt>
+<dd>The number of cpus to be used for the parallel model execution for the
+<code>'pylink'</code> wrapper. The default is <code>None</code>, which corresponds to all
+available cpus.</dd>
+<dt><strong><code>meas_file</code></strong> :&ensp;<code>str</code></dt>
+<dd>The name of the measurement text-based file. This file must contain
+x_values as the first column and one column for each model output. The
+default is <code>None</code>. Only needed for the Bayesian Inference.</dd>
+<dt><strong><code>meas_file_valid</code></strong> :&ensp;<code>str</code></dt>
+<dd>The name of the measurement text-based file for the validation. The
+default is <code>None</code>. Only needed for the validation with Bayesian
+Inference.</dd>
+<dt><strong><code>mc_ref_file</code></strong> :&ensp;<code>str</code></dt>
+<dd>The name of the text file for the Monte-Carlo reference (mean and
+standard deviation) values. It must contain <code>x_values</code> as the first
+column, <code>mean</code> as the second column and <code>std</code> as the third. It can be
+used to compare the estimated moments using meta-model in the post-
+processing step. This is only available for one output.</dd>
+<dt><strong><code>obs_dict</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary containing the measurement text-based file. It must
+contain <code>x_values</code> as the first item and one item for each model
+output. The default is <code>{}</code>. Only needed for the Bayesian Inference.</dd>
+<dt><strong><code>obs_dict_valid</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary containing the validation measurement text-based file. It
+must contain <code>x_values</code> as the first item and one item for each model
+output. The default is <code>{}</code>.</dd>
+<dt><strong><code>mc_ref_dict</code></strong> :&ensp;<code>dict</code></dt>
+<dd>A dictionary containing the Monte-Carlo reference (mean and standard
+deviation) values. It must contain <code>x_values</code> as the first item and
+<code>mean</code> as the second item and <code>std</code> as the third. The default is <code>{}</code>.
+This is only available for one output.</dd>
+</dl></div>
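+<p>A minimal construction sketch follows. The file, command, and function
+names here are hypothetical placeholders; the exact values depend on the
+third-party solver being wrapped:</p>
+<pre><code class="python">model = PyLinkForwardModel(
+    link_type=&#39;pylink&#39;,
+    name=&#39;my_solver&#39;,
+    shell_command=&#39;./my_solver params.input&#39;,
+    input_file=&#39;params.input&#39;,
+    input_template=&#39;params.tpl.input&#39;,
+    output_parser=&#39;read_my_solver_output&#39;,
+    output_names=[&#39;water_depth&#39;],
+    output_file_names=[&#39;my_solver_output.csv&#39;])</code></pre>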
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
 <pre><code class="python">class PyLinkForwardModel(object):
-    &#34;&#34;&#34;A forward model binder
+    &#34;&#34;&#34;
+    A forward model binder
+
     This calss serves as a code wrapper. This wrapper allows the execution of
-        a third-party software/solver within the scope of BayesValidRox.
-    The wrapper provides two options:
-        1) link_type=&#39;PyLink&#39;:
-            Runs the third-party software using a sell command with given input
-            files.
-        2) link_type=&#39;function&#39;:
-            For this case, it is assumed that model can be run using a function
-            written separately in a Python script. This function recieves the
-            parameters in an array of shape (n_samples, n_params) and returns
-            a dictionary with the x_values and output arrays for given output
-            names.
+    a third-party software/solver within the scope of BayesValidRox.
+
+    Attributes
+    ----------
+    link_type : str
+        The type of the wrapper. The default is `&#39;pylink&#39;`. This runs the
+        third-party software or an executable using a shell command with given
+        input files.
+        The second option is `&#39;function&#39;`, which assumes that the model can be
+        run using a function written separately in a Python script.
+    name : str
+        Name of the model.
+    py_file : str
+        Python file name without `.py` extension to be run for the `&#39;function&#39;`
+        wrapper. Note that the name of the python file and that of the function
+        must be the same. This function must receive the parameters in an array
+        of shape `(n_samples, n_params)` and return a dictionary with the
+        x_values and output arrays for the given output names.
+    shell_command : str
+        Shell command to be executed for the `&#39;pylink&#39;` wrapper.
+    input_file : str or list
+        The input file to be passed to the `&#39;pylink&#39;` wrapper.
+    input_template : str or list
+        A template input file to be passed to the `&#39;pylink&#39;` wrapper. This file
+        must be a copy of `input_file` with `&lt;Xi&gt;` placeholders for the input
+        parameters defined using the `inputs` class, with i being the parameter
+        number. The file name should include `.tpl` before the actual extension
+        of the input file, for example, `params.tpl.input`.
+    aux_file : str or list
+        The list of auxiliary files needed for the `&#39;pylink&#39;` wrapper.
+    exe_path : str
+        Execution path if you wish to run the model for the `&#39;pylink&#39;` wrapper
+        in another directory. The default is `None`, which corresponds to the
+        current working directory.
+    output_file_names : list of str
+        List of the name of the model output text files for the `&#39;pylink&#39;`
+        wrapper.
+    output_names : list of str
+        List of the model outputs to be used for the analysis.
+    output_parser : str
+        Name of the model parser file (without `.py` extension) that receives
+        the `output_file_names` and returns a 2d-array with the first row being
+        the x_values, e.g. x coordinates or time, and the remaining rows the
+        simulation output for each model output defined in `output_names`. Note
+        that again here the name of the file and that of the function must be
+        the same.
+    multi_process: bool
+        Whether the model runs are to be executed in parallel for the `&#39;pylink&#39;`
+        wrapper. The default is `True`.
+    n_cpus: int
+        The number of cpus to be used for the parallel model execution for the
+        `&#39;pylink&#39;` wrapper. The default is `None`, which corresponds to all
+        available cpus.
+    meas_file : str
+        The name of the measurement text-based file. This file must contain
+        x_values as the first column and one column for each model output. The
+        default is `None`. Only needed for the Bayesian Inference.
+    meas_file_valid : str
+        The name of the measurement text-based file for the validation. The
+        default is `None`. Only needed for the validation with Bayesian
+        Inference.
+    mc_ref_file : str
+        The name of the text file for the Monte-Carlo reference (mean and
+        standard deviation) values. It must contain `x_values` as the first
+        column, `mean` as the second column and `std` as the third. It can be
+        used to compare the estimated moments using meta-model in the post-
+        processing step. This is only available for one output.
+    obs_dict : dict
+        A dictionary containing the measurement text-based file. It must
+        contain `x_values` as the first item and one item for each model
+        output. The default is `{}`. Only needed for the Bayesian Inference.
+    obs_dict_valid : dict
+        A dictionary containing the validation measurement text-based file. It
+        must contain `x_values` as the first item and one item for each model
+        output. The default is `{}`.
+    mc_ref_dict : dict
+        A dictionary containing the Monte-Carlo reference (mean and standard
+        deviation) values. It must contain `x_values` as the first item and
+        `mean` as the second item and `std` as the third. The default is `{}`.
+        This is only available for one output.
     &#34;&#34;&#34;
 
-    def __init__(self, link_type=&#39;PyLink&#39;, name=None, shell_command=&#39;&#39;,
-                 py_file=None, input_file=None, input_template=None,
-                 aux_file=None, exe_path=&#39;&#39;, multi_process=True, n_cpus=None,
-                 output_parser=&#39;&#39;, output_names=[], output_file_names=[],
-                 meas_file=None, meas_file_valid=None, mc_ref_file=None,
-                 obs_dict={}, obs_dict_valid={}, mc_ref_dict={}):
+    def __init__(self, link_type=&#39;pylink&#39;, name=None, py_file=None,
+                 shell_command=&#39;&#39;, input_file=None, input_template=None,
+                 aux_file=None, exe_path=&#39;&#39;, output_file_names=[],
+                 output_names=[], output_parser=&#39;&#39;, multi_process=True,
+                 n_cpus=None, meas_file=None, meas_file_valid=None,
+                 mc_ref_file=None, obs_dict={}, obs_dict_valid={},
+                 mc_ref_dict={}):
         self.link_type = link_type
         self.name = name
         self.shell_command = shell_command
@@ -769,24 +970,24 @@ names.</p></div>
         return output
 
     # -------------------------------------------------------------------------
-    def update_input_params(self, new_input_file, param_sets):
+    def update_input_params(self, new_input_file, param_set):
         &#34;&#34;&#34;
         Finds this pattern with &lt;X1&gt; in the new_input_file and replace it with
          the new value from the array param_sets.
 
         Parameters
         ----------
-        new_input_file : TYPE
-            DESCRIPTION.
-        param_sets : TYPE
-            DESCRIPTION.
+        new_input_file : list
+            List of the input files with the adapted names.
+        param_set : array of shape (n_params)
+            Parameter set.
 
         Returns
         -------
         None.
 
         &#34;&#34;&#34;
-        NofPa = param_sets.shape[0]
+        NofPa = param_set.shape[0]
         text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
 
         for filename in new_input_file:
@@ -795,7 +996,7 @@ names.</p></div>
                 filedata = file.read()
 
             # Replace the target string
-            for text_to_search, params in zip(text_to_search_list, param_sets):
+            for text_to_search, params in zip(text_to_search_list, param_set):
                 filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
 
             # Write the file out again
@@ -807,13 +1008,13 @@ names.</p></div>
         &#34;&#34;&#34;
         Runs the execution command given by the user to run the given model.
         It checks if the output files have been generated. If yes, the jobe is
-         done and it extracts and returns the requested output(s). Otherwise,
-         it executes the command again.
+        done and it extracts and returns the requested output(s). Otherwise,
+        it executes the command again.
 
         Parameters
         ----------
-        command : string
-            The command to be executed.
+        command : str
+            The shell command to be executed.
         output_file_names : list
             Name of the output file names.
 
@@ -851,6 +1052,19 @@ names.</p></div>
         This function creates subdirectory for the current run and copies the
         necessary files to this directory and renames them. Next, it executes
         the given command.
+
+        Parameters
+        ----------
+        xx : tuple
+            A tuple including parameter set, simulation number and key string.
+
+        Returns
+        -------
+        output : array of shape (n_outputs+1, n_obs)
+            An array returned by the output parser containing the x_values as
+            the first row and the simulation results stored in the rest of
+            the array.
+
         &#34;&#34;&#34;
         c_points, run_no, key_str = xx
 
@@ -918,21 +1132,21 @@ names.</p></div>
 
         Parameters
         ----------
-        c_points : array like of shape (n_samples, n_params)
+        c_points : array of shape (n_samples, n_params)
             Collocation points (training set).
         prevRun_No : int, optional
             Previous run number, in case the sequential design is selected.
-            The default is 0.
-        key_str : string, optional
-            A descriptive string for validation runs. The default is &#39;&#39;.
+            The default is `0`.
+        key_str : str, optional
+            A descriptive string for validation runs. The default is `&#39;&#39;`.
         mp : bool, optional
-            Multiprocessing. The default is True.
+            Multiprocessing. The default is `True`.
 
         Returns
         -------
         all_outputs : dict
             A dictionary with x values (time step or point id) and all outputs.
-            Each key contains an array of the shape (n_samples, n_obs).
+            Each key contains an array of the shape `(n_samples, n_obs)`.
         new_c_points : array
             Updated collocation points (training set). If a simulation does not
             executed successfully, the parameter set is removed.
@@ -1083,9 +1297,9 @@ names.</p></div>
 
         Parameters
         ----------
-        dir_name : string
+        dir_name : str
             Directory name.
-        key : string
+        key : str
             Keyword to search for.
 
         Returns
@@ -1142,43 +1356,20 @@ names.</p></div>
 </dl>
 <h3>Methods</h3>
 <dl>
-<dt id="pylink.PyLinkForwardModel.read_mc_reference"><code class="name flex">
-<span>def <span class="ident">read_mc_reference</span></span>(<span>self)</span>
+<dt id="pylink.PyLinkForwardModel.within_range"><code class="name flex">
+<span>def <span class="ident">within_range</span></span>(<span>self, out, minout, maxout)</span>
 </code></dt>
 <dd>
-<div class="desc"><p>Is used, if a Monte-Carlo reference is available for
-further in-depth post-processing after meta-model training.</p>
-<h2 id="returns">Returns</h2>
-<dl>
-<dt><code>None</code></dt>
-<dd>&nbsp;</dd>
-</dl></div>
+<div class="desc"></div>
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
-<pre><code class="python">def read_mc_reference(self):
-    &#34;&#34;&#34;
-    Is used, if a Monte-Carlo reference is available for
-    further in-depth post-processing after meta-model training.
-
-    Returns
-    -------
-    None
-
-    &#34;&#34;&#34;
-    if self.mc_ref_file is None and not hasattr(self, &#39;mc_reference&#39;):
-        return
-    elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
-        self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
-    elif self.mc_ref_file is not None:
-        file_path = os.path.join(os.getcwd(), self.mc_ref_file)
-        self.mc_reference = pd.read_csv(file_path, delimiter=&#39;,&#39;)
-    else:
-        raise Exception(&#34;Please provide the MC reference data as a &#34;
-                        &#34;dictionary via mc_reference attribute or pass the&#34;
-                        &#34; csv-file path to mc_ref_file attribute&#34;)
-    return self.mc_reference</code></pre>
+<pre><code class="python">def within_range(self, out, minout, maxout):
+    inside = False
+    if (out &gt; minout).all() and (out &lt; maxout).all():
+        inside = True
+    return inside</code></pre>
 </details>
 </dd>
 <dt id="pylink.PyLinkForwardModel.read_observation"><code class="name flex">
@@ -1243,6 +1434,45 @@ calibration.</p>
         return self.observations_valid</code></pre>
 </details>
 </dd>
+<dt id="pylink.PyLinkForwardModel.read_mc_reference"><code class="name flex">
+<span>def <span class="ident">read_mc_reference</span></span>(<span>self)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Is used, if a Monte-Carlo reference is available for
+further in-depth post-processing after meta-model training.</p>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>mc_reference</code></strong> :&ensp;<code>pandas.DataFrame</code> or <code>None</code></dt>
+<dd>The Monte-Carlo reference data, or <code>None</code> if no reference is set.</dd>
+</dl></div>
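+<p>Assuming a comma-separated file, the expected layout is a header row
+<code>x_values,mean,std</code> followed by one row per x value, matching the
+<code>pd.read_csv</code> call in the source below.</p>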
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def read_mc_reference(self):
+    &#34;&#34;&#34;
+    Used if a Monte-Carlo reference is available for
+    further in-depth post-processing after meta-model training.
+
+    Returns
+    -------
+    mc_reference : pandas.DataFrame or None
+        The Monte-Carlo reference data, or None if no reference is set.
+
+    &#34;&#34;&#34;
+    if self.mc_ref_file is None and not hasattr(self, &#39;mc_reference&#39;):
+        return
+    elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
+        self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
+    elif self.mc_ref_file is not None:
+        file_path = os.path.join(os.getcwd(), self.mc_ref_file)
+        self.mc_reference = pd.read_csv(file_path, delimiter=&#39;,&#39;)
+    else:
+        raise Exception(&#34;Please provide the MC reference data as a &#34;
+                        &#34;dictionary via mc_reference attribute or pass the&#34;
+                        &#34; csv-file path to mc_ref_file attribute&#34;)
+    return self.mc_reference</code></pre>
+</details>
+</dd>
 <dt id="pylink.PyLinkForwardModel.read_output"><code class="name flex">
 <span>def <span class="ident">read_output</span></span>(<span>self)</span>
 </code></dt>
@@ -1285,6 +1515,59 @@ simulation outputs in csv files.</p>
     return output</code></pre>
 </details>
 </dd>
+<dt id="pylink.PyLinkForwardModel.update_input_params"><code class="name flex">
+<span>def <span class="ident">update_input_params</span></span>(<span>self, new_input_file, param_set)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Finds this pattern with <X1> in the new_input_file and replace it with
+the new value from the array param_sets.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>new_input_file</code></strong> :&ensp;<code>list</code></dt>
+<dd>List of the input files with the adapted names.</dd>
+<dt><strong><code>param_set</code></strong> :&ensp;<code>array</code> of <code>shape (n_params)</code></dt>
+<dd>Parameter set.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<p>None.</p></div>
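+<p>As a hypothetical example, a template line <code>porosity = &lt;X1&gt;</code> is
+rewritten to <code>porosity = 3.1000e-01</code> for
+<code>param_set = np.array([0.31])</code>, since each value is formatted with
+<code>{params:0.4e}</code>.</p>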
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def update_input_params(self, new_input_file, param_set):
+    &#34;&#34;&#34;
+    Finds this pattern with &lt;X1&gt; in the new_input_file and replace it with
+     the new value from the array param_sets.
+
+    Parameters
+    ----------
+    new_input_file : list
+        List of the input files with the adapted names.
+    param_set : array of shape (n_params)
+        Parameter set.
+
+    Returns
+    -------
+    None.
+
+    &#34;&#34;&#34;
+    NofPa = param_set.shape[0]
+    text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
+
+    for filename in new_input_file:
+        # Read in the file
+        with open(filename, &#39;r&#39;) as file:
+            filedata = file.read()
+
+        # Replace the target string
+        for text_to_search, params in zip(text_to_search_list, param_set):
+            filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
+
+        # Write the file out again
+        with open(filename, &#39;w&#39;) as file:
+            file.write(filedata)</code></pre>
+</details>
+</dd>
 <dt id="pylink.PyLinkForwardModel.run_command"><code class="name flex">
 <span>def <span class="ident">run_command</span></span>(<span>self, command, output_file_names)</span>
 </code></dt>
@@ -1295,8 +1578,8 @@ done and it extracts and returns the requested output(s). Otherwise,
 it executes the command again.</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
-<dt><strong><code>command</code></strong> :&ensp;<code>string</code></dt>
-<dd>The command to be executed.</dd>
+<dt><strong><code>command</code></strong> :&ensp;<code>str</code></dt>
+<dd>The shell command to be executed.</dd>
 <dt><strong><code>output_file_names</code></strong> :&ensp;<code>list</code></dt>
 <dd>Name of the output file names.</dd>
 </dl>
@@ -1313,13 +1596,13 @@ it executes the command again.</p>
     &#34;&#34;&#34;
     Runs the execution command given by the user to run the given model.
     It checks if the output files have been generated. If yes, the jobe is
-     done and it extracts and returns the requested output(s). Otherwise,
-     it executes the command again.
+    done and it extracts and returns the requested output(s). Otherwise,
+    it executes the command again.
 
     Parameters
     ----------
-    command : string
-        The command to be executed.
+    command : str
+        The shell command to be executed.
     output_file_names : list
         Name of the output file names.
 
@@ -1358,7 +1641,19 @@ it executes the command again.</p>
 <dd>
 <div class="desc"><p>This function creates subdirectory for the current run and copies the
 necessary files to this directory and renames them. Next, it executes
-the given command.</p></div>
+the given command.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>xx</code></strong> :&ensp;<code>tuple</code></dt>
+<dd>A tuple including parameter set, simulation number and key string.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>output</code></strong> :&ensp;<code>array</code> of <code>shape (n_outputs+1, n_obs)</code></dt>
+<dd>An array returned by the output parser containing the x_values as
+the first row and the simulation results stored in the rest of
+the array.</dd>
+</dl></div>
 <details class="source">
 <summary>
 <span>Expand source code</span>
@@ -1368,6 +1663,19 @@ the given command.</p></div>
     This function creates subdirectory for the current run and copies the
     necessary files to this directory and renames them. Next, it executes
     the given command.
+
+    Parameters
+    ----------
+    xx : tuple
+        A tuple including parameter set, simulation number and key string.
+
+    Returns
+    -------
+    output : array of shape (n_outputs+1, n_obs)
+        An array returned by the output parser containing the x_values as
+        the first row and the simulation results stored in the rest of
+        the array.
+
     &#34;&#34;&#34;
     c_points, run_no, key_str = xx
 
@@ -1435,21 +1743,21 @@ the given command.</p></div>
 are started in parallel.</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
-<dt><strong><code>c_points</code></strong> :&ensp;<code>array like</code> of <code>shape (n_samples, n_params)</code></dt>
+<dt><strong><code>c_points</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
 <dd>Collocation points (training set).</dd>
 <dt><strong><code>prevRun_No</code></strong> :&ensp;<code>int</code>, optional</dt>
 <dd>Previous run number, in case the sequential design is selected.
-The default is 0.</dd>
-<dt><strong><code>key_str</code></strong> :&ensp;<code>string</code>, optional</dt>
-<dd>A descriptive string for validation runs. The default is ''.</dd>
+The default is <code>0</code>.</dd>
+<dt><strong><code>key_str</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>A descriptive string for validation runs. The default is <code>''</code>.</dd>
 <dt><strong><code>mp</code></strong> :&ensp;<code>bool</code>, optional</dt>
-<dd>Multiprocessing. The default is True.</dd>
+<dd>Multiprocessing. The default is <code>True</code>.</dd>
 </dl>
 <h2 id="returns">Returns</h2>
 <dl>
 <dt><strong><code>all_outputs</code></strong> :&ensp;<code>dict</code></dt>
 <dd>A dictionary with x values (time step or point id) and all outputs.
-Each key contains an array of the shape (n_samples, n_obs).</dd>
+Each key contains an array of the shape <code>(n_samples, n_obs)</code>.</dd>
 <dt><strong><code>new_c_points</code></strong> :&ensp;<code>array</code></dt>
 <dd>Updated collocation points (training set). If a simulation does not
 executed successfully, the parameter set is removed.</dd>
@@ -1466,21 +1774,21 @@ executed successfully, the parameter set is removed.</dd>
 
     Parameters
     ----------
-    c_points : array like of shape (n_samples, n_params)
+    c_points : array of shape (n_samples, n_params)
         Collocation points (training set).
     prevRun_No : int, optional
         Previous run number, in case the sequential design is selected.
-        The default is 0.
-    key_str : string, optional
-        A descriptive string for validation runs. The default is &#39;&#39;.
+        The default is `0`.
+    key_str : str, optional
+        A descriptive string for validation runs. The default is `&#39;&#39;`.
     mp : bool, optional
-        Multiprocessing. The default is True.
+        Multiprocessing. The default is `True`.
 
     Returns
     -------
     all_outputs : dict
         A dictionary with x values (time step or point id) and all outputs.
-        Each key contains an array of the shape (n_samples, n_obs).
+        Each key contains an array of the shape `(n_samples, n_obs)`.
     new_c_points : array
         Updated collocation points (training set). If a simulation does not
         executed successfully, the parameter set is removed.
@@ -1625,75 +1933,6 @@ executed successfully, the parameter set is removed.</dd>
     return all_outputs, new_c_points</code></pre>
 </details>
 </dd>
-<dt id="pylink.PyLinkForwardModel.update_input_params"><code class="name flex">
-<span>def <span class="ident">update_input_params</span></span>(<span>self, new_input_file, param_sets)</span>
-</code></dt>
-<dd>
-<div class="desc"><p>Finds this pattern with <X1> in the new_input_file and replace it with
-the new value from the array param_sets.</p>
-<h2 id="parameters">Parameters</h2>
-<dl>
-<dt><strong><code>new_input_file</code></strong> :&ensp;<code>TYPE</code></dt>
-<dd>DESCRIPTION.</dd>
-<dt><strong><code>param_sets</code></strong> :&ensp;<code>TYPE</code></dt>
-<dd>DESCRIPTION.</dd>
-</dl>
-<h2 id="returns">Returns</h2>
-<p>None.</p></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def update_input_params(self, new_input_file, param_sets):
-    &#34;&#34;&#34;
-    Finds this pattern with &lt;X1&gt; in the new_input_file and replace it with
-     the new value from the array param_sets.
-
-    Parameters
-    ----------
-    new_input_file : TYPE
-        DESCRIPTION.
-    param_sets : TYPE
-        DESCRIPTION.
-
-    Returns
-    -------
-    None.
-
-    &#34;&#34;&#34;
-    NofPa = param_sets.shape[0]
-    text_to_search_list = [f&#39;&lt;X{i+1}&gt;&#39; for i in range(NofPa)]
-
-    for filename in new_input_file:
-        # Read in the file
-        with open(filename, &#39;r&#39;) as file:
-            filedata = file.read()
-
-        # Replace the target string
-        for text_to_search, params in zip(text_to_search_list, param_sets):
-            filedata = filedata.replace(text_to_search, f&#39;{params:0.4e}&#39;)
-
-        # Write the file out again
-        with open(filename, &#39;w&#39;) as file:
-            file.write(filedata)</code></pre>
-</details>
-</dd>
-<dt id="pylink.PyLinkForwardModel.within_range"><code class="name flex">
-<span>def <span class="ident">within_range</span></span>(<span>self, out, minout, maxout)</span>
-</code></dt>
-<dd>
-<div class="desc"></div>
-<details class="source">
-<summary>
-<span>Expand source code</span>
-</summary>
-<pre><code class="python">def within_range(self, out, minout, maxout):
-    inside = False
-    if (out &gt; minout).all() and (out &lt; maxout).all():
-        inside = True
-    return inside</code></pre>
-</details>
-</dd>
 <dt id="pylink.PyLinkForwardModel.zip_subdirs"><code class="name flex">
 <span>def <span class="ident">zip_subdirs</span></span>(<span>self, dir_name, key)</span>
 </code></dt>
@@ -1701,9 +1940,9 @@ the new value from the array param_sets.</p>
 <div class="desc"><p>Zips all the files containing the key(word).</p>
 <h2 id="parameters">Parameters</h2>
 <dl>
-<dt><strong><code>dir_name</code></strong> :&ensp;<code>string</code></dt>
+<dt><strong><code>dir_name</code></strong> :&ensp;<code>str</code></dt>
 <dd>Directory name.</dd>
-<dt><strong><code>key</code></strong> :&ensp;<code>string</code></dt>
+<dt><strong><code>key</code></strong> :&ensp;<code>str</code></dt>
 <dd>Keyword to search for.</dd>
 </dl>
 <h2 id="returns">Returns</h2>
@@ -1718,9 +1957,9 @@ the new value from the array param_sets.</p>
 
     Parameters
     ----------
-    dir_name : string
+    dir_name : str
         Directory name.
-    key : string
+    key : str
         Keyword to search for.
 
     Returns
@@ -1785,16 +2024,16 @@ the new value from the array param_sets.</p>
 <li>
 <h4><code><a title="pylink.PyLinkForwardModel" href="#pylink.PyLinkForwardModel">PyLinkForwardModel</a></code></h4>
 <ul class="two-column">
-<li><code><a title="pylink.PyLinkForwardModel.Output" href="#pylink.PyLinkForwardModel.Output">Output</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.read_mc_reference" href="#pylink.PyLinkForwardModel.read_mc_reference">read_mc_reference</a></code></li>
+<li><code><a title="pylink.PyLinkForwardModel.within_range" href="#pylink.PyLinkForwardModel.within_range">within_range</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.read_observation" href="#pylink.PyLinkForwardModel.read_observation">read_observation</a></code></li>
+<li><code><a title="pylink.PyLinkForwardModel.read_mc_reference" href="#pylink.PyLinkForwardModel.read_mc_reference">read_mc_reference</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.read_output" href="#pylink.PyLinkForwardModel.read_output">read_output</a></code></li>
+<li><code><a title="pylink.PyLinkForwardModel.update_input_params" href="#pylink.PyLinkForwardModel.update_input_params">update_input_params</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.run_command" href="#pylink.PyLinkForwardModel.run_command">run_command</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.run_forwardmodel" href="#pylink.PyLinkForwardModel.run_forwardmodel">run_forwardmodel</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.run_model_parallel" href="#pylink.PyLinkForwardModel.run_model_parallel">run_model_parallel</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.update_input_params" href="#pylink.PyLinkForwardModel.update_input_params">update_input_params</a></code></li>
-<li><code><a title="pylink.PyLinkForwardModel.within_range" href="#pylink.PyLinkForwardModel.within_range">within_range</a></code></li>
 <li><code><a title="pylink.PyLinkForwardModel.zip_subdirs" href="#pylink.PyLinkForwardModel.zip_subdirs">zip_subdirs</a></code></li>
+<li><code><a title="pylink.PyLinkForwardModel.Output" href="#pylink.PyLinkForwardModel.Output">Output</a></code></li>
 </ul>
 </li>
 </ul>
diff --git a/docs/html/surrogate_models.html b/docs/html/surrogate_models.html
new file mode 100644
index 0000000000000000000000000000000000000000..f1cb980f7a9cc8f8fd800606cf156e824d51c1c5
--- /dev/null
+++ b/docs/html/surrogate_models.html
@@ -0,0 +1,4282 @@
+<!doctype html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
+<meta name="generator" content="pdoc 0.10.0" />
+<title>surrogate_models API documentation</title>
+<meta name="description" content="" />
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
+<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
+<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
+<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
+<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
+<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
+<script async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS_CHTML" integrity="sha256-kZafAc6mZvK3W3v1pHOcUix30OHQN6pU/NO2oFkqZVw=" crossorigin></script>
+<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
+<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
+</head>
+<body>
+<main>
+<article id="content">
+<header>
+<h1 class="title">Module <code>surrogate_models</code></h1>
+</header>
+<section id="section-intro">
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import warnings
+import numpy as np
+import math
+import h5py
+import matplotlib.pyplot as plt
+from sklearn.preprocessing import MinMaxScaler
+import scipy as sp
+from tqdm import tqdm
+from sklearn.decomposition import PCA as sklearnPCA
+import sklearn.linear_model as lm
+from sklearn.gaussian_process import GaussianProcessRegressor
+import sklearn.gaussian_process.kernels as kernels
+import os
+import sys
+from joblib import Parallel, delayed
+
+from exp_designs import ExpDesigns
+from glexindex import glexindex
+from eval_rec_rule import eval_univ_basis
+from reg_fast_ard import RegressionFastARD
+from reg_fast_laplace import RegressionFastLaplace
+from bayes_linear import VBLinearRegression, EBLinearRegression
+warnings.filterwarnings(&#34;ignore&#34;)
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           &#39;../&#39;, &#39;bayesvalidrox.mplstyle&#39;))
+
+
+class MetaModel:
+    &#34;&#34;&#34;
+    Meta (surrogate) model
+
+    This class trains a surrogate model. It accepts an input object (input_obj)
+    containing the specification of the distributions for uncertain parameters
+    and a model object with instructions on how to run the computational model.
+
+    Attributes
+    ----------
+    input_obj : obj
+        Input object with the information on the model input parameters.
+    meta_model_type : str
+        Surrogate model types. Three surrogate model types are supported:
+        polynomial chaos expansion (`PCE`), arbitrary PCE (`aPCE`) and
+        Gaussian process regression (`GPE`). Default is PCE.
+    pce_reg_method : str
+        PCE regression method to compute the coefficients. The following
+        regression methods are available:
+
+        1. OLS: Ordinary Least Square method
+        2. BRR: Bayesian Ridge Regression
+        3. LARS: Least angle regression
+        4. ARD: Bayesian ARD Regression
+        5. FastARD: Fast Bayesian ARD Regression
+        6. VBL: Variational Bayesian Learning
+        7. EBL: Empirical Bayesian Learning
+        Default is `OLS`.
+    pce_deg : int or list of int
+        Polynomial degree(s). If a list is given, an adaptive algorithm is used
+        to find the best degree with the lowest Leave-One-Out cross-validation
+        (LOO) error (or the highest score=1-LOO). Default is `1`.
+    pce_q_norm : float
+        Hyperbolic (or q-norm) truncation for multi-indices of multivariate
+        polynomials. Default is `1.0`.
+    dim_red_method : str
+        Dimensionality reduction method for the output space. The available
+        method is based on principal component analysis (PCA). The default is
+        `&#39;no&#39;`. The number of components can be selected either via a
+        threshold on the explained variance in percent (Option A) or by
+        prescribing the number of components directly (Option B):
+
+            &gt;&gt;&gt; MetaModelOpts.dim_red_method = &#39;PCA&#39;
+            &gt;&gt;&gt; MetaModelOpts.var_pca_threshold = 99.999  # Option A
+            &gt;&gt;&gt; MetaModelOpts.n_pca_components = 12 # Option B
+
+    verbose : bool
+        Prints summary of the regression results. Default is `False`.
+
+    Note
+    -------
+    To define the sampling methods and the training set, an experimental design
+    instance shall be defined. This can be done by:
+
+    &gt;&gt;&gt; MetaModelOpts.add_ExpDesign()
+
+    Two experimental design schemes are supported: one-shot (`normal`) and
+    adaptive sequential (`sequential`) designs.
+    For experimental design refer to `ExpDesigns`.
+
+    &#34;&#34;&#34;
+
+    def __init__(self, input_obj, meta_model_type=&#39;PCE&#39;, pce_reg_method=&#39;OLS&#39;,
+                 pce_deg=1, pce_q_norm=1.0, dim_red_method=&#39;no&#39;,
+                 verbose=False):
+
+        self.input_obj = input_obj
+        self.meta_model_type = meta_model_type
+        self.pce_reg_method = pce_reg_method
+        self.pce_deg = pce_deg
+        self.pce_q_norm = pce_q_norm
+        self.dim_red_method = dim_red_method
+        self.verbose = verbose
+
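+    # Illustrative usage sketch (comment only; `Inputs` and `Model` are
+    # assumed to be an input object and a model object set up as described
+    # in the class docstring, and the ExpDesign option names follow
+    # `ExpDesigns`):
+    #
+    #     MetaModelOpts = MetaModel(Inputs, meta_model_type=&#39;PCE&#39;,
+    #                               pce_reg_method=&#39;FastARD&#39;,
+    #                               pce_deg=[1, 2, 3], pce_q_norm=0.75)
+    #     MetaModelOpts.add_ExpDesign()
+    #     MetaModelOpts.ExpDesign.method = &#39;normal&#39;
+    #     MetaModelOpts.ExpDesign.n_init_samples = 100
+    #     MetaModelOpts.ExpDesign.sampling_method = &#39;random&#39;
+    #     metamodel = MetaModelOpts.create_metamodel(Model)
+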
+    # -------------------------------------------------------------------------
+    def create_metamodel(self, Model):
+        &#34;&#34;&#34;
+        Starts the training of the meta-model for the model object containing
+        the given computational model.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+
+        Returns
+        -------
+        metamodel : obj
+            The meta model object.
+
+        &#34;&#34;&#34;
+        self.ModelObj = Model
+        self.n_params = len(self.input_obj.Marginals)
+        self.ExpDesignFlag = &#39;normal&#39;
+        # --- Prepare pce degree ---
+        if self.meta_model_type.lower() == &#39;pce&#39;:
+            if type(self.pce_deg) is not np.ndarray:
+                self.pce_deg = np.array(self.pce_deg)
+
+        if self.ExpDesign.method == &#39;sequential&#39;:
+            from .sequential_design import SeqDesign
+            seq_design = SeqDesign(self)
+            metamodel = seq_design.train_seq_design(Model)
+
+        elif self.ExpDesign.method == &#39;normal&#39;:
+            self.ExpDesignFlag = &#39;normal&#39;
+            metamodel = self.train_norm_design(Model)
+
+        else:
+            raise Exception(&#34;The method for experimental design you requested&#34;
+                            &#34; has not been implemented yet.&#34;)
+
+        # Zip the model run directories
+        if self.ModelObj.link_type.lower() == &#39;pylink&#39;:
+            Model.zip_subdirs(Model.name, f&#39;{Model.name}_&#39;)
+
+        return metamodel
+
+    # -------------------------------------------------------------------------
+    def train_norm_design(self, Model, verbose=False):
+        &#34;&#34;&#34;
+        This function loops over the outputs and each time step/point and fits
+        the meta model.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+        verbose : bool, optional
+            Flag for a sequential design in silent mode. The default is False.
+
+        Returns
+        -------
+        self: obj
+            Meta-model object.
+
+        &#34;&#34;&#34;
+
+        # Get the collocation points to run the forward model
+        CollocationPoints, OutputDict = self.generate_ExpDesign(Model)
+
+        # Initialize the nested dictionaries
+        self.deg_dict = self.auto_vivification()
+        self.q_norm_dict = self.auto_vivification()
+        self.coeffs_dict = self.auto_vivification()
+        self.basis_dict = self.auto_vivification()
+        self.score_dict = self.auto_vivification()
+        self.clf_poly = self.auto_vivification()
+        self.gp_poly = self.auto_vivification()
+        self.pca = self.auto_vivification()
+        self.LCerror = self.auto_vivification()
+        self.x_scaler = {}
+
+        # Define the DegreeArray
+        nSamples, ndim = CollocationPoints.shape
+        self.DegreeArray = self.__select_degree(ndim, nSamples)
+
+        # Generate all basis indices
+        self.allBasisIndices = self.auto_vivification()
+        for deg in self.DegreeArray:
+            keys = self.allBasisIndices.keys()
+            if deg not in np.fromiter(keys, dtype=float):
+                # Generate the polynomial basis indices
+                for qidx, q in enumerate(self.pce_q_norm):
+                    basis_indices = self.create_basis_indices(degree=deg,
+                                                              q_norm=q)
+                    self.allBasisIndices[str(deg)][str(q)] = basis_indices
+
+        # Evaluate the univariate polynomials on ExpDesign
+        if self.meta_model_type.lower() != &#39;gpe&#39;:
+            self.univ_p_val = self.univ_basis_vals(CollocationPoints)
+
+        if &#39;x_values&#39; in OutputDict:
+            self.ExpDesign.x_values = OutputDict[&#39;x_values&#39;]
+            del OutputDict[&#39;x_values&#39;]
+
+        # --- Loop through data points and fit the surrogate ---
+        if not verbose:
+            print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel &#34;
+                  &#34;started. &lt;&lt;&lt;&lt;\n&#34;)
+            items = tqdm(OutputDict.items(), desc=&#34;Fitting regression&#34;)
+        else:
+            items = OutputDict.items()
+
+        # For loop over the components/outputs
+        for key, Output in items:
+
+            # Dimensionality reduction with PCA, if specified
+            if self.dim_red_method.lower() == &#39;pca&#39;:
+                self.pca[key], target = self.pca_transformation(Output)
+            else:
+                target = Output
+
+            # Parallel fit regression
+            if self.meta_model_type.lower() == &#39;gpe&#39;:
+                # Prepare the input matrix
+                scaler = MinMaxScaler()
+                X_S = scaler.fit_transform(CollocationPoints)
+
+                self.x_scaler[key] = scaler
+
+                out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                    delayed(self.gaussian_process_emulator)(X_S, target[:, idx])
+                    for idx in range(target.shape[1]))
+
+                for idx in range(target.shape[1]):
+                    self.gp_poly[key][f&#34;y_{idx+1}&#34;] = out[idx]
+
+            else:
+                out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                    delayed(self.adaptive_regression)(CollocationPoints,
+                                                      target[:, idx], idx)
+                    for idx in range(target.shape[1]))
+
+                for i in range(target.shape[1]):
+                    # Create a dict to pass the variables
+                    self.deg_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;degree&#39;]
+                    self.q_norm_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;qnorm&#39;]
+                    self.coeffs_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;coeffs&#39;]
+                    self.basis_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;multi_indices&#39;]
+                    self.score_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LOOCVScore&#39;]
+                    self.clf_poly[key][f&#34;y_{i+1}&#34;] = out[i][&#39;clf_poly&#39;]
+                    self.LCerror[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LCerror&#39;]
+
+        if not verbose:
+            print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel&#34;
+                  &#34; successfully completed. &lt;&lt;&lt;&lt;\n&#34;)
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def create_basis_indices(self, degree, q_norm):
+        &#34;&#34;&#34;
+        Creates set of selected multi-indices of multivariate polynomials for
+        certain parameter numbers, polynomial degree, hyperbolic (or q-norm)
+        truncation scheme.
+
+        Parameters
+        ----------
+        degree : int
+            Polynomial degree.
+        q_norm : float
+            hyperbolic (or q-norm) truncation.
+
+        Returns
+        -------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+
+        &#34;&#34;&#34;
+        basis_indices = glexindex(start=0, stop=degree+1,
+                                  dimensions=self.n_params,
+                                  cross_truncation=q_norm,
+                                  reverse=False, graded=True)
+        return basis_indices
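+
+    # For illustration: with degree=2, two parameters and q_norm=1.0, the
+    # total-degree basis consists of the six multi-indices
+    #     (0,0), (1,0), (0,1), (2,0), (1,1), (0,2)
+    # (up to the ordering conventions of `glexindex`); q_norm &lt; 1.0 prunes
+    # the high-interaction terms from this set.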
+
+    # -------------------------------------------------------------------------
+    def add_ExpDesign(self):
+        &#34;&#34;&#34;
+        Instantiates the experimental design object.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        self.ExpDesign = ExpDesigns(self.input_obj,
+                                    meta_Model=self.meta_model_type)
+
+    # -------------------------------------------------------------------------
+    def generate_ExpDesign(self, Model):
+        &#34;&#34;&#34;
+        Prepares the experimental design either by reading from the prescribed
+        data or running simulations.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+
+        Raises
+        ------
+        Exception
+            If model simulations are not provided properly.
+
+        Returns
+        -------
+        ED_X_tr: array of shape (n_samples, n_params)
+            Training samples transformed by an isoprobabilistic transformation.
+        ED_Y: dict
+            Model simulations (target) for all outputs.
+        &#34;&#34;&#34;
+        ExpDesign = self.ExpDesign
+        if self.ExpDesignFlag != &#39;sequential&#39;:
+            # Read ExpDesign (training and targets) from the provided hdf5
+            if ExpDesign.hdf5_file is not None:
+
+                # Read hdf5 file
+                f = h5py.File(ExpDesign.hdf5_file, &#39;r+&#39;)
+
+                # Read EDX and pass it to ExpDesign object
+                try:
+                    ExpDesign.X = np.array(f[&#34;EDX/New_init_&#34;])
+                except KeyError:
+                    ExpDesign.X = np.array(f[&#34;EDX/init_&#34;])
+
+                # Update number of initial samples
+                ExpDesign.n_init_samples = ExpDesign.X.shape[0]
+
+                # Read EDY and pass it to ExpDesign object
+                out_names = self.ModelObj.Output.names
+                ExpDesign.Y = {}
+
+                # Extract x values
+                try:
+                    ExpDesign.Y[&#34;x_values&#34;] = dict()
+                    for varIdx, var in enumerate(out_names):
+                        x = np.array(f[f&#34;x_values/{var}&#34;])
+                        ExpDesign.Y[&#34;x_values&#34;][var] = x
+                except KeyError:
+                    ExpDesign.Y[&#34;x_values&#34;] = np.array(f[&#34;x_values&#34;])
+
+                # Store the output
+                for varIdx, var in enumerate(out_names):
+                    try:
+                        y = np.array(f[f&#34;EDY/{var}/New_init_&#34;])
+                    except KeyError:
+                        y = np.array(f[f&#34;EDY/{var}/init_&#34;])
+                    ExpDesign.Y[var] = y
+                f.close()
+            else:
+                # Check if an old hdf5 file exists: if yes, rename it
+                hdf5file = f&#39;ExpDesign_{self.ModelObj.name}.hdf5&#39;
+                if os.path.exists(hdf5file):
+                    os.rename(hdf5file, &#39;old_&#39;+hdf5file)
+
+        # ---- Prepare X samples ----
+        ED_X, ED_X_tr = ExpDesign.generate_ED(ExpDesign.n_init_samples,
+                                              ExpDesign.sampling_method,
+                                              transform=True,
+                                              max_pce_deg=np.max(self.pce_deg))
+        ExpDesign.X = ED_X
+        ExpDesign.collocationPoints = ED_X_tr
+        self.bound_tuples = ExpDesign.bound_tuples
+
+        # ---- Run simulations at X ----
+        if not hasattr(ExpDesign, &#39;Y&#39;) or ExpDesign.Y is None:
+            print(&#39;\n Now the forward model needs to be run!\n&#39;)
+            ED_Y, up_ED_X = Model.run_model_parallel(ED_X)
+            ExpDesign.X = up_ED_X
+            self.ModelOutputDict = ED_Y
+            ExpDesign.Y = ED_Y
+        else:
+            # Check if a dict has been passed.
+            if type(ExpDesign.Y) is dict:
+                self.ModelOutputDict = ExpDesign.Y
+            else:
+                raise Exception(&#39;Please provide either a dictionary or an &#39;
+                                &#39;hdf5 file to the ExpDesign.hdf5_file &#39;
+                                &#39;argument.&#39;)
+
+        return ED_X_tr, self.ModelOutputDict
+
+    # -------------------------------------------------------------------------
+    def univ_basis_vals(self, samples, n_max=None):
+        &#34;&#34;&#34;
+        Evaluates univariate regressors along input directions.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            Samples.
+        n_max : int, optional
+            Maximum polynomial degree. The default is `None`.
+
+        Returns
+        -------
+        univ_basis: array of shape (n_samples, n_params, n_max+1)
+            All univariate regressors up to n_max.
+        &#34;&#34;&#34;
+        # Extract information
+        poly_types = self.ExpDesign.poly_types
+        if samples.ndim != 2:
+            samples = samples.reshape(1, len(samples))
+        n_max = np.max(self.pce_deg) if n_max is None else n_max
+
+        # Extract poly coeffs
+        if self.ExpDesign.input_data_given or self.ExpDesign.apce:
+            apolycoeffs = self.ExpDesign.polycoeffs
+        else:
+            apolycoeffs = None
+
+        # Evaluate univariate basis
+        univ_basis = eval_univ_basis(samples, n_max, poly_types, apolycoeffs)
+
+        return univ_basis
+
+    # -------------------------------------------------------------------------
+    def create_psi(self, basis_indices, univ_p_val):
+        &#34;&#34;&#34;
+        Assembles the design matrix Psi from the given basis index set
+        `basis_indices` and the univariate polynomial evaluations univ_p_val.
+
+        Parameters
+        ----------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        univ_p_val : array of (n_samples, n_params, n_max+1)
+            All univariate regressors up to `n_max`.
+
+        Raises
+        ------
+        ValueError
+            If the number of parameters in `basis_indices` and `univ_p_val`
+            do not match.
+
+        Returns
+        -------
+        psi : array of shape (n_samples, n_terms)
+            Multivariate regressors.
+
+        &#34;&#34;&#34;
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+        if sparsity:
+            basis_indices = basis_indices.toarray()
+
+        # Initialization and consistency checks
+        # number of input variables
+        n_params = univ_p_val.shape[1]
+
+        # Size of the experimental design
+        n_samples = univ_p_val.shape[0]
+
+        # number of basis terms
+        n_terms = basis_indices.shape[0]
+
+        # check that the variables have consistent sizes
+        if n_params != basis_indices.shape[1]:
+            raise ValueError(&#34;The shapes of basis_indices and univ_p_val don&#39;t&#34;
+                             &#34; match!!&#34;)
+
+        # Preallocate the Psi matrix for performance
+        psi = np.ones((n_samples, n_terms))
+        # Assemble the Psi matrix
+        for m in range(basis_indices.shape[1]):
+            aa = np.where(basis_indices[:, m] &gt; 0)[0]
+            basisIdx = basis_indices[aa, m]
+            bb = np.reshape(univ_p_val[:, m, basisIdx], psi[:, aa].shape)
+            psi[:, aa] = np.multiply(psi[:, aa], bb)
+
+        return psi
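+
+    # Note: each entry of the design matrix assembled above is a product of
+    # univariate polynomial evaluations,
+    #     psi[n, k] = prod_m univ_p_val[n, m, basis_indices[k, m]],
+    # i.e. row n holds all multivariate basis polynomials evaluated at
+    # sample n.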
+
+    # -------------------------------------------------------------------------
+    def fit(self, X, y, basis_indices, reg_method=None):
+        &#34;&#34;&#34;
+        Fit regression using the regression method provided.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_features)
+            Training vector, where n_samples is the number of samples and
+            n_features is the number of features.
+        y : array of shape (n_samples,)
+            Target values.
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        reg_method : str, optional
+            Regression method. If None, `pce_reg_method` is used. The default
+            is None.
+
+        Returns
+        -------
+        return_out_dict : Dict
+            Fitted estimator (`clf_poly`), sparse multi-indices
+            (`spareMulti-Index`), sparse design matrix (`sparePsi`) and
+            coefficients (`coeffs`).
+
+        &#34;&#34;&#34;
+        if reg_method is None:
+            reg_method = self.pce_reg_method
+
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+
+        clf_poly = []
+        compute_score = self.verbose
+
+        #  inverse of the observed variance of the data
+        if np.var(y) != 0:
+            Lambda = 1 / np.var(y)
+        else:
+            Lambda = 1e-6
+
+        # Bayes sparse adaptive aPCE
+        if reg_method.lower() != &#39;ols&#39;:
+            if reg_method.lower() == &#39;brr&#39; or np.var(y) == 0:
+                clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
+                                            fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            alpha_1=1e-04, alpha_2=1e-04,
+                                            lambda_1=Lambda, lambda_2=Lambda)
+                clf_poly.converged = True
+
+            elif reg_method.lower() == &#39;ard&#39;:
+                clf_poly = lm.ARDRegression(fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            n_iter=1000, tol=0.0001,
+                                            alpha_1=1e-3, alpha_2=1e-3,
+                                            lambda_1=Lambda, lambda_2=Lambda)
+
+            elif reg_method.lower() == &#39;fastard&#39;:
+                clf_poly = RegressionFastARD(fit_intercept=True,
+                                             normalize=True,
+                                             compute_score=compute_score,
+                                             n_iter=300, tol=1e-10)
+
+            elif reg_method.lower() == &#39;bcs&#39;:
+                clf_poly = RegressionFastLaplace(fit_intercept=False,
+                                                 n_iter=1000, tol=1e-7)
+
+            elif reg_method.lower() == &#39;lars&#39;:
+                clf_poly = lm.LassoLarsCV(fit_intercept=False)
+
+            elif reg_method.lower() == &#39;sgdr&#39;:
+                clf_poly = lm.SGDRegressor(fit_intercept=False,
+                                           max_iter=5000, tol=1e-7)
+
+            elif reg_method.lower() == &#39;omp&#39;:
+                clf_poly = lm.OrthogonalMatchingPursuitCV(fit_intercept=False,
+                                                          max_iter=10)
+
+            elif reg_method.lower() == &#39;vbl&#39;:
+                clf_poly = VBLinearRegression(fit_intercept=False)
+
+            elif reg_method.lower() == &#39;ebl&#39;:
+                clf_poly = EBLinearRegression(optimizer=&#39;em&#39;)
+
+            # Fit
+            clf_poly.fit(X, y)
+
+            # Select the nonzero entries of coefficients
+            # The first column must be kept (For mean calculations)
+            nnz_idx = np.nonzero(clf_poly.coef_)[0]
+
+            if len(nnz_idx) == 0 or nnz_idx[0] != 0:
+                nnz_idx = np.insert(np.nonzero(clf_poly.coef_)[0], 0, 0)
+                # Remove the zero entries for Bases and PSI if need be
+                if sparsity:
+                    sparse_basis_indices = basis_indices.toarray()[nnz_idx]
+                else:
+                    sparse_basis_indices = basis_indices[nnz_idx]
+                sparse_X = X[:, nnz_idx]
+
+                # Store the coefficients of the regression model
+                clf_poly.fit(sparse_X, y)
+                coeffs = clf_poly.coef_
+            else:
+                # This is for the case where all outputs are zero, thereby
+                # all coefficients are zero
+                if sparsity:
+                    sparse_basis_indices = basis_indices.toarray()
+                else:
+                    sparse_basis_indices = basis_indices
+                sparse_X = X
+                coeffs = clf_poly.coef_
+
+        # Ordinary least square method (OLS)
+        else:
+            if sparsity:
+                sparse_basis_indices = basis_indices.toarray()
+            else:
+                sparse_basis_indices = basis_indices
+            sparse_X = X
+
+            X_T_X = np.dot(sparse_X.T, sparse_X)
+
+            if np.linalg.cond(X_T_X) &gt; 1e-12 and \
+               np.linalg.cond(X_T_X) &lt; 1 / sys.float_info.epsilon:
+                # faster
+                coeffs = sp.linalg.solve(X_T_X, np.dot(sparse_X.T, y))
+            else:
+                # stabler
+                coeffs = np.dot(np.dot(np.linalg.pinv(X_T_X), sparse_X.T), y)
+
+        # Create a dict to pass the outputs
+        return_out_dict = dict()
+        return_out_dict[&#39;clf_poly&#39;] = clf_poly
+        return_out_dict[&#39;spareMulti-Index&#39;] = sparse_basis_indices
+        return_out_dict[&#39;sparePsi&#39;] = sparse_X
+        return_out_dict[&#39;coeffs&#39;] = coeffs
+        return return_out_dict
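+
+    # Note on the OLS branch above: the coefficients solve the normal
+    # equations (Psi^T Psi) c = Psi^T y; the pseudo-inverse fallback is
+    # used whenever Psi^T Psi is ill-conditioned.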
+
+    # --------------------------------------------------------------------------------------------------------
+    def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
+        &#34;&#34;&#34;
+        Adaptively fits the PCE model by comparing the scores of different
+        degrees and q-norm.
+
+        Parameters
+        ----------
+        ED_X : array of shape (n_samples, n_params)
+            Experimental design.
+        ED_Y : array of shape (n_samples,)
+            Target values, i.e. simulation results for the Experimental design.
+        varIdx : int
+            Index of the output.
+        verbose : bool, optional
+            Print out summary. The default is False.
+
+        Returns
+        -------
+        returnVars : Dict
+            Fitted estimator, best degree, best q-norm, LOOCVScore and
+            coefficients.
+
+        &#34;&#34;&#34;
+
+        NrSamples, n_params = ED_X.shape
+        # Initialization
+        qAllCoeffs, AllCoeffs = {}, {}
+        qAllIndices_Sparse, AllIndices_Sparse = {}, {}
+        qAllclf_poly, Allclf_poly = {}, {}
+        qAllnTerms, AllnTerms = {}, {}
+        qAllLCerror, AllLCerror = {}, {}
+        AllqNorms = {}
+
+        # Extract degree array and qnorm array
+        DegreeArray = np.array([*self.allBasisIndices], dtype=int)
+        qnorm = [*self.allBasisIndices[str(int(DegreeArray[0]))]]
+
+        # Some options for EarlyStop
+        errorIncreases = False
+        # Stop degree, if LOO error does not decrease n_checks_degree times
+        n_checks_degree = 3
+        # Stop qNorm, if criterion isn&#39;t fulfilled n_checks_qNorm times
+        n_checks_qNorm = 2
+        nqnorms = len(qnorm)
+        qNormEarlyStop = True
+        if nqnorms &lt; n_checks_qNorm+1:
+            qNormEarlyStop = False
+
+        # =====================================================================
+        # basis adaptive polynomial chaos: repeat the calculation by increasing
+        # polynomial degree until the highest accuracy is reached
+        # =====================================================================
+        # For each degree check all q-norms and choose the best one
+        scores = -np.inf * np.ones(DegreeArray.shape[0])
+        qNormScores = -np.inf * np.ones(nqnorms)
+
+        for degIdx, deg in enumerate(DegreeArray):
+
+            for qidx, q in enumerate(qnorm):
+
+                # Extract the polynomial basis indices from the pool of
+                # allBasisIndices
+                BasisIndices = self.allBasisIndices[str(deg)][str(q)]
+
+                # Assemble the Psi matrix
+                Psi = self.create_psi(BasisIndices, self.univ_p_val)
+
+                # Calculate the coefficients of the meta model
+                outs = self.fit(Psi, ED_Y, BasisIndices)
+
+                # Calculate and save the score of LOOCV
+                score, LCerror = self.corr_loocv_error(outs[&#39;clf_poly&#39;],
+                                                       outs[&#39;sparePsi&#39;],
+                                                       outs[&#39;coeffs&#39;],
+                                                       ED_Y)
+
+                # Check the convergence of noise for FastARD
+                if self.pce_reg_method == &#39;FastARD&#39; and \
+                   outs[&#39;clf_poly&#39;].alpha_ &lt; np.finfo(np.float32).eps:
+                    score = -np.inf
+
+                qNormScores[qidx] = score
+                qAllCoeffs[str(qidx+1)] = outs[&#39;coeffs&#39;]
+                qAllIndices_Sparse[str(qidx+1)] = outs[&#39;spareMulti-Index&#39;]
+                qAllclf_poly[str(qidx+1)] = outs[&#39;clf_poly&#39;]
+                qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
+                qAllLCerror[str(qidx+1)] = LCerror
+
+                # EarlyStop check
+                # if there are at least n_checks_qNorm entries after the
+                # best one, we stop
+                if qNormEarlyStop and \
+                   sum(np.isfinite(qNormScores)) &gt; n_checks_qNorm:
+                    # If the error has increased the last two iterations, stop!
+                    qNormScores_nonInf = qNormScores[np.isfinite(qNormScores)]
+                    deltas = np.sign(np.diff(qNormScores_nonInf))
+                    if sum(deltas[-n_checks_qNorm+1:]) == 2:
+                        # stop the q-norm loop here
+                        break
+                if np.var(ED_Y) == 0:
+                    break
+
+            # Store the score in the scores list
+            best_q = np.nanargmax(qNormScores)
+            scores[degIdx] = qNormScores[best_q]
+
+            AllCoeffs[str(degIdx+1)] = qAllCoeffs[str(best_q+1)]
+            AllIndices_Sparse[str(degIdx+1)] = qAllIndices_Sparse[str(best_q+1)]
+            Allclf_poly[str(degIdx+1)] = qAllclf_poly[str(best_q+1)]
+            AllnTerms[str(degIdx+1)] = qAllnTerms[str(best_q+1)]
+            AllLCerror[str(degIdx+1)] = qAllLCerror[str(best_q+1)]
+            AllqNorms[str(degIdx+1)] = float(qnorm[best_q])
+
+            # Check the direction of the error (on average):
+            # if it increases consistently stop the iterations
+            if len(scores[scores != -np.inf]) &gt; n_checks_degree:
+                scores_nonInf = scores[scores != -np.inf]
+                ss = np.sign(scores_nonInf - np.max(scores_nonInf))
+                # ss&lt;0 error decreasing
+                errorIncreases = np.sum(np.sum(ss[-2:])) &lt;= -1*n_checks_degree
+
+            if errorIncreases:
+                break
+
+            # Check only one degree, if target matrix has zero variance
+            if np.var(ED_Y) == 0:
+                break
+
+        # ------------------ Summary of results ------------------
+        # Select the one with the best score and save the necessary outputs
+        best_deg = np.nanargmax(scores)+1
+        coeffs = AllCoeffs[str(best_deg)]
+        basis_indices = AllIndices_Sparse[str(best_deg)]
+        clf_poly = Allclf_poly[str(best_deg)]
+        LOOCVScore = np.nanmax(scores)
+        P = AllnTerms[str(best_deg)]
+        LCerror = AllLCerror[str(best_deg)]
+        degree = DegreeArray[np.nanargmax(scores)]
+        # Use the q-norm belonging to the best degree (best_q from the last
+        # degree iteration is not correct in general)
+        qnorm = AllqNorms[str(best_deg)]
+
+        # ------------------ Print out Summary of results ------------------
+        if self.verbose:
+            # Create PSI_Sparse by removing redundant terms
+            nnz_idx = np.nonzero(coeffs)[0]
+            BasisIndices_Sparse = basis_indices[nnz_idx]
+
+            print(f&#39;Output variable {varIdx+1}:&#39;)
+            print(&#39;The estimation of PCE coefficients converged at polynomial &#39;
+                  f&#39;degree {DegreeArray[best_deg-1]} with &#39;
+                  f&#39;{len(BasisIndices_Sparse)} terms (Sparsity index = &#39;
+                  f&#39;{round(len(BasisIndices_Sparse)/P, 3)}).&#39;)
+
+            print(f&#39;Final ModLOO error estimate: {1-max(scores):.3e}&#39;)
+            print(&#39;\n&#39;+&#39;-&#39;*50)
+
+        if verbose:
+            print(&#39;=&#39;*50)
+            print(&#39; &#39;*10 + &#39; Summary of results &#39;)
+            print(&#39;=&#39;*50)
+
+            print(&#34;scores:\n&#34;, scores)
+            print(&#34;Best score&#39;s degree:&#34;, self.DegreeArray[best_deg-1])
+            print(&#34;NO. of terms:&#34;, len(basis_indices))
+            print(&#34;Sparsity index:&#34;, round(len(basis_indices)/P, 3))
+            print(&#34;Best Indices:\n&#34;, basis_indices)
+
+            if self.pce_reg_method in [&#39;BRR&#39;, &#39;ARD&#39;]:
+                fig, ax = plt.subplots(figsize=(12, 10))
+                plt.title(&#34;Marginal log-likelihood&#34;)
+                plt.plot(clf_poly.scores_, color=&#39;navy&#39;, linewidth=2)
+                plt.ylabel(&#34;Score&#34;)
+                plt.xlabel(&#34;Iterations&#34;)
+                if self.pce_reg_method.lower() == &#39;brr&#39;:
+                    text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                            f&#34;$\\lambda={clf_poly.lambda_:.3f}$\n&#34;
+                            f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+                else:
+                    text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                            f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+
+                plt.text(0.75, 0.5, text, fontsize=18, transform=ax.transAxes)
+                plt.show()
+            print(&#39;=&#39;*80)
+
+        # Create a dict to pass the outputs
+        returnVars = dict()
+        returnVars[&#39;clf_poly&#39;] = clf_poly
+        returnVars[&#39;degree&#39;] = degree
+        returnVars[&#39;qnorm&#39;] = qnorm
+        returnVars[&#39;coeffs&#39;] = coeffs
+        returnVars[&#39;multi_indices&#39;] = basis_indices
+        returnVars[&#39;LOOCVScore&#39;] = LOOCVScore
+        returnVars[&#39;LCerror&#39;] = LCerror
+
+        return returnVars
+
+    # -------------------------------------------------------------------------
+    def corr_loocv_error(self, clf, psi, coeffs, y):
+        &#34;&#34;&#34;
+        Calculates the corrected LOO error for regression on regressor
+        matrix `psi` that generated the coefficients based on [1] and [2].
+
+        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+            uncertainty propagation and sensitivity analysis (Doctoral
+            dissertation, Clermont-Ferrand 2).
+
+        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+            expansion based on least angle regression. Journal of computational
+            Physics, 230(6), pp.2345-2367.
+
+        Parameters
+        ----------
+        clf : object
+            Fitted estimator.
+        psi : array of shape (n_samples, n_features)
+            The multivariate orthogonal polynomials (regressor).
+        coeffs : array-like of shape (n_features,)
+            Estimated coefficients.
+        y : array of shape (n_samples,)
+            Target values.
+
+        Returns
+        -------
+        Q_2 : float
+            LOOCV validation score (1 - LOOCV error).
+        residual : array of shape (n_samples,)
+            Residual values (predicted targets - y).
+
+        &#34;&#34;&#34;
+        psi = np.array(psi, dtype=float)
+
+        # Create PSI_Sparse by removing redundant terms
+        nnz_idx = np.nonzero(coeffs)[0]
+        if len(nnz_idx) == 0:
+            nnz_idx = [0]
+        psi_sparse = psi[:, nnz_idx]
+
+        # NrCoeffs of aPCEs
+        P = len(nnz_idx)
+        # NrEvaluation (Size of experimental design)
+        N = psi.shape[0]
+
+        # Build the projection matrix
+        PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
+
+        if np.linalg.cond(PsiTPsi) &gt; 1e-12 and \
+           np.linalg.cond(PsiTPsi) &lt; 1/sys.float_info.epsilon:
+            # faster
+            M = sp.linalg.solve(PsiTPsi,
+                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+        else:
+            # stabler
+            M = np.linalg.pinv(PsiTPsi)
+
+        # h factor (the full matrix is not calculated explicitly,
+        # only the trace is, to save memory)
+        PsiM = np.dot(psi_sparse, M)
+
+        h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.float128)
+
+        # ------ Calculate Error Loocv for each measurement point ----
+        # Residuals
+        if isinstance(clf, list):
+            residual = np.dot(psi, coeffs) - y
+        else:
+            residual = clf.predict(psi) - y
+
+        # Variance
+        varY = np.var(y)
+
+        if varY == 0:
+            normEmpErr = 0
+            ErrLoo = 0
+            LCerror = np.zeros((y.shape))
+        else:
+            normEmpErr = np.mean(residual**2)/varY
+
+            LCerror = residual / (1 - h)
+            ErrLoo = np.mean(np.square(LCerror)) / varY
+            # if there are NaNs, just return an infinite LOO error (this
+            # happens, e.g., when a strongly underdetermined problem is solved)
+            if np.isnan(ErrLoo):
+                ErrLoo = np.inf
+
+        # Corrected Error for over-determined system
+        trM = np.trace(M)
+        if trM &lt; 0 or abs(trM) &gt; 1e6:
+            trM = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
+
+        # Over-determined system of Equation
+        if N &gt; P:
+            T_factor = N/(N-P) * (1 + trM)
+
+        # Under-determined system of Equation
+        else:
+            T_factor = np.inf
+
+        CorrectedErrLoo = ErrLoo * T_factor
+
+        Q_2 = 1 - CorrectedErrLoo
+
+        return Q_2, residual
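+
+    # The quantities computed above correspond to (cf. [1], [2]):
+    #     h_i    = [Psi (Psi^T Psi)^-1 Psi^T]_ii   (leverages),
+    #     ErrLoo = mean_i[(r_i / (1 - h_i))^2] / Var(y),
+    #     T      = N / (N - P) * (1 + tr((Psi^T Psi)^-1))   for N &gt; P,
+    #     Q_2    = 1 - T * ErrLoo.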
+
+    # -------------------------------------------------------------------------
+    def pca_transformation(self, Output):
+        &#34;&#34;&#34;
+        Transforms the targets (outputs) via Principal Component Analysis
+
+        Parameters
+        ----------
+        Output : array of shape (n_samples, n_features)
+            Target values.
+
+        Returns
+        -------
+        pca : obj
+            Fitted sklearnPCA object.
+        OutputMatrix : array of shape (n_samples, n_pca_components)
+            Transformed target values.
+
+        &#34;&#34;&#34;
+        # Transform via Principal Component Analysis
+        if hasattr(self, &#39;var_pca_threshold&#39;):
+            var_pca_threshold = self.var_pca_threshold
+        else:
+            var_pca_threshold = 100.0
+        n_samples, n_features = Output.shape
+
+        if hasattr(self, &#39;n_pca_components&#39;):
+            n_pca_components = self.n_pca_components
+        else:
+            # Instantiate and fit sklearnPCA object
+            covar_matrix = sklearnPCA(n_components=None)
+            covar_matrix.fit(Output)
+            var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_,
+                                     decimals=5)*100)
+            # Find the number of components needed to explain
+            # var_pca_threshold percent of the variance
+            try:
+                n_components = np.where(var &gt;= var_pca_threshold)[0][0] + 1
+            except IndexError:
+                n_components = min(n_samples, n_features)
+
+            n_pca_components = min(n_samples, n_features, n_components)
+
+        # Print out a report
+        print()
+        print(&#39;-&#39; * 50)
+        print(f&#34;PCA transformation is performed with {n_pca_components}&#34;
+              &#34; components.&#34;)
+        print(&#39;-&#39; * 50)
+        print()
+
+        # Fit and transform with the selected number of components
+        pca = sklearnPCA(n_components=n_pca_components,
+                         svd_solver=&#39;randomized&#39;)
+        OutputMatrix = pca.fit_transform(Output)
+
+        return pca, OutputMatrix
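+
+    # The fitted `pca` object is reused in `eval_metamodel` to map latent
+    # predictions back to the original output space via
+    #     y = pca.mean_ + y_latent @ pca.components_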
+
+    # -------------------------------------------------------------------------
+    def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
+                                  varIdx=None):
+        &#34;&#34;&#34;
+        Fits a Gaussian Process Emulator to the target given the training
+        points.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_params)
+            Training points.
+        y : array of shape (n_samples,)
+            Target values.
+        nug_term : float, optional
+            Nugget term. The default is None, i.e. variance of y.
+        autoSelect : bool, optional
+            Loop over some kernels and select the best. The default is False.
+        varIdx : int, optional
+            The index number. The default is None.
+
+        Returns
+        -------
+        gp : object
+            Fitted estimator.
+
+        &#34;&#34;&#34;
+
+        nug_term = nug_term if nug_term else np.var(y)
+
+        Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                          length_scale_bounds=(1e-25, 1e15)),
+                   nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                        alpha=1.0),
+                   nug_term * kernels.Matern(length_scale=1.0,
+                                             length_scale_bounds=(1e-15, 1e5),
+                                             nu=1.5)]
+
+        # Automatic selection of the kernel
+        if autoSelect:
+            gp = {}
+            BME = []
+            for i, kernel in enumerate(Kernels):
+                gp[i] = GaussianProcessRegressor(kernel=kernel,
+                                                 n_restarts_optimizer=3,
+                                                 normalize_y=False)
+
+                # Fit to data using Maximum Likelihood Estimation
+                gp[i].fit(X, y)
+
+                # Store the MLE as BME score
+                BME.append(gp[i].log_marginal_likelihood())
+
+            gp = gp[np.argmax(BME)]
+
+        else:
+            gp = GaussianProcessRegressor(kernel=Kernels[0],
+                                          n_restarts_optimizer=3,
+                                          normalize_y=False)
+            gp.fit(X, y)
+
+        # Compute score
+        if varIdx is not None:
+            Score = gp.score(X, y)
+            print(&#39;-&#39;*50)
+            print(f&#39;Output variable {varIdx}:&#39;)
+            print(&#39;The estimation of GPE coefficients converged,&#39;)
+            print(f&#39;with the R^2 score: {Score:.3f}&#39;)
+            print(&#39;-&#39;*50)
+
+        return gp
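+
+    # When `autoSelect` is True, the kernel is chosen by maximizing the log
+    # marginal likelihood of the fitted GPs, which is used here as a proxy
+    # for the Bayesian model evidence (BME).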
+
+    # -------------------------------------------------------------------------
+    def eval_metamodel(self, samples=None, nsamples=None,
+                       sampling_method=&#39;random&#39;, return_samples=False):
+        &#34;&#34;&#34;
+        Evaluates the meta-model at the requested samples. Alternatively,
+        `nsamples` fresh samples can be generated and evaluated.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate meta-model at. The default is None.
+        nsamples : int, optional
+            Number of samples to generate, if no `samples` is provided. The
+            default is None.
+        sampling_method : str, optional
+            Type of sampling, if no `samples` is provided. The default is
+            &#39;random&#39;.
+        return_samples : bool, optional
+            Return samples, if no `samples` is provided. The default is False.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean of the predictions.
+        std_pred : dict
+            Standard deviation of the predictions.
+        &#34;&#34;&#34;
+        if self.meta_model_type.lower() == &#39;gpe&#39;:
+            model_dict = self.gp_poly
+        else:
+            model_dict = self.coeffs_dict
+
+        if samples is None:
+            if nsamples is None:
+                self.n_samples = 100000
+            else:
+                self.n_samples = nsamples
+
+            samples = self.ExpDesign.generate_samples(self.n_samples,
+                                                      sampling_method)
+        else:
+            self.samples = samples
+            self.n_samples = len(samples)
+
+        # Transform samples
+        samples = self.ExpDesign.transform(samples)
+
+        if self.meta_model_type.lower() != &#39;gpe&#39;:
+            univ_p_val = self.univ_basis_vals(samples,
+                                              n_max=np.max(self.pce_deg))
+
+        mean_pred = {}
+        std_pred = {}
+
+        # Loop over outputs
+        for output, values in model_dict.items():
+
+            mean = np.zeros((len(samples), len(values)))
+            std = np.zeros((len(samples), len(values)))
+            idx = 0
+            for in_key, InIdxValues in values.items():
+
+                # Prediction with GPE
+                if self.meta_model_type.lower() == &#39;gpe&#39;:
+                    X_T = self.x_scaler[output].transform(samples)
+                    gp = self.gp_poly[output][in_key]
+                    y_mean, y_std = gp.predict(X_T, return_std=True)
+
+                else:
+                    # Prediction with PCE or PCE-Kriging
+                    # Assemble Psi matrix
+                    psi = self.create_psi(self.basis_dict[output][in_key],
+                                          univ_p_val)
+                    # Prediction
+                    try:
+                        # with error bar
+                        clf_poly = self.clf_poly[output][in_key]
+                        y_mean, y_std = clf_poly.predict(psi, return_std=True)
+
+                    except (AttributeError, TypeError):
+                        # without error bar
+                        coeffs = self.coeffs_dict[output][in_key]
+                        y_mean = np.dot(psi, coeffs)
+                        y_std = np.zeros_like(y_mean)
+
+                mean[:, idx] = y_mean
+                std[:, idx] = y_std
+                idx += 1
+
+            if self.dim_red_method.lower() == &#39;pca&#39;:
+                PCA = self.pca[output]
+                mean_pred[output] = PCA.mean_ + np.dot(mean, PCA.components_)
+                std_pred[output] = np.sqrt(np.dot(std**2, PCA.components_**2))
+            else:
+                mean_pred[output] = mean
+                std_pred[output] = std
+
+        if return_samples:
+            return mean_pred, std_pred, samples
+        else:
+            return mean_pred, std_pred
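+
+    # Illustrative call (comment only): after training, the surrogate can
+    # be evaluated on, e.g., 1000 fresh random samples via
+    #     mean_pred, std_pred = metamodel.eval_metamodel(nsamples=1000)
+    # where `metamodel` is the object returned by `create_metamodel`.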
+
+    # -------------------------------------------------------------------------
+    def create_model_error(self, X, y, name=&#39;Calib&#39;):
+        &#34;&#34;&#34;
+        Fits a GPE-based model error.
+
+        Parameters
+        ----------
+        X : array of shape (n_outputs, n_inputs)
+            Input array. It can contain any forcing inputs or coordinates of
+             extracted data.
+        y : array of shape (n_outputs,)
+            The model response for the MAP parameter set.
+        name : str, optional
+            Calibration or validation. The default is `&#39;Calib&#39;`.
+
+        Returns
+        -------
+        self: object
+            Self object.
+
+        &#34;&#34;&#34;
+        Model = self.ModelObj
+        outputNames = Model.Output.Names
+        self.errorRegMethod = &#39;GPE&#39;
+        self.errorclf_poly = self.auto_vivification()
+        self.errorScale = self.auto_vivification()
+
+        # Read data
+        MeasuredData = Model.read_observation(case=name)
+
+        # Fitting GPR based bias model
+        for out in outputNames:
+            nan_idx = ~np.isnan(MeasuredData[out])
+            # Select data
+            try:
+                data = MeasuredData[out].values[nan_idx]
+            except AttributeError:
+                data = MeasuredData[out][nan_idx]
+
+            # Prepare the input matrix
+            scaler = MinMaxScaler()
+            delta = data  # - y[out][0]
+            BiasInputs = np.hstack((X[out], y[out].reshape(-1, 1)))
+            X_S = scaler.fit_transform(BiasInputs)
+            gp = self.gaussian_process_emulator(X_S, delta)
+
+            self.errorScale[out][&#34;y_1&#34;] = scaler
+            self.errorclf_poly[out][&#34;y_1&#34;] = gp
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def eval_model_error(self, X, y_pred):
+        &#34;&#34;&#34;
+        Evaluates the error model.
+
+        Parameters
+        ----------
+        X : array
+            Inputs.
+        y_pred : dict
+            Predictions.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean prediction of the GPE-based error model.
+        std_pred : dict
+            Standard deviation of the GPE-based error model.
+
+        &#34;&#34;&#34;
+        mean_pred = {}
+        std_pred = {}
+
+        for Outkey, ValuesDict in self.errorclf_poly.items():
+
+            pred_mean = np.zeros_like(y_pred[Outkey])
+            pred_std = np.zeros_like(y_pred[Outkey])
+
+            for Inkey, InIdxValues in ValuesDict.items():
+
+                gp = self.errorclf_poly[Outkey][Inkey]
+                scaler = self.errorScale[Outkey][Inkey]
+
+                # Transform Samples using scaler
+                for j, pred in enumerate(y_pred[Outkey]):
+                    BiasInputs = np.hstack((X[Outkey], pred.reshape(-1, 1)))
+                    Samples_S = scaler.transform(BiasInputs)
+                    y_hat, y_std = gp.predict(Samples_S, return_std=True)
+                    pred_mean[j] = y_hat
+                    pred_std[j] = y_std
+                    # pred_mean[j] += pred
+
+            mean_pred[Outkey] = pred_mean
+            std_pred[Outkey] = pred_std
+
+        return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    class auto_vivification(dict):
+        &#34;&#34;&#34;
+        Implementation of Perl&#39;s autovivification feature.
+
+        Source: https://stackoverflow.com/a/651879/18082457
+        &#34;&#34;&#34;
+
+        def __getitem__(self, item):
+            try:
+                return dict.__getitem__(self, item)
+            except KeyError:
+                value = self[item] = type(self)()
+                return value
+
+    # -------------------------------------------------------------------------
+    def __select_degree(self, ndim, nSamples):
+        &#34;&#34;&#34;
+        Selects degree based on the number of samples and parameters in the
+        sequential design.
+
+        Parameters
+        ----------
+        ndim : int
+            Number of uncertain parameters (input dimensions).
+        nSamples : int
+            Number of samples in the current experimental design.
+
+        Returns
+        -------
+        DegreeArray : array of int
+            Candidate polynomial degrees to be examined.
+
+        &#34;&#34;&#34;
+        # Define the DegreeArray
+        max_deg = np.max(self.pce_deg)
+        min_Deg = np.min(self.pce_deg)
+        nitr = nSamples - self.ExpDesign.n_init_samples
+
+        # Check q-norm
+        if not np.isscalar(self.pce_q_norm):
+            self.pce_q_norm = np.array(self.pce_q_norm)
+        else:
+            self.pce_q_norm = np.array([self.pce_q_norm])
+
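+        # M_uptoMax(d) returns, for each degree k up to d, the number of terms
+        # of a full PCE basis: C(ndim + k, k) = (ndim + k)! / (ndim! * k!).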
+        def M_uptoMax(maxDeg):
+            n_combo = np.zeros(maxDeg)
+            for i, d in enumerate(range(1, maxDeg+1)):
+                n_combo[i] = math.factorial(ndim+d)
+                n_combo[i] /= math.factorial(ndim) * math.factorial(d)
+            return n_combo
+
+        if self.ExpDesignFlag != &#39;sequential&#39;:
+            degNew = max_deg
+        else:
+            d = nitr if nitr != 0 and self.n_params &gt; 5 else 1
+            min_index = np.argmin(abs(M_uptoMax(max_deg)-ndim*nSamples*d))
+            degNew = range(1, max_deg+1)[min_index]
+
+        if degNew &gt; min_Deg and self.pce_reg_method.lower() != &#39;fastard&#39;:
+            DegreeArray = np.arange(min_Deg, degNew+1)
+        else:
+            DegreeArray = np.array([degNew])
+
+        return DegreeArray</code></pre>
+</details>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+</section>
+<section>
+<h2 class="section-title" id="header-classes">Classes</h2>
+<dl>
+<dt id="surrogate_models.MetaModel"><code class="flex name class">
+<span>class <span class="ident">MetaModel</span></span>
+<span>(</span><span>input_obj, meta_model_type='PCE', pce_reg_method='OLS', pce_deg=1, pce_q_norm=1.0, dim_red_method='no', verbose=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Meta (surrogate) model</p>
+<p>This class trains a surrogate model. It accepts an input object (input_obj)
+containing the specification of the distributions for uncertain parameters
+and a model object with instructions on how to run the computational model.</p>
+<h2 id="attributes">Attributes</h2>
+<dl>
+<dt><strong><code>input_obj</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Input object with the information on the model input parameters.</dd>
+<dt><strong><code>meta_model_type</code></strong> :&ensp;<code>str</code></dt>
+<dd>Surrogate model types. Three surrogate model types are supported:
+polynomial chaos expansion (<code>PCE</code>), arbitrary PCE (<code>aPCE</code>) and
+Gaussian process regression (<code>GPE</code>). Default is PCE.</dd>
+<dt><strong><code>pce_reg_method</code></strong> :&ensp;<code>str</code></dt>
+<dd>
+<p>PCE regression method to compute the coefficients. The following
+regression methods are available:</p>
+<ol>
+<li>OLS: Ordinary Least Squares method</li>
+<li>BRR: Bayesian Ridge Regression</li>
+<li>LARS: Least Angle Regression</li>
+<li>ARD: Bayesian ARD Regression</li>
+<li>FastARD: Fast Bayesian ARD Regression</li>
+<li>VBL: Variational Bayesian Learning</li>
+<li>EBL: Empirical Bayesian Learning
+Default is <code>OLS</code>.</li>
+</ol>
+</dd>
+<dt><strong><code>pce_deg</code></strong> :&ensp;<code>int</code> or <code>list</code> of <code>int</code></dt>
+<dd>Polynomial degree(s). If a list is given, an adaptive algorithm is used
+to find the best degree with the lowest Leave-One-Out cross-validation
+(LOO) error (or the highest score=1-LOO). Default is <code>1</code>.</dd>
+<dt><strong><code>pce_q_norm</code></strong> :&ensp;<code>float</code></dt>
+<dd>Hyperbolic (or q-norm) truncation for multi-indices of multivariate
+polynomials. Default is <code>1.0</code>.</dd>
+<dt><strong><code>dim_red_method</code></strong> :&ensp;<code>str</code></dt>
+<dd>Dimensionality reduction method for the output space. The available
+method is based on principal component analysis (PCA). The Default is
+<code>'no'</code>. There are two ways to select the number of components: use
+a threshold on the percentage of explained variance (between 0 and 100)
+(Option A) or direct prescription of components' number (Option B):<pre><code>&gt;&gt;&gt; MetaModelOpts.dim_red_method = 'PCA'
+&gt;&gt;&gt; MetaModelOpts.var_pca_threshold = 99.999  # Option A
+&gt;&gt;&gt; MetaModelOpts.n_pca_components = 12 # Option B
+</code></pre>
+</dd>
+<dt><strong><code>verbose</code></strong> :&ensp;<code>bool</code></dt>
+<dd>Prints summary of the regression results. Default is <code>False</code>.</dd>
+</dl>
+<h2 id="note">Note</h2>
+<p>To define the sampling methods and the training set, an experimental design
+instance shall be defined. This can be done by:</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; MetaModelOpts.add_ExpDesign()
+</code></pre>
+<p>Two experimental design schemes are supported: one-shot (<code>normal</code>) and
+adaptive sequential (<code>sequential</code>) designs.
+For experimental design refer to <code>ExpDesigns</code>.</p></div>
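+<p>For orientation, a minimal end-to-end sketch; <code>Inputs</code> and
+<code>Model</code> are placeholders for user-defined input and model objects,
+not members of this class:</p>
+<pre><code class="language-python-repl">&gt;&gt;&gt; MetaModelOpts = MetaModel(Inputs, meta_model_type='PCE', pce_deg=[1, 2, 3])
+&gt;&gt;&gt; MetaModelOpts.add_ExpDesign()
+&gt;&gt;&gt; MetaModelOpts.ExpDesign.method = 'normal'
+&gt;&gt;&gt; MetaModelOpts.ExpDesign.n_init_samples = 50
+&gt;&gt;&gt; metamodel = MetaModelOpts.create_metamodel(Model)
+&gt;&gt;&gt; mean, std = metamodel.eval_metamodel(nsamples=1000)
+</code></pre>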
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">class MetaModel:
+    &#34;&#34;&#34;
+    Meta (surrogate) model
+
+    This class trains a surrogate model. It accepts an input object (input_obj)
+    containing the specification of the distributions for uncertain parameters
+    and a model object with instructions on how to run the computational model.
+
+    Attributes
+    ----------
+    input_obj : obj
+        Input object with the information on the model input parameters.
+    meta_model_type : str
+        Surrogate model types. Three surrogate model types are supported:
+        polynomial chaos expansion (`PCE`), arbitrary PCE (`aPCE`) and
+        Gaussian process regression (`GPE`). Default is PCE.
+    pce_reg_method : str
+        PCE regression method to compute the coefficients. The following
+        regression methods are available:
+
+        1. OLS: Ordinary Least Squares method
+        2. BRR: Bayesian Ridge Regression
+        3. LARS: Least Angle Regression
+        4. ARD: Bayesian ARD Regression
+        5. FastARD: Fast Bayesian ARD Regression
+        6. VBL: Variational Bayesian Learning
+        7. EBL: Empirical Bayesian Learning
+        Default is `OLS`.
+    pce_deg : int or list of int
+        Polynomial degree(s). If a list is given, an adaptive algorithm is used
+        to find the best degree with the lowest Leave-One-Out cross-validation
+        (LOO) error (or the highest score=1-LOO). Default is `1`.
+    pce_q_norm : float
+        Hyperbolic (or q-norm) truncation for multi-indices of multivariate
+        polynomials. Default is `1.0`.
+    dim_red_method : str
+        Dimensionality reduction method for the output space. The available
+        method is based on principal component analysis (PCA). The Default is
+        `&#39;no&#39;`. There are two ways to select the number of components: use
+        a threshold on the percentage of explained variance (between 0 and 100)
+        (Option A) or direct prescription of components&#39; number (Option B):
+
+            &gt;&gt;&gt; MetaModelOpts.dim_red_method = &#39;PCA&#39;
+            &gt;&gt;&gt; MetaModelOpts.var_pca_threshold = 99.999  # Option A
+            &gt;&gt;&gt; MetaModelOpts.n_pca_components = 12 # Option B
+
+    verbose : bool
+        Prints summary of the regression results. Default is `False`.
+
+    Note
+    -------
+    To define the sampling methods and the training set, an experimental design
+    instance shall be defined. This can be done by:
+
+    &gt;&gt;&gt; MetaModelOpts.add_ExpDesign()
+
+    Two experimental design schemes are supported: one-shot (`normal`) and
+    adaptive sequential (`sequential`) designs.
+    For experimental design refer to `ExpDesigns`.
+
+    &#34;&#34;&#34;
+
+    def __init__(self, input_obj, meta_model_type=&#39;PCE&#39;, pce_reg_method=&#39;OLS&#39;,
+                 pce_deg=1, pce_q_norm=1.0, dim_red_method=&#39;no&#39;,
+                 verbose=False):
+
+        self.input_obj = input_obj
+        self.meta_model_type = meta_model_type
+        self.pce_reg_method = pce_reg_method
+        self.pce_deg = pce_deg
+        self.pce_q_norm = pce_q_norm
+        self.dim_red_method = dim_red_method
+        self.verbose = verbose
+
+    # -------------------------------------------------------------------------
+    def create_metamodel(self, Model):
+        &#34;&#34;&#34;
+        Starts the training of the meta-model for the model object containing
+        the given computational model.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+
+        Returns
+        -------
+        metamodel : obj
+            The meta model object.
+
+        &#34;&#34;&#34;
+        self.ModelObj = Model
+        self.n_params = len(self.input_obj.Marginals)
+        self.ExpDesignFlag = &#39;normal&#39;
+        # --- Prepare pce degree ---
+        if self.meta_model_type.lower() == &#39;pce&#39;:
+            if type(self.pce_deg) is not np.ndarray:
+                self.pce_deg = np.array(self.pce_deg)
+
+        if self.ExpDesign.method == &#39;sequential&#39;:
+            from .sequential_design import SeqDesign
+            seq_design = SeqDesign(self)
+            metamodel = seq_design.train_seq_design(Model)
+
+        elif self.ExpDesign.method == &#39;normal&#39;:
+            self.ExpDesignFlag = &#39;normal&#39;
+            metamodel = self.train_norm_design(Model)
+
+        else:
+            raise Exception(&#34;The method for experimental design you requested&#34;
+                            &#34; has not been implemented yet.&#34;)
+
+        # Zip the model run directories
+        if self.ModelObj.link_type.lower() == &#39;pylink&#39;:
+            Model.zip_subdirs(Model.name, f&#39;{Model.name}_&#39;)
+
+        return metamodel
+
+    # -------------------------------------------------------------------------
+    def train_norm_design(self, Model, verbose=False):
+        &#34;&#34;&#34;
+        This function loops over the outputs and each time step/point and fits
+        the meta model.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+        verbose : bool, optional
+            Flag for a sequential design in silent mode. The default is False.
+
+        Returns
+        -------
+        self: obj
+            Meta-model object.
+
+        &#34;&#34;&#34;
+
+        # Get the collocation points to run the forward model
+        CollocationPoints, OutputDict = self.generate_ExpDesign(Model)
+
+        # Initialize the nested dictionaries
+        self.deg_dict = self.auto_vivification()
+        self.q_norm_dict = self.auto_vivification()
+        self.coeffs_dict = self.auto_vivification()
+        self.basis_dict = self.auto_vivification()
+        self.score_dict = self.auto_vivification()
+        self.clf_poly = self.auto_vivification()
+        self.gp_poly = self.auto_vivification()
+        self.pca = self.auto_vivification()
+        self.LCerror = self.auto_vivification()
+        self.x_scaler = {}
+
+        # Define the DegreeArray
+        nSamples, ndim = CollocationPoints.shape
+        self.DegreeArray = self.__select_degree(ndim, nSamples)
+
+        # Generate all basis indices
+        self.allBasisIndices = self.auto_vivification()
+        for deg in self.DegreeArray:
+            keys = self.allBasisIndices.keys()
+            if deg not in np.fromiter(keys, dtype=float):
+                # Generate the polynomial basis indices
+                for qidx, q in enumerate(self.pce_q_norm):
+                    basis_indices = self.create_basis_indices(degree=deg,
+                                                              q_norm=q)
+                    self.allBasisIndices[str(deg)][str(q)] = basis_indices
+
+        # Evaluate the univariate polynomials on ExpDesign
+        if self.meta_model_type.lower() != &#39;gpe&#39;:
+            self.univ_p_val = self.univ_basis_vals(CollocationPoints)
+
+        if &#39;x_values&#39; in OutputDict:
+            self.ExpDesign.x_values = OutputDict[&#39;x_values&#39;]
+            del OutputDict[&#39;x_values&#39;]
+
+        # --- Loop through data points and fit the surrogate ---
+        if not verbose:
+            print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel &#34;
+                  &#34;started. &lt;&lt;&lt;&lt;\n&#34;)
+            items = tqdm(OutputDict.items(), desc=&#34;Fitting regression&#34;)
+        else:
+            items = OutputDict.items()
+
+        # For loop over the components/outputs
+        for key, Output in items:
+
+            # Dimensionality reduction with PCA, if specified
+            if self.dim_red_method.lower() == &#39;pca&#39;:
+                self.pca[key], target = self.pca_transformation(Output)
+            else:
+                target = Output
+
+            # Parallel fit regression
+            if self.meta_model_type.lower() == &#39;gpe&#39;:
+                # Prepare the input matrix
+                scaler = MinMaxScaler()
+                X_S = scaler.fit_transform(CollocationPoints)
+
+                self.x_scaler[key] = scaler
+
+                out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                    delayed(self.gaussian_process_emulator)(X_S, target[:, idx])
+                    for idx in range(target.shape[1]))
+
+                for idx in range(target.shape[1]):
+                    self.gp_poly[key][f&#34;y_{idx+1}&#34;] = out[idx]
+
+            else:
+                out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                    delayed(self.adaptive_regression)(CollocationPoints,
+                                                      target[:, idx], idx)
+                    for idx in range(target.shape[1]))
+
+                for i in range(target.shape[1]):
+                    # Create a dict to pass the variables
+                    self.deg_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;degree&#39;]
+                    self.q_norm_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;qnorm&#39;]
+                    self.coeffs_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;coeffs&#39;]
+                    self.basis_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;multi_indices&#39;]
+                    self.score_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LOOCVScore&#39;]
+                    self.clf_poly[key][f&#34;y_{i+1}&#34;] = out[i][&#39;clf_poly&#39;]
+                    self.LCerror[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LCerror&#39;]
+
+        if not verbose:
+            print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel&#34;
+                  &#34; successfully completed. &lt;&lt;&lt;&lt;\n&#34;)
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def create_basis_indices(self, degree, q_norm):
+        &#34;&#34;&#34;
+        Creates the set of multi-indices of the multivariate polynomials for
+        the given number of parameters, polynomial degree, and hyperbolic
+        (q-norm) truncation scheme.
+
+        Parameters
+        ----------
+        degree : int
+            Polynomial degree.
+        q_norm : float
+            Hyperbolic (or q-norm) truncation.
+
+        Returns
+        -------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+
+        &#34;&#34;&#34;
+        basis_indices = glexindex(start=0, stop=degree+1,
+                                  dimensions=self.n_params,
+                                  cross_truncation=q_norm,
+                                  reverse=False, graded=True)
+        return basis_indices
+
+    # -------------------------------------------------------------------------
+    def add_ExpDesign(self):
+        &#34;&#34;&#34;
+        Instantiates the experimental design object.
+
+        Returns
+        -------
+        None.
+
+        &#34;&#34;&#34;
+        self.ExpDesign = ExpDesigns(self.input_obj,
+                                    meta_Model=self.meta_model_type)
+
+    # -------------------------------------------------------------------------
+    def generate_ExpDesign(self, Model):
+        &#34;&#34;&#34;
+        Prepares the experimental design either by reading from the prescribed
+        data or running simulations.
+
+        Parameters
+        ----------
+        Model : obj
+            Model object.
+
+        Raises
+        ------
+        Exception
+            If model simulations are not provided properly.
+
+        Returns
+        -------
+        ED_X_tr: array of shape (n_samples, n_params)
+            Training samples transformed by an isoprobabilistic transformation.
+        ED_Y: dict
+            Model simulations (target) for all outputs.
+        &#34;&#34;&#34;
+        ExpDesign = self.ExpDesign
+        if self.ExpDesignFlag != &#39;sequential&#39;:
+            # Read ExpDesign (training and targets) from the provided hdf5
+            if ExpDesign.hdf5_file is not None:
+
+                # Read hdf5 file
+                f = h5py.File(ExpDesign.hdf5_file, &#39;r+&#39;)
+
+                # Read EDX and pass it to ExpDesign object
+                try:
+                    ExpDesign.X = np.array(f[&#34;EDX/New_init_&#34;])
+                except KeyError:
+                    ExpDesign.X = np.array(f[&#34;EDX/init_&#34;])
+
+                # Update number of initial samples
+                ExpDesign.n_init_samples = ExpDesign.X.shape[0]
+
+                # Read EDY and pass it to ExpDesign object
+                out_names = self.ModelObj.Output.names
+                ExpDesign.Y = {}
+
+                # Extract x values
+                try:
+                    ExpDesign.Y[&#34;x_values&#34;] = dict()
+                    for varIdx, var in enumerate(out_names):
+                        x = np.array(f[f&#34;x_values/{var}&#34;])
+                        ExpDesign.Y[&#34;x_values&#34;][var] = x
+                except KeyError:
+                    ExpDesign.Y[&#34;x_values&#34;] = np.array(f[&#34;x_values&#34;])
+
+                # Store the output
+                for varIdx, var in enumerate(out_names):
+                    try:
+                        y = np.array(f[f&#34;EDY/{var}/New_init_&#34;])
+                    except KeyError:
+                        y = np.array(f[f&#34;EDY/{var}/init_&#34;])
+                    ExpDesign.Y[var] = y
+                f.close()
+            else:
+                # Check if an old hdf5 file exists: if yes, rename it
+                hdf5file = f&#39;ExpDesign_{self.ModelObj.name}.hdf5&#39;
+                if os.path.exists(hdf5file):
+                    os.rename(hdf5file, &#39;old_&#39;+hdf5file)
+
+        # ---- Prepare X samples ----
+        ED_X, ED_X_tr = ExpDesign.generate_ED(ExpDesign.n_init_samples,
+                                              ExpDesign.sampling_method,
+                                              transform=True,
+                                              max_pce_deg=np.max(self.pce_deg))
+        ExpDesign.X = ED_X
+        ExpDesign.collocationPoints = ED_X_tr
+        self.bound_tuples = ExpDesign.bound_tuples
+
+        # ---- Run simulations at X ----
+        if not hasattr(ExpDesign, &#39;Y&#39;) or ExpDesign.Y is None:
+            print(&#39;\n Now the forward model needs to be run!\n&#39;)
+            ED_Y, up_ED_X = Model.run_model_parallel(ED_X)
+            ExpDesign.X = up_ED_X
+            self.ModelOutputDict = ED_Y
+            ExpDesign.Y = ED_Y
+        else:
+            # Check if a dict has been passed.
+            if type(ExpDesign.Y) is dict:
+                self.ModelOutputDict = ExpDesign.Y
+            else:
+                raise Exception(&#39;Please provide either a dictionary or an &#39;
+                                &#39;hdf5 file via the ExpDesign.hdf5_file &#39;
+                                &#39;argument.&#39;)
+
+        return ED_X_tr, self.ModelOutputDict
+
+    # -------------------------------------------------------------------------
+    def univ_basis_vals(self, samples, n_max=None):
+        &#34;&#34;&#34;
+        Evaluates univariate regressors along input directions.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            Samples.
+        n_max : int, optional
+            Maximum polynomial degree. The default is `None`.
+
+        Returns
+        -------
+        univ_basis: array of shape (n_samples, n_params, n_max+1)
+            All univariate regressors up to n_max.
+        &#34;&#34;&#34;
+        # Extract information
+        poly_types = self.ExpDesign.poly_types
+        if samples.ndim != 2:
+            samples = samples.reshape(1, len(samples))
+        n_max = np.max(self.pce_deg) if n_max is None else n_max
+
+        # Extract poly coeffs
+        if self.ExpDesign.input_data_given or self.ExpDesign.apce:
+            apolycoeffs = self.ExpDesign.polycoeffs
+        else:
+            apolycoeffs = None
+
+        # Evaluate univariate basis
+        univ_basis = eval_univ_basis(samples, n_max, poly_types, apolycoeffs)
+
+        return univ_basis
+
+    # -------------------------------------------------------------------------
+    def create_psi(self, basis_indices, univ_p_val):
+        &#34;&#34;&#34;
+        Assembles the design matrix Psi from the given basis index set
+        `basis_indices` and the univariate polynomial evaluations `univ_p_val`.
+
+        Parameters
+        ----------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        univ_p_val : array of (n_samples, n_params, n_max+1)
+            All univariate regressors up to `n_max`.
+
+        Raises
+        ------
+        ValueError
+            If the number of parameters in `basis_indices` and `univ_p_val`
+            does not match.
+
+        Returns
+        -------
+        psi : array of shape (n_samples, n_terms)
+            Multivariate regressors.
+
+        &#34;&#34;&#34;
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+        if sparsity:
+            basis_indices = basis_indices.toarray()
+
+        # Initialization and consistency checks
+        # number of input variables
+        n_params = univ_p_val.shape[1]
+
+        # Size of the experimental design
+        n_samples = univ_p_val.shape[0]
+
+        # number of basis terms
+        n_terms = basis_indices.shape[0]
+
+        # check that the variables have consistent sizes
+        if n_params != basis_indices.shape[1]:
+            raise ValueError(&#34;The shapes of basis_indices and univ_p_val don&#39;t&#34;
+                             &#34; match!!&#34;)
+
+        # Preallocate the Psi matrix for performance
+        psi = np.ones((n_samples, n_terms))
+        # Assemble the Psi matrix
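+        # Each column of psi is the product over the input dimensions of the
+        # univariate polynomials evaluated at the degrees prescribed by the
+        # corresponding multi-index; entries with degree zero keep the factor
+        # of 1 from the np.ones initialization above.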
+        for m in range(basis_indices.shape[1]):
+            aa = np.where(basis_indices[:, m] &gt; 0)[0]
+            basisIdx = basis_indices[aa, m]
+            bb = np.reshape(univ_p_val[:, m, basisIdx], psi[:, aa].shape)
+            psi[:, aa] = np.multiply(psi[:, aa], bb)
+
+        return psi
+
+    # -------------------------------------------------------------------------
+    def fit(self, X, y, basis_indices, reg_method=None):
+        &#34;&#34;&#34;
+        Fit regression using the regression method provided.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_features)
+            Training vector, where n_samples is the number of samples and
+            n_features is the number of features.
+        y : array of shape (n_samples,)
+            Target values.
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        reg_method : str, optional
+            Regression method to use. If None, `self.pce_reg_method` is used.
+            The default is None.
+
+        Returns
+        -------
+        return_out_dict : Dict
+            Fitted estimator, spareMulti-Index, sparePsi and coefficients.
+
+        &#34;&#34;&#34;
+        if reg_method is None:
+            reg_method = self.pce_reg_method
+
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+
+        clf_poly = []
+        compute_score = True if self.verbose else False
+
+        # Inverse of the observed variance of the data
+        if np.var(y) != 0:
+            Lambda = 1 / np.var(y)
+        else:
+            Lambda = 1e-6
+
+        # Bayes sparse adaptive aPCE
+        if reg_method.lower() != &#39;ols&#39;:
+            if reg_method.lower() == &#39;brr&#39; or np.var(y) == 0:
+                clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
+                                            fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            alpha_1=1e-04, alpha_2=1e-04,
+                                            lambda_1=Lambda, lambda_2=Lambda)
+                clf_poly.converged = True
+
+            elif reg_method.lower() == &#39;ard&#39;:
+                clf_poly = lm.ARDRegression(fit_intercept=True,
+                                            normalize=True,
+                                            compute_score=compute_score,
+                                            n_iter=1000, tol=0.0001,
+                                            alpha_1=1e-3, alpha_2=1e-3,
+                                            lambda_1=Lambda, lambda_2=Lambda)
+
+            elif reg_method.lower() == &#39;fastard&#39;:
+                clf_poly = RegressionFastARD(fit_intercept=True,
+                                             normalize=True,
+                                             compute_score=compute_score,
+                                             n_iter=300, tol=1e-10)
+
+            elif reg_method.lower() == &#39;bcs&#39;:
+                clf_poly = RegressionFastLaplace(fit_intercept=False,
+                                                 n_iter=1000, tol=1e-7)
+
+            elif reg_method.lower() == &#39;lars&#39;:
+                clf_poly = lm.LassoLarsCV(fit_intercept=False)
+
+            elif reg_method.lower() == &#39;sgdr&#39;:
+                clf_poly = lm.SGDRegressor(fit_intercept=False,
+                                           max_iter=5000, tol=1e-7)
+
+            elif reg_method.lower() == &#39;omp&#39;:
+                clf_poly = lm.OrthogonalMatchingPursuitCV(fit_intercept=False,
+                                                          max_iter=10)
+
+            elif reg_method.lower() == &#39;vbl&#39;:
+                clf_poly = VBLinearRegression(fit_intercept=False)
+
+            elif reg_method.lower() == &#39;ebl&#39;:
+                clf_poly = EBLinearRegression(optimizer=&#39;em&#39;)
+
+            # Fit
+            clf_poly.fit(X, y)
+
+            # Select the nonzero entries of coefficients
+            # The first column must be kept (For mean calculations)
+            nnz_idx = np.nonzero(clf_poly.coef_)[0]
+
+            if len(nnz_idx) == 0 or nnz_idx[0] != 0:
+                nnz_idx = np.insert(np.nonzero(clf_poly.coef_)[0], 0, 0)
+                # Remove the zero entries for Bases and PSI if need be
+                if sparsity:
+                    sparse_basis_indices = basis_indices.toarray()[nnz_idx]
+                else:
+                    sparse_basis_indices = basis_indices[nnz_idx]
+                sparse_X = X[:, nnz_idx]
+
+                # Store the coefficients of the regression model
+                clf_poly.fit(sparse_X, y)
+                coeffs = clf_poly.coef_
+            else:
+                # This is for the case where all outputs are zero, thereby
+                # all coefficients are zero
+                if sparsity:
+                    sparse_basis_indices = basis_indices.toarray()
+                else:
+                    sparse_basis_indices = basis_indices
+                sparse_X = X
+                coeffs = clf_poly.coef_
+
+        # Ordinary least square method (OLS)
+        else:
+            if sparsity:
+                sparse_basis_indices = basis_indices.toarray()
+            else:
+                sparse_basis_indices = basis_indices
+            sparse_X = X
+
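+            # Solve the normal equations (Psi^T Psi) c = Psi^T y; fall back to
+            # the pseudo-inverse when Psi^T Psi is ill-conditioned.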
+            X_T_X = np.dot(sparse_X.T, sparse_X)
+
+            if np.linalg.cond(X_T_X) &gt; 1e-12 and \
+               np.linalg.cond(X_T_X) &lt; 1 / sys.float_info.epsilon:
+                # faster
+                coeffs = sp.linalg.solve(X_T_X, np.dot(sparse_X.T, y))
+            else:
+                # stabler
+                coeffs = np.dot(np.dot(np.linalg.pinv(X_T_X), sparse_X.T), y)
+
+        # Create a dict to pass the outputs
+        return_out_dict = dict()
+        return_out_dict[&#39;clf_poly&#39;] = clf_poly
+        return_out_dict[&#39;spareMulti-Index&#39;] = sparse_basis_indices
+        return_out_dict[&#39;sparePsi&#39;] = sparse_X
+        return_out_dict[&#39;coeffs&#39;] = coeffs
+        return return_out_dict
+
+    # --------------------------------------------------------------------------------------------------------
+    def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
+        &#34;&#34;&#34;
+        Adaptively fits the PCE model by comparing the scores of different
+        degrees and q-norm.
+
+        Parameters
+        ----------
+        ED_X : array of shape (n_samples, n_params)
+            Experimental design.
+        ED_Y : array of shape (n_samples,)
+            Target values, i.e. simulation results for the Experimental design.
+        varIdx : int
+            Index of the output.
+        verbose : bool, optional
+            Print out summary. The default is False.
+
+        Returns
+        -------
+        returnVars : Dict
+            Fitted estimator, best degree, best q-norm, LOOCVScore and
+            coefficients.
+
+        &#34;&#34;&#34;
+
+        NrSamples, n_params = ED_X.shape
+        # Initialization
+        qAllCoeffs, AllCoeffs = {}, {}
+        qAllIndices_Sparse, AllIndices_Sparse = {}, {}
+        qAllclf_poly, Allclf_poly = {}, {}
+        qAllnTerms, AllnTerms = {}, {}
+        qAllLCerror, AllLCerror = {}, {}
+        AllqNorms = {}
+
+        # Extract degree array and qnorm array
+        DegreeArray = np.array([*self.allBasisIndices], dtype=int)
+        qnorm = [*self.allBasisIndices[str(int(DegreeArray[0]))]]
+
+        # Some options for EarlyStop
+        errorIncreases = False
+        # Stop degree, if LOO error does not decrease n_checks_degree times
+        n_checks_degree = 3
+        # Stop qNorm, if criterion isn&#39;t fulfilled n_checks_qNorm times
+        n_checks_qNorm = 2
+        nqnorms = len(qnorm)
+        qNormEarlyStop = True
+        if nqnorms &lt; n_checks_qNorm+1:
+            qNormEarlyStop = False
+
+        # =====================================================================
+        # basis adaptive polynomial chaos: repeat the calculation by increasing
+        # polynomial degree until the highest accuracy is reached
+        # =====================================================================
+        # For each degree check all q-norms and choose the best one
+        scores = -np.inf * np.ones(DegreeArray.shape[0])
+        qNormScores = -np.inf * np.ones(nqnorms)
+
+        for degIdx, deg in enumerate(DegreeArray):
+
+            for qidx, q in enumerate(qnorm):
+
+                # Extract the polynomial basis indices from the pool of
+                # allBasisIndices
+                BasisIndices = self.allBasisIndices[str(deg)][str(q)]
+
+                # Assemble the Psi matrix
+                Psi = self.create_psi(BasisIndices, self.univ_p_val)
+
+                # Calculate the coefficients of the meta model
+                outs = self.fit(Psi, ED_Y, BasisIndices)
+
+                # Calculate and save the score of LOOCV
+                score, LCerror = self.corr_loocv_error(outs[&#39;clf_poly&#39;],
+                                                       outs[&#39;sparePsi&#39;],
+                                                       outs[&#39;coeffs&#39;],
+                                                       ED_Y)
+
+                # Check the convergence of noise for FastARD
+                if self.pce_reg_method == &#39;FastARD&#39; and \
+                   outs[&#39;clf_poly&#39;].alpha_ &lt; np.finfo(np.float32).eps:
+                    score = -np.inf
+
+                qNormScores[qidx] = score
+                qAllCoeffs[str(qidx+1)] = outs[&#39;coeffs&#39;]
+                qAllIndices_Sparse[str(qidx+1)] = outs[&#39;spareMulti-Index&#39;]
+                qAllclf_poly[str(qidx+1)] = outs[&#39;clf_poly&#39;]
+                qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
+                qAllLCerror[str(qidx+1)] = LCerror
+
+                # EarlyStop check
+                # if there are at least n_checks_qNorm entries after the
+                # best one, we stop
+                if qNormEarlyStop and \
+                   sum(np.isfinite(qNormScores)) &gt; n_checks_qNorm:
+                    # If the error has increased the last two iterations, stop!
+                    qNormScores_nonInf = qNormScores[np.isfinite(qNormScores)]
+                    deltas = np.sign(np.diff(qNormScores_nonInf))
+                    if sum(deltas[-n_checks_qNorm+1:]) == 2:
+                        # stop the q-norm loop here
+                        break
+                if np.var(ED_Y) == 0:
+                    break
+
+            # Store the score in the scores list
+            best_q = np.nanargmax(qNormScores)
+            scores[degIdx] = qNormScores[best_q]
+
+            AllCoeffs[str(degIdx+1)] = qAllCoeffs[str(best_q+1)]
+            AllIndices_Sparse[str(degIdx+1)] = qAllIndices_Sparse[str(best_q+1)]
+            Allclf_poly[str(degIdx+1)] = qAllclf_poly[str(best_q+1)]
+            AllnTerms[str(degIdx+1)] = qAllnTerms[str(best_q+1)]
+            AllLCerror[str(degIdx+1)] = qAllLCerror[str(best_q+1)]
+            AllqNorms[str(degIdx+1)] = float(qnorm[best_q])
+
+            # Check the direction of the error (on average):
+            # if it increases consistently stop the iterations
+            if len(scores[scores != -np.inf]) &gt; n_checks_degree:
+                scores_nonInf = scores[scores != -np.inf]
+                ss = np.sign(scores_nonInf - np.max(scores_nonInf))
+                # ss &lt; 0 marks degrees whose score fell below the best one;
+                # stop if the last n_checks_degree degrees were all worse
+                errorIncreases = \
+                    np.sum(ss[-n_checks_degree:]) &lt;= -n_checks_degree
+
+            if errorIncreases:
+                break
+
+            # Check only one degree, if target matrix has zero variance
+            if np.var(ED_Y) == 0:
+                break
+
+        # ------------------ Summary of results ------------------
+        # Select the one with the best score and save the necessary outputs
+        best_deg = np.nanargmax(scores)+1
+        coeffs = AllCoeffs[str(best_deg)]
+        basis_indices = AllIndices_Sparse[str(best_deg)]
+        clf_poly = Allclf_poly[str(best_deg)]
+        LOOCVScore = np.nanmax(scores)
+        P = AllnTerms[str(best_deg)]
+        LCerror = AllLCerror[str(best_deg)]
+        degree = DegreeArray[np.nanargmax(scores)]
+        # Use the q-norm that was selected for the best degree; best_q alone
+        # only refers to the last degree that was checked in the loop
+        qnorm = AllqNorms[str(best_deg)]
+
+        # ------------------ Print out Summary of results ------------------
+        if self.verbose:
+            # Create PSI_Sparse by removing redundant terms
+            nnz_idx = np.nonzero(coeffs)[0]
+            BasisIndices_Sparse = basis_indices[nnz_idx]
+
+            print(f&#39;Output variable {varIdx+1}:&#39;)
+            print(&#39;The estimation of PCE coefficients converged at polynomial &#39;
+                  f&#39;degree {DegreeArray[best_deg-1]} with &#39;
+                  f&#39;{len(BasisIndices_Sparse)} terms (Sparsity index = &#39;
+                  f&#39;{round(len(BasisIndices_Sparse)/P, 3)}).&#39;)
+
+            print(f&#39;Final ModLOO error estimate: {1-max(scores):.3e}&#39;)
+            print(&#39;\n&#39;+&#39;-&#39;*50)
+
+        if verbose:
+            print(&#39;=&#39;*50)
+            print(&#39; &#39;*10 + &#39; Summary of results &#39;)
+            print(&#39;=&#39;*50)
+
+            print(&#34;scores:\n&#34;, scores)
+            print(&#34;Best score&#39;s degree:&#34;, self.DegreeArray[best_deg-1])
+            print(&#34;NO. of terms:&#34;, len(basis_indices))
+            print(&#34;Sparsity index:&#34;, round(len(basis_indices)/P, 3))
+            print(&#34;Best Indices:\n&#34;, basis_indices)
+
+            if self.pce_reg_method in [&#39;BRR&#39;, &#39;ARD&#39;]:
+                fig, ax = plt.subplots(figsize=(12, 10))
+                plt.title(&#34;Marginal log-likelihood&#34;)
+                plt.plot(clf_poly.scores_, color=&#39;navy&#39;, linewidth=2)
+                plt.ylabel(&#34;Score&#34;)
+                plt.xlabel(&#34;Iterations&#34;)
+                if self.pce_reg_method.lower() == &#39;brr&#39;:
+                    text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                            f&#34;$\\lambda={clf_poly.lambda_:.3f}$\n&#34;
+                            f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+                else:
+                    text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                            f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+
+                plt.text(0.75, 0.5, text, fontsize=18, transform=ax.transAxes)
+                plt.show()
+            print(&#39;=&#39;*80)
+
+        # Create a dict to pass the outputs
+        returnVars = dict()
+        returnVars[&#39;clf_poly&#39;] = clf_poly
+        returnVars[&#39;degree&#39;] = degree
+        returnVars[&#39;qnorm&#39;] = qnorm
+        returnVars[&#39;coeffs&#39;] = coeffs
+        returnVars[&#39;multi_indices&#39;] = basis_indices
+        returnVars[&#39;LOOCVScore&#39;] = LOOCVScore
+        returnVars[&#39;LCerror&#39;] = LCerror
+
+        return returnVars
+
+    # -------------------------------------------------------------------------
+    def corr_loocv_error(self, clf, psi, coeffs, y):
+        &#34;&#34;&#34;
+        Calculates the corrected LOO error for regression on regressor
+        matrix `psi` that generated the coefficients based on [1] and [2].
+
+        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+            uncertainty propagation and sensitivity analysis (Doctoral
+            dissertation, Clermont-Ferrand 2).
+
+        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+            expansion based on least angle regression. Journal of computational
+            Physics, 230(6), pp.2345-2367.
+
+        Parameters
+        ----------
+        clf : object
+            Fitted estimator.
+        psi : array of shape (n_samples, n_features)
+            The multivariate orthogonal polynomials (regressor).
+        coeffs : array-like of shape (n_features,)
+            Estimated coefficients.
+        y : array of shape (n_samples,)
+            Target values.
+
+        Returns
+        -------
+        Q_2 : float
+            LOOCV validation score (1 - LOOCV error).
+        residual : array of shape (n_samples,)
+            Residual values (y - predicted targets).
+
+        &#34;&#34;&#34;
+        psi = np.array(psi, dtype=float)
+
+        # Create PSI_Sparse by removing redundant terms
+        nnz_idx = np.nonzero(coeffs)[0]
+        if len(nnz_idx) == 0:
+            nnz_idx = [0]
+        psi_sparse = psi[:, nnz_idx]
+
+        # NrCoeffs of aPCEs
+        P = len(nnz_idx)
+        # NrEvaluation (Size of experimental design)
+        N = psi.shape[0]
+
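+        # Closed-form leave-one-out for linear regression: the LOO residual at
+        # point i is residual_i / (1 - h_i), where h_i is the i-th diagonal
+        # entry of the hat matrix Psi (Psi^T Psi)^{-1} Psi^T.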
+        # Build the projection matrix
+        PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
+
+        if np.linalg.cond(PsiTPsi) &gt; 1e-12 and \
+           np.linalg.cond(PsiTPsi) &lt; 1/sys.float_info.epsilon:
+            # faster
+            M = sp.linalg.solve(PsiTPsi,
+                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+        else:
+            # stabler
+            M = np.linalg.pinv(PsiTPsi)
+
+        # h factor (the full matrix is not calculated explicitly,
+        # only the trace is, to save memory)
+        PsiM = np.dot(psi_sparse, M)
+
+        h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.float128)
+
+        # ------ Calculate Error Loocv for each measurement point ----
+        # Residuals
+        if isinstance(clf, list):
+            residual = np.dot(psi, coeffs) - y
+        else:
+            residual = clf.predict(psi) - y
+
+        # Variance
+        varY = np.var(y)
+
+        if varY == 0:
+            normEmpErr = 0
+            ErrLoo = 0
+            LCerror = np.zeros((y.shape))
+        else:
+            normEmpErr = np.mean(residual**2)/varY
+
+            # LCerror = np.divide(residual, (1-h))
+            LCerror = residual / (1-h)[:, np.newaxis]
+            ErrLoo = np.mean(np.square(LCerror)) / varY
+            # if there are NaNs, just return an infinite LOO error (this
+            # happens, e.g., when a strongly underdetermined problem is solved)
+            if np.isnan(ErrLoo):
+                ErrLoo = np.inf
+
+        # Corrected Error for over-determined system
+        trM = np.trace(M)
+        if trM &lt; 0 or abs(trM) &gt; 1e6:
+            trM = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
+
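+        # Corrected LOO error of Blatman (2009), ref. [1] above: multiply the
+        # LOO error by T = N / (N - P) * (1 + tr[(Psi^T Psi)^{-1}]).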
+        # Over-determined system of equations
+        if N &gt; P:
+            T_factor = N/(N-P) * (1 + trM)
+
+        # Under-determined system of equations
+        else:
+            T_factor = np.inf
+
+        CorrectedErrLoo = ErrLoo * T_factor
+
+        Q_2 = 1 - CorrectedErrLoo
+
+        return Q_2, residual
+
+    # -------------------------------------------------------------------------
+    def pca_transformation(self, Output):
+        &#34;&#34;&#34;
+        Transforms the targets (outputs) via Principal Component Analysis.
+
+        Parameters
+        ----------
+        Output : array of shape (n_samples, n_features)
+            Target values.
+
+        Returns
+        -------
+        pca : obj
+            Fitted sklearnPCA object.
+        OutputMatrix : array of shape (n_samples, n_pca_components)
+            Transformed target values.
+
+        &#34;&#34;&#34;
+        # Transform via Principal Component Analysis
+        if hasattr(self, &#39;var_pca_threshold&#39;):
+            var_pca_threshold = self.var_pca_threshold
+        else:
+            var_pca_threshold = 100.0
+        n_samples, n_features = Output.shape
+
+        if hasattr(self, &#39;n_pca_components&#39;):
+            n_pca_components = self.n_pca_components
+        else:
+            # Instantiate and fit sklearnPCA object
+            covar_matrix = sklearnPCA(n_components=None)
+            covar_matrix.fit(Output)
+            var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_,
+                                     decimals=5)*100)
+            # Find the number of components needed to explain the
+            # var_pca_threshold percentage of the variance
+            try:
+                n_components = np.where(var &gt;= var_pca_threshold)[0][0] + 1
+            except IndexError:
+                n_components = min(n_samples, n_features)
+
+            n_pca_components = min(n_samples, n_features, n_components)
+
+        # Print out a report
+        print()
+        print(&#39;-&#39; * 50)
+        print(f&#34;PCA transformation is performed with {n_pca_components}&#34;
+              &#34; components.&#34;)
+        print(&#39;-&#39; * 50)
+        print()
+
+        # Fit and transform with the selected number of components
+        pca = sklearnPCA(n_components=n_pca_components,
+                         svd_solver=&#39;randomized&#39;)
+        OutputMatrix = pca.fit_transform(Output)
+
+        return pca, OutputMatrix
+
+    # -------------------------------------------------------------------------
+    def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
+                                  varIdx=None):
+        &#34;&#34;&#34;
+        Fits a Gaussian Process Emulator to the target given the training
+        points.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_params)
+            Training points.
+        y : array of shape (n_samples,)
+            Target values.
+        nug_term : float, optional
+            Nugget term. The default is None, i.e. variance of y.
+        autoSelect : bool, optional
+            Loop over some kernels and select the best. The default is False.
+        varIdx : int, optional
+            The index number. The default is None.
+
+        Returns
+        -------
+        gp : object
+            Fitted estimator.
+
+        &#34;&#34;&#34;
+
+        nug_term = nug_term if nug_term else np.var(y)
+
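+        # Candidate covariance kernels (RBF, rational quadratic, Matern 1.5),
+        # each scaled by the output variance used as a nugget term; with
+        # autoSelect=True the kernel with the highest log marginal likelihood
+        # (stored as a BME score below) is kept.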
+        Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                          length_scale_bounds=(1e-25, 1e15)),
+                   nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                        alpha=1.0),
+                   nug_term * kernels.Matern(length_scale=1.0,
+                                             length_scale_bounds=(1e-15, 1e5),
+                                             nu=1.5)]
+
+        # Automatic selection of the kernel
+        if autoSelect:
+            gp = {}
+            BME = []
+            for i, kernel in enumerate(Kernels):
+                gp[i] = GaussianProcessRegressor(kernel=kernel,
+                                                 n_restarts_optimizer=3,
+                                                 normalize_y=False)
+
+                # Fit to data using Maximum Likelihood Estimation
+                gp[i].fit(X, y)
+
+                # Store the MLE as BME score
+                BME.append(gp[i].log_marginal_likelihood())
+
+            gp = gp[np.argmax(BME)]
+
+        else:
+            gp = GaussianProcessRegressor(kernel=Kernels[0],
+                                          n_restarts_optimizer=3,
+                                          normalize_y=False)
+            gp.fit(X, y)
+
+        # Compute score
+        if varIdx is not None:
+            Score = gp.score(X, y)
+            print(&#39;-&#39;*50)
+            print(f&#39;Output variable {varIdx}:&#39;)
+            print(&#39;The estimation of GPE coefficients converged,&#39;)
+            print(f&#39;with the R^2 score: {Score:.3f}&#39;)
+            print(&#39;-&#39;*50)
+
+        return gp
+
+    # -------------------------------------------------------------------------
+    def eval_metamodel(self, samples=None, nsamples=None,
+                       sampling_method=&#39;random&#39;, return_samples=False):
+        &#34;&#34;&#34;
+        Evaluates the meta-model at the requested samples or, if none are
+        given, at `nsamples` newly generated samples.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate meta-model at. The default is None.
+        nsamples : int, optional
+            Number of samples to generate, if no `samples` is provided. The
+            default is None.
+        sampling_method : str, optional
+            Type of sampling, if no `samples` is provided. The default is
+            &#39;random&#39;.
+        return_samples : bool, optional
+            Return samples, if no `samples` is provided. The default is False.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean of the predictions.
+        std_pred : dict
+            Standard deviation of the predictions.
+        &#34;&#34;&#34;
+        if self.meta_model_type.lower() == &#39;gpe&#39;:
+            model_dict = self.gp_poly
+        else:
+            model_dict = self.coeffs_dict
+
+        if samples is None:
+            if nsamples is None:
+                self.n_samples = 100000
+            else:
+                self.n_samples = nsamples
+
+            samples = self.ExpDesign.generate_samples(self.n_samples,
+                                                      sampling_method)
+        else:
+            self.samples = samples
+            self.n_samples = len(samples)
+
+        # Transform samples
+        samples = self.ExpDesign.transform(samples)
+
+        if self.meta_model_type.lower() != &#39;gpe&#39;:
+            univ_p_val = self.univ_basis_vals(samples,
+                                              n_max=np.max(self.pce_deg))
+
+        mean_pred = {}
+        std_pred = {}
+
+        # Loop over outputs
+        for ouput, values in model_dict.items():
+
+            mean = np.zeros((len(samples), len(values)))
+            std = np.zeros((len(samples), len(values)))
+            idx = 0
+            for in_key, InIdxValues in values.items():
+
+                # Prediction with GPE
+                if self.meta_model_type.lower() == &#39;gpe&#39;:
+                    X_T = self.x_scaler[ouput].transform(samples)
+                    gp = self.gp_poly[ouput][in_key]
+                    y_mean, y_std = gp.predict(X_T, return_std=True)
+
+                else:
+                    # Prediction with PCE or PCE-Kriging
+                    # Assemble the Psi matrix
+                    psi = self.create_psi(self.basis_dict[ouput][in_key],
+                                          univ_p_val)
+                    # Prediction
+                    try:
+                        # with error bar
+                        clf_poly = self.clf_poly[ouput][in_key]
+                        y_mean, y_std = clf_poly.predict(psi, return_std=True)
+
+                    except (AttributeError, TypeError):
+                        # without error bar
+                        coeffs = self.coeffs_dict[ouput][in_key]
+                        y_mean = np.dot(psi, coeffs)
+                        y_std = np.zeros_like(y_mean)
+
+                mean[:, idx] = y_mean
+                std[:, idx] = y_std
+                idx += 1
+
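+            # Map the PCA-space predictions back to the original output space;
+            # the standard deviations are propagated through the linear PCA
+            # map assuming independent errors across principal components.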
+            if self.dim_red_method.lower() == &#39;pca&#39;:
+                PCA = self.pca[ouput]
+                mean_pred[ouput] = PCA.mean_ + np.dot(mean, PCA.components_)
+                std_pred[ouput] = np.sqrt(np.dot(std**2, PCA.components_**2))
+            else:
+                mean_pred[ouput] = mean
+                std_pred[ouput] = std
+
+        if return_samples:
+            return mean_pred, std_pred, samples
+        else:
+            return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    def create_model_error(self, X, y, name=&#39;Calib&#39;):
+        &#34;&#34;&#34;
+        Fits a GPE-based model error.
+
+        Parameters
+        ----------
+        X : array of shape (n_outputs, n_inputs)
+            Input array. It can contain any forcing inputs or coordinates of
+            extracted data.
+        y : array of shape (n_outputs,)
+            The model response for the MAP parameter set.
+        name : str, optional
+            Calibration or validation. The default is `&#39;Calib&#39;`.
+
+        Returns
+        -------
+        self: object
+            Self object.
+
+        &#34;&#34;&#34;
+        Model = self.ModelObj
+        outputNames = Model.Output.Names
+        self.errorRegMethod = &#39;GPE&#39;
+        self.errorclf_poly = self.auto_vivification()
+        self.errorScale = self.auto_vivification()
+
+        # Read data
+        MeasuredData = Model.read_observation(case=name)
+
+        # Fitting GPR based bias model
+        for out in outputNames:
+            nan_idx = ~np.isnan(MeasuredData[out])
+            # Select data
+            try:
+                data = MeasuredData[out].values[nan_idx]
+            except AttributeError:
+                data = MeasuredData[out][nan_idx]
+
+            # Prepare the input matrix
+            scaler = MinMaxScaler()
+            delta = data  # - y[out][0]
+            BiasInputs = np.hstack((X[out], y[out].reshape(-1, 1)))
+            X_S = scaler.fit_transform(BiasInputs)
+            gp = self.gaussian_process_emulator(X_S, delta)
+
+            self.errorScale[out][&#34;y_1&#34;] = scaler
+            self.errorclf_poly[out][&#34;y_1&#34;] = gp
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def eval_model_error(self, X, y_pred):
+        &#34;&#34;&#34;
+        Evaluates the error model.
+
+        Parameters
+        ----------
+        X : array
+            Inputs.
+        y_pred : dict
+            Predictions.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean prediction of the GPE-based error model.
+        std_pred : dict
+            Standard deviation of the GPE-based error model.
+
+        &#34;&#34;&#34;
+        mean_pred = {}
+        std_pred = {}
+
+        for Outkey, ValuesDict in self.errorclf_poly.items():
+
+            pred_mean = np.zeros_like(y_pred[Outkey])
+            pred_std = np.zeros_like(y_pred[Outkey])
+
+            for Inkey, InIdxValues in ValuesDict.items():
+
+                gp = self.errorclf_poly[Outkey][Inkey]
+                scaler = self.errorScale[Outkey][Inkey]
+
+                # Transform Samples using scaler
+                for j, pred in enumerate(y_pred[Outkey]):
+                    BiasInputs = np.hstack((X[Outkey], pred.reshape(-1, 1)))
+                    Samples_S = scaler.transform(BiasInputs)
+                    y_hat, y_std = gp.predict(Samples_S, return_std=True)
+                    pred_mean[j] = y_hat
+                    pred_std[j] = y_std
+                    # pred_mean[j] += pred
+
+            mean_pred[Outkey] = pred_mean
+            std_pred[Outkey] = pred_std
+
+        return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    class auto_vivification(dict):
+        &#34;&#34;&#34;
+        Implementation of Perl&#39;s autovivification feature.
+
+        Source: https://stackoverflow.com/a/651879/18082457
+        &#34;&#34;&#34;
+
+        def __getitem__(self, item):
+            try:
+                return dict.__getitem__(self, item)
+            except KeyError:
+                value = self[item] = type(self)()
+                return value
+
+    # -------------------------------------------------------------------------
+    def __select_degree(self, ndim, nSamples):
+        &#34;&#34;&#34;
+        Selects the polynomial degree based on the number of samples and
+        parameters in the sequential design.
+
+        Parameters
+        ----------
+        ndim : int
+            Number of uncertain input parameters.
+        nSamples : int
+            Number of samples in the experimental design.
+
+        Returns
+        -------
+        DegreeArray : array
+            Array of polynomial degrees to be examined.
+
+        &#34;&#34;&#34;
+        # Define the DegreeArray
+        max_deg = np.max(self.pce_deg)
+        min_Deg = np.min(self.pce_deg)
+        nitr = nSamples - self.ExpDesign.n_init_samples
+
+        # Check q-norm
+        if not np.isscalar(self.pce_q_norm):
+            self.pce_q_norm = np.array(self.pce_q_norm)
+        else:
+            self.pce_q_norm = np.array([self.pce_q_norm])
+
+        def M_uptoMax(maxDeg):
+            n_combo = np.zeros(maxDeg)
+            for i, d in enumerate(range(1, maxDeg+1)):
+                n_combo[i] = math.factorial(ndim+d)
+                n_combo[i] /= math.factorial(ndim) * math.factorial(d)
+            return n_combo
+
+        if self.ExpDesignFlag != &#39;sequential&#39;:
+            degNew = max_deg
+        else:
+            d = nitr if nitr != 0 and self.n_params &gt; 5 else 1
+            min_index = np.argmin(abs(M_uptoMax(max_deg)-ndim*nSamples*d))
+            degNew = range(1, max_deg+1)[min_index]
+
+        if degNew &gt; min_Deg and self.pce_reg_method.lower() != &#39;fastard&#39;:
+            DegreeArray = np.arange(min_Deg, degNew+1)
+        else:
+            DegreeArray = np.array([degNew])
+
+        return DegreeArray</code></pre>
+</details>
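+<p>As a side note on <code>__select_degree</code>: the helper <code>M_uptoMax</code> counts the
+number of polynomial terms of a total-degree basis. A minimal, self-contained sketch of
+that count (an illustration, not part of the library API):</p>
+<pre><code class="python"># Number of PCE terms for ndim parameters and total degree d:
+# (ndim + d)! / (ndim! * d!)
+import math
+
+def n_terms(ndim, d):
+    return math.factorial(ndim + d) // (math.factorial(ndim) * math.factorial(d))
+
+print(n_terms(5, 3))  # 56 candidate basis terms for 5 parameters, degree 3</code></pre>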
+<h3>Class variables</h3>
+<dl>
+<dt id="surrogate_models.MetaModel.auto_vivification"><code class="name">var <span class="ident">auto_vivification</span></code></dt>
+<dd>
+<div class="desc"><p>Implementation of perl's AutoVivification feature.</p>
+<p>Source: <a href="https://stackoverflow.com/a/651879/18082457">https://stackoverflow.com/a/651879/18082457</a></p></div>
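+<p>A minimal usage sketch of the helper (nested keys are created on first access),
+assuming <code>MetaModel</code> has been imported from <code>surrogate_models</code>:</p>
+<pre><code class="python">d = MetaModel.auto_vivification()
+d[&#39;Z&#39;][&#39;y_1&#39;] = 0.5   # the intermediate dict is created automatically
+print(d)              # {&#39;Z&#39;: {&#39;y_1&#39;: 0.5}}</code></pre>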
+</dd>
+</dl>
+<h3>Methods</h3>
+<dl>
+<dt id="surrogate_models.MetaModel.create_metamodel"><code class="name flex">
+<span>def <span class="ident">create_metamodel</span></span>(<span>self, Model)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Starts the training of the meta-model for the model objects containg
+the given computational model.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>Model</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Model object.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>metamodel</code></strong> :&ensp;<code>obj</code></dt>
+<dd>The meta model object.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def create_metamodel(self, Model):
+    &#34;&#34;&#34;
+    Starts the training of the meta-model for the given computational
+    model.
+
+    Parameters
+    ----------
+    Model : obj
+        Model object.
+
+    Returns
+    -------
+    metamodel : obj
+        The meta model object.
+
+    &#34;&#34;&#34;
+    self.ModelObj = Model
+    self.n_params = len(self.input_obj.Marginals)
+    self.ExpDesignFlag = &#39;normal&#39;
+    # --- Prepare pce degree ---
+    if self.meta_model_type.lower() == &#39;pce&#39;:
+        if type(self.pce_deg) is not np.ndarray:
+            self.pce_deg = np.array(self.pce_deg)
+
+    if self.ExpDesign.method == &#39;sequential&#39;:
+        from .sequential_design import SeqDesign
+        seq_design = SeqDesign(self)
+        metamodel = seq_design.train_seq_design(Model)
+
+    elif self.ExpDesign.method == &#39;normal&#39;:
+        self.ExpDesignFlag = &#39;normal&#39;
+        metamodel = self.train_norm_design(Model)
+
+    else:
+        raise Exception(&#34;The method for experimental design you requested&#34;
+                        &#34; has not been implemented yet.&#34;)
+
+    # Zip the model run directories
+    if self.ModelObj.link_type.lower() == &#39;pylink&#39;:
+        Model.zip_subdirs(Model.name, f&#39;{Model.name}_&#39;)
+
+    return metamodel</code></pre>
+</details>
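+<p>A hypothetical usage sketch, assuming <code>meta_model</code> is a configured
+<code>MetaModel</code> instance and <code>model</code> a compatible model object; the
+attribute names mirror the source above, while the sample budget and sampling-method
+string are placeholders:</p>
+<pre><code class="python"># Hypothetical setup; `model` and the input marginals are assumed to exist.
+meta_model.add_ExpDesign()                      # attach an ExpDesigns object
+meta_model.ExpDesign.method = &#39;normal&#39;          # one-shot (non-sequential) design
+meta_model.ExpDesign.n_init_samples = 50        # assumed sample budget
+meta_model.ExpDesign.sampling_method = &#39;latin_hypercube&#39;  # assumed method name
+surrogate = meta_model.create_metamodel(model)</code></pre>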
+</dd>
+<dt id="surrogate_models.MetaModel.train_norm_design"><code class="name flex">
+<span>def <span class="ident">train_norm_design</span></span>(<span>self, Model, verbose=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This function loops over the outputs and each time step/point and fits
+the meta model.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>Model</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Model object.</dd>
+<dt><strong><code>verbose</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Flag for a sequential design in silent mode. The default is False.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>self</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Meta-model object.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def train_norm_design(self, Model, verbose=False):
+    &#34;&#34;&#34;
+    This function loops over the outputs and each time step/point and fits
+    the meta model.
+
+    Parameters
+    ----------
+    Model : obj
+        Model object.
+    verbose : bool, optional
+        Flag for a sequential design in silent mode. The default is False.
+
+    Returns
+    -------
+    self: obj
+        Meta-model object.
+
+    &#34;&#34;&#34;
+
+    # Get the collocation points to run the forward model
+    CollocationPoints, OutputDict = self.generate_ExpDesign(Model)
+
+    # Initialize the nested dictionaries
+    self.deg_dict = self.auto_vivification()
+    self.q_norm_dict = self.auto_vivification()
+    self.coeffs_dict = self.auto_vivification()
+    self.basis_dict = self.auto_vivification()
+    self.score_dict = self.auto_vivification()
+    self.clf_poly = self.auto_vivification()
+    self.gp_poly = self.auto_vivification()
+    self.pca = self.auto_vivification()
+    self.LCerror = self.auto_vivification()
+    self.x_scaler = {}
+
+    # Define the DegreeArray
+    nSamples, ndim = CollocationPoints.shape
+    self.DegreeArray = self.__select_degree(ndim, nSamples)
+
+    # Generate all basis indices
+    self.allBasisIndices = self.auto_vivification()
+    for deg in self.DegreeArray:
+        keys = self.allBasisIndices.keys()
+        if deg not in np.fromiter(keys, dtype=float):
+            # Generate the polynomial basis indices
+            for qidx, q in enumerate(self.pce_q_norm):
+                basis_indices = self.create_basis_indices(degree=deg,
+                                                          q_norm=q)
+                self.allBasisIndices[str(deg)][str(q)] = basis_indices
+
+    # Evaluate the univariate polynomials on ExpDesign
+    if self.meta_model_type.lower() != &#39;gpe&#39;:
+        self.univ_p_val = self.univ_basis_vals(CollocationPoints)
+
+    if &#39;x_values&#39; in OutputDict:
+        self.ExpDesign.x_values = OutputDict[&#39;x_values&#39;]
+        del OutputDict[&#39;x_values&#39;]
+
+    # --- Loop through data points and fit the surrogate ---
+    if not verbose:
+        print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel &#34;
+              &#34;started. &lt;&lt;&lt;&lt;\n&#34;)
+        items = tqdm(OutputDict.items(), desc=&#34;Fitting regression&#34;)
+    else:
+        items = OutputDict.items()
+
+    # For loop over the components/outputs
+    for key, Output in items:
+
+        # Dimensionality reduction with PCA, if specified
+        if self.dim_red_method.lower() == &#39;pca&#39;:
+            self.pca[key], target = self.pca_transformation(Output)
+        else:
+            target = Output
+
+        # Parallel fit regression
+        if self.meta_model_type.lower() == &#39;gpe&#39;:
+            # Prepare the input matrix
+            scaler = MinMaxScaler()
+            X_S = scaler.fit_transform(CollocationPoints)
+
+            self.x_scaler[key] = scaler
+
+            out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                delayed(self.gaussian_process_emulator)(X_S, target[:, idx])
+                for idx in range(target.shape[1]))
+
+            for idx in range(target.shape[1]):
+                self.gp_poly[key][f&#34;y_{idx+1}&#34;] = out[idx]
+
+        else:
+            out = Parallel(n_jobs=-1, prefer=&#39;threads&#39;)(
+                delayed(self.adaptive_regression)(CollocationPoints,
+                                                  target[:, idx], idx)
+                for idx in range(target.shape[1]))
+
+            for i in range(target.shape[1]):
+                # Create a dict to pass the variables
+                self.deg_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;degree&#39;]
+                self.q_norm_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;qnorm&#39;]
+                self.coeffs_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;coeffs&#39;]
+                self.basis_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;multi_indices&#39;]
+                self.score_dict[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LOOCVScore&#39;]
+                self.clf_poly[key][f&#34;y_{i+1}&#34;] = out[i][&#39;clf_poly&#39;]
+                self.LCerror[key][f&#34;y_{i+1}&#34;] = out[i][&#39;LCerror&#39;]
+
+    if not verbose:
+        print(f&#34;\n&gt;&gt;&gt;&gt; Training the {self.meta_model_type} metamodel&#34;
+              &#34; successfully completed. &lt;&lt;&lt;&lt;\n&#34;)
+
+    return self</code></pre>
+</details>
+</dd>
+<dt id="surrogate_models.MetaModel.create_basis_indices"><code class="name flex">
+<span>def <span class="ident">create_basis_indices</span></span>(<span>self, degree, q_norm)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Creates set of selected multi-indices of multivariate polynomials for
+certain parameter numbers, polynomial degree, hyperbolic (or q-norm)
+truncation scheme.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>degree</code></strong> :&ensp;<code>int</code></dt>
+<dd>Polynomial degree.</dd>
+<dt><strong><code>q_norm</code></strong> :&ensp;<code>float</code></dt>
+<dd>hyperbolic (or q-norm) truncation.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>basis_indices</code></strong> :&ensp;<code>array</code> of <code>shape (n_terms, n_params)</code></dt>
+<dd>Multi-indices of multivariate polynomials.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def create_basis_indices(self, degree, q_norm):
+    &#34;&#34;&#34;
+    Creates the set of multi-indices of the multivariate polynomials for
+    the given number of parameters, polynomial degree and hyperbolic
+    (q-norm) truncation scheme.
+
+    Parameters
+    ----------
+    degree : int
+        Polynomial degree.
+    q_norm : float
+        hyperbolic (or q-norm) truncation.
+
+    Returns
+    -------
+    basis_indices : array of shape (n_terms, n_params)
+        Multi-indices of multivariate polynomials.
+
+    &#34;&#34;&#34;
+    basis_indices = glexindex(start=0, stop=degree+1,
+                              dimensions=self.n_params,
+                              cross_truncation=q_norm,
+                              reverse=False, graded=True)
+    return basis_indices</code></pre>
+</details>
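+<p>The hyperbolic (q-norm) truncation keeps a multi-index only if the q-norm of its
+entries does not exceed the polynomial degree. A self-contained sketch of this rule,
+independent of the <code>glexindex</code> helper used in the source:</p>
+<pre><code class="python">import itertools
+import numpy as np
+
+def hyperbolic_indices(n_params, degree, q_norm):
+    # Enumerate all candidate multi-indices up to `degree` per dimension,
+    # then keep those whose q-norm does not exceed `degree`.
+    idx = np.array(list(itertools.product(range(degree + 1), repeat=n_params)))
+    q = np.sum(idx.astype(float) ** q_norm, axis=1) ** (1.0 / q_norm)
+    return idx[q &lt;= degree + 1e-10]
+
+print(hyperbolic_indices(2, 3, 0.75))</code></pre>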
+</dd>
+<dt id="surrogate_models.MetaModel.add_ExpDesign"><code class="name flex">
+<span>def <span class="ident">add_ExpDesign</span></span>(<span>self)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Instanciates experimental design object.</p>
+<h2 id="returns">Returns</h2>
+<p>None.</p></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def add_ExpDesign(self):
+    &#34;&#34;&#34;
+    Instantiates the experimental design object.
+
+    Returns
+    -------
+    None.
+
+    &#34;&#34;&#34;
+    self.ExpDesign = ExpDesigns(self.input_obj,
+                                meta_Model=self.meta_model_type)</code></pre>
+</details>
+</dd>
+<dt id="surrogate_models.MetaModel.generate_ExpDesign"><code class="name flex">
+<span>def <span class="ident">generate_ExpDesign</span></span>(<span>self, Model)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Prepares the experimental design either by reading from the prescribed
+data or running simulations.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>Model</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Model object.</dd>
+</dl>
+<h2 id="raises">Raises</h2>
+<dl>
+<dt><code>Exception</code></dt>
+<dd>If model simulations are not provided properly.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>ED_X_tr</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Training samples transformed by an isoprobabilistic transformation.</dd>
+<dt><strong><code>ED_Y</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Model simulations (target) for all outputs.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def generate_ExpDesign(self, Model):
+    &#34;&#34;&#34;
+    Prepares the experimental design either by reading from the prescribed
+    data or running simulations.
+
+    Parameters
+    ----------
+    Model : obj
+        Model object.
+
+    Raises
+    ------
+    Exception
+        If model simulations are not provided properly.
+
+    Returns
+    -------
+    ED_X_tr: array of shape (n_samples, n_params)
+        Training samples transformed by an isoprobabilistic transformation.
+    ED_Y: dict
+        Model simulations (target) for all outputs.
+    &#34;&#34;&#34;
+    ExpDesign = self.ExpDesign
+    if self.ExpDesignFlag != &#39;sequential&#39;:
+        # Read ExpDesign (training and targets) from the provided hdf5
+        if ExpDesign.hdf5_file is not None:
+
+            # Read hdf5 file
+            f = h5py.File(ExpDesign.hdf5_file, &#39;r+&#39;)
+
+            # Read EDX and pass it to ExpDesign object
+            try:
+                ExpDesign.X = np.array(f[&#34;EDX/New_init_&#34;])
+            except KeyError:
+                ExpDesign.X = np.array(f[&#34;EDX/init_&#34;])
+
+            # Update number of initial samples
+            ExpDesign.n_init_samples = ExpDesign.X.shape[0]
+
+            # Read the output names and initialize the targets
+            out_names = self.ModelObj.Output.Names
+            ExpDesign.Y = {}
+
+            # Extract x values
+            try:
+                ExpDesign.Y[&#34;x_values&#34;] = dict()
+                for varIdx, var in enumerate(out_names):
+                    x = np.array(f[f&#34;x_values/{var}&#34;])
+                    ExpDesign.Y[&#34;x_values&#34;][var] = x
+            except KeyError:
+                ExpDesign.Y[&#34;x_values&#34;] = np.array(f[&#34;x_values&#34;])
+
+            # Store the output
+            for varIdx, var in enumerate(out_names):
+                try:
+                    y = np.array(f[f&#34;EDY/{var}/New_init_&#34;])
+                except KeyError:
+                    y = np.array(f[f&#34;EDY/{var}/init_&#34;])
+                ExpDesign.Y[var] = y
+            f.close()
+        else:
+            # Check if an old hdf5 file exists: if yes, rename it
+            hdf5file = f&#39;ExpDesign_{self.ModelObj.name}.hdf5&#39;
+            if os.path.exists(hdf5file):
+                os.rename(hdf5file, &#39;old_&#39;+hdf5file)
+
+    # ---- Prepare X samples ----
+    ED_X, ED_X_tr = ExpDesign.generate_ED(ExpDesign.n_init_samples,
+                                          ExpDesign.sampling_method,
+                                          transform=True,
+                                          max_pce_deg=np.max(self.pce_deg))
+    ExpDesign.X = ED_X
+    ExpDesign.collocationPoints = ED_X_tr
+    self.bound_tuples = ExpDesign.bound_tuples
+
+    # ---- Run simulations at X ----
+    if not hasattr(ExpDesign, &#39;Y&#39;) or ExpDesign.Y is None:
+        print(&#39;\n Now the forward model needs to be run!\n&#39;)
+        ED_Y, up_ED_X = Model.run_model_parallel(ED_X)
+        ExpDesign.X = up_ED_X
+        self.ModelOutputDict = ED_Y
+        ExpDesign.Y = ED_Y
+    else:
+        # Check if a dict has been passed.
+        if type(ExpDesign.Y) is dict:
+            self.ModelOutputDict = ExpDesign.Y
+        else:
+            raise Exception(&#39;Please provide either a dictionary or an hdf5 &#39;
+                            &#39;file to the ExpDesign.hdf5_file argument.&#39;)
+
+    return ED_X_tr, self.ModelOutputDict</code></pre>
+</details>
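+<p>For reference, a minimal sketch of reading a pre-computed design with the hdf5
+layout assumed above (<code>EDX/init_</code> and <code>EDY/&lt;output&gt;/init_</code>;
+the file and output names are placeholders):</p>
+<pre><code class="python">import h5py
+import numpy as np
+
+with h5py.File(&#39;ExpDesign_MyModel.hdf5&#39;, &#39;r&#39;) as f:    # assumed file name
+    X = np.array(f[&#39;EDX/init_&#39;])                       # training inputs
+    Y = {var: np.array(f[f&#39;EDY/{var}/init_&#39;])          # targets per output
+         for var in [&#39;Z&#39;]}                             # assumed output name</code></pre>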
+</dd>
+<dt id="surrogate_models.MetaModel.univ_basis_vals"><code class="name flex">
+<span>def <span class="ident">univ_basis_vals</span></span>(<span>self, samples, n_max=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Evaluates univariate regressors along input directions.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Samples.</dd>
+<dt><strong><code>n_max</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Maximum polynomial degree. The default is <code>None</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>univ_basis</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params, n_max+1)</code></dt>
+<dd>All univariate regressors up to n_max.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def univ_basis_vals(self, samples, n_max=None):
+    &#34;&#34;&#34;
+    Evaluates univariate regressors along input directions.
+
+    Parameters
+    ----------
+    samples : array of shape (n_samples, n_params)
+        Samples.
+    n_max : int, optional
+        Maximum polynomial degree. The default is `None`.
+
+    Returns
+    -------
+    univ_basis: array of shape (n_samples, n_params, n_max+1)
+        All univariate regressors up to n_max.
+    &#34;&#34;&#34;
+    # Extract information
+    poly_types = self.ExpDesign.poly_types
+    if samples.ndim != 2:
+        samples = samples.reshape(1, len(samples))
+    n_max = np.max(self.pce_deg) if n_max is None else n_max
+
+    # Extract poly coeffs
+    if self.ExpDesign.input_data_given or self.ExpDesign.apce:
+        apolycoeffs = self.ExpDesign.polycoeffs
+    else:
+        apolycoeffs = None
+
+    # Evaluate univariate basis
+    univ_basis = eval_univ_basis(samples, n_max, poly_types, apolycoeffs)
+
+    return univ_basis</code></pre>
+</details>
+</dd>
+<dt id="surrogate_models.MetaModel.create_psi"><code class="name flex">
+<span>def <span class="ident">create_psi</span></span>(<span>self, basis_indices, univ_p_val)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>This function assemble the design matrix Psi from the given basis index
+set INDICES and the univariate polynomial evaluations univ_p_val.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>basis_indices</code></strong> :&ensp;<code>array</code> of <code>shape (n_terms, n_params)</code></dt>
+<dd>Multi-indices of multivariate polynomials.</dd>
+<dt><strong><code>univ_p_val</code></strong> :&ensp;<code>array</code> of <code>(n_samples, n_params, n_max+1)</code></dt>
+<dd>All univariate regressors up to <code>n_max</code>.</dd>
+</dl>
+<h2 id="raises">Raises</h2>
+<dl>
+<dt><code>ValueError</code></dt>
+<dd>n_terms in arguments do not match.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>psi</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_terms)</code></dt>
+<dd>Multivariate regressors.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def create_psi(self, basis_indices, univ_p_val):
+    &#34;&#34;&#34;
+    Assembles the design matrix Psi from the given basis index set
+    `basis_indices` and the univariate polynomial evaluations `univ_p_val`.
+
+    Parameters
+    ----------
+    basis_indices : array of shape (n_terms, n_params)
+        Multi-indices of multivariate polynomials.
+    univ_p_val : array of (n_samples, n_params, n_max+1)
+        All univariate regressors up to `n_max`.
+
+    Raises
+    ------
+    ValueError
+        n_terms in arguments do not match.
+
+    Returns
+    -------
+    psi : array of shape (n_samples, n_terms)
+        Multivariate regressors.
+
+    &#34;&#34;&#34;
+    # Check if BasisIndices is a sparse matrix
+    sparsity = sp.sparse.issparse(basis_indices)
+    if sparsity:
+        basis_indices = basis_indices.toarray()
+
+    # Initialization and consistency checks
+    # number of input variables
+    n_params = univ_p_val.shape[1]
+
+    # Size of the experimental design
+    n_samples = univ_p_val.shape[0]
+
+    # number of basis terms
+    n_terms = basis_indices.shape[0]
+
+    # check that the variables have consistent sizes
+    if n_params != basis_indices.shape[1]:
+        raise ValueError(&#34;The shapes of basis_indices and univ_p_val don&#39;t&#34;
+                         &#34; match!!&#34;)
+
+    # Preallocate the Psi matrix for performance
+    psi = np.ones((n_samples, n_terms))
+    # Assemble the Psi matrix
+    for m in range(basis_indices.shape[1]):
+        aa = np.where(basis_indices[:, m] &gt; 0)[0]
+        basisIdx = basis_indices[aa, m]
+        bb = np.reshape(univ_p_val[:, m, basisIdx], psi[:, aa].shape)
+        psi[:, aa] = np.multiply(psi[:, aa], bb)
+
+    return psi</code></pre>
+</details>
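+<p>A compact sketch of the same assembly: each column of Psi is the product over input
+dimensions of the univariate polynomial values picked by the multi-index. It is
+equivalent to the loop above when the degree-0 univariate polynomial evaluates to 1
+(the usual normalization):</p>
+<pre><code class="python">import numpy as np
+
+def psi_from_indices(basis_indices, univ_p_val):
+    # univ_p_val: (n_samples, n_params, n_max+1); basis_indices: (n_terms, n_params)
+    n_samples = univ_p_val.shape[0]
+    n_terms, n_params = basis_indices.shape
+    psi = np.ones((n_samples, n_terms))
+    for m in range(n_params):
+        # fancy-index the degree of dimension m for every term at once
+        psi *= univ_p_val[:, m, basis_indices[:, m]]
+    return psi</code></pre>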
+</dd>
+<dt id="surrogate_models.MetaModel.fit"><code class="name flex">
+<span>def <span class="ident">fit</span></span>(<span>self, X, y, basis_indices, reg_method=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Fit regression using the regression method provided.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>X</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_features)</code></dt>
+<dd>Training vector, where n_samples is the number of samples and
+n_features is the number of features.</dd>
+<dt><strong><code>y</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,)</code></dt>
+<dd>Target values.</dd>
+<dt><strong><code>basis_indices</code></strong> :&ensp;<code>array</code> of <code>shape (n_terms, n_params)</code></dt>
+<dd>Multi-indices of multivariate polynomials.</dd>
+<dt><strong><code>reg_method</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Regression method. If <code>None</code>, <code>self.pce_reg_method</code> is used. The default is None.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>return_out_dict</code></strong> :&ensp;<code>Dict</code></dt>
+<dd>Fitted estimator (<code>clf_poly</code>), sparse multi-indices (<code>spareMulti-Index</code>), sparse design matrix (<code>sparePsi</code>) and coefficients (<code>coeffs</code>).</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def fit(self, X, y, basis_indices, reg_method=None):
+    &#34;&#34;&#34;
+    Fit regression using the regression method provided.
+
+    Parameters
+    ----------
+    X : array of shape (n_samples, n_features)
+        Training vector, where n_samples is the number of samples and
+        n_features is the number of features.
+    y : array of shape (n_samples,)
+        Target values.
+    basis_indices : array of shape (n_terms, n_params)
+        Multi-indices of multivariate polynomials.
+    reg_method : str, optional
+        Regression method. If `None`, `self.pce_reg_method` is used. The
+        default is None.
+
+    Returns
+    -------
+    return_out_dict : Dict
+        Fitted estimator (`clf_poly`), sparse multi-indices
+        (`spareMulti-Index`), sparse design matrix (`sparePsi`) and
+        coefficients (`coeffs`).
+
+    &#34;&#34;&#34;
+    if reg_method is None:
+        reg_method = self.pce_reg_method
+
+    # Check if BasisIndices is a sparse matrix
+    sparsity = sp.sparse.issparse(basis_indices)
+
+    clf_poly = []
+    compute_score = True if self.verbose else False
+
+    #  inverse of the observed variance of the data
+    if np.var(y) != 0:
+        Lambda = 1 / np.var(y)
+    else:
+        Lambda = 1e-6
+
+    # Bayes sparse adaptive aPCE
+    if reg_method.lower() != &#39;ols&#39;:
+        if reg_method.lower() == &#39;brr&#39; or np.var(y) == 0:
+            clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
+                                        fit_intercept=True,
+                                        normalize=True,
+                                        compute_score=compute_score,
+                                        alpha_1=1e-04, alpha_2=1e-04,
+                                        lambda_1=Lambda, lambda_2=Lambda)
+            clf_poly.converged = True
+
+        elif reg_method.lower() == &#39;ard&#39;:
+            clf_poly = lm.ARDRegression(fit_intercept=True,
+                                        normalize=True,
+                                        compute_score=compute_score,
+                                        n_iter=1000, tol=0.0001,
+                                        alpha_1=1e-3, alpha_2=1e-3,
+                                        lambda_1=Lambda, lambda_2=Lambda)
+
+        elif reg_method.lower() == &#39;fastard&#39;:
+            clf_poly = RegressionFastARD(fit_intercept=True,
+                                         normalize=True,
+                                         compute_score=compute_score,
+                                         n_iter=300, tol=1e-10)
+
+        elif reg_method.lower() == &#39;bcs&#39;:
+            clf_poly = RegressionFastLaplace(fit_intercept=False,
+                                             n_iter=1000, tol=1e-7)
+
+        elif reg_method.lower() == &#39;lars&#39;:
+            clf_poly = lm.LassoLarsCV(fit_intercept=False)
+
+        elif reg_method.lower() == &#39;sgdr&#39;:
+            clf_poly = lm.SGDRegressor(fit_intercept=False,
+                                       max_iter=5000, tol=1e-7)
+
+        elif reg_method.lower() == &#39;omp&#39;:
+            clf_poly = lm.OrthogonalMatchingPursuitCV(fit_intercept=False,
+                                                      max_iter=10)
+
+        elif reg_method.lower() == &#39;vbl&#39;:
+            clf_poly = VBLinearRegression(fit_intercept=False)
+
+        elif reg_method.lower() == &#39;ebl&#39;:
+            clf_poly = EBLinearRegression(optimizer=&#39;em&#39;)
+
+        # Fit
+        clf_poly.fit(X, y)
+
+        # Select the nonzero entries of coefficients
+        # The first column must be kept (For mean calculations)
+        nnz_idx = np.nonzero(clf_poly.coef_)[0]
+
+        if len(nnz_idx) == 0 or nnz_idx[0] != 0:
+            nnz_idx = np.insert(np.nonzero(clf_poly.coef_)[0], 0, 0)
+            # Remove the zero entries for Bases and PSI if need be
+            if sparsity:
+                sparse_basis_indices = basis_indices.toarray()[nnz_idx]
+            else:
+                sparse_basis_indices = basis_indices[nnz_idx]
+            sparse_X = X[:, nnz_idx]
+
+            # Store the coefficients of the regression model
+            clf_poly.fit(sparse_X, y)
+            coeffs = clf_poly.coef_
+        else:
+            # This is for the case where all outputs are zero, thereby
+            # all coefficients are zero
+            if sparsity:
+                sparse_basis_indices = basis_indices.toarray()
+            else:
+                sparse_basis_indices = basis_indices
+            sparse_X = X
+            coeffs = clf_poly.coef_
+
+    # Ordinary least square method (OLS)
+    else:
+        if sparsity:
+            sparse_basis_indices = basis_indices.toarray()
+        else:
+            sparse_basis_indices = basis_indices
+        sparse_X = X
+
+        X_T_X = np.dot(sparse_X.T, sparse_X)
+
+        if np.linalg.cond(X_T_X) &gt; 1e-12 and \
+           np.linalg.cond(X_T_X) &lt; 1 / sys.float_info.epsilon:
+            # faster
+            coeffs = sp.linalg.solve(X_T_X, np.dot(sparse_X.T, y))
+        else:
+            # stabler
+            coeffs = np.dot(np.dot(np.linalg.pinv(X_T_X), sparse_X.T), y)
+
+    # Create a dict to pass the outputs
+    return_out_dict = dict()
+    return_out_dict[&#39;clf_poly&#39;] = clf_poly
+    return_out_dict[&#39;spareMulti-Index&#39;] = sparse_basis_indices
+    return_out_dict[&#39;sparePsi&#39;] = sparse_X
+    return_out_dict[&#39;coeffs&#39;] = coeffs
+    return return_out_dict</code></pre>
+</details>
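+<p>A minimal sketch of the OLS branch above: solve the normal equations when the system
+is well-conditioned, otherwise fall back to the pseudo-inverse:</p>
+<pre><code class="python">import sys
+import numpy as np
+import scipy as sp
+import scipy.linalg
+
+def ols_coeffs(psi, y):
+    A = psi.T @ psi
+    cond = np.linalg.cond(A)
+    if 1e-12 &lt; cond &lt; 1 / sys.float_info.epsilon:
+        return sp.linalg.solve(A, psi.T @ y)    # faster
+    return np.linalg.pinv(A) @ psi.T @ y        # more stable</code></pre>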
+</dd>
+<dt id="surrogate_models.MetaModel.adaptive_regression"><code class="name flex">
+<span>def <span class="ident">adaptive_regression</span></span>(<span>self, ED_X, ED_Y, varIdx, verbose=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Adaptively fits the PCE model by comparing the scores of different
+degrees and q-norms.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>ED_X</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Experimental design.</dd>
+<dt><strong><code>ED_Y</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,)</code></dt>
+<dd>Target values, i.e. simulation results for the Experimental design.</dd>
+<dt><strong><code>varIdx</code></strong> :&ensp;<code>int</code></dt>
+<dd>Index of the output.</dd>
+<dt><strong><code>verbose</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Print out summary. The default is False.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>returnVars</code></strong> :&ensp;<code>Dict</code></dt>
+<dd>Fitted estimator, best degree, best q-norm, LOOCVScore and
+coefficients.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
+    &#34;&#34;&#34;
+    Adaptively fits the PCE model by comparing the scores of different
+    degrees and q-norms.
+
+    Parameters
+    ----------
+    ED_X : array of shape (n_samples, n_params)
+        Experimental design.
+    ED_Y : array of shape (n_samples,)
+        Target values, i.e. simulation results for the Experimental design.
+    varIdx : int
+        Index of the output.
+    verbose : bool, optional
+        Print out summary. The default is False.
+
+    Returns
+    -------
+    returnVars : Dict
+        Fitted estimator, best degree, best q-norm, LOOCVScore and
+        coefficients.
+
+    &#34;&#34;&#34;
+
+    NrSamples, n_params = ED_X.shape
+    # Initialization
+    qAllCoeffs, AllCoeffs = {}, {}
+    qAllIndices_Sparse, AllIndices_Sparse = {}, {}
+    qAllclf_poly, Allclf_poly = {}, {}
+    qAllnTerms, AllnTerms = {}, {}
+    qAllLCerror, AllLCerror = {}, {}
+
+    # Extract degree array and qnorm array
+    DegreeArray = np.array([*self.allBasisIndices], dtype=int)
+    qnorm = [*self.allBasisIndices[str(int(DegreeArray[0]))]]
+
+    # Some options for EarlyStop
+    errorIncreases = False
+    # Stop degree, if LOO error does not decrease n_checks_degree times
+    n_checks_degree = 3
+    # Stop qNorm, if criterion isn&#39;t fulfilled n_checks_qNorm times
+    n_checks_qNorm = 2
+    nqnorms = len(qnorm)
+    qNormEarlyStop = True
+    if nqnorms &lt; n_checks_qNorm+1:
+        qNormEarlyStop = False
+
+    # =====================================================================
+    # basis adaptive polynomial chaos: repeat the calculation by increasing
+    # polynomial degree until the highest accuracy is reached
+    # =====================================================================
+    # For each degree check all q-norms and choose the best one
+    scores = -np.inf * np.ones(DegreeArray.shape[0])
+    qNormScores = -np.inf * np.ones(nqnorms)
+
+    for degIdx, deg in enumerate(DegreeArray):
+
+        for qidx, q in enumerate(qnorm):
+
+            # Extract the polynomial basis indices from the pool of
+            # allBasisIndices
+            BasisIndices = self.allBasisIndices[str(deg)][str(q)]
+
+            # Assemble the Psi matrix
+            Psi = self.create_psi(BasisIndices, self.univ_p_val)
+
+            # Calculate the coefficients of the meta model
+            outs = self.fit(Psi, ED_Y, BasisIndices)
+
+            # Calculate and save the score of LOOCV
+            score, LCerror = self.corr_loocv_error(outs[&#39;clf_poly&#39;],
+                                                   outs[&#39;sparePsi&#39;],
+                                                   outs[&#39;coeffs&#39;],
+                                                   ED_Y)
+
+            # Check the convergence of noise for FastARD
+            if self.pce_reg_method == &#39;FastARD&#39; and \
+               outs[&#39;clf_poly&#39;].alpha_ &lt; np.finfo(np.float32).eps:
+                score = -np.inf
+
+            qNormScores[qidx] = score
+            qAllCoeffs[str(qidx+1)] = outs[&#39;coeffs&#39;]
+            qAllIndices_Sparse[str(qidx+1)] = outs[&#39;spareMulti-Index&#39;]
+            qAllclf_poly[str(qidx+1)] = outs[&#39;clf_poly&#39;]
+            qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
+            qAllLCerror[str(qidx+1)] = LCerror
+
+            # EarlyStop check
+            # if there are at least n_checks_qNorm entries after the
+            # best one, we stop
+            if qNormEarlyStop and \
+               sum(np.isfinite(qNormScores)) &gt; n_checks_qNorm:
+                # If the error has increased the last two iterations, stop!
+                qNormScores_nonInf = qNormScores[np.isfinite(qNormScores)]
+                deltas = np.sign(np.diff(qNormScores_nonInf))
+                if np.sum(deltas[-n_checks_qNorm:]) == -n_checks_qNorm:
+                    # stop the q-norm loop here
+                    break
+            if np.var(ED_Y) == 0:
+                break
+
+        # Store the score in the scores list
+        best_q = np.nanargmax(qNormScores)
+        scores[degIdx] = qNormScores[best_q]
+
+        AllCoeffs[str(degIdx+1)] = qAllCoeffs[str(best_q+1)]
+        AllIndices_Sparse[str(degIdx+1)] = qAllIndices_Sparse[str(best_q+1)]
+        Allclf_poly[str(degIdx+1)] = qAllclf_poly[str(best_q+1)]
+        AllnTerms[str(degIdx+1)] = qAllnTerms[str(best_q+1)]
+        AllLCerror[str(degIdx+1)] = qAllLCerror[str(best_q+1)]
+
+        # Check the direction of the error (on average):
+        # if it increases consistently stop the iterations
+        if len(scores[scores != -np.inf]) &gt; n_checks_degree:
+            scores_nonInf = scores[scores != -np.inf]
+            ss = np.sign(scores_nonInf - np.max(scores_nonInf))
+            # ss&lt;0 error decreasing
+            errorIncreases = np.sum(ss[-n_checks_degree:]) &lt;= -n_checks_degree
+
+        if errorIncreases:
+            break
+
+        # Check only one degree, if target matrix has zero variance
+        if np.var(ED_Y) == 0:
+            break
+
+    # ------------------ Summary of results ------------------
+    # Select the one with the best score and save the necessary outputs
+    best_deg = np.nanargmax(scores)+1
+    coeffs = AllCoeffs[str(best_deg)]
+    basis_indices = AllIndices_Sparse[str(best_deg)]
+    clf_poly = Allclf_poly[str(best_deg)]
+    LOOCVScore = np.nanmax(scores)
+    P = AllnTerms[str(best_deg)]
+    LCerror = AllLCerror[str(best_deg)]
+    degree = DegreeArray[np.nanargmax(scores)]
+    qnorm = float(qnorm[best_q])
+
+    # ------------------ Print out Summary of results ------------------
+    if self.verbose:
+        # Create PSI_Sparse by removing redundant terms
+        nnz_idx = np.nonzero(coeffs)[0]
+        BasisIndices_Sparse = basis_indices[nnz_idx]
+
+        print(f&#39;Output variable {varIdx+1}:&#39;)
+        print(&#39;The estimation of PCE coefficients converged at polynomial &#39;
+              f&#39;degree {DegreeArray[best_deg-1]} with &#39;
+              f&#39;{len(BasisIndices_Sparse)} terms (Sparsity index = &#39;
+              f&#39;{round(len(BasisIndices_Sparse)/P, 3)}).&#39;)
+
+        print(f&#39;Final ModLOO error estimate: {1-max(scores):.3e}&#39;)
+        print(&#39;\n&#39;+&#39;-&#39;*50)
+
+    if verbose:
+        print(&#39;=&#39;*50)
+        print(&#39; &#39;*10 + &#39; Summary of results &#39;)
+        print(&#39;=&#39;*50)
+
+        print(&#34;scores:\n&#34;, scores)
+        print(&#34;Best score&#39;s degree:&#34;, self.DegreeArray[best_deg-1])
+        print(&#34;NO. of terms:&#34;, len(basis_indices))
+        print(&#34;Sparsity index:&#34;, round(len(basis_indices)/P, 3))
+        print(&#34;Best Indices:\n&#34;, basis_indices)
+
+        if self.pce_reg_method in [&#39;BRR&#39;, &#39;ARD&#39;]:
+            fig, ax = plt.subplots(figsize=(12, 10))
+            plt.title(&#34;Marginal log-likelihood&#34;)
+            plt.plot(clf_poly.scores_, color=&#39;navy&#39;, linewidth=2)
+            plt.ylabel(&#34;Score&#34;)
+            plt.xlabel(&#34;Iterations&#34;)
+            if self.pce_reg_method.lower() == &#39;brr&#39;:
+                text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                        f&#34;$\\lambda={clf_poly.lambda_:.3f}$\n&#34;
+                        f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+            else:
+                text = (f&#34;$\\alpha={clf_poly.alpha_:.1f}$\n&#34;
+                        f&#34;$L={clf_poly.scores_[-1]:.1f}$&#34;)
+
+            plt.text(0.75, 0.5, text, fontsize=18, transform=ax.transAxes)
+            plt.show()
+        print(&#39;=&#39;*80)
+
+    # Create a dict to pass the outputs
+    returnVars = dict()
+    returnVars[&#39;clf_poly&#39;] = clf_poly
+    returnVars[&#39;degree&#39;] = degree
+    returnVars[&#39;qnorm&#39;] = qnorm
+    returnVars[&#39;coeffs&#39;] = coeffs
+    returnVars[&#39;multi_indices&#39;] = basis_indices
+    returnVars[&#39;LOOCVScore&#39;] = LOOCVScore
+    returnVars[&#39;LCerror&#39;] = LCerror
+
+    return returnVars</code></pre>
+</details>
+</dd>
+<dt id="surrogate_models.MetaModel.corr_loocv_error"><code class="name flex">
+<span>def <span class="ident">corr_loocv_error</span></span>(<span>self, clf, psi, coeffs, y)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Calculates the corrected LOO error for regression on regressor
+matrix <code>psi</code> that generated the coefficients based on [1] and [2].</p>
+<p>[1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+uncertainty propagation and sensitivity analysis (Doctoral
+dissertation, Clermont-Ferrand 2).</p>
+<p>[2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+expansion based on least angle regression. Journal of computational
+Physics, 230(6), pp.2345-2367.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>clf</code></strong> :&ensp;<code>object</code></dt>
+<dd>Fitted estimator.</dd>
+<dt><strong><code>psi</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_features)</code></dt>
+<dd>The multivariate orthogonal polynomials (regressor).</dd>
+<dt><strong><code>coeffs</code></strong> :&ensp;<code>array-like</code> of <code>shape (n_features,)</code></dt>
+<dd>Estimated coefficients.</dd>
+<dt><strong><code>y</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,)</code></dt>
+<dd>Target values.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>Q_2</code></strong> :&ensp;<code>float</code></dt>
+<dd>LOOCV validation score (1 - LOOCV error).</dd>
+<dt><strong><code>residual</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,)</code></dt>
+<dd>Residual values (y - predicted targets).</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def corr_loocv_error(self, clf, psi, coeffs, y):
+    &#34;&#34;&#34;
+    Calculates the corrected LOO error for regression on regressor
+    matrix `psi` that generated the coefficients based on [1] and [2].
+
+    [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+        uncertainty propagation and sensitivity analysis (Doctoral
+        dissertation, Clermont-Ferrand 2).
+
+    [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+        expansion based on least angle regression. Journal of computational
+        Physics, 230(6), pp.2345-2367.
+
+    Parameters
+    ----------
+    clf : object
+        Fitted estimator.
+    psi : array of shape (n_samples, n_features)
+        The multivariate orthogonal polynomials (regressor).
+    coeffs : array-like of shape (n_features,)
+        Estimated coefficients.
+    y : array of shape (n_samples,)
+        Target values.
+
+    Returns
+    -------
+    Q_2 : float
+        LOOCV validation score (1 - LOOCV error).
+    residual : array of shape (n_samples,)
+        Residual values (y - predicted targets).
+
+    &#34;&#34;&#34;
+    psi = np.array(psi, dtype=float)
+
+    # Create PSI_Sparse by removing redundant terms
+    nnz_idx = np.nonzero(coeffs)[0]
+    if len(nnz_idx) == 0:
+        nnz_idx = [0]
+    psi_sparse = psi[:, nnz_idx]
+
+    # NrCoeffs of aPCEs
+    P = len(nnz_idx)
+    # NrEvaluation (Size of experimental design)
+    N = psi.shape[0]
+
+    # Build the projection matrix
+    PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
+
+    if np.linalg.cond(PsiTPsi) &gt; 1e-12 and \
+       np.linalg.cond(PsiTPsi) &lt; 1/sys.float_info.epsilon:
+        # faster
+        M = sp.linalg.solve(PsiTPsi,
+                            sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+    else:
+        # stabler
+        M = np.linalg.pinv(PsiTPsi)
+
+    # h factor (the full matrix is not calculated explicitly,
+    # only the trace is, to save memory)
+    PsiM = np.dot(psi_sparse, M)
+
+    h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.float128)
+
+    # ------ Calculate Error Loocv for each measurement point ----
+    # Residuals
+    if isinstance(clf, list):
+        residual = np.dot(psi, coeffs) - y
+    else:
+        residual = clf.predict(psi) - y
+
+    # Variance
+    varY = np.var(y)
+
+    if varY == 0:
+        normEmpErr = 0
+        ErrLoo = 0
+        LCerror = np.zeros(y.shape)
+    else:
+        normEmpErr = np.mean(residual**2)/varY
+
+        # LCerror = np.divide(residual, (1-h))
+        LCerror = residual / (1-h)[:, np.newaxis]
+        ErrLoo = np.mean(np.square(LCerror)) / varY
+        # if there are NaNs, just return an infinite LOO error (this
+        # happens, e.g., when a strongly underdetermined problem is solved)
+        if np.isnan(ErrLoo):
+            ErrLoo = np.inf
+
+    # Corrected Error for over-determined system
+    trM = np.trace(M)
+    if trM &lt; 0 or abs(trM) &gt; 1e6:
+        trM = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
+
+    # Over-determined system of Equation
+    if N &gt; P:
+        T_factor = N/(N-P) * (1 + trM)
+
+    # Under-determined system of Equation
+    else:
+        T_factor = np.inf
+
+    CorrectedErrLoo = ErrLoo * T_factor
+
+    Q_2 = 1 - CorrectedErrLoo
+
+    return Q_2, residual</code></pre>
+</details>
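+<p>A condensed sketch of the corrected LOO estimate above, assuming <code>var(y) &gt; 0</code>
+and using the full Psi and coefficient vector for brevity (the source first strips
+zero coefficients):</p>
+<pre><code class="python">import numpy as np
+
+def corrected_loo(psi, coeffs, y):
+    M = np.linalg.pinv(psi.T @ psi)
+    h = np.sum((psi @ M) * psi, axis=1)          # diag of Psi M Psi^T
+    residual = psi @ coeffs - y
+    err_loo = np.mean((residual / (1.0 - h)) ** 2) / np.var(y)
+    N, P = psi.shape
+    T = N / (N - P) * (1.0 + np.trace(M)) if N &gt; P else np.inf
+    return 1.0 - err_loo * T                     # Q^2 score</code></pre>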
+</dd>
+<dt id="surrogate_models.MetaModel.pca_transformation"><code class="name flex">
+<span>def <span class="ident">pca_transformation</span></span>(<span>self, Output)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Transforms the targets (outputs) via Principal Component Analysis</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>Output</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_features)</code></dt>
+<dd>Target values.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>pca</code></strong> :&ensp;<code>obj</code></dt>
+<dd>Fitted sklearnPCA object.</dd>
+<dt><strong><code>OutputMatrix</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_components)</code></dt>
+<dd>Transformed target values.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def pca_transformation(self, Output):
+    &#34;&#34;&#34;
+    Transforms the targets (outputs) via Principal Component Analysis
+
+    Parameters
+    ----------
+    Output : array of shape (n_samples, n_features)
+        Target values.
+
+    Returns
+    -------
+    pca : obj
+        Fitted sklearnPCA object.
+    OutputMatrix : array of shape (n_samples, n_components)
+        Transformed target values.
+
+    &#34;&#34;&#34;
+    # Transform via Principal Component Analysis
+    if hasattr(self, &#39;var_pca_threshold&#39;):
+        var_pca_threshold = self.var_pca_threshold
+    else:
+        var_pca_threshold = 100.0
+    n_samples, n_features = Output.shape
+
+    if hasattr(self, &#39;n_pca_components&#39;):
+        n_pca_components = self.n_pca_components
+    else:
+        # Instantiate and fit sklearnPCA object
+        covar_matrix = sklearnPCA(n_components=None)
+        covar_matrix.fit(Output)
+        var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_,
+                                 decimals=5)*100)
+        # Find the number of components needed to explain
+        # var_pca_threshold percent of the variance
+        try:
+            n_components = np.where(var &gt;= var_pca_threshold)[0][0] + 1
+        except IndexError:
+            n_components = min(n_samples, n_features)
+
+        n_pca_components = min(n_samples, n_features, n_components)
+
+    # Print out a report
+    print()
+    print(&#39;-&#39; * 50)
+    print(f&#34;PCA transformation is performed with {n_pca_components}&#34;
+          &#34; components.&#34;)
+    print(&#39;-&#39; * 50)
+    print()
+
+    # Fit and transform with the selected number of components
+    pca = sklearnPCA(n_components=n_pca_components,
+                     svd_solver=&#39;randomized&#39;)
+    OutputMatrix = pca.fit_transform(Output)
+
+    return pca, OutputMatrix</code></pre>
+</details>
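+<p>A sketch of the component-selection rule above, with an assumed 99% variance
+threshold and toy data:</p>
+<pre><code class="python">import numpy as np
+from sklearn.decomposition import PCA
+
+Y = np.random.rand(50, 200)                       # toy output matrix
+probe = PCA(n_components=None).fit(Y)
+var = np.cumsum(probe.explained_variance_ratio_) * 100
+n_comp = int(np.searchsorted(var, 99.0) + 1)      # smallest n reaching 99%
+pca = PCA(n_components=n_comp, svd_solver=&#39;randomized&#39;)
+Y_red = pca.fit_transform(Y)</code></pre>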
+</dd>
+<dt id="surrogate_models.MetaModel.gaussian_process_emulator"><code class="name flex">
+<span>def <span class="ident">gaussian_process_emulator</span></span>(<span>self, X, y, nug_term=None, autoSelect=False, varIdx=None)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Fits a Gaussian Process Emulator to the target given the training
+points.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>X</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code></dt>
+<dd>Training points.</dd>
+<dt><strong><code>y</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples,)</code></dt>
+<dd>Target values.</dd>
+<dt><strong><code>nug_term</code></strong> :&ensp;<code>float</code>, optional</dt>
+<dd>Nugget term. The default is None, i.e. variance of y.</dd>
+<dt><strong><code>autoSelect</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Loop over some kernels and select the best. The default is False.</dd>
+<dt><strong><code>varIdx</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>The index number. The default is None.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>gp</code></strong> :&ensp;<code>object</code></dt>
+<dd>Fitted estimator.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
+                              varIdx=None):
+    &#34;&#34;&#34;
+    Fits a Gaussian Process Emulator to the target given the training
+    points.
+
+    Parameters
+    ----------
+    X : array of shape (n_samples, n_params)
+        Training points.
+    y : array of shape (n_samples,)
+        Target values.
+    nug_term : float, optional
+        Nugget term. The default is None, i.e. variance of y.
+    autoSelect : bool, optional
+        Loop over some kernels and select the best. The default is False.
+    varIdx : int, optional
+        The index number. The default is None.
+
+    Returns
+    -------
+    gp : object
+        Fitted estimator.
+
+    &#34;&#34;&#34;
+
+    nug_term = nug_term if nug_term else np.var(y)
+
+    Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                      length_scale_bounds=(1e-25, 1e15)),
+               nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                    alpha=1.0),
+               nug_term * kernels.Matern(length_scale=1.0,
+                                         length_scale_bounds=(1e-15, 1e5),
+                                         nu=1.5)]
+
+    # Automatic selection of the kernel
+    if autoSelect:
+        gp = {}
+        BME = []
+        for i, kernel in enumerate(Kernels):
+            gp[i] = GaussianProcessRegressor(kernel=kernel,
+                                             n_restarts_optimizer=3,
+                                             normalize_y=False)
+
+            # Fit to data using Maximum Likelihood Estimation
+            gp[i].fit(X, y)
+
+            # Store the MLE as BME score
+            BME.append(gp[i].log_marginal_likelihood())
+
+        gp = gp[np.argmax(BME)]
+
+    else:
+        gp = GaussianProcessRegressor(kernel=Kernels[0],
+                                      n_restarts_optimizer=3,
+                                      normalize_y=False)
+        gp.fit(X, y)
+
+    # Compute score
+    if varIdx is not None:
+        Score = gp.score(X, y)
+        print(&#39;-&#39;*50)
+        print(f&#39;Output variable {varIdx}:&#39;)
+        print(&#39;The estimation of GPE coefficients converged,&#39;)
+        print(f&#39;with the R^2 score: {Score:.3f}&#39;)
+        print(&#39;-&#39;*50)
+
+    return gp</code></pre>
+</details>
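+<p>A stand-alone sketch of the default (non-<code>autoSelect</code>) branch with toy
+data:</p>
+<pre><code class="python">import numpy as np
+from sklearn.gaussian_process import GaussianProcessRegressor, kernels
+
+X = np.random.rand(30, 2)                         # toy training inputs
+y = np.sin(6 * X[:, 0]) + X[:, 1] ** 2            # toy targets
+kernel = np.var(y) * kernels.RBF(length_scale=1.0)
+gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=3,
+                              normalize_y=False).fit(X, y)
+y_mean, y_std = gp.predict(X, return_std=True)</code></pre>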
+</dd>
+<dt id="surrogate_models.MetaModel.eval_metamodel"><code class="name flex">
+<span>def <span class="ident">eval_metamodel</span></span>(<span>self, samples=None, nsamples=None, sampling_method='random', return_samples=False)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Evaluates meta-model at the requested samples. One can also generate
+nsamples.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>samples</code></strong> :&ensp;<code>array</code> of <code>shape (n_samples, n_params)</code>, optional</dt>
+<dd>Samples to evaluate meta-model at. The default is None.</dd>
+<dt><strong><code>nsamples</code></strong> :&ensp;<code>int</code>, optional</dt>
+<dd>Number of samples to generate, if no <code>samples</code> is provided. The
+default is None.</dd>
+<dt><strong><code>sampling_method</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Type of sampling, if no <code>samples</code> is provided. The default is
+'random'.</dd>
+<dt><strong><code>return_samples</code></strong> :&ensp;<code>bool</code>, optional</dt>
+<dd>Return the samples, if no <code>samples</code> is provided. The default is False.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>mean_pred</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Mean of the predictions.</dd>
+<dt><strong><code>std_pred</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Standard deviation of the predictions.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def eval_metamodel(self, samples=None, nsamples=None,
+                   sampling_method=&#39;random&#39;, return_samples=False):
+    &#34;&#34;&#34;
+    Evaluates the meta-model at the requested samples. If no samples are
+    given, `nsamples` new samples are generated internally.
+
+    Parameters
+    ----------
+    samples : array of shape (n_samples, n_params), optional
+        Samples to evaluate meta-model at. The default is None.
+    nsamples : int, optional
+        Number of samples to generate, if no `samples` is provided. The
+        default is None.
+    sampling_method : str, optional
+        Type of sampling, if no `samples` is provided. The default is
+        &#39;random&#39;.
+    return_samples : bool, optional
+        Return the samples, if no `samples` is provided. The default is False.
+
+    Returns
+    -------
+    mean_pred : dict
+        Mean of the predictions.
+    std_pred : dict
+        Standard deviation of the predictions.
+    &#34;&#34;&#34;
+    if self.meta_model_type.lower() == &#39;gpe&#39;:
+        model_dict = self.gp_poly
+    else:
+        model_dict = self.coeffs_dict
+
+    if samples is None:
+        if nsamples is None:
+            self.n_samples = 100000
+        else:
+            self.n_samples = nsamples
+
+        samples = self.ExpDesign.generate_samples(self.n_samples,
+                                                  sampling_method)
+    else:
+        self.samples = samples
+        self.n_samples = len(samples)
+
+    # Transform samples
+    samples = self.ExpDesign.transform(samples)
+
+    if self.meta_model_type.lower() != &#39;gpe&#39;:
+        univ_p_val = self.univ_basis_vals(samples,
+                                          n_max=np.max(self.pce_deg))
+
+    mean_pred = {}
+    std_pred = {}
+
+    # Loop over outputs
+    for output, values in model_dict.items():
+
+        mean = np.zeros((len(samples), len(values)))
+        std = np.zeros((len(samples), len(values)))
+        idx = 0
+        for in_key, InIdxValues in values.items():
+
+            # Prediction with GPE
+            if self.meta_model_type.lower() == &#39;gpe&#39;:
+                X_T = self.x_scaler[output].transform(samples)
+                gp = self.gp_poly[output][in_key]
+                y_mean, y_std = gp.predict(X_T, return_std=True)
+
+            else:
+                # Prediction with PCE or PCE-Kriging
+                # Assemble Psi matrix
+                psi = self.create_psi(self.basis_dict[output][in_key],
+                                      univ_p_val)
+                # Prediction
+                try:
+                    # with error bar
+                    clf_poly = self.clf_poly[output][in_key]
+                    y_mean, y_std = clf_poly.predict(psi, return_std=True)
+
+                except Exception:
+                    # without error bar
+                    coeffs = self.coeffs_dict[output][in_key]
+                    y_mean = np.dot(psi, coeffs)
+                    y_std = np.zeros_like(y_mean)
+
+            mean[:, idx] = y_mean
+            std[:, idx] = y_std
+            idx += 1
+
+        if self.dim_red_method.lower() == &#39;pca&#39;:
+            PCA = self.pca[output]
+            mean_pred[output] = PCA.mean_ + np.dot(mean, PCA.components_)
+            std_pred[output] = np.sqrt(np.dot(std**2, PCA.components_**2))
+        else:
+            mean_pred[output] = mean
+            std_pred[output] = std
+
+    if return_samples:
+        return mean_pred, std_pred, samples
+    else:
+        return mean_pred, std_pred</code></pre>
+</details>
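+<p>Hypothetical usage, assuming <code>surrogate</code> is a trained meta-model returned
+by <code>create_metamodel</code>:</p>
+<pre><code class="python">mean_pred, std_pred, samples = surrogate.eval_metamodel(
+    nsamples=1000, sampling_method=&#39;random&#39;, return_samples=True)
+for out in mean_pred:
+    print(out, mean_pred[out].shape, std_pred[out].shape)</code></pre>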
+</dd>
+<dt id="surrogate_models.MetaModel.create_model_error"><code class="name flex">
+<span>def <span class="ident">create_model_error</span></span>(<span>self, X, y, name='Calib')</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Fits a GPE-based model error.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>X</code></strong> :&ensp;<code>array</code> of <code>shape (n_outputs, n_inputs)</code></dt>
+<dd>Input array. It can contain any forcing inputs or coordinates of
+extracted data.</dd>
+<dt><strong><code>y</code></strong> :&ensp;<code>array</code> of <code>shape (n_outputs,)</code></dt>
+<dd>The model response for the MAP parameter set.</dd>
+<dt><strong><code>name</code></strong> :&ensp;<code>str</code>, optional</dt>
+<dd>Calibration or validation. The default is <code>'Calib'</code>.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>self</code></strong> :&ensp;<code>object</code></dt>
+<dd>Self object.</dd>
+</dl></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def create_model_error(self, X, y, name=&#39;Calib&#39;):
+    &#34;&#34;&#34;
+    Fits a GPE-based model error.
+
+    Parameters
+    ----------
+    X : array of shape (n_outputs, n_inputs)
+        Input array. It can contain any forcing inputs or coordinates of
+        extracted data.
+    y : array of shape (n_outputs,)
+        The model response for the MAP parameter set.
+    name : str, optional
+        Calibration or validation. The default is `&#39;Calib&#39;`.
+
+    Returns
+    -------
+    self: object
+        Self object.
+
+    &#34;&#34;&#34;
+    Model = self.ModelObj
+    outputNames = Model.Output.Names
+    self.errorRegMethod = &#39;GPE&#39;
+    self.errorclf_poly = self.auto_vivification()
+    self.errorScale = self.auto_vivification()
+
+    # Read data
+    MeasuredData = Model.read_observation(case=name)
+
+    # Fitting GPR based bias model
+    for out in outputNames:
+        nan_idx = ~np.isnan(MeasuredData[out])
+        # Select data
+        try:
+            data = MeasuredData[out].values[nan_idx]
+        except AttributeError:
+            data = MeasuredData[out][nan_idx]
+
+        # Prepare the input matrix
+        scaler = MinMaxScaler()
+        delta = data  # - y[out][0]
+        BiasInputs = np.hstack((X[out], y[out].reshape(-1, 1)))
+        X_S = scaler.fit_transform(BiasInputs)
+        gp = self.gaussian_process_emulator(X_S, delta)
+
+        self.errorScale[out][&#34;y_1&#34;] = scaler
+        self.errorclf_poly[out][&#34;y_1&#34;] = gp
+
+    return self</code></pre>
+</details>
+</dd>
+<dt id="surrogate_models.MetaModel.eval_model_error"><code class="name flex">
+<span>def <span class="ident">eval_model_error</span></span>(<span>self, X, y_pred)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Evaluates the error model.</p>
+<h2 id="parameters">Parameters</h2>
+<dl>
+<dt><strong><code>X</code></strong> :&ensp;<code>array</code></dt>
+<dd>Inputs.</dd>
+<dt><strong><code>y_pred</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Predictions.</dd>
+</dl>
+<h2 id="returns">Returns</h2>
+<dl>
+<dt><strong><code>mean_pred</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Mean prediction of the GPE-based error model.</dd>
+<dt><strong><code>std_pred</code></strong> :&ensp;<code>dict</code></dt>
+<dd>Standard deviation of the GPE-based error model.</dd>
+</dl></div>
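+<p>A minimal usage sketch (<code>x_obs</code> and <code>y_pred</code> are
+hypothetical placeholders; <code>y_pred</code> would typically come from
+<code>eval_metamodel</code>):</p>
+<pre><code class="python"># evaluate the fitted GPE-based error model
+mean_err, std_err = metamodel.eval_model_error(x_obs, y_pred)</code></pre>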
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def eval_model_error(self, X, y_pred):
+    &#34;&#34;&#34;
+    Evaluates the error model.
+
+    Parameters
+    ----------
+    X : array
+        Inputs.
+    y_pred : dict
+        Predictions.
+
+    Returns
+    -------
+    mean_pred : dict
+        Mean prediction of the GPE-based error model.
+    std_pred : dict
+        Standard deviation of the GPE-based error model.
+
+    &#34;&#34;&#34;
+    mean_pred = {}
+    std_pred = {}
+
+    for Outkey, ValuesDict in self.errorclf_poly.items():
+
+        pred_mean = np.zeros_like(y_pred[Outkey])
+        pred_std = np.zeros_like(y_pred[Outkey])
+
+        for Inkey, InIdxValues in ValuesDict.items():
+
+            gp = self.errorclf_poly[Outkey][Inkey]
+            scaler = self.errorScale[Outkey][Inkey]
+
+            # Transform Samples using scaler
+            for j, pred in enumerate(y_pred[Outkey]):
+                BiasInputs = np.hstack((X[Outkey], pred.reshape(-1, 1)))
+                Samples_S = scaler.transform(BiasInputs)
+                y_hat, y_std = gp.predict(Samples_S, return_std=True)
+                pred_mean[j] = y_hat
+                pred_std[j] = y_std
+                # pred_mean[j] += pred
+
+        mean_pred[Outkey] = pred_mean
+        std_pred[Outkey] = pred_std
+
+    return mean_pred, std_pred</code></pre>
+</details>
+</dd>
+</dl>
+</dd>
+</dl>
+</section>
+</article>
+<nav id="sidebar">
+<h1>Index</h1>
+<div class="toc">
+<ul></ul>
+</div>
+<ul id="index">
+<li><h3><a href="#header-classes">Classes</a></h3>
+<ul>
+<li>
+<h4><code><a title="surrogate_models.MetaModel" href="#surrogate_models.MetaModel">MetaModel</a></code></h4>
+<ul class="">
+<li><code><a title="surrogate_models.MetaModel.create_metamodel" href="#surrogate_models.MetaModel.create_metamodel">create_metamodel</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.train_norm_design" href="#surrogate_models.MetaModel.train_norm_design">train_norm_design</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.create_basis_indices" href="#surrogate_models.MetaModel.create_basis_indices">create_basis_indices</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.add_ExpDesign" href="#surrogate_models.MetaModel.add_ExpDesign">add_ExpDesign</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.generate_ExpDesign" href="#surrogate_models.MetaModel.generate_ExpDesign">generate_ExpDesign</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.univ_basis_vals" href="#surrogate_models.MetaModel.univ_basis_vals">univ_basis_vals</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.create_psi" href="#surrogate_models.MetaModel.create_psi">create_psi</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.fit" href="#surrogate_models.MetaModel.fit">fit</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.adaptive_regression" href="#surrogate_models.MetaModel.adaptive_regression">adaptive_regression</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.corr_loocv_error" href="#surrogate_models.MetaModel.corr_loocv_error">corr_loocv_error</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.pca_transformation" href="#surrogate_models.MetaModel.pca_transformation">pca_transformation</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.gaussian_process_emulator" href="#surrogate_models.MetaModel.gaussian_process_emulator">gaussian_process_emulator</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.eval_metamodel" href="#surrogate_models.MetaModel.eval_metamodel">eval_metamodel</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.create_model_error" href="#surrogate_models.MetaModel.create_model_error">create_model_error</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.eval_model_error" href="#surrogate_models.MetaModel.eval_model_error">eval_model_error</a></code></li>
+<li><code><a title="surrogate_models.MetaModel.auto_vivification" href="#surrogate_models.MetaModel.auto_vivification">auto_vivification</a></code></li>
+</ul>
+</li>
+</ul>
+</li>
+</ul>
+</nav>
+</main>
+<footer id="footer">
+<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
+</footer>
+</body>
+</html>
\ No newline at end of file
diff --git a/docs/make.bat b/docs/make.bat
deleted file mode 100644
index 6fcf05b4b76f8b9774c317ac8ada402f8a7087de..0000000000000000000000000000000000000000
--- a/docs/make.bat
+++ /dev/null
@@ -1,35 +0,0 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=source
-set BUILDDIR=build
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.https://www.sphinx-doc.org/
-	exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-
-:end
-popd
diff --git a/docs/source/conf.py b/docs/source/conf.py
deleted file mode 100644
index d0454bf0422df1e3e8b5184e0f3638234eaa8530..0000000000000000000000000000000000000000
--- a/docs/source/conf.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-#
-# This file only contains a selection of the most common options. For a full
-# list see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-
-
-# -- Project information -----------------------------------------------------
-
-project = 'bayesvalidrox'
-copyright = '2022, Farid Mohammadi'
-author = 'Farid Mohammadi'
-
-# The full version, including alpha/beta/rc tags
-release = '0.0.3'
-
-
-# -- General configuration ---------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = ['myst_parser']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = 'python'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = []
-
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-#
-html_theme = 'alabaster'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
diff --git a/docs/source/example.md b/docs/source/example.md
deleted file mode 100644
index 65b132ad46d79feeb1fa62b90d91aa77f9d55fb1..0000000000000000000000000000000000000000
--- a/docs/source/example.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# My nifty title
-
-Some **text**!
-
-```{admonition} Here's my title
-:class: warning
-
-Here's my admonition content
-```
diff --git a/docs/source/index.rst b/docs/source/index.rst
deleted file mode 100644
index e53f4bfb9f37de2bbb9718c532a55ccc142b1ea5..0000000000000000000000000000000000000000
--- a/docs/source/index.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-.. bayesvalidrox documentation master file, created by
-   sphinx-quickstart on Fri Mar 11 08:09:15 2022.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to bayesvalidrox's documentation!
-=========================================
-
-.. toctree::
-   example.md
-   pylink.html
-   :maxdepth: 2
-   :caption: Contents:
-
-
-This is a normal text paragraph. The next paragraph is a code sample::
-
-   It is not processed in any way, except
-   that the indentation is removed.
-
-   It can span multiple lines.
-
-This is a normal text paragraph again.
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-* :ref:`pylink`
diff --git a/src/bayesvalidrox/bayes_inference/bayes_inference.py b/src/bayesvalidrox/bayes_inference/bayes_inference.py
index f3d0417f216cf020a3ee0308782d51724b0eeb38..0e82c5901dd54ab7b1de38a9603c9ae152d8bd95 100644
--- a/src/bayesvalidrox/bayes_inference/bayes_inference.py
+++ b/src/bayesvalidrox/bayes_inference/bayes_inference.py
@@ -1,17 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Sat Aug 24 2019
-"""
+
 import numpy as np
 import os
 import copy
@@ -50,83 +39,80 @@ class BayesInference:
     discrepancy : obj
         The discrepancy object for the sigma2s, i.e. the diagonal entries
         of the variance matrix for a multivariate normal likelihood.
-    name : string, optional
-        The type of analysis, either calibration (Calib) or validation
-        (Valid). The default is 'Calib'.
+    name : str, optional
+        The type of analysis, either calibration (`Calib`) or validation
+        (`Valid`). The default is `'Calib'`.
     emulator : bool, optional
-        Analysis with emulator (MetaModel). The default is True.
+        Analysis with emulator (MetaModel). The default is `True`.
     bootstrap : bool, optional
-        Bootstrap the analysis. The default is False.
+        Bootstrap the analysis. The default is `False`.
     req_outputs : list, optional
-        The list of requested output to be used for the analysis.
-        The default is None. If None, all the defined outputs for the model
-        object is used.
+        The list of requested outputs to be used for the analysis.
+        The default is `None`. If `None`, all the defined outputs for the
+        model object are used.
     selected_indices : dict, optional
         A dictionary with the selected indices of each model output. The
-        default is None. If None, all measurement points are used in the
+        default is `None`. If `None`, all measurement points are used in the
         analysis.
     samples : array of shape (n_samples, n_params), optional
-        The samples to be used in the analysis. The default is None. If
-        None the samples are drawn from the probablistic input parameter
+        The samples to be used in the analysis. The default is `None`. If
+        `None`, the samples are drawn from the probabilistic input parameter
         object of the MetaModel object.
     n_samples : int, optional
-        Number of samples to be used in the analysis. The default is 5e5.
-        If samples is not None, this argument will be assigned based on the
+        Number of samples to be used in the analysis. The default is `500000`.
+        If samples is not `None`, this argument will be assigned based on the
         number of samples given.
     measured_data : dict, optional
-        A dictionary containing the observation data. The default is None.
-        if None, the observation defined in the Model object of the
+        A dictionary containing the observation data. The default is `None`.
+        If `None`, the observation defined in the Model object of the
         MetaModel is used.
-    inference_method : string, optional
-        A method for Bayesian inference. The default is 'rejection'. A
-        Markov Chain Monte Carlo sampler can be simply selected by passing
-        'MCMC'.
+    inference_method : str, optional
+        A method for approximating the posterior distribution in the Bayesian
+        inference step. The default is `'rejection'`, which stands for
+        rejection sampling. A Markov Chain Monte Carlo sampler can be simply
+        selected by passing `'MCMC'`.
     mcmc_params : dict, optional
         A dictionary with args required for the Bayesian inference with
-        MCMC. The default is None.
-
-        Pass the mcmc_params like the following::
-
-            mcmc_params:{
-            'init_samples': None,  # initial samples
-            'n_walkers': 100,  # number of walkers (chain)
-            'n_steps': 100000,  # number of maximum steps
-            'n_burn': 200,  # number of burn-in steps
-            'moves': None,  # Moves for the emcee sampler
-            'multiprocessing': False,  # multiprocessing
-            'verbose': False # verbosity
-            }
+        `MCMC`. The default is `None`.
+
+        Pass the mcmc_params like the following:
+
+            >>> mcmc_params = {
+                'init_samples': None,  # initial samples
+                'n_walkers': 100,  # number of walkers (chain)
+                'n_steps': 100000,  # number of maximum steps
+                'n_burn': 200,  # number of burn-in steps
+                'moves': None,  # Moves for the emcee sampler
+                'multiprocessing': False,  # multiprocessing
+                'verbose': False # verbosity
+                }
-        The items shown above are the default values. If any parmeter is
-        not defined, the default value will be assigned to it.
+        The items shown above are the default values. If any parameter is
+        not defined, the default value will be assigned to it.
     bayes_loocv : bool, optional
-        Bayesian Leave-one-out Cross Validation. The default is False. If
-        True, the LOOCV procedure is used to estimate the bayesian Model
+        Bayesian Leave-one-out Cross Validation. The default is `False`. If
+        `True`, the LOOCV procedure is used to estimate the Bayesian Model
         Evidence (BME).
     n_bootstrap_itrs : int, optional
-        Number of bootstrap iteration. The default is 1. If bayes_loocv is
-        True, this is qualt to the total length of the observation data
+        Number of bootstrap iterations. The default is `1`. If bayes_loocv is
+        `True`, this is equal to the total length of the observation data
         set.
     perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
-        User defined perturbed data. The default is [].
+        User defined perturbed data. The default is `[]`.
     bootstrap_noise : float, optional
-        A noise level to perturb the data set. The default is 0.05.
+        A noise level to perturb the data set. The default is `0.05`.
     plot_post_pred : bool, optional
-        Plot posterior predictive plots. The default is True.
+        Plot posterior predictive plots. The default is `True`.
     plot_map_pred : bool, optional
         Plot the model outputs vs the metamodel predictions for the maximum
-        a posteriori (defined as max_a_posteriori) parameter set. The
-        default is False.
-    max_a_posteriori : string, optional
-        Maximum a posteriori. 'mean' and 'mode' are available. The default
-        is 'mean'.
-    corner_title_fmt : string, optional
+        a posteriori (defined as `max_a_posteriori`) parameter set. The
+        default is `False`.
+    max_a_posteriori : str, optional
+        Maximum a posteriori. `'mean'` and `'mode'` are available. The default
+        is `'mean'`.
+    corner_title_fmt : str, optional
         Title format for the posterior distribution plot with python
-        package corner. The default is '.3f'.
+        package `corner`. The default is `'.3e'`.
 
-    Methods
-    -------
-    create_inference():
-        This method starts the analysis.
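+
+    A minimal calibration sketch (`meta_model` and `disc` are assumed to be a
+    trained `MetaModel` and a configured `Discrepancy` object):
+
+        >>> bayes = BayesInference(meta_model, discrepancy=disc)
+        >>> bayes.create_inference()
+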
     """
 
     def __init__(self, MetaModel, discrepancy=None, emulator=True,
@@ -665,7 +651,7 @@ class BayesInference:
         ----------
         samples : array of shape (n_samples, n_params), optional
             Parameter sets. The default is None.
-        key : string, optional
+        key : str, optional
             Key string to be passed to the run_model_parallel method.
             The default is 'MAP'.
 
@@ -801,8 +787,8 @@ class BayesInference:
                 data = obs_data[out][~np.isnan(obs_data[out])]
 
             # Prepare sigma2s
-            tot_sigma2s = total_sigma2s[out][~np.isnan(
-                total_sigma2s[out])][:nout]
+            non_nan_indices = ~np.isnan(total_sigma2s[out])
+            tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout]
 
             # Add the std of the PCE is chosen as emulator.
             if self.emulator:
diff --git a/src/bayesvalidrox/bayes_inference/discrepancy.py b/src/bayesvalidrox/bayes_inference/discrepancy.py
index a812f2995479e5c42457e408e6eee51abb546942..5e24aa03325a6edbceb1839c6d75b2fb9e7613a5 100644
--- a/src/bayesvalidrox/bayes_inference/discrepancy.py
+++ b/src/bayesvalidrox/bayes_inference/discrepancy.py
@@ -1,55 +1,72 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-Discrepancy class for Bayesian inference method
 
-Option A:
-    With no explicitly-specified model
-
-Option B:
-    With known redidual variance sigma2 (Gaussian or others)
-
-Option C:
-    With unknown residual variance sigma2 (Gaussian)
-    with given distribution
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Mon Sep  2 10:48:35 2019
-"""
 import scipy.stats as stats
-from surrogate_models.exp_designs import ExpDesigns
-from surrogate_models.inputs import Input
+from ..surrogate_models.exp_designs import ExpDesigns
 
 
 class Discrepancy:
-    def __init__(self, InputDisc):
-        self.type = 'Gaussian'
-        self.parameters = None
-        self.name = 'Sigma2'
-        self.prior = None
-        self.Marginals = []
+    """
+    Discrepancy class for Bayesian inference method.
+    We define the reference or reality to be equal to what we can model and a
+    discrepancy term \\( \\epsilon \\). We consider the following format:
+
+    $$\\textbf{y}_{\\text{reality}} = \\mathcal{M}(\\theta) + \\epsilon,$$
+
+    where \\( \\epsilon \\in R^{N_{out}} \\) represents the effects of
+    measurement error and model inaccuracy. For simplicity, it can be defined
+    as an additive Gaussian discrepancy with zero mean and given covariance
+    matrix \\( \\Sigma \\):
+
+    $$\\epsilon \\sim \\mathcal{N}(\\epsilon|0, \\Sigma). $$
+
+    In the context of model inversion or calibration, an observation point
+    \\( \\textbf{y}_i \\in \\mathcal{y} \\) is a realization of a Gaussian
+    distribution with mean value of \\(\\mathcal{M}(\\theta) \\) and covariance
+    matrix of \\( \\Sigma \\):
+
+    $$ p(\\textbf{y}|\\theta) = \\mathcal{N}(\\textbf{y}|\\mathcal{M}(\\theta),
+    \\Sigma).$$
+
+    The following options are available:
+
+    * Option A: With known residual covariance matrix \\(\\Sigma\\) for
+    independent measurements.
+
+    * Option B: With unknown residual covariance matrix \\(\\Sigma\\),
+    parametrized as \\(\\Sigma(\\theta_{\\epsilon})=\\sigma^2 \\textbf{I}_
+    {N_{out}}\\) with unknown residual variance \\(\\sigma^2\\).
+    This term will be inferred jointly with the uncertain input parameters.
+    For the inversion, you need to define a prior marginal via the `Input`
+    class. Note that \\(\\sigma^2\\) is only a single scalar multiplier for
+    the diagonal entries of the covariance matrix \\(\\Sigma\\).
+
+    Attributes
+    ----------
+    InputDisc : obj
+        Input object. Only required when \\(\\sigma^2\\) is expected to be
+        inferred jointly with the parameters (`Option B`). If multiple output
+        groups are defined by `Model.Output.names`, each model output needs
+        to have a prior marginal defined via the `Input` class. The default
+        is `''`.
+    disc_type : str
+        Type of the noise definition. Only `'Gaussian'` is supported so far.
+    parameters : dict or pandas.DataFrame
+        Known residual variance \\(\\sigma^2\\), i.e. diagonal entry of the
+        covariance matrix of the multivariate normal likelihood in case of
+        `Option A`.
+
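+    A minimal sketch for `Option A` with known residual variances per
+    observation (the `obs_var` values are hypothetical placeholders):
+
+        >>> import pandas as pd
+        >>> obs_var = pd.DataFrame({'Z': [0.05 ** 2] * 10})
+        >>> disc = Discrepancy(disc_type='Gaussian', parameters=obs_var)
+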
+    """
+
+    def __init__(self, InputDisc='', disc_type='Gaussian', parameters=None):
         self.InputDisc = InputDisc
-        self.n_samples = 10000
-        self.sigma2_prior = None
-
-    def create_inputDisc(self):
-        InputClass = Input()
-        InputClass.addMarginals()
-
-        return InputClass
+        self.disc_type = disc_type
+        self.parameters = parameters
 
     # -------------------------------------------------------------------------
     def get_sample(self, n_samples):
         """
-        Generate samples for the sigma2s, i.e. the diagonal entries of the
-        variance-covariance matrix in the multivariate normal distribution.
+        Generate samples for the \\(\\sigma^2\\), i.e. the diagonal entries of
+        the variance-covariance matrix in the multivariate normal distribution.
 
         Parameters
         ----------
@@ -59,7 +76,7 @@ class Discrepancy:
         Returns
         -------
         sigma2_prior: array of shape (n_samples, n_params)
-            Sigma2 samples.
+            \\(\\sigma^2\\) samples.
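+
+        A minimal usage sketch (assuming prior marginals for \\(\\sigma^2\\)
+        were defined via `InputDisc`, i.e. `Option B`):
+
+            >>> sigma2_prior = disc.get_sample(n_samples=1000)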
 
         """
         self.n_samples = n_samples
diff --git a/src/bayesvalidrox/bayes_inference/discrepancy_GP.py b/src/bayesvalidrox/bayes_inference/discrepancy_GP.py
deleted file mode 100644
index 4eb5d0023aeab8f18f6fa9274a687c73350b8b9e..0000000000000000000000000000000000000000
--- a/src/bayesvalidrox/bayes_inference/discrepancy_GP.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Sun Nov  7 09:42:33 2021
-
-@author: farid
-"""
-import numpy as np
-from sklearn.gaussian_process import GaussianProcessRegressor
-from sklearn.gaussian_process.kernels import Matern,RBF,RationalQuadratic
-from tqdm import tqdm
-from sklearn.preprocessing import MinMaxScaler
-
-class Bias():
-    def __init__(self, verbose=False):
-        self.verbose = verbose
-    
-    def fit_bias(self, EDX, ED_Y, Data):
-        print('')
-        outNames = [*ED_Y][1:]
-        self.norm_weights = {}
-        self.all_gp = {}
-        self.all_kernels = {}
-        self.best_gp = {}
-        self.all_scales = {}
-        for out in tqdm(outNames, ascii=True, desc ="Fitting the GPR based bias model"):
-            scale = {}
-            gp = {}
-            kernels = {}
-            weights = []
-            # Select kernel
-            # kernel = Matern(length_scale=1.0, length_scale_bounds=(1e-15, 1e5),
-            #                     nu=0.5)
-            kernel = RBF(length_scale=1.0, length_scale_bounds=(1e-15, 1e5))
-            for i in range(ED_Y[out].shape[1]):
-            
-                # Prepare the input matrix
-                scale[i] = MinMaxScaler()
-                EDX_S = scale[i].fit_transform(EDX)
-
-                # Prepare target from data and Remove NaN
-                try:
-                    data = Data[out].to_numpy()[~np.isnan(Data[out])]
-                except:
-                    data = Data[out][~np.isnan(Data[out])]
-                
-                delta = data[i] - ED_Y[out][:,i]
-                
-                # Initiate the GPR class
-                gp[i] = GaussianProcessRegressor(kernel=np.var(data)*kernel, n_restarts_optimizer=5,
-                                                 alpha = 1e-15,
-                                                 normalize_y=False, random_state=5)
-
-                # Fit the GPR
-                gp[i].fit(EDX_S, delta)
-
-                # Save log_marginal_likelihood as a weight
-                weights.append(1)
-                # weights.append(gp[i].log_marginal_likelihood_value_)
-                
-                # kernels
-                kernels[i] = gp[i].kernel_
-                
-            # Save GPR objects and normalized weights in dicts
-            self.all_scales[out]   = scale
-            self.all_gp[out]       = gp
-            self.all_kernels[out]  = kernels
-            self.norm_weights[out] = weights#/np.max(weights)
-            self.best_gp[out]      = gp[np.argmax(weights/np.sum(weights))]
-        
-        return 
-        
-    def predict(self,X,Output,return_std=True):
-        X = np.atleast_2d(X)
-        all_gp = self.all_gp[Output]
-        all_scale = self.all_scales[Output]
-        def predict_GPE(args):
-            (idx,gp),weight = args
-            D_new_T = all_scale[idx].transform(X)
-            return gp.predict(D_new_T,return_std) 
-        map_f = map(predict_GPE, zip(list(all_gp.items()),self.norm_weights[Output]))
-        all_results = list(map_f)
-
-        if not return_std:
-            y_hat = np.array(all_results).reshape(1,-1)
-            return y_hat
-        else:
-            y_hats = np.array([predict[0] for predict in all_results])
-            covs = np.array([predict[1][0]**2 for predict in all_results])
-
-            return y_hats.reshape(1,-1), np.diag(covs)
\ No newline at end of file
diff --git a/src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py b/src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py
deleted file mode 100644
index dae0e447f59e7f786a91485270e92f04edd91f41..0000000000000000000000000000000000000000
--- a/src/bayesvalidrox/bayes_inference/discrepancy_GP_v1.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Sun Nov  7 09:42:33 2021
-
-@author: farid
-"""
-import numpy as np
-from sklearn.gaussian_process import GaussianProcessRegressor
-from sklearn.gaussian_process.kernels import Matern,RBF,RationalQuadratic
-from tqdm import tqdm
-from sklearn.preprocessing import MinMaxScaler
-
-class Bias():
-    def __init__(self, verbose=False):
-        self.verbose = verbose
-    
-    def fit_bias(self, BiasInputs, ED_Y, Data):
-        print('')
-        self.BiasInputs = BiasInputs
-        outNames = [*ED_Y][1:]
-        self.norm_weights = {}
-        self.all_gp = {}
-        self.all_kernels = {}
-        self.best_gp = {}
-        self.all_scales = {}
-        for out in tqdm(outNames, ascii=True, desc ="Fitting the GPR based bias model"):
-            scale = {}
-            gp = {}
-            kernels = {}
-            weights = []
-            # Select kernel
-            kernel = Matern(length_scale=1.0, length_scale_bounds=(1e-15, 1e5),
-                                nu=0.5)
-            # kernel = RBF(length_scale=1.0, length_scale_bounds=(1e-15, 1e5))
-            for i, y in enumerate(ED_Y[out]):
-            
-                # Prepare the input matrix
-                D = np.hstack((BiasInputs[out],y.reshape(-1,1)))
-                
-                scale[i] = MinMaxScaler()
-                D_T = scale[i].fit_transform(D)
-
-                # Prepare data Remove NaN
-                try:
-                    data = Data[out].to_numpy()[~np.isnan(Data[out])]
-                except:
-                    data = Data[out][~np.isnan(Data[out])]
-                
-                # Initiate the GPR class
-                gp[i] = GaussianProcessRegressor(kernel=np.var(data)*kernel, n_restarts_optimizer=5,
-                                                 alpha = 1e-15,
-                                                 normalize_y=False, random_state=5)
-
-                # Fit the GPR
-                gp[i].fit(D_T, data)
-
-                # Save log_marginal_likelihood as a weight
-                weights.append(1)
-                # weights.append(gp[i].log_marginal_likelihood_value_)
-                
-                # kernels
-                kernels[i] = gp[i].kernel_
-                
-            # Save GPR objects and normalized weights in dicts
-            self.all_scales[out]   = scale
-            self.all_gp[out]       = gp
-            self.all_kernels[out]  = kernels
-            self.norm_weights[out] = weights#/np.max(weights)
-            self.best_gp[out]      = gp[np.argmax(weights/np.sum(weights))]
-        
-        return 
-    
-    def oldpredict(self,Y,Output,BiasInputs=None,return_cov=True):
-        Y = np.atleast_2d(Y)
-        BiasInputs = self.BiasInputs if BiasInputs is None else BiasInputs
-        D_new = np.hstack((BiasInputs[Output],Y.reshape(-1,1)))
-        
-        return self.best_gp[Output].predict(D_new,return_cov=return_cov)
-        
-    def predict(self,Y,Output,BiasInputs=None,return_cov=True):
-        Y = np.atleast_2d(Y)
-        BiasInputs = self.BiasInputs if BiasInputs is None else BiasInputs
-        all_gp = self.all_gp[Output]
-        all_scale = self.all_scales[Output]
-        D_new = np.hstack((BiasInputs[Output],Y.reshape(-1,1)))
-        def predict_GPE(args):
-            (idx,gp),weight = args
-            D_new_T = all_scale[idx].transform(D_new)
-            return gp.predict(D_new_T,return_cov=True) 
-        map_f = map(predict_GPE, zip(list(all_gp.items()),self.norm_weights[Output]))
-        all_results = list(map_f)
-
-        if not return_cov:
-            y_hat = np.mean(np.array(all_results),axis=0)
-            return y_hat
-        else:
-            formatting_function = np.vectorize(lambda f: format(f, '6.2E'))
-            # print(formatting_function(np.array([predict[0] for predict in all_results])))
-            y_hat = np.average(np.array([predict[0] for predict in all_results]),axis=0,
-                               weights=self.norm_weights[Output])
-            
-            # Equation 13c Gardner et al. (2021)
-            y_hats = np.array([predict[0] for predict in all_results])
-            covs = np.array([predict[1] for predict in all_results])
-            cov=0
-            print(formatting_function(covs))
-            for cov_,y_ in zip(covs,y_hats):
-                cov += cov_ + np.dot( y_.reshape(-1,1), y_.reshape(1,-1))
-            cov /= len(covs)
-            cov -= np.dot(y_hat.reshape(-1,1),y_hat.reshape(1,-1))
-            
-            return y_hat.reshape(1,-1), cov
\ No newline at end of file
diff --git a/src/bayesvalidrox/bayes_inference/mcmc.py b/src/bayesvalidrox/bayes_inference/mcmc.py
index c52a5f2ce4f2e1ffa2bd0a13f7dadef43a60b17e..8a7a44b8fadb327959ee9c4320bb7490617ffc9d 100755
--- a/src/bayesvalidrox/bayes_inference/mcmc.py
+++ b/src/bayesvalidrox/bayes_inference/mcmc.py
@@ -1,23 +1,5 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-MCMC class for Bayes inference with emcee package using an Affine Invariant
-Markov chain Monte Carlo (MCMC) Ensemble sampler [1].
-
-1. Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee: the
-MCMC hammer. Publications of the Astronomical Society of the Pacific, 125(925),
-p.306. https://emcee.readthedocs.io/en/stable/
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Wed Jun 3 2020
-"""
 
 import os
 import numpy as np
@@ -35,7 +17,27 @@ os.environ["OMP_NUM_THREADS"] = "1"
 
 class MCMC:
     """
-    A class for bayesian inference using a Markov-Chain Monte-Carlo Sampler.
+    A class for Bayesian inference via a Markov-Chain Monte-Carlo (MCMC)
+    sampler to approximate the posterior distribution of Bayes' theorem:
+    $$p(\\theta|\\mathcal{y}) = \\frac{p(\\mathcal{y}|\\theta) p(\\theta)}
+                                         {p(\\mathcal{y})}.$$
+
+    This class performs inference with the emcee package [1] using an Affine
+    Invariant Ensemble Sampler (AIES) [2].
+
+    [1] Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013. emcee:
+        the MCMC hammer. Publications of the Astronomical Society of the
+        Pacific, 125(925), p.306. https://emcee.readthedocs.io/en/stable/
+
+    [2] Goodman, J. and Weare, J., 2010. Ensemble samplers with affine
+        invariance. Communications in applied mathematics and computational
+        science, 5(1), pp.65-80.
+
+
+    Attributes
+    ----------
+    BayesOpts : obj
+        Bayes object.
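+
+    A minimal sketch (`bayes_opts` is assumed to be a configured
+    `BayesInference` object with `inference_method='MCMC'`):
+
+        >>> sampler = MCMC(bayes_opts)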
     """
 
     def __init__(self, BayesOpts):
@@ -380,8 +382,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_prior(self, theta):
         """
-        Calculates the log prior likelihood for the given parameter set(s)
-        theta.
+        Calculates the log prior likelihood \\( p(\\theta)\\) for the given
+        parameter set(s) \\( \\theta \\).
 
         Parameters
         ----------
@@ -445,8 +447,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_likelihood(self, theta):
         """
-        Computes likelihood p(y|theta, obs) of the performance of the
-        (meta-)model in reproducing the observation data.
+        Computes likelihood \\( p(\\mathcal{Y}|\\theta)\\) of the performance
+        of the (meta-)model in reproducing the observation data.
 
         Parameters
         ----------
@@ -494,8 +496,8 @@ class MCMC:
     # -------------------------------------------------------------------------
     def log_posterior(self, theta):
         """
-        Computes the posterior likelihood p(theta| obs) for the given
-        parameterset.
+        Computes the posterior likelihood \\(p(\\theta| \\mathcal{Y})\\) for
+        the given parameter set.
 
         Parameters
         ----------
@@ -652,11 +654,11 @@ class MCMC:
         The potential scale reduction factor (PSRF) defined by the variance
         within one chain, W, with the variance between chains B.
         Both variances are combined in a weighted sum to obtain an estimate of
-        the variance of a parameter θ.The square root of the ratio of this
-        estimates variance to the within chain variance is called the potential
-        scale reduction.
+        the variance of a parameter \\( \\theta \\). The square root of the
+        ratio of this estimated variance to the within-chain variance is
+        called the potential scale reduction.
         For a well converged chain it should approach 1. Values greater than
-        typically 1.1 indicate that the chains have not yet fully converged.
+        1.1 typically indicate that the chains have not yet fully converged.
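+
+        In the usual formulation with \\(n\\) steps per chain, within-chain
+        variance \\(W\\) and between-chain variance \\(B\\), the estimate
+        reads:
+
+        $$\\hat{R} = \\sqrt{\\frac{\\hat{V}}{W}}, \\qquad
+        \\hat{V} = \\frac{n-1}{n}W + \\frac{1}{n}B.$$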
 
         Source: http://joergdietrich.github.io/emcee-convergence.html
 
@@ -665,7 +667,7 @@ class MCMC:
         Parameters
         ----------
         chain : array (n_walkers, n_steps, n_params)
-            DESCRIPTION.
+            The emcee ensemble samples.
 
         Returns
         -------
@@ -805,36 +807,6 @@ class MCMC:
         Iterative scheme as proposed in Meng and Wong (1996) to estimate the
         marginal likelihood
 
-        Parameters
-        ----------
-        N1 : TYPE
-            DESCRIPTION.
-        N2 : TYPE
-            DESCRIPTION.
-        q11 : TYPE
-            DESCRIPTION.
-        q12 : TYPE
-            DESCRIPTION.
-        q21 : TYPE
-            DESCRIPTION.
-        q22 : TYPE
-            DESCRIPTION.
-        r0 : TYPE
-            DESCRIPTION.
-        neff : TYPE
-            DESCRIPTION.
-        tol : TYPE
-            DESCRIPTION.
-        maxiter : TYPE
-            DESCRIPTION.
-        criterion : TYPE
-            DESCRIPTION.
-
-        Returns
-        -------
-        TYPE
-            DESCRIPTION.
-
         """
         l1 = q11 - q12
         l2 = q21 - q22
diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index 644c837ddbc2e374a884be6786980af74f16e3aa..e857c59274048829b0a376679408959b4e22f0af 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -1,18 +1,5 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-This class offers helper functions for post-processing of the metamodels.
-
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Sat Aug 24 2019
-"""
 
 import numpy as np
 import math
@@ -35,34 +22,16 @@ plt.style.use(os.path.join(os.path.split(__file__)[0],
 
 class PostProcessing:
     """
-    This class provies many helper functions to pos process the trained
+    This class provides many helper functions to post-process the trained
     meta-model.
 
     Attributes
     ----------
     MetaModel : obj
         MetaModel object to do postprocessing on.
-    Rosenblatt : bool
-        If Rossenblatt transformation is required for the dependent input
-        parameters.
-    Methods
-    -------
-    plot_moments():
-        Plots the moments in a pdf format in the directory
-        `Outputs_PostProcessing`.
-    valid_metamodel(n_samples=1, samples=None, x_axis="Time [s]"):
-        Evaluates and plots the meta model and the PCEModel outputs for the
-        given number of samples or the given samples.
-    check_accuracy(n_samples=None, samples=None, outputs=None):
-        Checks accuracy of the metamodel by computing the root mean square
-        error and validation error for all outputs.
-    plot_seq_design_diagnostics(ref_BME_KLD=None, save_fig=True):
-        Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
-        (KLD) for the sequential design.
-    sobol_indices(xlabel='Time [s]', plot_type=None, save_fig=True):
-        Performs a global sensitivity analysis via Sobol indices.
-    check_reg_quality(n_samples=1000, samples=None, save_fig=True):
-        Checks the quality of the metamodel for single output models.
+    name : str
+        Type of the analysis. The default is `'calib'`. If a validation is
+        expected to be performed, change this to `'valid'`.
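+
+    A minimal usage sketch (`meta_model` is assumed to be a trained
+    `MetaModel` object):
+
+        >>> post = PostProcessing(meta_model)
+        >>> post.plot_moments()
+        >>> post.check_accuracy(n_samples=100)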
     """
 
     def __init__(self, MetaModel, name='calib'):
@@ -77,12 +46,12 @@ class PostProcessing:
 
         Parameters
         ----------
-        xlabel : string, optional
-            String to be displayed as x-label. The default is 'Time [s]'.
-        plot_type : string, optional
-            Options: bar or line. The default is None.
+        xlabel : str, optional
+            String to be displayed as x-label. The default is `'Time [s]'`.
+        plot_type : str, optional
+            Options: bar or line. The default is `None`.
         save_fig : bool, optional
-            Save figure or not. The default is True.
+            Save figure or not. The default is `True`.
 
         Returns
         -------
@@ -190,7 +159,7 @@ class PostProcessing:
         return self.pce_means, self.pce_stds
 
     # -------------------------------------------------------------------------
-    def valid_metamodel(self, n_samples=1, samples=None, x_axis="Time [s]"):
+    def valid_metamodel(self, n_samples=1, samples=None, x_axis='Time [s]'):
         """
         Evaluates and plots the meta model and the PCEModel outputs for the
         given number of samples or the given samples.
@@ -201,8 +170,8 @@ class PostProcessing:
             Number of samples to be evaluated. The default is 1.
         samples : array of shape (n_samples, n_params), optional
             Samples to be evaluated. The default is None.
-        x_axis : string, optional
-            Label of x axis. The default is "Time [s]".
+        x_axis : str, optional
+            Label of x axis. The default is `'Time [s]'`.
 
         Returns
         -------
@@ -321,9 +290,9 @@ class PostProcessing:
         Parameters
         ----------
         ref_BME_KLD : array, optional
-            Reference BME and KLD . The default is None.
+            Reference BME and KLD. The default is `None`.
         save_fig : bool, optional
-            Whether to save the figures. The default is True.
+            Whether to save the figures. The default is `True`.
 
         Returns
         -------
@@ -570,13 +539,14 @@ class PostProcessing:
     def sobol_indices(self, xlabel='Time [s]', plot_type=None, save_fig=True):
         """
         Provides Sobol indices as a sensitivity measure to infer the importance
-        of the input parameters. See Eq. 27 in [1] for more details.
+        of the input parameters. See Eq. 27 in [1] for more details. For the
+        case with principal component analysis, refer to [2].
 
-        1. Global sensitivity analysis: A flexible and efficient framework with
-        an example from stochastic hydrogeology S. Oladyshkin, F.P. de Barros,
-        W. Nowak  https://doi.org/10.1016/j.advwatres.2011.11.001
+        [1] Global sensitivity analysis: A flexible and efficient framework
+        with an example from stochastic hydrogeology. S. Oladyshkin, F.P.
+        de Barros, W. Nowak. https://doi.org/10.1016/j.advwatres.2011.11.001
 
-        2. Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+        [2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
         component analysis and sparse polynomial chaos expansions for global
         sensitivity analysis and model calibration: Application to urban
         drainage simulation. Reliability Engineering & System Safety, 195,
@@ -584,12 +554,13 @@ class PostProcessing:
 
         Parameters
         ----------
-        xlabel : TYPE, optional
-            DESCRIPTION. The default is 'Time [s]'.
-        plot_type : TYPE, optional
-            DESCRIPTION. The default is None.
-        save_fig : TYPE, optional
-            DESCRIPTION. The default is True.
+        xlabel : str, optional
+            Label of the x-axis. The default is `'Time [s]'`.
+        plot_type : str, optional
+            Plot type. The default is `None`, which corresponds to a line
+            plot. A bar chart can be selected by passing `'bar'`.
+        save_fig : bool, optional
+            Whether to save the figures. The default is `True`.
 
         Returns
         -------
@@ -845,7 +816,7 @@ class PostProcessing:
     # -------------------------------------------------------------------------
     def check_reg_quality(self, n_samples=1000, samples=None, save_fig=True):
         """
-        Checks the quality of the metamodel for single output models.
+        Checks the quality of the metamodel for single output models based on:
         https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
 
 
@@ -855,7 +826,7 @@ class PostProcessing:
             Number of parameter sets to use for the check. The default is 1000.
         samples : array of shape (n_samples, n_params), optional
             Parameter sets to use for the check. The default is None.
-        save_fig : Bool, optional
+        save_fig : bool, optional
             Whether to save the figures. The default is True.
 
         Returns
@@ -1150,7 +1121,7 @@ class PostProcessing:
         ----------
         samples : array of shape (n_samples, n_params), optional
             Samples to evaluate the model at. The default is None.
-        key_str : string, optional
+        key_str : str, optional
             Key string pass to the model. The default is 'Valid'.
 
         Returns
@@ -1262,7 +1233,7 @@ class PostProcessing:
         ----------
         x_values : list or array, optional
             List of x values. The default is [].
-        x_axis : string, optional
+        x_axis : str, optional
             Label of the x axis. The default is "x [m]".
         save_fig : bool, optional
             Whether to save the figures. The default is True.
diff --git a/src/bayesvalidrox/pylink/pylink.py b/src/bayesvalidrox/pylink/pylink.py
index 4782568213fa39776bbdf25d82db6248396758bc..3d200513a598e5634b956f8e483655d146df94ba 100644
--- a/src/bayesvalidrox/pylink/pylink.py
+++ b/src/bayesvalidrox/pylink/pylink.py
@@ -12,7 +12,7 @@ import tqdm
 
 
 class PyLinkForwardModel(object):
-    r"""
+    """
     A forward model binder
 
-    This calss serves as a code wrapper. This wrapper allows the execution of
+    This class serves as a code wrapper. This wrapper allows the execution of
@@ -100,10 +100,11 @@ class PyLinkForwardModel(object):
 
     def __init__(self, link_type='pylink', name=None, py_file=None,
                  shell_command='', input_file=None, input_template=None,
-                 aux_file=None, exe_path='', output_file_names=[], 
+                 aux_file=None, exe_path='', output_file_names=[],
                  output_names=[], output_parser='', multi_process=True,
                  n_cpus=None, meas_file=None, meas_file_valid=None,
-                 mc_ref_file=None, obs_dict={}, obs_dict_valid={}, mc_ref_dict={}):
+                 mc_ref_file=None, obs_dict={}, obs_dict_valid={},
+                 mc_ref_dict={}):
         self.link_type = link_type
         self.name = name
         self.shell_command = shell_command
@@ -151,22 +152,27 @@ class PyLinkForwardModel(object):
 
         """
         if case.lower() == 'calib':
-            if bool(self.observations):
+            if isinstance(self.observations, dict) and bool(self.observations):
                 obs = pd.DataFrame.from_dict(self.observations)
             elif self.meas_file is not None:
                 file_path = os.path.join(os.getcwd(), self.meas_file)
                 obs = pd.read_csv(file_path, delimiter=',')
+            elif isinstance(self.observations, pd.DataFrame):
+                obs = self.observations
             else:
                 raise Exception("Please provide the observation data as a "
                                 "dictionary via observations attribute or pass"
                                 " the csv-file path to MeasurementFile "
                                 "attribute")
         elif case.lower() == 'valid':
-            if bool(self.observations_valid):
+            if isinstance(self.observations_valid, dict) and \
+              bool(self.observations_valid):
                 obs = pd.DataFrame.from_dict(self.observations_valid)
             elif self.meas_file_valid is not None:
                 file_path = os.path.join(os.getcwd(), self.meas_file_valid)
                 obs = pd.read_csv(file_path, delimiter=',')
+            elif isinstance(self.observations_valid, pd.DataFrame):
+                obs = self.observations_valid
             else:
                 raise Exception("Please provide the observation data as a "
                                 "dictionary via Observations attribute or pass"
@@ -236,24 +242,24 @@ class PyLinkForwardModel(object):
         return output
 
     # -------------------------------------------------------------------------
-    def update_input_params(self, new_input_file, param_sets):
+    def update_input_params(self, new_input_file, param_set):
         """
-        Finds this pattern with <X1> in the new_input_file and replace it with
-         the new value from the array param_sets.
+        Finds the patterns like <X1> in the new_input_file and replaces them
+        with the new values from the array param_set.
 
         Parameters
         ----------
-        new_input_file : TYPE
-            DESCRIPTION.
-        param_sets : TYPE
-            DESCRIPTION.
+        new_input_file : list
+            List of the input files with the adapted names.
+        param_set : array of shape (n_params)
+            Parameter set.
 
         Returns
         -------
         None.
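+
+        Example
+        -------
+        With `param_set = np.array([0.1, 2.5])`, the placeholders `<X1>` and
+        `<X2>` in the input files are replaced by `1.0000e-01` and
+        `2.5000e+00`, respectively.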
 
         """
-        NofPa = param_sets.shape[0]
+        NofPa = param_set.shape[0]
         text_to_search_list = [f'<X{i+1}>' for i in range(NofPa)]
 
         for filename in new_input_file:
@@ -262,7 +268,7 @@ class PyLinkForwardModel(object):
                 filedata = file.read()
 
             # Replace the target string
-            for text_to_search, params in zip(text_to_search_list, param_sets):
+            for text_to_search, params in zip(text_to_search_list, param_set):
                 filedata = filedata.replace(text_to_search, f'{params:0.4e}')
 
             # Write the file out again
@@ -274,13 +280,13 @@ class PyLinkForwardModel(object):
         """
         Runs the execution command given by the user to run the given model.
-        It checks if the output files have been generated. If yes, the jobe is
+        It checks if the output files have been generated. If yes, the job is
-         done and it extracts and returns the requested output(s). Otherwise,
-         it executes the command again.
+        done and it extracts and returns the requested output(s). Otherwise,
+        it executes the command again.
 
         Parameters
         ----------
-        command : string
-            The command to be executed.
+        command : str
+            The shell command to be executed.
         output_file_names : list
-            Name of the output file names.
+            Names of the output files.
 
@@ -318,6 +324,19 @@ class PyLinkForwardModel(object):
         This function creates subdirectory for the current run and copies the
         necessary files to this directory and renames them. Next, it executes
         the given command.
+
+        Parameters
+        ----------
+        xx : tuple
+            A tuple including parameter set, simulation number and key string.
+
+        Returns
+        -------
+        output : array of shape (n_outputs+1, n_obs)
+            An array passed by the output parser containing the x_values as
+            the first row and the simulation results stored in the rest of
+            the array.
+
         """
         c_points, run_no, key_str = xx
 
@@ -385,21 +404,21 @@ class PyLinkForwardModel(object):
 
         Parameters
         ----------
-        c_points : array like of shape (n_samples, n_params)
+        c_points : array of shape (n_samples, n_params)
             Collocation points (training set).
         prevRun_No : int, optional
             Previous run number, in case the sequential design is selected.
-            The default is 0.
-        key_str : string, optional
-            A descriptive string for validation runs. The default is ''.
+            The default is `0`.
+        key_str : str, optional
+            A descriptive string for validation runs. The default is `''`.
         mp : bool, optional
-            Multiprocessing. The default is True.
+            Multiprocessing. The default is `True`.
 
         Returns
         -------
         all_outputs : dict
             A dictionary with x values (time step or point id) and all outputs.
-            Each key contains an array of the shape (n_samples, n_obs).
+            Each key contains an array of the shape `(n_samples, n_obs)`.
         new_c_points : array
             Updated collocation points (training set). If a simulation does not
-            executed successfully, the parameter set is removed.
+            execute successfully, the parameter set is removed.
@@ -550,9 +569,9 @@ class PyLinkForwardModel(object):
 
         Parameters
         ----------
-        dir_name : string
+        dir_name : str
             Directory name.
-        key : string
+        key : str
             Keyword to search for.
 
         Returns
diff --git a/src/bayesvalidrox/surrogate_models/exp_designs.py b/src/bayesvalidrox/surrogate_models/exp_designs.py
index d71e92e79ae279da4976bdcdcf265122ea7932c3..31b9fde86a16ae94afbd8405eb31882c076d46f6 100644
--- a/src/bayesvalidrox/surrogate_models/exp_designs.py
+++ b/src/bayesvalidrox/surrogate_models/exp_designs.py
@@ -1,17 +1,5 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart, www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Sat Aug 24 2019
-
-"""
 
 import numpy as np
 import math
@@ -24,15 +12,120 @@ from .apoly_construction import apoly_construction
 
 
 class ExpDesigns:
-    def __init__(self, Input, meta_Model='pce', hdf5_file=None,
+    """
+    This class generates samples from the prescribed marginals for the model
+    parameters using the `Input` object.
+
+    Attributes
+    ----------
+    Input : obj
+        Input object containing the parameter marginals, i.e. name,
+        distribution type and distribution parameters or available raw data.
+    method : str
+        Type of the experimental design. The default is `'normal'`. The
+        other option is `'sequential'`.
+    meta_Model : str
+        Type of the meta_model.
+    sampling_method : str
+        Name of the sampling method for the experimental design. The
+        following sampling methods are supported:
+
+        * random
+        * latin_hypercube
+        * sobol
+        * halton
+        * hammersley
+        * korobov
+        * chebyshev(FT)
+        * grid(FT)
+        * nested_grid(FT)
+        * user
+    hdf5_file : str
+        Name of the hdf5 file that contains the experimental design.
+    n_new_samples : int
+        Number of (initial) training points.
+    n_max_samples : int
+        Number of maximum training points.
+    mod_LOO_threshold : float
+        The modified leave-one-out cross validation threshold where the
+        sequential design stops.
+    tradeoff_scheme : str
+        Trade-off scheme to assign weights to the exploration and exploitation
+        scores in the sequential design.
+    n_canddidate : int
+        Number of candidate training sets to calculate the scores for.
+    explore_method : str
+        Type of the exploration method for the sequential design. The following
+        methods are supported:
+
+        * Voronoi
+        * random
+        * latin_hypercube
+        * LOOCV
+        * dual annealing
+    exploit_method : str
+        Type of the exploitation method for the sequential design. The
+        following methods are supported:
+
+        * BayesOptDesign
+        * BayesActDesign
+        * VarOptDesign
+        * alphabetic
+        * Space-filling
+    util_func : str or list
+        The utility function to be specified for the `exploit_method`. For the
+        available utility functions see Note section.
+    n_cand_groups : int
+        Number of candidate groups. Each group of candidate training sets will
+        be evaluated separately in parallel.
+    n_replication : int
+        Number of replications. Only for comparison. The default is 1.
+    post_snapshot : bool
+        Whether to plot the posterior in the sequential design. The default
+        is `False`.
+    step_snapshot : int
+        The number of steps to plot the posterior in the sequential design. The
+        default is 1.
+    max_a_post : list or array
+        Maximum a posteriori of the posterior distribution, if known. The
+        default is `[]`.
+    adapt_verbose : bool
+        Whether to plot the model response vs that of the metamodel for the
+        new training point in the sequential design.
+
+    Note
+    ----
+    The following utility functions for the **exploitation** methods are
+    supported:
+
+    #### BayesOptDesign (when data is available)
+    - DKL (Kullback-Leibler Divergence)
+    - DPP (D-Posterior-precision)
+    - APP (A-Posterior-precision)
+
+    #### VarOptDesign (when data is not available)
+    - Entropy (Entropy/MMSE/active learning)
+    - EIGF (Expected Improvement for Global fit)
+    - LOOCV (Leave-one-out Cross Validation)
+
+    #### alphabetic
+    - D-Opt (D-Optimality)
+    - A-Opt (A-Optimality)
+    - K-Opt (K-Optimality)
+    """
+
+    def __init__(self, Input, method='normal', meta_Model='pce',
+                 sampling_method='random', hdf5_file=None,
                  n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16,
                  tradeoff_scheme=None, n_canddidate=1, explore_method='random',
                  exploit_method='Space-filling', util_func='Space-filling',
                  n_cand_groups=4, n_replication=1, post_snapshot=False,
-                 step_snapshot=1, max_a_post=[]):
+                 step_snapshot=1, max_a_post=[], adapt_verbose=False):
 
         self.InputObj = Input
+        self.method = method
         self.meta_Model = meta_Model
+        self.sampling_method = sampling_method
         self.hdf5_file = hdf5_file
         self.n_new_samples = n_new_samples
         self.n_max_samples = n_max_samples
@@ -47,6 +140,7 @@ class ExpDesigns:
         self.post_snapshot = post_snapshot
         self.step_snapshot = step_snapshot
         self.max_a_post = max_a_post
+        self.adapt_verbose = adapt_verbose
 
     # -------------------------------------------------------------------------
     def generate_samples(self, n_samples, sampling_method='random',
@@ -58,11 +152,11 @@ class ExpDesigns:
         ----------
         n_samples : int
             Number of requested samples.
-        sampling_method : TYPE, optional
-            DESCRIPTION. The default is 'random'.
+        sampling_method : str, optional
+            Sampling method. The default is `'random'`.
         transform : bool, optional
             Transformation via an isoprobabilistic transformation method. The
-            default is False.
+            default is `False`.
 
         Returns
         -------
@@ -94,12 +188,12 @@ class ExpDesigns:
         ----------
         n_samples : int
             Number of requested training points.
-        sampling_method : string, optional
-            Sampling method. The default is 'random'.
+        sampling_method : str, optional
+            Sampling method. The default is `'random'`.
         transform : bool, optional
-            Isoprobabilistic transformation. The default is False.
-        max_pce_deg : TYPE, optional
-            Maximum PCE polynomial degree. The default is None.
+            Isoprobabilistic transformation. The default is `False`.
+        max_pce_deg : int, optional
+            Maximum PCE polynomial degree. The default is `None`.
 
         Returns
         -------
@@ -180,7 +274,7 @@ class ExpDesigns:
         Parameters
         ----------
         max_deg : int, optional
-            Maximum degree. The default is None.
+            Maximum degree. The default is `None`.
 
         Returns
         -------
@@ -224,9 +318,7 @@ class ExpDesigns:
                                                      domain=self.JDist)
 
         # Create orthogonal polynomial coefficients if necessary
-        if self.meta_Model.lower() != 'gpe' and self.apce\
-            and max_deg is not None \
-                and Inputs.polycoeffsFlag:
+        if self.apce and max_deg is not None and Inputs.poly_coeffs_flag:
             self.polycoeffs = {}
             for parIdx in tqdm(range(ndim), ascii=True,
                                desc="Computing orth. polynomial coeffs"):
@@ -467,12 +559,12 @@ class ExpDesigns:
 
         Parameters
         ----------
-        X : ndarray of shape (n_samples,n_params)
+        X : array of shape (n_samples,n_params)
             Samples to be transformed.
 
         Returns
         -------
-        tr_X: ndarray of shape (n_samples,n_params)
+        tr_X: array of shape (n_samples,n_params)
             Transformed samples.
 
         """
diff --git a/src/bayesvalidrox/surrogate_models/exploration.py b/src/bayesvalidrox/surrogate_models/exploration.py
index 2504a15d5285acc930b13db30422e2acf613e004..d2e90c362cf2e11e7e90de9f1026758c1a9c3101 100644
--- a/src/bayesvalidrox/surrogate_models/exploration.py
+++ b/src/bayesvalidrox/surrogate_models/exploration.py
@@ -1,366 +1,370 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-Created based on the Surrogate Modeling Toolbox ("SUMO Toolbox")
-
-publication:
->   - A Surrogate Modeling and Adaptive Sampling Toolbox for Computer Based Design
->   D. Gorissen, K. Crombecq, I. Couckuyt, T. Dhaene, P. Demeester,
->   Journal of Machine Learning Research,
->   Vol. 11, pp. 2051-2055, July 2010. 
->
-> Contact : sumo@sumo.intec.ugent.be - http://sumo.intec.ugent.be
-
-@author: farid
-"""
 
 import numpy as np
-import copy
-import scipy.stats as stats
 from scipy.spatial import distance
 
-from .ExpDesigns import ExpDesigns
 
 class Exploration:
-    def __init__(self, PCEModel, NCandidate):
-        self.PCEModel = PCEModel
+    """
+    Created based on the Surrogate Modeling Toolbox (SUMO) [1].
+
+    [1] Gorissen, D., Couckuyt, I., Demeester, P., Dhaene, T. and Crombecq, K.,
+        2010. A surrogate modeling and adaptive sampling toolbox for computer
+        based design. Journal of Machine Learning Research, 11, pp.2051-2055.
+        sumo@sumo.intec.ugent.be - http://sumo.intec.ugent.be
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object.
+    n_candidate : int
+        Number of candidate samples.
+    mc_criterion : str
+        Selection criterion. The default is `'mc-intersite-proj-th'`. Another
+        option is `'mc-intersite-proj'`.
+    w : int
+        Number of random points in the domain for each sample of the
+        training set.
+    """
+
+    def __init__(self, MetaModel, n_candidate,
+                 mc_criterion='mc-intersite-proj-th'):
+        self.MetaModel = MetaModel
         self.Marginals = []
-        self.OldExpDesign = PCEModel.ExpDesign.X
-        self.Bounds = PCEModel.BoundTuples
-        self.numNewSamples = NCandidate
-        self.mcCriterion = 'mc-intersite-proj-th' #'mc-intersite-proj'
-        
-        self.allCandidates = []
-        self.newSamples = []
-        
-        self.areas = []
-        self.closestPoints = []
-        self.perc = None
-        self.errors = None
-    
+        self.n_candidate = n_candidate
+        self.mc_criterion = mc_criterion
         self.w = 100
-        
-    def getExplorationSamples(self):
+
+    def get_exploration_samples(self):
         """
-        This function generates prospective candidates to be selected as new design
-        and their associated exploration scores.
+        This function generates candidates to be selected as the new design
+        and their associated exploration scores.
+
+        Returns
+        -------
+        all_candidates : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
         """
-        PCEModel = self.PCEModel
-        explore_method = PCEModel.ExpDesign.explore_method
-        
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+
+        print("\n")
+        print(f' The {explore_method}-Method is selected as the exploration '
+              'method.')
+        print("\n")
+
         if explore_method == 'Voronoi':
-            print("\n")
-            print(' The Voronoi-based method is selected as the exploration method.')
-            print("\n")
-            
             # Generate samples using the Voronoi method
-            allCandidates, scoreExploration = self.getVornoiSamples()
-                
+            all_candidates, exploration_scores = self.get_vornoi_samples()
         else:
-            print("\n")
-            print(f' The {explore_method}-Method is selected as the exploration method.')
-            print("\n")
             # Generate samples using the MC method
-            allCandidates, scoreExploration = self.getMCSamples()
+            all_candidates, exploration_scores = self.get_mc_samples()
 
-        return allCandidates, scoreExploration
-            
-    #--------------------------------------------------------------------------------------------------------
-    
-    def getVornoiSamples(self):
-        
-        mcCriterion = self.mcCriterion
-        numNewSamples = self.numNewSamples
+        return all_candidates, exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_vornoi_samples(self):
+        """
+        This function generates samples based on Voronoi cells and their
+        corresponding scores.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
+        """
+
+        mc_criterion = self.mc_criterion
+        n_candidate = self.n_candidate
         # Get the Old ExpDesign #samples
-        OldExpDesign = self.OldExpDesign
-        ndim = OldExpDesign.shape[1]
-        
+        old_ED_X = self.MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
         # calculate error #averageErrors
-        errorVoronoi, allCandidates = self.approximateVoronoi(self.w,OldExpDesign)
-        
-        
-        # get amount of samples
-        sortederrorVoronoi = errorVoronoi #np.sort(copy.copy(errorVoronoi))[::-1]
-        bestSamples = range(len(errorVoronoi)) #np.argsort(errorVoronoi)[::-1]
-        
-
-        # for each best sample, pick the best candidate point in the voronoi cell
-        selectedSamples = np.empty((0, ndim))
-        badSamples = []
-        
-        for i, index in enumerate(bestSamples):
+        error_voronoi, all_candidates = self.approximate_voronoi(
+            self.w, old_ED_X
+            )
+
+        # Pick the best candidate point in the voronoi cell
+        # for each best sample
+        selected_samples = np.empty((0, ndim))
+        bad_samples = []
+
+        for index in range(len(error_voronoi)):
 
             # get candidate new samples from voronoi tesselation
-            candidates = self.closestPoints[index]
-            
+            candidates = self.closest_points[index]
+
             # get total number of candidates
-            nNewSamples = candidates.shape[0]
-            
-            
+            n_new_samples = candidates.shape[0]
+
             # still no candidate samples around this one, skip it!
-            if nNewSamples == 0:
-                print('Sample %s skipped because there were no candidate samples around it...'%OldExpDesign[index])
-                badSamples.append(index)
+            if n_new_samples == 0:
+                print('The following sample has been skipped because there '
+                      'were no candidate samples around it...')
+                print(old_ED_X[index])
+                bad_samples.append(index)
                 continue
-            
+
             # find candidate that is farthest away from any existing sample
-            maxMinDistance = 0
-            bestCandidate = 0
-            minIntersiteDist = np.zeros((nNewSamples))
-            minprojectedDist = np.zeros((nNewSamples))
-            
-            for j in range(nNewSamples):
-                NewSamples = np.vstack((OldExpDesign, selectedSamples))
+            max_min_distance = 0
+            best_candidate = 0
+            min_intersite_dist = np.zeros((n_new_samples))
+            min_projected_dist = np.zeros((n_new_samples))
+
+            for j in range(n_new_samples):
+
+                new_samples = np.vstack((old_ED_X, selected_samples))
+
                 # find min distorted distance from all other samples
-                euclideanDist = self.buildDistanceMatrixPoint(NewSamples, candidates[j], doSqrt=True)
-                mineuclideanDist = np.min(euclideanDist)
-                minIntersiteDist[j] = mineuclideanDist
-                
-                # see if this is the maximal minimum distance from all other samples
-                if mineuclideanDist >= maxMinDistance:
-                    maxMinDistance = mineuclideanDist
-                    bestCandidate = j
-                
+                euclidean_dist = self._build_dist_matrix_point(
+                    new_samples, candidates[j], do_sqrt=True)
+                min_euclidean_dist = np.min(euclidean_dist)
+                min_intersite_dist[j] = min_euclidean_dist
+
+                # Check if this is the maximum minimum distance from all other
+                # samples
+                if min_euclidean_dist >= max_min_distance:
+                    max_min_distance = min_euclidean_dist
+                    best_candidate = j
+
                 # Projected distance
-                projectedDist =  distance.cdist(NewSamples, [candidates[j]], 'chebyshev')
-                minprojectedDist[j] = np.min(projectedDist)
-                
-            
-            if mcCriterion == 'mc-intersite-proj': 
-                weightEuclideanDist = 0.5 * ((nNewSamples+1)**(1/ndim) - 1)
-                weightProjectedDist = 0.5 * (nNewSamples+1)
-                totalDistScores= weightEuclideanDist * minIntersiteDist + weightProjectedDist * minprojectedDist
-            
-            elif mcCriterion == 'mc-intersite-proj-th':
-                alpha = 0.5 # chosen (tradeoff)
-                d_min = 2 * alpha / nNewSamples
-                if any(minprojectedDist < d_min):
-                    candidates = np.delete(candidates, [minprojectedDist < d_min], axis=0)
-                    totalDistScores = np.delete(minIntersiteDist, [minprojectedDist < d_min], axis=0)
+                projected_dist = distance.cdist(
+                    new_samples, [candidates[j]], 'chebyshev')
+                min_projected_dist[j] = np.min(projected_dist)
+
+            if mc_criterion == 'mc-intersite-proj':
+                weight_euclidean_dist = 0.5 * ((n_new_samples+1)**(1/ndim) - 1)
+                weight_projected_dist = 0.5 * (n_new_samples+1)
+                total_dist_scores = weight_euclidean_dist * min_intersite_dist
+                total_dist_scores += weight_projected_dist * min_projected_dist
+
+            elif mc_criterion == 'mc-intersite-proj-th':
+                alpha = 0.5  # chosen (tradeoff)
+                d_min = 2 * alpha / n_new_samples
+                if any(min_projected_dist < d_min):
+                    candidates = np.delete(
+                        candidates, [min_projected_dist < d_min], axis=0
+                        )
+                    total_dist_scores = np.delete(
+                        min_intersite_dist, [min_projected_dist < d_min],
+                        axis=0
+                        )
                 else:
-                    totalDistScores = minIntersiteDist
-           
+                    total_dist_scores = min_intersite_dist
             else:
-                raise NameError('The MC-Criterion you requested is not available.')
-           
-            
-            # add the best candidate to the list of new samples
-            bestCandidate = np.argsort(totalDistScores)[::-1][:numNewSamples]
-            selectedSamples = np.vstack((selectedSamples, candidates[bestCandidate]))
-
-            #print('\nBest candidate around sample %s was chosen to be %s, with minDistance %s'%(OldExpDesign[index], candidates[bestCandidate], totalDistScores[bestCandidate]))
-            
-        self.newSamples = selectedSamples #candidates 
-        self.explorationScore = np.delete(sortederrorVoronoi, badSamples, axis=0)
-        
-        
-        return self.newSamples, self.explorationScore    
-            
-    #--------------------------------------------------------------------------------------------------------
-
-    def getMCSamples(self, allCandidates=None):
+                raise NameError(
+                    'The MC-Criterion you requested is not available.'
+                    )
+
+            # Add the best candidate to the list of new samples
+            best_candidate = np.argsort(total_dist_scores)[::-1][:n_candidate]
+            selected_samples = np.vstack(
+                (selected_samples, candidates[best_candidate])
+                )
+
+        self.new_samples = selected_samples
+        self.exploration_scores = np.delete(error_voronoi, bad_samples, axis=0)
+
+        return self.new_samples, self.exploration_scores
+
+    # -------------------------------------------------------------------------
+    def get_mc_samples(self, all_candidates=None):
         """
-        This function generates random samples based on Global Monte Carlo methods
-        and their corresponding scores.
-        
+        This function generates random samples based on Global Monte Carlo
+        methods and their corresponding scores, based on [1].
+
+        [1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
+            space-filling and non-collapsing sequential design strategies for
+            simulation-based modeling. European Journal of Operational Research
+            , 214(3), pp.683-696.
+            DOI: https://doi.org/10.1016/j.ejor.2011.05.032
+
         Implemented methods to compute scores:
             1) mc-intersite-proj
             2) mc-intersite-proj-th
-        
-        Based on the following paper:
-            Crombecq, K., Laermans, E., & Dhaene, T. (2011). Efficient space-filling and non-collapsing sequential design strategies for simulation-based modeling.
-            European Journal of Operational Research, 214(3), 683-696.
-            DOI: https://doi.org/10.1016/j.ejor.2011.05.032
-        
+
+        Arguments
+        ---------
+        all_candidates : array, optional
+            Samples to compute the scores for. The default is `None`, in which
+            case samples are generated from the defined model input marginals.
+
+        Returns
+        -------
+        new_samples : array of shape (n_candidate, n_params)
+            A list of samples.
+        exploration_scores : array of shape (n_candidate)
+            Exploration scores.
         """
-        PCEModel = self.PCEModel
-        explore_method = PCEModel.ExpDesign.explore_method
-        mcCriterion = self.mcCriterion
-        if allCandidates is None:
-            nNewSamples = self.numNewSamples
-        else: 
-            nNewSamples = allCandidates.shape[0] 
-            
+        MetaModel = self.MetaModel
+        explore_method = MetaModel.ExpDesign.explore_method
+        mc_criterion = self.mc_criterion
+        if all_candidates is None:
+            n_candidate = self.n_candidate
+        else:
+            n_candidate = all_candidates.shape[0]
+
         # Get the Old ExpDesign #samples
-        OldExpDesign = self.OldExpDesign
-        ndim = OldExpDesign.shape[1]
-        
+        old_ED_X = MetaModel.ExpDesign.X
+        ndim = old_ED_X.shape[1]
+
         # ----- Compute the number of random points -----
-        if allCandidates is None:
+        if all_candidates is None:
             # Generate MC Samples
-            allCandidates = PCEModel.ExpDesign.generate_samples(self.numNewSamples,
-                                                                explore_method)
-        self.allCandidates = allCandidates
-        
+            all_candidates = MetaModel.ExpDesign.generate_samples(
+                self.n_candidate, explore_method
+                )
+        self.all_candidates = all_candidates
+
         # initialization
-        newSamples = np.empty((0, ndim))
-        minIntersiteDist = np.zeros((nNewSamples))
-        minprojectedDist = np.zeros((nNewSamples))
-            
-        
-        for i, candidate in enumerate(allCandidates):
-            
+        new_samples = np.empty((0, ndim))
+        min_intersite_dist = np.zeros((n_candidate))
+        min_projected_dist = np.zeros((n_candidate))
+
+        for i, candidate in enumerate(all_candidates):
+
             # find candidate that is farthest away from any existing sample
-            maxMinDistance = 0
+            max_min_distance = 0
-            bestCandidate = 0
-            
-            
-            NewSamples = np.vstack((OldExpDesign, newSamples))
+
+            known_samples = np.vstack((old_ED_X, new_samples))
             # find min distorted distance from all other samples
-            euclideanDist = self.buildDistanceMatrixPoint(NewSamples, candidate, doSqrt=True)
-            mineuclideanDist = np.min(euclideanDist)
-            minIntersiteDist[i] = mineuclideanDist
-            
-            # see if this is the maximal minimum distance from all other samples
-            if mineuclideanDist >= maxMinDistance:
-                maxMinDistance = mineuclideanDist
-                bestCandidate = i
-            
+            euclidean_dist = self._build_dist_matrix_point(
+                known_samples, candidate, do_sqrt=True
+                )
+            min_euclidean_dist = np.min(euclidean_dist)
+            min_intersite_dist[i] = min_euclidean_dist
+
+            # Check if this is the maximum minimum distance from all other
+            # samples
+            if min_euclidean_dist >= max_min_distance:
+                max_min_distance = min_euclidean_dist
+
             # Projected distance
-            projectedDist =  distance.cdist(NewSamples, [candidate], 'chebyshev')
-            minprojectedDist[i] = np.min(projectedDist)
-                
-            
-        if mcCriterion == 'mc-intersite-proj': 
-            weightEuclideanDist = ((nNewSamples+1)**(1/ndim) - 1) * 0.5
-            weightProjectedDist = (nNewSamples+1) * 0.5
-            totalDistScores= weightEuclideanDist * minIntersiteDist + weightProjectedDist * minprojectedDist
-        
-        elif mcCriterion == 'mc-intersite-proj-th':
-            alpha = 0.5 # chosen (tradeoff)
-            d_min = 2 * alpha / nNewSamples
-            if any(minprojectedDist < d_min):
-                allCandidates = np.delete(allCandidates, [minprojectedDist < d_min], axis=0)
-                totalDistScores = np.delete(minIntersiteDist, [minprojectedDist < d_min], axis=0)
+            projected_dist = distance.cdist(
+                known_samples, [candidate], 'chebyshev'
+                )
+            min_projected_dist[i] = np.min(projected_dist)
+
+        if mc_criterion == 'mc-intersite-proj':
+            weight_euclidean_dist = ((n_candidate+1)**(1/ndim) - 1) * 0.5
+            weight_projected_dist = (n_candidate+1) * 0.5
+            total_dist_scores = weight_euclidean_dist * min_intersite_dist
+            total_dist_scores += weight_projected_dist * min_projected_dist
+
+        elif mc_criterion == 'mc-intersite-proj-th':
+            alpha = 0.5  # chosen (tradeoff)
+            d_min = 2 * alpha / n_candidate
+            if any(min_projected_dist < d_min):
+                all_candidates = np.delete(
+                    all_candidates, [min_projected_dist < d_min], axis=0
+                    )
+                total_dist_scores = np.delete(
+                    min_intersite_dist, [min_projected_dist < d_min], axis=0
+                    )
             else:
-                totalDistScores= minIntersiteDist
-                
+                total_dist_scores = min_intersite_dist
         else:
             raise NameError('The MC-Criterion you requested is not available.')
-        
-            
-        self.newSamples = allCandidates
-        self.explorationScore = totalDistScores / np.nansum(totalDistScores)
-        
-        
-        return self.newSamples, self.explorationScore 
-    
-    #--------------------------------------------------------------------------------------------------------
-            
-    def approximateVoronoi(self, w, constraints=[]):
+
+        self.new_samples = all_candidates
+        self.exploration_scores = total_dist_scores
+        self.exploration_scores /= np.nansum(total_dist_scores)
+
+        return self.new_samples, self.exploration_scores
+
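
Reviewer note: the scoring logic of both criteria can be reproduced
standalone as a vectorized sketch (the method above additionally stacks
previously selected points into the design, and the threshold variant
deletes collapsing candidates instead of down-scoring them):

```python
import numpy as np
from scipy.spatial import distance

def mc_scores(old_X, candidates, criterion='mc-intersite-proj-th'):
    n, ndim = candidates.shape
    # nearest intersite (euclidean) and projected (chebyshev) distances
    d_inter = distance.cdist(candidates, old_X, 'euclidean').min(axis=1)
    d_proj = distance.cdist(candidates, old_X, 'chebyshev').min(axis=1)
    if criterion == 'mc-intersite-proj':
        w_inter = 0.5 * ((n + 1)**(1 / ndim) - 1)
        w_proj = 0.5 * (n + 1)
        return w_inter * d_inter + w_proj * d_proj
    # threshold variant: rule out candidates that collapse onto an
    # existing sample in any single dimension (d_proj < 2*alpha/n)
    d_min = 2 * 0.5 / n
    return np.where(d_proj >= d_min, d_inter, -np.inf)

rng = np.random.default_rng(1)
old_X, cands = rng.random((10, 2)), rng.random((40, 2))
best = cands[np.argmax(mc_scores(old_X, cands))]
```
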
+    # -------------------------------------------------------------------------
+    def approximate_voronoi(self, w, samples):
         """
-        An approximate (monte carlo) version of Matlab's voronoi command.  
-        The samples are assumed to lie within the LB and UB bounds (=vectors, 
-        one lower and upper bound per dimension).  
-        
-        If LB,UB are not given [-1 1] is assumed.
-        
+        An approximate (Monte Carlo) version of MATLAB's voronoi command.
+
         Arguments
         ---------
-        constraints : string
-            A set of constraints that have to be satisfied.
-            Voronoi cells which partly violate constraints are estimated at their
-            size within the allowed area.
-        
+        samples : array
+            Old experimental design to be used as center points for Voronoi
+            cells.
+
         Returns
         -------
-        areas: numpy array
+        areas : array
             An approximation of the voronoi cells' areas.
-            
-        allCandidates: list of numpy arrays
+        all_candidates: list of arrays
             A list of samples in each voronoi cell.
         """
-        PCEModel = self.PCEModel
-        # Get the Old ExpDesign #samples
-        samples = self.OldExpDesign
-        
-        nSamples = samples.shape[0] 
-        dim = samples.shape[1]
-        
-        # Get the bounds
-        Bounds = self.Bounds
-        
-        
-        # Compute the number of random points 
-        # 100 random points in the domain for each sample
-        nPoints = w * samples.shape[0]
-        
-        # Generate random points to estimate the voronoi decomposition
-        # points = np.zeros((nPoints, dim))
-        # for i in range(dim):
-        #    points[:,i] = stats.uniform(loc=0, scale=1).rvs(size=nPoints)
-        
-        # # Scale each column to the correct range
-        # for i in range(dim):
-        #     	points[:,i] = self.scaleColumns(points[:,i],Bounds[i][0],Bounds[i][1])
-        
-        ExpDesign = ExpDesigns(PCEModel.Inputs)
-        points = ExpDesign.generate_samples(nPoints, 'random')
-
-        
-        self.allCandidates = points
-        
+        MetaModel = self.MetaModel
+
+        n_samples = samples.shape[0]
+        ndim = samples.shape[1]
+
+        # Compute the number of random points
+        n_points = w * samples.shape[0]
+        # Generate w random points in the domain for each sample
+        points = MetaModel.ExpDesign.generate_samples(n_points, 'random')
+        self.all_candidates = points
+
         # Calculate the nearest sample to each point
-        self.areas = np.zeros((nSamples))
-        self.closestPoints = [np.empty((0,dim)) for i in range(nSamples)] #cell(nSamples, 1)
-        
-        # Compute the minimum distance from all the samples of OldExpDesign for each test point
-        for idx in range(nPoints):
+        self.areas = np.zeros((n_samples))
+        self.closest_points = [np.empty((0, ndim)) for i in range(n_samples)]
+
+        # Compute the minimum distance from all the samples of old_ED_X for
+        # each test point
+        for idx in range(n_points):
             # calculate the minimum distance
-            distances = self.buildDistanceMatrixPoint(samples, points[idx,:], doSqrt=True)
-            closestSample = np.argmin(distances)
-            
-            #Add to the voronoi list of the closest sample
-            #print('Point %s found to be closest to sample %s' %(points[idx], samples[closestSample]))
-            self.areas[closestSample] = self.areas[closestSample] + 1
-            prevclosestPoints = self.closestPoints[closestSample]
-            self.closestPoints[closestSample] = np.vstack((prevclosestPoints, points[idx]))
-            
-        
-        # divide by the amount of points to get the estimated volume of each
+            distances = self._build_dist_matrix_point(
+                samples, points[idx], do_sqrt=True
+                )
+            closest_sample = np.argmin(distances)
+
+            # Add to the voronoi list of the closest sample
+            self.areas[closest_sample] = self.areas[closest_sample] + 1
+            prev_closest_points = self.closest_points[closest_sample]
+            self.closest_points[closest_sample] = np.vstack(
+                (prev_closest_points, points[idx])
+                )
+
+        # Divide by the amount of points to get the estimated volume of each
         # voronoi cell
-        self.areas = self.areas / nPoints
+        self.areas /= n_points
 
         self.perc = np.max(self.areas * 100)
 
-        self.errors = self.areas 
+        self.errors = self.areas
 
-        
-        return self.areas, self.allCandidates
-    
-    #--------------------------------------------------------------------------------------------------------
-        
-    def buildDistanceMatrixPoint(self, samples, point, doSqrt=False):
+        return self.areas, self.all_candidates
+
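
Reviewer note: the estimator is plain Monte Carlo: each cell's relative
volume is the fraction of uniform points whose nearest design point is that
cell's center. A self-contained sketch:

```python
import numpy as np
from scipy.spatial import distance

rng = np.random.default_rng(0)
samples = rng.random((10, 2))                 # design points (cell centers)
points = rng.random((100 * len(samples), 2))  # w = 100 points per sample

# Assign every random point to its nearest design point ...
nearest = distance.cdist(points, samples, 'euclidean').argmin(axis=1)
# ... and estimate each Voronoi cell's relative volume as a hit fraction
areas = np.bincount(nearest, minlength=len(samples)) / len(points)
assert np.isclose(areas.sum(), 1.0)
```
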
+    # -------------------------------------------------------------------------
+    def _build_dist_matrix_point(self, samples, point, do_sqrt=False):
         """
         Calculates the intersite distance of all points in samples from point.
-        
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            The old experimental design.
+        point : array
+            A candidate point.
+        do_sqrt : bool, optional
+            Whether to return distances or squared distances. The default is
+            `False`.
+
+        Returns
+        -------
+        distances : array
+            Distances.
+
         """
-        
         distances = distance.cdist(samples, np.array([point]), 'euclidean')
-                
+
-        # do square root?
+        # return distances as-is (do_sqrt=True) or squared otherwise
-        if doSqrt: return distances 
-        else: 
+        if do_sqrt:
+            return distances
+        else:
             return distances**2
-    
-    #--------------------------------------------------------------------------------------------------------
-    
-    def scaleColumns(self, x,lowerLimit,upperLimit):
-        """
-        Scale all columns of X to [c,d], defaults to [-1,1]
-        If mn and mx are given they are used as the original range of each column of x
-        
-        """
-
-        n = x.shape[0]
-        
-        mn, mx = min(x), max(x)
-        
-        # repmat( mn,n,1 ) , repmat(mx-mn,n,1) 
-        res = np.divide((x - np.tile(np.array(mn), n)) , np.tile(np.array(mx-mn), n)) * (upperLimit - lowerLimit) + lowerLimit
-                
-        
-        return res
 
 #if __name__ == "__main__":
 #    import scipy.stats as stats
@@ -370,17 +374,17 @@ class Exploration:
 #    plt.rc('font', family='sans-serif', serif='Arial')
 #    plt.rc('figure', figsize = (12, 8))
 #    
-#    def plotter(OldExpDesign, allCandidates, explorationScore):
+#    def plotter(old_ED_X, all_candidates, exploration_scores):
 #        global Bounds
 #        
 #        from scipy.spatial import Voronoi, voronoi_plot_2d
-#        vor = Voronoi(OldExpDesign)
+#        vor = Voronoi(old_ED_X)
 #        
 #        fig = voronoi_plot_2d(vor)
 #        
 #        # find min/max values for normalization
-##        minima = min(explorationScore)
-##        maxima = max(explorationScore)
+##        minima = min(exploration_scores)
+##        maxima = max(exploration_scores)
 ##        
 ##        # normalize chosen colormap
 ##        norm = mpl.colors.Normalize(vmin=minima, vmax=maxima, clip=True)
@@ -390,20 +394,20 @@ class Exploration:
 ##            region = vor.regions[vor.point_region[r]]
 ##            if not -1 in region:
 ##                polygon = [vor.vertices[i] for i in region]
-##                plt.fill(*zip(*polygon), color=mapper.to_rgba(explorationScore[r]))
+##                plt.fill(*zip(*polygon), color=mapper.to_rgba(exploration_scores[r]))
 #        
 #        
 #        ax1 = fig.add_subplot(111)
 #        
-#        ax1.scatter(OldExpDesign[:,0], OldExpDesign[:,1], s=10, c='r', marker="s", label='Old Design Points')
-#        for i in range(OldExpDesign.shape[0]):
+#        ax1.scatter(old_ED_X[:,0], old_ED_X[:,1], s=10, c='r', marker="s", label='Old Design Points')
+#        for i in range(old_ED_X.shape[0]):
 #            txt = 'p'+str(i+1)
-#            ax1.annotate(txt, (OldExpDesign[i,0],OldExpDesign[i,1]))
+#            ax1.annotate(txt, (old_ED_X[i,0],old_ED_X[i,1]))
 #            
 ##        for i in range(NrofCandGroups):
-##            Candidates = allCandidates['group_'+str(i+1)]
+##            Candidates = all_candidates['group_'+str(i+1)]
 ##            ax1.scatter(Candidates[:,0],Candidates[:,1], s=10, c='b', marker="o", label='Design candidates')
-#        ax1.scatter(allCandidates[:,0],allCandidates[:,1], s=10, c='b', marker="o", label='Design candidates')
+#        ax1.scatter(all_candidates[:,0],all_candidates[:,1], s=10, c='b', marker="o", label='Design candidates')
 #        
 #        ax1.set_xlim(Bounds[0][0], Bounds[0][1])
 #        ax1.set_ylim(Bounds[1][0], Bounds[1][1])
@@ -436,28 +440,28 @@ class Exploration:
 #    Bounds = ((-5,10), (0,15))
 #    
 #    nrofSamples = 10
-#    OldExpDesign = np.zeros((nrofSamples, NofPa))
+#    old_ED_X = np.zeros((nrofSamples, NofPa))
 #    for idx in range(NofPa):
 #        Loc = Bounds[idx][0]
 #        Scale = Bounds[idx][1] - Bounds[idx][0]
-#        OldExpDesign[:,idx] = stats.uniform(loc=Loc, scale=Scale).rvs(size=nrofSamples)
+#        old_ED_X[:,idx] = stats.uniform(loc=Loc, scale=Scale).rvs(size=nrofSamples)
 #    
 #    
 #    nNewCandidate = 40
 #    
 #    # New Function
-#    volumes = voronoi_volumes(OldExpDesign)
+#    volumes = voronoi_volumes(old_ED_X)
 #    
 #    
 #    # SUMO
-#    Exploration = Exploration(Bounds, OldExpDesign, nNewCandidate)
+#    Exploration = Exploration(Bounds, old_ED_X, nNewCandidate)
 #    
-#    #allCandidates, Score = Exploration.getVornoiSamples()
-#    allCandidates, Score = Exploration.getMCSamples()
+#    #all_candidates, Score = Exploration.get_vornoi_samples()
+#    all_candidates, Score = Exploration.get_mc_samples()
 #    
 #    print('-'*40)
 ##    for i in range(nrofSamples):
-##        print("idx={0:d} X1={1: 3.3f} X2={2: 3.3f} Volume={3: 3.3f}".format(i+1, OldExpDesign[i,0], OldExpDesign[i,1], vornoi.areas[i]))
+##        print("idx={0:d} X1={1: 3.3f} X2={2: 3.3f} Volume={3: 3.3f}".format(i+1, old_ED_X[i,0], old_ED_X[i,1], vornoi.areas[i]))
 #        
-#    plotter(OldExpDesign, allCandidates, volumes)
+#    plotter(old_ED_X, all_candidates, volumes)
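
Reviewer note: a call-site sketch matching the updated usage in
sequential_design.py below (assumes `metamodel` is a trained MetaModel whose
`ExpDesign.X` holds the current training set):

```python
from bayesvalidrox.surrogate_models.exploration import Exploration

explore = Exploration(metamodel, n_candidate=200)
explore.mc_criterion = 'mc-intersite-proj'  # or 'mc-intersite-proj-th'
all_candidates, exploration_scores = explore.get_exploration_samples()
```
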
     
diff --git a/src/bayesvalidrox/surrogate_models/inputs.py b/src/bayesvalidrox/surrogate_models/inputs.py
index 667b2dffe15d88562ce7114a20e67889ab2fa489..783e82b053cc458be712b588b7fde3a0f3c8decb 100644
--- a/src/bayesvalidrox/surrogate_models/inputs.py
+++ b/src/bayesvalidrox/surrogate_models/inputs.py
@@ -29,10 +29,10 @@ class Input:
     >>> Inputs.Marginals[0].name = 'X_1'
     >>> Inputs.Marginals[0].input_data = input_data
     """
+    poly_coeffs_flag = True
 
     def __init__(self):
         self.Marginals = []
-        self.__poly_coeffs_flag = True
         self.Rosenblatt = False
 
     def add_marginals(self):
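
Reviewer note on the `poly_coeffs_flag` change: the old double-underscore
attribute was subject to Python's name mangling, so external writes such as
the ones fixed in sequential_design.py below never reached it. A minimal
illustration of the pitfall:

```python
class Demo:
    def __init__(self):
        self.__flag = True   # stored as _Demo__flag (name mangling)

d = Demo()
d.__flag = False             # no mangling outside the class body:
                             # this creates a brand-new attribute
print(d._Demo__flag)         # True: the real flag never changed
```
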
@@ -56,14 +56,14 @@ class Marginal:
     Attributes
     ----------
     name : string
-        Name of the parameter. The default is '$x_1$'.
+        Name of the parameter. The default is `'$x_1$'`.
     dist_type : string
-        Name of the distribution. The default is None.
+        Name of the distribution. The default is `None`.
     parameters : list
         List of the parameters corresponding to the distribution type. The
-        default is None.
-    input_data : array_like
-        The available data corresponding to the parameters. The default is [].
+        default is `None`.
+    input_data : array
+        Available input data. The default is `[]`.
     moments : list
         List of the moments.
     """
diff --git a/src/bayesvalidrox/surrogate_models/sequential_design.py b/src/bayesvalidrox/surrogate_models/sequential_design.py
index 99a2e780a443978bd770c9967c3a0d58c56ab0e5..bc3a9089a75c061aac9737e9c43a3732cb970956 100644
--- a/src/bayesvalidrox/surrogate_models/sequential_design.py
+++ b/src/bayesvalidrox/surrogate_models/sequential_design.py
@@ -103,7 +103,7 @@ class SeqDesign():
             # ------ Prepare diagonal enteries for co-variance matrix ---------
             for keyIdx, key in enumerate(Model.Output.names):
                 # optSigma = 'B'
-                sigma2 = np.array(PCEModel.Discrepancy.Parameters[key])
+                sigma2 = np.array(PCEModel.Discrepancy.parameters[key])
                 TotalSigma2 = np.append(TotalSigma2, sigma2)
 
             # Calculate the initial BME
@@ -226,7 +226,7 @@ class SeqDesign():
                     # ------ Plot the surrogate model vs Origninal Model ------
                     if hasattr(PCEModel, 'adapt_verbose') and \
                        PCEModel.adapt_verbose:
-                        from PostProcessing.adaptPlot import adaptPlot
+                        from post_processing.adaptPlot import adaptPlot
                         y_hat, std_hat = PCEModel.eval_metamodel(samples=Xnew)
                         adaptPlot(PCEModel, Ynew, y_hat, std_hat, plotED=False)
 
@@ -253,15 +253,15 @@ class SeqDesign():
                     Yprev = PCEModel.ModelOutputDict
 
                     # Pass the new prior as the input
-                    PCEModel.input_obj.__poly_coeffs_flag = False
+                    PCEModel.input_obj.poly_coeffs_flag = False
                     if updatedPrior is not None:
-                        PCEModel.Inputs.__poly_coeffs_flag = True
+                        PCEModel.input_obj.poly_coeffs_flag = True
                         print("updatedPrior:", updatedPrior.shape)
                         # Arbitrary polynomial chaos
                         for i in range(updatedPrior.shape[1]):
-                            PCEModel.Inputs.Marginals[i].DistType = None
+                            PCEModel.Inputs.Marginals[i].dist_type = None
                             x = updatedPrior[:, i]
-                            PCEModel.Inputs.Marginals[i].InputValues = x
+                            PCEModel.Inputs.Marginals[i].raw_data = x
 
                     prevPCEModel = PCEModel
                     PCEModel = PCEModel.train_norm_design(Model)
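
Reviewer note: with the renamed fields, the arbitrary-PCE re-parameterization
reads as follows (sketch; the posterior sample array is hypothetical):

```python
import numpy as np

updated_prior = np.random.randn(10000, 2)  # hypothetical posterior samples

PCEModel.input_obj.poly_coeffs_flag = True
for i in range(updated_prior.shape[1]):
    # drop the closed-form distribution and carry the samples as raw data
    PCEModel.Inputs.Marginals[i].dist_type = None
    PCEModel.Inputs.Marginals[i].raw_data = updated_prior[:, i]
```
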
@@ -1179,8 +1179,8 @@ class SeqDesign():
             explore = Exploration(PCEModel, n_candidates)
             explore.w = 100  # * ndim #500
             # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
-            explore.mcCriterion = 'mc-intersite-proj'
-            allCandidates, scoreExploration = explore.getExplorationSamples()
+            explore.mc_criterion = 'mc-intersite-proj'
+            allCandidates, scoreExploration = explore.get_exploration_samples()
 
             # Temp: ---- Plot all candidates -----
             if ndim == 2:
@@ -1291,7 +1291,7 @@ class SeqDesign():
-                    X_can = explore.closestPoints[idx]
+                    X_can = explore.closest_points[idx]
 
                     # Calculate the maxmin score for the region of interest
-                    newSamples, maxminScore = explore.getMCSamples(X_can)
+                    newSamples, maxminScore = explore.get_mc_samples(X_can)
 
                     # select the requested number of samples
                     Xnew[i] = newSamples[np.argmax(maxminScore)]
@@ -1313,12 +1313,6 @@ class SeqDesign():
             # Generate candidate samples from Exploration class
             nMeasurement = old_EDY[OutputNames[0]].shape[1]
 
-            # Find indices of the Vornoi cells with samples
-            goodSampleIdx = []
-            for idx in range(len(explore.closestPoints)):
-                if len(explore.closestPoints[idx]) != 0:
-                    goodSampleIdx.append(idx)
-
             # Find sensitive region
             if UtilMethod == 'LOOCV':
                 LCerror = PCEModel.LCerror
@@ -1343,7 +1337,12 @@ class SeqDesign():
                                                 axis=0)
                     goodSampleIdx = range(n_cand_groups)
                 else:
-                    split_cand = explore.closestPoints
+                    # Find indices of the Vornoi cells with samples
+                    goodSampleIdx = []
+                    for idx in range(len(explore.closest_points)):
+                        if len(explore.closest_points[idx]) != 0:
+                            goodSampleIdx.append(idx)
+                    split_cand = explore.closest_points
 
                 # Split the candidates in groups for multiprocessing
                 args = []
@@ -1360,8 +1359,12 @@ class SeqDesign():
                 pool.close()
 
                 # Retrieve the results and append them
-                ExploitScore = np.concatenate([results[NofE][1] for NofE in
-                                               range(len(goodSampleIdx))])
+                if explore_method == 'Voronoi':
+                    ExploitScore = [np.mean(results[k][1]) for k in
+                                    range(len(goodSampleIdx))]
+                else:
+                    ExploitScore = np.concatenate(
+                        [results[k][1] for k in range(len(goodSampleIdx))])
 
             else:
                 raise NameError('The requested utility function is not '
@@ -1386,12 +1389,12 @@ class SeqDesign():
                 Xnew = allCandidates[bestIdx]
             else:
                 for i, idx in enumerate(bestIdx.flatten()):
-                    X_can = explore.closestPoints[idx]
+                    X_can = explore.closest_points[idx]
                     # plotter(self.ExpDesign.X, X_can, explore_method,
                     # scoreExploration=None)
 
                     # Calculate the maxmin score for the region of interest
-                    newSamples, maxminScore = explore.getMCSamples(X_can)
+                    newSamples, maxminScore = explore.get_mc_samples(X_can)
 
                     # select the requested number of samples
                     Xnew[i] = newSamples[np.argmax(maxminScore)]
diff --git a/src/bayesvalidrox/surrogate_models/surrogate_models.py b/src/bayesvalidrox/surrogate_models/surrogate_models.py
index cb6fd21f358157f67661bd997a5e122734d22815..c751d9b5761ae41dcf008659ff8d112a65ff1632 100644
--- a/src/bayesvalidrox/surrogate_models/surrogate_models.py
+++ b/src/bayesvalidrox/surrogate_models/surrogate_models.py
@@ -1,16 +1,6 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-"""
-Author: Farid Mohammadi, M.Sc.
-E-Mail: farid.mohammadi@iws.uni-stuttgart.de
-Department of Hydromechanics and Modelling of Hydrosystems (LH2)
-Institute for Modelling Hydraulic and Environmental Systems (IWS), University
-of Stuttgart www.iws.uni-stuttgart.de/lh2/
-Pfaffenwaldring 61
-70569 Stuttgart
-
-Created on Sat Aug 24 2019
-"""
+
 import warnings
 import numpy as np
 import math
@@ -44,65 +34,61 @@ class MetaModel:
     Meta (surrogate) model
 
     This class trains a surrogate model. It accepts an input object (input_obj)
-    containing the distribution specification of uncertain parameters and a
-    model object with instructions on how to run the computational model.
-
-
-    Experimental design::
-
-        MetaModelOpts.add_ExpDesign()
-
-    Two experimental design schemes are supported: one-shot (`normal`) and
-    adaptive sequential (`sequential`) designs.
-    For experimental design refer to ... .
+    containing the specification of the distributions for uncertain parameters
+    and a model object with instructions on how to run the computational model.
 
     Attributes
     ----------
     input_obj : obj
         Input object with the information on the model input parameters.
-    meta_model_type : string
+    meta_model_type : str
         Surrogate model types. Three surrogate model types are supported:
         polynomial chaos expansion (`PCE`), arbitrary PCE (`aPCE`) and
         Gaussian process regression (`GPE`). Default is PCE.
-    pce_reg_method : string
+    pce_reg_method : str
         PCE regression method to compute the coefficients. The following
-        regression methods are available::
-
-            1. OLS: Ordinary Least Square method
-            2. BRR: Bayesian Ridge Regression
-            3. LARS: Least angle regression
-            4. ARD: Bayesian ARD Regression
-            5. FastARD: Fast Bayesian ARD Regression
-            6. VBL: Variational Bayesian Learning
-            7. EBL: Emperical Bayesian Learning
-        Default is OLS.
+        regression methods are available:
+
+        1. OLS: Ordinary Least Square method
+        2. BRR: Bayesian Ridge Regression
+        3. LARS: Least angle regression
+        4. ARD: Bayesian ARD Regression
+        5. FastARD: Fast Bayesian ARD Regression
+        6. VBL: Variational Bayesian Learning
+        7. EBL: Empirical Bayesian Learning
+        Default is `OLS`.
     pce_deg : int or list of int
         Polynomial degree(s). If a list is given, an adaptive algorithm is used
         to find the best degree with the lowest Leave-One-Out cross-validation
-        (LOO) error (or the highest score=1-LOO). Default is 1.
+        (LOO) error (or the highest score=1-LOO). Default is `1`.
     pce_q_norm : float
         Hyperbolic (or q-norm) truncation for multi-indices of multivariate
-        polynomials. Default is 1.0.
-    dim_red_method : string
+        polynomials. Default is `1.0`.
+    dim_red_method : str
         Dimensionality reduction method for the output space. The available
-        method is based on principal component analysis (PCA). Default is 'no'.
-        Example::
-
-            # Select method
-            MetaModelOpts.dim_red_method = 'PCA'
-            # There is two ways to select number of components.
-            # A: use explainable variance threshold (between 0 and 100)
-            MetaModelOpts.var_pca_threshold = 99.999
-            # B: Direct prescription
-            MetaModelOpts.n_pca_components = 12
+        method is based on principal component analysis (PCA). The default is
+        `'no'`. There are two ways to select the number of components: set a
+        threshold on the percentage of explained variance (between 0 and 100,
+        Option A) or prescribe the number of components directly (Option B):
+
+            >>> MetaModelOpts.dim_red_method = 'PCA'
+            >>> MetaModelOpts.var_pca_threshold = 99.999  # Option A
+            >>> MetaModelOpts.n_pca_components = 12  # Option B
+
     verbose : bool
-        Prints summary of the regression results. Default is False.
+        Prints summary of the regression results. Default is `False`.
 
-    Methods
+    Note
     -------
-    create_metamodel(Model):
-        Starts the training of the meta-model for the model objects containg
-         the given computational model.
+    To specify the sampling methods and the training set, an experimental
+    design instance needs to be added. This can be done by:
+
+    >>> MetaModelOpts.add_ExpDesign()
+
+    Two experimental design schemes are supported: one-shot (`normal`) and
+    adaptive sequential (`sequential`) designs.
+    For details on the experimental design, refer to the `ExpDesigns` class.
+
     """
 
     def __init__(self, input_obj, meta_model_type='PCE', pce_reg_method='OLS',
@@ -125,12 +111,12 @@ class MetaModel:
 
         Parameters
         ----------
-        Model : object
+        Model : obj
             Model object.
 
         Returns
         -------
-        metamodel : object
+        metamodel : obj
             The meta model object.
 
         """
@@ -169,14 +155,14 @@ class MetaModel:
 
         Parameters
         ----------
-        Model : object
+        Model : obj
             Model object.
         verbose : bool, optional
             Flag for a sequential design in silent mode. The default is False.
 
         Returns
         -------
-        self: object
+        self: obj
             Meta-model object.
 
         """
@@ -289,7 +275,7 @@ class MetaModel:
 
         Returns
         -------
-        basis_indices : array (n_terms, n_params)
+        basis_indices : array of shape (n_terms, n_params)
             Multi-indices of multivariate polynomials.
 
         """
@@ -320,7 +306,7 @@ class MetaModel:
 
         Parameters
         ----------
-        Model : object
+        Model : obj
             Model object.
 
         Raises
@@ -330,7 +316,7 @@ class MetaModel:
 
         Returns
         -------
-        ED_X_tr: array (n_samples, n_params)
+        ED_X_tr: array of shape (n_samples, n_params)
             Training samples transformed by an isoprobabilistic transformation.
         ED_Y: dict
             Model simulations (target) for all outputs.
@@ -415,13 +401,12 @@ class MetaModel:
         samples : array of shape (n_samples, n_params)
             Samples.
         n_max : int, optional
-            Maximum polynomial degree. The default is None.
+            Maximum polynomial degree. The default is `None`.
 
         Returns
         -------
-        univ_basis: array of (n_samples, n_params, n_max+1)
+        univ_basis: array of shape (n_samples, n_params, n_max+1)
             All univariate regressors up to n_max.
-
         """
         # Extract information
         poly_types = self.ExpDesign.poly_types
@@ -451,7 +436,7 @@ class MetaModel:
         basis_indices : array of shape (n_terms, n_params)
             Multi-indices of multivariate polynomials.
         univ_p_val : array of (n_samples, n_params, n_max+1)
-            All univariate regressors up to n_max.
+            All univariate regressors up to `n_max`.
 
         Raises
         ------
@@ -505,19 +490,19 @@ class MetaModel:
 
         Parameters
         ----------
-        X : array-like of shape (n_samples, n_features)
+        X : array of shape (n_samples, n_features)
             Training vector, where n_samples is the number of samples and
             n_features is the number of features.
-        y : array-like of shape (n_samples,)
+        y : array of shape (n_samples,)
             Target values.
-        basis_indices : array-like of shape (n_terms, n_params)
+        basis_indices : array of shape (n_terms, n_params)
             Multi-indices of multivariate polynomials.
-        reg_method : string, optional
+        reg_method : str, optional
-            DESCRIPTION. The default is None.
+            Regression method. The default is `None`.
 
         Returns
         -------
-        returnOuts : Dict
+        return_out_dict : dict
             Fitted estimator, spareMulti-Index, sparseX and coefficients.
 
         """
@@ -630,12 +615,12 @@ class MetaModel:
                 coeffs = np.dot(np.dot(np.linalg.pinv(X_T_X), sparse_X.T), y)
 
         # Create a dict to pass the outputs
-        returnOuts = dict()
-        returnOuts['clf_poly'] = clf_poly
-        returnOuts['spareMulti-Index'] = sparse_basis_indices
-        returnOuts['sparePsi'] = sparse_X
-        returnOuts['coeffs'] = coeffs
-        return returnOuts
+        return_out_dict = dict()
+        return_out_dict['clf_poly'] = clf_poly
+        return_out_dict['spareMulti-Index'] = sparse_basis_indices
+        return_out_dict['sparePsi'] = sparse_X
+        return_out_dict['coeffs'] = coeffs
+        return return_out_dict
 
     # --------------------------------------------------------------------------------------------------------
     def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
@@ -645,9 +630,9 @@ class MetaModel:
 
         Parameters
         ----------
-        ED_X : array-like of shape (n_samples, n_params)
+        ED_X : array of shape (n_samples, n_params)
             Experimental design.
-        ED_Y : array-like of shape (n_samples,)
+        ED_Y : array of shape (n_samples,)
             Target values, i.e. simulation results for the Experimental design.
         varIdx : int
             Index of the output.
@@ -835,31 +820,33 @@ class MetaModel:
     # -------------------------------------------------------------------------
     def corr_loocv_error(self, clf, psi, coeffs, y):
         """
-        Calculates the corrected LOO error for the OLS regression on regressor
-        matrix PSI that generated the coefficients.
-        (based on Blatman, 2009 (PhD Thesis), pg. 115-116).
+        Calculates the corrected LOO error for regression on regressor
+        matrix `psi` that generated the coefficients based on [1] and [2].
 
-        This is based on the following paper:
-           ""Blatman, G., & Sudret, B. (2011). Adaptive sparse polynomial
-           chaos expansion based on least angle regression.
-           Journal of Computational Physics, 230(6), 2345-2367.""
+        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+            uncertainty propagation and sensitivity analysis (Doctoral
+            dissertation, Clermont-Ferrand 2).
+
+        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+            expansion based on least angle regression. Journal of computational
+            Physics, 230(6), pp.2345-2367.
 
         Parameters
         ----------
         clf : object
             Fitted estimator.
-        psi : array-like of shape (n_samples, n_features)
+        psi : array of shape (n_samples, n_features)
             The multivariate orthogonal polynomials (regressor).
-        coeffs : array-like of shape (n_features,)
+        coeffs : array of shape (n_features,)
             Estimated cofficients.
-        y : array-like of shape (n_samples,)
+        y : array of shape (n_samples,)
             Target values.
 
         Returns
         -------
         Q_2 : float
-            LOOCV Validation score (1-LOOCV erro).
+            LOOCV validation score (1 - LOOCV error).
-        residual : array-like of shape (n_samples,)
+        residual : array of shape (n_samples,)
             Residual values (y - predicted targets).
 
         """
@@ -945,14 +932,14 @@ class MetaModel:
 
         Parameters
         ----------
-        Output : array-like of shape (n_samples,)
+        Output : array of shape (n_samples,)
             Target values.
 
         Returns
         -------
-        pca : object
+        pca : obj
             Fitted sklearnPCA object.
-        OutputMatrix : array-like of shape (n_samples,)
+        OutputMatrix : array of shape (n_samples,)
             Transformed target values.
 
         """
@@ -996,7 +983,7 @@ class MetaModel:
         return pca, OutputMatrix
 
     # -------------------------------------------------------------------------
-    def gaussian_process_emulator(self, X, y, nugTerm=None, autoSelect=False,
+    def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
                                   varIdx=None):
         """
         Fits a Gaussian Process Emulator to the target given the training
@@ -1004,11 +991,11 @@ class MetaModel:
 
         Parameters
         ----------
-        X : array-like of shape (n_samples, n_params)
+        X : array of shape (n_samples, n_params)
             Training points.
-        y : array-like of shape (n_samples,)
+        y : array of shape (n_samples,)
             Target values.
-        nugTerm : float, optional
+        nug_term : float, optional
+            Nugget term. The default is `None`, i.e. the variance of `y`.
         autoSelect : bool, optional
             Loop over some kernels and select the best. The default is False.
@@ -1022,15 +1009,15 @@ class MetaModel:
 
         """
 
-        nugTerm = nugTerm if nugTerm else np.var(y)
+        nug_term = nug_term if nug_term else np.var(y)
 
-        Kernels = [nugTerm * kernels.RBF(length_scale=1.0,
-                                         length_scale_bounds=(1e-25, 1e15)),
-                   nugTerm * kernels.RationalQuadratic(length_scale=0.2,
-                                                       alpha=1.0),
-                   nugTerm * kernels.Matern(length_scale=1.0,
-                                            length_scale_bounds=(1e-15, 1e5),
-                                            nu=1.5)]
+        Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                          length_scale_bounds=(1e-25, 1e15)),
+                   nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                        alpha=1.0),
+                   nug_term * kernels.Matern(length_scale=1.0,
+                                             length_scale_bounds=(1e-15, 1e5),
+                                             nu=1.5)]
 
         # Automatic selection of the kernel
         if autoSelect:
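The hunk stops just as the auto-selection branch opens. A plausible continuation (an assumption, not the repository's verbatim code) would fit one `GaussianProcessRegressor` per candidate kernel and keep the model with the highest optimized log-marginal likelihood:

```python
from sklearn.gaussian_process import GaussianProcessRegressor

def select_best_gp(X, y, Kernels):
    # Fit one GP per candidate kernel and rank the fitted models by the
    # log-marginal likelihood of their optimized hyperparameters.
    fitted = [GaussianProcessRegressor(kernel=k, n_restarts_optimizer=3,
                                       normalize_y=True).fit(X, y)
              for k in Kernels]
    return max(fitted, key=lambda gp: gp.log_marginal_likelihood())
```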
@@ -1080,7 +1067,7 @@ class MetaModel:
         nsamples : int, optional
             Number of samples to generate, if no `samples` is provided. The
             default is None.
-        sampling_method : string, optional
+        sampling_method : str, optional
             Type of sampling, if no `samples` is provided. The default is
             'random'.
         return_samples : bool, optional
@@ -1088,9 +1075,10 @@ class MetaModel:
 
         Returns
         -------
-        TYPE
-            DESCRIPTION.
-
+        mean_pred : dict
+            Mean of the predictions.
+        std_pred : dict
+            Standard deviation of the predictions.
         """
         if self.meta_model_type.lower() == 'gpe':
             model_dict = self.gp_poly
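With the Returns section now spelled out, calling the predictor looks roughly like this. Method and variable names are inferred from the surrounding hunks, so treat them as assumptions rather than the verified public API:

```python
# Draw fresh inputs and evaluate the trained surrogate; both returns
# are dicts keyed by output name, per the docstring above.
mean_pred, std_pred = PCEModel.eval_metamodel(nsamples=200,
                                              sampling_method='random')
for out_name, mean in mean_pred.items():
    print(out_name, mean.shape, std_pred[out_name].shape)
```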
@@ -1174,13 +1162,13 @@ class MetaModel:
 
         Parameters
         ----------
-        X : array-like of shape (n_outputs, n_inputs)
+        X : array of shape (n_outputs, n_inputs)
-            Input array. It can contain any forcing inputs or coordinates of
-             extracted data.
+            Input array. It can contain any forcing inputs or coordinates of
+            extracted data.
-        y : array-like of shape (n_outputs,)
+        y : array of shape (n_outputs,)
             The model response for the MAP parameter set.
-        name : string, optional
-            Calibration or validation. The default is 'Calib'.
+        name : str, optional
+            Calibration or validation. The default is `'Calib'`.
 
         Returns
         -------
@@ -1267,7 +1255,11 @@ class MetaModel:
 
     # -------------------------------------------------------------------------
     class auto_vivification(dict):
-        """Implementation of perl's AutoVivification feature."""
+        """
+        Implementation of Perl's autovivification feature.
+
+        Source: https://stackoverflow.com/a/651879/18082457
+        """
 
         def __getitem__(self, item):
             try:
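Since the hunk is cut off mid-method, the complete recipe from the cited Stack Overflow answer is reproduced here for reference; missing keys spring into existence as nested dictionaries on first access:

```python
class auto_vivification(dict):
    """Implementation of Perl's autovivification feature."""

    def __getitem__(self, item):
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            # Missing key: create a nested auto_vivification in place.
            value = self[item] = type(self)()
            return value

# Usage: intermediate levels are created automatically.
d = auto_vivification()
d['a']['b']['c'] = 1
```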
diff --git a/tests/analytical_function/test_analytical_function.py b/tests/analytical_function/test_analytical_function.py
index e857d5f9f63d041c01921e226845a18b52e95551..c832c4f0483a49beb3ee0430a291932e7a693b93 100755
--- a/tests/analytical_function/test_analytical_function.py
+++ b/tests/analytical_function/test_analytical_function.py
@@ -132,7 +132,7 @@ if __name__ == "__main__":
 
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     MetaModelOpts.ExpDesign.method = 'normal'
-    MetaModelOpts.ExpDesign.n_init_samples = 50  # 5*ndim
+    MetaModelOpts.ExpDesign.n_init_samples = 75  # 5*ndim
 
     # Sampling methods
     # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) korobov
@@ -147,7 +147,7 @@ if __name__ == "__main__":
     # ------------------------------------------------
     # Set the sampling parameters
     MetaModelOpts.ExpDesign.n_new_samples = 1
-    MetaModelOpts.ExpDesign.n_max_samples = 15  # 150
+    MetaModelOpts.ExpDesign.n_max_samples = 25  # 150
     MetaModelOpts.ExpDesign.mod_LOO_threshold = 1e-16
 
     MetaModelOpts.adapt_verbose = True
@@ -156,7 +156,7 @@ if __name__ == "__main__":
     # MetaModelOpts.ExpDesign.n_replication = 20
     # -------- Exploration ------
     # 1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'LOOCV' 5)'dual annealing'
-    MetaModelOpts.ExpDesign.explore_method = 'latin_hypercube'
+    MetaModelOpts.ExpDesign.explore_method = 'Voronoi'
 
     # Use when 'dual annealing' chosen
     MetaModelOpts.ExpDesign.max_func_itr = 200
@@ -176,7 +176,7 @@ if __name__ == "__main__":
     # MetaModelOpts.ExpDesign.util_func = 'DKL'
 
     # VarBasedOptDesign -> when data is not available
-    # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)LOOCV
+    # 1) Entropy 2) EIGF 3) LOOCV
     # or a combination as a list
     MetaModelOpts.ExpDesign.util_func = 'Entropy'
 
diff --git a/tests/beam/test_beam.py b/tests/beam/test_beam.py
index a1f023610d40f38870cd1747d343bf3416d0c3d0..e1d1ababc8da4b3bd6d96e14dbc9e82c48259549 100644
--- a/tests/beam/test_beam.py
+++ b/tests/beam/test_beam.py
@@ -131,10 +131,10 @@ if __name__ == "__main__":
     # Sampling methods
     # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley
     # 6) chebyshev(FT) 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user
-    MetaModelOpts.ExpDesign.sampling_method = 'user'
+    MetaModelOpts.ExpDesign.sampling_method = 'latin_hypercube'
 
     # Provide the experimental design object with a hdf5 file
-    MetaModelOpts.ExpDesign.hdf5_file = 'ExpDesign_Beam9points.hdf5'
+    # MetaModelOpts.ExpDesign.hdf5_file = 'ExpDesign_Beam9points.hdf5'
 
     # ------ Train the surrogate model ------
     PCEModel = MetaModelOpts.create_metamodel(Model)
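Switching `sampling_method` from 'user' to 'latin_hypercube' means the design is now drawn at run time instead of read from the (now commented-out) HDF5 file. Conceptually, a Latin hypercube design looks like the following scipy-based illustration; this is not the package's internal sampler, and the dimension and size are placeholders:

```python
from scipy.stats import qmc

# Illustrative only: a Latin hypercube design in the unit cube, to be
# rescaled to the beam parameters' marginal distributions afterwards.
sampler = qmc.LatinHypercube(d=4, seed=0)  # d=4 is an assumed dimension
X_unit = sampler.random(n=100)             # shape (100, 4), values in [0, 1)
```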