diff --git a/.coverage b/.coverage
new file mode 100644
index 0000000000000000000000000000000000000000..7bd62455cae2ec79af9cbc60f552afe43fca17d4
Binary files /dev/null and b/.coverage differ
diff --git a/.coverage.DESKTOP-ATMEKSV.24388.XANLHOVx b/.coverage.DESKTOP-ATMEKSV.24388.XANLHOVx
new file mode 100644
index 0000000000000000000000000000000000000000..e1f7e6f75f5aa9a7a33884193b67b7cc22082200
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.24388.XANLHOVx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.29708.XilfwCcx b/.coverage.DESKTOP-ATMEKSV.29708.XilfwCcx
new file mode 100644
index 0000000000000000000000000000000000000000..c54e53aa1aa202b1377d33fea299950cda87d2bb
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.29708.XilfwCcx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.33796.XdyCYWmx b/.coverage.DESKTOP-ATMEKSV.33796.XdyCYWmx
new file mode 100644
index 0000000000000000000000000000000000000000..81e53f831236a6e47a886ccb6403982ccf33b461
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.33796.XdyCYWmx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.43284.XuuJaTEx b/.coverage.DESKTOP-ATMEKSV.43284.XuuJaTEx
new file mode 100644
index 0000000000000000000000000000000000000000..f723535d998644947972f91fa6e1bdbfa96b41ff
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.43284.XuuJaTEx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.49024.XoHLhHmx b/.coverage.DESKTOP-ATMEKSV.49024.XoHLhHmx
new file mode 100644
index 0000000000000000000000000000000000000000..8c3bd15ceb242ea9c40c1b4be42b077d3d2db13b
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.49024.XoHLhHmx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.50700.XPPOhAcx b/.coverage.DESKTOP-ATMEKSV.50700.XPPOhAcx
new file mode 100644
index 0000000000000000000000000000000000000000..fad3080e3e429bf14717ae3d448a946735a7c872
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.50700.XPPOhAcx differ
diff --git a/.coverage.DESKTOP-ATMEKSV.51388.XJOycQex b/.coverage.DESKTOP-ATMEKSV.51388.XJOycQex
new file mode 100644
index 0000000000000000000000000000000000000000..e1f7e6f75f5aa9a7a33884193b67b7cc22082200
Binary files /dev/null and b/.coverage.DESKTOP-ATMEKSV.51388.XJOycQex differ
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..13566b81b018ad684f3a35fee301741b2734c8f4
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/bayesvalidrox.iml b/.idea/bayesvalidrox.iml
new file mode 100644
index 0000000000000000000000000000000000000000..fab03b6eca7d238814b681f1f06637c845bcc3f4
--- /dev/null
+++ b/.idea/bayesvalidrox.iml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$">
+      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
+      <sourceFolder url="file://$MODULE_DIR$/tests" isTestSource="true" />
+    </content>
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+  <component name="PyDocumentationSettings">
+    <option name="format" value="NUMPY" />
+    <option name="myDocStringFormat" value="NumPy" />
+  </component>
+  <component name="TestRunnerService">
+    <option name="PROJECT_TEST_RUNNER" value="py.test" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a6218fed0aeb0cbb03b46a9064efeeda11861bf6
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="Black">
+    <option name="sdkName" value="Python 3.11" />
+  </component>
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11" project-jdk-type="Python SDK" />
+</project>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000000000000000000000000000000000000..482c611f35883d7c1998edbfac68f5b5b74300db
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/bayesvalidrox.iml" filepath="$PROJECT_DIR$/.idea/bayesvalidrox.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/other.xml b/.idea/other.xml
new file mode 100644
index 0000000000000000000000000000000000000000..2e75c2e2af6fe9ab3294dedf305ebb894bb324d7
--- /dev/null
+++ b/.idea/other.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="PySciProjectComponent">
+    <option name="PY_INTERACTIVE_PLOTS_SUGGESTED" value="true" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000000000000000000000000000000000..35eb1ddfbbc029bcab630581847471d7f238ec53
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/docs/diagrams/.$Structure_BayesInf.drawio.bkp b/docs/diagrams/.$Structure_BayesInf.drawio.bkp
new file mode 100644
index 0000000000000000000000000000000000000000..3ba4ed26362c9fa2ea955f14d7ad3f05a15ab00a
--- /dev/null
+++ b/docs/diagrams/.$Structure_BayesInf.drawio.bkp
@@ -0,0 +1,908 @@
+<mxfile host="Electron" modified="2024-04-19T15:13:43.060Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/22.1.11 Chrome/114.0.5735.289 Electron/25.9.8 Safari/537.36" etag="DE_l5njUGrsyMG_jufaX" version="22.1.11" type="device" pages="3">
+  <diagram name="Class and function structure" id="efOe0Jku58RX-i1bv-3b">
+    <mxGraphModel dx="2718" dy="686" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-22" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;MCMC&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1270" y="360" width="770" height="380" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-1" value="_kernel_rbf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-2" value="_logpdf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="820" y="140" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-10" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesInf&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-120" y="290" width="1310" height="680" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-24" value="if self.bootstrap &lt;br&gt;or self.bayes_loocv &lt;br&gt;or self.just_analysis" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;labelBackgroundColor=#ffae00;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-13">
+          <mxGeometry x="0.2902" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-42" value="if self.name != &#39;valid&#39;&lt;br&gt;and self.inference_method != &#39;rejection&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-31">
+          <mxGeometry x="0.5646" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-43" value="if self.inference_method == &#39;mcmc&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-32">
+          <mxGeometry x="-0.0958" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-19">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-52" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#C2C2C2;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-33">
+          <mxGeometry x="-0.112" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-47" value="if self.plot_post_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-34">
+          <mxGeometry x="0.2399" y="-1" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-46" value="if self.plot_map_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-35">
+          <mxGeometry x="0.4183" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-55" value="if self.bootstrap" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-54">
+          <mxGeometry x="0.1816" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-56">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-58" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-57">
+          <mxGeometry x="0.7182" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-60" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-61" value="if self.error_model&lt;br&gt;and self.name == &#39;calib&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-60">
+          <mxGeometry x="0.3024" y="2" relative="1" as="geometry">
+            <mxPoint x="67" y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-51">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-55" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-54">
+          <mxGeometry x="0.8253" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-9" value="create_inference" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="405" y="539" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-25" value="if len(self.perturbed_data) == 0" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-14">
+          <mxGeometry x="0.3402" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-27" value="if not self.emulator" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-16">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-44" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-29">
+          <mxGeometry x="0.4722" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-41" value="if self.emulator" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-30">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-62" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="340" y="680" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-63" value="if self.error_model&lt;br&gt;and self.name == &#39;valid&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-62">
+          <mxGeometry x="-0.3906" relative="1" as="geometry">
+            <mxPoint y="121" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-13" value="perform_bootstrap" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="50" y="335" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-14" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-75" y="460" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-15" value="_eval_model" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="660" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-49" value="if hasattr bias_inputs&amp;nbsp;&lt;br&gt;and not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-38">
+          <mxGeometry x="0.3126" y="-3" relative="1" as="geometry">
+            <mxPoint x="-103" y="31" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-39" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-16" value="normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="455" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-17" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-50" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-40">
+          <mxGeometry x="-0.6073" y="-5" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-17" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="385" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-18" value="_rejection_sampling" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="280" y="890" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-26" value="if not self.emulator&amp;nbsp;&lt;br&gt;and not self.inference_method == &#39;rejection&#39;&amp;nbsp;&lt;br&gt;and self.name == &#39;calib" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry x="-0.0559" y="15" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-48" value="if sigma2_prior is not None&lt;br&gt;and if hasattr bias_inputs&lt;br&gt;and if not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-37">
+          <mxGeometry x="-0.5544" y="-1" relative="1" as="geometry">
+            <mxPoint x="1" y="-5" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-19" value="_posterior_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="690" y="589" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-20" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-45" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-28">
+          <mxGeometry x="0.0517" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-20" value="_plot_max_a_posteriori" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="495" y="790" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-21" value="plot_post_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="630" y="720" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-36" value="Note: Arrows indicate function calls, beginning calls the end" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="10" y="10" width="190" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-51" value="Color meanings:&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;red: wrong, change&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;orange: seems off, look at again&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;light beige: has been removed" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="20" y="70" width="220" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-53" value="plot_log_BME" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="150" y="820" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-56" value="plot_post_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="660" y="840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-59" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="45" y="740" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-1" value="_check_ranges" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-2" value="gelman_rubin" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="250" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-3" value="_iterative_scheme" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="2055" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-11">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-4" value="_my_ESS" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="100" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-8">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-10">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-53" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-52">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-56" value="if opts_sigma != &#39;B&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-53">
+          <mxGeometry x="0.7377" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-5" value="run_sampler" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="534" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-6" target="HiMKSJFquRK0mIlwyRFI-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-6" value="log_prior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="510" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-7" target="HiMKSJFquRK0mIlwyRFI-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-15">
+          <mxGeometry x="0.0246" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-7" value="log_likelihood" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="539" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-17" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-12">
+          <mxGeometry x="0.4587" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-7">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-18" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-13">
+          <mxGeometry x="0.6826" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-8" value="log_posterior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1480" y="610" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-9" value="eval_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-10" value="train_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1450" y="420" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-11" target="HiMKSJFquRK0mIlwyRFI-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-11" value="marginal_llk_emcee" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1870" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-25" value="Never used!" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1880" y="680" width="100" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-26" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesModelComp&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1060" y="380" width="840" height="420" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HiMKSJFquRK0mIlwyRFI-31">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-27" value="model_comparison_all" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-860" y="566" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="-630" y="564" as="sourcePoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-47" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-42">
+          <mxGeometry x="-0.4883" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-29" target="HiMKSJFquRK0mIlwyRFI-30">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-49" value="if perturbed_data is None" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-37">
+          <mxGeometry x="-0.0507" y="4" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-29" value="generate_dataset" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-510" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-30" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-340" y="636" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-6" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HiMKSJFquRK0mIlwyRFI-33">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-31" value="cal_model_weight" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-835" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-32" value="plot_just_analysis" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-835" y="736" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-33" value="plot_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-980" y="416" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-34" value="plot_bayes_factor" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-410" y="431" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-51" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Discrepancy&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="360" y="1039.82" width="200" height="130" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-52" value="get_sample" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="1079.82" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-34">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-1" value="calc_bayes_factors" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-630" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-2" value="calc_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1030" y="566" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-16" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-23" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HC1H8j6nMwEtLoyIrXXk-16">
+          <mxGeometry x="-0.5478" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-18" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-3" value="calc_justifiability_analysis" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-860" y="666" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-17" value="setup" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-630" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="sQf09xvhinkT827TE7Va" name="Function structure Engine">
+    <mxGraphModel dx="1436" dy="968" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-1" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Engine&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="130" y="140" width="1390" height="690" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-2" value="hellinger_distance" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-3" value="logpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-4" value="subdomain" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="625" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-5" value="start_engine" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="250" y="680" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-6" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-6" value="train_normal" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="420" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="335" y="335" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-7" value="train_sequential" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-8" value="eval_metamodel" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="190" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-7" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-23">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-20" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-19">
+          <mxGeometry x="0.8137" relative="1" as="geometry">
+            <mxPoint x="-57" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-24">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-22" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-21">
+          <mxGeometry x="0.7684" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-25">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-24" value="if expdes.valid_model_runs" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-23">
+          <mxGeometry x="0.606" y="3" relative="1" as="geometry">
+            <mxPoint x="-16" y="3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-25" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-26">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-26" value="if mc_ref and pce" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-25">
+          <mxGeometry x="0.7094" y="-3" relative="1" as="geometry">
+            <mxPoint x="-31" y="-3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-9" value="train_seq_design" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="315" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-12" value="util_VarBasedDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="670" y="648" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-39" value="if method == &#39;bayesactdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-38">
+          <mxGeometry x="-0.6235" y="2" relative="1" as="geometry">
+            <mxPoint x="289" y="2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-15" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-12">
+          <mxGeometry x="0.7865" y="4" relative="1" as="geometry">
+            <mxPoint x="-91" y="185" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-13" value="util_BayesianActiveDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="680" width="150" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-13">
+          <mxGeometry x="0.197" y="-3" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-14" value="utilBayesianDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="880" y="730" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-43" value="if method == &#39;bayesoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-42">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint x="3" y="29" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-15" value="run_util_func" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="660" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-41" value="if method == &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-36">
+          <mxGeometry x="-0.5992" relative="1" as="geometry">
+            <mxPoint x="-197" y="62" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry">
+            <Array as="points">
+              <mxPoint x="965" y="590" />
+              <mxPoint x="1095" y="590" />
+            </Array>
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-16" value="dual_annealing" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="910" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-17" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-6" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br style=&quot;border-color: var(--border-color);&quot;&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-5">
+          <mxGeometry x="0.1312" y="2" relative="1" as="geometry">
+            <mxPoint x="17" y="-2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-17" value="tradeoff_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="980" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-4">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-1" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-15">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="790" y="280.0000000000002" as="sourcePoint" />
+            <mxPoint x="690" y="499.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-2" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-1">
+          <mxGeometry x="0.1579" relative="1" as="geometry">
+            <mxPoint x="-15" y="49" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-3" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-16">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="680" y="205.05882352941194" as="sourcePoint" />
+            <mxPoint x="805" y="779.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-4" value="if explore_method == &#39;dual annealing&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-3">
+          <mxGeometry x="-0.6061" relative="1" as="geometry">
+            <mxPoint x="270" y="46" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-10" value="if exploit_method == &#39;alphabetic&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-9">
+          <mxGeometry x="0.8144" y="1" relative="1" as="geometry">
+            <mxPoint x="74" y="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-18" value="choose_next_sample" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="610" y="210" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-20" value="util_AlphOptDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="330" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-21" value="_normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="430" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-22" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-22" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1130" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-23" value="_posteriorPlot" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="440" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-14" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-11">
+          <mxGeometry x="0.0929" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-22">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-18" value="commented out?" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-17">
+          <mxGeometry x="-0.1477" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-24" value="_BME_Calculator" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-25" value="_validError" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="510" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-26" value="_error_Mean_Std" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="580" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="ME5gyYpVqUByTnAIOcMV" name="Parameter and function interaction">
+    <mxGraphModel dx="2049" dy="1366" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-61" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-1" value="engine" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-3" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-71" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-4" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-4" value="emulator" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-65" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-5" value="name" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-47" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-6" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-6" value="bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-7" value="req_outputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-79" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-8" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-8" value="selected_indices" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-55" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-67" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-9" value="prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-68" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-11" value="n_prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-80" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-12" value="measured_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="880" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-58" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-13" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-13" value="inference_method" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="960" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-14" value="mcmc_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1040" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-63" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-15" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-15" value="perturbed_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1120" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-45" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-77" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-16" value="bayes_loocv" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-64" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-17" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-17" value="n_bootstrap_itrs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-18" value="bootstrap_noise" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1360" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-46" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-78" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-19" value="just_analysis" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1440" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-20" value="valid_metrics" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1520" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-52" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-21" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-21" value="plot_post_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1600" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-51" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-22" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-22" value="plot_map_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1680" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-23" value="max_a_posteriori" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1760" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-24" value="corner_title_fmt" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-25" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-25" value="out_dir" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1920" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-50" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-66" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-26" value="error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2000" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-56" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-72" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-27" value="bias_inputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2080" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-41" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-28" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-28" value="measurement_error" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-81" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-29" value="sigma2s" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-30" value="log_likes" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-82" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-31" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-31" value="dtype" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-32" value="create_inference" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-39" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-39" value="n_tot_measurement" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-43" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-42" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-42" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-49" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-59" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-48" value="posterior_df" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-53" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="560" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-60" value="perform_bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="720" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-75" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-69" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-69" value="__mean_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-76" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-70" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-70" value="_std_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-74" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-73" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-73" value="__model_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2880" width="120" height="60" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+</mxfile>
diff --git a/docs/diagrams/.$Structure_BayesInf.drawio.dtmp b/docs/diagrams/.$Structure_BayesInf.drawio.dtmp
new file mode 100644
index 0000000000000000000000000000000000000000..14663ecb0086f14c8b80083b9ecf9a1df87e7ad5
--- /dev/null
+++ b/docs/diagrams/.$Structure_BayesInf.drawio.dtmp
@@ -0,0 +1,964 @@
+<mxfile host="Electron" modified="2024-04-19T16:08:46.718Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/22.1.11 Chrome/114.0.5735.289 Electron/25.9.8 Safari/537.36" etag="QkifmTMxwBp7UqUSeBiS" version="22.1.11" type="device" pages="4">
+  <diagram name="Class and function structure" id="efOe0Jku58RX-i1bv-3b">
+    <mxGraphModel dx="3735" dy="1372" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-22" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;MCMC&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1270" y="360" width="770" height="380" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-1" value="_kernel_rbf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-2" value="_logpdf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="820" y="140" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-10" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesInf&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-120" y="290" width="1310" height="680" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-24" value="if self.bootstrap &lt;br&gt;or self.bayes_loocv &lt;br&gt;or self.just_analysis" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;labelBackgroundColor=#ffae00;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-13">
+          <mxGeometry x="0.2902" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-42" value="if self.name != &#39;valid&#39;&lt;br&gt;and self.inference_method != &#39;rejection&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-31">
+          <mxGeometry x="0.5646" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-43" value="if self.inference_method == &#39;mcmc&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-32">
+          <mxGeometry x="-0.0958" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-19">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-52" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#C2C2C2;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-33">
+          <mxGeometry x="-0.112" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-47" value="if self.plot_post_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-34">
+          <mxGeometry x="0.2399" y="-1" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-46" value="if self.plot_map_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-35">
+          <mxGeometry x="0.4183" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-55" value="if self.bootstrap" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-54">
+          <mxGeometry x="0.1816" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-56">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-58" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-57">
+          <mxGeometry x="0.7182" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-60" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-61" value="if self.error_model&lt;br&gt;and self.name == &#39;calib&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-60">
+          <mxGeometry x="0.3024" y="2" relative="1" as="geometry">
+            <mxPoint x="67" y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-51">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-55" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-54">
+          <mxGeometry x="0.8253" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-9" value="create_inference" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="405" y="539" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-25" value="if len(self.perturbed_data) == 0" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-14">
+          <mxGeometry x="0.3402" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-27" value="if not self.emulator" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-16">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-44" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-29">
+          <mxGeometry x="0.4722" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-41" value="if self.emulator" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-30">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-62" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="340" y="680" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-63" value="if self.error_model&lt;br&gt;and self.name == &#39;valid&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-62">
+          <mxGeometry x="-0.3906" relative="1" as="geometry">
+            <mxPoint y="121" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-13" value="perform_bootstrap" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="50" y="335" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-14" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-75" y="460" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-15" value="_eval_model" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="660" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-49" value="if hasattr bias_inputs&amp;nbsp;&lt;br&gt;and not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-38">
+          <mxGeometry x="0.3126" y="-3" relative="1" as="geometry">
+            <mxPoint x="-103" y="31" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-39" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-16" value="normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="455" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-17" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-50" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-40">
+          <mxGeometry x="-0.6073" y="-5" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-17" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="385" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-18" value="_rejection_sampling" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="280" y="890" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-26" value="if not self.emulator&amp;nbsp;&lt;br&gt;and not self.inference_method == &#39;rejection&#39;&amp;nbsp;&lt;br&gt;and self.name == &#39;calib" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry x="-0.0559" y="15" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-48" value="if sigma2_prior is not None&lt;br&gt;and if hasattr bias_inputs&lt;br&gt;and if not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-37">
+          <mxGeometry x="-0.5544" y="-1" relative="1" as="geometry">
+            <mxPoint x="1" y="-5" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-19" value="_posterior_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="690" y="589" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-20" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-45" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-28">
+          <mxGeometry x="0.0517" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-20" value="_plot_max_a_posteriori" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="495" y="790" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-21" value="plot_post_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="630" y="720" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-36" value="Note: Arrows indicate function calls, beginning calls the end" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="10" y="10" width="190" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-51" value="Color meanings:&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;red: wrong, change&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;orange: seems off, look at again&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;light beige: has been removed" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="20" y="70" width="220" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-53" value="plot_log_BME" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="150" y="820" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-56" value="plot_post_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="660" y="840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-59" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="45" y="740" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-1" value="_check_ranges" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-2" value="gelman_rubin" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="250" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-3" value="_iterative_scheme" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="2055" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-11">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-4" value="_my_ESS" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="100" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-8">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-10">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-53" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-52">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-56" value="if opts_sigma != &#39;B&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-53">
+          <mxGeometry x="0.7377" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-5" value="run_sampler" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="534" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-6" target="HiMKSJFquRK0mIlwyRFI-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-6" value="log_prior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="510" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-7" target="HiMKSJFquRK0mIlwyRFI-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-15">
+          <mxGeometry x="0.0246" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-7" value="log_likelihood" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="539" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-17" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-12">
+          <mxGeometry x="0.4587" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-7">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-18" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-13">
+          <mxGeometry x="0.6826" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-8" value="log_posterior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1480" y="610" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-9" value="eval_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-10" value="train_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1450" y="420" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-11" target="HiMKSJFquRK0mIlwyRFI-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-11" value="marginal_llk_emcee" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1870" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-25" value="Never used!" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1880" y="680" width="100" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-26" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesModelComp&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1096" y="380" width="840" height="420" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HiMKSJFquRK0mIlwyRFI-31">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-27" value="model_comparison_all" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-896" y="566" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="-630" y="564" as="sourcePoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-47" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-42">
+          <mxGeometry x="-0.4883" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-29" target="HiMKSJFquRK0mIlwyRFI-30">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-49" value="if perturbed_data is None" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-37">
+          <mxGeometry x="-0.0507" y="4" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-29" value="generate_dataset" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-546" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-30" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-376" y="636" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-6" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HiMKSJFquRK0mIlwyRFI-33">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-31" value="cal_model_weight" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-871" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-32" value="plot_just_analysis" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-871" y="736" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-33" value="plot_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1016" y="416" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-34" value="plot_bayes_factor" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-446" y="431" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-51" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Discrepancy&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="360" y="1039.82" width="200" height="130" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-52" value="get_sample" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="1079.82" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-34">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-1" value="calc_bayes_factors" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-666" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-2" value="calc_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1066" y="566" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-16" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-23" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HC1H8j6nMwEtLoyIrXXk-16">
+          <mxGeometry x="-0.5478" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-18" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-3" value="calc_justifiability_analysis" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-896" y="666" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-17" value="setup" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-666" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="sQf09xvhinkT827TE7Va" name="Function structure Engine">
+    <mxGraphModel dx="1436" dy="968" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-1" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Engine&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="130" y="140" width="1390" height="690" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-2" value="hellinger_distance" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-3" value="logpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-4" value="subdomain" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="625" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-5" value="start_engine" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="250" y="680" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-6" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-6" value="train_normal" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="420" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="335" y="335" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-7" value="train_sequential" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-8" value="eval_metamodel" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="190" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-7" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-23">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-20" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-19">
+          <mxGeometry x="0.8137" relative="1" as="geometry">
+            <mxPoint x="-57" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-24">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-22" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-21">
+          <mxGeometry x="0.7684" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-25">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-24" value="if expdes.valid_model_runs" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-23">
+          <mxGeometry x="0.606" y="3" relative="1" as="geometry">
+            <mxPoint x="-16" y="3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-25" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-26">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-26" value="if mc_ref and pce" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-25">
+          <mxGeometry x="0.7094" y="-3" relative="1" as="geometry">
+            <mxPoint x="-31" y="-3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-9" value="train_seq_design" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="315" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-12" value="util_VarBasedDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="670" y="648" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-39" value="if method == &#39;bayesactdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-38">
+          <mxGeometry x="-0.6235" y="2" relative="1" as="geometry">
+            <mxPoint x="289" y="2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-15" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-12">
+          <mxGeometry x="0.7865" y="4" relative="1" as="geometry">
+            <mxPoint x="-91" y="185" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-13" value="util_BayesianActiveDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="680" width="150" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-13">
+          <mxGeometry x="0.197" y="-3" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-14" value="utilBayesianDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="880" y="730" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-43" value="if method == &#39;bayesoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-42">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint x="3" y="29" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-15" value="run_util_func" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="660" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-41" value="if method == &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-36">
+          <mxGeometry x="-0.5992" relative="1" as="geometry">
+            <mxPoint x="-197" y="62" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry">
+            <Array as="points">
+              <mxPoint x="965" y="590" />
+              <mxPoint x="1095" y="590" />
+            </Array>
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-16" value="dual_annealing" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="910" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-17" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-6" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br style=&quot;border-color: var(--border-color);&quot;&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-5">
+          <mxGeometry x="0.1312" y="2" relative="1" as="geometry">
+            <mxPoint x="17" y="-2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-17" value="tradeoff_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="980" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-4">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-1" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-15">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="790" y="280.0000000000002" as="sourcePoint" />
+            <mxPoint x="690" y="499.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-2" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-1">
+          <mxGeometry x="0.1579" relative="1" as="geometry">
+            <mxPoint x="-15" y="49" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-3" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-16">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="680" y="205.05882352941194" as="sourcePoint" />
+            <mxPoint x="805" y="779.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-4" value="if explore_method == &#39;dual annealing&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-3">
+          <mxGeometry x="-0.6061" relative="1" as="geometry">
+            <mxPoint x="270" y="46" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-10" value="if exploit_method == &#39;alphabetic&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-9">
+          <mxGeometry x="0.8144" y="1" relative="1" as="geometry">
+            <mxPoint x="74" y="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-18" value="choose_next_sample" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="610" y="210" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-20" value="util_AlphOptDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="330" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-21" value="_normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="430" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-22" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-22" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1130" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-23" value="_posteriorPlot" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="440" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-14" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-11">
+          <mxGeometry x="0.0929" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-22">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-18" value="commented out?" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-17">
+          <mxGeometry x="-0.1477" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-24" value="_BME_Calculator" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-25" value="_validError" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="510" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-26" value="_error_Mean_Std" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="580" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="ME5gyYpVqUByTnAIOcMV" name="Parameter and function interaction">
+    <mxGraphModel dx="2049" dy="1366" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-61" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-1" value="engine" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-3" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-71" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-4" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-4" value="emulator" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-65" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-5" value="name" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-47" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-6" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-6" value="bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-7" value="req_outputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-79" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-8" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-8" value="selected_indices" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-55" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-67" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-9" value="prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-68" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-11" value="n_prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-80" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-12" value="measured_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="880" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-58" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-13" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-13" value="inference_method" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="960" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-14" value="mcmc_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1040" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-63" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-15" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-15" value="perturbed_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1120" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-45" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-77" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-16" value="bayes_loocv" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-64" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-17" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-17" value="n_bootstrap_itrs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-18" value="bootstrap_noise" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1360" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-46" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-78" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-19" value="just_analysis" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1440" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-20" value="valid_metrics" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1520" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-52" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-21" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-21" value="plot_post_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1600" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-51" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-22" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-22" value="plot_map_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1680" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-23" value="max_a_posteriori" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1760" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-24" value="corner_title_fmt" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-25" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-25" value="out_dir" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1920" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-50" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-66" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-26" value="error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2000" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-56" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-72" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-27" value="bias_inputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2080" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-41" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-28" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-28" value="measurement_error" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-81" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-29" value="sigma2s" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-30" value="log_likes" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-82" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-31" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-31" value="dtype" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-32" value="create_inference" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-39" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-39" value="n_tot_measurement" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-43" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-42" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-42" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-49" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-59" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-48" value="posterior_df" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-53" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="560" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-60" value="perform_bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="720" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-75" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-69" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-69" value="__mean_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-76" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-70" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-70" value="_std_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-74" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-73" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-73" value="__model_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2880" width="120" height="60" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="QgiNX2WXFOBDsDgzoFY9" name="Folder structure">
+    <mxGraphModel dx="1436" dy="968" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="KLYezTmecfuvBG8KQe-n-1" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="140" y="80" width="750" height="550" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-2" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="110" width="700" height="220" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-3" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="370" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-4" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="440" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-5" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="500" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-6" value="adaptPlot" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="190" y="150" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-7" value="apoly_construction" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="280" y="150" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-8" value="bayes_linear" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="440" y="150" width="90" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-9" value="engine" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="550" y="150" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-11" value="eval_rec_rule" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="640" y="150" width="100" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-12" value="exp_designs" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="760" y="150" width="90" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-13" value="exploration" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="190" y="210" width="80" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-14" value="glexindex" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="290" y="210" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-15" value="input_space" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="380" y="210" width="80" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-16" value="inputs" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="480" y="210" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-17" value="meta_model_engine" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="570" y="210" width="160" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+</mxfile>
diff --git a/docs/diagrams/Structure_BayesInf.drawio b/docs/diagrams/Structure_BayesInf.drawio
new file mode 100644
index 0000000000000000000000000000000000000000..2e28cbcb3f3e16bee1b42a18f2b93c6f73a156f7
--- /dev/null
+++ b/docs/diagrams/Structure_BayesInf.drawio
@@ -0,0 +1,964 @@
+<mxfile host="Electron" modified="2024-04-19T16:08:43.773Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/22.1.11 Chrome/114.0.5735.289 Electron/25.9.8 Safari/537.36" etag="2ELAo-FvqOEnLxZhqqO4" version="22.1.11" type="device" pages="4">
+  <diagram name="Class and function structure" id="efOe0Jku58RX-i1bv-3b">
+    <mxGraphModel dx="3735" dy="1372" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-22" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;MCMC&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1270" y="360" width="770" height="380" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-1" value="_kernel_rbf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-2" value="_logpdf" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="820" y="140" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-10" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesInf&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-120" y="290" width="1310" height="680" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-24" value="if self.bootstrap &lt;br&gt;or self.bayes_loocv &lt;br&gt;or self.just_analysis" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;labelBackgroundColor=#ffae00;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-13">
+          <mxGeometry x="0.2902" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-42" value="if self.name != &#39;valid&#39;&lt;br&gt;and self.inference_method != &#39;rejection&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-31">
+          <mxGeometry x="0.5646" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-43" value="if self.inference_method == &#39;mcmc&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-32">
+          <mxGeometry x="-0.0958" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-19">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-52" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#C2C2C2;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-33">
+          <mxGeometry x="-0.112" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-47" value="if self.plot_post_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-34">
+          <mxGeometry x="0.2399" y="-1" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-46" value="if self.plot_map_pred" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-35">
+          <mxGeometry x="0.4183" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-55" value="if self.bootstrap" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-54">
+          <mxGeometry x="0.1816" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-56">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-58" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-57">
+          <mxGeometry x="0.7182" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-60" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-61" value="if self.error_model&lt;br&gt;and self.name == &#39;calib&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-60">
+          <mxGeometry x="0.3024" y="2" relative="1" as="geometry">
+            <mxPoint x="67" y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-9" target="HiMKSJFquRK0mIlwyRFI-51">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-55" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-54">
+          <mxGeometry x="0.8253" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-9" value="create_inference" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="405" y="539" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-25" value="if len(self.perturbed_data) == 0" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-14">
+          <mxGeometry x="0.3402" relative="1" as="geometry">
+            <mxPoint y="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-27" value="if not self.emulator" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-16">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-44" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-29">
+          <mxGeometry x="0.4722" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-41" value="if self.emulator" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-30">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-62" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-13" target="xary-zVek9Bg-A1b1ZmA-59">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="340" y="680" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-63" value="if self.error_model&lt;br&gt;and self.name == &#39;valid&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-62">
+          <mxGeometry x="-0.3906" relative="1" as="geometry">
+            <mxPoint y="121" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-13" value="perform_bootstrap" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="50" y="335" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-14" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-75" y="460" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-15" value="_eval_model" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="660" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-49" value="if hasattr bias_inputs&amp;nbsp;&lt;br&gt;and not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-38">
+          <mxGeometry x="0.3126" y="-3" relative="1" as="geometry">
+            <mxPoint x="-103" y="31" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-39" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-16" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-16" value="normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="455" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-17" target="xary-zVek9Bg-A1b1ZmA-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-50" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-40">
+          <mxGeometry x="-0.6073" y="-5" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-17" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="650" y="385" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-18" value="_rejection_sampling" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="280" y="890" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-26" value="if not self.emulator&amp;nbsp;&lt;br&gt;and not self.inference_method == &#39;rejection&#39;&amp;nbsp;&lt;br&gt;and self.name == &#39;calib" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry x="-0.0559" y="15" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-19" target="xary-zVek9Bg-A1b1ZmA-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-48" value="if sigma2_prior is not None&lt;br&gt;and if hasattr bias_inputs&lt;br&gt;and if not hasattr error_model" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#ffae00;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-37">
+          <mxGeometry x="-0.5544" y="-1" relative="1" as="geometry">
+            <mxPoint x="1" y="-5" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-19" value="_posterior_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="690" y="589" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="xary-zVek9Bg-A1b1ZmA-20" target="xary-zVek9Bg-A1b1ZmA-15">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-45" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#cdcbcb;" vertex="1" connectable="0" parent="xary-zVek9Bg-A1b1ZmA-28">
+          <mxGeometry x="0.0517" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-20" value="_plot_max_a_posteriori" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="495" y="790" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-21" value="plot_post_predictive" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="630" y="720" width="120" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-36" value="Note: Arrows indicate function calls, beginning calls the end" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="10" y="10" width="190" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-51" value="Color meanings:&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;red: wrong, change&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;orange: seems off, look at again&lt;br&gt;&lt;span style=&quot;white-space: pre;&quot;&gt;&#x9;&lt;/span&gt;light beige: has been removed" style="text;html=1;strokeColor=none;fillColor=none;align=left;verticalAlign=middle;whiteSpace=wrap;rounded=0;" vertex="1" parent="1">
+          <mxGeometry x="20" y="70" width="220" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-53" value="plot_log_BME" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="150" y="820" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-56" value="plot_post_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="660" y="840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="xary-zVek9Bg-A1b1ZmA-59" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="45" y="740" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-1" value="_check_ranges" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-2" value="gelman_rubin" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="250" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-3" value="_iterative_scheme" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="2055" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-4" target="HiMKSJFquRK0mIlwyRFI-11">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-4" value="_my_ESS" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="100" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-8">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-10">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-53" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-5" target="HiMKSJFquRK0mIlwyRFI-52">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-56" value="if opts_sigma != &#39;B&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=#FF9A03;" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-53">
+          <mxGeometry x="0.7377" y="1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-5" value="run_sampler" style="rounded=0;whiteSpace=wrap;html=1;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="1350" y="534" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-6" target="HiMKSJFquRK0mIlwyRFI-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-6" value="log_prior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1595" y="510" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-7" target="HiMKSJFquRK0mIlwyRFI-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-15">
+          <mxGeometry x="0.0246" y="2" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-7" value="log_likelihood" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="539" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-17" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-12">
+          <mxGeometry x="0.4587" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-8" target="HiMKSJFquRK0mIlwyRFI-7">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-18" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-13">
+          <mxGeometry x="0.6826" y="4" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-8" value="log_posterior" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1480" y="610" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-9" value="eval_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1760" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-10" value="train_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="1450" y="420" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-11" target="HiMKSJFquRK0mIlwyRFI-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-11" value="marginal_llk_emcee" style="rounded=0;whiteSpace=wrap;html=1;fillColor=#f9f7ed;strokeColor=#CCC1AA;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1870" y="620" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-25" value="Never used!" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontColor=#CCC1AA;" vertex="1" parent="1">
+          <mxGeometry x="1880" y="680" width="100" height="30" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-26" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;BayesModelComp&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1096" y="380" width="840" height="420" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.75;entryDx=0;entryDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-27" target="HiMKSJFquRK0mIlwyRFI-31">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-27" value="model_comparison_all" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-896" y="566" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="-630" y="564" as="sourcePoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-47" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-42">
+          <mxGeometry x="-0.4883" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-29" target="HiMKSJFquRK0mIlwyRFI-30">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-49" value="if perturbed_data is None" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HiMKSJFquRK0mIlwyRFI-37">
+          <mxGeometry x="-0.0507" y="4" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-29" value="generate_dataset" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-546" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-30" value="_perturb_data" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-376" y="636" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-6" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-1">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HiMKSJFquRK0mIlwyRFI-33">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HiMKSJFquRK0mIlwyRFI-31" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-31" value="cal_model_weight" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-871" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-32" value="plot_just_analysis" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-871" y="736" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-33" value="plot_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1016" y="416" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-34" value="plot_bayes_factor" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-446" y="431" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-51" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Discrepancy&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="360" y="1039.82" width="200" height="130" as="geometry" />
+        </mxCell>
+        <mxCell id="HiMKSJFquRK0mIlwyRFI-52" value="get_sample" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="1079.82" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-34">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-20" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-1" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-1" value="calc_bayes_factors" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-666" y="466" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-2" value="calc_model_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-1066" y="566" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-4" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-16" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.75;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="xary-zVek9Bg-A1b1ZmA-9">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-23" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="HC1H8j6nMwEtLoyIrXXk-16">
+          <mxGeometry x="-0.5478" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-18" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.25;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HC1H8j6nMwEtLoyIrXXk-17">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-22" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="HC1H8j6nMwEtLoyIrXXk-3" target="HiMKSJFquRK0mIlwyRFI-29">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-3" value="calc_justifiability_analysis" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="-896" y="666" width="160" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="HC1H8j6nMwEtLoyIrXXk-17" value="setup" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="-666" y="566" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="sQf09xvhinkT827TE7Va" name="Function structure Engine">
+    <mxGraphModel dx="1436" dy="968" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-1" value="&lt;p style=&quot;margin:0px;margin-top:4px;text-align:center;&quot;&gt;&lt;b&gt;Engine&lt;/b&gt;&lt;/p&gt;&lt;hr size=&quot;1&quot;&gt;&lt;div style=&quot;height:2px;&quot;&gt;&lt;/div&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="130" y="140" width="1390" height="690" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-2" value="hellinger_distance" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-3" value="logpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1050" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-4" value="subdomain" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="625" y="50" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-5" value="start_engine" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="250" y="680" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-32" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-6" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-6" value="train_normal" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="420" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-9">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="335" y="335" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-7" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-7" value="train_sequential" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="170" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-8" value="eval_metamodel" style="html=1;whiteSpace=wrap;strokeWidth=2;" vertex="1" parent="1">
+          <mxGeometry x="190" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-7" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-23">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-20" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-19">
+          <mxGeometry x="0.8137" relative="1" as="geometry">
+            <mxPoint x="-57" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-21" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;entryX=0.25;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-24">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-22" value="if len(obs_data) != 0" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-21">
+          <mxGeometry x="0.7684" y="3" relative="1" as="geometry">
+            <mxPoint x="1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-23" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-25">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-24" value="if expdes.valid_model_runs" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-23">
+          <mxGeometry x="0.606" y="3" relative="1" as="geometry">
+            <mxPoint x="-16" y="3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-25" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-9" target="JXjM7l_erEiZMkSmYBvl-26">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-26" value="if mc_ref and pce" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-25">
+          <mxGeometry x="0.7094" y="-3" relative="1" as="geometry">
+            <mxPoint x="-31" y="-3" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-9" value="train_seq_design" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="315" y="310" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-12" value="util_VarBasedDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="670" y="648" width="130" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-5">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-39" value="if method == &#39;bayesactdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];labelBackgroundColor=default;" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-38">
+          <mxGeometry x="-0.6235" y="2" relative="1" as="geometry">
+            <mxPoint x="289" y="2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-12" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-13" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-15" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-12">
+          <mxGeometry x="0.7865" y="4" relative="1" as="geometry">
+            <mxPoint x="-91" y="185" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-13" value="util_BayesianActiveDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1020" y="680" width="150" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-6">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-13" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-14" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-16" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-13">
+          <mxGeometry x="0.197" y="-3" relative="1" as="geometry">
+            <mxPoint x="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-14" value="utilBayesianDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="880" y="730" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-42" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-15" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-43" value="if method == &#39;bayesoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-42">
+          <mxGeometry x="0.6143" y="-3" relative="1" as="geometry">
+            <mxPoint x="3" y="29" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-15" value="run_util_func" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="660" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.25;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-12">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-41" value="if method == &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="JXjM7l_erEiZMkSmYBvl-36">
+          <mxGeometry x="-0.5992" relative="1" as="geometry">
+            <mxPoint x="-197" y="62" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-13">
+          <mxGeometry relative="1" as="geometry">
+            <Array as="points">
+              <mxPoint x="965" y="590" />
+              <mxPoint x="1095" y="590" />
+            </Array>
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-16" target="JXjM7l_erEiZMkSmYBvl-14">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-16" value="dual_annealing" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="910" y="450" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-5" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-17" target="JXjM7l_erEiZMkSmYBvl-18">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-6" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br style=&quot;border-color: var(--border-color);&quot;&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-5">
+          <mxGeometry x="0.1312" y="2" relative="1" as="geometry">
+            <mxPoint x="17" y="-2" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-17" value="tradeoff_weights" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="980" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-30" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-4">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-1" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-15">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="790" y="280.0000000000002" as="sourcePoint" />
+            <mxPoint x="690" y="499.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-2" value="if exploit_method is &#39;bayesoptdesign&#39;,&lt;br&gt;&#39;bayesactdesign&#39; or &#39;varoptdesign&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-1">
+          <mxGeometry x="0.1579" relative="1" as="geometry">
+            <mxPoint x="-15" y="49" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-3" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.75;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-16">
+          <mxGeometry relative="1" as="geometry">
+            <mxPoint x="680" y="205.05882352941194" as="sourcePoint" />
+            <mxPoint x="805" y="779.9999999999998" as="targetPoint" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-4" value="if explore_method == &#39;dual annealing&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-3">
+          <mxGeometry x="-0.6061" relative="1" as="geometry">
+            <mxPoint x="270" y="46" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-18" target="JXjM7l_erEiZMkSmYBvl-20">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-10" value="if exploit_method == &#39;alphabetic&#39;" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-9">
+          <mxGeometry x="0.8144" y="1" relative="1" as="geometry">
+            <mxPoint x="74" y="-1" as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-18" value="choose_next_sample" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="610" y="210" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-20" value="util_AlphOptDesign" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="330" y="210" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-21" value="_normpdf" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="430" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-22" target="JXjM7l_erEiZMkSmYBvl-3">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-22" value="_corr_factor_BME" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1130" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-23" value="_posteriorPlot" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="440" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-27" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-2">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-11" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-21">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-14" value="always" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-11">
+          <mxGeometry x="0.0929" y="-1" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="JXjM7l_erEiZMkSmYBvl-24" target="JXjM7l_erEiZMkSmYBvl-22">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="W5_FOelZ0qj-h3Gb0n3K-18" value="commented out?" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="W5_FOelZ0qj-h3Gb0n3K-17">
+          <mxGeometry x="-0.1477" y="3" relative="1" as="geometry">
+            <mxPoint as="offset" />
+          </mxGeometry>
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-24" value="_BME_Calculator" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="1340" y="220" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-25" value="_validError" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="510" width="110" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="JXjM7l_erEiZMkSmYBvl-26" value="_error_Mean_Std" style="html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="520" y="580" width="110" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="ME5gyYpVqUByTnAIOcMV" name="Parameter and function interaction">
+    <mxGraphModel dx="2049" dy="1366" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-33" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-54" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-61" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-1" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-1" value="engine" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-3" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-71" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-4" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-4" value="emulator" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-37" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-57" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-65" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-5" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-5" value="name" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-47" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-6" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-6" value="bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-7" value="req_outputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-79" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-8" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-8" value="selected_indices" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-55" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-67" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-9" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-9" value="prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-36" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-68" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-11" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-11" value="n_prior_samples" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-80" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-12" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-12" value="measured_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="880" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-58" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-13" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-13" value="inference_method" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="960" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-14" value="mcmc_params" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1040" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-63" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-15" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-15" value="perturbed_data" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1120" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-45" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-77" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-16" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-16" value="bayes_loocv" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1200" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-64" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-17" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-17" value="n_bootstrap_itrs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1280" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-18" value="bootstrap_noise" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1360" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-46" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-78" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-19" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-19" value="just_analysis" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1440" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-20" value="valid_metrics" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1520" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-52" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-21" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-21" value="plot_post_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1600" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-51" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-22" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-22" value="plot_map_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1680" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-23" value="max_a_posteriori" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1760" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-24" value="corner_title_fmt" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1840" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-34" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-25" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-25" value="out_dir" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="1920" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-50" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-66" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-26" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-26" value="error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2000" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-56" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-72" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-27" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-27" value="bias_inputs" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2080" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-41" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-28" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-28" value="measurement_error" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2160" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-81" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-29" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-29" value="sigma2s" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2240" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-30" value="log_likes" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2320" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-82" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-31" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-31" value="dtype" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2400" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-32" value="create_inference" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="400" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-39" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-39" value="n_tot_measurement" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2480" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-43" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-42" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-42" value="Discrepancy" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2560" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-49" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-32">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-59" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-48" target="K5oJ7VEt7dPmeK6pba1f-53">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-48" value="posterior_df" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2640" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-53" value="create_error_model" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="560" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-60" value="perform_bootstrap" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="720" y="20" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-75" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-69" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-69" value="__mean_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2720" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-76" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-70" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-70" value="_std_pce_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2800" width="120" height="60" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-74" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="K5oJ7VEt7dPmeK6pba1f-73" target="K5oJ7VEt7dPmeK6pba1f-60">
+          <mxGeometry relative="1" as="geometry" />
+        </mxCell>
+        <mxCell id="K5oJ7VEt7dPmeK6pba1f-73" value="__model_prior_pred" style="rounded=0;whiteSpace=wrap;html=1;" vertex="1" parent="1">
+          <mxGeometry x="40" y="2880" width="120" height="60" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+  <diagram id="QgiNX2WXFOBDsDgzoFY9" name="Folder structure">
+    <mxGraphModel dx="1436" dy="968" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="850" pageHeight="1100" math="0" shadow="0">
+      <root>
+        <mxCell id="0" />
+        <mxCell id="1" parent="0" />
+        <mxCell id="KLYezTmecfuvBG8KQe-n-1" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="140" y="80" width="750" height="550" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-2" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="110" width="700" height="220" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-3" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="370" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-4" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="440" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-5" value="" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="170" y="500" width="180" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-6" value="adaptPlot" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="190" y="150" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-7" value="apoly_construction" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="280" y="150" width="140" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-8" value="bayes_linear" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="440" y="150" width="90" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-9" value="engine" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="550" y="150" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-11" value="eval_rec_rule" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="640" y="150" width="100" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-12" value="exp_designs" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="760" y="150" width="90" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-13" value="exploration" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="190" y="210" width="80" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-14" value="glexindex" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="290" y="210" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-15" value="input_space" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="380" y="210" width="80" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-16" value="inputs" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="480" y="210" width="70" height="50" as="geometry" />
+        </mxCell>
+        <mxCell id="KLYezTmecfuvBG8KQe-n-17" value="meta_model_engine" style="shape=folder;fontStyle=1;spacingTop=10;tabWidth=40;tabHeight=14;tabPosition=left;html=1;whiteSpace=wrap;" vertex="1" parent="1">
+          <mxGeometry x="570" y="210" width="160" height="50" as="geometry" />
+        </mxCell>
+      </root>
+    </mxGraphModel>
+  </diagram>
+</mxfile>
diff --git a/examples/.coverage b/examples/.coverage
new file mode 100644
index 0000000000000000000000000000000000000000..254e10e4371d703eefec0f0437f9c0575be3f5ec
Binary files /dev/null and b/examples/.coverage differ
diff --git a/examples/analytical-function/example_analytical_function.py b/examples/analytical-function/example_analytical_function.py
index 52e7731b576c42cdd29a705865f0a0389f812654..37900127ab39018eef4af9dc5fa3f0326e2f4eeb 100644
--- a/examples/analytical-function/example_analytical_function.py
+++ b/examples/analytical-function/example_analytical_function.py
@@ -141,7 +141,7 @@ if __name__ == "__main__":
 
     # One-shot (normal) or Sequential Adaptive (sequential) Design
     ExpDesign.method = 'sequential'
-    ExpDesign.n_init_samples = 100#3*ndim
+    ExpDesign.n_init_samples = 140#00#3*ndim
 
     # Sampling methods
     # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley
@@ -227,7 +227,8 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign = ExpDesign
     engine = Engine(MetaModelOpts, Model, ExpDesign)
     engine.start_engine()
-    engine.train_sequential()
+    #engine.train_sequential()
+    engine.train_normal()
 
     # Load the objects
     # with open(f"PCEModel_{Model.name}.pkl", "rb") as input:
@@ -266,12 +267,12 @@ if __name__ == "__main__":
 
     # BayesOpts.selected_indices = [0, 3, 5,  7, 9]
     # BME Bootstrap
-    # BayesOpts.bootstrap = True
-    # BayesOpts.n_bootstrap_itrs = 500
-    # BayesOpts.bootstrap_noise = 100
+    BayesOpts.bootstrap = True
+    BayesOpts.n_bootstrap_itrs = 500
+    BayesOpts.bootstrap_noise = 100
 
     # Bayesian cross validation
-    # BayesOpts.bayes_loocv = True
+    BayesOpts.bayes_loocv = True  # TODO: test what this does
 
     # Select the inference method
     import emcee
@@ -296,31 +297,34 @@ if __name__ == "__main__":
     BayesOpts.Discrepancy = DiscrepancyOpts
 
     # -- (Option C) --
-    # DiscOutputOpts = Input()
-    # # # OutputName = 'Z'
-    # DiscOutputOpts.add_marginals()
-    # DiscOutputOpts.Marginals[0].Nnme = '$\sigma^2_{\epsilon}$'
-    # DiscOutputOpts.Marginals[0].dist_type = 'uniform'
-    # DiscOutputOpts.Marginals[0].parameters =  [0, 10]
-    # BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
-    #                           'infer': Discrepancy(DiscOutputOpts)}
-
-    # BayesOpts.bias_inputs = {'Z':np.arange(0, 10, 1.).reshape(-1,1) / 9}
-    # DiscOutputOpts = Input()
-    # # OutputName = 'lambda'
-    # DiscOutputOpts.add_marginals()
-    # DiscOutputOpts.Marginals[0].name = '$\lambda$'
-    # DiscOutputOpts.Marginals[0].dist_type = 'uniform'
-    # DiscOutputOpts.Marginals[0].parameters = [0, 1]
-
-    # # OutputName = 'sigma_f'
-    # DiscOutputOpts.add_marginals()
-    # DiscOutputOpts.Marginals[1].Name = '$\sigma_f$'
-    # DiscOutputOpts.Marginals[1].dist_type = 'uniform'
-    # DiscOutputOpts.Marginals[1].parameters = [0, 1e-4]
-    # BayesOpts.Discrepancy = Discrepancy(DiscOutputOpts)
-    # BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
-    #                           'infer': Discrepancy(DiscOutputOpts)}
+    if 0:
+        DiscOutputOpts = Input()
+        # # # OutputName = 'Z'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[0].Nnme = '$\sigma^2_{\epsilon}$'
+        DiscOutputOpts.Marginals[0].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[0].parameters =  [0, 10]
+        #BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
+        #                          'infer': Discrepancy(DiscOutputOpts)}
+    
+        BayesOpts.bias_inputs = {'Z':np.arange(0, 10, 1.).reshape(-1,1) / 9}
+        
+        DiscOutputOpts = Input()
+        # OutputName = 'lambda'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[0].name = '$\lambda$'
+        DiscOutputOpts.Marginals[0].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[0].parameters = [0, 1]
+    
+        # # OutputName = 'sigma_f'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[1].Name = '$\sigma_f$'
+        DiscOutputOpts.Marginals[1].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[1].parameters = [0, 1e-4]
+        #BayesOpts.Discrepancy = Discrepancy(DiscOutputOpts)
+        BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
+                                  'infer': Discrepancy(DiscOutputOpts)}
+    
     # Start the calibration/inference
     Bayes_PCE = BayesOpts.create_inference()
 
diff --git a/examples/model-comparison/example_model_comparison.py b/examples/model-comparison/example_model_comparison.py
index ebd80fea82a3caf3ff204b89da96c59737ba502b..d678898c178e9722445f08bd22e3ec93906e2ac7 100644
--- a/examples/model-comparison/example_model_comparison.py
+++ b/examples/model-comparison/example_model_comparison.py
@@ -23,6 +23,7 @@ import pandas as pd
 import joblib
 import sys
 sys.path.append("../../src/bayesvalidrox/")
+sys.path.append("../../src/")
 
 from bayesvalidrox.pylink.pylink import PyLinkForwardModel
 from bayesvalidrox.surrogate_models.inputs import Input
@@ -38,7 +39,6 @@ from bayes_inference.bayes_model_comparison import BayesModelComparison
 from bayesvalidrox.surrogate_models.engine import Engine
 
 if __name__ == "__main__":
-
     # Read data
     sigma = 0.6
     data = {
@@ -277,10 +277,10 @@ if __name__ == "__main__":
         "cosine": NL4_engine
         }
 
-    # BME Bootstrap optuions
+    # BME Bootstrap options
     opts_bootstrap = {
         "bootstrap": True,
-        "n_samples": 10000,
+        "n_samples": 100,#0,#0, # TODO: difference between this and the n_bootstrap set below?
         "Discrepancy": DiscrepancyOpts,
         "emulator": True,
         "plot_post_pred": False
@@ -289,10 +289,10 @@ if __name__ == "__main__":
     # Run model comparison
     BayesOpts = BayesModelComparison(
         justifiability=True,
-        n_bootstarp=100,#00,
-        just_n_meas=2
+        n_bootstrap=100,#0,#00,
+        #just_n_meas=2
         )
-    output_dict = BayesOpts.create_model_comparison(
+    output_dict = BayesOpts.model_comparison_all(
         meta_models,
         opts_bootstrap
         )
diff --git a/src/bayesvalidrox.egg-info/SOURCES.txt b/src/bayesvalidrox.egg-info/SOURCES.txt
index d6619704eee21931221fa73b5d2076a2dce99991..344e9840627bb3e5a89593dbd9256472a8ef41d9 100644
--- a/src/bayesvalidrox.egg-info/SOURCES.txt
+++ b/src/bayesvalidrox.egg-info/SOURCES.txt
@@ -29,10 +29,13 @@ src/bayesvalidrox/surrogate_models/exploration.py
 src/bayesvalidrox/surrogate_models/glexindex.py
 src/bayesvalidrox/surrogate_models/input_space.py
 src/bayesvalidrox/surrogate_models/inputs.py
+src/bayesvalidrox/surrogate_models/meta_model_engine.py
 src/bayesvalidrox/surrogate_models/orthogonal_matching_pursuit.py
 src/bayesvalidrox/surrogate_models/reg_fast_ard.py
 src/bayesvalidrox/surrogate_models/reg_fast_laplace.py
+src/bayesvalidrox/surrogate_models/sequential_design.py
 src/bayesvalidrox/surrogate_models/surrogate_models.py
+tests/test_BayesModelComparison.py
 tests/test_Discrepancy.py
 tests/test_ExpDesign.py
 tests/test_Input.py
diff --git a/src/bayesvalidrox/bayes_inference/bayes_inference.py b/src/bayesvalidrox/bayes_inference/bayes_inference.py
index 1898a8ae619597d92bc355ac4249f57019f0aed7..c7cfe78cd08431755f0d16a093363380801964ab 100644
--- a/src/bayesvalidrox/bayes_inference/bayes_inference.py
+++ b/src/bayesvalidrox/bayes_inference/bayes_inference.py
@@ -1,25 +1,25 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
-import numpy as np
-import os
 import copy
-import pandas as pd
-from tqdm import tqdm
-from scipy import stats
-import scipy.linalg as spla
-import joblib
-import seaborn as sns
+import gc
+import multiprocessing
+import os
+
 import corner
 import h5py
-import multiprocessing
-import gc
-from sklearn.metrics import mean_squared_error, r2_score
-from sklearn import preprocessing
-from matplotlib.patches import Patch
 import matplotlib.lines as mlines
-from matplotlib.backends.backend_pdf import PdfPages
 import matplotlib.pylab as plt
+import numpy as np
+import pandas as pd
+import scipy.linalg as spla
+import seaborn as sns
+from matplotlib.backends.backend_pdf import PdfPages
+from matplotlib.patches import Patch
+from scipy import stats
+from sklearn import preprocessing
+from sklearn.metrics import mean_squared_error, r2_score
+from tqdm import tqdm
 
 from .mcmc import MCMC
 
@@ -28,6 +28,92 @@ plt.style.use(os.path.join(os.path.split(__file__)[0],
                            '../', 'bayesvalidrox.mplstyle'))
 
 
+# -------------------------------------------------------------------------
+def _kernel_rbf(X, hyperparameters):
+    """
+    Isotropic squared exponential kernel.
+
+    Higher l values lead to smoother functions and therefore to coarser
+    approximations of the training data. Lower l values make functions
+    more wiggly with wide uncertainty regions between training data points.
+
+    sigma_f controls the marginal variance of b(x)
+
+    Parameters
+    ----------
+    X : ndarray of shape (n_samples_X, n_features)
+
+    hyperparameters : Dict
+        Lambda characteristic length
+        sigma_f controls the marginal variance of b(x)
+        sigma_0 unresolvable error nugget term, interpreted as random
+                error that cannot be attributed to measurement error.
+    Returns
+    -------
+    var_cov_matrix : ndarray of shape (n_samples_X,n_samples_X)
+        Kernel k(X, X).
+
+    """
+    from sklearn.gaussian_process.kernels import RBF
+    min_max_scaler = preprocessing.MinMaxScaler()
+    X_minmax = min_max_scaler.fit_transform(X)
+
+    nparams = len(hyperparameters)
+    if nparams < 3:
+        raise AttributeError('Provide 3 parameters for the RBF kernel!')
+
+    # characteristic length (0,1]
+    Lambda = hyperparameters[0]
+    # sigma_f controls the marginal variance of b(x)
+    sigma2_f = hyperparameters[1]
+
+    rbf = RBF(length_scale=Lambda)
+    cov_matrix = sigma2_f * rbf(X_minmax)
+
+    # (unresolvable error) nugget term that is interpreted as random
+    # error that cannot be attributed to measurement error.
+    sigma2_0 = hyperparameters[2:]
+    for i, j in np.ndindex(cov_matrix.shape):
+        cov_matrix[i, j] += np.sum(sigma2_0) if i == j else 0
+
+    return cov_matrix
+
+
+# -------------------------------------------------------------------------
+def _logpdf(x, mean, cov):
+    """
+    Computes the likelihood based on a multivariate normal distribution.
+
+    Parameters
+    ----------
+    x : TYPE
+        DESCRIPTION.
+    mean : array_like
+        Observation data.
+    cov : 2d array
+        Covariance matrix of the distribution.
+
+    Returns
+    -------
+    log_lik : float
+        Log likelihood.
+
+    """
+
+    # Transform into np arrays
+    x = np.array(x)
+    mean = np.array(mean)
+    cov = np.array(cov)
+
+    n = len(mean)
+    L = spla.cholesky(cov, lower=True)
+    beta = np.sum(np.log(np.diag(L)))
+    dev = x - mean
+    alpha = dev.dot(spla.cho_solve((L, True), dev))
+    log_lik = -0.5 * alpha - beta - n / 2. * np.log(2 * np.pi)
+    return log_lik
+
+
 class BayesInference:
     """
     A class to perform Bayesian Analysis.
@@ -42,7 +128,7 @@ class BayesInference:
         of the variance matrix for a multivariate normal likelihood.
     name : str, optional
         The type of analysis, either calibration (`Calib`) or validation
-        (`Valid`). The default is `'Calib'`.
+        (`Valid`). The default is `'Calib'`. # TODO: what is going on here for validation?
     emulator : bool, optional
         Analysis with emulator (MetaModel). The default is `True`.
     bootstrap : bool, optional
@@ -55,11 +141,11 @@ class BayesInference:
         A dictionary with the selected indices of each model output. The
         default is `None`. If `None`, all measurement points are used in the
         analysis.
-    samples : array of shape (n_samples, n_params), optional
+    prior_samples : array of shape (n_samples, n_params), optional
         The samples to be used in the analysis. The default is `None`. If
         None the samples are drawn from the probablistic input parameter
         object of the MetaModel object.
-    n_samples : int, optional
+    n_prior_samples : int, optional
         Number of samples to be used in the analysis. The default is `500000`.
         If samples is not `None`, this argument will be assigned based on the
         number of samples given.
@@ -125,26 +211,32 @@ class BayesInference:
 
     """
 
-    def __init__(self, engine, MetaModel = None, discrepancy=None, emulator=True,
+    def __init__(self, engine, discrepancy=None, emulator=True,
                  name='Calib', bootstrap=False, req_outputs=None,
-                 selected_indices=None, samples=None, n_samples=100000,
+                 selected_indices=None, prior_samples=None, n_prior_samples=100000,
                  measured_data=None, inference_method='rejection',
                  mcmc_params=None, bayes_loocv=False, n_bootstrap_itrs=1,
                  perturbed_data=[], bootstrap_noise=0.05, just_analysis=False,
                  valid_metrics=['BME'], plot_post_pred=True,
                  plot_map_pred=False, max_a_posteriori='mean',
-                 corner_title_fmt='.2e'):
-
+                 corner_title_fmt='.2e', out_dir='', bmc=False):
+
+        self.log_BME_tom = None
+        self.inf_entropy = None
+        self.log_BME = None
+        self.KLD = None
+        self.__mean_pce_prior_pred = None
+        if perturbed_data is None:
+            perturbed_data = []
         self.engine = engine
-        self.MetaModel = engine.MetaModel
         self.Discrepancy = discrepancy
         self.emulator = emulator
         self.name = name
         self.bootstrap = bootstrap
         self.req_outputs = req_outputs
         self.selected_indices = selected_indices
-        self.samples = samples
-        self.n_samples = n_samples
+        self.prior_samples = prior_samples
+        self.n_prior_samples = n_prior_samples
         self.measured_data = measured_data
         self.inference_method = inference_method
         self.mcmc_params = mcmc_params
@@ -158,44 +250,63 @@ class BayesInference:
         self.plot_map_pred = plot_map_pred
         self.max_a_posteriori = max_a_posteriori
         self.corner_title_fmt = corner_title_fmt
+        self.out_dir = out_dir
+
+        # Other properties and parameters (found in code, but never set)
+        self.error_model = False  # TODO: no example or use case for this!
+        self.bias_inputs = None
+        self.measurement_error = None  # TODO: what is this?
+        self.sigma2s = None
+        self.log_likes = None
+        self.n_tot_measurement = None
+        self.Discrepancy = None
+        self.posterior_df = None
+        self.error_MetaModel = None
+        self._mean_pce_prior_pred = None
+        self._std_pce_prior_pred = None
+        self.__model_prior_pred = None
+        self.bmc = bmc  # Set to True, if you want to cut short to only Model Comparison
+
+        # System settings
+        if os.name == 'nt':
+            print('')
+            print('WARNING: Performing the inference on windows can lead to reduced accuracy!')
+            print('')
+            self.dtype = np.longdouble
+        else:
+            self.dtype = np.float128
 
-    # -------------------------------------------------------------------------
-    def create_inference(self):
+    def setup_inference(self):
         """
-        Starts the inference.
-
-        Returns
-        -------
-        BayesInference : obj
-            The Bayes inference object.
-
+        This function sets up the inference by checking the inputs and getting 
+        needed data.
         """
-
-        # Set some variables
-        MetaModel = self.MetaModel
         Model = self.engine.Model
-        n_params = MetaModel.n_params
-        output_names = Model.Output.names
-        par_names = self.engine.ExpDesign.par_names
 
-        # If the prior is set by the user, take it.
-        if self.samples is None:
-            self.samples = self.engine.ExpDesign.generate_samples(
-                self.n_samples, 'random')
+        # Create output directory
+        if self.out_dir == '':
+            self.out_dir = f'Outputs_Bayes_{self.engine.Model.name}_{self.name}'
+        os.makedirs(self.out_dir, exist_ok=True)
+
+        # If the prior is set by the user, take it, else generate from ExpDes
+        if self.prior_samples is None:
+            self.prior_samples = self.engine.ExpDesign.generate_samples(
+                self.n_prior_samples, 'random')
         else:
             try:
-                samples = self.samples.values
+                samples = self.prior_samples.values
             except AttributeError:
-                samples = self.samples
+                samples = self.prior_samples
 
             # Take care of an additional Sigma2s
-            self.samples = samples[:, :n_params]
+            self.prior_samples = samples[:, :self.engine.MetaModel.n_params]
 
             # Update number of samples
-            self.n_samples = self.samples.shape[0]
+            self.n_prior_samples = self.prior_samples.shape[0]
 
         # ---------- Preparation of observation data ----------
-        # Read observation data and perturb it if requested.
+        # Read observation data 
+        # TODO: later use valid #of measurements. but here only get the model observations?
         if self.measured_data is None:
             self.measured_data = Model.read_observation(case=self.name)
         # Convert measured_data to a data frame
@@ -205,11 +316,13 @@ class BayesInference:
         # Extract the total number of measurement points
         if self.name.lower() == 'calib':
             self.n_tot_measurement = Model.n_obs
-        else:
+        elif self.name.lower() == 'valid':
             self.n_tot_measurement = Model.n_obs_valid
+        else:
+            raise AttributeError('The set inference type is not known! Use either `calib` or `valid`')
 
         # Find measurement error (if not given) for post predictive plot
-        if not hasattr(self, 'measurement_error'):
+        if self.measurement_error is None:
             if isinstance(self.Discrepancy, dict):
                 Disc = self.Discrepancy['known']
             else:
@@ -222,383 +335,368 @@ class BayesInference:
                     self.measurement_error = np.sqrt(Disc.parameters)
                 except TypeError:
                     pass
+        # TODO: need a transformation for given measurement error?
 
-        # ---------- Preparation of variance for covariance matrix ----------
-        # Independent and identically distributed
-        total_sigma2 = dict()
+        # Get Discrepancy type
         opt_sigma_flag = isinstance(self.Discrepancy, dict)
         opt_sigma = None
-        for key_idx, key in enumerate(output_names):
+        # Option A: known error with unknown bias term
+        if opt_sigma_flag and opt_sigma is None:
+            opt_sigma = 'A'
+        # Option B: The sigma2 is known (no bias term)
+        elif self.Discrepancy.parameters is not None:
+            opt_sigma = 'B'
+        # Option C: The sigma2 is unknown (bias term including error)
+        elif not isinstance(self.Discrepancy.InputDisc, str):
+            opt_sigma = 'C'
+        self.Discrepancy.opt_sigma = opt_sigma
+
+        # Set MCMC params if used
+        if self.inference_method.lower() == 'mcmc':
+            if self.mcmc_params is None:
+                self.mcmc_params = {}
+            par_list = ['init_samples', 'n_walkers', 'n_burn', 'n_steps',
+                        'moves', 'multiprocessing', 'verbose']
+            init_val = [None, 100, 200, 100000, None, False, False]
+            for i in range(len(par_list)):
+                if par_list[i] not in list(self.mcmc_params.keys()):
+                    self.mcmc_params[par_list[i]] = init_val[i]
 
+    # -------------------------------------------------------------------------
+    def create_inference(self):
+        """
+        Starts the inference.
+
+        Returns
+        -------
+        BayesInference : obj
+            The Bayes inference object.
+            
+        # TODO: should this function really return the class?
+
+        """
+        # Do general set up and check some parameters
+        self.setup_inference()
+
+        # ---------- Preparation of variance for covariance matrix ----------
+        # Independent and identically distributed # TODO: ??
+        total_sigma2 = dict()
+        opt_sigma = self.Discrepancy.opt_sigma
+        for key_idx, key in enumerate(self.engine.Model.Output.names):
             # Find opt_sigma
-            if opt_sigma_flag and opt_sigma is None:
-                # Option A: known error with unknown bias term
-                opt_sigma = 'A'
+            sigma2 = None
+            if opt_sigma == 'A':
                 known_discrepancy = self.Discrepancy['known']
                 self.Discrepancy = self.Discrepancy['infer']
                 sigma2 = np.array(known_discrepancy.parameters[key])
 
-            elif opt_sigma == 'A' or self.Discrepancy.parameters is not None:
-                # Option B: The sigma2 is known (no bias term)
-                if opt_sigma == 'A':
-                    sigma2 = np.array(known_discrepancy.parameters[key])
-                else:
-                    opt_sigma = 'B'
-                    sigma2 = np.array(self.Discrepancy.parameters[key])
+            elif opt_sigma == 'B':
+                sigma2 = np.array(self.Discrepancy.parameters[key])
 
-            elif not isinstance(self.Discrepancy.InputDisc, str):
-                # Option C: The sigma2 is unknown (bias term including error)
-                opt_sigma = 'C'
-                self.Discrepancy.opt_sigma = opt_sigma
+            elif opt_sigma == 'C':
                 n_measurement = self.measured_data[key].values.shape
                 sigma2 = np.zeros((n_measurement[0]))
-
             total_sigma2[key] = sigma2
 
-            self.Discrepancy.opt_sigma = opt_sigma
-            self.Discrepancy.total_sigma2 = total_sigma2
+        self.Discrepancy.total_sigma2 = total_sigma2
 
         # If inferred sigma2s obtained from e.g. calibration are given
         try:
-            self.sigma2s = self.Discrepancy.get_sample(self.n_samples)
+            self.sigma2s = self.Discrepancy.get_sample(self.n_prior_samples)
         except:
-            pass
+            pass  # TODO: should an error be raised in this case? Should this at least be checked against opt_sigma?
 
         # ---------------- Bootstrap & TOM --------------------
         if self.bootstrap or self.bayes_loocv or self.just_analysis:
-            if len(self.perturbed_data) == 0:
-                # zero mean noise Adding some noise to the observation function
-                self.perturbed_data = self._perturb_data(
-                    self.measured_data, output_names
-                    )
-            else:
-                self.n_bootstrap_itrs = len(self.perturbed_data)
-
-            # -------- Model Discrepancy -----------
-            if hasattr(self, 'error_model') and self.error_model \
-               and self.name.lower() != 'calib':
-                # Select posterior mean as MAP
-                MAP_theta = self.samples.mean(axis=0).reshape((1, n_params))
-                # MAP_theta = stats.mode(self.samples,axis=0)[0]
-
-                # Evaluate the (meta-)model at the MAP
-                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=MAP_theta)
-
-                # Train a GPR meta-model using MAP
-                self.error_MetaModel = MetaModel.create_model_error(
-                    self.bias_inputs, y_MAP, Name=self.name
-                    )
+            self.perform_bootstrap(total_sigma2)
+            if self.bmc:
+                return self
+        else:
+            print('No bootstrap for TOM performed!')  # TODO: stop the code? Use n_bootstrap = 1?
 
-            # -----------------------------------------------------
-            # ----- Loop over the perturbed observation data ------
-            # -----------------------------------------------------
-            # Initilize arrays
-            logLikelihoods = np.zeros((self.n_samples, self.n_bootstrap_itrs),
-                                      dtype=np.float16)
-            BME_Corr = np.zeros((self.n_bootstrap_itrs))
-            log_BME = np.zeros((self.n_bootstrap_itrs))
-            KLD = np.zeros((self.n_bootstrap_itrs))
-            inf_entropy = np.zeros((self.n_bootstrap_itrs))
-
-            # Compute the prior predtions
-            # Evaluate the MetaModel
-            if self.emulator:
-                y_hat, y_std = MetaModel.eval_metamodel(samples=self.samples)
-                self.__mean_pce_prior_pred = y_hat
-                self._std_pce_prior_pred = y_std
+        # ---------------- Parameter Bayesian inference ----------------
+        # Convert to a dataframe if samples are provided after calibration.
+        MCMC_Obj = None
+        if self.name.lower() == 'valid':
+            self.posterior_df = pd.DataFrame(self.prior_samples, columns=self.engine.ExpDesign.par_names)
+        # Instantiate the MCMC object
+        elif self.inference_method.lower() == 'mcmc':
+            MCMC_Obj = MCMC(self)
+            self.posterior_df = MCMC_Obj.run_sampler(
+                self.measured_data, total_sigma2
+            )
+        # Rejection sampling
+        elif self.inference_method.lower() == 'rejection':
+            self.posterior_df = self._rejection_sampling()
+        else:
+            raise AttributeError('The chosen inference method is not available!')
 
-                # Correct the predictions with Model discrepancy
-                if hasattr(self, 'error_model') and self.error_model:
-                    y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
-                        self.bias_inputs, self.__mean_pce_prior_pred
-                        )
-                    self.__mean_pce_prior_pred = y_hat_corr
-                    self._std_pce_prior_pred = y_std
+        # Provide posterior's summary
+        print('\n')
+        print('-' * 15 + 'Posterior summary' + '-' * 15)
+        pd.options.display.max_columns = None
+        pd.options.display.max_rows = None
+        print(self.posterior_df.describe())
+        print('-' * 50)
 
-                # Surrogate model's error using RMSE of test data
-                if hasattr(MetaModel, 'rmse'):
-                    surrError = MetaModel.rmse
-                else:
-                    surrError = None
+        # -------- Model Discrepancy -----------
+        if self.error_model and self.name.lower() == 'calib' and MCMC_Obj is not None:  # TODO: where is this used
+            # and what does it actually do there?
+            self.create_error_model(opt_sigma=opt_sigma,
+                                    type_='posterior', sampler=MCMC_Obj)
 
-            else:
-                # Evaluate the original model
-                self.__model_prior_pred = self._eval_model(
-                    samples=self.samples, key='PriorPred'
-                    )
-                surrError = None
+        # -------- Posterior predictive -----------
+        self._posterior_predictive()
 
-            # Start the likelihood-BME computations for the perturbed data
-            for itr_idx, data in tqdm(
-                    enumerate(self.perturbed_data),
-                    total=self.n_bootstrap_itrs,
-                    desc="Bootstrapping the BME calculations", ascii=True
-                    ):
+        # ------------------ Visualization --------------------
+        # Posterior parameters
+        self.plot_post_params(opt_sigma)
 
-                # ---------------- Likelihood calculation ----------------
-                if self.emulator:
-                    model_evals = self.__mean_pce_prior_pred
-                else:
-                    model_evals = self.__model_prior_pred
-
-                # Leave one out
-                if self.bayes_loocv or self.just_analysis:
-                    self.selected_indices = np.nonzero(data)[0]
-
-                # Prepare data dataframe
-                nobs = list(self.measured_data.count().values[1:])
-                numbers = list(np.cumsum(nobs))
-                indices = list(zip([0] + numbers, numbers))
-                data_dict = {
-                    output_names[i]: data[j:k] for i, (j, k) in
-                    enumerate(indices)
-                    }
-                #print(output_names)
-                #print(indices)
-                #print(numbers)
-                #print(nobs)
-                #print(self.measured_data)
-                #for i, (j, k) in enumerate(indices):
-                #    print(i,j,k)
-                #print(data)
-                #print(data_dict)
-                #stop
-
-                # Unknown sigma2
-                if opt_sigma == 'C' or hasattr(self, 'sigma2s'):
-                    logLikelihoods[:, itr_idx] = self.normpdf(
-                        model_evals, data_dict, total_sigma2,
-                        sigma2=self.sigma2s, std=surrError
-                        )
-                else:
-                    # known sigma2
-                    logLikelihoods[:, itr_idx] = self.normpdf(
-                        model_evals, data_dict, total_sigma2,
-                        std=surrError
-                        )
+        # Plot MAP
+        if self.plot_map_pred:
+            self._plot_max_a_posteriori()
 
-                # ---------------- BME Calculations ----------------
-                # BME (log)
-                log_BME[itr_idx] = np.log(
-                    np.nanmean(np.exp(logLikelihoods[:, itr_idx],
-                                      dtype=np.longdouble))#float128))
-                    )
+        # Plot log_BME dist
+        if self.bootstrap:
+            self.plot_log_BME()
 
-                # BME correction when using Emulator
-                if self.emulator:
-                    BME_Corr[itr_idx] = self.__corr_factor_BME(
-                        data_dict, total_sigma2, log_BME[itr_idx]
-                        )
+        # Plot posterior predictive
+        if self.plot_post_pred:
+            self._plot_post_predictive()
 
-                # Rejection Step
-                if 'kld' in list(map(str.lower, self.valid_metrics)) and\
-                   'inf_entropy' in list(map(str.lower, self.valid_metrics)):
-                    # Random numbers between 0 and 1
-                    unif = np.random.rand(1, self.n_samples)[0]
-
-                    # Reject the poorly performed prior
-                    Likelihoods = np.exp(logLikelihoods[:, itr_idx],
-                                         dtype=np.float64)
-                    accepted = (Likelihoods/np.max(Likelihoods)) >= unif
-                    posterior = self.samples[accepted]
-
-                    # Posterior-based expectation of likelihoods
-                    postExpLikelihoods = np.mean(
-                        logLikelihoods[:, itr_idx][accepted]
-                        )
+        return self
 
-                    # Calculate Kullback-Leibler Divergence
-                    KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
-
-                # Posterior-based expectation of prior densities
-                if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):
-                    n_thread = int(0.875 * multiprocessing.cpu_count())
-                    with multiprocessing.Pool(n_thread) as p:
-                        postExpPrior = np.mean(np.concatenate(
-                            p.map(
-                                self.engine.ExpDesign.JDist.pdf,
-                                np.array_split(posterior.T, n_thread, axis=1))
-                            )
-                            )
-                    # Information Entropy based on Entropy paper Eq. 38
-                    inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - \
-                        postExpLikelihoods
-
-                # Clear memory
-                gc.collect(generation=2)
-
-            # ---------- Store metrics for perturbed data set ----------------
-            # Likelihoods (Size: n_samples, n_bootstrap_itr)
-            self.log_likes = logLikelihoods
-
-            # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
-            self.log_BME = log_BME
-
-            # BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
-            if self.emulator:
-                self.log_BME_corr_factor = BME_Corr
+    def create_error_model(self, type_='posterior', opt_sigma='B', sampler=None):
+        """
+        Creates an error model in the engine.MetaModel based on input
+        distribution samples of the chosen type.
 
-            if 'kld' in list(map(str.lower, self.valid_metrics)):
-                self.KLD = KLD
-            if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):
-                self.inf_entropy = inf_entropy
+        Parameters
+        ----------
+        opt_sigma : string, optional
+            Type of uncertainty description, only used if type_=='posterior'.
+            The default is 'B'
+        type_ : string
+            Type of parameter samples to use, either 'prior' or 'posterior'. 
+            The default is 'posterior'.
+        sampler : MCMC, optional
+            Should be an MCMC object if type_=='posterior' and MCMC is used in
+            the inference. In all other cases this parameter is not needed.
 
-            # BME = BME + BMECorrFactor
-            if self.emulator:
-                self.log_BME += self.log_BME_corr_factor
+        Returns
+        -------
+        None.
 
-        # ---------------- Parameter Bayesian inference ----------------
-        if self.inference_method.lower() == 'mcmc':
-            # Instantiate the MCMC object
-            MCMC_Obj = MCMC(self)
-            self.posterior_df = MCMC_Obj.run_sampler(
-                self.measured_data, total_sigma2
-                )
+        """
+        n_params = self.engine.MetaModel.n_params
 
-        elif self.name.lower() == 'valid':
-            # Convert to a dataframe if samples are provided after calibration.
-            self.posterior_df = pd.DataFrame(self.samples, columns=par_names)
+        # Get MAP estimate from prior samples
+        if type_ == 'prior':
+            # Select the prior mean as the MAP estimate  # TODO: confirm this estimator choice
+            MAP_theta = self.prior_samples.mean(axis=0).reshape((1, n_params))
 
-        else:
-            # Rejection sampling
-            self.posterior_df = self._rejection_sampling()
+            # Evaluate the (meta-)model at the MAP
+            y_MAP, y_std_MAP = self.engine.MetaModel.eval_metamodel(samples=MAP_theta)
 
-        # Provide posterior's summary
-        print('\n')
-        print('-'*15 + 'Posterior summary' + '-'*15)
-        pd.options.display.max_columns = None
-        pd.options.display.max_rows = None
-        print(self.posterior_df.describe())
-        print('-'*50)
+            # Train a GPR meta-model using MAP
+            self.error_MetaModel = self.engine.MetaModel.create_model_error(
+                self.bias_inputs, y_MAP, self.measured_data, name=self.name
+            )
 
-        # -------- Model Discrepancy -----------
-        if hasattr(self, 'error_model') and self.error_model \
-           and self.name.lower() == 'calib':
+        # Get MAP estimate from posterior samples
+        if type_ == 'posterior':
             if self.inference_method.lower() == 'mcmc':
-                self.error_MetaModel = MCMC_Obj.error_MetaModel
+                self.error_MetaModel = sampler.error_MetaModel
             else:
                 # Select posterior mean as MAP
                 if opt_sigma == "B":
                     posterior_df = self.posterior_df.values
                 else:
-                    posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+                    posterior_df = self.posterior_df.values[:, :-self.engine.Model.n_outputs]
 
                 # Select posterior mean as Maximum a posteriori
                 map_theta = posterior_df.mean(axis=0).reshape((1, n_params))
                 # map_theta = stats.mode(Posterior_df,axis=0)[0]
 
                 # Evaluate the (meta-)model at the MAP
-                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=map_theta)
+                y_MAP, y_std_MAP = self.engine.MetaModel.eval_metamodel(samples=map_theta)
 
                 # Train a GPR meta-model using MAP
-                self.error_MetaModel = MetaModel.create_model_error(
-                    self.bias_inputs, y_MAP, Name=self.name
-                    )
-
-        # -------- Posterior perdictive -----------
-        self._posterior_predictive()
-
-        # -----------------------------------------------------
-        # ------------------ Visualization --------------------
-        # -----------------------------------------------------
-        # Create Output directory, if it doesn't exist already.
-        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
-        os.makedirs(out_dir, exist_ok=True)
-
-        # -------- Posteior parameters --------
-        if opt_sigma != "B":
-            par_names.extend(
-                [self.Discrepancy.InputDisc.Marginals[i].name for i
-                 in range(len(self.Discrepancy.InputDisc.Marginals))]
+                self.error_MetaModel = self.engine.MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, self.measured_data, name=self.name
                 )
-        # Pot with corner
-        figPosterior = corner.corner(self.posterior_df.to_numpy(),
-                                     labels=par_names,
-                                     quantiles=[0.15, 0.5, 0.85],
-                                     show_titles=True,
-                                     title_fmt=self.corner_title_fmt,
-                                     labelpad=0.2,
-                                     use_math_text=True,
-                                     title_kwargs={"fontsize": 28},
-                                     plot_datapoints=False,
-                                     plot_density=False,
-                                     fill_contours=True,
-                                     smooth=0.5,
-                                     smooth1d=0.5)
 
-        # Loop over axes and set x limits
-        if opt_sigma == "B":
-            axes = np.array(figPosterior.axes).reshape(
-                (len(par_names), len(par_names))
-                )
-            for yi in range(len(par_names)):
-                ax = axes[yi, yi]
-                ax.set_xlim(self.engine.ExpDesign.bound_tuples[yi])
-                for xi in range(yi):
-                    ax = axes[yi, xi]
-                    ax.set_xlim(self.engine.ExpDesign.bound_tuples[xi])
-        plt.close()
+    def perform_bootstrap(self, total_sigma2):
+        """
+        Perform bootstrap to get TOM (??)
+        
+        Parameters
+        ----------
+        total_sigma2 : dict
+            Dictionary containing the sigma2 for the training(?) data
+        Returns
+        -------
+        None.
 
-        # Turn off gridlines
-        for ax in figPosterior.axes:
-            ax.grid(False)
+        """
+        MetaModel = self.engine.MetaModel
+        output_names = self.engine.Model.Output.names
+        opt_sigma = self.Discrepancy.opt_sigma
 
-        if self.emulator:
-            plotname = f'/Posterior_Dist_{Model.name}_emulator'
+        # Adding some zero mean noise to the observation function
+        if len(self.perturbed_data) == 0:
+            self.perturbed_data = self._perturb_data(
+                self.measured_data, output_names
+            )
         else:
-            plotname = f'/Posterior_Dist_{Model.name}'
+            self.n_bootstrap_itrs = len(self.perturbed_data)
 
-        figPosterior.set_size_inches((24, 16))
-        figPosterior.savefig(f'./{out_dir}{plotname}.pdf',
-                             bbox_inches='tight')
+        # -------- Model Discrepancy -----------
+        if self.error_model and self.name.lower() == 'valid':  # TODO: what should be set so that this is tested?
+            self.create_error_model(type_='prior')
+        # -----------------------------------------------------
+        # ----- Loop over the perturbed observation data ------
+        # -----------------------------------------------------
+        # Initialize arrays
+        logLikelihoods = np.zeros((self.n_prior_samples, self.n_bootstrap_itrs),
+                                  dtype=np.float16)
+        BME_Corr = np.zeros(self.n_bootstrap_itrs)
+        log_BME = np.zeros(self.n_bootstrap_itrs)
+        KLD = np.zeros(self.n_bootstrap_itrs)
+        inf_entropy = np.zeros(self.n_bootstrap_itrs)
+
+        # Compute the prior predictions
+        # Evaluate the MetaModel
+        if self.emulator:
+            y_hat, y_std = MetaModel.eval_metamodel(samples=self.prior_samples)
+            self.__mean_pce_prior_pred = y_hat
+            self._std_pce_prior_pred = y_std
+
+            # Correct the predictions with Model discrepancy
+            if self.error_model:  # TODO this does not check for calib?
+                y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
+                    self.bias_inputs, self.__mean_pce_prior_pred)
+                self.__mean_pce_prior_pred = y_hat_corr
+                self._std_pce_prior_pred = y_std
 
-        # -------- Plot MAP --------
-        if self.plot_map_pred:
-            self._plot_max_a_posteriori()
+            # Surrogate model's error using RMSE of test data
+            if MetaModel.rmse is not None:
+                surrError = MetaModel.rmse
+            else:
+                surrError = None
+            model_evals = self.__mean_pce_prior_pred
 
-        # -------- Plot log_BME dist --------
-        if self.bootstrap:
+        # Evaluate the model
+        else:
+            self.__model_prior_pred = self._eval_model(
+                samples=self.prior_samples, key='PriorPred')
+            model_evals = self.__model_prior_pred
+            surrError = None
+
+        # Start the likelihood-BME computations for the perturbed data
+        for itr_idx, data in tqdm(
+                enumerate(self.perturbed_data),
+                total=self.n_bootstrap_itrs,
+                desc="Bootstrapping the BME calculations", ascii=True
+        ):
+
+            # ---------------- Likelihood calculation ----------------
+            # Leave one out (see form of perturbed data)
+            if self.bayes_loocv or self.just_analysis:
+                # Consider only non-zero entries
+                self.selected_indices = np.nonzero(data)[0]
+
+            # Prepare data dataframe # TODO: what's with this transformation?
+            nobs = list(self.measured_data.count().values[1:])
+            numbers = list(np.cumsum(nobs))
+            indices = list(zip([0] + numbers, numbers))
+            data_dict = {
+                output_names[i]: data[j:k] for i, (j, k) in
+                enumerate(indices)
+            }
+
+            # Unknown sigma2
+            if opt_sigma == 'C' or self.sigma2s is not None:
+                logLikelihoods[:, itr_idx] = self.normpdf(
+                    model_evals, data_dict, total_sigma2,
+                    sigma2=self.sigma2s, std=surrError
+                )
+            else:
+                # known sigma2
+                logLikelihoods[:, itr_idx] = self.normpdf(
+                    model_evals, data_dict, total_sigma2,
+                    std=surrError
+                )
+            # ---------------- BME Calculations ----------------
+            # BME (log)
+            log_BME[itr_idx] = np.log(
+                np.nanmean(np.exp(logLikelihoods[:, itr_idx],
+                                  dtype=self.dtype))
+            )
+
+            # BME correction when using Emulator
+            if self.emulator:
+                BME_Corr[itr_idx] = self._corr_factor_BME(
+                    data_dict, total_sigma2, log_BME[itr_idx]
+                )
 
-            # Computing the TOM performance
-            self.log_BME_tom = stats.chi2.rvs(
-                self.n_tot_measurement, size=self.log_BME.shape[0]
+            # Rejection Step
+            if 'kld' in list(map(str.lower, self.valid_metrics)) and \
+                    'inf_entropy' in list(map(str.lower, self.valid_metrics)):  # TODO: why and and not or?
+                # Random numbers between 0 and 1
+                unif = np.random.rand(1, self.n_prior_samples)[0]
+
+                # Reject the poorly performed prior
+                Likelihoods = np.exp(logLikelihoods[:, itr_idx],
+                                     dtype=np.float64)
+                accepted = (Likelihoods / np.max(Likelihoods)) >= unif
+                posterior = self.prior_samples[accepted]
+
+                # Posterior-based expectation of likelihoods
+                postExpLikelihoods = np.mean(
+                    logLikelihoods[:, itr_idx][accepted]
                 )
 
-            fig, ax = plt.subplots()
-            sns.kdeplot(self.log_BME_tom, ax=ax, color="green", shade=True)
-            sns.kdeplot(
-                self.log_BME, ax=ax, color="blue", shade=True,
-                label='Model BME')
+                # Calculate Kullback-Leibler Divergence
+                KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
 
-            ax.set_xlabel('log$_{10}$(BME)')
-            ax.set_ylabel('Probability density')
+            # Posterior-based expectation of prior densities
+            if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):
+                n_thread = int(0.875 * multiprocessing.cpu_count())
+                with multiprocessing.Pool(n_thread) as p:
+                    postExpPrior = np.mean(np.concatenate(
+                        p.map(
+                            self.engine.ExpDesign.JDist.pdf,
+                            np.array_split(posterior.T, n_thread, axis=1))
+                    )
+                    )
+                # Information Entropy based on Entropy paper Eq. 38
+                inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - postExpLikelihoods
 
-            legend_elements = [
-                Patch(facecolor='green', edgecolor='green', label='TOM BME'),
-                Patch(facecolor='blue', edgecolor='blue', label='Model BME')
-                ]
-            ax.legend(handles=legend_elements)
+            # Clear memory
+            gc.collect(generation=2)
 
-            if self.emulator:
-                plotname = f'/BME_hist_{Model.name}_emulator'
-            else:
-                plotname = f'/BME_hist_{Model.name}'
+        # ---------- Store metrics for perturbed data set ----------------
+        # Likelihoods (Size: n_samples, n_bootstrap_itr)
+        self.log_likes = logLikelihoods
 
-            plt.savefig(f'./{out_dir}{plotname}.pdf', bbox_inches='tight')
-            plt.show()
-            plt.close()
+        # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
+        self.log_BME = log_BME
 
-        # -------- Posteior perdictives --------
-        if self.plot_post_pred:
-            # Plot the posterior predictive
-            self._plot_post_predictive()
+        # BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
+        # BME = BME + BMECorrFactor
+        if self.emulator:
+            self.log_BME += BME_Corr
 
-        return self
+        if 'kld' in list(map(str.lower, self.valid_metrics)):
+            self.KLD = KLD
+        if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):
+            self.inf_entropy = inf_entropy
 
     # -------------------------------------------------------------------------
     def _perturb_data(self, data, output_names):
         """
-        Returns an array with n_bootstrap_itrs rowsof perturbed data.
+        Returns an array with n_bootstrap_itrs rows of perturbed data.
         The first row includes the original observation data.
         If `self.bayes_loocv` is True, a 2d-array will be returned with
         repeated rows and zero diagonal entries.
@@ -608,7 +706,7 @@ class BayesInference:
         data : pandas DataFrame
             Observation data.
         output_names : list
-            List of the output names.
+            The output names.
 
         Returns
         -------
@@ -620,12 +718,11 @@ class BayesInference:
         obs_data = data[output_names].values
         n_measurement, n_outs = obs_data.shape
         self.n_tot_measurement = obs_data[~np.isnan(obs_data)].shape[0]
-        # Number of bootstrap iterations
-        if self.bayes_loocv:
-            self.n_bootstrap_itrs = self.n_tot_measurement
 
         # Pass loocv dataset
         if self.bayes_loocv:
+            # Number of bootstrap iterations
+            self.n_bootstrap_itrs = self.n_tot_measurement
             obs = obs_data.T[~np.isnan(obs_data.T)]
             final_data = np.repeat(np.atleast_2d(obs), self.n_bootstrap_itrs,
                                    axis=0)
@@ -633,15 +730,18 @@ class BayesInference:
             return final_data
 
         else:
+            # Init return data with original data
             final_data = np.zeros(
                 (self.n_bootstrap_itrs, self.n_tot_measurement)
-                )
+            )
             final_data[0] = obs_data.T[~np.isnan(obs_data.T)]
             for itrIdx in range(1, self.n_bootstrap_itrs):
                 data = np.zeros((n_measurement, n_outs))
                 for idx in range(len(output_names)):
+                    # Perturb the data
                     std = np.nanstd(obs_data[:, idx])
                     if std == 0:
+                        print('Note: Use std=0.001 for perturbation')
                         std = 0.001
                     noise = std * noise_level
                     data[:, idx] = np.add(
@@ -653,45 +753,17 @@ class BayesInference:
 
             return final_data
 
-    # -------------------------------------------------------------------------
-    def _logpdf(self, x, mean, cov):
-        """
-        computes the likelihood based on a multivariate normal distribution.
-
-        Parameters
-        ----------
-        x : TYPE
-            DESCRIPTION.
-        mean : array_like
-            Observation data.
-        cov : 2d array
-            Covariance matrix of the distribution.
-
-        Returns
-        -------
-        log_lik : float
-            Log likelihood.
-
-        """
-        n = len(mean)
-        L = spla.cholesky(cov, lower=True)
-        beta = np.sum(np.log(np.diag(L)))
-        dev = x - mean
-        alpha = dev.dot(spla.cho_solve((L, True), dev))
-        log_lik = -0.5 * alpha - beta - n / 2. * np.log(2 * np.pi)
-        return log_lik
-
     # -------------------------------------------------------------------------
     def _eval_model(self, samples=None, key='MAP'):
         """
-        Evaluates Forward Model.
+        Evaluates Forward Model and zips the results
 
         Parameters
         ----------
         samples : array of shape (n_samples, n_params), optional
             Parameter sets. The default is None.
         key : str, optional
-            Key string to be passed to the run_model_parallel method.
+            Descriptive key string for the run_model_parallel method.
             The default is 'MAP'.
 
         Returns
@@ -700,18 +772,17 @@ class BayesInference:
             Model outputs.
 
         """
-        MetaModel = self.MetaModel
         Model = self.engine.Model
 
         if samples is None:
-            self.samples = self.engine.ExpDesign.generate_samples(
-                self.n_samples, 'random')
+            self.prior_samples = self.engine.ExpDesign.generate_samples(
+                self.n_prior_samples, 'random')
         else:
-            self.samples = samples
-            self.n_samples = len(samples)
+            self.prior_samples = samples
+            self.n_prior_samples = len(samples)
 
         model_outputs, _ = Model.run_model_parallel(
-            self.samples, key_str=key+self.name)
+            self.prior_samples, key_str=key + self.name)
 
         # Clean up
         # Zip the subdirectories
@@ -724,55 +795,6 @@ class BayesInference:
 
         return model_outputs
 
-    # -------------------------------------------------------------------------
-    def _kernel_rbf(self, X, hyperparameters):
-        """
-        Isotropic squared exponential kernel.
-
-        Higher l values lead to smoother functions and therefore to coarser
-        approximations of the training data. Lower l values make functions
-        more wiggly with wide uncertainty regions between training data points.
-
-        sigma_f controls the marginal variance of b(x)
-
-        Parameters
-        ----------
-        X : ndarray of shape (n_samples_X, n_features)
-
-        hyperparameters : Dict
-            Lambda characteristic length
-            sigma_f controls the marginal variance of b(x)
-            sigma_0 unresolvable error nugget term, interpreted as random
-                    error that cannot be attributed to measurement error.
-        Returns
-        -------
-        var_cov_matrix : ndarray of shape (n_samples_X,n_samples_X)
-            Kernel k(X, X).
-
-        """
-        from sklearn.gaussian_process.kernels import RBF
-        min_max_scaler = preprocessing.MinMaxScaler()
-        X_minmax = min_max_scaler.fit_transform(X)
-
-        nparams = len(hyperparameters)
-        # characteristic length (0,1]
-        Lambda = hyperparameters[0]
-        # sigma_f controls the marginal variance of b(x)
-        sigma2_f = hyperparameters[1]
-
-        # cov_matrix = sigma2_f*rbf_kernel(X_minmax, gamma = 1/Lambda**2)
-
-        rbf = RBF(length_scale=Lambda)
-        cov_matrix = sigma2_f * rbf(X_minmax)
-        if nparams > 2:
-            # (unresolvable error) nugget term that is interpreted as random
-            # error that cannot be attributed to measurement error.
-            sigma2_0 = hyperparameters[2:]
-            for i, j in np.ndindex(cov_matrix.shape):
-                cov_matrix[i, j] += np.sum(sigma2_0) if i == j else 0
-
-        return cov_matrix
-
     # -------------------------------------------------------------------------
     def normpdf(self, outputs, obs_data, total_sigma2s, sigma2=None, std=None):
         """
@@ -788,7 +810,7 @@ class BayesInference:
             A dictionary/dataframe containing the observation data.
         total_sigma2s : dict
             A dictionary with known values of the covariance diagonal entries,
-            a.k.a sigma^2.
+            a.k.a. sigma^2.
         sigma2 : array, optional
             An array of the sigma^2 samples, when the covariance diagonal
             entries are unknown and are being jointly inferred. The default is
@@ -809,11 +831,11 @@ class BayesInference:
 
         # Extract the requested model outputs for likelihood calulation
         if self.req_outputs is None:
-            req_outputs = Model.Output.names
+            req_outputs = Model.Output.names  # TODO: should this then be saved as self.req_outputs?
         else:
             req_outputs = list(self.req_outputs)
 
-        # Loop over the outputs
+        # Loop over the output keys
         for idx, out in enumerate(req_outputs):
 
             # (Meta)Model Output
@@ -825,26 +847,25 @@ class BayesInference:
             except AttributeError:
                 data = obs_data[out][~np.isnan(obs_data[out])]
 
-            # Prepare sigma2s
+            # Prepare data uncertainty / error estimation (sigma2s)
             non_nan_indices = ~np.isnan(total_sigma2s[out])
             tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout]
 
-            # Add the std of the PCE is chosen as emulator.
+            # Add the std of the PCE if an emulator is used
             if self.emulator:
                 if std is not None:
-                    tot_sigma2s += std[out]**2
-
-            # Covariance Matrix
-            covMatrix = np.diag(tot_sigma2s)
+                    tot_sigma2s += std[out] ** 2
 
             # Select the data points to compare
             try:
                 indices = self.selected_indices[out]
             except:
                 indices = list(range(nout))
-            covMatrix = np.diag(covMatrix[indices, indices])
 
-            # If sigma2 is not given, use given total_sigma2s
+            # Set up Covariance Matrix
+            covMatrix = np.diag(np.diag(tot_sigma2s)[indices, indices])
+
+            # If sigma2 is not given, use given total_sigma2s and move to next itr
             if sigma2 is None:
                 logLik += stats.multivariate_normal.logpdf(
                     outputs[out][:, indices], data[indices], covMatrix)
@@ -860,26 +881,24 @@ class BayesInference:
                 # Covariance Matrix
                 covMatrix = np.diag(tot_sigma2s)
 
-                if sigma2 is not None:
-                    # Check the type error term
-                    if hasattr(self, 'bias_inputs') and \
-                       not hasattr(self, 'error_model'):
-                        # Infer a Bias model usig Gaussian Process Regression
-                        bias_inputs = np.hstack(
-                            (self.bias_inputs[out],
-                             tot_outputs[s_idx].reshape(-1, 1)))
-
-                        params = sigma2[s_idx, idx*3:(idx+1)*3]
-                        covMatrix = self._kernel_rbf(bias_inputs, params)
-                    else:
-                        # Infer equal sigma2s
-                        try:
-                            sigma_2 = sigma2[s_idx, idx]
-                        except TypeError:
-                            sigma_2 = 0.0
+                # Check the type error term
+                if self.bias_inputs is not None and self.error_model is None:
+                    # Infer a Bias model using Gaussian Process Regression
+                    bias_inputs = np.hstack(
+                        (self.bias_inputs[out],
+                         tot_outputs[s_idx].reshape(-1, 1)))
+
+                    params = sigma2[s_idx, idx * 3:(idx + 1) * 3]
+                    covMatrix = _kernel_rbf(bias_inputs, params)
+                else:
+                    # Infer equal sigma2s
+                    try:
+                        sigma_2 = sigma2[s_idx, idx]
+                    except TypeError:
+                        sigma_2 = 0.0
 
-                        covMatrix += sigma_2 * np.eye(nout)
-                        # covMatrix = np.diag(sigma2 * total_sigma2s)
+                    covMatrix += sigma_2 * np.eye(nout)
+                    # covMatrix = np.diag(sigma2 * total_sigma2s)
 
                 # Select the data points to compare
                 try:
@@ -889,86 +908,45 @@ class BayesInference:
                 covMatrix = np.diag(covMatrix[indices, indices])
 
                 # Compute loglikelihood
-                logliks[s_idx] = self._logpdf(
+                logliks[s_idx] = _logpdf(
                     tot_outputs[s_idx, indices], data[indices], covMatrix
-                    )
-
+                )
             logLik += logliks
         return logLik
 
     # -------------------------------------------------------------------------
-    def _corr_factor_BME_old(self, Data, total_sigma2s, posterior):
+    def _corr_factor_BME(self, obs_data, total_sigma2s, logBME):
         """
         Calculates the correction factor for BMEs.
-        """
-        MetaModel = self.MetaModel
-        OrigModelOutput = self.engine.ExpDesign.Y
-        Model = self.engine.Model
-
-        # Posterior with guassian-likelihood
-        postDist = stats.gaussian_kde(posterior.T)
-
-        # Remove NaN
-        Data = Data[~np.isnan(Data)]
-        total_sigma2s = total_sigma2s[~np.isnan(total_sigma2s)]
-
-        # Covariance Matrix
-        covMatrix = np.diag(total_sigma2s[:self.n_tot_measurement])
-
-        # Extract the requested model outputs for likelihood calulation
-        if self.req_outputs is None:
-            OutputType = Model.Output.names
-        else:
-            OutputType = list(self.req_outputs)
-
-        # SampleSize = OrigModelOutput[OutputType[0]].shape[0]
-
-
-        # Flatten the OutputType for OrigModel
-        TotalOutputs = np.concatenate([OrigModelOutput[x] for x in OutputType], 1)
-
-        NrofBayesSamples = self.n_samples
-        # Evaluate MetaModel on the experimental design
-        Samples = self.engine.ExpDesign.X
-        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=Samples)
-
-        # Reset the NrofSamples to NrofBayesSamples
-        self.n_samples = NrofBayesSamples
-
-        # Flatten the OutputType for MetaModel
-        TotalPCEOutputs = np.concatenate([OutputRS[x] for x in OutputRS], 1)
-        TotalPCEstdOutputRS= np.concatenate([stdOutputRS[x] for x in stdOutputRS], 1)
+        
+        Parameters
+        ----------
+        obs_data : dict
+            A dictionary/dataframe containing the observation data.
+        total_sigma2s : dict
+            A dictionary with known values of the covariance diagonal entries,
+            a.k.a. sigma^2.
+        logBME : float
+            The log_BME obtained from the estimated likelihoods
 
-        logweight = 0
-        for i, sample in enumerate(Samples):
-            # Compute likelilhood output vs RS
-            covMatrix = np.diag(TotalPCEstdOutputRS[i]**2)
-            logLik = self._logpdf(TotalOutputs[i], TotalPCEOutputs[i], covMatrix)
-            # Compute posterior likelihood of the collocation points
-            logpostLik = np.log(postDist.pdf(sample[:, None]))[0]
-            if logpostLik != -np.inf:
-                logweight += logLik + logpostLik
-        return logweight
+        Returns
+        -------
+        np.log(weights) : float
+            Log of the BME correction factor.
 
-    # -------------------------------------------------------------------------
-    def __corr_factor_BME(self, obs_data, total_sigma2s, logBME):
         """
-        Calculates the correction factor for BMEs.
-        """
-        MetaModel = self.MetaModel
+        # Extract the requested model outputs for likelihood calculation
+        MetaModel = self.engine.MetaModel
         samples = self.engine.ExpDesign.X
         model_outputs = self.engine.ExpDesign.Y
-        Model = self.engine.Model
         n_samples = samples.shape[0]
-
-        # Extract the requested model outputs for likelihood calulation
-        output_names = Model.Output.names
+        output_names = self.engine.Model.Output.names
 
         # Evaluate MetaModel on the experimental design and ValidSet
         OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=samples)
 
-        logLik_data = np.zeros((n_samples))
-        logLik_model = np.zeros((n_samples))
+        logLik_data = np.zeros(n_samples)
+        logLik_model = np.zeros(n_samples)
         # Loop over the outputs
         for idx, out in enumerate(output_names):
 
@@ -997,7 +975,7 @@ class BayesInference:
                 y_m_hat = OutputRS[out][i]
 
                 # CovMatrix with the surrogate error
-                covMatrix = np.eye(len(y_m)) * 1/(2*np.pi)
+                covMatrix = np.eye(len(y_m)) * 1 / (2 * np.pi)
 
                 # Select the data points to compare
                 try:
@@ -1008,20 +986,20 @@ class BayesInference:
                 covMatrix_data = np.diag(covMatrix_data[indices, indices])
 
                 # Compute likelilhood output vs data
-                logLik_data[i] += self._logpdf(
+                logLik_data[i] += _logpdf(
                     y_m_hat[indices], data[indices],
                     covMatrix_data
-                    )
+                )
 
                 # Compute likelilhood output vs surrogate
-                logLik_model[i] += self._logpdf(
+                logLik_model[i] += _logpdf(
                     y_m_hat[indices], y_m[indices],
                     covMatrix
-                    )
+                )
 
         # Weight
         logLik_data -= logBME
-        weights = np.mean(np.exp(logLik_model+logLik_data))
+        weights = np.mean(np.exp(logLik_model + logLik_data))
 
         return np.log(weights)
 
@@ -1037,45 +1015,43 @@ class BayesInference:
             Posterior samples of the input parameters.
 
         """
+        if self.prior_samples is None:
+            raise AttributeError('No prior samples available!')
 
-        MetaModel = self.MetaModel
+        if self.log_likes is None:
+            raise AttributeError('No log-likelihoods available!')
+
+        # Get sigmas # TODO: is this data uncertainty?
         try:
             sigma2_prior = self.Discrepancy.sigma2_prior
         except:
             sigma2_prior = None
 
-        # Check if the discrepancy is defined as a distribution:
-        samples = self.samples
-
+        # Combine samples and sigma2 for the return
+        samples = self.prior_samples
         if sigma2_prior is not None:
             samples = np.hstack((samples, sigma2_prior))
 
         # Take the first column of Likelihoods (Observation data without noise)
         if self.just_analysis or self.bayes_loocv:
-            index = self.n_tot_measurement-1
-            likelihoods = np.exp(self.log_likes[:, index], dtype=np.longdouble)#np.float128)
+            index = self.n_tot_measurement - 1
         else:
-            likelihoods = np.exp(self.log_likes[:, 0], dtype=np.longdouble)#np.float128)
+            index = 0
+
+        # Use longdouble on windows, float128 on linux
+        likelihoods = np.exp(self.log_likes[:, index], dtype=self.dtype)
 
         n_samples = len(likelihoods)
-        norm_ikelihoods = likelihoods / np.max(likelihoods)
+        norm_likelihoods = likelihoods / np.max(likelihoods)
 
         # Normalize based on min if all Likelihoods are zero
         if all(likelihoods == 0.0):
             likelihoods = self.log_likes[:, 0]
-            norm_ikelihoods = likelihoods / np.min(likelihoods)
+            norm_likelihoods = likelihoods / np.min(likelihoods)
 
-        # Random numbers between 0 and 1
+        # Reject the poorly performed prior compared to a uniform distribution
         unif = np.random.rand(1, n_samples)[0]
-
-        # Reject the poorly performed prior
-        accepted_samples = samples[norm_ikelihoods >= unif]
-
-        # Output the Posterior
-        par_names = self.engine.ExpDesign.par_names
-        if sigma2_prior is not None:
-            for name in self.Discrepancy.name:
-                par_names.append(name)
+        accepted_samples = samples[norm_likelihoods >= unif]
 
         return pd.DataFrame(accepted_samples, columns=sigma2_prior)
 
@@ -1097,25 +1073,21 @@ class BayesInference:
 
         """
 
-        MetaModel = self.MetaModel
+        MetaModel = self.engine.MetaModel
         Model = self.engine.Model
 
-        # Make a directory to save the prior/posterior predictive
-        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
-        os.makedirs(out_dir, exist_ok=True)
-
-        # Read observation data and perturb it if requested
+        # Read observation data and perturb it if requested # TODO: where is the perturbation?
         if self.measured_data is None:
             self.measured_data = Model.read_observation(case=self.name)
 
         if not isinstance(self.measured_data, pd.DataFrame):
             self.measured_data = pd.DataFrame(self.measured_data)
 
-        # X_values
+        # X_values and prior sigma2
         x_values = self.engine.ExpDesign.x_values
-
         try:
-            sigma2_prior = self.Discrepancy.sigma2_prior
+            sigma2_prior = self.Discrepancy.sigma2_prior  # TODO: what is this? Looks to be built for a different
+            # Discrepancy structure
         except:
             sigma2_prior = None
 
@@ -1123,45 +1095,47 @@ class BayesInference:
         posterior_df = self.posterior_df
 
         # Take care of the sigma2
-        if sigma2_prior is not None:
+        sigma2s = None
+        if sigma2_prior is not None:  # TODO: why is this the if for this code?
             try:
-                sigma2s = posterior_df[self.Discrepancy.name].values
+                sigma2s = posterior_df[self.Discrepancy.name].values  # TODO: what is Discrepancy.name?
                 posterior_df = posterior_df.drop(
                     labels=self.Discrepancy.name, axis=1
-                    )
+                )
             except:
                 sigma2s = self.sigma2s
 
         # Posterior predictive
         if self.emulator:
-            if self.inference_method == 'rejection':
-                prior_pred = self.__mean_pce_prior_pred
-            if self.name.lower() != 'calib':
-                post_pred = self.__mean_pce_prior_pred
+            if self.inference_method.lower() == 'rejection':  # TODO: combine these two? Why is there no
+                # post_pred_std for rejection sampling?
+                prior_pred = self._mean_pce_prior_pred
+            if self.name.lower() == 'valid':
+                post_pred = self._mean_pce_prior_pred
                 post_pred_std = self._std_pce_prior_pred
             else:
-                post_pred, post_pred_std = MetaModel.eval_metamodel(
+                post_pred, post_pred_std = MetaModel.eval_metamodel(  # TODO: recheck if this is needed
                     samples=posterior_df.values
-                    )
+                )
 
-        else:
-            if self.inference_method == 'rejection':
+        else:  # TODO: see emulator version
+            if self.inference_method.lower() == 'rejection':
                 prior_pred = self.__model_prior_pred
-            if self.name.lower() != 'calib':
+            if self.name.lower() == 'valid':
                 post_pred = self.__mean_pce_prior_pred,
                 post_pred_std = self._std_pce_prior_pred
             else:
                 post_pred = self._eval_model(
                     samples=posterior_df.values, key='PostPred'
-                    )
+                )
         # Correct the predictions with Model discrepancy
-        if hasattr(self, 'error_model') and self.error_model:
+        if self.error_model:
             y_hat, y_std = self.error_MetaModel.eval_model_error(
                 self.bias_inputs, post_pred
-                )
+            )
             post_pred, post_pred_std = y_hat, y_std
 
-        # Add discrepancy from likelihood Sample to the current posterior runs
+        # Add discrepancy from likelihood samples to the current posterior runs
         total_sigma2 = self.Discrepancy.total_sigma2
         post_pred_withnoise = copy.deepcopy(post_pred)
         for varIdx, var in enumerate(Model.Output.names):
@@ -1173,16 +1147,15 @@ class BayesInference:
                 tot_sigma2 = clean_sigma2[:len(pred)]
                 cov = np.diag(tot_sigma2)
 
-                # Check the type error term
+                # Account for additional error terms
                 if sigma2_prior is not None:
                     # Inferred sigma2s
-                    if hasattr(self, 'bias_inputs') and \
-                       not hasattr(self, 'error_model'):
+                    if self.bias_inputs is not None and self.error_model is None:
                         # TODO: Infer a Bias model usig GPR
                         bias_inputs = np.hstack((
                             self.bias_inputs[var], pred.reshape(-1, 1)))
-                        params = sigma2s[i, varIdx*3:(varIdx+1)*3]
-                        cov = self._kernel_rbf(bias_inputs, params)
+                        params = sigma2s[i, varIdx * 3:(varIdx + 1) * 3]
+                        cov = _kernel_rbf(bias_inputs, params)
                     else:
                         # Infer equal sigma2s
                         try:
@@ -1193,25 +1166,25 @@ class BayesInference:
                         # Convert biasSigma2s to a covMatrix
                         cov += sigma2 * np.eye(len(pred))
 
+                # Add predictive metamodel error/uncertainty
                 if self.emulator:
-                    if hasattr(MetaModel, 'rmse') and \
-                       MetaModel.rmse is not None:
+                    if MetaModel.rmse is not None:
                         stdPCE = MetaModel.rmse[var]
                     else:
                         stdPCE = post_pred_std[var][i]
                     # Expected value of variance (Assump: i.i.d stds)
-                    cov += np.diag(stdPCE**2)
+                    cov += np.diag(stdPCE ** 2)
 
                 # Sample a multivariate normal distribution with mean of
-                # prediction and variance of cov
+                # posterior prediction and variance of cov
                 post_pred_withnoise[var][i] = np.random.multivariate_normal(
                     pred, cov, 1
-                    )
+                )
 
         # ----- Prior Predictive -----
         if self.inference_method.lower() == 'rejection':
             # Create hdf5 metadata
-            hdf5file = f'{out_dir}/priorPredictive.hdf5'
+            hdf5file = f'{self.out_dir}/priorPredictive.hdf5'
             hdf5_exist = os.path.exists(hdf5file)
             if hdf5_exist:
                 os.remove(hdf5file)
@@ -1232,7 +1205,7 @@ class BayesInference:
 
         # ----- Posterior Predictive only model evaluations -----
         # Create hdf5 metadata
-        hdf5file = out_dir+'/postPredictive_wo_noise.hdf5'
+        hdf5file = self.out_dir + '/postPredictive_wo_noise.hdf5'
         hdf5_exist = os.path.exists(hdf5file)
         if hdf5_exist:
             os.remove(hdf5file)
@@ -1253,7 +1226,7 @@ class BayesInference:
 
         # ----- Posterior Predictive with noise -----
         # Create hdf5 metadata
-        hdf5file = out_dir+'/postPredictive.hdf5'
+        hdf5file = self.out_dir + '/postPredictive.hdf5'
         hdf5_exist = os.path.exists(hdf5file)
         if hdf5_exist:
             os.remove(hdf5file)
@@ -1286,11 +1259,9 @@ class BayesInference:
 
         """
 
-        MetaModel = self.MetaModel
+        MetaModel = self.engine.MetaModel
         Model = self.engine.Model
-        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
         opt_sigma = self.Discrepancy.opt_sigma
-
         # -------- Find MAP and run MetaModel and origModel --------
         # Compute the MAP
         if self.max_a_posteriori.lower() == 'mean':
@@ -1301,6 +1272,8 @@ class BayesInference:
             map_theta = Posterior_df.mean(axis=0).reshape(
                 (1, MetaModel.n_params))
         else:
+            # TODO: here just a fix, no previous mention of Posterior_df!
+            Posterior_df = None
             map_theta = stats.mode(Posterior_df.values, axis=0)[0]
         # Prin report
         print("\nPoint estimator:\n", map_theta[0])
@@ -1324,10 +1297,9 @@ class BayesInference:
         Marker = 'x'
 
         # Create a PdfPages object
-        pdf = PdfPages(f'./{out_dir}MAP_PCE_vs_Model_{self.name}.pdf')
+        pdf = PdfPages(f'./{self.out_dir}MAP_PCE_vs_Model_{self.name}.pdf')
         fig = plt.figure()
         for i, key in enumerate(Model.Output.names):
-
             y_val = map_orig_model[key]
             y_pce_val = map_metamodel_mean[key]
             y_pce_val_std = map_metamodel_std[key]
@@ -1338,13 +1310,13 @@ class BayesInference:
             plt.plot(
                 x_values, y_pce_val[i], color=Color[i], lw=2.0,
                 marker=Marker, linestyle='--', label='$Y_{MAP}^{PCE}$'
-                )
+            )
             # plot the confidence interval
             plt.fill_between(
-                x_values, y_pce_val[i] - 1.96*y_pce_val_std[i],
-                y_pce_val[i] + 1.96*y_pce_val_std[i],
+                x_values, y_pce_val[i] - 1.96 * y_pce_val_std[i],
+                y_pce_val[i] + 1.96 * y_pce_val_std[i],
                 color=Color[i], alpha=0.15
-                )
+            )
 
             # Calculate the adjusted R_squared and RMSE
             R2 = r2_score(y_pce_val.reshape(-1, 1), y_val.reshape(-1, 1))
@@ -1359,7 +1331,7 @@ class BayesInference:
             fig.canvas.draw()
             p = leg.get_window_extent().inverse_transformed(ax.transAxes)
             ax.text(
-                p.p0[1]-0.05, p.p1[1]-0.25,
+                p.p0[1] - 0.05, p.p1[1] - 0.25,
                 f'RMSE = {rmse:.3f}\n$R^2$ = {R2:.3f}',
                 transform=ax.transAxes, color='black',
                 bbox=dict(facecolor='none', edgecolor='black',
@@ -1375,6 +1347,110 @@ class BayesInference:
 
         pdf.close()
 
+    def plot_post_params(self, opt_sigma):
+        """
+        Plots the multivar. posterior parameter distribution.
+        
+
+        Parameters
+        ----------
+        opt_sigma : string
+            Type of uncertainty description available.
+
+        Returns
+        -------
+        None.
+
+        """
+        par_names = self.engine.ExpDesign.par_names
+        if opt_sigma != "B":
+            par_names.extend(
+                [self.Discrepancy.InputDisc.Marginals[i].name for i
+                 in range(len(self.Discrepancy.InputDisc.Marginals))]
+            )
+        # Plot with corner
+        figPosterior = corner.corner(self.posterior_df.to_numpy(),
+                                     labels=par_names,
+                                     quantiles=[0.15, 0.5, 0.85],
+                                     show_titles=True,
+                                     title_fmt=self.corner_title_fmt,
+                                     labelpad=0.2,
+                                     use_math_text=True,
+                                     title_kwargs={"fontsize": 28},
+                                     plot_datapoints=False,
+                                     plot_density=False,
+                                     fill_contours=True,
+                                     smooth=0.5,
+                                     smooth1d=0.5)
+
+        # Loop over axes and set x limits
+        if opt_sigma == "B":
+            axes = np.array(figPosterior.axes).reshape(
+                (len(par_names), len(par_names))
+            )
+            for yi in range(len(par_names)):
+                ax = axes[yi, yi]
+                ax.set_xlim(self.engine.ExpDesign.bound_tuples[yi])
+                for xi in range(yi):
+                    ax = axes[yi, xi]
+                    ax.set_xlim(self.engine.ExpDesign.bound_tuples[xi])
+        plt.close()
+
+        # Turn off gridlines
+        for ax in figPosterior.axes:
+            ax.grid(False)
+
+        if self.emulator:
+            plotname = f'/Posterior_Dist_{self.engine.Model.name}_emulator'
+        else:
+            plotname = f'/Posterior_Dist_{self.engine.Model.name}'
+
+        figPosterior.set_size_inches((24, 16))
+        figPosterior.savefig(f'./{self.out_dir}{plotname}.pdf',
+                             bbox_inches='tight')
+
+        plt.clf()
+
+    def plot_log_BME(self):
+        """
+        Plots the log_BME if bootstrap is active.
+
+        Returns
+        -------
+        None.
+
+        """
+
+        # Computing the TOM performance
+        self.log_BME_tom = stats.chi2.rvs(
+            self.n_tot_measurement, size=self.log_BME.shape[0]
+        )
+
+        fig, ax = plt.subplots()
+        sns.kdeplot(self.log_BME_tom, ax=ax, color="green", shade=True)
+        sns.kdeplot(
+            self.log_BME, ax=ax, color="blue", shade=True,
+            label='Model BME')
+
+        ax.set_xlabel('log$_{10}$(BME)')
+        ax.set_ylabel('Probability density')
+
+        legend_elements = [
+            Patch(facecolor='green', edgecolor='green', label='TOM BME'),
+            Patch(facecolor='blue', edgecolor='blue', label='Model BME')
+        ]
+        ax.legend(handles=legend_elements)
+
+        if self.emulator:
+            plotname = f'/BME_hist_{self.engine.Model.name}_emulator'
+        else:
+            plotname = f'/BME_hist_{self.engine.Model.name}'
+
+        plt.savefig(f'./{self.out_dir}{plotname}.pdf', bbox_inches='tight')
+
+        plt.show()
+        plt.clf()
+
     # -------------------------------------------------------------------------
     def _plot_post_predictive(self):
         """
@@ -1387,7 +1463,6 @@ class BayesInference:
         """
 
         Model = self.engine.Model
-        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
         # Plot the posterior predictive
         for out_idx, out_name in enumerate(Model.Output.names):
             fig, ax = plt.subplots()
@@ -1396,11 +1471,10 @@ class BayesInference:
 
                 # --- Read prior and posterior predictive ---
                 if self.inference_method == 'rejection' and \
-                   self.name.lower() != 'valid':
+                        self.name.lower() == 'calib':
                     #  --- Prior ---
                     # Load posterior predictive
-                    f = h5py.File(
-                        f'{out_dir}/priorPredictive.hdf5', 'r+')
+                    f = h5py.File(f'{self.out_dir}/priorPredictive.hdf5', 'r+')
 
                     try:
                         x_coords = np.array(f[f"x_values/{out_name}"])
@@ -1409,10 +1483,8 @@ class BayesInference:
 
                     X_values = np.repeat(x_coords, 10000)
 
-                    prior_pred_df = {}
-                    prior_pred_df[x_key] = X_values
-                    prior_pred_df[out_name] = np.array(
-                        f[f"EDY/{out_name}"])[:10000].flatten('F')
+                    prior_pred_df = {x_key: X_values, out_name: np.array(
+                        f[f"EDY/{out_name}"])[:10000].flatten('F')}
                     prior_pred_df = pd.DataFrame(prior_pred_df)
 
                     tags_post = ['prior'] * len(prior_pred_df)
@@ -1422,16 +1494,13 @@ class BayesInference:
                     f.close()
 
                     # --- Posterior ---
-                    f = h5py.File(f"{out_dir}/postPredictive.hdf5", 'r+')
+                    f = h5py.File(f"{self.out_dir}/postPredictive.hdf5", 'r+')
 
                     X_values = np.repeat(
                         x_coords, np.array(f[f"EDY/{out_name}"]).shape[0])
 
-                    post_pred_df = {}
-                    post_pred_df[x_key] = X_values
-                    post_pred_df[out_name] = np.array(
-                        f[f"EDY/{out_name}"]).flatten('F')
-
+                    post_pred_df = {x_key: X_values, out_name: np.array(
+                        f[f"EDY/{out_name}"]).flatten('F')}
                     post_pred_df = pd.DataFrame(post_pred_df)
 
                     tags_post = ['posterior'] * len(post_pred_df)
@@ -1459,7 +1528,7 @@ class BayesInference:
 
                     ax.errorbar(
                         x_coords, obs_data[out_name].values,
-                        yerr=1.96*self.measurement_error[out_name],
+                        yerr=1.96 * self.measurement_error[out_name],
                         ecolor='g', fmt=' ', zorder=-1)
 
                     # Add labels to the legend
@@ -1477,7 +1546,7 @@ class BayesInference:
 
                 else:
                     # Load posterior predictive
-                    f = h5py.File(f"{out_dir}/postPredictive.hdf5", 'r+')
+                    f = h5py.File(f"{self.out_dir}/postPredictive.hdf5", 'r+')
 
                     try:
                         x_coords = np.array(f[f"x_values/{out_name}"])
@@ -1492,7 +1561,7 @@ class BayesInference:
                         x_coords, mu, marker='o', color='b',
                         label='Mean Post. Predictive')
                     plt.fill_between(
-                        x_coords, mu-1.96*std, mu+1.96*std, color='b',
+                        x_coords, mu - 1.96 * std, mu + 1.96 * std, color='b',
                         alpha=0.15)
 
                     # --- Plot Data ---
@@ -1505,7 +1574,7 @@ class BayesInference:
                     for output in orig_ED_Y:
                         plt.plot(
                             x_coords, output, color='grey', alpha=0.15
-                            )
+                        )
 
                     # Add labels for axes
                     plt.xlabel('Time [s]')
@@ -1528,5 +1597,7 @@ class BayesInference:
                 else:
                     plotname = f'/Post_Prior_Perd_{Model.name}'
 
-                fig.savefig(f'./{out_dir}{plotname}_{out_name}.pdf',
+                fig.savefig(f'./{self.out_dir}{plotname}_{out_name}.pdf',
                             bbox_inches='tight')
+
+        plt.clf()
diff --git a/src/bayesvalidrox/bayes_inference/bayes_model_comparison.py b/src/bayesvalidrox/bayes_inference/bayes_model_comparison.py
index 828613556e90ec0c529b91f2592eec148c98136b..a26eaa886d6bfda11c23e0e2a490f9f93fee1c45 100644
--- a/src/bayesvalidrox/bayes_inference/bayes_model_comparison.py
+++ b/src/bayesvalidrox/bayes_inference/bayes_model_comparison.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 
+import emcee
 import numpy as np
 import os
 from scipy import stats
@@ -8,6 +9,7 @@ import seaborn as sns
 import matplotlib.patches as patches
 import matplotlib.colors as mcolors
 import matplotlib.pylab as plt
+import pandas as pd
 from .bayes_inference import BayesInference
 
 # Load the mplstyle
@@ -27,86 +29,89 @@ class BayesModelComparison:
         `True`.
     perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
         User defined perturbed data. The default is `None`.
-    n_bootstarp : int
+    n_bootstrap : int
         Number of bootstrap iteration. The default is `1000`.
     data_noise_level : float
         A noise level to perturb the data set. The default is `0.01`.
-    just_n_meas : int
-        Number of measurements considered for visualization of the
-        justifiability results.
 
     """
 
     def __init__(self, justifiability=True, perturbed_data=None,
-                 n_bootstarp=1000, data_noise_level=0.01, just_n_meas=2):
+                 n_bootstrap=1000, data_noise_level=0.01,
+                 use_Bayes_settings = True, emulator = True, out_dir = 'Outputs_Comparison/'):
 
+        # TODO: check valid ranges of the parameters
+        
         self.justifiability = justifiability
         self.perturbed_data = perturbed_data
-        self.n_bootstarp = n_bootstarp
+        self.n_bootstrap = n_bootstrap
         self.data_noise_level = data_noise_level
-        self.just_n_meas = just_n_meas
-
+        self.use_Bayes_settings = use_Bayes_settings
+        self.emulator = emulator
+        self.out_dir = out_dir
+        
+        # Other parameters
+        self.n_meas = None
+        self.BF_data = None
+        self.just_data = None
+        self.BME_dict = None
+        self.set_up = False
+        self.dtype = None
+        self.bayes_dict = None
+        self.just_bayes_dict = None
+        self.model_weights = None
+        self.model_weights_dict = None
+        self.just_model_weights_dict = None
+        
+        
     # --------------------------------------------------------------------------
-    def create_model_comparison(self, model_dict, opts_dict):
+    def setup(self, model_dict):
         """
-        Starts the two-stage model comparison.
-        Stage I: Compare models using Bayes factors.
-        Stage II: Compare models via justifiability analysis.
-
-        Parameters
-        ----------
-        model_dict : dict
-            A dictionary including the metamodels.
-        opts_dict : dict
-            A dictionary given the `BayesInference` options.
-
-            Example:
-
-                >>> opts_bootstrap = {
-                    "bootstrap": True,
-                    "n_samples": 10000,
-                    "Discrepancy": DiscrepancyOpts,
-                    "emulator": True,
-                    "plot_post_pred": True
-                    }
+        Initialize parameters that are needed for all types of model comparison
 
         Returns
         -------
-        output : dict
-            A dictionary containing the objects and the model weights for the
-            comparison using Bayes factors and justifiability analysis.
+        None.
 
         """
+        
+        if not isinstance(model_dict, dict):
+            raise Exception("To run model comparsion, you need to pass a "
+                            "dictionary of models.")
 
-        # Bayes factor
-        bayes_dict_bf, model_weights_dict_bf = self.compare_models(
-            model_dict, opts_dict
-            )
-
-        output = {
-            'Bayes objects BF': bayes_dict_bf,
-            'Model weights BF': model_weights_dict_bf
-            }
+        # Extract model names
+        self.model_names = [*model_dict]
 
-        # Justifiability analysis
-        if self.justifiability:
-            bayes_dict_ja, model_weights_dict_ja = self.compare_models(
-                model_dict, opts_dict, justifiability=True
-                )
+        # Compute total number of the measurement points
+        # TODO: there could be a different option for this here
+        Engine = list(model_dict.items())[0][1]
+        Engine.Model.read_observation()
+        self.n_meas = Engine.Model.n_obs
 
-            output['Bayes objects JA'] = bayes_dict_ja
-            output['Model weights JA'] = model_weights_dict_ja
+        # Find n_bootstrap
+        if self.perturbed_data is not None:
+            self.n_bootstrap = self.perturbed_data.shape[0]
+            
+        # Output directory
+        os.makedirs(self.out_dir, exist_ok=True)
+
+        # System settings
+        if os.name == 'nt':
+            print('')
+            print('WARNING: Performing the inference on windows can lead to reduced accuracy!')
+            print('')
+            self.dtype=np.longdouble
+        else:
+            self.dtype=np.float128
 
-        return output
 
     # --------------------------------------------------------------------------
-    def compare_models(self, model_dict, opts_dict, justifiability=False):
+    def model_comparison_all(self, model_dict, opts_dict):
         """
-        Passes the options to instantiates the BayesInference class for each
-        model and passes the options from `opts_dict`. Then, it starts the
-        computations.
-        It also creates a folder and saves the diagrams, e.g., Bayes factor
-        plot, confusion matrix, etc.
+        Perform all three types of model comparison: 
+            * Bayes Factors
+            * Model weights
+            * Justifiability analysis
 
         Parameters
         ----------
@@ -114,50 +119,57 @@ class BayesModelComparison:
             A dictionary including the metamodels.
         opts_dict : dict
             A dictionary given the `BayesInference` options.
-        justifiability : bool, optional
-            Whether to perform the justifiability analysis. The default is
-            `False`.
 
         Returns
         -------
-        bayes_dict : dict
-            A dictionary with `BayesInference` objects.
-        model_weights_dict : dict
-            A dictionary containing the model weights.
+        results : dict
+            A dictionary that contains the calculated BME values, model weights
+            and confusion matrix
 
         """
+        self.calc_bayes_factors(model_dict, opts_dict)
+        self.calc_model_weights(model_dict, opts_dict)
+        self.calc_justifiability_analysis(model_dict, opts_dict)
+        
+        results = {'BME': self.BME_dict, 'Model weights': self.model_weights_dict,
+                   'Confusion matrix': self.confusion_matrix}
+        return results
+    
 
-        if not isinstance(model_dict, dict):
-            raise Exception("To run model comparsion, you need to pass a "
-                            "dictionary of models.")
+    # --------------------------------------------------------------------------
+    def calc_bayes_factors(self, model_dict, opts_dict):
+        """
+        Calculate the BayesFactors for each pair of models in the model_dict
+        with respect to given data.
 
-        # Extract model names
-        self.model_names = [*model_dict]
+        Parameters
+        ----------
+        model_dict : dict
+            A dictionary including the metamodels.
+        opts_dict : dict
+            A dictionary given the `BayesInference` options.
 
-        # Compute total number of the measurement points
-        Engine = list(model_dict.items())[0][1]
-        Engine.Model.read_observation()
-        self.n_meas = Engine.Model.n_obs
+        Returns
+        -------
+        None.
 
+        """
+        # Do the setup
+        if self.n_meas is None:
+            self.setup(model_dict)
+        
         # ----- Generate data -----
-        # Find n_bootstrap
-        if self.perturbed_data is None:
-            n_bootstarp = self.n_bootstarp
-        else:
-            n_bootstarp = self.perturbed_data.shape[0]
-
         # Create dataset
-        justData = self.generate_dataset(
-            model_dict, justifiability, n_bootstarp=n_bootstarp)
+        self.BF_data = self.generate_dataset(
+            model_dict, False, n_bootstrap=self.n_bootstrap)
 
         # Run create Interface for each model
-        bayes_dict = {}
+        self.bayes_dict = {}
         for model in model_dict.keys():
             print("-"*20)
             print("Bayesian inference of {}.\n".format(model))
-
             BayesOpts = BayesInference(model_dict[model])
-
+                
             # Set BayesInference options
             for key, value in opts_dict.items():
                 if key in BayesOpts.__dict__.keys():
@@ -167,49 +179,147 @@ class BayesModelComparison:
                         setattr(BayesOpts, key, value)
 
             # Pass justifiability data as perturbed data
-            BayesOpts.perturbed_data = justData
-            BayesOpts.just_analysis = justifiability
+            BayesOpts.bmc = True
+            BayesOpts.emulator= self.emulator
+            BayesOpts.just_analysis = False
+            BayesOpts.perturbed_data = self.BF_data
 
-            bayes_dict[model] = BayesOpts.create_inference()
+            self.bayes_dict[model] = BayesOpts.create_inference()
             print("-"*20)
 
-        # Compute model weights
-        BME_Dict = dict()
-        for modelName, bayesObj in bayes_dict.items():
-            BME_Dict[modelName] = np.exp(bayesObj.log_BME, dtype=np.longdouble)#float128)
+        # Accumulate the BMEs
+        self.BME_dict = dict()
+        for modelName, bayesObj in self.bayes_dict.items():
+            self.BME_dict[modelName] = np.exp(bayesObj.log_BME, dtype=self.dtype)
+
+        # TODO: move the calculation of the Bayes Factors out of the plots to here!
+        # Create kde plot for bayes factors
+        self.plot_bayes_factor(self.BME_dict, 'kde_plot')
+        
+        
+    def calc_model_weights(self, model_dict, opts_dict):
+        """
+        Calculate the model weights from BME evaluations for Bayes factors.
 
-        # BME correction in BayesInference class
-        model_weights = self.cal_model_weight(
-            BME_Dict, justifiability, n_bootstarp=n_bootstarp)
+        Parameters
+        ----------
+        model_dict : TYPE
+            DESCRIPTION.
+        opts_dict : TYPE
+            DESCRIPTION.
 
-        # Plot model weights
-        if justifiability:
-            model_names = self.model_names
-            model_names.insert(0, 'Observation')
+        Returns
+        -------
+        None.
+
+        """
+        # Get BMEs via Bayes Factors if not already done so
+        if self.BME_dict is None:
+            self.calc_bayes_factors(model_dict, opts_dict)
+        
+        # Calculate the model weights
+        self.model_weights = self.cal_model_weight(
+            self.BME_dict, False, n_bootstrap=self.n_bootstrap)
 
-            # Split the model weights and save in a dict
-            list_ModelWeights = np.split(
-                model_weights, model_weights.shape[1]/self.n_meas, axis=1)
-            model_weights_dict = {key: weights for key, weights in
-                                  zip(model_names, list_ModelWeights)}
+        # Create box plot for model weights
+        self.plot_model_weights(self.model_weights, 'model_weights')
 
-            #self.plot_just_analysis(model_weights_dict)
-        else:
-            # Create box plot for model weights
-            self.plot_model_weights(model_weights, 'model_weights')
 
-            # Create kde plot for bayes factors
-            self.plot_bayes_factor(BME_Dict, 'kde_plot')
+    # -------------------------------------------------------------------------    
+    def calc_justifiability_analysis(self, model_dict, opts_dict):
+        """
+        Perform justifiability analysis by calculating the confusion matrix
+        
+        Parameters
+        ----------
+        model_dict : dict
+            A dictionary including the metamodels.
+        opts_dict : dict
+            A dictionary given the `BayesInference` options.
+        
+        Returns
+        -------
+        confusion_matrix: dict
+            The averaged confusion matrix
+        
+        """
+        # Do setup
+        if self.n_meas is None:
+            self.setup(model_dict)
+            
+        # Extend model names
+        model_names = self.model_names
+        if model_names[0]!= 'Observation':
+            model_names.insert(0, 'Observation')
+        
+        # Generate data
+        # TODO: generate the dataset only if it does not exist yet
+        self.just_data = self.generate_dataset(
+            model_dict, True, n_bootstrap=self.n_bootstrap)
+
+        # Run inference for each model if this is not available
+        if self.just_bayes_dict is None:
+            self.just_bayes_dict = {}
+            for model in model_dict.keys():
+                print("-"*20)
+                print("Bayesian inference of {}.\n".format(model))
+                BayesOpts = BayesInference(model_dict[model])
+                    
+                # Set BayesInference options
+                for key, value in opts_dict.items():
+                    if key in BayesOpts.__dict__.keys():
+                        if key == "Discrepancy" and isinstance(value, dict):
+                            setattr(BayesOpts, key, value[model])
+                        else:
+                            setattr(BayesOpts, key, value)
+    
+                # Pass justifiability data as perturbed data
+                BayesOpts.bmc = True
+                BayesOpts.emulator= self.emulator
+                BayesOpts.just_analysis = True
+                BayesOpts.perturbed_data = self.just_data
+    
+                self.just_bayes_dict[model] = BayesOpts.create_inference()
+                print("-"*20)
+
+        # Compute model weights
+        self.BME_dict = dict()
+        for modelName, bayesObj in self.bayes_dict.items():
+            self.BME_dict[modelName] = np.exp(bayesObj.log_BME, dtype=self.dtype)
 
-            # Store model weights in a dict
-            model_weights_dict = {key: weights for key, weights in
-                                  zip(self.model_names, model_weights)}
+        # BME correction in BayesInference class
+        just_model_weights = self.cal_model_weight(
+            self.BME_dict, True, n_bootstrap=self.n_bootstrap)
+
+        # Split the model weights and save in a dict
+        list_ModelWeights = np.split(
+            just_model_weights, self.model_weights.shape[1]/self.n_meas, axis=1)
+        self.just_model_weights_dict = {key: weights for key, weights in
+                              zip(model_names, list_ModelWeights)}
+        
+        # Confusion matrix over all measurement points
+        cf_m = pd.DataFrame()
+        cf_m['Generated by'] = model_names
+        for i in range(len(model_names)):
+            # Ignore 'Observation', as it is not in the model_weights_dict
+            # TODO: how to change the code so that it is included as well?
+            if i==0:
+                continue
+            avg = []
+            for n in model_names:
+                avg.append(np.sum(self.just_model_weights_dict[n][i-1]))
+                
+            # Norm to sum to 1 for each 'Generated by' row
+            cf_m[model_names[i]] = avg/self.n_meas
+        self.confusion_matrix = cf_m
+            
+        # Plot model weights
+        self.plot_just_analysis()
 
-        return bayes_dict, model_weights_dict
 
     # -------------------------------------------------------------------------
     def generate_dataset(self, model_dict, justifiability=False,
-                         n_bootstarp=1):
+                         n_bootstrap=1):
         """
         Generates the perturbed data set for the Bayes factor calculations and
         the data set for the justifiability analysis.
@@ -221,7 +331,7 @@ class BayesModelComparison:
         bool, optional
             Whether to perform the justifiability analysis. The default is
             `False`.
-        n_bootstarp : int, optional
+        n_bootstrap : int, optional
             Number of bootstrap iterations. The default is `1`.
 
         Returns
@@ -238,27 +348,28 @@ class BayesModelComparison:
         # Perturb observations for Bayes Factor
         if self.perturbed_data is None:
             self.perturbed_data = self.__perturb_data(
-                    Engine.Model.observations, out_names, n_bootstarp,
+                    Engine.Model.observations, out_names, n_bootstrap,
                     noise_level=self.data_noise_level)
 
         # Only for Bayes Factor
         if not justifiability:
-            return self.perturbed_data
+            return self.perturbed_data # TODO: why return this as self... and the other one not? Is this used again?
 
         # Evaluate metamodel
         runs = {}
-        for key, metaModel in model_dict.items():
-            y_hat, _ = metaModel.eval_metamodel(nsamples=n_bootstarp)
+        for key, metaModel in model_dict.items(): # TODO: add check for emulator vs model
+            y_hat, _ = metaModel.eval_metamodel(nsamples=n_bootstrap)
             runs[key] = y_hat
 
         # Generate data
-        for i in range(n_bootstarp):
-            y_data = self.perturbed_data[i].reshape(1, -1)
-            justData = np.tril(np.repeat(y_data, y_data.shape[1], axis=0))
+        for i in range(n_bootstrap):
+            y_data = self.perturbed_data[i].reshape(1, -1)# makes every entry in self.perturbed_data 2D by adding one dim outside
+            justData = np.tril(np.repeat(y_data, y_data.shape[1], axis=0)) # Lower triangle matrix from repeats of y_data
+            # TODO: why a lower-triangular matrix here?
             # Use surrogate runs for data-generating process
             for key, metaModel in model_dict.items():
                 model_data = np.array(
-                    [runs[key][out][i] for out in out_names]).reshape(y_data.shape)
+                    [runs[key][out][i] for out in out_names]).reshape(y_data.shape) # reshapes model runs to match y_data
                 justData = np.vstack((
                     justData,
                     np.tril(np.repeat(model_data, model_data.shape[1], axis=0))
@@ -276,7 +387,7 @@ class BayesModelComparison:
     # -------------------------------------------------------------------------
     def __perturb_data(self, data, output_names, n_bootstrap, noise_level):
         """
-        Returns an array with n_bootstrap_itrs rowsof perturbed data.
+        Returns an array with n_bootstrap_itrs rows of perturbed data.
         The first row includes the original observation data.
         If `self.bayes_loocv` is True, a 2d-array will be returned with
         repeated rows and zero diagonal entries.
@@ -318,13 +429,13 @@ class BayesModelComparison:
         return final_data
 
     # -------------------------------------------------------------------------
-    def cal_model_weight(self, BME_Dict, justifiability=False, n_bootstarp=1):
+    def cal_model_weight(self, BME_dict, justifiability=False, n_bootstrap=1):
         """
         Normalize the BME (Asumption: Model Prior weights are equal for models)
 
         Parameters
         ----------
-        BME_Dict : dict
+        BME_dict : dict
             A dictionary containing the BME values.
 
         Returns
@@ -334,12 +445,12 @@ class BayesModelComparison:
 
         """
         # Stack the BME values for all models
-        all_BME = np.vstack(list(BME_Dict.values()))
+        all_BME = np.vstack(list(BME_dict.values()))
 
         if justifiability:
             # Compute expected log_BME for justifiabiliy analysis
             all_BME = all_BME.reshape(
-                all_BME.shape[0], -1, n_bootstarp).mean(axis=2)
+                all_BME.shape[0], -1, n_bootstrap).mean(axis=2)
 
         # Model weights
         model_weights = np.divide(all_BME, np.nansum(all_BME, axis=0))
@@ -362,16 +473,16 @@ class BayesModelComparison:
         None.
 
         """
-
-        directory = 'Outputs_Comparison/'
-        os.makedirs(directory, exist_ok=True)
+        model_weights_dict = self.just_model_weights_dict
         Color = [*mcolors.TABLEAU_COLORS]
         names = [*model_weights_dict]
 
+        # Plot weights for each 'Generated by'
         model_names = [model.replace('_', '$-$') for model in self.model_names]
         for name in names:
             fig, ax = plt.subplots()
             for i, model in enumerate(model_names[1:]):
+                #print(model, i)
                 plt.plot(list(range(1, self.n_meas+1)),
                          model_weights_dict[name][i],
                          color=Color[i], marker='o',
@@ -384,13 +495,12 @@ class BayesModelComparison:
             ax.set_xticks(list(range(1, self.n_meas+1)))
             plt.legend(loc="best")
             fig.savefig(
-                f'{directory}modelWeights_{name}.svg', bbox_inches='tight'
+                f'{self.out_dir}modelWeights_{name}.svg', bbox_inches='tight'
                 )
             plt.close()
 
-        # Confusion matrix for some measurement points
-        epsilon = 1 if self.just_n_meas != 1 else 0
-        for index in range(0, self.n_meas+epsilon, self.just_n_meas):
+        # Confusion matrix for each measurement point
+        for index in range(0, self.n_meas):
             weights = np.array(
                 [model_weights_dict[key][:, index] for key in model_weights_dict]
                 )
@@ -405,10 +515,26 @@ class BayesModelComparison:
             g.set_xlabel(r"\textbf{Data generated by:}", labelpad=15)
             g.set_ylabel(r"\textbf{Model weight for:}", labelpad=15)
             g.figure.savefig(
-                f"{directory}confusionMatrix_ND_{index+1}.pdf",
+                f"{self.out_dir}confusionMatrix_ND_{index+1}.pdf",
                 bbox_inches='tight'
                 )
             plt.close()
+                
+        # Plot the averaged confusion matrix
+        out_names = names[1:]
+        cf = self.confusion_matrix[out_names].to_numpy()
+        g = sns.heatmap(cf.T, annot=True, cmap='Blues', xticklabels=model_names,
+        yticklabels=model_names[1:], annot_kws={"size": 24})
+        g.xaxis.tick_top()
+        g.xaxis.set_label_position('top')
+        g.set_xlabel(r"\textbf{Data generated by:}", labelpad=15)
+        g.set_ylabel(r"\textbf{Model weight for:}", labelpad=15)
+        g.figure.savefig(
+            f"{self.out_dir}confusionMatrix_full.pdf",
+            bbox_inches='tight'
+            )
+        plt.close()
+        
 
     # -------------------------------------------------------------------------
     def plot_model_weights(self, model_weights, plot_name):
@@ -428,13 +554,9 @@ class BayesModelComparison:
         None.
 
         """
-        font_size = 40
-        # mkdir for plots
-        directory = 'Outputs_Comparison/'
-        os.makedirs(directory, exist_ok=True)
-
         # Create figure
         fig, ax = plt.subplots()
+        font_size = 40
 
         # Filter data using np.isnan
         mask = ~np.isnan(model_weights.T)
@@ -462,44 +584,35 @@ class BayesModelComparison:
         for median in bp['medians']:
             median.set(color='#b2df8a', linewidth=2)
 
-        # change the style of fliers and their fill
-        # for flier in bp['fliers']:
-        #     flier.set(marker='o', color='#e7298a', alpha=0.75)
-
-        # Custom x-axis labels
+        # Customize the axes
         model_names = [model.replace('_', '$-$') for model in self.model_names]
         ax.set_xticklabels(model_names)
-
         ax.set_ylabel('Weight', fontsize=font_size)
-
-        # Title
-        plt.title('Posterior Model Weights')
-
-        # Set y lim
         ax.set_ylim((-0.05, 1.05))
-
-        # Set size of the ticks
         for t in ax.get_xticklabels():
             t.set_fontsize(font_size)
         for t in ax.get_yticklabels():
             t.set_fontsize(font_size)
 
+        # Title
+        plt.title('Posterior Model Weights')
+        
         # Save the figure
         fig.savefig(
-            f'./{directory}{plot_name}.pdf', bbox_inches='tight'
+            f'./{self.out_dir}{plot_name}.pdf', bbox_inches='tight'
             )
 
         plt.close()
 
     # -------------------------------------------------------------------------
-    def plot_bayes_factor(self, BME_Dict, plot_name=''):
+    def plot_bayes_factor(self, BME_dict, plot_name=''):
         """
         Plots the Bayes factor distibutions in a :math:`N_m \\times N_m`
         matrix, where :math:`N_m` is the number of the models.
 
         Parameters
         ----------
-        BME_Dict : dict
+        BME_dict : dict
             A dictionary containing the BME values of the models.
         plot_name : str, optional
             Plot name. The default is ''.
@@ -509,16 +622,11 @@ class BayesModelComparison:
         None.
 
         """
-
+        # Plot setup
         font_size = 40
-
-        # mkdir for plots
-        directory = 'Outputs_Comparison/'
-        os.makedirs(directory, exist_ok=True)
-
         Colors = ["blue", "green", "gray", "brown"]
 
-        model_names = list(BME_Dict.keys())
+        model_names = list(BME_dict.keys())
         nModels = len(model_names)
 
         # Plots
@@ -540,7 +648,7 @@ class BayesModelComparison:
 
                     # Null hypothesis: key_j is the better model
                     BayesFactor = np.log10(
-                        np.divide(BME_Dict[key_i], BME_Dict[key_j])
+                        np.divide(BME_dict[key_i], BME_dict[key_j])
                         )
 
                     # sns.kdeplot(BayesFactor, ax=ax, color=Colors[i], shade=True)
@@ -633,10 +741,8 @@ class BayesModelComparison:
                             fontsize=fsize, color=Colors[i],
                             transform=ax.transAxes)
 
-        # Defining custom 'ylim' values.
+        # Customize axes
         custom_ylim = (0, 1.05)
-
-        # Setting the values for all axes.
         plt.setp(axes, ylim=custom_ylim)
 
         # set labels
@@ -648,7 +754,7 @@ class BayesModelComparison:
         plt.subplots_adjust(wspace=0.2, hspace=0.1)
 
         plt.savefig(
-            f'./{directory}Bayes_Factor{plot_name}.pdf', bbox_inches='tight'
+            f'./{self.out_dir}Bayes_Factor{plot_name}.pdf', bbox_inches='tight'
             )
 
         plt.close()
diff --git a/src/bayesvalidrox/bayes_inference/discrepancy.py b/src/bayesvalidrox/bayes_inference/discrepancy.py
index fff32a2500ae20b3667c7b0ec2cc85c1da614688..b3c235ebeb6d6ae9e109ca862cc522cc21efb45e 100644
--- a/src/bayesvalidrox/bayes_inference/discrepancy.py
+++ b/src/bayesvalidrox/bayes_inference/discrepancy.py
@@ -36,7 +36,7 @@ class Discrepancy:
     * Option B: With unknown redidual covariance matrix \\(\\Sigma\\),
     paramethrized as \\(\\Sigma(\\theta_{\\epsilon})=\\sigma^2 \\textbf{I}_
     {N_{out}}\\) with unknown residual variances \\(\\sigma^2\\).
-    This term will be jointly infer with the uncertain input parameters. For
+    This term will be jointly inferred with the uncertain input parameters. For
     the inversion, you need to define a prior marginal via `Input` class. Note
     that \\(\\sigma^2\\) is only a single scalar multiplier for the diagonal
     entries of the covariance matrix \\(\\Sigma\\).
@@ -58,10 +58,17 @@ class Discrepancy:
     """
 
     def __init__(self, InputDisc='', disc_type='Gaussian', parameters=None):
+        # Set the values
         self.InputDisc = InputDisc
         self.disc_type = disc_type
         self.parameters = parameters
-
+        
+        # Other inits
+        self.ExpDesign = None
+        self.n_samples = None
+        self.sigma2_prior = None
+        self.name = None
+        self.opt_sigma = None # This will be set in the inference class and used in mcmc
     # -------------------------------------------------------------------------
     def get_sample(self, n_samples):
         """
@@ -87,6 +94,11 @@ class Discrepancy:
         # Create and store BoundTuples
         self.ExpDesign = ExpDesigns(self.InputDisc)
         self.ExpDesign.sampling_method = 'random'
+        
+        # TODO: why does it call 'generate_ED' instead of 'generate_samples?
+        # ExpDesign.bound_tuples, onp_sigma, prior_space needed from the outside
+        # Discrepancy opt_sigma, InputDisc needed from the outside
+        # TODO: opt_sigma not defined here, but called from the outside??
         self.ExpDesign.generate_ED(
             n_samples, max_pce_deg=1
             )
diff --git a/src/bayesvalidrox/bayes_inference/mcmc.py b/src/bayesvalidrox/bayes_inference/mcmc.py
index fe22a152f117aab7023bfe6592ce3a48bb0b3aec..f4d1524d3acbd2e09866042a47670c1f0ffad213 100755
--- a/src/bayesvalidrox/bayes_inference/mcmc.py
+++ b/src/bayesvalidrox/bayes_inference/mcmc.py
@@ -15,6 +15,85 @@ import shutil
 os.environ["OMP_NUM_THREADS"] = "1"
 
 
+# -------------------------------------------------------------------------
+def _check_ranges(theta, ranges): # TODO: this is a replica of exp_designs.check_ranges
+    """
+    This function checks if theta lies in the given ranges.
+
+    Parameters
+    ----------
+    theta : array
+        Proposed parameter set.
+    ranges : nested list
+        List of the parameter ranges.
+
+    Returns
+    -------
+    c : bool
+        True if theta lies within the given ranges, otherwise False.
+
+    """
+    c = True
+    # traverse in the list1
+    for i, bounds in enumerate(ranges):
+        x = theta[i]
+        # condition check
+        if x < bounds[0] or x > bounds[1]:
+            c = False
+            return c
+    return c
+
+# -------------------------------------------------------------------------
+def gelman_rubin(chain, return_var=False):
+    """
+    The potential scale reduction factor (PSRF) defined by the variance
+    within one chain, W, with the variance between chains B.
+    Both variances are combined in a weighted sum to obtain an estimate of
+    the variance of a parameter \\( \\theta \\). The square root of the
+    ratio of this estimated variance to the within-chain variance is called
+    the potential scale reduction.
+    For a well converged chain it should approach 1. Values greater than
+    1.1 typically indicate that the chains have not yet fully converged.
+
+    Source: http://joergdietrich.github.io/emcee-convergence.html
+
+    https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py
+
+    Parameters
+    ----------
+    chain : array (n_walkers, n_steps, n_params)
+        The emcee ensemble samples.
+
+    Returns
+    -------
+    R_hat : float
+        The Gelman-Rubin values.
+
+    """
+    chain = np.array(chain)
+    m_chains, n_iters = chain.shape[:2]
+
+    # Calculate between-chain variance
+    θb = np.mean(chain, axis=1)
+    θbb = np.mean(θb, axis=0)
+    B_over_n = ((θbb - θb)**2).sum(axis=0)
+    B_over_n /= (m_chains - 1)
+
+    # Calculate within-chain variances
+    ssq = np.var(chain, axis=1, ddof=1)
+    W = np.mean(ssq, axis=0)
+
+    # (over) estimate of variance
+    var_θ = W * (n_iters - 1) / n_iters + B_over_n
+
+    if return_var:
+        return var_θ
+    else:
+        # The square root of the ratio of this estimates variance to the
+        # within chain variance
+        R_hat = np.sqrt(var_θ / W)
+        return R_hat
+
 class MCMC:
     """
     A class for bayesian inference via a Markov-Chain Monte-Carlo (MCMC)
@@ -41,65 +120,67 @@ class MCMC:
     """
 
     def __init__(self, BayesOpts):
-
+        # Inputs
         self.BayesOpts = BayesOpts
+        
+        # Param inits
+        self.counter = 0
+        self.observation = None
+        self.total_sigma2 = None
+        
+        # Get general params from BayesOpts
+        self.out_dir = self.BayesOpts.out_dir
+        
+        # Get MCMC parameters from BayesOpts
+        pars = self.BayesOpts.mcmc_params
+        self.initsamples = pars['init_samples']
+        if isinstance(self.initsamples, pd.DataFrame):
+            self.initsamples = self.initsamples.values
+        self.nsteps = int(pars['n_steps'])
+        self.nwalkers = int(pars['n_walkers'])
+        self.nburn = pars['n_burn']
+        self.moves = pars['moves']
+        self.mp = pars['multiprocessing']
+        self.verbose = pars['verbose']
 
     def run_sampler(self, observation, total_sigma2):
+        """
+        Run the MCMC sampler for the given observations and stdevs.
+
+        Parameters
+        ----------
+        observation : array-like
+            Observation data to calibrate against (stored on self for the likelihood).
+        total_sigma2 : array-like or dict
+            Total variance (sigma^2) of the observational data.
 
+        Returns
+        -------
+        Posterior_df : pandas.DataFrame
+            Posterior samples of the parameters, one column per parameter.
+
+        """
+        # Get init values
         BayesObj = self.BayesOpts
-        MetaModel = BayesObj.engine.MetaModel
-        Model = BayesObj.engine.Model
         Discrepancy = self.BayesOpts.Discrepancy
-        n_cpus = Model.n_cpus
-        priorDist = BayesObj.engine.ExpDesign.JDist
-        ndim = MetaModel.n_params
-        self.counter = 0
-        output_dir = f'Outputs_Bayes_{Model.name}_{self.BayesOpts.name}'
-        if not os.path.exists(output_dir):
-            os.makedirs(output_dir)
+        n_cpus = BayesObj.engine.Model.n_cpus
+        ndim = BayesObj.engine.MetaModel.n_params
+        if not os.path.exists(self.out_dir):
+            os.makedirs(self.out_dir)
 
+        # Save inputs
         self.observation = observation
         self.total_sigma2 = total_sigma2
 
-        # Unpack mcmc parameters given to BayesObj.mcmc_params
-        self.initsamples = None
-        self.nwalkers = 100
-        self.nburn = 200
-        self.nsteps = 100000
-        self.moves = None
-        self.mp = False
-        self.verbose = False
-
-        # Extract initial samples
-        if 'init_samples' in BayesObj.mcmc_params:
-            self.initsamples = BayesObj.mcmc_params['init_samples']
-            if isinstance(self.initsamples, pd.DataFrame):
-                self.initsamples = self.initsamples.values
-
-        # Extract number of steps per walker
-        if 'n_steps' in BayesObj.mcmc_params:
-            self.nsteps = int(BayesObj.mcmc_params['n_steps'])
-        # Extract number of walkers (chains)
-        if 'n_walkers' in BayesObj.mcmc_params:
-            self.nwalkers = int(BayesObj.mcmc_params['n_walkers'])
-        # Extract moves
-        if 'moves' in BayesObj.mcmc_params:
-            self.moves = BayesObj.mcmc_params['moves']
-        # Extract multiprocessing
-        if 'multiprocessing' in BayesObj.mcmc_params:
-            self.mp = BayesObj.mcmc_params['multiprocessing']
-        # Extract verbose
-        if 'verbose' in BayesObj.mcmc_params:
-            self.verbose = BayesObj.mcmc_params['verbose']
-
         # Set initial samples
         np.random.seed(0)
         if self.initsamples is None:
             try:
-                initsamples = priorDist.sample(self.nwalkers).T
+                initsamples = BayesObj.engine.ExpDesign.JDist.sample(self.nwalkers).T
+                initsamples = np.swapaxes(np.array([initsamples]),0,1) # TODO: test if this still works with multiple input dists
             except:
                 # when aPCE selected - gaussian kernel distribution
-                inputSamples = MetaModel.ExpDesign.raw_data.T
+                inputSamples = self.BayesOpts.engine.ExpDesign.raw_data.T
                 random_indices = np.random.choice(
                     len(inputSamples), size=self.nwalkers, replace=False
                     )
@@ -125,16 +206,14 @@ class MCMC:
                     initsamples[:, idx_dim] = dist.rvs(size=self.nwalkers)
 
                 # Update lower and upper
-                MetaModel.ExpDesign.bound_tuples = bound_tuples
+                BayesObj.engine.MetaModel.ExpDesign.bound_tuples = bound_tuples
 
         # Check if sigma^2 needs to be inferred
-        if Discrepancy.opt_sigma != 'B':
+        if Discrepancy.opt_sigma != 'B': # TODO: why !='B'?
             sigma2_samples = Discrepancy.get_sample(self.nwalkers)
 
             # Update initsamples
             initsamples = np.hstack((initsamples, sigma2_samples))
-
-            # Update ndim
             ndim = initsamples.shape[1]
 
             # Discrepancy bound
@@ -146,10 +225,8 @@ class MCMC:
         print("\n>>>> Bayesian inference with MCMC for "
               f"{self.BayesOpts.name} started. <<<<<<")
 
-        # Set up the backend
-        filename = f"{output_dir}/emcee_sampler.h5"
-        backend = emcee.backends.HDFBackend(filename)
-        # Clear the backend in case the file already exists
+        # Set up the backend and clear it in case the file already exists
+        backend = emcee.backends.HDFBackend(f"{self.out_dir}/emcee_sampler.h5")
         backend.reset(self.nwalkers, ndim)
 
         # Define emcee sampler
@@ -176,8 +253,8 @@ class MCMC:
                         )
 
                     # Reset sampler
-                    sampler.reset()
                     pos = pos.coords
+                    sampler.reset()
                 else:
                     pos = initsamples
 
@@ -252,13 +329,13 @@ class MCMC:
                 # output current autocorrelation estimate
                 if self.verbose:
                     print(f"Mean autocorr. time estimate: {np.nanmean(tau):.3f}")
-                    list_gr = np.round(self.gelman_rubin(sampler.chain), 3)
+                    list_gr = np.round(gelman_rubin(sampler.chain), 3)
                     print("Gelman-Rubin Test*: ", list_gr)
 
                 # check convergence
                 converged = np.all(tau*autocorreverynsteps < sampler.iteration)
                 converged &= np.all(np.abs(tauold - tau) / tau < 0.01)
-                converged &= np.all(self.gelman_rubin(sampler.chain) < 1.1)
+                converged &= np.all(gelman_rubin(sampler.chain) < 1.1)
 
                 if converged:
                     break
@@ -277,7 +354,7 @@ class MCMC:
         thin = int(0.5*np.nanmin(tau)) if int(0.5*np.nanmin(tau)) != 0 else 1
         finalsamples = sampler.get_chain(discard=burnin, flat=True, thin=thin)
         acc_fr = np.nanmean(sampler.acceptance_fraction)
-        list_gr = np.round(self.gelman_rubin(sampler.chain[:, burnin:]), 3)
+        list_gr = np.round(gelman_rubin(sampler.chain[:, burnin:]), 3)
 
         # Print summary
         print('\n')
@@ -307,7 +384,7 @@ class MCMC:
 
         # Plot traces
         if self.verbose and self.nsteps < 10000:
-            pdf = PdfPages(output_dir+'/traceplots.pdf')
+            pdf = PdfPages(self.out_dir+'/traceplots.pdf')
             fig = plt.figure()
             for parIdx in range(ndim):
                 # Set up the axes with gridspec
@@ -334,7 +411,6 @@ class MCMC:
 
                 # Destroy the current plot
                 plt.clf()
-
             pdf.close()
 
         # plot development of autocorrelation estimate
@@ -348,33 +424,9 @@ class MCMC:
             plt.ylim(0, np.nanmax(taus)+0.1*(np.nanmax(taus)-np.nanmin(taus)))
             plt.xlabel("number of steps")
             plt.ylabel(r"mean $\hat{\tau}$")
-            fig1.savefig(f"{output_dir}/autocorrelation_time.pdf",
+            fig1.savefig(f"{self.out_dir}/autocorrelation_time.pdf",
                          bbox_inches='tight')
 
-        # logml_dict = self.marginal_llk_emcee(sampler, self.nburn, logp=None,
-        # maxiter=5000)
-        # print('\nThe Bridge Sampling Estimation is "
-        #       f"{logml_dict['logml']:.5f}.')
-
-        # # Posterior-based expectation of posterior probablity
-        # postExpPostLikelihoods = np.mean(sampler.get_log_prob(flat=True)
-        # [self.nburn*self.nwalkers:])
-
-        # # Posterior-based expectation of prior densities
-        # postExpPrior = np.mean(self.log_prior(emcee_trace.T))
-
-        # # Posterior-based expectation of likelihoods
-        # postExpLikelihoods_emcee = postExpPostLikelihoods - postExpPrior
-
-        # # Calculate Kullback-Leibler Divergence
-        # KLD_emcee = postExpLikelihoods_emcee - logml_dict['logml']
-        # print("Kullback-Leibler divergence: %.5f"%KLD_emcee)
-
-        # # Information Entropy based on Entropy paper Eq. 38
-        # infEntropy_emcee = logml_dict['logml'] - postExpPrior -
-        #                    postExpLikelihoods_emcee
-        # print("Information Entropy: %.5f" %infEntropy_emcee)
-
         Posterior_df = pd.DataFrame(finalsamples, columns=par_names)
 
         return Posterior_df
@@ -397,8 +449,7 @@ class MCMC:
             returned otherwise an array.
 
         """
-
-        MetaModel = self.BayesOpts.MetaModel
+        MetaModel = self.BayesOpts.engine.MetaModel
         Discrepancy = self.BayesOpts.Discrepancy
 
         # Find the number of sigma2 parameters
@@ -417,7 +468,7 @@ class MCMC:
 
         for i in range(nsamples):
             # Check if the sample is within the parameters' range
-            if self._check_ranges(theta[i], params_range):
+            if _check_ranges(theta[i], params_range):
                 # Check if all dists are uniform, if yes priors are equal.
                 if all(MetaModel.input_obj.Marginals[i].dist_type == 'uniform'
                        for i in range(MetaModel.n_params)):
@@ -429,7 +480,7 @@ class MCMC:
 
                 # Check if bias term needs to be inferred
                 if Discrepancy.opt_sigma != 'B':
-                    if self._check_ranges(theta[i, -n_sigma2:],
+                    if _check_ranges(theta[i, -n_sigma2:],
                                           disc_bound_tuples):
                         if all('unif' in disc_marginals[i].dist_type for i in
                                range(Discrepancy.ExpDesign.ndim)):
@@ -463,21 +514,20 @@ class MCMC:
         """
 
         BayesOpts = self.BayesOpts
-        MetaModel = BayesOpts.MetaModel
+        MetaModel = BayesOpts.engine.MetaModel
         Discrepancy = self.BayesOpts.Discrepancy
 
         # Find the number of sigma2 parameters
         if Discrepancy.opt_sigma != 'B':
             disc_bound_tuples = Discrepancy.ExpDesign.bound_tuples
             n_sigma2 = len(disc_bound_tuples)
-        else:
-            n_sigma2 = -len(theta)
-        # Check if bias term needs to be inferred
-        if Discrepancy.opt_sigma != 'B':
+            # Check if bias term should be inferred
             sigma2 = theta[:, -n_sigma2:]
             theta = theta[:, :-n_sigma2]
         else:
+            n_sigma2 = -len(theta)
             sigma2 = None
+        
         theta = theta if theta.ndim != 1 else theta.reshape((1, -1))
 
         # Evaluate Model/MetaModel at theta
@@ -561,12 +611,11 @@ class MCMC:
         """
 
         BayesObj = self.BayesOpts
-        MetaModel = BayesObj.MetaModel
         Model = BayesObj.engine.Model
 
         if BayesObj.emulator:
             # Evaluate the MetaModel
-            mean_pred, std_pred = MetaModel.eval_metamodel(samples=theta)
+            mean_pred, std_pred = BayesObj.engine.MetaModel.eval_metamodel(samples=theta)
         else:
             # Evaluate the origModel
             mean_pred, std_pred = dict(), dict()
@@ -610,8 +659,7 @@ class MCMC:
             A error model.
 
         """
-        BayesObj = self.BayesOpts
-        MetaModel = BayesObj.MetaModel
+        MetaModel = self.BayesOpts.engine.MetaModel
 
         # Prepare the poster samples
         try:
@@ -636,274 +684,6 @@ class MCMC:
 
         # Train a GPR meta-model using MAP
         error_MetaModel = MetaModel.create_model_error(
-            BayesObj.BiasInputs, y_map, name='Calib')
+            self.BayesOpts.BiasInputs, y_map, name='Calib')
 
         return error_MetaModel
-
-    # -------------------------------------------------------------------------
-    def gelman_rubin(self, chain, return_var=False):
-        """
-        The potential scale reduction factor (PSRF) defined by the variance
-        within one chain, W, with the variance between chains B.
-        Both variances are combined in a weighted sum to obtain an estimate of
-        the variance of a parameter \\( \\theta \\).The square root of the
-        ratio of this estimates variance to the within chain variance is called
-        the potential scale reduction.
-        For a well converged chain it should approach 1. Values greater than
-        1.1 typically indicate that the chains have not yet fully converged.
-
-        Source: http://joergdietrich.github.io/emcee-convergence.html
-
-        https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py
-
-        Parameters
-        ----------
-        chain : array (n_walkers, n_steps, n_params)
-            The emcee ensamples.
-
-        Returns
-        -------
-        R_hat : float
-            The Gelman-Robin values.
-
-        """
-        m_chains, n_iters = chain.shape[:2]
-
-        # Calculate between-chain variance
-        θb = np.mean(chain, axis=1)
-        θbb = np.mean(θb, axis=0)
-        B_over_n = ((θbb - θb)**2).sum(axis=0)
-        B_over_n /= (m_chains - 1)
-
-        # Calculate within-chain variances
-        ssq = np.var(chain, axis=1, ddof=1)
-        W = np.mean(ssq, axis=0)
-
-        # (over) estimate of variance
-        var_θ = W * (n_iters - 1) / n_iters + B_over_n
-
-        if return_var:
-            return var_θ
-        else:
-            # The square root of the ratio of this estimates variance to the
-            # within chain variance
-            R_hat = np.sqrt(var_θ / W)
-            return R_hat
-
-    # -------------------------------------------------------------------------
-    def marginal_llk_emcee(self, sampler, nburn=None, logp=None, maxiter=1000):
-        """
-        The Bridge Sampling Estimator of the Marginal Likelihood based on
-        https://gist.github.com/junpenglao/4d2669d69ddfe1d788318264cdcf0583
-
-        Parameters
-        ----------
-        sampler : TYPE
-            MultiTrace, result of MCMC run.
-        nburn : int, optional
-            Number of burn-in step. The default is None.
-        logp : TYPE, optional
-            Model Log-probability function. The default is None.
-        maxiter : int, optional
-            Maximum number of iterations. The default is 1000.
-
-        Returns
-        -------
-        marg_llk : dict
-            Estimated Marginal log-Likelihood.
-
-        """
-        r0, tol1, tol2 = 0.5, 1e-10, 1e-4
-
-        if logp is None:
-            logp = sampler.log_prob_fn
-
-        # Split the samples into two parts
-        # Use the first 50% for fiting the proposal distribution
-        # and the second 50% in the iterative scheme.
-        if nburn is None:
-            mtrace = sampler.chain
-        else:
-            mtrace = sampler.chain[:, nburn:, :]
-
-        nchain, len_trace, nrofVars = mtrace.shape
-
-        N1_ = len_trace // 2
-        N1 = N1_*nchain
-        N2 = len_trace*nchain - N1
-
-        samples_4_fit = np.zeros((nrofVars, N1))
-        samples_4_iter = np.zeros((nrofVars, N2))
-        effective_n = np.zeros((nrofVars))
-
-        # matrix with already transformed samples
-        for var in range(nrofVars):
-
-            # for fitting the proposal
-            x = mtrace[:, :N1_, var]
-
-            samples_4_fit[var, :] = x.flatten()
-            # for the iterative scheme
-            x2 = mtrace[:, N1_:, var]
-            samples_4_iter[var, :] = x2.flatten()
-
-            # effective sample size of samples_4_iter, scalar
-            effective_n[var] = self._my_ESS(x2)
-
-        # median effective sample size (scalar)
-        neff = np.median(effective_n)
-
-        # get mean & covariance matrix and generate samples from proposal
-        m = np.mean(samples_4_fit, axis=1)
-        V = np.cov(samples_4_fit)
-        L = chol(V, lower=True)
-
-        # Draw N2 samples from the proposal distribution
-        gen_samples = m[:, None] + np.dot(
-            L, st.norm.rvs(0, 1, size=samples_4_iter.shape)
-            )
-
-        # Evaluate proposal distribution for posterior & generated samples
-        q12 = st.multivariate_normal.logpdf(samples_4_iter.T, m, V)
-        q22 = st.multivariate_normal.logpdf(gen_samples.T, m, V)
-
-        # Evaluate unnormalized posterior for posterior & generated samples
-        q11 = logp(samples_4_iter.T)
-        q21 = logp(gen_samples.T)
-
-        # Run iterative scheme:
-        tmp = self._iterative_scheme(
-            N1, N2, q11, q12, q21, q22, r0, neff, tol1, maxiter, 'r'
-            )
-        if ~np.isfinite(tmp['logml']):
-            warnings.warn(
-                "Logml could not be estimated within maxiter, rerunning with "
-                "adjusted starting value. Estimate might be more variable than"
-                " usual.")
-            # use geometric mean as starting value
-            r0_2 = np.sqrt(tmp['r_vals'][-2]*tmp['r_vals'][-1])
-            tmp = self._iterative_scheme(
-                q11, q12, q21, q22, r0_2, neff, tol2, maxiter, 'logml'
-                )
-
-        marg_llk = dict(
-            logml=tmp['logml'], niter=tmp['niter'], method="normal",
-            q11=q11, q12=q12, q21=q21, q22=q22
-            )
-        return marg_llk
-
-    # -------------------------------------------------------------------------
-    def _iterative_scheme(self, N1, N2, q11, q12, q21, q22, r0, neff, tol,
-                          maxiter, criterion):
-        """
-        Iterative scheme as proposed in Meng and Wong (1996) to estimate the
-        marginal likelihood
-
-        """
-        l1 = q11 - q12
-        l2 = q21 - q22
-        # To increase numerical stability,
-        # subtracting the median of l1 from l1 & l2 later
-        lstar = np.median(l1)
-        s1 = neff/(neff + N2)
-        s2 = N2/(neff + N2)
-        r = r0
-        r_vals = [r]
-        logml = np.log(r) + lstar
-        criterion_val = 1 + tol
-
-        i = 0
-        while (i <= maxiter) & (criterion_val > tol):
-            rold = r
-            logmlold = logml
-            numi = np.exp(l2 - lstar)/(s1 * np.exp(l2 - lstar) + s2 * r)
-            deni = 1/(s1 * np.exp(l1 - lstar) + s2 * r)
-            if np.sum(~np.isfinite(numi))+np.sum(~np.isfinite(deni)) > 0:
-                warnings.warn(
-                    """Infinite value in iterative scheme, returning NaN.
-                     Try rerunning with more samples.""")
-            r = (N1/N2) * np.sum(numi)/np.sum(deni)
-            r_vals.append(r)
-            logml = np.log(r) + lstar
-            i += 1
-            if criterion == 'r':
-                criterion_val = np.abs((r - rold)/r)
-            elif criterion == 'logml':
-                criterion_val = np.abs((logml - logmlold)/logml)
-
-        if i >= maxiter:
-            return dict(logml=np.NaN, niter=i, r_vals=np.asarray(r_vals))
-        else:
-            return dict(logml=logml, niter=i)
-
-    # -------------------------------------------------------------------------
-    def _my_ESS(self, x):
-        """
-        Compute the effective sample size of estimand of interest.
-        Vectorised implementation.
-        https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py
-
-
-        Parameters
-        ----------
-        x : array of shape (n_walkers, n_steps)
-            MCMC Samples.
-
-        Returns
-        -------
-        int
-            Effective sample size.
-
-        """
-        m_chains, n_iters = x.shape
-
-        def variogram(t):
-            variogram = ((x[:, t:] - x[:, :(n_iters - t)])**2).sum()
-            variogram /= (m_chains * (n_iters - t))
-            return variogram
-
-        post_var = self.gelman_rubin(x, return_var=True)
-
-        t = 1
-        rho = np.ones(n_iters)
-        negative_autocorr = False
-
-        # Iterate until the sum of consecutive estimates of autocorrelation is
-        # negative
-        while not negative_autocorr and (t < n_iters):
-            rho[t] = 1 - variogram(t) / (2 * post_var)
-
-            if not t % 2:
-                negative_autocorr = sum(rho[t-1:t+1]) < 0
-
-            t += 1
-
-        return int(m_chains*n_iters / (1 + 2*rho[1:t].sum()))
-
-    # -------------------------------------------------------------------------
-    def _check_ranges(self, theta, ranges):
-        """
-        This function checks if theta lies in the given ranges.
-
-        Parameters
-        ----------
-        theta : array
-            Proposed parameter set.
-        ranges : nested list
-            List of the praremeter ranges.
-
-        Returns
-        -------
-        c : bool
-            If it lies in the given range, it return True else False.
-
-        """
-        c = True
-        # traverse in the list1
-        for i, bounds in enumerate(ranges):
-            x = theta[i]
-            # condition check
-            if x < bounds[0] or x > bounds[1]:
-                c = False
-                return c
-        return c
diff --git a/src/bayesvalidrox/pylink/pylink.py b/src/bayesvalidrox/pylink/pylink.py
index 227a51ab38cd834e7e85f6193d83563c7ed3437a..637f42317e6f97815e51ce0b331b61c03f26a85b 100644
--- a/src/bayesvalidrox/pylink/pylink.py
+++ b/src/bayesvalidrox/pylink/pylink.py
@@ -231,7 +231,7 @@ class PyLinkForwardModel(object):
                 self.observations_valid = self.observations_valid
             else:
                 raise Exception("Please provide the observation data as a "
-                                "dictionary via observations attribute or pass"
+                                "dictionary via observations_valid attribute or pass"
                                 " the csv-file path to MeasurementFile "
                                 "attribute")
             # Compute the number of observation
diff --git a/src/bayesvalidrox/surrogate_models/__init__.py b/src/bayesvalidrox/surrogate_models/__init__.py
index 70bfb20f570464c2907a0a4128f4ed99b6c13736..6d8ce9f1c58fa154f2af63f0fb5a44097695df33 100644
--- a/src/bayesvalidrox/surrogate_models/__init__.py
+++ b/src/bayesvalidrox/surrogate_models/__init__.py
@@ -1,7 +1,12 @@
 # -*- coding: utf-8 -*-
-
+from .engine import Engine
+from .exp_designs import ExpDesigns
+from .input_space import InputSpace
 from .surrogate_models import MetaModel
 
 __all__ = [
-    "MetaModel"
+    "MetaModel",
+    "InputSpace",
+    "ExpDesigns",
+    "Engine"
     ]
diff --git a/src/bayesvalidrox/surrogate_models/desktop.ini b/src/bayesvalidrox/surrogate_models/desktop.ini
new file mode 100644
index 0000000000000000000000000000000000000000..632de13ae6b61cecf0d9fdbf9c97cfb16bfb51a4
--- /dev/null
+++ b/src/bayesvalidrox/surrogate_models/desktop.ini
@@ -0,0 +1,2 @@
+[LocalizedFileNames]
+exploration.py=@exploration.py,0
diff --git a/src/bayesvalidrox/surrogate_models/engine.py b/src/bayesvalidrox/surrogate_models/engine.py
index 42307d4770d4ae23a40107dfea64057aac682c23..1c8fa56e6acf84fd53cabf92843e04dcce4835c0 100644
--- a/src/bayesvalidrox/surrogate_models/engine.py
+++ b/src/bayesvalidrox/surrogate_models/engine.py
@@ -3,35 +3,27 @@
 Engine to train the surrogate
 
 """
-import copy
 from copy import deepcopy, copy
-import h5py
 import joblib
+from joblib import Parallel, delayed
+import matplotlib.pyplot as plt
+import multiprocessing
 import numpy as np
 import os
-
+import pandas as pd
+import pathlib
+import scipy.optimize as opt
 from scipy import stats, signal, linalg, sparse
 from scipy.spatial import distance
-from tqdm import tqdm
-import scipy.optimize as opt
 from sklearn.metrics import mean_squared_error
-import multiprocessing
-import matplotlib.pyplot as plt
-import pandas as pd
-import sys
 import seaborn as sns
-from joblib import Parallel, delayed
-
+import sys
+from tqdm import tqdm
 
 from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
 from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
 from .exploration import Exploration
-import pathlib
 
-#from .inputs import Input
-#from .exp_designs import ExpDesigns
-#from .surrogate_models import MetaModel
-#from bayesvalidrox.post_processing.post_processing import PostProcessing
 
 def hellinger_distance(P, Q):
     """
@@ -55,17 +47,17 @@ def hellinger_distance(P, Q):
 
     """
     P = np.array(P)
-    Q= np.array(Q)
-    
+    Q = np.array(Q)
+
     mu1 = P.mean()
     Sigma1 = np.std(P)
 
     mu2 = Q.mean()
     Sigma2 = np.std(Q)
 
-    term1 = np.sqrt(2*Sigma1*Sigma2 / (Sigma1**2 + Sigma2**2))
+    term1 = np.sqrt(2 * Sigma1 * Sigma2 / (Sigma1 ** 2 + Sigma2 ** 2))
 
-    term2 = np.exp(-.25 * (mu1 - mu2)**2 / (Sigma1**2 + Sigma2**2))
+    term2 = np.exp(-.25 * (mu1 - mu2) ** 2 / (Sigma1 ** 2 + Sigma2 ** 2))
 
     H_squared = 1 - term1 * term2
 
@@ -100,9 +92,10 @@ def logpdf(x, mean, cov):
 
     return log_lik
 
+
 def subdomain(Bounds, n_new_samples):
     """
-    Divides a domain defined by Bounds into sub domains.
+    Divides a domain defined by Bounds into subdomains.
 
     Parameters
     ----------
@@ -110,8 +103,6 @@ def subdomain(Bounds, n_new_samples):
         List of lower and upper bounds.
     n_new_samples : int
         Number of samples to divide the domain for.
-    n_params : int
-        The number of params to build the subdomains for
 
     Returns
     -------
@@ -127,23 +118,41 @@ def subdomain(Bounds, n_new_samples):
         LinSpace[i] = np.linspace(start=Bounds[i][0], stop=Bounds[i][1],
                                   num=n_subdomains)
     Subdomains = []
-    for k in range(n_subdomains-1):
+    for k in range(n_subdomains - 1):
         mylist = []
         for i in range(n_params):
-            mylist.append((LinSpace[i, k+0], LinSpace[i, k+1]))
+            mylist.append((LinSpace[i, k + 0], LinSpace[i, k + 1]))
         Subdomains.append(tuple(mylist))
 
     return Subdomains
 
-class Engine():
-    
-    
+
+class Engine:
+
     def __init__(self, MetaMod, Model, ExpDes):
         self.MetaModel = MetaMod
         self.Model = Model
         self.ExpDesign = ExpDes
         self.parallel = False
-        
+        self.trained = False
+
+        # Init other parameters
+        self.bound_tuples = None
+        self.errorModel = None
+        self.LCerror = None
+        self.n_obs = None
+        self.observations = None
+        self.out_names = None
+        self.seqMinDist = None
+        self.seqRMSEStd = None
+        self.SeqKLD = None
+        self.SeqDistHellinger = None
+        self.SeqBME = None
+        self.seqValidError = None
+        self.SeqModifiedLOO = None
+        self.valid_likelihoods = None
+        self._y_hat_prev = None
+
     def start_engine(self) -> None:
         """
         Do all the preparations that need to be run before the actual training
@@ -155,9 +164,8 @@ class Engine():
         """
         self.out_names = self.Model.Output.names
         self.MetaModel.out_names = self.out_names
-        
-        
-    def train_normal(self, parallel = False, verbose = False, save = False) -> None:
+
+    def train_normal(self, parallel=False, verbose=False, save=False) -> None:
         """
         Trains surrogate on static samples only.
         Samples are taken from the experimental design and the specified 
@@ -170,10 +178,12 @@ class Engine():
         None
 
         """
-            
+        if self.out_names == 'None':
+            self.start_engine()
+
         ExpDesign = self.ExpDesign
         MetaModel = self.MetaModel
-        
+
         # Read ExpDesign (training and targets) from the provided hdf5
         if ExpDesign.hdf5_file is not None:
             # TODO: need to run 'generate_ED' as well after this or not?
@@ -182,51 +192,52 @@ class Engine():
             # Check if an old hdf5 file exists: if yes, rename it
             hdf5file = f'ExpDesign_{self.Model.name}.hdf5'
             if os.path.exists(hdf5file):
-           #     os.rename(hdf5file, 'old_'+hdf5file)
+                #     os.rename(hdf5file, 'old_'+hdf5file)
                 file = pathlib.Path(hdf5file)
                 file.unlink()
 
         # Prepare X samples 
         # For training the surrogate use ExpDesign.X_tr, ExpDesign.X is for the model to run on 
         ExpDesign.generate_ED(ExpDesign.n_init_samples,
-                                              transform=True,
-                                              max_pce_deg=np.max(MetaModel.pce_deg))
-        
+                              transform=True,
+                              max_pce_deg=np.max(MetaModel.pce_deg))
+
         # Run simulations at X 
         if not hasattr(ExpDesign, 'Y') or ExpDesign.Y is None:
             print('\n Now the forward model needs to be run!\n')
-            ED_Y, up_ED_X = self.Model.run_model_parallel(ExpDesign.X, mp = parallel)
+            ED_Y, up_ED_X = self.Model.run_model_parallel(ExpDesign.X, mp=parallel)
             ExpDesign.Y = ED_Y
         else:
             # Check if a dict has been passed.
             if not type(ExpDesign.Y) is dict:
                 raise Exception('Please provide either a dictionary or a hdf5'
                                 'file to ExpDesign.hdf5_file argument.')
-                
+
         # Separate output dict and x-values
         if 'x_values' in ExpDesign.Y:
             ExpDesign.x_values = ExpDesign.Y['x_values']
             del ExpDesign.Y['x_values']
         else:
             print('No x_values are given, this might lead to issues during PostProcessing')
-        
-        
+
         # Fit the surrogate
         MetaModel.fit(ExpDesign.X, ExpDesign.Y, parallel, verbose)
-        
+
         # Save what there is to save
         if save:
             # Save surrogate
             with open(f'surrogates/surrogate_{self.Model.name}.pk1', 'wb') as output:
                 joblib.dump(MetaModel, output, 2)
-                    
+
             # Zip the model run directories
-            if self.Model.link_type.lower() == 'pylink' and\
-               self.ExpDesign.sampling_method.lower() != 'user':
+            if self.Model.link_type.lower() == 'pylink' and \
+                    self.ExpDesign.sampling_method.lower() != 'user':
                 self.Model.zip_subdirs(self.Model.name, f'{self.Model.name}_')
-                
-            
-    def train_sequential(self, parallel = False, verbose = False) -> None:
+
+        # Set that training was done
+        self.trained = True
+
+    def train_sequential(self, parallel=False, verbose=False) -> None:
         """
         Train the surrogate in a sequential manner.
         First build and train evereything on the static samples, then iterate
@@ -237,22 +248,21 @@ class Engine():
         None
 
         """
-        #self.train_normal(parallel, verbose)
+        # self.train_normal(parallel, verbose)
         self.parallel = parallel
         self.train_seq_design(parallel, verbose)
-        
-        
+
     # -------------------------------------------------------------------------
     def eval_metamodel(self, samples=None, nsamples=None,
                        sampling_method='random', return_samples=False):
         """
-        Evaluates meta-model at the requested samples. One can also generate
+        Evaluates metamodel at the requested samples. One can also generate
         nsamples.
 
         Parameters
         ----------
         samples : array of shape (n_samples, n_params), optional
-            Samples to evaluate meta-model at. The default is None.
+            Samples to evaluate metamodel at. The default is None.
         nsamples : int, optional
             Number of samples to generate, if no `samples` is provided. The
             default is None.
@@ -275,7 +285,7 @@ class Engine():
             samples = self.ExpDesign.generate_samples(
                 nsamples,
                 sampling_method
-                )
+            )
 
         # Transformation to other space is to be done in the MetaModel
         # TODO: sort the transformations better
@@ -285,10 +295,9 @@ class Engine():
             return mean_pred, std_pred, samples
         else:
             return mean_pred, std_pred
-        
-        
+
     # -------------------------------------------------------------------------
-    def train_seq_design(self, parallel = False, verbose = False):
+    def train_seq_design(self, parallel=False, verbose=False):
         """
         Starts the adaptive sequential design for refining the surrogate model
         by selecting training points in a sequential manner.
@@ -300,7 +309,7 @@ class Engine():
 
         """
         self.parallel = parallel
-        
+
         # Initialization
         self.SeqModifiedLOO = {}
         self.seqValidError = {}
@@ -310,14 +319,13 @@ class Engine():
         self.seqRMSEMean = {}
         self.seqRMSEStd = {}
         self.seqMinDist = []
-        
-        if not hasattr(self.MetaModel, 'valid_samples'):
+
+        if not hasattr(self.MetaModel, 'valid_samples') or self.MetaModel.valid_samples is None:
             self.ExpDesign.valid_samples = []
             self.ExpDesign.valid_model_runs = []
             self.valid_likelihoods = []
-        
-        validError = None
 
+        # validError = None
 
         # Determine the metamodel type
         if self.MetaModel.meta_model_type.lower() != 'gpe':
@@ -336,14 +344,15 @@ class Engine():
         n_replication = self.ExpDesign.n_replication
         util_func = self.ExpDesign.util_func
         output_name = self.out_names
-        
+
         # Handle if only one UtilityFunctions is provided
         if not isinstance(util_func, list):
             util_func = [self.ExpDesign.util_func]
 
         # Read observations or MCReference
         # TODO: recheck the logic in this if statement
-        if (len(self.Model.observations) != 0 or self.Model.meas_file is not None) and hasattr(self.MetaModel, 'Discrepancy'):
+        if (len(self.Model.observations) != 0 or self.Model.meas_file is not None) and hasattr(self.MetaModel,
+                                                                                               'Discrepancy'):
             self.observations = self.Model.read_observation()
             obs_data = self.observations
         else:
@@ -351,15 +360,16 @@ class Engine():
             # TODO: TotalSigma2 not defined if not in this else???
             # TODO: no self.observations if in here
             TotalSigma2 = {}
-            
+
         # ---------- Initial self.MetaModel ----------
-        self.train_normal(parallel = parallel, verbose=verbose)
-        
+        if not self.trained:
+            self.train_normal(parallel=parallel, verbose=verbose)
+
         initMetaModel = deepcopy(self.MetaModel)
 
         # Validation error if validation set is provided.
         if self.ExpDesign.valid_model_runs:
-            init_rmse, init_valid_error = self._validError(initMetaModel)
+            init_rmse, init_valid_error = self._validError()  # initMetaModel)
             init_valid_error = list(init_valid_error.values())
         else:
             init_rmse = None
@@ -379,7 +389,7 @@ class Engine():
             if post_snapshot:
                 parNames = self.ExpDesign.par_names
                 print('Posterior snapshot (initial) is being plotted...')
-                self.__posteriorPlot(init_post, parNames, 'SeqPosterior_init')
+                self._posteriorPlot(init_post, parNames, 'SeqPosterior_init')
 
         # Check the convergence of the Mean & Std
         if mc_ref and pce:
@@ -390,13 +400,14 @@ class Engine():
         # Read the initial experimental design
         Xinit = self.ExpDesign.X
         init_n_samples = len(self.ExpDesign.X)
-        initYprev = self.ExpDesign.Y#initMetaModel.ModelOutputDict
-        #self.MetaModel.ModelOutputDict = self.ExpDesign.Y
+        initYprev = self.ExpDesign.Y  # initMetaModel.ModelOutputDict
+        # self.MetaModel.ModelOutputDict = self.ExpDesign.Y
         initLCerror = initMetaModel.LCerror
         n_itrs = max_n_samples - init_n_samples
 
-        ## Get some initial statistics
+        # Get some initial statistics
         # Read the initial ModifiedLOO
+        init_mod_LOO = []
         if pce:
             Scores_all, varExpDesignY = [], []
             for out_name in output_name:
@@ -412,14 +423,14 @@ class Engine():
 
             Scores = [item for sublist in Scores_all for item in sublist]
             weights = [item for sublist in varExpDesignY for item in sublist]
-            init_mod_LOO = [np.average([1-score for score in Scores],
+            init_mod_LOO = [np.average([1 - score for score in Scores],
                                        weights=weights)]
 
         prevMetaModel_dict = {}
-        #prevExpDesign_dict = {}
+        # prevExpDesign_dict = {}
         # Can run sequential design multiple times for comparison
         for repIdx in range(n_replication):
-            print(f'\n>>>> Replication: {repIdx+1}<<<<')
+            print(f'\n>>>> Replication: {repIdx + 1}<<<<')
 
             # util_func: the function to use inside the type of exploitation
             for util_f in util_func:
@@ -435,7 +446,6 @@ class Engine():
                 Yprev = initYprev
 
                 Xfull = []
-                Yfull = []
 
                 # Store the initial ModifiedLOO
                 if pce:
@@ -457,23 +467,23 @@ class Engine():
 
                 # ------- Start Sequential Experimental Design -------
                 postcnt = 1
-                for itr_no in range(1, n_itrs+1):
+                for itr_no in range(1, n_itrs + 1):
                     print(f'\n>>>> Iteration number {itr_no} <<<<')
 
                     # Save the metamodel prediction before updating
                     prevMetaModel_dict[itr_no] = deepcopy(self.MetaModel)
-                    #prevExpDesign_dict[itr_no] = deepcopy(self.ExpDesign)
+                    # prevExpDesign_dict[itr_no] = deepcopy(self.ExpDesign)
                     if itr_no > 1:
-                        pc_model = prevMetaModel_dict[itr_no-1]
+                        pc_model = prevMetaModel_dict[itr_no - 1]
                         self._y_hat_prev, _ = pc_model.eval_metamodel(
                             samples=Xfull[-1].reshape(1, -1))
-                        del prevMetaModel_dict[itr_no-1]
+                        del prevMetaModel_dict[itr_no - 1]
 
                     # Optimal Bayesian Design
-                    #self.MetaModel.ExpDesignFlag = 'sequential'
+                    # self.MetaModel.ExpDesignFlag = 'sequential'
                     Xnew, updatedPrior = self.choose_next_sample(TotalSigma2,
-                                                            n_canddidate,
-                                                            util_f)
+                                                                 n_canddidate,
+                                                                 util_f)
                     S = np.min(distance.cdist(Xinit, Xnew, 'euclidean'))
                     self.seqMinDist.append(S)
                     print(f"\nmin Dist from OldExpDesign: {S:2f}")
@@ -482,20 +492,19 @@ class Engine():
                     # Evaluate the full model response at the new sample
                     Ynew, _ = self.Model.run_model_parallel(
                         Xnew, prevRun_No=total_n_samples
-                        )
+                    )
                     total_n_samples += Xnew.shape[0]
 
                     # ------ Plot the surrogate model vs Origninal Model ------
-                    if hasattr(self.ExpDesign, 'adapt_verbose') and \
-                       self.ExpDesign.adapt_verbose:
+                    if self.ExpDesign.adapt_verbose:
                         from .adaptPlot import adaptPlot
                         y_hat, std_hat = self.MetaModel.eval_metamodel(
                             samples=Xnew
-                            )
+                        )
                         adaptPlot(
                             self.MetaModel, Ynew, y_hat, std_hat,
                             plotED=False
-                            )
+                        )
 
                     # -------- Retrain the surrogate model -------
                     # Extend new experimental design
@@ -509,11 +518,11 @@ class Engine():
                     # Pass new design to the metamodel object
                     self.ExpDesign.sampling_method = 'user'
                     self.ExpDesign.X = Xfull
-                    #self.ExpDesign.Y = self.MetaModel.ModelOutputDict
+                    # self.ExpDesign.Y = self.MetaModel.ModelOutputDict
 
                     # Save the Experimental Design for next iteration
                     Xprev = Xfull
-                    Yprev = self.ExpDesign.Y 
+                    Yprev = self.ExpDesign.Y
 
                     # Pass the new prior as the input
                     # TODO: another look at this - no difference apc to pce to gpe?
@@ -550,7 +559,7 @@ class Engine():
                         weights = [item for sublist in varExpDesignY for item
                                    in sublist]
                         ModifiedLOO = [np.average(
-                            [1-score for score in Scores], weights=weights)]
+                            [1 - score for score in Scores], weights=weights)]
 
                         print('\n')
                         print(f"Updated ModifiedLOO {util_f}:\n", ModifiedLOO)
@@ -558,7 +567,7 @@ class Engine():
 
                     # Compute the validation error
                     if self.ExpDesign.valid_model_runs:
-                        rmse, validError = self._validError(self.MetaModel)
+                        rmse, validError = self._validError()  # self.MetaModel)
                         ValidError = list(validError.values())
                     else:
                         rmse = None
@@ -586,8 +595,8 @@ class Engine():
                         if post_snapshot and postcnt % step_snapshot == 0:
                             parNames = self.ExpDesign.par_names
                             print('Posterior snapshot is being plotted...')
-                            self.__posteriorPlot(Posterior, parNames,
-                                                 f'SeqPosterior_{postcnt}')
+                            self._posteriorPlot(Posterior, parNames,
+                                                f'SeqPosterior_{postcnt}')
                         postcnt += 1
 
                     # Check the convergence of the Mean&Std
@@ -617,11 +626,11 @@ class Engine():
                     if len(obs_data) != 0:
                         del out
                     print()
-                    print('-'*50)
+                    print('-' * 50)
                     print()
 
                 # Store updated ModifiedLOO and BME in dictonary
-                strKey = f'{util_f}_rep_{repIdx+1}'
+                strKey = f'{util_f}_rep_{repIdx + 1}'
                 if pce:
                     self.SeqModifiedLOO[strKey] = SeqModifiedLOO
                 if len(self.ExpDesign.valid_model_runs) != 0:
@@ -632,7 +641,7 @@ class Engine():
                     self.SeqBME[strKey] = SeqBME
                     self.SeqKLD[strKey] = SeqKLD
                 if hasattr(self.MetaModel, 'valid_likelihoods') and \
-                   self.valid_likelihoods:
+                        self.valid_likelihoods:
                     self.SeqDistHellinger[strKey] = SeqDistHellinger
                 if mc_ref and pce:
                     self.seqRMSEMean[strKey] = seqRMSEMean
@@ -655,7 +664,7 @@ class Engine():
             Candidate samples.
         index : int
             Model output index.
-        UtilMethod : string, optional
+        util_func : string, optional
             Exploitation utility function. The default is 'Entropy'.
 
         Returns
@@ -673,10 +682,11 @@ class Engine():
         X_can = X_can.reshape(1, -1)
         Y_PC_can, std_PC_can = MetaModel.eval_metamodel(samples=X_can)
 
+        score = None
         if util_func.lower() == 'alm':
             # ----- Entropy/MMSE/active learning MacKay(ALM)  -----
             # Compute perdiction variance of the old model
-            canPredVar = {key: std_PC_can[key]**2 for key in out_names}
+            canPredVar = {key: std_PC_can[key] ** 2 for key in out_names}
 
             varPCE = np.zeros((len(out_names), X_can.shape[0]))
             for KeyIdx, key in enumerate(out_names):
@@ -691,7 +701,7 @@ class Engine():
 
             # Compute perdiction error and variance of the old model
             predError = {key: Y_PC_can[key] for key in out_names}
-            canPredVar = {key: std_PC_can[key]**2 for key in out_names}
+            canPredVar = {key: std_PC_can[key] ** 2 for key in out_names}
 
             # Compute perdiction error and variance of the old model
             # Eq (5) from Liu et al.(2018)
@@ -699,10 +709,10 @@ class Engine():
             for KeyIdx, key in enumerate(out_names):
                 residual = predError[key] - out_dict_y[key][int(index)]
                 var = canPredVar[key]
-                EIGF_PCE[KeyIdx] = np.max(residual**2 + var, axis=1)
+                EIGF_PCE[KeyIdx] = np.max(residual ** 2 + var, axis=1)
             score = np.max(EIGF_PCE, axis=0)
 
-        return -1 * score   # -1 is for minimization instead of maximization
+        return -1 * score  # -1 is for minimization instead of maximization
 
     # -------------------------------------------------------------------------
     def util_BayesianActiveDesign(self, y_hat, std, sigma2Dict, var='DKL'):
@@ -716,8 +726,8 @@ class Engine():
 
         Parameters
         ----------
-        X_can : array of shape (n_samples, n_params)
-            Candidate samples.
+        y_hat : unknown
+        std : unknown
         sigma2Dict : dict
             A dictionary containing the measurement errors (sigma^2).
         var : string, optional
@@ -742,14 +752,12 @@ class Engine():
         # Sample a distribution for a normal dist
         # with Y_mean_can as the mean and Y_std_can as std.
         Y_MC, std_MC = {}, {}
-        logPriorLikelihoods = np.zeros((mc_size))
-       # print(y_hat)
-       # print(list[y_hat])
+        logPriorLikelihoods = np.zeros(mc_size)
         for key in list(y_hat):
-            cov = np.diag(std[key]**2)
-           # print(y_hat[key], cov)
+            cov = np.diag(std[key] ** 2)
+            print(key, y_hat[key], std[key])
             # TODO: added the allow_singular = True here
-            rv = stats.multivariate_normal(mean=y_hat[key], cov=cov,)
+            rv = stats.multivariate_normal(mean=y_hat[key], cov=cov, allow_singular=True)
             Y_MC[key] = rv.rvs(size=mc_size)
             logPriorLikelihoods += rv.logpdf(Y_MC[key])
             std_MC[key] = np.zeros((mc_size, y_hat[key].shape[0]))
@@ -757,16 +765,16 @@ class Engine():
         #  Likelihood computation (Comparison of data and simulation
         #  results via PCE with candidate design)
         likelihoods = self._normpdf(Y_MC, std_MC, obs_data, sigma2Dict)
-        
+
         # Rejection Step
         # Random numbers between 0 and 1
         unif = np.random.rand(1, mc_size)[0]
 
         # Reject the poorly performed prior
-        accepted = (likelihoods/np.max(likelihoods)) >= unif
+        accepted = (likelihoods / np.max(likelihoods)) >= unif
 
         # Prior-based estimation of BME
-        logBME = np.log(np.nanmean(likelihoods), dtype=np.longdouble)#float128)
+        logBME = np.log(np.nanmean(likelihoods), dtype=np.longdouble)  # float128)
 
         # Posterior-based expectation of likelihoods
         postLikelihoods = likelihoods[accepted]
@@ -778,6 +786,7 @@ class Engine():
         # Utility function Eq.2 in Ref. (2)
         # Posterior covariance matrix after observing data y
         # Kullback-Leibler Divergence (Sergey's paper)
+        U_J_d = None
         if var == 'DKL':
 
             # TODO: Calculate the correction factor for BME
@@ -813,7 +822,7 @@ class Engine():
             AIC = -2 * maxlogL + 2 * nModelParams
             # 2 * nModelParams * (nModelParams+1) / (n_obs-nModelParams-1)
             penTerm = 0
-            U_J_d = 1*(AIC + penTerm)
+            U_J_d = 1 * (AIC + penTerm)
 
         # Deviance information criterion
         elif var == 'DIC':
@@ -821,7 +830,7 @@ class Engine():
             N_star_p = 0.5 * np.var(np.log(likelihoods[likelihoods != 0]))
             Likelihoods_theta_mean = self._normpdf(
                 y_hat, std, obs_data, sigma2Dict
-                )
+            )
             DIC = -2 * np.log(Likelihoods_theta_mean) + 2 * N_star_p
 
             U_J_d = DIC
@@ -838,7 +847,7 @@ class Engine():
         del Y_MC
         del std_MC
 
-        return -1 * U_J_d   # -1 is for minimization instead of maximization
+        return -1 * U_J_d  # -1 is for minimization instead of maximization
 
     # -------------------------------------------------------------------------
     def util_BayesianDesign(self, X_can, X_MC, sigma2Dict, var='DKL'):
@@ -849,6 +858,7 @@ class Engine():
         ----------
         X_can : array of shape (n_samples, n_params)
             Candidate samples.
+        X_MC : unknown
         sigma2Dict : dict
             A dictionary containing the measurement errors (sigma^2).
         var : string, optional
@@ -887,11 +897,11 @@ class Engine():
         for key in oldExpDesignY.keys():
             NewExpDesignY[key] = np.vstack(
                 (oldExpDesignY[key], Y_PC_can[key])
-                )
+            )
 
         engine_can.ExpDesign.sampling_method = 'user'
         engine_can.ExpDesign.X = NewExpDesignX
-        #engine_can.ModelOutputDict = NewExpDesignY
+        # engine_can.ModelOutputDict = NewExpDesignY
         engine_can.ExpDesign.Y = NewExpDesignY
 
         # Train the model for the observed data using x_can
@@ -899,7 +909,7 @@ class Engine():
         engine_can.start_engine()
         engine_can.train_normal(parallel=False)
         engine_can.MetaModel.fit(NewExpDesignX, NewExpDesignY)
-#        engine_can.train_norm_design(parallel=False)
+        #        engine_can.train_norm_design(parallel=False)
 
         # Set the ExpDesign to its original values
         engine_can.ExpDesign.X = oldExpDesignX
@@ -907,7 +917,7 @@ class Engine():
         engine_can.ExpDesign.Y = oldExpDesignY
 
         if var.lower() == 'mi':
-            # Mutual information based on Krause et al
+            # Mutual information based on Krause et al.
             # Adapted from Beck & Guillas (MICE) paper
             _, std_PC_can = engine_can.MetaModel.eval_metamodel(samples=X_can)
             std_can = {key: std_PC_can[key] for key in out_names}
@@ -916,7 +926,7 @@ class Engine():
 
             varPCE = np.zeros((len(out_names)))
             for i, key in enumerate(out_names):
-                varPCE[i] = np.mean(std_old[key]**2/std_can[key]**2)
+                varPCE[i] = np.mean(std_old[key] ** 2 / std_can[key] ** 2)
             score = np.mean(varPCE)
 
             return -1 * score
@@ -932,9 +942,9 @@ class Engine():
             # Compute the score
             score = []
             for i, key in enumerate(out_names):
-                pce_var = Y_MC_std_can[key]**2
-                pce_var_can = Y_MC_std[key]**2
-                score.append(np.mean(pce_var-pce_var_can, axis=0))
+                pce_var = Y_MC_std_can[key] ** 2
+                pce_var_can = Y_MC_std[key] ** 2
+                score.append(np.mean(pce_var - pce_var_can, axis=0))
             score = np.mean(score)
 
             return -1 * score
@@ -944,13 +954,14 @@ class Engine():
         MCsize = X_MC.shape[0]
         ESS = 0
 
-        while ((ESS > MCsize) or (ESS < 1)):
+        likelihoods = None
+        while (ESS > MCsize) or (ESS < 1):
 
             # Enriching Monte Carlo samples if need be
             if ESS != 0:
                 X_MC = self.ExpDesign.generate_samples(
                     MCsize, 'random'
-                    )
+                )
 
             # Evaluate the MetaModel at the given samples
             Y_MC, std_MC = PCE_Model_can.eval_metamodel(samples=X_MC)
@@ -959,13 +970,13 @@ class Engine():
             # results via PCE with candidate design)
             likelihoods = self._normpdf(
                 Y_MC, std_MC, self.observations, sigma2Dict
-                )
+            )
 
             # Check the Effective Sample Size (1<ESS<MCsize)
-            ESS = 1 / np.sum(np.square(likelihoods/np.sum(likelihoods)))
+            ESS = 1 / np.sum(np.square(likelihoods / np.sum(likelihoods)))
 
             # Enlarge sample size if it doesn't fulfill the criteria
-            if ((ESS > MCsize) or (ESS < 1)):
+            if (ESS > MCsize) or (ESS < 1):
                 print("--- increasing MC size---")
                 MCsize *= 10
                 ESS = 0
@@ -975,19 +986,20 @@ class Engine():
         unif = np.random.rand(1, MCsize)[0]
 
         # Reject the poorly performed prior
-        accepted = (likelihoods/np.max(likelihoods)) >= unif
+        accepted = (likelihoods / np.max(likelihoods)) >= unif
 
         # -------------------- Utility functions --------------------
         # Utility function Eq.2 in Ref. (2)
         # Kullback-Leibler Divergence (Sergey's paper)
+        U_J_d = None
         if var == 'DKL':
 
             # Prior-based estimation of BME
-            logBME = np.log(np.nanmean(likelihoods, dtype=np.longdouble))#float128))
+            logBME = np.log(np.nanmean(likelihoods, dtype=np.longdouble))  # float128))
 
             # Posterior-based expectation of likelihoods
-            postLikelihoods = likelihoods[accepted]
-            postExpLikelihoods = np.mean(np.log(postLikelihoods))
+            # postLikelihoods = likelihoods[accepted]
+            # postExpLikelihoods = np.mean(np.log(postLikelihoods))
 
             # Haun et al implementation
             U_J_d = np.mean(np.log(likelihoods[likelihoods != 0]) - logBME)
@@ -1022,6 +1034,8 @@ class Engine():
             postExpLikelihoods = np.mean(np.log(postLikelihoods))
 
             # Posterior-based expectation of prior densities
+            logPriorLikelihoods = []
+            logPriorLikelihoods[accepted] = None  # TODO: this is not defined here, just a fix
             postExpPrior = np.mean(logPriorLikelihoods[accepted])
 
             infEntropy = logBME - postExpPrior - postExpLikelihoods
@@ -1048,8 +1062,7 @@ class Engine():
         del Y_MC
         del std_MC
 
-        return -1 * U_J_d   # -1 is for minimization instead of maximization
-
+        return -1 * U_J_d  # -1 is for minimization instead of maximization
 
     # -------------------------------------------------------------------------
     def run_util_func(self, method, candidates, index, sigma2Dict=None,
@@ -1092,7 +1105,7 @@ class Engine():
 
         elif method.lower() == 'bayesactdesign':
             NCandidate = candidates.shape[0]
-            U_J_d = np.zeros((NCandidate))
+            U_J_d = np.zeros(NCandidate)
             # Evaluate all candidates
             y_can, std_can = self.MetaModel.eval_metamodel(samples=candidates)
             # loop through candidates
@@ -1100,20 +1113,20 @@ class Engine():
                                    desc="BAL Design"):
                 y_hat = {key: items[idx] for key, items in y_can.items()}
                 std = {key: items[idx] for key, items in std_can.items()}
-                
-               # print(y_hat)
-               # print(std)
+
+                # print(y_hat)
+                # print(std)
                 U_J_d[idx] = self.util_BayesianActiveDesign(
                     y_hat, std, sigma2Dict, var)
 
         elif method.lower() == 'bayesoptdesign':
             NCandidate = candidates.shape[0]
-            U_J_d = np.zeros((NCandidate))
+            U_J_d = np.zeros(NCandidate)
             for idx, X_can in tqdm(enumerate(candidates), ascii=True,
                                    desc="OptBayesianDesign"):
                 U_J_d[idx] = self.util_BayesianDesign(X_can, X_MC, sigma2Dict,
                                                       var)
-        return (index, -1 * U_J_d)
+        return index, -1 * U_J_d
 
     # -------------------------------------------------------------------------
     def dual_annealing(self, method, Bounds, sigma2Dict, var, Run_No,
@@ -1130,6 +1143,7 @@ class Engine():
             List of lower and upper boundaries of parameters.
         sigma2Dict : dict
             A dictionary containing the measurement errors (sigma^2).
+        var : string
         Run_No : int
             Run number.
         verbose : bool, optional
@@ -1147,13 +1161,14 @@ class Engine():
         Model = self.Model
         max_func_itr = self.ExpDesign.max_func_itr
 
-        if method == 'VarOptDesign':
+        Res_Global = None
+        if method.lower() == 'varoptdesign':
             Res_Global = opt.dual_annealing(self.util_VarBasedDesign,
                                             bounds=Bounds,
                                             args=(Model, var),
                                             maxfun=max_func_itr)
 
-        elif method == 'BayesOptDesign':
+        elif method.lower() == 'bayesoptdesign':
             Res_Global = opt.dual_annealing(self.util_BayesianDesign,
                                             bounds=Bounds,
                                             args=(Model, sigma2Dict, var),
@@ -1163,7 +1178,7 @@ class Engine():
             print(f"Global minimum: xmin = {Res_Global.x}, "
                   f"f(xmin) = {Res_Global.fun:.6f}, nfev = {Res_Global.nfev}")
 
-        return (Run_No, Res_Global.x)
+        return Run_No, Res_Global.x
 
     # -------------------------------------------------------------------------
     def tradeoff_weights(self, tradeoff_scheme, old_EDX, old_EDY):
@@ -1174,7 +1189,7 @@ class Engine():
         `None`: No exploration.
         `equal`: Same weights for exploration and exploitation scores.
         `epsilon-decreasing`: Start with more exploration and increase the
-            influence of exploitation along the way with a exponential decay
+            influence of exploitation along the way with an exponential decay
             function
         `adaptive`: An adaptive method based on:
             Liu, Haitao, Jianfei Cai, and Yew-Soon Ong. "An adaptive sampling
@@ -1198,6 +1213,8 @@ class Engine():
             Exploitation weight.
 
         """
+        exploration_weight = None
+
         if tradeoff_scheme is None:
             exploration_weight = 0
 
@@ -1207,22 +1224,22 @@ class Engine():
         elif tradeoff_scheme == 'epsilon-decreasing':
             # epsilon-decreasing scheme
             # Start with more exploration and increase the influence of
-            # exploitation along the way with a exponential decay function
+            # exploitation along the way with an exponential decay function
             initNSamples = self.ExpDesign.n_init_samples
             n_max_samples = self.ExpDesign.n_max_samples
 
             itrNumber = (self.ExpDesign.X.shape[0] - initNSamples)
             itrNumber //= self.ExpDesign.n_new_samples
 
-            tau2 = -(n_max_samples-initNSamples-1) / np.log(1e-8)
-            exploration_weight = signal.exponential(n_max_samples-initNSamples,
+            tau2 = -(n_max_samples - initNSamples - 1) / np.log(1e-8)
+            exploration_weight = signal.exponential(n_max_samples - initNSamples,
                                                     0, tau2, False)[itrNumber]
 
         elif tradeoff_scheme == 'adaptive':
 
             # Extract itrNumber
             initNSamples = self.ExpDesign.n_init_samples
-            n_max_samples = self.ExpDesign.n_max_samples
+            # n_max_samples = self.ExpDesign.n_max_samples
             itrNumber = (self.ExpDesign.X.shape[0] - initNSamples)
             itrNumber //= self.ExpDesign.n_new_samples
 
@@ -1241,7 +1258,7 @@ class Engine():
                 pce_y_prev = np.array(list(self._y_hat_prev.values()))[:, 0]
                 mseCVError = mean_squared_error(pce_y_prev, y)
 
-                exploration_weight = min([0.5*mseError/mseCVError, 1])
+                exploration_weight = min([0.5 * mseError / mseCVError, 1])
 
         # Exploitation weight
         exploitation_weight = 1 - exploration_weight
@@ -1292,8 +1309,11 @@ class Engine():
         # -----------------------------------------
         # Utility function exploit_method provided by user
         if exploit_method.lower() == 'user':
-            if not hasattr(self.ExpDesign, 'ExploitFunction'):
-                raise AttributeError('Function `ExploitFunction` not given to the ExpDesign, thus cannor run user-defined sequential scheme')
+            # TODO: is the exploit_method meant here?
+            if not hasattr(self.ExpDesign, 'ExploitFunction') or self.ExpDesign.ExploitFunction is None:
+                raise AttributeError(
+                    'Function `ExploitFunction` not given to the ExpDesign, thus cannot run user-defined sequential '
+                    'scheme')
             # TODO: syntax does not fully match the rest - can test this??
             Xnew, filteredSamples = self.ExpDesign.ExploitFunction(self)
 
@@ -1302,7 +1322,6 @@ class Engine():
 
             return Xnew, filteredSamples
 
-
         # Dual-Annealing works differently from the rest, so deal with this first
         # Here exploration and exploitation are performed simulataneously
         if explore_method == 'dual annealing':
@@ -1330,7 +1349,7 @@ class Engine():
                 results = []
                 for i in range(n_new_samples):
                     results.append(self.dual_annealing(exploit_method, subdomains[i], sigma2, var, i))
-                    
+
             # New sample
             Xnew = np.array([results[i][1] for i in range(n_new_samples)])
             print("\nXnew:\n", Xnew)
@@ -1338,29 +1357,30 @@ class Engine():
             # Computational cost
             elapsed_time = time.time() - start_time
             print("\n")
-            print(f"Elapsed_time: {round(elapsed_time,2)} sec.")
-            print('-'*20)
-            
+            print(f"Elapsed_time: {round(elapsed_time, 2)} sec.")
+            print('-' * 20)
+
             return Xnew, None
-        
+
         # Generate needed Exploration class
         explore = Exploration(self.ExpDesign, n_candidates)
         explore.w = 100  # * ndim #500  # TODO: where does this value come from?
-        
+
         # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
         explore.mc_criterion = 'mc-intersite-proj'
-        
+
         # Generate the candidate samples
         # TODO: here use the sampling method provided by the expdesign?
-        sampling_method = self.ExpDesign.sampling_method
-        
+        # sampling_method = self.ExpDesign.sampling_method
+
         # TODO: changed this from 'random' for LOOCV
-        if explore_method == 'LOOCV':
-            allCandidates = self.ExpDesign.generate_samples(n_candidates,
-                                                            sampling_method)
-        else:
-            allCandidates, scoreExploration = explore.get_exploration_samples()
-        
+        # TODO: these are commented out as they are not used !?
+        # if explore_method == 'LOOCV':
+        # allCandidates = self.ExpDesign.generate_samples(n_candidates,
+        #                                                     sampling_method)
+        # else:
+        #     allCandidates, scoreExploration = explore.get_exploration_samples()
+
         # -----------------------------------------
         # ---------- EXPLORATION METHODS ----------
         # -----------------------------------------
@@ -1372,7 +1392,7 @@ class Engine():
 
             # Generate random samples
             allCandidates = self.ExpDesign.generate_samples(n_candidates,
-                                                                'random')
+                                                            'random')
 
             # Construct error model based on LCerror
             errorModel = self.MetaModel.create_ModelError(old_EDX, self.LCerror)
@@ -1400,6 +1420,20 @@ class Engine():
             if ndim == 2:
                 def plotter(points, allCandidates, Method,
                             scoreExploration=None):
+                    """
+                    Plot the design points and exploration candidates.
+
+                    Parameters
+                    ----------
+                    points : array
+                        Existing experimental design points.
+                    allCandidates : array
+                        Candidate samples to display.
+                    Method : string
+                        If 'Voronoi', a Voronoi diagram is overlaid.
+                    scoreExploration : array, optional
+                        Scores annotated next to each candidate.
+                    """
                     if Method == 'Voronoi':
                         from scipy.spatial import Voronoi, voronoi_plot_2d
                         vor = Voronoi(points)
@@ -1413,7 +1447,7 @@ class Engine():
                     ax1.scatter(allCandidates[:, 0], allCandidates[:, 1], s=10,
                                 c='b', marker="o", label='Design candidates')
                     for i in range(points.shape[0]):
-                        txt = 'p'+str(i+1)
+                        txt = 'p' + str(i + 1)
                         ax1.annotate(txt, (points[i, 0], points[i, 1]))
                     if scoreExploration is not None:
                         for i in range(allCandidates.shape[0]):
@@ -1429,14 +1463,14 @@ class Engine():
         # -----------------------------------------
         # --------- EXPLOITATION METHODS ----------
         # -----------------------------------------
-        if exploit_method == 'BayesOptDesign' or\
-           exploit_method == 'BayesActDesign':
+        if exploit_method.lower() == 'bayesoptdesign' or \
+                exploit_method.lower() == 'bayesactdesign':
 
             # ------- Calculate Exoploration weight -------
             # Compute exploration weight based on trade off scheme
             explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
-                                                        old_EDX,
-                                                        old_EDY)
+                                                         old_EDX,
+                                                         old_EDY)
             print(f"\n Exploration weight={explore_w:0.3f} "
                   f"Exploitation weight={exploit_w:0.3f}\n")
 
@@ -1455,19 +1489,19 @@ class Engine():
                 # Split the candidates in groups for multiprocessing
                 split_cand = np.array_split(
                     candidates, n_cand_groups, axis=0
-                    )
-               # print(candidates)
-               # print(split_cand)
+                )
+                # print(candidates)
+                # print(split_cand)
                 if self.parallel:
                     results = Parallel(n_jobs=-1, backend='multiprocessing')(
                         delayed(self.run_util_func)(
                             exploit_method, split_cand[i], i, sigma2, var, X_MC)
-                        for i in range(n_cand_groups)) 
+                        for i in range(n_cand_groups))
                 else:
                     results = []
                     for i in range(n_cand_groups):
                         results.append(self.run_util_func(exploit_method, split_cand[i], i, sigma2, var, X_MC))
-                        
+
                 # Retrieve the results and append them
                 U_J_d = np.concatenate([results[NofE][1] for NofE in
                                         range(n_cand_groups)])
@@ -1489,29 +1523,28 @@ class Engine():
             # ------- Calculate Total score -------
             # ------- Trade off between EXPLORATION & EXPLOITATION -------
             # Accumulate the samples
-            finalCandidates = np.concatenate((allCandidates, candidates), axis = 0)   
-            finalCandidates = np.unique(finalCandidates, axis = 0)
-            
+            finalCandidates = np.concatenate((allCandidates, candidates), axis=0)
+            finalCandidates = np.unique(finalCandidates, axis=0)
+
             # Calculations take into account both exploration and exploitation 
             # samples without duplicates
             totalScore = np.zeros(finalCandidates.shape[0])
-            #self.totalScore = totalScore
-            
+            # self.totalScore = totalScore
+
             for cand_idx in range(finalCandidates.shape[0]):
                 # find candidate indices
                 idx1 = np.where(allCandidates == finalCandidates[cand_idx])[0]
                 idx2 = np.where(candidates == finalCandidates[cand_idx])[0]
-                
+
                 # exploration 
-                if idx1 != []:
+                if idx1.shape[0] > 0:
                     idx1 = idx1[0]
                     totalScore[cand_idx] += explore_w * scoreExploration[idx1]
-                    
+
                 # exploitation
-                if idx2 != []:
+                if idx2.shape[0] > 0:
                     idx2 = idx2[0]
                     totalScore[cand_idx] += exploit_w * norm_U_J_d[idx2]
-                
 
             # Total score
             totalScore = exploit_w * norm_U_J_d
@@ -1547,23 +1580,23 @@ class Engine():
                 Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]
 
 
-        elif exploit_method == 'VarOptDesign':
+        elif exploit_method.lower() == 'varoptdesign':
             # ------- EXPLOITATION: VarOptDesign -------
             UtilMethod = var
 
             # ------- Calculate Exoploration weight -------
             # Compute exploration weight based on trade off scheme
             explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
-                                                        old_EDX,
-                                                        old_EDY)
+                                                         old_EDX,
+                                                         old_EDY)
             print(f"\nweightExploration={explore_w:0.3f} "
                   f"weightExploitation={exploit_w:0.3f}")
 
             # Generate candidate samples from Exploration class
             nMeasurement = old_EDY[OutputNames[0]].shape[1]
-            
-           # print(UtilMethod)
-            
+
+            # print(UtilMethod)
+
             # Find sensitive region
             if UtilMethod == 'LOOCV':
                 LCerror = self.MetaModel.LCerror
@@ -1575,12 +1608,12 @@ class Engine():
                             LCerror[y_key][key])
 
                 ExploitScore = np.max(np.max(allModifiedLOO, axis=1), axis=1)
-               # print(allModifiedLOO.shape)
+            # print(allModifiedLOO.shape)
 
             elif UtilMethod in ['EIGF', 'ALM']:
                 # ----- All other in  ['EIGF', 'ALM'] -----
                 # Initilize the ExploitScore array
-                ExploitScore = np.zeros((len(old_EDX), len(OutputNames)))
+                # ExploitScore = np.zeros((len(old_EDX), len(OutputNames)))
 
                 # Split the candidates in groups for multiprocessing
                 if explore_method != 'Voronoi':
@@ -1630,9 +1663,9 @@ class Engine():
             # Normalize U_J_d
             ExploitScore = ExploitScore / np.sum(ExploitScore)
             totalScore = exploit_w * ExploitScore
-           # print(totalScore.shape)
-           # print(explore_w)
-           # print(scoreExploration.shape)
+            # print(totalScore.shape)
+            # print(explore_w)
+            # print(scoreExploration.shape)
             totalScore += explore_w * scoreExploration
 
             temp = totalScore.copy()
@@ -1654,7 +1687,7 @@ class Engine():
                     # select the requested number of samples
                     Xnew[i] = newSamples[np.argmax(maxminScore)]
 
-        elif exploit_method == 'alphabetic':
+        elif exploit_method.lower() == 'alphabetic':
             # ------- EXPLOITATION: ALPHABETIC -------
             Xnew = self.util_AlphOptDesign(allCandidates, var)
 
@@ -1676,7 +1709,7 @@ class Engine():
             raise NameError('The requested design method is not available.')
 
         print("\n")
-        print("\nRun No. {}:".format(old_EDX.shape[0]+1))
+        print("\nRun No. {}:".format(old_EDX.shape[0] + 1))
         print("Xnew:\n", Xnew)
 
         # TODO: why does it also return None?
@@ -1694,7 +1727,7 @@ class Engine():
 
         Arguments
         ---------
-        NCandidate : int
+        candidates : array of shape (n_candidates, n_params)
             Number of candidate points to be searched
 
         var : string
@@ -1705,7 +1738,7 @@ class Engine():
         X_new : array of shape (1, n_params)
             The new sampling location in the input space.
         """
-        MetaModelOrig = self # TODO: this doesn't fully seem correct?
+        MetaModelOrig = self  # TODO: this doesn't fully seem correct?
         n_new_samples = MetaModelOrig.ExpDesign.n_new_samples
         NCandidate = candidates.shape[0]
 
@@ -1713,7 +1746,7 @@ class Engine():
         OutputName = self.out_names[0]
 
         # To avoid changes ub original aPCE object
-        MetaModel = deepcopy(MetaModelOrig)
+        # MetaModel = deepcopy(MetaModelOrig)
 
         # Old Experimental design
         oldExpDesignX = self.ExpDesign.X
@@ -1722,19 +1755,14 @@ class Engine():
         # Suggestion: Go for the one with the highest LOO error
         # TODO: this is just a patch, need to look at again!
         Scores = list(self.MetaModel.score_dict['b_1'][OutputName].values())
-        #print(Scores)
-        #print(self.MetaModel.score_dict)
-        #print(self.MetaModel.score_dict.values())
-        #print(self.MetaModel.score_dict['b_1'].values())
-        #print(self.MetaModel.score_dict['b_1'][OutputName].values())
-        ModifiedLOO = [1-score for score in Scores]
+        ModifiedLOO = [1 - score for score in Scores]
         outIdx = np.argmax(ModifiedLOO)
 
         # Initialize Phi to save the criterion's values
-        Phi = np.zeros((NCandidate))
+        Phi = np.zeros(NCandidate)
 
         # TODO: also patched here
-        BasisIndices = self.MetaModel.basis_dict['b_1'][OutputName]["y_"+str(outIdx+1)]
+        BasisIndices = self.MetaModel.basis_dict['b_1'][OutputName]["y_" + str(outIdx + 1)]
         P = len(BasisIndices)
 
         # ------ Old Psi ------------
@@ -1753,10 +1781,9 @@ class Engine():
 
             # Information matrix
             PsiTPsi = np.dot(Psi_cand.T, Psi_cand)
-            M = PsiTPsi / (len(oldExpDesignX)+1)
+            M = PsiTPsi / (len(oldExpDesignX) + 1)
 
-            if np.linalg.cond(PsiTPsi) > 1e-12 \
-               and np.linalg.cond(PsiTPsi) < 1 / sys.float_info.epsilon:
+            if 1e-12 < np.linalg.cond(PsiTPsi) < 1 / sys.float_info.epsilon:
                 # faster
                 invM = linalg.solve(M, sparse.eye(PsiTPsi.shape[0]).toarray())
             else:
@@ -1768,7 +1795,7 @@ class Engine():
 
             # D-Opt
             if var.lower() == 'd-opt':
-                Phi[idx] = (np.linalg.det(invM)) ** (1/P)
+                Phi[idx] = (np.linalg.det(invM)) ** (1 / P)
 
             # A-Opt
             elif var.lower() == 'a-opt':
@@ -1779,9 +1806,9 @@ class Engine():
                 Phi[idx] = np.linalg.cond(M)
 
             else:
-               # print(var.lower())
+                # print(var.lower())
                 raise Exception('The optimality criterion you requested has '
-                      'not been implemented yet!')
+                                'not been implemented yet!')
 
         # find an optimal point subset to add to the initial design
         # by minimization of the Phi
@@ -1794,7 +1821,7 @@ class Engine():
 
     # -------------------------------------------------------------------------
     def _normpdf(self, y_hat_pce, std_pce, obs_data, total_sigma2s,
-                  rmse=None):
+                 rmse=None):
         """
         Calculated gaussian likelihood for given y+std based on given obs+sigma
         # TODO: is this understanding correct?
@@ -1826,7 +1853,7 @@ class Engine():
         for idx, out in enumerate(self.out_names):
 
             # (Meta)Model Output
-           # print(y_hat_pce[out])
+            # print(y_hat_pce[out])
             nsamples, nout = y_hat_pce[out].shape
 
             # Prepare data and remove NaN
@@ -1841,9 +1868,9 @@ class Engine():
 
             # Surrogate error if valid dataset is given.
             if rmse is not None:
-                tot_sigma2s += rmse[out]**2
+                tot_sigma2s += rmse[out] ** 2
             else:
-                tot_sigma2s += np.mean(std_pce[out])**2
+                tot_sigma2s += np.mean(std_pce[out]) ** 2
 
             likelihoods *= stats.multivariate_normal.pdf(
                 y_hat_pce[out], data, np.diag(tot_sigma2s),
@@ -1870,8 +1897,8 @@ class Engine():
         # TODO: Evaluate MetaModel on the experimental design and ValidSet
         OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=samples)
 
-        logLik_data = np.zeros((n_samples))
-        logLik_model = np.zeros((n_samples))
+        logLik_data = np.zeros(n_samples)
+        logLik_model = np.zeros(n_samples)
         # Loop over the outputs
         for idx, out in enumerate(output_names):
 
@@ -1892,7 +1919,6 @@ class Engine():
             covMatrix_data = np.diag(tot_sigma2s)
 
             for i, sample in enumerate(samples):
-
                 # Simulation run
                 y_m = model_outputs[out][i]
 
@@ -1901,22 +1927,22 @@ class Engine():
 
                 # CovMatrix with the surrogate error
                 # covMatrix = np.diag(stdOutputRS[out][i]**2)
-                covMatrix = np.diag((y_m-y_m_hat)**2)
+                # covMatrix = np.diag((y_m - y_m_hat) ** 2)
                 covMatrix = np.diag(
-                    np.mean((model_outputs[out]-OutputRS[out]), axis=0)**2
-                    )
+                    np.mean((model_outputs[out] - OutputRS[out]), axis=0) ** 2
+                )
 
                 # Compute likelilhood output vs data
                 logLik_data[i] += logpdf(
                     y_m_hat, data, covMatrix_data
-                    )
+                )
 
                 # Compute likelilhood output vs surrogate
                 logLik_model[i] += logpdf(y_m_hat, y_m, covMatrix)
 
         # Weight
         logLik_data -= logBME
-        weights = np.exp(logLik_model+logLik_data)
+        weights = np.exp(logLik_model + logLik_data)
 
         return np.log(np.mean(weights))
 
@@ -1942,7 +1968,7 @@ class Engine():
         """
 
         # Initialization
-        newpath = (r'Outputs_SeqPosteriorComparison/posterior')
+        newpath = r'Outputs_SeqPosteriorComparison/posterior'
         os.makedirs(newpath, exist_ok=True)
 
         bound_tuples = self.ExpDesign.bound_tuples
@@ -1984,7 +2010,6 @@ class Engine():
 
         return figPosterior
 
-    
     # -------------------------------------------------------------------------
     def _BME_Calculator(self, obs_data, sigma2Dict, rmse=None):
         """
@@ -2006,7 +2031,8 @@ class Engine():
         
         """
         # Initializations
-        if hasattr(self, 'valid_likelihoods'):
+        # TODO: this just does not make sense, recheck from old commits
+        if self.valid_likelihoods is not None:
             valid_likelihoods = self.valid_likelihoods
         else:
             valid_likelihoods = []
@@ -2014,7 +2040,7 @@ class Engine():
 
         post_snapshot = self.ExpDesign.post_snapshot
         if post_snapshot or valid_likelihoods.shape[0] != 0:
-            newpath = (r'Outputs_SeqPosteriorComparison/likelihood_vs_ref')
+            newpath = r'Outputs_SeqPosteriorComparison/likelihood_vs_ref'
             os.makedirs(newpath, exist_ok=True)
 
         SamplingMethod = 'random'
@@ -2027,7 +2053,7 @@ class Engine():
             # Generate samples for Monte Carlo simulation
             X_MC = self.ExpDesign.generate_samples(
                 MCsize, SamplingMethod
-                )
+            )
 
             # Monte Carlo simulation for the candidate design
             Y_MC, std_MC = self.MetaModel.eval_metamodel(samples=X_MC)
@@ -2036,10 +2062,10 @@ class Engine():
             # simulation results via PCE with candidate design)
             Likelihoods = self._normpdf(
                 Y_MC, std_MC, obs_data, sigma2Dict, rmse
-                )
+            )
 
             # Check the Effective Sample Size (1000<ESS<MCsize)
-            ESS = 1 / np.sum(np.square(Likelihoods/np.sum(Likelihoods)))
+            ESS = 1 / np.sum(np.square(Likelihoods / np.sum(Likelihoods)))
 
             # Enlarge sample size if it doesn't fulfill the criteria
             if (ESS > MCsize) or (ESS < 1):
@@ -2052,7 +2078,7 @@ class Engine():
         unif = np.random.rand(1, MCsize)[0]
 
         # Reject the poorly performed prior
-        accepted = (Likelihoods/np.max(Likelihoods)) >= unif
+        accepted = (Likelihoods / np.max(Likelihoods)) >= unif
         X_Posterior = X_MC[accepted]
 
         # ------------------------------------------------------------
@@ -2068,16 +2094,17 @@ class Engine():
         postExpLikelihoods = np.mean(np.log(Likelihoods[accepted]))
 
         # Posterior-based expectation of prior densities
-        postExpPrior = np.mean(
-            np.log(self.ExpDesign.JDist.pdf(X_Posterior.T))
-            )
+        # TODO: this is commented out, as it is not used again
+        # postExpPrior = np.mean(
+        #     np.log(self.ExpDesign.JDist.pdf(X_Posterior.T))
+        # )
 
         # Calculate Kullback-Leibler Divergence
         # KLD = np.mean(np.log(Likelihoods[Likelihoods!=0])- logBME)
         KLD = postExpLikelihoods - logBME
 
         # Information Entropy based on Entropy paper Eq. 38
-        infEntropy = logBME - postExpPrior - postExpLikelihoods
+        # infEntropy = logBME - postExpPrior - postExpLikelihoods
 
         # If post_snapshot is True, plot likelihood vs refrence
         if post_snapshot or valid_likelihoods:
@@ -2086,10 +2113,10 @@ class Engine():
             ref_like = np.log(valid_likelihoods[(valid_likelihoods > 0)])
             est_like = np.log(Likelihoods[Likelihoods > 0])
             distHellinger = hellinger_distance(ref_like, est_like)
-            
+
             idx = len([name for name in os.listdir(newpath) if 'Likelihoods_'
                        in name and os.path.isfile(os.path.join(newpath, name))])
-            
+
             fig, ax = plt.subplots()
             try:
                 sns.kdeplot(np.log(valid_likelihoods[valid_likelihoods > 0]),
@@ -2130,7 +2157,7 @@ class Engine():
                 'n_walkers': 30,
                 'moves': emcee.moves.KDEMove(),
                 'verbose': False
-                }
+            }
 
             # ----- Define the discrepancy model -------
             # TODO: check with Farid if this first line is how it should be
@@ -2142,13 +2169,13 @@ class Engine():
             # # -- (Option B) --
             DiscrepancyOpts = Discrepancy('')
             DiscrepancyOpts.type = 'Gaussian'
-            DiscrepancyOpts.parameters = obs_data**2
+            DiscrepancyOpts.parameters = obs_data ** 2
             BayesOpts.Discrepancy = DiscrepancyOpts
             # Start the calibration/inference
             Bayes_PCE = BayesOpts.create_inference()
             X_Posterior = Bayes_PCE.posterior_df.values
 
-        return (logBME, KLD, X_Posterior, Likelihoods, distHellinger)
+        return logBME, KLD, X_Posterior, Likelihoods, distHellinger
 
     # -------------------------------------------------------------------------
     def _validError(self):
@@ -2180,14 +2207,14 @@ class Engine():
                 sample_weight=None,
                 squared=False)
             # Validation error
-            valid_error[key] = (rms_error[key]**2)
+            valid_error[key] = (rms_error[key] ** 2)
             valid_error[key] /= np.var(valid_model_runs[key], ddof=1, axis=0)
 
             # Print a report table
             print("\n>>>>> Updated Errors of {} <<<<<".format(key))
             print("\nIndex  |  RMSE   |  Validation Error")
-            print('-'*35)
-            print('\n'.join(f'{i+1}  |  {k:.3e}  |  {j:.3e}' for i, (k, j)
+            print('-' * 35)
+            print('\n'.join(f'{i + 1}  |  {k:.3e}  |  {j:.3e}' for i, (k, j)
                             in enumerate(zip(rms_error[key],
                                              valid_error[key]))))
 
@@ -2213,13 +2240,12 @@ class Engine():
 
         # Compute the root mean squared error
         for output in self.out_names:
-
             # Compute the error between mean and std of MetaModel and OrigModel
             RMSE_Mean = mean_squared_error(
                 self.Model.mc_reference['mean'], pce_means[output], squared=False
-                )
+            )
             RMSE_std = mean_squared_error(
                 self.Model.mc_reference['std'], pce_stds[output], squared=False
-                )
+            )
 
         return RMSE_Mean, RMSE_std
diff --git a/src/bayesvalidrox/surrogate_models/exp_designs.py b/src/bayesvalidrox/surrogate_models/exp_designs.py
index fa03fe17d96fb2c1f19546b7b72fb2fd6dd1c13a..ce1745903c776049c797e5585b60d45463d93325 100644
--- a/src/bayesvalidrox/surrogate_models/exp_designs.py
+++ b/src/bayesvalidrox/surrogate_models/exp_designs.py
@@ -4,18 +4,17 @@
 Experimental design with associated sampling methods
 """
 
-import numpy as np
-import math
 import itertools
+import math
+
 import chaospy
-import scipy.stats as st
-from tqdm import tqdm
 import h5py
-import os
+import numpy as np
 
 from .apoly_construction import apoly_construction
 from .input_space import InputSpace
 
+
 # -------------------------------------------------------------------------
 def check_ranges(theta, ranges):
     """
@@ -26,12 +25,12 @@ def check_ranges(theta, ranges):
     theta : array
         Proposed parameter set.
     ranges : nested list
-        List of the praremeter ranges.
+        The parameter ranges.
 
     Returns
     -------
     c : bool
-        If it lies in the given range, it return True else False.
+        True if `theta` lies within the given ranges, otherwise False.
 
     """
     c = True
@@ -52,7 +51,7 @@ class ExpDesigns(InputSpace):
 
     Attributes
     ----------
-    Input : obj
+    input_object : obj
         Input object containing the parameter marginals, i.e. name,
         distribution type and distribution parameters or available raw data.
     meta_Model_type : str
@@ -143,15 +142,17 @@ class ExpDesigns(InputSpace):
     - K-Opt (K-Optimality)
     """
 
-    def __init__(self, Input, meta_Model_type='pce',
-                 sampling_method='random', hdf5_file=None,
-                 n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16,
-                 tradeoff_scheme=None, n_canddidate=1, explore_method='random',
-                 exploit_method='Space-filling', util_func='Space-filling',
-                 n_cand_groups=4, n_replication=1, post_snapshot=False,
-                 step_snapshot=1, max_a_post=[], adapt_verbose=False, max_func_itr=1):
+    def __init__(self, input_object, meta_Model_type='pce', sampling_method='random', hdf5_file=None,
+                 n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16, tradeoff_scheme=None, n_canddidate=1,
+                 explore_method='random', exploit_method='Space-filling', util_func='Space-filling', n_cand_groups=4,
+                 n_replication=1, post_snapshot=False, step_snapshot=1, max_a_post=None, adapt_verbose=False,
+                 max_func_itr=1):
 
-        self.InputObj = Input
+        super().__init__(input_object, meta_Model_type)
+        if max_a_post is None:
+            max_a_post = []
+
+        self.InputObj = input_object
         self.meta_Model_type = meta_Model_type
         self.sampling_method = sampling_method
         self.hdf5_file = hdf5_file
@@ -170,17 +171,20 @@ class ExpDesigns(InputSpace):
         self.max_a_post = max_a_post
         self.adapt_verbose = adapt_verbose
         self.max_func_itr = max_func_itr
-        
+
         # Other 
         self.apce = None
+        self.n_init_samples = None
+        self.n_samples = None
         self.ndim = None
-        
+        self.X = None
+        self.Y = None
+
         # Init 
         self.check_valid_inputs()
-        
+
     # -------------------------------------------------------------------------
-    def generate_samples(self, n_samples, sampling_method='random',
-                         transform=False):
+    def generate_samples(self, n_samples, sampling_method='random'):
         """
         Generates samples with given sampling method
 
@@ -190,9 +194,6 @@ class ExpDesigns(InputSpace):
             Number of requested samples.
         sampling_method : str, optional
             Sampling method. The default is `'random'`.
-        transform : bool, optional
-            Transformation via an isoprobabilistic transformation method. The
-            default is `False`.
 
         Returns
         -------
@@ -203,17 +204,14 @@ class ExpDesigns(InputSpace):
         try:
             samples = chaospy.generate_samples(
                 int(n_samples), domain=self.origJDist, rule=sampling_method
-                )
+            )
         except:
             samples = self.random_sampler(int(n_samples)).T
 
         return samples.T
 
-
-            
     # -------------------------------------------------------------------------
-    def generate_ED(self, n_samples, transform=False,
-                    max_pce_deg=None):
+    def generate_ED(self, n_samples, max_pce_deg=None):
         """
         Generates experimental designs (training set) with the given method.
 
@@ -221,10 +219,6 @@ class ExpDesigns(InputSpace):
         ----------
         n_samples : int
             Number of requested training points.
-        sampling_method : str, optional
-            Sampling method. The default is `'random'`.
-        transform : bool, optional
-            Isoprobabilistic transformation. The default is `False`.
         max_pce_deg : int, optional
             Maximum PCE polynomial degree. The default is `None`.
             
@@ -233,21 +227,23 @@ class ExpDesigns(InputSpace):
         None
 
         """
-        if n_samples <0:
+        if n_samples < 0:
             raise ValueError('A negative number of samples cannot be created. Please provide positive n_samples')
         n_samples = int(n_samples)
-        
-        if not hasattr(self, 'n_init_samples'):
+
+        if self.n_init_samples is None:
             self.n_init_samples = n_samples
 
         # Generate the samples based on requested method
         self.init_param_space(max_pce_deg)
 
+        samples = None
         sampling_method = self.sampling_method
         # Pass user-defined samples as ED
         if sampling_method == 'user':
-            if not hasattr(self, 'X'):
-                raise AttributeError('User-defined sampling cannot proceed as no samples provided. Please add them to this class as attribute X')
+            if self.X is None:
+                raise AttributeError('User-defined sampling cannot proceed as no samples provided. Please add them to '
+                                     'this class as attribute X')
             if not self.X.ndim == 2:
                 raise AttributeError('The provided samples shuld have 2 dimensions')
             samples = self.X
@@ -279,7 +275,7 @@ class ExpDesigns(InputSpace):
                                                rule=sampling_method).T
 
         self.X = samples
-            
+
     def read_from_file(self, out_names):
         """
         Reads in the ExpDesign from a provided h5py file and saves the results.
@@ -294,7 +290,7 @@ class ExpDesigns(InputSpace):
         None.
 
         """
-        if self.hdf5_file == None:
+        if self.hdf5_file is None:
             raise AttributeError('ExpDesign cannot be read in, please provide hdf5 file first')
 
         # Read hdf5 file
@@ -331,11 +327,9 @@ class ExpDesigns(InputSpace):
         f.close()
         print(f'Experimental Design is read in from file {self.hdf5_file}')
         print('')
-        
-    
 
     # -------------------------------------------------------------------------
-    def random_sampler(self, n_samples, max_deg = None):
+    def random_sampler(self, n_samples, max_deg=None):
         """
         Samples the given raw data randomly.
 
@@ -355,10 +349,10 @@ class ExpDesigns(InputSpace):
             The sampling locations in the input space.
 
         """
-        if not hasattr(self, 'raw_data'):
+        if self.raw_data is None:
             self.init_param_space(max_deg)
         else:
-            if np.array(self.raw_data).ndim !=2:
+            if np.array(self.raw_data).ndim != 2:
                 raise AttributeError('The given raw data for sampling should have two dimensions')
         samples = np.zeros((n_samples, self.ndim))
         sample_size = self.raw_data.shape[1]
@@ -371,10 +365,18 @@ class ExpDesigns(InputSpace):
                 # store the raw data with given random indices
                 samples[:, pa_idx] = self.raw_data[pa_idx, rand_idx]
         else:
+            if self.JDist is None:
+                raise AttributeError('Sampling cannot proceed, build ExpDesign with max_deg != 0 to create JDist!')
             try:
+                # Use resample if JDist is of type gaussian_kde
                 samples = self.JDist.resample(int(n_samples)).T
             except AttributeError:
+                # Use sample if JDist is of type chaospy.J
                 samples = self.JDist.sample(int(n_samples)).T
+            # If there is only one input transform the samples
+            if self.ndim == 1:
+                samples = np.swapaxes(np.atleast_2d(samples), 0, 1)
+
             # Check if all samples are in the bound_tuples
             for idx, param_set in enumerate(samples):
                 if not check_ranges(param_set, self.bound_tuples):
@@ -384,7 +386,7 @@ class ExpDesigns(InputSpace):
                     except:
                         proposed_sample = self.JDist.resample(1).T[0]
                     while not check_ranges(proposed_sample,
-                                                 self.bound_tuples):
+                                           self.bound_tuples):
                         try:
                             proposed_sample = chaospy.generate_samples(
                                 1, domain=self.JDist, rule='random').T[0]
@@ -414,49 +416,67 @@ class ExpDesigns(InputSpace):
             Collocation points.
 
         """
-        
-        if not hasattr(self, 'raw_data'):
+
+        if self.raw_data is None:
             self.init_param_space(max_deg)
 
         raw_data = self.raw_data
 
         # Guess the closest degree to self.n_samples
         def M_uptoMax(deg):
+            """
+            Count the multivariate polynomial basis terms for each degree up to `deg`.
+            Parameters
+            ----------
+            deg : int
+                Degree.
+
+            Returns
+            -------
+            np.ndarray of int: C(ndim + d, d) total basis terms for each degree d = 1..deg.
+            """
             result = []
-            for d in range(1, deg+1):
-                result.append(math.factorial(self.ndim+d) //
+            for d in range(1, deg + 1):
+                result.append(math.factorial(self.ndim + d) //
                               (math.factorial(self.ndim) * math.factorial(d)))
             return np.array(result)
-        #print(M_uptoMax(max_deg))
-        #print(np.where(M_uptoMax(max_deg) > n_samples)[0])
 
         guess_Deg = np.where(M_uptoMax(max_deg) > n_samples)[0][0]
 
-        c_points = np.zeros((guess_Deg+1, self.ndim))
+        c_points = np.zeros((guess_Deg + 1, self.ndim))
 
         def PolynomialPa(parIdx):
+            """
+            Build the arbitrary polynomial (aPC) basis for one input parameter from its raw data.
+            Parameters
+            ----------
+            parIdx : int
+
+            Returns
+            -------
+            Polynomial coefficients returned by apoly_construction for the given parameter.
+            """
             return apoly_construction(self.raw_data[parIdx], max_deg)
 
         for i in range(self.ndim):
-            poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
+            poly_coeffs = PolynomialPa(i)[guess_Deg + 1][::-1]
             c_points[:, i] = np.trim_zeros(np.roots(poly_coeffs))
 
         #  Construction of optimal integration points
-        Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
+        Prod = itertools.product(np.arange(1, guess_Deg + 2), repeat=self.ndim)
         sort_dig_unique_combos = np.array(list(filter(lambda x: x, Prod)))
 
         # Ranking relatively mean
-        Temp = np.empty(shape=[0, guess_Deg+1])
+        Temp = np.empty(shape=[0, guess_Deg + 1])
         for j in range(self.ndim):
-            s = abs(c_points[:, j]-np.mean(raw_data[j]))
+            s = abs(c_points[:, j] - np.mean(raw_data[j]))
             Temp = np.append(Temp, [s], axis=0)
         temp = Temp.T
 
         index_CP = np.sort(temp, axis=0)
-        sort_cpoints = np.empty((0, guess_Deg+1))
+        sort_cpoints = np.empty((0, guess_Deg + 1))
 
         for j in range(self.ndim):
-            #print(index_CP[:, j])
             sort_cp = c_points[index_CP[:, j], j]
             sort_cpoints = np.vstack((sort_cpoints, sort_cp))
 
@@ -464,8 +484,9 @@ class ExpDesigns(InputSpace):
         sort_unique_combos = np.empty(shape=[0, self.ndim])
         for i in range(len(sort_dig_unique_combos)):
             sort_un_comb = []
+            sort_uni_comb = None
             for j in range(self.ndim):
-                SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j]-1]
+                SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j] - 1]
                 sort_un_comb.append(SortUC)
                 sort_uni_comb = np.asarray(sort_un_comb)
             sort_unique_combos = np.vstack((sort_unique_combos, sort_uni_comb))
diff --git a/src/bayesvalidrox/surrogate_models/exploration.py b/src/bayesvalidrox/surrogate_models/exploration.py
index 6abb652f145fadb410ecf8f987142e8ceb544a41..67decae2bfef6d397bafbc10abdcb35ff77a63a3 100644
--- a/src/bayesvalidrox/surrogate_models/exploration.py
+++ b/src/bayesvalidrox/surrogate_models/exploration.py
@@ -33,6 +33,7 @@ class Exploration:
 
     def __init__(self, ExpDesign, n_candidate,
                  mc_criterion='mc-intersite-proj-th'):
+        self.closestPoints = None
         self.ExpDesign = ExpDesign
         self.n_candidate = n_candidate
         self.mc_criterion = mc_criterion
diff --git a/src/bayesvalidrox/surrogate_models/input_space.py b/src/bayesvalidrox/surrogate_models/input_space.py
index 4e010d66f2933ec243bad756d8f2c5454808d802..c534d34b22cbdc9421b5b59c91054032577df4c6 100644
--- a/src/bayesvalidrox/surrogate_models/input_space.py
+++ b/src/bayesvalidrox/surrogate_models/input_space.py
@@ -9,6 +9,7 @@ import chaospy
 import scipy.stats as st
 
 
+# noinspection SpellCheckingInspection
 class InputSpace:
     """
     This class generates the input space for the metamodel from the
@@ -24,24 +25,32 @@ class InputSpace:
 
     """
 
-    def __init__(self, Input, meta_Model_type='pce'):
-        self.InputObj = Input
+    def __init__(self, input_object, meta_Model_type='pce'):
+        self.InputObj = input_object
         self.meta_Model_type = meta_Model_type
-        
+
         # Other 
         self.apce = None
+        self.bound_tuples = None
+        self.input_data_given = None
+        self.JDist = None
+        self.MCSize = None
         self.ndim = None
-        
+        self.origJDist = None
+        self.par_names = None
+        self.poly_types = None
+        self.prior_space = None
+        self.raw_data = None
+
         # Init 
         self.check_valid_inputs()
-        
-        
-    def check_valid_inputs(self)-> None:
+
+    def check_valid_inputs(self) -> None:
         """
         Check if the given InputObj is valid to use for further calculations:
-            Has some Marginals
-            Marginals have valid priors
-            All Marginals given as the same type (samples vs dist)
+        1) Has some Marginals
+        2) The Marginals have valid priors
+        3) All Marginals given as the same type (samples vs dist)
 
         Returns
         -------
@@ -50,7 +59,7 @@ class InputSpace:
         """
         Inputs = self.InputObj
         self.ndim = len(Inputs.Marginals)
-        
+
         # Check if PCE or aPCE metamodel is selected.
         # TODO: test also for 'pce'??
         if self.meta_Model_type.lower() == 'apce':
@@ -59,26 +68,22 @@ class InputSpace:
             self.apce = False
 
         # check if marginals given 
-        if not self.ndim >=1:
+        if not self.ndim >= 1:
             raise AssertionError('Cannot build distributions if no marginals are given')
-            
+
         # check that each marginal is valid
         for marginals in Inputs.Marginals:
             if len(marginals.input_data) == 0:
-                if marginals.dist_type == None:
+                if marginals.dist_type is None:
                     raise AssertionError('Not all marginals were provided priors')
-                    break
-            if np.array(marginals.input_data).shape[0] and (marginals.dist_type != None):
+            if np.array(marginals.input_data).shape[0] and (marginals.dist_type is not None):
                 raise AssertionError('Both samples and distribution type are given. Please choose only one.')
-                break
-                
+
         # Check if input is given as dist or input_data.
         self.input_data_given = -1
         for marg in Inputs.Marginals:
-            #print(self.input_data_given)
             size = np.array(marg.input_data).shape[0]
-            #print(f'Size: {size}')
-            if size and abs(self.input_data_given) !=1:
+            if size and abs(self.input_data_given) != 1:
                 self.input_data_given = 2
                 break
             if (not size) and self.input_data_given > 0:
@@ -88,11 +93,10 @@ class InputSpace:
                 self.input_data_given = 0
             if size:
                 self.input_data_given = 1
-                
+
         if self.input_data_given == 2:
             raise AssertionError('Distributions cannot be built as the priors have different types')
-            
-    
+
         # Get the bounds if input_data are directly defined by user:
         if self.input_data_given:
             for i in range(self.ndim):
@@ -100,8 +104,6 @@ class InputSpace:
                 up_bound = np.max(Inputs.Marginals[i].input_data)
                 Inputs.Marginals[i].parameters = [low_bound, up_bound]
 
-  
-
     # -------------------------------------------------------------------------
     def init_param_space(self, max_deg=None):
         """
@@ -112,7 +114,7 @@ class InputSpace:
         max_deg : int, optional
             Maximum degree. The default is `None`.
 
-        Creates
+        Returns
         -------
         raw_data : array of shape (n_params, n_samples)
             Raw data.
@@ -122,7 +124,7 @@ class InputSpace:
         """
         # Recheck all before running!
         self.check_valid_inputs()
-        
+
         Inputs = self.InputObj
         ndim = self.ndim
         rosenblatt_flag = Inputs.Rosenblatt
@@ -192,16 +194,17 @@ class InputSpace:
         orig_space_dist : object
             A chaospy JDist object or a gaussian_kde object.
         poly_types : list
-            List of polynomial types for the parameters.
+            A list of polynomial types for the parameters.
 
         """
         Inputs = self.InputObj
-        
+
         all_data = []
         all_dist_types = []
         orig_joints = []
         poly_types = []
-        
+        params = None
+
         for parIdx in range(self.ndim):
 
             if Inputs.Marginals[parIdx].dist_type is None:
@@ -222,27 +225,27 @@ class InputSpace:
 
             elif 'unif' in dist_type.lower():
                 polytype = 'legendre'
-                if not np.array(params).shape[0]>=2:
+                if not np.array(params).shape[0] >= 2:
                     raise AssertionError('Distribution has too few parameters!')
                 dist = chaospy.Uniform(lower=params[0], upper=params[1])
 
             elif 'norm' in dist_type.lower() and \
-                 'log' not in dist_type.lower():
-                if not np.array(params).shape[0]>=2:
+                    'log' not in dist_type.lower():
+                if not np.array(params).shape[0] >= 2:
                     raise AssertionError('Distribution has too few parameters!')
                 polytype = 'hermite'
                 dist = chaospy.Normal(mu=params[0], sigma=params[1])
 
             elif 'gamma' in dist_type.lower():
                 polytype = 'laguerre'
-                if not np.array(params).shape[0]>=3:
+                if not np.array(params).shape[0] >= 3:
                     raise AssertionError('Distribution has too few parameters!')
                 dist = chaospy.Gamma(shape=params[0],
                                      scale=params[1],
                                      shift=params[2])
 
             elif 'beta' in dist_type.lower():
-                if not np.array(params).shape[0]>=4:
+                if not np.array(params).shape[0] >= 4:
                     raise AssertionError('Distribution has too few parameters!')
                 polytype = 'jacobi'
                 dist = chaospy.Beta(alpha=params[0], beta=params[1],
@@ -250,29 +253,29 @@ class InputSpace:
 
             elif 'lognorm' in dist_type.lower():
                 polytype = 'hermite'
-                if not np.array(params).shape[0]>=2:
+                if not np.array(params).shape[0] >= 2:
                     raise AssertionError('Distribution has too few parameters!')
-                mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
-                sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+                mu = np.log(params[0] ** 2 / np.sqrt(params[0] ** 2 + params[1] ** 2))
+                sigma = np.sqrt(np.log(1 + params[1] ** 2 / params[0] ** 2))
                 dist = chaospy.LogNormal(mu, sigma)
                 # dist = chaospy.LogNormal(mu=params[0], sigma=params[1])
 
             elif 'expon' in dist_type.lower():
                 polytype = 'exponential'
-                if not np.array(params).shape[0]>=2:
+                if not np.array(params).shape[0] >= 2:
                     raise AssertionError('Distribution has too few parameters!')
                 dist = chaospy.Exponential(scale=params[0], shift=params[1])
 
             elif 'weibull' in dist_type.lower():
                 polytype = 'weibull'
-                if not np.array(params).shape[0]>=3:
+                if not np.array(params).shape[0] >= 3:
                     raise AssertionError('Distribution has too few parameters!')
                 dist = chaospy.Weibull(shape=params[0], scale=params[1],
                                        shift=params[2])
 
             else:
                 message = (f"DistType {dist_type} for parameter"
-                           f"{parIdx+1} is not available.")
+                           f"{parIdx + 1} is not available.")
                 raise ValueError(message)
 
             if self.input_data_given or self.apce:
@@ -311,6 +314,8 @@ class InputSpace:
         ----------
         X : array of shape (n_samples,n_params)
             Samples to be transformed.
+        params : list
+            Parameters for laguerre/gamma-type distribution.
         method : string
             If transformation method is 'user' transform X, else just pass X.
 
@@ -321,17 +326,18 @@ class InputSpace:
 
         """
         # Check for built JDist
-        if not hasattr(self, 'JDist'):
+        if self.JDist is None:
             raise AttributeError('Call function init_param_space first to create JDist')
-            
+
         # Check if X is 2d
         if X.ndim != 2:
             raise AttributeError('X should have two dimensions')
-            
+
         # Check if size of X matches Marginals
-        if X.shape[1]!= self.ndim:
-            raise AttributeError('The second dimension of X should be the same size as the number of marginals in the InputObj')
-        
+        if X.shape[1] != self.ndim:
+            raise AttributeError(
+                'The second dimension of X should be the same size as the number of marginals in the InputObj')
+
         if self.InputObj.Rosenblatt:
             self.origJDist, _ = self.build_polytypes(False)
             if method == 'user':
@@ -354,8 +360,8 @@ class InputSpace:
             if None in disttypes or self.input_data_given or self.apce:
                 return X
 
-            cdfx = np.zeros((X.shape))
-            tr_X = np.zeros((X.shape))
+            cdfx = np.zeros(X.shape)
+            tr_X = np.zeros(X.shape)
 
             for par_i in range(n_params):
 
@@ -370,11 +376,12 @@ class InputSpace:
 
                 # Extract the parameters of the transformation space based on
                 # polyType
+                inv_cdf = None
                 if polytype == 'legendre' or disttype == 'uniform':
                     # Generate Y_Dists based
                     params_Y = [-1, 1]
                     dist_Y = st.uniform(loc=params_Y[0],
-                                        scale=params_Y[1]-params_Y[0])
+                                        scale=params_Y[1] - params_Y[0])
                     inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
 
                 elif polytype == 'hermite' or disttype == 'norm':
@@ -383,9 +390,11 @@ class InputSpace:
                     inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
 
                 elif polytype == 'laguerre' or disttype == 'gamma':
-                    if params == None:
+                    if params is None:
                         raise AttributeError('Additional parameters have to be set for the gamma distribution!')
                     params_Y = [1, params[1]]
+
+                    # TODO: update the call to the gamma function, seems like source code has been changed!
                     dist_Y = st.gamma(loc=params_Y[0], scale=params_Y[1])
                     inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
 
diff --git a/src/bayesvalidrox/surrogate_models/inputs.py b/src/bayesvalidrox/surrogate_models/inputs.py
index 094e1066fe008e37288e44750524c5a1370bd7a2..40ae36337fa84c0bb9758e488f983ba29cdb9e77 100644
--- a/src/bayesvalidrox/surrogate_models/inputs.py
+++ b/src/bayesvalidrox/surrogate_models/inputs.py
@@ -4,6 +4,7 @@
 Inputs and related marginal distributions
 """
 
+
 class Input:
     """
     A class to define the uncertain input parameters.
@@ -20,17 +21,18 @@ class Input:
     -------
     Marginals can be defined as following:
 
-    >>> Inputs.add_marginals()
-    >>> Inputs.Marginals[0].name = 'X_1'
-    >>> Inputs.Marginals[0].dist_type = 'uniform'
-    >>> Inputs.Marginals[0].parameters = [-5, 5]
+    >>> inputs = Inputs()
+    >>> inputs.add_marginals()
+    >>> inputs.Marginals[0].name = 'X_1'
+    >>> inputs.Marginals[0].dist_type = 'uniform'
+    >>> inputs.Marginals[0].parameters = [-5, 5]
 
     If there is no common data is avaliable, the input data can be given
     as following:
 
-    >>> Inputs.add_marginals()
-    >>> Inputs.Marginals[0].name = 'X_1'
-    >>> Inputs.Marginals[0].input_data = input_data
+    >>> inputs.add_marginals()
+    >>> inputs.Marginals[0].name = 'X_1'
+    >>> inputs.Marginals[0].input_data = [0,0,1,0]
     """
     poly_coeffs_flag = True
 
@@ -63,12 +65,12 @@ class Marginal:
     dist_type : string
         Name of the distribution. The default is `None`.
     parameters : list
-        List of the parameters corresponding to the distribution type. The
+        Parameters corresponding to the distribution type. The
         default is `None`.
     input_data : array
         Available input data. The default is `[]`.
     moments : list
-        List of the moments.
+        Moments of the distribution. The default is `None`.
     """
 
     def __init__(self):
diff --git a/src/bayesvalidrox/surrogate_models/reg_fast_ard.py b/src/bayesvalidrox/surrogate_models/reg_fast_ard.py
index e6883a3edd6d247c219b8be328f5206b75780fbb..fdd0ee7470dc5d0bdccd354cee16f6257ec01b02 100755
--- a/src/bayesvalidrox/surrogate_models/reg_fast_ard.py
+++ b/src/bayesvalidrox/surrogate_models/reg_fast_ard.py
@@ -236,7 +236,7 @@ class RegressionFastARD(LinearModel, RegressorMixin):
             self.var_y = False
 
         A = np.PINF * np.ones(n_features)
-        active = np.zeros(n_features, dtype=np.bool)
+        active = np.zeros(n_features, dtype=bool)
 
         if self.start is not None and not hasattr(self, 'active_'):
             start = self.start
diff --git a/src/bayesvalidrox/surrogate_models/surrogate_models.py b/src/bayesvalidrox/surrogate_models/surrogate_models.py
index ca902f26bef0c45e8befb72ff67313ef09a77603..d8a589dde7973e6c7c86ba6d246e297bf001397a 100644
--- a/src/bayesvalidrox/surrogate_models/surrogate_models.py
+++ b/src/bayesvalidrox/surrogate_models/surrogate_models.py
@@ -4,38 +4,284 @@
 Implementation of metamodel as either PC, aPC or GPE
 """
 
+import copy
+import os
 import warnings
-import numpy as np
-import math
-import h5py
+
 import matplotlib.pyplot as plt
-from sklearn.preprocessing import MinMaxScaler
+import numpy as np
 import scipy as sp
-from scipy.optimize import minimize, NonlinearConstraint, LinearConstraint
-from tqdm import tqdm
-from sklearn.decomposition import PCA as sklearnPCA
-import sklearn.linear_model as lm
-from sklearn.gaussian_process import GaussianProcessRegressor
 import sklearn.gaussian_process.kernels as kernels
-import os
+import sklearn.linear_model as lm
 from joblib import Parallel, delayed
-import copy
+from scipy.optimize import minimize, NonlinearConstraint
+from sklearn.decomposition import PCA as sklearnPCA
+from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.preprocessing import MinMaxScaler
+from tqdm import tqdm
 
-from .input_space import InputSpace
-from .glexindex import glexindex
+from .apoly_construction import apoly_construction
+from .bayes_linear import VBLinearRegression, EBLinearRegression
 from .eval_rec_rule import eval_univ_basis
+from .glexindex import glexindex
+from .input_space import InputSpace
+from .orthogonal_matching_pursuit import OrthogonalMatchingPursuit
 from .reg_fast_ard import RegressionFastARD
 from .reg_fast_laplace import RegressionFastLaplace
-from .orthogonal_matching_pursuit import OrthogonalMatchingPursuit
-from .bayes_linear import VBLinearRegression, EBLinearRegression
-from .apoly_construction import apoly_construction
+
 warnings.filterwarnings("ignore")
 # Load the mplstyle
+# noinspection SpellCheckingInspection
 plt.style.use(os.path.join(os.path.split(__file__)[0],
                            '../', 'bayesvalidrox.mplstyle'))
 
 
-class MetaModel():
+# noinspection SpellCheckingInspection
+def corr_loocv_error(clf, psi, coeffs, y):
+    """
+    Calculates the corrected LOO error for regression on regressor
+    matrix `psi` that generated the coefficients based on [1] and [2].
+
+    [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+        uncertainty propagation and sensitivity analysis (Doctoral
+        dissertation, Clermont-Ferrand 2).
+
+    [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+        expansion based on least angle regression. Journal of computational
+        Physics, 230(6), pp.2345-2367.
+
+    Parameters
+    ----------
+    clf : object
+        Fitted estimator.
+    psi : array of shape (n_samples, n_features)
+        The multivariate orthogonal polynomials (regressor).
+    coeffs : array-like of shape (n_features,)
+        Estimated cofficients.
+    y : array of shape (n_samples,)
+        Target values.
+
+    Returns
+    -------
+    R_2 : float
+        LOOCV Validation score (1-LOOCV error).
+    residual : array of shape (n_samples,)
+        Residual values (y - predicted targets).
+
+    """
+    psi = np.array(psi, dtype=float)
+
+    # Create PSI_Sparse by removing redundant terms
+    nnz_idx = np.nonzero(coeffs)[0]
+    if len(nnz_idx) == 0:
+        nnz_idx = [0]
+    psi_sparse = psi[:, nnz_idx]
+
+    # NrCoeffs of aPCEs
+    P = len(nnz_idx)
+    # NrEvaluation (Size of experimental design)
+    N = psi.shape[0]
+
+    # Build the projection matrix
+    PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
+
+    if np.linalg.cond(PsiTPsi) > 1e-12:  # and \
+        # np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
+        # faster
+        try:
+            M = sp.linalg.solve(PsiTPsi,
+                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+        except:
+            raise AttributeError(
+                'There are too few samples for the corrected loo-cv error. Fit surrogate on at least as many '
+                'samples as parameters to use this')
+    else:
+        # stabler
+        M = np.linalg.pinv(PsiTPsi)
+
+    # h factor (the full matrix is not calculated explicitly,
+    # only the trace is, to save memory)
+    PsiM = np.dot(psi_sparse, M)
+
+    h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.longdouble)  # float128)
+
+    # ------ Calculate Error Loocv for each measurement point ----
+    # Residuals
+    try:
+        residual = clf.predict(psi) - y
+    except:
+        residual = np.dot(psi, coeffs) - y
+
+    # Variance
+    var_y = np.var(y)
+
+    if var_y == 0:
+        # norm_emp_error = 0
+        loo_error = 0
+        LCerror = np.zeros(y.shape)
+        return 1 - loo_error, LCerror
+    else:
+        # norm_emp_error = np.mean(residual ** 2) / var_y
+
+        # LCerror = np.divide(residual, (1-h))
+        LCerror = residual / (1 - h)
+        loo_error = np.mean(np.square(LCerror)) / var_y
+        # if there are NaNs, just return an infinite LOO error (this
+        # happens, e.g., when a strongly underdetermined problem is solved)
+        if np.isnan(loo_error):
+            loo_error = np.inf
+
+    # Corrected Error for over-determined system
+    tr_M = np.trace(M)
+    if tr_M < 0 or abs(tr_M) > 1e6:
+        tr_M = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
+
+    # Over-determined system of Equation
+    if N > P:
+        T_factor = N / (N - P) * (1 + tr_M)
+
+    # Under-determined system of Equation
+    else:
+        T_factor = np.inf
+
+    corrected_loo_error = loo_error * T_factor
+
+    R_2 = 1 - corrected_loo_error
+
+    return R_2, LCerror
+
+
+def create_psi(basis_indices, univ_p_val):
+    """
+    This function assembles the design matrix Psi from the given basis index
+    set INDICES and the univariate polynomial evaluations univ_p_val.
+
+    Parameters
+    ----------
+    basis_indices : array of shape (n_terms, n_params)
+        Multi-indices of multivariate polynomials.
+    univ_p_val : array of (n_samples, n_params, n_max+1)
+        All univariate regressors up to `n_max`.
+
+    Raises
+    ------
+    ValueError
+        n_terms in arguments do not match.
+
+    Returns
+    -------
+    psi : array of shape (n_samples, n_terms)
+        Multivariate regressors.
+
+    """
+    # Check if BasisIndices is a sparse matrix
+    sparsity = sp.sparse.issparse(basis_indices)
+    if sparsity:
+        basis_indices = basis_indices.toarray()
+
+    # Initialization and consistency checks
+    # number of input variables
+    n_params = univ_p_val.shape[1]
+
+    # Size of the experimental design
+    n_samples = univ_p_val.shape[0]
+
+    # number of basis terms
+    n_terms = basis_indices.shape[0]
+
+    # check that the variables have consistent sizes
+    if n_params != basis_indices.shape[1]:
+        raise ValueError(
+            f"The shapes of basis_indices ({basis_indices.shape[1]}) and "
+            f"univ_p_val ({n_params}) don't match!!"
+        )
+
+    # Preallocate the Psi matrix for performance
+    psi = np.ones((n_samples, n_terms))
+    # Assemble the Psi matrix
+    for m in range(basis_indices.shape[1]):
+        aa = np.where(basis_indices[:, m] > 0)[0]
+        try:
+            basisIdx = basis_indices[aa, m]
+            bb = univ_p_val[:, m, basisIdx].reshape(psi[:, aa].shape)
+            psi[:, aa] = np.multiply(psi[:, aa], bb)
+        except ValueError as err:
+            raise err
+    return psi
+
+
+def gaussian_process_emulator(X, y, nug_term=None, autoSelect=False,
+                              varIdx=None):
+    """
+    Fits a Gaussian Process Emulator to the target given the training
+    points.
+
+    Parameters
+    ----------
+    X : array of shape (n_samples, n_params)
+        Training points.
+    y : array of shape (n_samples,)
+        Target values.
+    nug_term : float, optional
+        Nugget term. The default is None, i.e. variance of y.
+    autoSelect : bool, optional
+        Loop over some kernels and select the best. The default is False.
+    varIdx : int, optional
+        The index number. The default is None.
+
+    Returns
+    -------
+    gp : object
+        Fitted estimator.
+
+    """
+
+    nug_term = nug_term if nug_term else np.var(y)
+
+    Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                      length_scale_bounds=(1e-25, 1e15)),
+               nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                    alpha=1.0),
+               nug_term * kernels.Matern(length_scale=1.0,
+                                         length_scale_bounds=(1e-15, 1e5),
+                                         nu=1.5)]
+
+    # Automatic selection of the kernel
+    if autoSelect:
+        gp = {}
+        BME = []
+        for i, kernel in enumerate(Kernels):
+            gp[i] = GaussianProcessRegressor(kernel=kernel,
+                                             n_restarts_optimizer=3,
+                                             normalize_y=False)
+
+            # Fit to data using Maximum Likelihood Estimation
+            gp[i].fit(X, y)
+
+            # Store the MLE as BME score
+            BME.append(gp[i].log_marginal_likelihood())
+
+        gp = gp[np.argmax(BME)]
+
+    else:
+        gp = GaussianProcessRegressor(kernel=Kernels[0],
+                                      n_restarts_optimizer=3,
+                                      normalize_y=False)
+        gp.fit(X, y)
+
+    # Compute score
+    if varIdx is not None:
+        Score = gp.score(X, y)
+        print('-' * 50)
+        print(f'Output variable {varIdx}:')
+        print('The estimation of GPE coefficients converged,')
+        print(f'with the R^2 score: {Score:.3f}')
+        print('-' * 50)
+
+    return gp
+
+
+class MetaModel:
     """
     Meta (surrogate) model
 
@@ -82,7 +328,7 @@ class MetaModel():
         `'no'`. There are two ways to select number of components: use
         percentage of the explainable variance threshold (between 0 and 100)
         (Option A) or direct prescription of components' number (Option B):
-
+            >>> MetaModelOpts = MetaModel()
             >>> MetaModelOpts.dim_red_method = 'PCA'
             >>> MetaModelOpts.var_pca_threshold = 99.999  # Option A
             >>> MetaModelOpts.n_pca_components = 12 # Option B
@@ -110,7 +356,7 @@ class MetaModel():
     def __init__(self, input_obj, meta_model_type='PCE',
                  pce_reg_method='OLS', bootstrap_method='fast',
                  n_bootstrap_itrs=1, pce_deg=1, pce_q_norm=1.0,
-                 dim_red_method='no', apply_constraints = False, 
+                 dim_red_method='no', apply_constraints=False,
                  verbose=False):
 
         self.input_obj = input_obj
@@ -123,8 +369,38 @@ class MetaModel():
         self.dim_red_method = dim_red_method
         self.apply_constraints = apply_constraints
         self.verbose = verbose
- 
-    def build_metamodel(self, n_init_samples = None) -> None:
+
+        # Other params
+        self.InputSpace = None
+        self.var_pca_threshold = None
+        self.polycoeffs = None
+        self.errorScale = None
+        self.errorclf_poly = None
+        self.errorRegMethod = None
+        self.nlc = None
+        self.univ_p_val = None
+        self.n_pca_components = None
+        self.out_names = None
+        self.allBasisIndices = None
+        self.deg_array = None
+        self.n_samples = None
+        self.CollocationPoints = None
+        self.pca = None
+        self.LCerror = None
+        self.clf_poly = None
+        self.score_dict = None
+        self.basis_dict = None
+        self.coeffs_dict = None
+        self.q_norm_dict = None
+        self.deg_dict = None
+        self.x_scaler = None
+        self.gp_poly = None
+        self.n_params = None
+        self.ndim = None
+        self.init_type = None
+        self.rmse = None
+
+    def build_metamodel(self, n_init_samples=None) -> None:
         """
         Builds the parts for the metamodel (polynomes,...) that are neede before fitting.
 
@@ -134,31 +410,33 @@ class MetaModel():
             DESCRIPTION.
 
         """
-        
+
         # Generate general warnings
         if self.apply_constraints or self.pce_reg_method.lower() == 'ols':
             print('There are no estimations of surrogate uncertainty available'
                   ' for the chosen regression options. This might lead to issues'
                   ' in later steps.')
-        
+
+        if self.CollocationPoints is None:
+            raise AttributeError('Please provide samples to the metamodel before building it.')
+        self.CollocationPoints = np.array(self.CollocationPoints)
+
         # Add InputSpace to MetaModel if it does not have any
-        if not hasattr(self, 'InputSpace'):
+        if self.InputSpace is None:
+            if n_init_samples is None:
+                n_init_samples = self.CollocationPoints.shape[0]
             self.InputSpace = InputSpace(self.input_obj)
             self.InputSpace.n_init_samples = n_init_samples
             self.InputSpace.init_param_space(np.max(self.pce_deg))
-            
+
         self.ndim = self.InputSpace.ndim
-        
-        if not hasattr(self, 'CollocationPoints'):
-            raise AttributeError('Please provide samples to the metamodel before building it.')
-            
+
         # Transform input samples
         # TODO: this is probably not yet correct! Make 'method' variable
-        self.CollocationPoints = self.InputSpace.transform(self.CollocationPoints, method='user') 
+        self.CollocationPoints = self.InputSpace.transform(self.CollocationPoints, method='user')
 
-        
         self.n_params = len(self.input_obj.Marginals)
-        
+
         # Generate polynomials
         if self.meta_model_type.lower() != 'gpe':
             self.generate_polynomials(np.max(self.pce_deg))
@@ -183,8 +461,10 @@ class MetaModel():
         self.CollocationPoints = np.array(self.CollocationPoints)
         self.n_samples, ndim = self.CollocationPoints.shape
         if self.ndim != ndim:
-            raise AttributeError('The given samples do not match the given number of priors. The samples should be a 2D array of size (#samples, #priors)')
-            
+            raise AttributeError(
+                'The given samples do not match the given number of priors. The samples should be a 2D array of size'
+                '(#samples, #priors)')
+
         self.deg_array = self.__select_degree(ndim, self.n_samples)
 
         # Generate all basis indices
@@ -194,15 +474,13 @@ class MetaModel():
             if deg not in np.fromiter(keys, dtype=float):
                 # Generate the polynomial basis indices
                 for qidx, q in enumerate(self.pce_q_norm):
-                    basis_indices = glexindex(start=0, stop=deg+1,
+                    basis_indices = glexindex(start=0, stop=deg + 1,
                                               dimensions=self.n_params,
                                               cross_truncation=q,
                                               reverse=False, graded=True)
                     self.allBasisIndices[str(deg)][str(q)] = basis_indices
 
-        
-        
-    def fit(self, X, y, parallel = True, verbose = False):
+    def fit(self, X: np.array, y: dict, parallel=False, verbose=False):
         """
         Fits the surrogate to the given data (samples X, outputs y).
         Note here that the samples X should be the transformed samples provided
@@ -214,33 +492,43 @@ class MetaModel():
             The parameter value combinations that the model was evaluated at.
         y : dict of 2D lists or arrays of shape (#samples, #timesteps)
             The respective model evaluations.
+        parallel : bool
+            Set to True to run the training in parallel for various keys.
+            The default is False.
+        verbose : bool
+            Set to True to obtain more information during runtime.
+            The default is False.
 
         Returns
         -------
         None.
 
         """
+        #        print(X)
+        #        print(X.shape)
+        #        print(y)
+        #        print(y['Z'].shape)
         X = np.array(X)
         for key in y.keys():
             y_val = np.array(y[key])
-            if y_val.ndim !=2:
+            if y_val.ndim != 2:
                 raise ValueError('The given outputs y should be 2D')
             y[key] = np.array(y[key])
-        
+
         # Output names are the same as the keys in y
         self.out_names = list(y.keys())
-        
+
         # Build the MetaModel on the static samples
         self.CollocationPoints = X
-        
+
         # TODO: other option: rebuild every time
-        if not hasattr(self, 'deg_array'):
-            self.build_metamodel(n_init_samples = X.shape[1])
-            
+        if self.deg_array is None:
+            self.build_metamodel(n_init_samples=X.shape[1])
+
         # Evaluate the univariate polynomials on InputSpace
         if self.meta_model_type.lower() != 'gpe':
-           self.univ_p_val = self.univ_basis_vals(self.CollocationPoints)
-        
+            self.univ_p_val = self.univ_basis_vals(self.CollocationPoints)
+
         # --- Loop through data points and fit the surrogate ---
         if verbose:
             print(f"\n>>>> Training the {self.meta_model_type} metamodel "
@@ -252,10 +540,10 @@ class MetaModel():
             self.n_bootstrap_itrs = 100
 
         # Check if fast version (update coeffs with OLS) is selected.
+        n_comp_dict = {}
+        first_out = {}
         if self.bootstrap_method.lower() == 'fast':
             fast_bootstrap = True
-            first_out = {}
-            n_comp_dict = {}
         else:
             fast_bootstrap = False
 
@@ -295,8 +583,8 @@ class MetaModel():
                     # Start transformation
                     pca, target, n_comp = self.pca_transformation(
                         Output[b_indices], verbose=False
-                        )
-                    self.pca[f'b_{b_i+1}'][key] = pca
+                    )
+                    self.pca[f'b_{b_i + 1}'][key] = pca
                     # Store the number of components for fast bootsrtrapping
                     if fast_bootstrap and b_i == 0:
                         n_comp_dict[key] = n_comp
@@ -304,39 +592,40 @@ class MetaModel():
                     target = Output[b_indices]
 
                 # Parallel fit regression
+                out = None
                 if self.meta_model_type.lower() == 'gpe':
                     # Prepare the input matrix
                     scaler = MinMaxScaler()
                     X_S = scaler.fit_transform(X_train_b)
 
-                    self.x_scaler[f'b_{b_i+1}'][key] = scaler
+                    self.x_scaler[f'b_{b_i + 1}'][key] = scaler
                     if parallel:
                         out = Parallel(n_jobs=-1, backend='multiprocessing')(
-                            delayed(self.gaussian_process_emulator)(
+                            delayed(gaussian_process_emulator)(
                                 X_S, target[:, idx]) for idx in
                             range(target.shape[1]))
                     else:
-                        results = map(self.gaussian_process_emulator,
-                                      [X_train_b]*target.shape[1],
+                        results = map(gaussian_process_emulator,
+                                      [X_train_b] * target.shape[1],
                                       [target[:, idx] for idx in
                                        range(target.shape[1])]
                                       )
                         out = list(results)
 
                     for idx in range(target.shape[1]):
-                        self.gp_poly[f'b_{b_i+1}'][key][f"y_{idx+1}"] = out[idx]
+                        self.gp_poly[f'b_{b_i + 1}'][key][f"y_{idx + 1}"] = out[idx]
 
                 else:
                     self.univ_p_val = self.univ_p_val[b_indices]
                     if parallel and (not fast_bootstrap or b_i == 0):
                         out = Parallel(n_jobs=-1, backend='multiprocessing')(
-                            delayed(self.adaptive_regression)(X_train_b,
-                                                              target[:, idx],
-                                                              idx)
+                            delayed(self.adaptive_regression)(  # X_train_b,
+                                target[:, idx],
+                                idx)
                             for idx in range(target.shape[1]))
                     elif not parallel and (not fast_bootstrap or b_i == 0):
                         results = map(self.adaptive_regression,
-                                      [X_train_b]*target.shape[1],
+                                      # [X_train_b] * target.shape[1],
                                       [target[:, idx] for idx in
                                        range(target.shape[1])],
                                       range(target.shape[1]))
@@ -347,27 +636,26 @@ class MetaModel():
                         first_out[key] = copy.deepcopy(out)
 
                     if b_i > 0 and fast_bootstrap:
-
                         # fast bootstrap
                         out = self.update_pce_coeffs(
                             X_train_b, target, first_out[key])
 
                     for i in range(target.shape[1]):
                         # Create a dict to pass the variables
-                        self.deg_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['degree']
-                        self.q_norm_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['qnorm']
-                        self.coeffs_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['coeffs']
-                        self.basis_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['multi_indices']
-                        self.score_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['LOOCVScore']
-                        self.clf_poly[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['clf_poly']
-                        #self.LCerror[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['LCerror']
+                        self.deg_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['degree']
+                        self.q_norm_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['qnorm']
+                        self.coeffs_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['coeffs']
+                        self.basis_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['multi_indices']
+                        self.score_dict[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['LOOCVScore']
+                        self.clf_poly[f'b_{b_i + 1}'][key][f"y_{i + 1}"] = out[i]['clf_poly']
+                        # self.LCerror[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['LCerror']
 
         if verbose:
             print(f"\n>>>> Training the {self.meta_model_type} metamodel"
                   " sucessfully completed. <<<<<<\n")
 
     # -------------------------------------------------------------------------
-    def update_pce_coeffs(self, X, y, out_dict = None):
+    def update_pce_coeffs(self, X, y, out_dict=None):
         """
         Updates the PCE coefficents using only the ordinary least square method
         for the fast version of the bootstrapping.
@@ -388,26 +676,26 @@ class MetaModel():
             The updated training output dictionary.
 
         """
+        # TODO: why is X not used here?
         # Make a copy
         final_out_dict = copy.deepcopy(out_dict)
 
         # Loop over the points
         for i in range(y.shape[1]):
 
-                    
             # Extract nonzero basis indices
             nnz_idx = np.nonzero(out_dict[i]['coeffs'])[0]
             if len(nnz_idx) != 0:
                 basis_indices = out_dict[i]['multi_indices']
 
                 # Evaluate the multivariate polynomials on CollocationPoints
-                psi = self.create_psi(basis_indices, self.univ_p_val)
+                psi = create_psi(basis_indices, self.univ_p_val)
 
                 # Calulate the cofficients of surrogate model
                 updated_out = self.regression(
                     psi, y[:, i], basis_indices, reg_method='OLS',
                     sparsity=False
-                    )
+                )
 
                 # Update coeffs in out_dict
                 final_out_dict[i]['coeffs'][nnz_idx] = updated_out['coeffs']
@@ -425,7 +713,7 @@ class MetaModel():
 
         """
         self.InputSpace = InputSpace(self.input_obj,
-                                    meta_Model_type=self.meta_model_type)
+                                     meta_Model_type=self.meta_model_type)
 
     # -------------------------------------------------------------------------
     def univ_basis_vals(self, samples, n_max=None):
@@ -462,63 +750,6 @@ class MetaModel():
         return univ_basis
 
     # -------------------------------------------------------------------------
-    def create_psi(self, basis_indices, univ_p_val):
-        """
-        This function assemble the design matrix Psi from the given basis index
-        set INDICES and the univariate polynomial evaluations univ_p_val.
-
-        Parameters
-        ----------
-        basis_indices : array of shape (n_terms, n_params)
-            Multi-indices of multivariate polynomials.
-        univ_p_val : array of (n_samples, n_params, n_max+1)
-            All univariate regressors up to `n_max`.
-
-        Raises
-        ------
-        ValueError
-            n_terms in arguments do not match.
-
-        Returns
-        -------
-        psi : array of shape (n_samples, n_terms)
-            Multivariate regressors.
-
-        """
-        # Check if BasisIndices is a sparse matrix
-        sparsity = sp.sparse.issparse(basis_indices)
-        if sparsity:
-            basis_indices = basis_indices.toarray()
-
-        # Initialization and consistency checks
-        # number of input variables
-        n_params = univ_p_val.shape[1]
-
-        # Size of the experimental design
-        n_samples = univ_p_val.shape[0]
-
-        # number of basis terms
-        n_terms = basis_indices.shape[0]
-
-        # check that the variables have consistent sizes
-        if n_params != basis_indices.shape[1]:
-            raise ValueError(
-                f"The shapes of basis_indices ({basis_indices.shape[1]}) and "
-                f"univ_p_val ({n_params}) don't match!!"
-                )
-
-        # Preallocate the Psi matrix for performance
-        psi = np.ones((n_samples, n_terms))
-        # Assemble the Psi matrix
-        for m in range(basis_indices.shape[1]):
-            aa = np.where(basis_indices[:, m] > 0)[0]
-            try:
-                basisIdx = basis_indices[aa, m]
-                bb = univ_p_val[:, m, basisIdx].reshape(psi[:, aa].shape)
-                psi[:, aa] = np.multiply(psi[:, aa], bb)
-            except ValueError as err:
-                raise err
-        return psi
 
     # -------------------------------------------------------------------------
     def regression(self, X, y, basis_indices, reg_method=None, sparsity=True):
@@ -536,6 +767,8 @@ class MetaModel():
             Multi-indices of multivariate polynomials.
         reg_method : str, optional
             DESCRIPTION. The default is None.
+        sparsity : bool
+            Use with sparsity-inducing training methods. The default is True
 
         Returns
         -------
@@ -557,22 +790,23 @@ class MetaModel():
             Lambda = 1e-6
 
         # Bayes sparse adaptive aPCE
+        clf_poly = None
         if reg_method.lower() == 'ols':
             clf_poly = lm.LinearRegression(fit_intercept=False)
         elif reg_method.lower() == 'brr':
             clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
                                         fit_intercept=False,
-                                        #normalize=True,
+                                        # normalize=True,
                                         compute_score=compute_score,
                                         alpha_1=1e-04, alpha_2=1e-04,
                                         lambda_1=Lambda, lambda_2=Lambda)
             clf_poly.converged = True
 
         elif reg_method.lower() == 'ard':
-            if X.shape[0]<2:
+            if X.shape[0] < 2:
                 raise ValueError('Regression with ARD can only be performed for more than 2 samples')
             clf_poly = lm.ARDRegression(fit_intercept=False,
-                                        #normalize=True,
+                                        # normalize=True,
                                         compute_score=compute_score,
                                         n_iter=1000, tol=0.0001,
                                         alpha_1=1e-3, alpha_2=1e-3,
@@ -585,14 +819,14 @@ class MetaModel():
                                          n_iter=300, tol=1e-10)
 
         elif reg_method.lower() == 'bcs':
-            if X.shape[0]<10:
+            if X.shape[0] < 10:
                 raise ValueError('Regression with BCS can only be performed for more than 10 samples')
             clf_poly = RegressionFastLaplace(fit_intercept=False,
-                                         bias_term=bias_term,
-                                         n_iter=1000, tol=1e-7)
+                                             bias_term=bias_term,
+                                             n_iter=1000, tol=1e-7)
 
         elif reg_method.lower() == 'lars':
-            if X.shape[0]<10:
+            if X.shape[0] < 10:
                 raise ValueError('Regression with LARS can only be performed for more than 5 samples')
             clf_poly = lm.LassoLarsCV(fit_intercept=False)
 
@@ -608,29 +842,29 @@ class MetaModel():
 
         elif reg_method.lower() == 'ebl':
             clf_poly = EBLinearRegression(optimizer='em')
-            
-        
+
         # Training with constraints automatically uses L2
-        if self.apply_constraints:       
+        if self.apply_constraints:
             # TODO: set the constraints here
             # Define the nonlin. constraint     
-            nlc = NonlinearConstraint(lambda x: np.matmul(X,x),-1,1.1)
+            nlc = NonlinearConstraint(lambda x: np.matmul(X, x), -1, 1.1)
             self.nlc = nlc
-            
-            fun = lambda x: (np.linalg.norm(np.matmul(X, x)-y, ord = 2))**2
-            if self.init_type =='zeros':
-                res = minimize(fun, np.zeros(X.shape[1]), method = 'trust-constr', constraints  = self.nlc) 
+
+            fun = lambda x: (np.linalg.norm(np.matmul(X, x) - y, ord=2)) ** 2
+            res = None
+            if self.init_type == 'zeros':
+                res = minimize(fun, np.zeros(X.shape[1]), method='trust-constr', constraints=self.nlc)
             if self.init_type == 'nonpi':
                 clf_poly.fit(X, y)
                 coeff = clf_poly.coef_
-                res = minimize(fun, coeff, method = 'trust-constr', constraints  = self.nlc)
-            
+                res = minimize(fun, coeff, method='trust-constr', constraints=self.nlc)
+
             coeff = np.array(res.x)
             clf_poly.coef_ = coeff
             clf_poly.X = X
             clf_poly.y = y
             clf_poly.intercept_ = 0
-            
+
         # Training without constraints uses chosen regression method
         else:
             clf_poly.fit(X, y)
@@ -658,76 +892,15 @@ class MetaModel():
         return_out_dict['sparePsi'] = sparse_X
         return_out_dict['coeffs'] = coeffs
         return return_out_dict
-    
-    # -------------------------------------------------------------------------
-    def create_psi(self, basis_indices, univ_p_val):
-        """
-        This function assemble the design matrix Psi from the given basis index
-        set INDICES and the univariate polynomial evaluations univ_p_val.
-
-        Parameters
-        ----------
-        basis_indices : array of shape (n_terms, n_params)
-            Multi-indices of multivariate polynomials.
-        univ_p_val : array of (n_samples, n_params, n_max+1)
-            All univariate regressors up to `n_max`.
-
-        Raises
-        ------
-        ValueError
-            n_terms in arguments do not match.
-
-        Returns
-        -------
-        psi : array of shape (n_samples, n_terms)
-            Multivariate regressors.
-
-        """
-        # Check if BasisIndices is a sparse matrix
-        sparsity = sp.sparse.issparse(basis_indices)
-        if sparsity:
-            basis_indices = basis_indices.toarray()
-
-        # Initialization and consistency checks
-        # number of input variables
-        n_params = univ_p_val.shape[1]
-
-        # Size of the experimental design
-        n_samples = univ_p_val.shape[0]
-
-        # number of basis terms
-        n_terms = basis_indices.shape[0]
-
-        # check that the variables have consistent sizes
-        if n_params != basis_indices.shape[1]:
-            raise ValueError(
-                f"The shapes of basis_indices ({basis_indices.shape[1]}) and "
-                f"univ_p_val ({n_params}) don't match!!"
-                )
-
-        # Preallocate the Psi matrix for performance
-        psi = np.ones((n_samples, n_terms))
-        # Assemble the Psi matrix
-        for m in range(basis_indices.shape[1]):
-            aa = np.where(basis_indices[:, m] > 0)[0]
-            try:
-                basisIdx = basis_indices[aa, m]
-                bb = univ_p_val[:, m, basisIdx].reshape(psi[:, aa].shape)
-                psi[:, aa] = np.multiply(psi[:, aa], bb)
-            except ValueError as err:
-                raise err
-        return psi
 
     # --------------------------------------------------------------------------------------------------------
-    def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
+    def adaptive_regression(self, ED_Y, varIdx, verbose=False):
         """
         Adaptively fits the PCE model by comparing the scores of different
         degrees and q-norm.
 
         Parameters
         ----------
-        ED_X : array of shape (n_samples, n_params)
-            Experimental design.
         ED_Y : array of shape (n_samples,)
             Target values, i.e. simulation results for the Experimental design.
         varIdx : int
@@ -743,7 +916,7 @@ class MetaModel():
 
         """
 
-        n_samples, n_params = ED_X.shape
+        # n_samples, n_params = ED_X.shape
         # Initialization
         qAllCoeffs, AllCoeffs = {}, {}
         qAllIndices_Sparse, AllIndices_Sparse = {}, {}
@@ -751,7 +924,7 @@ class MetaModel():
         qAllnTerms, AllnTerms = {}, {}
         qAllLCerror, AllLCerror = {}, {}
 
-        # Extract degree array and qnorm array
+        # Extract degree array and q-norm array
         deg_array = np.array([*self.allBasisIndices], dtype=int)
         qnorm = [*self.allBasisIndices[str(int(deg_array[0]))]]
 
@@ -763,7 +936,7 @@ class MetaModel():
         n_checks_qNorm = 2
         nqnorms = len(qnorm)
         qNormEarlyStop = True
-        if nqnorms < n_checks_qNorm+1:
+        if nqnorms < n_checks_qNorm + 1:
             qNormEarlyStop = False
 
         # =====================================================================
@@ -771,6 +944,7 @@ class MetaModel():
         # polynomial degree until the highest accuracy is reached
         # =====================================================================
         # For each degree check all q-norms and choose the best one
+        best_q = None
         scores = -np.inf * np.ones(deg_array.shape[0])
         qNormScores = -np.inf * np.ones(nqnorms)
 
@@ -783,38 +957,38 @@ class MetaModel():
                 BasisIndices = self.allBasisIndices[str(deg)][str(q)]
 
                 # Assemble the Psi matrix
-                Psi = self.create_psi(BasisIndices, self.univ_p_val)
+                Psi = create_psi(BasisIndices, self.univ_p_val)
 
-                # Calulate the cofficients of the meta model
+                # Calculate the coefficients of the metamodel
                 outs = self.regression(Psi, ED_Y, BasisIndices)
 
                 # Calculate and save the score of LOOCV
-                score, LCerror = self.corr_loocv_error(outs['clf_poly'],
-                                                       outs['sparePsi'],
-                                                       outs['coeffs'],
-                                                       ED_Y)
+                score, LCerror = corr_loocv_error(outs['clf_poly'],
+                                                  outs['sparePsi'],
+                                                  outs['coeffs'],
+                                                  ED_Y)
 
                 # Check the convergence of noise for FastARD
                 if self.pce_reg_method == 'FastARD' and \
-                   outs['clf_poly'].alpha_ < np.finfo(np.float32).eps:
+                        outs['clf_poly'].alpha_ < np.finfo(np.float32).eps:
                     score = -np.inf
 
                 qNormScores[qidx] = score
-                qAllCoeffs[str(qidx+1)] = outs['coeffs']
-                qAllIndices_Sparse[str(qidx+1)] = outs['spareMulti-Index']
-                qAllclf_poly[str(qidx+1)] = outs['clf_poly']
-                qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
-                qAllLCerror[str(qidx+1)] = LCerror
+                qAllCoeffs[str(qidx + 1)] = outs['coeffs']
+                qAllIndices_Sparse[str(qidx + 1)] = outs['spareMulti-Index']
+                qAllclf_poly[str(qidx + 1)] = outs['clf_poly']
+                qAllnTerms[str(qidx + 1)] = BasisIndices.shape[0]
+                qAllLCerror[str(qidx + 1)] = LCerror
 
                 # EarlyStop check
                 # if there are at least n_checks_qNorm entries after the
                 # best one, we stop
                 if qNormEarlyStop and \
-                   sum(np.isfinite(qNormScores)) > n_checks_qNorm:
+                        sum(np.isfinite(qNormScores)) > n_checks_qNorm:
                     # If the error has increased the last two iterations, stop!
                     qNormScores_nonInf = qNormScores[np.isfinite(qNormScores)]
                     deltas = np.sign(np.diff(qNormScores_nonInf))
-                    if sum(deltas[-n_checks_qNorm+1:]) == 2:
+                    if sum(deltas[-n_checks_qNorm + 1:]) == 2:
                         # stop the q-norm loop here
                         break
                 if np.var(ED_Y) == 0:
@@ -824,11 +998,11 @@ class MetaModel():
             best_q = np.nanargmax(qNormScores)
             scores[degIdx] = qNormScores[best_q]
 
-            AllCoeffs[str(degIdx+1)] = qAllCoeffs[str(best_q+1)]
-            AllIndices_Sparse[str(degIdx+1)] = qAllIndices_Sparse[str(best_q+1)]
-            Allclf_poly[str(degIdx+1)] = qAllclf_poly[str(best_q+1)]
-            AllnTerms[str(degIdx+1)] = qAllnTerms[str(best_q+1)]
-            AllLCerror[str(degIdx+1)] = qAllLCerror[str(best_q+1)]
+            AllCoeffs[str(degIdx + 1)] = qAllCoeffs[str(best_q + 1)]
+            AllIndices_Sparse[str(degIdx + 1)] = qAllIndices_Sparse[str(best_q + 1)]
+            Allclf_poly[str(degIdx + 1)] = qAllclf_poly[str(best_q + 1)]
+            AllnTerms[str(degIdx + 1)] = qAllnTerms[str(best_q + 1)]
+            AllLCerror[str(degIdx + 1)] = qAllLCerror[str(best_q + 1)]
 
             # Check the direction of the error (on average):
             # if it increases consistently stop the iterations
@@ -836,7 +1010,7 @@ class MetaModel():
                 scores_nonInf = scores[scores != -np.inf]
                 ss = np.sign(scores_nonInf - np.max(scores_nonInf))
                 # ss<0 error decreasing
-                errorIncreases = np.sum(np.sum(ss[-2:])) <= -1*n_checks_degree
+                errorIncreases = np.sum(np.sum(ss[-2:])) <= -1 * n_checks_degree
 
             if errorIncreases:
                 break
@@ -847,7 +1021,7 @@ class MetaModel():
 
         # ------------------ Summary of results ------------------
         # Select the one with the best score and save the necessary outputs
-        best_deg = np.nanargmax(scores)+1
+        best_deg = np.nanargmax(scores) + 1
         coeffs = AllCoeffs[str(best_deg)]
         basis_indices = AllIndices_Sparse[str(best_deg)]
         clf_poly = Allclf_poly[str(best_deg)]
@@ -863,24 +1037,24 @@ class MetaModel():
             nnz_idx = np.nonzero(coeffs)[0]
             BasisIndices_Sparse = basis_indices[nnz_idx]
 
-            print(f'Output variable {varIdx+1}:')
+            print(f'Output variable {varIdx + 1}:')
             print('The estimation of PCE coefficients converged at polynomial '
-                  f'degree {deg_array[best_deg-1]} with '
+                  f'degree {deg_array[best_deg - 1]} with '
                   f'{len(BasisIndices_Sparse)} terms (Sparsity index = '
-                  f'{round(len(BasisIndices_Sparse)/P, 3)}).')
+                  f'{round(len(BasisIndices_Sparse) / P, 3)}).')
 
-            print(f'Final ModLOO error estimate: {1-max(scores):.3e}')
-            print('\n'+'-'*50)
+            print(f'Final ModLOO error estimate: {1 - max(scores):.3e}')
+            print('\n' + '-' * 50)
 
         if verbose:
-            print('='*50)
-            print(' '*10 + ' Summary of results ')
-            print('='*50)
+            print('=' * 50)
+            print(' ' * 10 + ' Summary of results ')
+            print('=' * 50)
 
             print("Scores:\n", scores)
-            print("Degree of best score:", self.deg_array[best_deg-1])
+            print("Degree of best score:", self.deg_array[best_deg - 1])
             print("No. of terms:", len(basis_indices))
-            print("Sparsity index:", round(len(basis_indices)/P, 3))
+            print("Sparsity index:", round(len(basis_indices) / P, 3))
             print("Best Indices:\n", basis_indices)
 
             if self.pce_reg_method in ['BRR', 'ARD']:
@@ -899,7 +1073,7 @@ class MetaModel():
 
                 plt.text(0.75, 0.5, text, fontsize=18, transform=ax.transAxes)
                 plt.show()
-            print('='*80)
+            print('=' * 80)
 
         # Create a dict to pass the outputs
         returnVars = dict()
@@ -913,118 +1087,6 @@ class MetaModel():
 
         return returnVars
 
-    # -------------------------------------------------------------------------
-    def corr_loocv_error(self, clf, psi, coeffs, y):
-        """
-        Calculates the corrected LOO error for regression on regressor
-        matrix `psi` that generated the coefficients based on [1] and [2].
-
-        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
-            uncertainty propagation and sensitivity analysis (Doctoral
-            dissertation, Clermont-Ferrand 2).
-
-        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
-            expansion based on least angle regression. Journal of computational
-            Physics, 230(6), pp.2345-2367.
-
-        Parameters
-        ----------
-        clf : object
-            Fitted estimator.
-        psi : array of shape (n_samples, n_features)
-            The multivariate orthogonal polynomials (regressor).
-        coeffs : array-like of shape (n_features,)
-            Estimated cofficients.
-        y : array of shape (n_samples,)
-            Target values.
-
-        Returns
-        -------
-        R_2 : float
-            LOOCV Validation score (1-LOOCV erro).
-        residual : array of shape (n_samples,)
-            Residual values (y - predicted targets).
-
-        """
-        psi = np.array(psi, dtype=float)
-
-        # Create PSI_Sparse by removing redundent terms
-        nnz_idx = np.nonzero(coeffs)[0]
-        if len(nnz_idx) == 0:
-            nnz_idx = [0]
-        psi_sparse = psi[:, nnz_idx]
-
-        # NrCoeffs of aPCEs
-        P = len(nnz_idx)
-        # NrEvaluation (Size of experimental design)
-        N = psi.shape[0]
-
-        # Build the projection matrix
-        PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
-
-        if np.linalg.cond(PsiTPsi) > 1e-12: #and \
-           # np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
-            # faster
-            try:
-                M = sp.linalg.solve(PsiTPsi,
-                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
-            except:
-                raise AttributeError('There are too few samples for the corrected loo-cv error. Fit surrogate on at least as many samples as parameters to use this')
-        else:
-            # stabler
-            M = np.linalg.pinv(PsiTPsi)
-
-        # h factor (the full matrix is not calculated explicitly,
-        # only the trace is, to save memory)
-        PsiM = np.dot(psi_sparse, M)
-
-        h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.longdouble)#float128)
-
-        # ------ Calculate Error Loocv for each measurement point ----
-        # Residuals
-        try:
-            residual = clf.predict(psi) - y
-        except:
-            residual = np.dot(psi, coeffs) - y
-
-        # Variance
-        var_y = np.var(y)
-
-        if var_y == 0:
-            norm_emp_error = 0
-            loo_error = 0
-            LCerror = np.zeros((y.shape))
-            return 1-loo_error, LCerror
-        else:
-            norm_emp_error = np.mean(residual**2)/var_y
-
-            # LCerror = np.divide(residual, (1-h))
-            LCerror = residual / (1-h)
-            loo_error = np.mean(np.square(LCerror)) / var_y
-            # if there are NaNs, just return an infinite LOO error (this
-            # happens, e.g., when a strongly underdetermined problem is solved)
-            if np.isnan(loo_error):
-                loo_error = np.inf
-
-        # Corrected Error for over-determined system
-        tr_M = np.trace(M)
-        if tr_M < 0 or abs(tr_M) > 1e6:
-            tr_M = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
-
-        # Over-determined system of Equation
-        if N > P:
-            T_factor = N/(N-P) * (1 + tr_M)
-
-        # Under-determined system of Equation
-        else:
-            T_factor = np.inf
-
-        corrected_loo_error = loo_error * T_factor
-
-        R_2 = 1 - corrected_loo_error
-
-        return R_2, LCerror
-
     # -------------------------------------------------------------------------
     def pca_transformation(self, target, verbose=False):
         """
@@ -1034,6 +1096,9 @@ class MetaModel():
         ----------
         target : array of shape (n_samples,)
             Target values.
+        verbose : bool
+            Set to True to get more information during function call.
+            The default is False.
 
         Returns
         -------
@@ -1046,20 +1111,20 @@ class MetaModel():
 
         """
         # Transform via Principal Component Analysis
-        if hasattr(self, 'var_pca_threshold'):
+        if self.var_pca_threshold is not None:
             var_pca_threshold = self.var_pca_threshold
         else:
             var_pca_threshold = 100.0
         n_samples, n_features = target.shape
 
-        if hasattr(self, 'n_pca_components'):
+        if self.n_pca_components is not None:
             n_pca_components = self.n_pca_components
         else:
             # Instantiate and fit sklearnPCA object
             covar_matrix = sklearnPCA(n_components=None)
             covar_matrix.fit(target)
             var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_,
-                                     decimals=5)*100)
+                                     decimals=5) * 100)
             # Find the number of components to explain self.varPCAThreshold of
             # variance
             try:
@@ -1084,95 +1149,16 @@ class MetaModel():
 
         return pca, scaled_target, n_pca_components
 
-    # -------------------------------------------------------------------------
-    def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
-                                  varIdx=None):
-        """
-        Fits a Gaussian Process Emulator to the target given the training
-         points.
-
-        Parameters
-        ----------
-        X : array of shape (n_samples, n_params)
-            Training points.
-        y : array of shape (n_samples,)
-            Target values.
-        nug_term : float, optional
-            Nugget term. The default is None, i.e. variance of y.
-        autoSelect : bool, optional
-            Loop over some kernels and select the best. The default is False.
-        varIdx : int, optional
-            The index number. The default is None.
-
-        Returns
-        -------
-        gp : object
-            Fitted estimator.
-
-        """
-
-        nug_term = nug_term if nug_term else np.var(y)
-
-        Kernels = [nug_term * kernels.RBF(length_scale=1.0,
-                                          length_scale_bounds=(1e-25, 1e15)),
-                   nug_term * kernels.RationalQuadratic(length_scale=0.2,
-                                                        alpha=1.0),
-                   nug_term * kernels.Matern(length_scale=1.0,
-                                             length_scale_bounds=(1e-15, 1e5),
-                                             nu=1.5)]
-
-        # Automatic selection of the kernel
-        if autoSelect:
-            gp = {}
-            BME = []
-            for i, kernel in enumerate(Kernels):
-                gp[i] = GaussianProcessRegressor(kernel=kernel,
-                                                 n_restarts_optimizer=3,
-                                                 normalize_y=False)
-
-                # Fit to data using Maximum Likelihood Estimation
-                gp[i].fit(X, y)
-
-                # Store the MLE as BME score
-                BME.append(gp[i].log_marginal_likelihood())
-
-            gp = gp[np.argmax(BME)]
-
-        else:
-            gp = GaussianProcessRegressor(kernel=Kernels[0],
-                                          n_restarts_optimizer=3,
-                                          normalize_y=False)
-            gp.fit(X, y)
-
-        # Compute score
-        if varIdx is not None:
-            Score = gp.score(X, y)
-            print('-'*50)
-            print(f'Output variable {varIdx}:')
-            print('The estimation of GPE coefficients converged,')
-            print(f'with the R^2 score: {Score:.3f}')
-            print('-'*50)
-
-        return gp
-
     # -------------------------------------------------------------------------
     def eval_metamodel(self, samples):
         """
-        Evaluates meta-model at the requested samples. One can also generate
+        Evaluates metamodel at the requested samples. One can also generate
         nsamples.
 
         Parameters
         ----------
         samples : array of shape (n_samples, n_params), optional
-            Samples to evaluate meta-model at. The default is None.
-        nsamples : int, optional
-            Number of samples to generate, if no `samples` is provided. The
-            default is None.
-        sampling_method : str, optional
-            Type of sampling, if no `samples` is provided. The default is
-            'random'.
-        return_samples : bool, optional
-            Retun samples, if no `samples` is provided. The default is False.
+            Samples to evaluate metamodel at. The default is None.
 
         Returns
         -------
@@ -1183,29 +1169,33 @@ class MetaModel():
         """
         # Transform into np array - can also be given as list
         samples = np.array(samples)
-        
+
         # Transform samples to the independent space
         samples = self.InputSpace.transform(
             samples,
             method='user'
-            )
+        )
         # Compute univariate bases for the given samples
+        univ_p_val = None
         if self.meta_model_type.lower() != 'gpe':
             univ_p_val = self.univ_basis_vals(
                 samples,
                 n_max=np.max(self.pce_deg)
-                )
+            )
 
+        mean_pred = None
+        std_pred = None
         mean_pred_b = {}
         std_pred_b = {}
+        b_i = 0
         # Loop over bootstrap iterations
         for b_i in range(self.n_bootstrap_itrs):
 
             # Extract model dictionary
             if self.meta_model_type.lower() == 'gpe':
-                model_dict = self.gp_poly[f'b_{b_i+1}']
+                model_dict = self.gp_poly[f'b_{b_i + 1}']
             else:
-                model_dict = self.coeffs_dict[f'b_{b_i+1}']
+                model_dict = self.coeffs_dict[f'b_{b_i + 1}']
 
             # Loop over outputs
             mean_pred = {}
@@ -1219,30 +1209,30 @@ class MetaModel():
 
                     # Prediction with GPE
                     if self.meta_model_type.lower() == 'gpe':
-                        X_T = self.x_scaler[f'b_{b_i+1}'][output].transform(samples)
-                        gp = self.gp_poly[f'b_{b_i+1}'][output][in_key]
+                        X_T = self.x_scaler[f'b_{b_i + 1}'][output].transform(samples)
+                        gp = self.gp_poly[f'b_{b_i + 1}'][output][in_key]
                         y_mean, y_std = gp.predict(X_T, return_std=True)
 
                     else:
                         # Prediction with PCE
                         # Assemble Psi matrix
-                        basis = self.basis_dict[f'b_{b_i+1}'][output][in_key]
-                        psi = self.create_psi(basis, univ_p_val)
+                        basis = self.basis_dict[f'b_{b_i + 1}'][output][in_key]
+                        psi = create_psi(basis, univ_p_val)
 
                         # Prediction
                         if self.bootstrap_method != 'fast' or b_i == 0:
                             # with error bar, i.e. use clf_poly
-                            clf_poly = self.clf_poly[f'b_{b_i+1}'][output][in_key]
+                            clf_poly = self.clf_poly[f'b_{b_i + 1}'][output][in_key]
                             try:
                                 y_mean, y_std = clf_poly.predict(
                                     psi, return_std=True
-                                    )
+                                )
                             except TypeError:
                                 y_mean = clf_poly.predict(psi)
                                 y_std = np.zeros_like(y_mean)
                         else:
                             # without error bar
-                            coeffs = self.coeffs_dict[f'b_{b_i+1}'][output][in_key]
+                            coeffs = self.coeffs_dict[f'b_{b_i + 1}'][output][in_key]
                             y_mean = np.dot(psi, coeffs)
                             y_std = np.zeros_like(y_mean)
 
@@ -1252,7 +1242,7 @@ class MetaModel():
 
                 # Save predictions for each output
                 if self.dim_red_method.lower() == 'pca':
-                    PCA = self.pca[f'b_{b_i+1}'][output]
+                    PCA = self.pca[f'b_{b_i + 1}'][output]
                     mean_pred[output] = PCA.inverse_transform(mean)
                     std_pred[output] = np.zeros(mean.shape)
                 else:
@@ -1283,12 +1273,13 @@ class MetaModel():
             if self.n_bootstrap_itrs > 1:
                 std_pred[output] = np.std(outs, axis=0)
             else:
+                # TODO: this b_i seems off here
                 std_pred[output] = std_pred_b[b_i][output]
 
         return mean_pred, std_pred
 
     # -------------------------------------------------------------------------
-    def create_model_error(self, X, y, Model, name='Calib'):
+    def create_model_error(self, X, y, MeasuredData):
         """
         Fits a GPE-based model error.
 
@@ -1299,8 +1290,7 @@ class MetaModel():
              extracted data.
         y : array of shape (n_outputs,)
             The model response for the MAP parameter set.
-        name : str, optional
-            Calibration or validation. The default is `'Calib'`.
+        MeasuredData : observed data used to fit the GPR-based bias model.
 
         Returns
         -------
@@ -1315,7 +1305,7 @@ class MetaModel():
 
         # Read data
         # TODO: do this call outside the metamodel
-        MeasuredData = Model.read_observation(case=name)
+        # MeasuredData = Model.read_observation(case=name)
 
         # Fitting GPR based bias model
         for out in outputNames:
@@ -1331,7 +1321,7 @@ class MetaModel():
             delta = data  # - y[out][0]
             BiasInputs = np.hstack((X[out], y[out].reshape(-1, 1)))
             X_S = scaler.fit_transform(BiasInputs)
-            gp = self.gaussian_process_emulator(X_S, delta)
+            gp = gaussian_process_emulator(X_S, delta)
 
             self.errorScale[out]["y_1"] = scaler
             self.errorclf_poly[out]["y_1"] = gp
@@ -1413,13 +1403,13 @@ class MetaModel():
         """
         # TODO: what properties should be moved to the new object?
         new_MetaModelOpts = copy.deepcopy(self)
-        new_MetaModelOpts.input_obj = self.input_obj#InputObj
+        new_MetaModelOpts.input_obj = self.input_obj  # InputObj
         new_MetaModelOpts.InputSpace = self.InputSpace
-        #new_MetaModelOpts.InputSpace.meta_Model = 'aPCE'
-        #new_MetaModelOpts.InputSpace.InputObj = self.input_obj
-        #new_MetaModelOpts.InputSpace.ndim = len(self.input_obj.Marginals)
+        # new_MetaModelOpts.InputSpace.meta_Model = 'aPCE'
+        # new_MetaModelOpts.InputSpace.InputObj = self.input_obj
+        # new_MetaModelOpts.InputSpace.ndim = len(self.input_obj.Marginals)
         new_MetaModelOpts.n_params = len(self.input_obj.Marginals)
-        #new_MetaModelOpts.InputSpace.hdf5_file = None
+        # new_MetaModelOpts.InputSpace.hdf5_file = None
 
         return new_MetaModelOpts
 
@@ -1439,15 +1429,15 @@ class MetaModel():
         Returns
         -------
         deg_array: array
-            Array containing the arrays.
+            The selected degrees.
 
         """
         # Define the deg_array
         max_deg = np.max(self.pce_deg)
         min_Deg = np.min(self.pce_deg)
-        
+
         # TODO: remove the options for sequential?
-        #nitr = n_samples - self.InputSpace.n_init_samples
+        nitr = n_samples - self.InputSpace.n_init_samples
 
         # Check q-norm
         if not np.isscalar(self.pce_q_norm):
@@ -1455,49 +1445,61 @@ class MetaModel():
         else:
             self.pce_q_norm = np.array([self.pce_q_norm])
 
-        def M_uptoMax(maxDeg):
-            n_combo = np.zeros(maxDeg)
-            for i, d in enumerate(range(1, maxDeg+1)):
-                n_combo[i] = math.factorial(ndim+d)
-                n_combo[i] /= math.factorial(ndim) * math.factorial(d)
-            return n_combo
+        # def M_uptoMax(maxDeg):
+        #    n_combo = np.zeros(maxDeg)
+        #    for i, d in enumerate(range(1, maxDeg + 1)):
+        #        n_combo[i] = math.factorial(ndim + d)
+        #        n_combo[i] /= math.factorial(ndim) * math.factorial(d)
+        #    return n_combo
 
         deg_new = max_deg
-        #d = nitr if nitr != 0 and self.n_params > 5 else 1
+        # d = nitr if nitr != 0 and self.n_params > 5 else 1
         # d = 1
         # min_index = np.argmin(abs(M_uptoMax(max_deg)-ndim*n_samples*d))
         # deg_new = range(1, max_deg+1)[min_index]
 
         if deg_new > min_Deg and self.pce_reg_method.lower() != 'fastard':
-            deg_array = np.arange(min_Deg, deg_new+1)
+            deg_array = np.arange(min_Deg, deg_new + 1)
         else:
             deg_array = np.array([deg_new])
 
         return deg_array
 
     def generate_polynomials(self, max_deg=None):
+        """
+        Generates (univariate) polynomials.
+
+        Parameters
+        ----------
+        max_deg : int
+            Maximum polynomial degree.
+
+        Returns
+        -------
+        None
+        """
         # Check for InputSpace
-        if not hasattr(self, 'InputSpace'):
+        if self.InputSpace is None:
             raise AttributeError('Generate or add InputSpace before generating polynomials')
-            
+
         ndim = self.InputSpace.ndim
         # Create orthogonal polynomial coefficients if necessary
-        if (self.meta_model_type.lower()!='gpe') and max_deg is not None:# and self.input_obj.poly_coeffs_flag:
+        if (self.meta_model_type.lower() != 'gpe') and max_deg is not None:  # and self.input_obj.poly_coeffs_flag:
             self.polycoeffs = {}
             for parIdx in tqdm(range(ndim), ascii=True,
                                desc="Computing orth. polynomial coeffs"):
                 poly_coeffs = apoly_construction(
                     self.InputSpace.raw_data[parIdx],
                     max_deg
-                    )
-                self.polycoeffs[f'p_{parIdx+1}'] = poly_coeffs
+                )
+                self.polycoeffs[f'p_{parIdx + 1}'] = poly_coeffs
         else:
             raise AttributeError('MetaModel cannot generate polynomials in the given scenario!')
 
     # -------------------------------------------------------------------------
     def _compute_pce_moments(self):
         """
-        Computes the first two moments using the PCE-based meta-model.
+        Computes the first two moments using the PCE-based metamodel.
 
         Returns
         -------
@@ -1507,11 +1509,11 @@ class MetaModel():
             The second moment (standard deviation) of the surrogate.
 
         """
-        
-        # Check if its truly a pce-surrogate
+
+        # Check if it's truly a pce-surrogate
         if self.meta_model_type.lower() == 'gpe':
             raise AttributeError('Moments can only be computed for pce-type surrogates')
-        
+
         outputs = self.out_names
         pce_means_b = {}
         pce_stds_b = {}
@@ -1519,7 +1521,7 @@ class MetaModel():
         # Loop over bootstrap iterations
         for b_i in range(self.n_bootstrap_itrs):
             # Loop over the metamodels
-            coeffs_dicts = self.coeffs_dict[f'b_{b_i+1}'].items()
+            coeffs_dicts = self.coeffs_dict[f'b_{b_i + 1}'].items()
             means = {}
             stds = {}
             for output, coef_dict in coeffs_dicts:
@@ -1529,20 +1531,20 @@ class MetaModel():
 
                 for index, values in coef_dict.items():
                     idx = int(index.split('_')[1]) - 1
-                    coeffs = self.coeffs_dict[f'b_{b_i+1}'][output][index]
+                    coeffs = self.coeffs_dict[f'b_{b_i + 1}'][output][index]
 
                     # Mean = c_0
                     if coeffs[0] != 0:
                         pce_mean[idx] = coeffs[0]
                     else:
-                        clf_poly = self.clf_poly[f'b_{b_i+1}'][output]
+                        clf_poly = self.clf_poly[f'b_{b_i + 1}'][output]
                         pce_mean[idx] = clf_poly[index].intercept_
                     # Var = sum(coeffs[1:]**2)
                     pce_var[idx] = np.sum(np.square(coeffs[1:]))
 
                 # Save predictions for each output
                 if self.dim_red_method.lower() == 'pca':
-                    PCA = self.pca[f'b_{b_i+1}'][output]
+                    PCA = self.pca[f'b_{b_i + 1}'][output]
                     means[output] = PCA.inverse_transform(pce_mean)
                     stds[output] = PCA.inverse_transform(np.sqrt(pce_var))
                 else:
diff --git a/tests/test_BayesInference.py b/tests/test_BayesInference.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f22f9158190c930fe6827fe892e3a87027e33c5
--- /dev/null
+++ b/tests/test_BayesInference.py
@@ -0,0 +1,1105 @@
+# -*- coding: utf-8 -*-
+"""
+Test the BayesInference class for bayesvalidrox
+
+Tests are available for the following functions
+    _logpdf                 - x
+    _kernel_rbf             - x
+class BayesInference:
+    setup_inference         - x
+    create_inference        - x
+    perform_bootstrap       Need working model for tests without emulator
+    _perturb_data           - x
+    create_error_model      Error in the MetaModel
+    _eval_model             Need working model to test this
+    normpdf                 - x
+    _corr_factor_BME_old    - removed
+    _corr_factor_BME        - x
+    _rejection_sampling     - x
+    _posterior_predictive   - x
+    plot_post_params        - x 
+    plot_log_BME            - x
+    _plot_max_a_posteriori  Need working model to test this
+    _plot_post_predictive   - x
+"""
+import sys
+import pytest
+import numpy as np
+import pandas as pd
+
+from bayesvalidrox.surrogate_models.inputs import Input
+from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
+from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
+from bayesvalidrox.pylink.pylink import PyLinkForwardModel as PL
+from bayesvalidrox.surrogate_models.engine import Engine
+from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
+from bayesvalidrox.bayes_inference.mcmc import MCMC
+from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
+from bayesvalidrox.bayes_inference.bayes_inference import _logpdf, _kernel_rbf
+
+sys.path.append("src/")
+sys.path.append("../src/")
+
+
+#%% Test _logpdf
+
+def test_logpdf() -> None:
+    """
+    Calculate loglikelihood
+
+    """
+    _logpdf([0], [0], [1])
+
+
+#%% Test _kernel_rbf
+
+def test_kernel_rbf() -> None:
+    """
+    Create RBF kernel
+    """
+    X = [[0, 0], [1, 1.5]]
+    pars = [1, 0.5, 1]
+    _kernel_rbf(X, pars)
+
+
+def test_kernel_rbf_lesspar() -> None:
+    """
+    Create RBF kernel with too few parameters
+    """
+    X = [[0, 0], [1, 1.5]]
+    pars = [1, 2]
+    with pytest.raises(AttributeError) as excinfo:
+        _kernel_rbf(X, pars)
+    assert str(excinfo.value) == 'Provide 3 parameters for the RBF kernel!'
+
+
+#%% Test MCMC init
+
+def test_BayesInference() -> None:
+    """
+    Construct a BayesInference object
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    engine = Engine(mm, mod, expdes)
+    BayesInference(engine)
+
+
+#%% Test create_inference
+# TODO: comment previously said this test is disabled, but it still runs — confirm intended state
+def test_create_inference() -> None:
+    """
+    Run inference
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts  # Error if this not class 'DiscrepancyOpts' or dict(?)
+    bi.bootstrap = True  # Error if this and bayes_loocv and just_analysis are all False?
+    bi.plot_post_pred = False  # Remaining issue in the violinplot
+    bi.create_inference()
+    # Remaining issue in the violinplot in plot_post_predictive
+
+
+#%% Test rejection_sampling
+def test_rejection_sampling_nologlik() -> None:
+    """
+    Perform rejection sampling without given log likelihood
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    engine = Engine(mm, mod, expdes)
+    bi = BayesInference(engine)
+    bi.prior_samples = expdes.generate_samples(100, 'random')
+    with pytest.raises(AttributeError) as excinfo:
+        bi._rejection_sampling()
+    assert str(excinfo.value) == 'No log-likelihoods available!'
+
+
+def test_rejection_sampling_noprior() -> None:
+    """
+    Perform rejection sampling without prior samples
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    engine = Engine(mm, mod, expdes)
+    bi = BayesInference(engine)
+    with pytest.raises(AttributeError) as excinfo:
+        bi._rejection_sampling()
+    assert str(excinfo.value) == 'No prior samples available!'
+
+
+def test_rejection_sampling() -> None:
+    """
+    Perform rejection sampling
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    engine = Engine(mm, mod, expdes)
+    bi = BayesInference(engine)
+    bi.prior_samples = expdes.generate_samples(100, 'random')
+    bi.log_likes = np.swapaxes(np.atleast_2d(np.log(np.random.random(100) * 3)), 0, 1)
+    bi._rejection_sampling()
+
+
+#%% Test _perturb_data
+
+def test_perturb_data() -> None:
+    """
+    Perturb data
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    data = pd.DataFrame()
+    data['Z'] = [0.45]
+    bi._perturb_data(data, ['Z'])
+
+
+def test_perturb_data_loocv() -> None:
+    """
+    Perturb data with bayes_loocv
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mod = PL()
+    mm = MetaModel(inp)
+    expdes = ExpDesigns(inp)
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    data = pd.DataFrame()
+    data['Z'] = [0.45]
+    bi.bayes_loocv = True
+    bi._perturb_data(data, ['Z'])
+
+
+#%% Test _eval_model
+
+def test_eval_model() -> None:
+    """
+    Run model with descriptive key
+    """
+    # TODO: need functioning example model to test this
+    None
+
+
+#%% Test corr_factor_BME
+
+def test_corr_factor_BME() -> None:
+    """
+    Calculate correction factor
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    obs_data = {'Z': np.array([0.45])}
+    total_sigma2s = {'Z': np.array([0.15])}
+    logBME = [0, 0, 0]
+
+    bi = BayesInference(engine)
+    bi.selected_indices = {'Z': 0}
+    bi._corr_factor_BME(obs_data, total_sigma2s, logBME)
+
+
+def test_corr_factor_BME_selectedindices() -> None:
+    """
+    Calculate correction factor (NOTE(review): byte-identical to test_corr_factor_BME above — differentiate the setup or remove one)
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    obs_data = {'Z': np.array([0.45])}
+    total_sigma2s = {'Z': np.array([0.15])}
+    logBME = [0, 0, 0]
+
+    bi = BayesInference(engine)
+    bi.selected_indices = {'Z': 0}
+    bi._corr_factor_BME(obs_data, total_sigma2s, logBME)
+
+
+#%% Test normpdf
+
+def test_normpdf_nosigmas() -> None:
+    """
+    Run normpdf without any additional sigmas
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': np.array([[0.4], [0.5], [0.45]])}
+
+    mm = MetaModel(inp)
+    mod = PL()
+    mod.Output.names = ['Z']
+    engine = Engine(mm, mod, expdes)
+
+    obs_data = {'Z': np.array([0.45])}
+    total_sigma2s = {'Z': np.array([0.15])}
+
+    bi = BayesInference(engine)
+    bi.normpdf(expdes.Y, obs_data, total_sigma2s, sigma2=None, std=None)
+
+
+def test_normpdf_sigma2() -> None:
+    """
+    Run normpdf with sigma2
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': np.array([[0.4], [0.5], [0.45]])}
+
+    mm = MetaModel(inp)
+    mod = PL()
+    mod.Output.names = ['Z']
+    engine = Engine(mm, mod, expdes)
+
+    obs_data = {'Z': np.array([0.45])}
+    total_sigma2s = {'Z': np.array([0.15])}
+    sigma2 = [[0]]
+
+    bi = BayesInference(engine)
+    bi.normpdf(expdes.Y, obs_data, total_sigma2s, sigma2=sigma2, std=None)
+
+
+def test_normpdf_allsigmas() -> None:
+    """
+    Run normpdf with all additional sigmas
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': np.array([[0.4], [0.5], [0.45]])}
+
+    mm = MetaModel(inp)
+    mod = PL()
+    mod.Output.names = ['Z']
+    engine = Engine(mm, mod, expdes)
+
+    obs_data = {'Z': np.array([0.45])}
+    total_sigma2s = {'Z': np.array([0.15])}
+    sigma2 = [[0]]
+
+    bi = BayesInference(engine)
+    bi.normpdf(expdes.Y, obs_data, total_sigma2s, sigma2=sigma2, std=total_sigma2s)
+
+
+#%% Test setup_inference
+
+def test_setup_inference_noobservation() -> None:
+    """
+    Test the object setup without given observations
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    with pytest.raises(Exception) as excinfo:
+        bi.setup_inference()
+    assert str(
+        excinfo.value) == ('Please provide the observation data as a dictionary via observations attribute or pass the '
+                           'csv-file path to MeasurementFile attribute')
+
+
+def test_setup_inference() -> None:
+    """
+    Test the object setup with observations
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.setup_inference()
+
+
+def test_setup_inference_priorsamples() -> None:
+    """
+    Test the object setup with prior samples set by hand
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.prior_samples = np.swapaxes(np.array([np.random.normal(0, 1, 100)]), 0, 1)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.setup_inference()
+
+
+def test_setup_inference_valid() -> None:
+    """
+    Test the object setup for valid
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations_valid = {'Z': np.array([0.45])}
+    mod.observations_valid = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.name = 'valid'
+    bi.setup_inference()
+
+
+def test_setup_inference_noname() -> None:
+    """
+    Test the object setup for an invalid inference name
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.name = ''
+    with pytest.raises(Exception) as excinfo:
+        bi.setup_inference()
+    assert str(excinfo.value) == 'The set inference type is not known! Use either `calib` or `valid`'
+
+
+#%% Test perform_bootstrap
+
+def test_perform_bootstrap() -> None:
+    """
+    Do bootstrap
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.plot_post_pred = False
+    total_sigma2s = {'Z': np.array([0.15])}
+    bi.setup_inference()
+    bi.perform_bootstrap(total_sigma2s)
+
+
+def test_perform_bootstrap_bayesloocv() -> None:
+    """
+    Do bootstrap with bayes_loocv enabled
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.plot_post_pred = False
+    total_sigma2s = {'Z': np.array([0.15])}
+    bi.setup_inference()
+    bi.bayes_loocv = True
+    bi.perform_bootstrap(total_sigma2s)
+
+
+#%% Test create_error_model
+
+def create_error_model_prior() -> None:
+    """ 
+    Test creating MetaModel error-model for 'prior'
+    """
+    # TODO: there are issues with the expected formats from the MetaModel
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.setup_inference()
+    bi.bias_inputs = expdes.X
+    bi.create_error_model(type_='prior', opt_sigma='B', sampler=None)
+
+
+def create_error_model_posterior() -> None:
+    """ 
+    Test creating MetaModel error-model for 'posterior'
+    """
+    # TODO: there are issues with the expected formats from the MetaModel
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.setup_inference()
+    bi.bias_inputs = expdes.X
+    bi.posterior_df = posterior
+    bi.create_error_model(type_='posterior', opt_sigma='B', sampler=None)
+
+
+#%% Test _posterior_predictive
+
+def test_posterior_predictive() -> None:
+    """
+    Test posterior predictions
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    prior_samples = np.swapaxes(np.array([np.random.normal(0, 1, 10)]), 0, 1)
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+    y_hat, y_std = mm.eval_metamodel(prior_samples)
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    total_sigma2s = {'Z': np.array([0.15])}
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.plot_post_pred = False
+    bi.posterior_df = posterior
+    bi.bias_inputs = expdes.X
+    bi._mean_pce_prior_pred = y_hat
+    bi._std_pce_prior_pred = y_std
+    bi.Discrepancy.total_sigma2 = total_sigma2s
+    bi.setup_inference()
+    bi._posterior_predictive()
+
+
+def test_posterior_predictive_rejection() -> None:
+    """
+    Test posterior predictions with rejection inference
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    prior_samples = np.swapaxes(np.array([np.random.normal(0, 1, 10)]), 0, 1)
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+    y_hat, y_std = mm.eval_metamodel(prior_samples)
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    total_sigma2s = {'Z': np.array([0.15])}
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts
+    bi.bootstrap = True
+    bi.plot_post_pred = False
+    bi.posterior_df = posterior
+    bi.bias_inputs = expdes.X
+    bi._mean_pce_prior_pred = y_hat
+    bi._std_pce_prior_pred = y_std
+    bi.Discrepancy.total_sigma2 = total_sigma2s
+    bi.inference_method = 'rejection'
+    bi.setup_inference()
+    bi._posterior_predictive()
+
+
+#%% Test plot_post_params
+
+def test_plot_post_params() -> None:
+    """
+    Plot posterior dist
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+    bi.posterior_df = posterior
+    bi.plot_post_params('B')
+
+
+def test_plot_post_params_noemulator() -> None:
+    """
+    Plot posterior dist with emulator = False
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+    bi.posterior_df = posterior
+    bi.emulator = False
+    bi.plot_post_params('B')
+
+
+#%% Test plot_log_BME
+
+def test_plot_log_BME() -> None:
+    """
+    Show the log_BME from bootstrapping
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi.log_BME = np.array([[0, 0.2], [0, 0.2]])
+    bi.n_tot_measurement = 1
+    bi.plot_log_BME()
+
+
+def test_plot_log_BME_noemulator() -> None:
+    """
+    Show the log_BME from bootstrapping with emulator = False
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi.log_BME = np.array([[0, 0.2], [0, 0.2]])
+    bi.n_tot_measurement = 1
+    bi.emulator = False
+    bi.plot_log_BME()
+
+
+#%% Test _plot_max_a_posteriori
+
+def test_plot_max_a_posteriori_rejection() -> None:
+    """
+    Plot MAP estimate for rejection
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi.inference_method = 'rejection'
+    bi._plot_post_predictive()
+
+
+def test_plot_max_a_posteriori() -> None:
+    """
+    Plot MAP estimate (NOTE(review): body calls _plot_post_predictive, not _plot_max_a_posteriori — verify)
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi._plot_post_predictive()
+
+
+#%% Test _plot_post_predictive
+
+
+def test_plot_post_predictive_rejection() -> None:
+    """
+    Plot posterior predictions for rejection
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi.inference_method = 'rejection'
+    bi._plot_post_predictive()
+
+
+def test_plot_post_predictive() -> None:
+    """
+    Plot posterior predictions
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mod = PL()
+    engine = Engine(mm, mod, expdes)
+
+    bi = BayesInference(engine)
+    bi._plot_post_predictive()
+
+
+#%% Main runs
+if __name__ == '__main__':
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    # prior_samples = np.swapaxes(np.array([np.random.normal(0,1,10)]),0,1)
+
+    expdes = ExpDesigns(inp)
+    expdes.init_param_space(max_deg=1)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])  # Error in plots if this is not
+
+    mm = MetaModel(inp)
+    mm.n_params = 1
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(1))
+    # y_hat, y_std = mm.eval_metamodel(prior_samples)
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45])}
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}  # Error if x_values not given
+    mod.Output.names = ['Z']
+    mod.n_obs = 1
+
+    engine = Engine(mm, mod, expdes)
+
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    obsData = pd.DataFrame({'Z': np.array([0.45]), 'x_values': np.array([0])}, columns=mod.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = (obsData * 0.15) ** 2
+    DiscrepancyOpts.opt_sigma = 'B'
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = DiscrepancyOpts  # Error if this not class 'DiscrepancyOpts' or dict(?)
+    bi.bootstrap = True  # Error if this and bayes_loocv and just_analysis are all False?
+    bi.plot_post_pred = False  # Remaining issue in the violinplot
+    bi.error_model = False
+    bi.bayes_loocv = True
+    if 1:
+        bi.create_inference()
+    # opt_sigma = 'B'
+    # total_sigma2s = {'Z':np.array([0.15])}
+    # data = pd.DataFrame()
+    # data['Z'] = [0.45]
+    # data['x_values'] = [0.3]
+    # bi.setup_inference()
+    # bi.perform_bootstrap(total_sigma2s)
+    posterior = pd.DataFrame()
+    posterior[None] = [0, 1, 0.5]
+    bi.posterior_df = posterior
+    # bi.bias_inputs = expdes.X
+    # bi._mean_pce_prior_pred = y_hat
+    # bi._std_pce_prior_pred = y_std
+    # bi.Discrepancy.total_sigma2 = total_sigma2s
+    # bi.create_error_model(type_ = 'posterior', opt_sigma = 'B', sampler = None)
+    # bi._posterior_predictive()
+    # bi.plot_post_params('B')
+    # bi.log_BME = np.array([[0,0.2],[0,0.2]])
+    # bi.n_tot_measurement = 1
+    # bi.plot_log_BME()
+    bi.inference_method = 'rejection'
+    bi._plot_max_a_posteriori()
diff --git a/tests/test_BayesModelComparison.py b/tests/test_BayesModelComparison.py
new file mode 100644
index 0000000000000000000000000000000000000000..91f328ec7ae39cf7cded6b228edc1442053a2dfb
--- /dev/null
+++ b/tests/test_BayesModelComparison.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""
+Test the BayesModelComparison class in bayesvalidrox.
+Tests are available for the following functions
+Class BayesModelComparison: 
+    create_model_comparison
+    compare_models
+    generate_dataset
+    __perturb_data
+    cal_model_weight
+    plot_just_analysis
+    plot_model_weights
+    plot_bayes_factor
+    
+"""
+import sys
+sys.path.append("src/")
+import pytest
+import numpy as np
+
+from bayesvalidrox.bayes_inference.bayes_model_comparison import BayesModelComparison
+#from bayesvalidrox.surrogate_models.input_space import InputSpace
+
+def test_BMC() -> None:
+    """
+    Build BMC without inputs
+    """
+    BayesModelComparison()
\ No newline at end of file
diff --git a/tests/test_Discrepancy.py b/tests/test_Discrepancy.py
index c46e0a13751756e0583f3176489e7215da77f4ba..7fb948d905031e7d7e6235857c27792fb29ece57 100644
--- a/tests/test_Discrepancy.py
+++ b/tests/test_Discrepancy.py
@@ -36,22 +36,8 @@ def test_get_sample() -> None:
     """
     Get discrepancy sample
     """
-    inp = Input()
-    inp.add_marginals()
-    inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    disc = Discrepancy(InputDisc = inp)
+    disc = Discrepancy()
     with pytest.raises(AttributeError) as excinfo:
         disc.get_sample(2)
     assert str(excinfo.value) == 'Cannot create new samples, please provide input distributions'
-    
-    
-    
-    
-if __name__ == '__main__':
-    inp = Input()
-    inp.add_marginals()
-    inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    disc = Discrepancy(InputDisc = inp)
-    disc.get_sample(2)
\ No newline at end of file
+    
\ No newline at end of file
diff --git a/tests/test_engine.py b/tests/test_Engine.py
similarity index 56%
rename from tests/test_engine.py
rename to tests/test_Engine.py
index 72dabd466174fbfd2b60ed23fc500e53ad51f240..6b03a26237d1b1ff741bcfd69e8ffff188cc8d7d 100644
--- a/tests/test_engine.py
+++ b/tests/test_Engine.py
@@ -31,10 +31,7 @@ Engine:
 import math
 import numpy as np
 import pandas as pd
-
 import sys
-sys.path.append("src/")
-#import pytest
 
 from bayesvalidrox.surrogate_models.inputs import Input
 from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
@@ -43,6 +40,8 @@ from bayesvalidrox.pylink.pylink import PyLinkForwardModel as PL
 from bayesvalidrox.surrogate_models.engine import Engine
 from bayesvalidrox.surrogate_models.engine import hellinger_distance, logpdf, subdomain
 
+sys.path.append("src/")
+
 
 #%% Test Engine constructor
 
@@ -54,12 +53,13 @@ def test_engine() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mod = PL()
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     Engine(mm, mod, expdes)
 
+
 #%% Test Engine.start_engine
 
 def test_start_engine() -> None:
@@ -69,7 +69,7 @@ def test_start_engine() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mod = PL()
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
@@ -89,9 +89,9 @@ def test__error_Mean_Std() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0],[1.0]], {'Z':[[0.5],[0.5]]})
+    mm.fit([[0.0], [1.0]], {'Z': [[0.5], [0.5]]})
     expdes = ExpDesigns(inp)
     mod = PL()
     mod.mc_reference['mean'] = [0.5]
@@ -100,8 +100,9 @@ def test__error_Mean_Std() -> None:
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
     mean, std = engine._error_Mean_Std()
-    assert mean < 0.01 and std <0.01
-    
+    assert mean < 0.01 and std < 0.01
+
+
 #%% Test Engine._validError
 
 def test__validError() -> None:
@@ -111,19 +112,20 @@ def test__validError() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0],[1.0]], {'Z':[[0.5],[0.5]]})
+    mm.fit([[0.0], [1.0]], {'Z': [[0.5], [0.5]]})
     expdes = ExpDesigns(inp)
     mod = PL()
     expdes.valid_samples = [[0.5]]
-    expdes.valid_model_runs = {'Z':[[0.5]]}
+    expdes.valid_model_runs = {'Z': [[0.5]]}
     mod.Output.names = ['Z']
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
     mean, std = engine._validError()
-    assert mean['Z'][0] < 0.01 #and std['Z'][0] <0.01
-    
+    assert mean['Z'][0] < 0.01  # and std['Z'][0] <0.01
+
+
 #%% Test Engine._BME_Calculator
 
 def test__BME_Calculator() -> None:
@@ -133,21 +135,22 @@ def test__BME_Calculator() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0],[0.5],[1.0]], {'Z':[[0.5],[0.4],[0.5]]})
+    mm.fit([[0.0], [0.5], [1.0]], {'Z': [[0.5], [0.4], [0.5]]})
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
     mod.Output.names = ['Z']
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
-    obs_data = {'Z':np.array([0.45])}
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    obs_data = {'Z': np.array([0.45])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     engine._BME_Calculator(obs_data, sigma2Dict)
     # Note: if error appears here it might also be due to inoptimal choice of training samples
 
+
 def test__BME_Calculator_rmse() -> None:
     """
     Calculate BME with given RMSE
@@ -155,21 +158,22 @@ def test__BME_Calculator_rmse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0],[0.5],[1.0]], {'Z':[[0.5],[0.4],[0.5]]})
+    mm.fit([[0.0], [0.5], [1.0]], {'Z': [[0.5], [0.4], [0.5]]})
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
     mod.Output.names = ['Z']
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
-    obs_data = {'Z':np.array([0.45])}
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    engine._BME_Calculator(obs_data, sigma2Dict, rmse = {'Z':0.1})
+    obs_data = {'Z': np.array([0.45])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine._BME_Calculator(obs_data, sigma2Dict, rmse={'Z': 0.1})
     # Note: if error appears here it might also be due to inoptimal choice of training samples
 
+
 def test__BME_Calculator_lik() -> None:
     """
     Calculate BME with given validation likelihood and post-snapshot
@@ -177,23 +181,23 @@ def test__BME_Calculator_lik() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0],[0.5],[1.0]], {'Z':[[0.5],[0.4],[0.5]]})
+    mm.fit([[0.0], [0.5], [1.0]], {'Z': [[0.5], [0.4], [0.5]]})
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
     mod.Output.names = ['Z']
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
-    obs_data = {'Z':np.array([0.45])}
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    obs_data = {'Z': np.array([0.45])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     expdes.post_snapshot = True
-    
+
     engine.valid_likelihoods = [0.1]
     engine._BME_Calculator(obs_data, sigma2Dict)
-    
+
 
 def test__BME_Calculator_2d() -> None:
     """
@@ -202,28 +206,28 @@ def test__BME_Calculator_2d() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     inp.add_marginals()
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0,1]
+    inp.Marginals[1].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit([[0.0,0.0],[0.5,0.1],[1.0,0.9]], {'Z':[[0.5],[0.4],[0.5]]})
+    mm.fit([[0.0, 0.0], [0.5, 0.1], [1.0, 0.9]], {'Z': [[0.5], [0.4], [0.5]]})
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
     mod.n_obs = 1
     mod.Output.names = ['Z']
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
-    obs_data = {'Z':np.array([0.45])}
-    m_observations = obs_data
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    obs_data = {'Z': np.array([0.45])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     expdes.post_snapshot = True
-    
+
     engine.valid_likelihoods = [0.1]
     engine._BME_Calculator(obs_data, sigma2Dict)
-    
+
+
 #%% Test hellinger_distance
 
 def test_hellinger_distance_isnan() -> None:
@@ -232,112 +236,118 @@ def test_hellinger_distance_isnan() -> None:
     """
     P = [0]
     Q = [1]
-    math.isnan(hellinger_distance(P,Q))
-    
+    math.isnan(hellinger_distance(P, Q))
+
+
 def test_hellinger_distance_0() -> None:
     """
     Calculate Hellinger distance-0
     """
-    P = [0,1,2]
-    Q = [1,0,2]
-    assert hellinger_distance(P,Q) == 0.0
-    
+    P = [0, 1, 2]
+    Q = [1, 0, 2]
+    assert hellinger_distance(P, Q) == 0.0
+
+
 def test_hellinger_distance_1() -> None:
     """
     Calculate Hellinger distance-1
     """
-    P = [0,1,2]
-    Q = [0,0,0]
-    assert hellinger_distance(P,Q) == 1.0
-    
+    P = [0, 1, 2]
+    Q = [0, 0, 0]
+    assert hellinger_distance(P, Q) == 1.0
+
+
 #%% Test Engine._normpdf
-   
+
 def test__normpdf() -> None:
     """
     Likelihoods based on gaussian dist
     """
-    
+
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     mod = PL()
     mod.Output.names = ['Z']
-    
-    y_hat_pce =  {'Z':np.array([[0.12]])}
-    std_pce = {'Z':np.array([[0.05]])}
-    obs_data = {'Z':np.array([0.1])}
-    sigma2Dict = {'Z':np.array([0.05])}
-    total_sigma2s = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    
+
+    y_hat_pce = {'Z': np.array([[0.12]])}
+    std_pce = {'Z': np.array([[0.05]])}
+    obs_data = {'Z': np.array([0.1])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    total_sigma2s = pd.DataFrame(sigma2Dict, columns=['Z'])
+
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
     engine._normpdf(y_hat_pce, std_pce, obs_data, total_sigma2s)
-      
+
+
 def test__normpdf_rmse() -> None:
     """
     Likelihoods based on gaussian dist with rmse
     """
-    
+
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     mod = PL()
     mod.Output.names = ['Z']
-    
-    y_hat_pce =  {'Z':np.array([[0.12]])}
-    std_pce = {'Z':np.array([[0.05]])}
-    obs_data = {'Z':np.array([0.1])}
-    sigma2Dict = {'Z':np.array([0.05])}
-    total_sigma2s = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    
+
+    y_hat_pce = {'Z': np.array([[0.12]])}
+    std_pce = {'Z': np.array([[0.05]])}
+    obs_data = {'Z': np.array([0.1])}
+    sigma2Dict = {'Z': np.array([0.05])}
+    total_sigma2s = pd.DataFrame(sigma2Dict, columns=['Z'])
+
     engine = Engine(mm, mod, expdes)
     engine.start_engine()
-    engine._normpdf(y_hat_pce, std_pce, obs_data, total_sigma2s, rmse = {'Z':0.1})
-    
-    
+    engine._normpdf(y_hat_pce, std_pce, obs_data, total_sigma2s, rmse={'Z': 0.1})
+
+
 #%% Test Engine._posteriorPlot
 
 def test__posteriorPlot() -> None:
     """
     Plot posterior
-    """    
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
-    posterior = np.array([[0],[0.1],[0.2]])
+    posterior = np.array([[0], [0.1], [0.2]])
     engine = Engine(mm, mod, expdes)
     engine._posteriorPlot(posterior, ['i'], 'Z')
-    
+
+
 def test__posteriorPlot_2d() -> None:
     """
     Plot posterior for 2 params
-    """    
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     inp.add_marginals()
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0,1]
+    inp.Marginals[1].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
-    expdes.generate_ED(2,transform=True,max_pce_deg=1)
+    expdes.generate_ED(2, max_pce_deg=1)
     mod = PL()
-    posterior = np.array([[0,0],[0.1,1.0],[0.2,0.5]])
+    posterior = np.array([[0, 0], [0.1, 1.0], [0.2, 0.5]])
     engine = Engine(mm, mod, expdes)
     engine._posteriorPlot(posterior, ['i', 'j'], 'Z')
-    
+
+
 #%% Test logpdf
 
 def test_logpdf() -> None:
@@ -345,7 +355,8 @@ def test_logpdf() -> None:
     Calculate log-pdf
     """
     logpdf(np.array([0.1]), np.array([0.2]), np.array([0.1]))
-    
+
+
 #%% Test Engine._corr_factor_BME
 # TODO: not used again here?
 
@@ -355,7 +366,7 @@ def test_subdomain() -> None:
     """
     Create subdomains from bounds
     """
-    subdomain([(0,1),(0,1)], 2)
+    subdomain([(0, 1), (0, 1)], 2)
 
 
 #%% Test Engine.tradeoff_weights
@@ -363,89 +374,94 @@ def test_subdomain() -> None:
 def test_tradeoff_weights_None() -> None:
     """
     Tradeoff weights with no scheme
-    """  
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     mod = PL()
     engine = Engine(mm, mod, expdes)
-    weights = engine.tradeoff_weights(None, [[0],[1]], {'Z':[[0.4],[0.5]]})
+    weights = engine.tradeoff_weights(None, [[0], [1]], {'Z': [[0.4], [0.5]]})
     assert weights[0] == 0 and weights[1] == 1
-    
+
+
 def test_tradeoff_weights_equal() -> None:
     """
     Tradeoff weights with 'equal' scheme
-    """  
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     mod = PL()
     engine = Engine(mm, mod, expdes)
-    weights = engine.tradeoff_weights('equal', [[0],[1]], {'Z':[[0.4],[0.5]]})
+    weights = engine.tradeoff_weights('equal', [[0], [1]], {'Z': [[0.4], [0.5]]})
     assert weights[0] == 0.5 and weights[1] == 0.5
-    
+
+
 def test_tradeoff_weights_epsdecr() -> None:
     """
     Tradeoff weights with 'epsilon-decreasing' scheme
-    """  
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 3
-    expdes.X = np.array([[0],[1]])
+    expdes.X = np.array([[0], [1]])
     mod = PL()
     engine = Engine(mm, mod, expdes)
-    weights = engine.tradeoff_weights('epsilon-decreasing', expdes.X, {'Z':[[0.4],[0.5]]})
+    weights = engine.tradeoff_weights('epsilon-decreasing', expdes.X, {'Z': [[0.4], [0.5]]})
     assert weights[0] == 1.0 and weights[1] == 0.0
-    
+
+
 def test_tradeoff_weights_adaptive() -> None:
     """
     Tradeoff weights with 'adaptive' scheme
-    """  
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 3
-    expdes.X = np.array([[0],[1]])
+    expdes.X = np.array([[0], [1]])
     mod = PL()
     engine = Engine(mm, mod, expdes)
-    weights = engine.tradeoff_weights('adaptive', expdes.X, {'Z':[[0.4],[0.5]]})
+    weights = engine.tradeoff_weights('adaptive', expdes.X, {'Z': [[0.4], [0.5]]})
     assert weights[0] == 0.5 and weights[1] == 0.5
-    
+
+
 def test_tradeoff_weights_adaptiveit1() -> None:
     """
     Tradeoff weights with 'adaptive' scheme for later iteration (not the first)
-    """  
+    """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
     mod = PL()
     engine = Engine(mm, mod, expdes)
-    engine._y_hat_prev, _ = mm.eval_metamodel(samples=np.array([[0.1],[0.2],[0.6]]))
+    engine._y_hat_prev, _ = mm.eval_metamodel(samples=np.array([[0.1], [0.2], [0.6]]))
     engine.tradeoff_weights('adaptive', expdes.X, expdes.Y)
-    
+
+
 #%% Test Engine.choose_next_sample
 
 def test_choose_next_sample() -> None:
@@ -455,25 +471,26 @@ def test_choose_next_sample() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='random'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'random'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_da_spaceparallel() -> None:
     """
     Chooses new sample using dual-annealing and space-filling, parallel=True
@@ -481,26 +498,27 @@ def test_choose_next_sample_da_spaceparallel() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='dual-annealing'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'dual-annealing'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     engine.parallel = True
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-       
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_da_spacenoparallel() -> None:
     """
     Chooses new sample using dual-annealing and space-filling, parallel = False
@@ -508,26 +526,27 @@ def test_choose_next_sample_da_spacenoparallel() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='dual-annealing'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'dual-annealing'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     engine.parallel = False
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_loo_space() -> None:
     """
     Chooses new sample using all LOO-CV and space-filling
@@ -535,25 +554,26 @@ def test_choose_next_sample_loo_space() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='LOO-CV'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'LOO-CV'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_vor_space() -> None:
     """
     Chooses new sample using voronoi, space-filling
@@ -561,25 +581,26 @@ def test_choose_next_sample_vor_space() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='voronoi'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'voronoi'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_space() -> None:
     """
     Chooses new sample using all latin-hypercube, space-filling
@@ -587,25 +608,26 @@ def test_choose_next_sample_latin_space() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'Space-filling'
+    expdes.util_func = 'Space-filling'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
     x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-     
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_alphD() -> None:
     """
     Chooses new sample using all latin-hypercube, alphabetic (D)
@@ -613,25 +635,26 @@ def test_choose_next_sample_latin_alphD() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='alphabetic'
-    expdes.util_func='D-Opt'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'alphabetic'
+    expdes.util_func = 'D-Opt'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
-     
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_alphK() -> None:
     """
     Chooses new sample using all latin-hypercube, alphabetic (K)
@@ -639,25 +662,26 @@ def test_choose_next_sample_latin_alphK() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='alphabetic'
-    expdes.util_func='K-Opt'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'alphabetic'
+    expdes.util_func = 'K-Opt'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_alphA() -> None:
     """
     Chooses new sample using all latin-hypercube, alphabetic (A)
@@ -665,25 +689,26 @@ def test_choose_next_sample_latin_alphA() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='alphabetic'
-    expdes.util_func='A-Opt'
-    
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'alphabetic'
+    expdes.util_func = 'A-Opt'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
-     
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_VarALM() -> None:
     """
     Chooses new sample using all latin-hypercube, VarDesign (ALM)
@@ -691,26 +716,27 @@ def test_choose_next_sample_latin_VarALM() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='VarOptDesign'
-    expdes.util_func='ALM'
-    
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'VarOptDesign'
+    expdes.util_func = 'ALM'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
-     
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_VarEIGF() -> None:
     """
     Chooses new sample using all latin-hypercube, VarDesign (EIGF)
@@ -718,25 +744,26 @@ def test_choose_next_sample_latin_VarEIGF() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='VarOptDesign'
-    expdes.util_func='EIGF'
-    
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'VarOptDesign'
+    expdes.util_func = 'EIGF'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
 
 def test_choose_next_sample_latin_VarLOO() -> None:
     """
@@ -745,26 +772,27 @@ def test_choose_next_sample_latin_VarLOO() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='VarOptDesign'
-    expdes.util_func='LOOCV'
-    
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'VarOptDesign'
+    expdes.util_func = 'LOOCV'
+
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    x, nan = engine.choose_next_sample(var = expdes.util_func)
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
+    x, nan = engine.choose_next_sample(var=expdes.util_func)
+    assert x.shape[0] == 1 and x.shape[1] == 1
+
+
 def test_choose_next_sample_latin_BODMI() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (MI)
@@ -772,28 +800,29 @@ def test_choose_next_sample_latin_BODMI() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='MI'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'MI'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
-      
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
+
 def test_choose_next_sample_latin_BODALC() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (ALC)
@@ -801,28 +830,29 @@ def test_choose_next_sample_latin_BODALC() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='ALC'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'ALC'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
-   
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
+
 def test_choose_next_sample_latin_BODDKL() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (DKL)
@@ -830,29 +860,29 @@ def test_choose_next_sample_latin_BODDKL() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='DKL'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'DKL'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
 
-   
 def test_choose_next_sample_latin_BODDPP() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (DPP)
@@ -860,29 +890,29 @@ def test_choose_next_sample_latin_BODDPP() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='DPP'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'DPP'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
 
-   
 def test_choose_next_sample_latin_BODAPP() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (APP)
@@ -890,59 +920,59 @@ def test_choose_next_sample_latin_BODAPP() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='APP'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'APP'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
 
-   
-def test_choose_next_sample_latin_BODMI() -> None:
+
+def test_choose_next_sample_latin_BODMI_() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesOptDesign (MI)
     """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesOptDesign'
-    expdes.util_func='MI'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesOptDesign'
+    expdes.util_func = 'MI'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
 
-   
 def test_choose_next_sample_latin_BADBME() -> None:
     """
     Chooses new sample using all latin-hypercube, BayesActDesign (BME)
@@ -950,28 +980,29 @@ def test_choose_next_sample_latin_BADBME() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesActDesign'
-    expdes.util_func='BME'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesActDesign'
+    expdes.util_func = 'BME'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     engine.n_obs = 1
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
+
 
 def test_choose_next_sample_latin_BADDKL() -> None:
     """
@@ -980,28 +1011,28 @@ def test_choose_next_sample_latin_BADDKL() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesActDesign'
-    expdes.util_func='DKL'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesActDesign'
+    expdes.util_func = 'DKL'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     engine.n_obs = 1
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
 
 
 def test_choose_next_sample_latin_BADinfEntropy() -> None:
@@ -1011,52 +1042,25 @@ def test_choose_next_sample_latin_BADinfEntropy() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     expdes = ExpDesigns(inp)
     expdes.n_init_samples = 2
     expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
     expdes.tradeoff_scheme = 'equal'
-    expdes.explore_method='latin-hypercube'
-    expdes.exploit_method='BayesActDesign'
-    expdes.util_func='infEntropy'
+    expdes.explore_method = 'latin-hypercube'
+    expdes.exploit_method = 'BayesActDesign'
+    expdes.util_func = 'infEntropy'
     mm = MetaModel(inp)
     mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
     mod = PL()
     engine = Engine(mm, mod, expdes)
     engine.out_names = ['Z']
-    engine.observations = {'Z':np.array([0.45])}
-    #engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
-    sigma2Dict = {'Z':np.array([0.05])}
-    sigma2Dict = pd.DataFrame(sigma2Dict, columns = ['Z'])
+    engine.observations = {'Z': np.array([0.45])}
+    # engine.choose_next_sample(sigma2=None, n_candidates=5, var='DKL')
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
     engine.n_obs = 1
-    x, nan = engine.choose_next_sample(sigma2=sigma2Dict, var = expdes.util_func)
-
-    
-if __name__ == '__main__':
-    inp = Input()
-    inp.add_marginals()
-    inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    expdes = ExpDesigns(inp)
-    expdes.n_init_samples = 2
-    expdes.n_max_samples = 4
-    expdes.X = np.array([[0],[1],[0.5]])
-    expdes.Y = {'Z':[[0.4],[0.5],[0.45]]}
-    expdes.explore_method='dual-annealing'
-    expdes.exploit_method='Space-filling'
-    expdes.util_func='Space-filling'
-    
-    mm = MetaModel(inp)
-    mm.fit(expdes.X, expdes.Y)
-    expdes.generate_ED(expdes.n_init_samples, transform=True, max_pce_deg=np.max(mm.pce_deg))
-    mod = PL()
-    engine = Engine(mm, mod, expdes)
-    engine.out_names = ['Z']
-    engine.parallel = True
-    x, nan = engine.choose_next_sample()
-    assert x.shape[0]==1 and x.shape[1] == 1
-    
-    None
\ No newline at end of file
+    engine.choose_next_sample(sigma2=sigma2Dict, var=expdes.util_func)
diff --git a/tests/test_ExpDesign.py b/tests/test_ExpDesign.py
index 42f87663c2d843c4fa3a23e047270673501dbd4c..68255b3380881a8182ecd5a3de7411842fcedebd 100644
--- a/tests/test_ExpDesign.py
+++ b/tests/test_ExpDesign.py
@@ -131,6 +131,47 @@ def test_random_sampler() -> None:
     exp = ExpDesigns(inp)
     exp.random_sampler(4)
     
+def test_random_sampler_largedatanoJDist() -> None:
+    """
+    Sample randomly, init_param_space implicitly, more samples wanted than given, no JDist available
+    """
+    x = np.random.uniform(0,1,1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    exp = ExpDesigns(inp)
+    with pytest.raises(AttributeError) as excinfo:
+        exp.random_sampler(100000) 
+    assert str(excinfo.value) == 'Sampling cannot proceed, build ExpDesign with max_deg != 0 to create JDist!'
+    
+def test_random_sampler_largedataJDist0() -> None:
+    """
+    Sample randomly, init_param_space implicitly, more samples wanted than given, 
+    JDist available, priors given via samples
+    """
+    x = np.random.uniform(0,1,1000)
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].input_data = x
+    exp = ExpDesigns(inp)
+    exp.init_param_space(max_deg = 1)
+    exp.random_sampler(100000) 
+    
+def test_random_sampler_largedataJDist1() -> None:
+    """
+    Sample randomly, init_param_space implicitly, more samples wanted than given, 
+    JDist available, prior distributions given
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0,1]
+    exp = ExpDesigns(inp)
+    exp.init_param_space(max_deg = 1)
+    exp.random_sampler(100000) 
+     
+        
+        
 def test_random_sampler_rawdata() -> None:
     """
     Sample randomly, init_param_space implicitly, has 2d raw data
@@ -338,7 +379,7 @@ def test_read_from_file_wrongcomp():
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = ExpDesigns(inp, sampling_method = 'user')
-    exp.hdf5_file = 'tests/ExpDesign_testfile.hdf5'
+    exp.hdf5_file = 'ExpDesign_testfile.hdf5'
     with pytest.raises(KeyError) as excinfo:
         exp.read_from_file(['Out'])
     assert str(excinfo.value) == "'Unable to open object (component not found)'"
@@ -352,13 +393,5 @@ def test_read_from_file():
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = ExpDesigns(inp, sampling_method = 'user')
-    exp.hdf5_file = 'tests/ExpDesign_testfile.hdf5'
+    exp.hdf5_file = 'ExpDesign_testfile.hdf5'
     exp.read_from_file(['Z'])
-    
-if __name__ == '__main__':
-    x = np.random.uniform(0,1,1000)
-    inp = Input()
-    inp.add_marginals()
-    inp.Marginals[0].input_data = x
-    exp = ExpDesigns(inp, sampling_method = 'PCM')
-    exp.generate_ED(4)
\ No newline at end of file
diff --git a/tests/test_Input.py b/tests/test_Input.py
index 84b9b239cb1646ccc2a946fb7a76831f17d38c1f..41c0e5ab84a5178b1934504bc63b5d5c03d19078 100644
--- a/tests/test_Input.py
+++ b/tests/test_Input.py
@@ -9,10 +9,10 @@ Class Input:
 @author: Rebecca Kohlhaas
 """
 import sys
-sys.path.append("src/")
-import pytest
 
-from bayesvalidrox.surrogate_models.inputs import Marginal, Input
+from bayesvalidrox.surrogate_models.inputs import Input
+
+sys.path.append("src/")
 
 
 def test_addmarginals() -> None:
diff --git a/tests/test_InputSpace.py b/tests/test_InputSpace.py
index 1b5a28fa3eb4b1ad11c8a666a9e98e2b0dbaa8b9..ae31f8e90d051e39dd67c2e55f900a3eb0e11958 100644
--- a/tests/test_InputSpace.py
+++ b/tests/test_InputSpace.py
@@ -10,13 +10,16 @@ Class InputSpace:
 
 """
 import sys
-sys.path.append("src/")
 import pytest
 import numpy as np
 
 from bayesvalidrox.surrogate_models.inputs import Input
 from bayesvalidrox.surrogate_models.input_space import InputSpace
 
+sys.path.append("src/")
+sys.path.append("../src/")
+
+
 #%% Test ExpDesign.check_valid_input
 
 def test_check_valid_input_hasmarg() -> None:
@@ -28,6 +31,7 @@ def test_check_valid_input_hasmarg() -> None:
         InputSpace(inp)
     assert str(excinfo.value) == 'Cannot build distributions if no marginals are given'
 
+
 def test_check_valid_input_haspriors() -> None:
     """
     Distribution not built if no distribution set for the marginals
@@ -36,45 +40,43 @@ def test_check_valid_input_haspriors() -> None:
     inp.add_marginals()
     with pytest.raises(AssertionError) as excinfo:
         InputSpace(inp)
-    assert str(excinfo.value) ==  'Not all marginals were provided priors'
-    
+    assert str(excinfo.value) == 'Not all marginals were provided priors'
+
+
 def test_check_valid_input_priorsmatch() -> None:
     """
     Distribution not built if dist types do not align
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     inp.add_marginals()
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0,1]
+    inp.Marginals[1].parameters = [0, 1]
     with pytest.raises(AssertionError) as excinfo:
         InputSpace(inp)
     assert str(excinfo.value) == 'Distributions cannot be built as the priors have different types'
 
+
 def test_check_valid_input_samples() -> None:
     """
     Design built correctly - samples
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     inp.add_marginals()
-    inp.Marginals[1].input_data = x+2
-    try:
-        InputSpace(inp)
-    except AssertionError:
-        pytest.fail("ExpDesign raised AssertionError unexpectedly!")
-    # TODO: check for better options to assert that no error at all occurred
-    
+    inp.Marginals[1].input_data = x + 2
+    InputSpace(inp)
+
 
 def test_check_valid_input_both() -> None:
     """
     Design no built - samples and dist type given
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -83,7 +85,8 @@ def test_check_valid_input_both() -> None:
         InputSpace(inp)
     assert str(excinfo.value) == 'Both samples and distribution type are given. Please choose only one.'
 
-#def test_check_valid_input_distnotok() -> None:
+
+# def test_check_valid_input_distnotok() -> None:
 #    """
 #    Design built incorrectly - dist types without parameters
 #    """
@@ -95,7 +98,7 @@ def test_check_valid_input_both() -> None:
 #    with pytest.raises(AssertionError) as excinfo:
 #        exp = ExpDesigns(inp)
 #    assert str(excinfo.value) == 'Some distributions do not have characteristic values'
-    
+
 def test_check_valid_input_distok() -> None:
     """
     Design built correctly - dist types
@@ -103,16 +106,13 @@ def test_check_valid_input_distok() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     inp.add_marginals()
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0,1]
-    try:
-        InputSpace(inp)
-    except AssertionError:
-        pytest.fail("ExpDesign raised AssertionError unexpectedly!")
-    # TODO: check for better options to assert that no error at all occurred
-    
+    inp.Marginals[1].parameters = [0, 1]
+    InputSpace(inp)
+
+
 def test_check_valid_input_noapc() -> None:
     """
     Design built correctly - no apc
@@ -120,11 +120,12 @@ def test_check_valid_input_noapc() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     inp.add_marginals()
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0,1]
-    InputSpace(inp, meta_Model_type = 'gpe')
+    inp.Marginals[1].parameters = [0, 1]
+    InputSpace(inp, meta_Model_type='gpe')
+
 
 #%% Test ExpDesign.build_polytypes
 def test_build_polytypes_normalerr() -> None:
@@ -140,6 +141,7 @@ def test_build_polytypes_normalerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_normal() -> None:
     """
     Build dist 'normal'
@@ -147,11 +149,11 @@ def test_build_polytypes_normal() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
-    
+
+
 def test_build_polytypes_uniferr() -> None:
     """
     Build dist 'unif' - too few params
@@ -165,6 +167,7 @@ def test_build_polytypes_uniferr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_unif() -> None:
     """
     Build dist 'unif'
@@ -172,10 +175,11 @@ def test_build_polytypes_unif() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'unif'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
+
+
 def test_build_polytypes_gammaerr() -> None:
     """
     Build dist 'gamma' - too few params
@@ -189,6 +193,8 @@ def test_build_polytypes_gammaerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
+# noinspection SpellCheckingInspection
 def test_build_polytypes_gamma() -> None:
     """
     Build dist 'gamma'
@@ -196,12 +202,14 @@ def test_build_polytypes_gamma() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'gamma'
-    inp.Marginals[0].parameters = [0,1,0]
+    inp.Marginals[0].parameters = [0, 1, 0]
     exp = InputSpace(inp)
     with pytest.raises(ValueError) as excinfo:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Parameter values are not valid, please set differently'
-    
+
+
+# noinspection SpellCheckingInspection
 def test_build_polytypes_betaerr() -> None:
     """
     Build dist 'beta' - too few params
@@ -215,6 +223,7 @@ def test_build_polytypes_betaerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_beta() -> None:
     """
     Build dist 'beta'
@@ -222,11 +231,12 @@ def test_build_polytypes_beta() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'beta'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
-        
+
+
+# noinspection SpellCheckingInspection
 def test_build_polytypes_lognormerr() -> None:
     """
     Build dist 'lognorm' - too few params
@@ -240,6 +250,7 @@ def test_build_polytypes_lognormerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_lognorm() -> None:
     """
     Build dist 'lognorm'
@@ -247,11 +258,11 @@ def test_build_polytypes_lognorm() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'lognorm'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
-        
+
+
 def test_build_polytypes_exponerr() -> None:
     """
     Build dist 'expon' - too few params
@@ -265,6 +276,7 @@ def test_build_polytypes_exponerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_expon() -> None:
     """
     Build dist 'expon'
@@ -272,11 +284,11 @@ def test_build_polytypes_expon() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'expon'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
-        
+
+
 def test_build_polytypes_weibullerr() -> None:
     """
     Build dist 'weibull' - too few params
@@ -290,6 +302,7 @@ def test_build_polytypes_weibullerr() -> None:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'Distribution has too few parameters!'
 
+
 def test_build_polytypes_weibull() -> None:
     """
     Build dist 'weibull'
@@ -297,50 +310,52 @@ def test_build_polytypes_weibull() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'weibull'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
+
 
 def test_build_polytypes_arbitrary() -> None:
     """
     Build poly 'arbitrary'
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-   
+
+
 def test_build_polytypes_rosenblatt() -> None:
     """
     Build dist with rosenblatt
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.build_polytypes(True)
-    
+
+
 def test_build_polytypes_samples() -> None:
     """
     Build dist from samples
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.build_polytypes(False)
-    
-    
+
+
 def test_build_polytypes_samples2d() -> None:
     """
     Build dist from samples - samples too high dim
     """
-    x = np.random.uniform(0,1,(2,1000))
+    x = np.random.uniform(0, 1, (2, 1000))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -348,21 +363,22 @@ def test_build_polytypes_samples2d() -> None:
     with pytest.raises(ValueError) as excinfo:
         exp.build_polytypes(False)
     assert str(excinfo.value) == 'The samples provided to the Marginals should be 1D only'
-    
-    
+
+
 #%% Test ExpDesign.init_param_space
 
 def test_init_param_space_nomaxdegsample() -> None:
     """
     Init param space without max_deg for given samples
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.init_param_space()
 
+
 def test_init_param_space_nomaxdegdist() -> None:
     """
     Init param space without max_deg for given dist
@@ -370,21 +386,23 @@ def test_init_param_space_nomaxdegdist() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'expon'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.init_param_space()
-     
+
+
 def test_init_param_space_maxdeg() -> None:
     """
     Init param space with max_deg for given samples
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-    
+
+
 def test_init_param_space_maxdegdist() -> None:
     """
     Init param space with max_deg for given dist (not uniform)
@@ -392,10 +410,11 @@ def test_init_param_space_maxdegdist() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'expon'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-     
+
+
 def test_init_param_space_maxdegdistunif() -> None:
     """
     Init param space with max_deg for given dist (uniform)
@@ -403,20 +422,19 @@ def test_init_param_space_maxdegdistunif() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'unif'
-    inp.Marginals[0].parameters = [0.5,1,2,3]
+    inp.Marginals[0].parameters = [0.5, 1, 2, 3]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-     
-    
-    
+
+
 #%% Test ExpDesign.transform
-    
+
 def test_transform_noparamspace() -> None:
     """
     Call transform without a built JDist
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(2,1000))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (2, 1000))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -424,12 +442,13 @@ def test_transform_noparamspace() -> None:
     with pytest.raises(AttributeError) as excinfo:
         exp.transform(y)
     assert str(excinfo.value) == 'Call function init_param_space first to create JDist'
-      
+
+
 def test_transform_dimerrlow() -> None:
     """
     Call transform with too few dimensions
     """
-    x = np.random.uniform(0,1,1000)
+    x = np.random.uniform(0, 1, 1000)
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -438,13 +457,14 @@ def test_transform_dimerrlow() -> None:
     with pytest.raises(AttributeError) as excinfo:
         exp.transform(x)
     assert str(excinfo.value) == 'X should have two dimensions'
-          
+
+
 def test_transform_dimerrhigh() -> None:
     """
     Call transform with too many dimensions
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(1,1,1000))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (1, 1, 1000))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -453,13 +473,14 @@ def test_transform_dimerrhigh() -> None:
     with pytest.raises(AttributeError) as excinfo:
         exp.transform(y)
     assert str(excinfo.value) == 'X should have two dimensions'
-    
+
+
 def test_transform_dimerr0() -> None:
     """
     Call transform with wrong X.shape[0]
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(2,1000))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (2, 1000))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
@@ -467,27 +488,30 @@ def test_transform_dimerr0() -> None:
     exp.init_param_space(max_deg=2)
     with pytest.raises(AttributeError) as excinfo:
         exp.transform(y)
-    assert str(excinfo.value) == 'The second dimension of X should be the same size as the number of marginals in the InputObj'
-   
+    assert str(
+        excinfo.value) == 'The second dimension of X should be the same size as the number of marginals in the InputObj'
+
+
 def test_transform_paramspace() -> None:
     """
     Transform successfully
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(1000,1))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
     exp.transform(y)
-  
+
+
 def test_transform_rosenblatt() -> None:
     """
     Transform with rosenblatt
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(1000,1))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.Rosenblatt = True
     inp.add_marginals()
@@ -495,86 +519,92 @@ def test_transform_rosenblatt() -> None:
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
     exp.transform(y)
-  
+
+
 def test_transform_user() -> None:
     """
     Transform with method 'user'
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(1000,1))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-    exp.transform(y, method = 'user')
-  
+    exp.transform(y, method='user')
+
+
+# noinspection SpellCheckingInspection
 def test_transform_rosenblattuser() -> None:
     """
     Transform with rosenblatt and method 'user'
     """
-    x = np.random.uniform(0,1,1000)
-    y = np.random.uniform(0,1,(1000,1))
+    x = np.random.uniform(0, 1, 1000)
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.Rosenblatt = True
     inp.add_marginals()
     inp.Marginals[0].input_data = x
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-    exp.transform(y, method = 'user')
-  
+    exp.transform(y, method='user')
+
+
 def test_transform_uniform() -> None:
     """
     Transform uniform dist
     """
-    y = np.random.uniform(0,1,(1000,1))
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'unif'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
     exp.transform(y)
-  
+
+
 def test_transform_norm() -> None:
     """
     Transform normal dist
     """
-    y = np.random.uniform(0,1,(1000,1))
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'norm'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
     exp.transform(y)
-  
+
+
 # TODO: what are these other params here???
 def test_transform_gammanoparam() -> None:
     """
     Transform gamma dist - no parameters
     """
-    y = np.random.uniform(0,1,(1000,1))
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'gamma'
-    inp.Marginals[0].parameters = [1,1,0]
+    inp.Marginals[0].parameters = [1, 1, 0]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
     with pytest.raises(AttributeError) as excinfo:
         exp.transform(y)
     assert str(excinfo.value) == 'Additional parameters have to be set for the gamma distribution!'
-  
+
+
 def test_transform_gammaparam() -> None:
     """
     Transform gamma dist - with parameters
     """
-    y = np.random.uniform(0,1,(1000,1))
+    y = np.random.uniform(0, 1, (1000, 1))
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'gamma'
-    inp.Marginals[0].parameters = [1,1,0]
+    inp.Marginals[0].parameters = [1, 1, 0]
     exp = InputSpace(inp)
     exp.init_param_space(max_deg=2)
-    exp.transform(y, params = [1,1])
-  
\ No newline at end of file
+    exp.transform(y, params=[1, 1])
diff --git a/tests/test_MCMC.py b/tests/test_MCMC.py
new file mode 100644
index 0000000000000000000000000000000000000000..3485a615b34150f63f8c8f428167453393531c09
--- /dev/null
+++ b/tests/test_MCMC.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+"""
+Test the MCM class of bayesvalidrox
+Tests are available for the following functions
+_check_ranges           - x
+gelmain_rubin
+_iterative_scheme
+_my_ESS                 - x
+Class MCMC: 
+    run_sampler
+    log_prior
+    log_likelihood
+    log_posterior
+    eval_model
+    train_error_model
+    marginal_llk_emcee
+"""
+import sys
+import pandas as pd
+import numpy as np
+
+from bayesvalidrox.surrogate_models.inputs import Input
+from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
+from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
+from bayesvalidrox.pylink.pylink import PyLinkForwardModel as PL
+from bayesvalidrox.surrogate_models.engine import Engine
+from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
+from bayesvalidrox.bayes_inference.mcmc import MCMC
+from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
+from bayesvalidrox.bayes_inference.mcmc import _check_ranges, gelman_rubin
+
+sys.path.append("src/")
+sys.path.append("../src/")
+
+
+#%% Test MCMC init
+
+def test_MCMC() -> None:
+    """
+    Construct an MCMC object
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    disc = Discrepancy('')
+    disc.type = 'Gaussian'
+    disc.parameters = (obsData * 0.15) ** 2
+    disc.opt_sigma = 'B'
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = disc
+    bi.inference_method = 'mcmc'
+    bi.setup_inference()
+    MCMC(bi)
+
+
+#%% Test run_sampler
+
+def test_run_sampler() -> None:
+    """
+    Run short MCMC
+
+    Returns
+    -------
+    None
+        DESCRIPTION.
+
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    expdes.x_values = np.array([0])
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+    engine = Engine(mm, mod, expdes)
+
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    disc = Discrepancy('')
+    disc.type = 'Gaussian'
+    disc.parameters = (obsData * 0.15) ** 2
+    disc.opt_sigma = 'B'
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = disc
+    bi.inference_method = 'mcmc'
+    bi.setup_inference()
+    total_sigma2s = {'Z': np.array([0.15])}
+    mcmc = MCMC(bi)
+    mcmc.nburn = 10
+    mcmc.nsteps = 50
+    mcmc.run_sampler(mod.observations, total_sigma2s)
+
+
+#%% Test log_prior
+
+#%% Test log_likelihood
+
+#%% Test log_posterior
+
+#%% Test eval_model
+
+#%% Test train_error_model
+
+#%% Test gelmain_rubin
+
+def test_gelman_rubin() -> None:
+    """
+    Calculate gelman-rubin
+    """
+    chain = [[[1], [2]]]
+    gelman_rubin(chain)
+
+
+def test_gelman_rubin_returnvar() -> None:
+    """
+    Calculate gelman-rubin returning var
+    """
+    chain = [[[1], [2]]]
+    gelman_rubin(chain, return_var=True)
+
+
+#%% Test marginal_llk_emcee
+
+#%% Test _check_ranges
+
+def test_check_ranges() -> None:
+    """
+    Check to see if theta lies in expected ranges
+    """
+    theta = [0.5, 1.2]
+    ranges = [[0, 1], [1, 2]]
+    assert _check_ranges(theta, ranges) is True
+
+
+def test_check_ranges_inv() -> None:
+    """
+    Check to see if theta lies not in expected ranges
+    """
+    theta = [1.5, 1.2]
+    ranges = [[0, 1], [1, 2]]
+    assert _check_ranges(theta, ranges) is False
+
+
+#%% Main
+
+if __name__ == '__main__':
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    expdes = ExpDesigns(inp)
+    expdes.n_init_samples = 2
+    expdes.n_max_samples = 4
+    expdes.X = np.array([[0], [1], [0.5]])
+    expdes.Y = {'Z': [[0.4], [0.5], [0.45]]}
+    # expdes.x_values = np.array([0]) #  Error in plots if this is not available
+
+    mm = MetaModel(inp)
+    mm.fit(expdes.X, expdes.Y)
+    expdes.generate_ED(expdes.n_init_samples, max_pce_deg=np.max(mm.pce_deg))
+
+    mod = PL()
+    mod.observations = {'Z': np.array([0.45]), 'x_values': np.array([0])}
+    mod.Output.names = ['Z']
+
+    engine = Engine(mm, mod, expdes)
+
+    sigma2Dict = {'Z': np.array([0.05])}
+    sigma2Dict = pd.DataFrame(sigma2Dict, columns=['Z'])
+    obsData = pd.DataFrame(mod.observations, columns=mod.Output.names)
+    disc = Discrepancy('')
+    disc.type = 'Gaussian'
+    disc.parameters = (obsData * 0.15) ** 2
+    disc.opt_sigma = 'B'
+
+    bi = BayesInference(engine)
+    bi.Discrepancy = disc
+    bi.inference_method = 'mcmc'
+    bi.setup_inference()
+
+    # chain = [[[1],[2]]]
+    total_sigma2s = {'Z': np.array([0.15])}
+    mcmc = MCMC(bi)
+    mcmc.nsteps = 50
+    mcmc.nburn = 10
+    mcmc.run_sampler(mod.observations, total_sigma2s)
+    # mcmc.gelmain_rubin(chain)
+
+    chain = [[[1], [2]]]
+    gelman_rubin(chain, return_var=True)
diff --git a/tests/test_MetaModel.py b/tests/test_MetaModel.py
index b5095fa3494619b32af5d3ff81991e123adceab9..a3f9b19d3fb42c3e6ec3f34b3efb19298c8de545 100644
--- a/tests/test_MetaModel.py
+++ b/tests/test_MetaModel.py
@@ -26,13 +26,16 @@ Class MetaModel:
     
 """
 import numpy as np
-import sys
-sys.path.append("src/")
 import pytest
+import sys
 
 from bayesvalidrox.surrogate_models.inputs import Input
 from bayesvalidrox.surrogate_models.input_space import InputSpace
-from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
+from bayesvalidrox.surrogate_models.surrogate_models import MetaModel, corr_loocv_error, create_psi
+from bayesvalidrox.surrogate_models.surrogate_models import gaussian_process_emulator
+
+sys.path.append("src/")
+
 
 #%% Test MetaMod constructor on its own
 
@@ -43,9 +46,10 @@ def test_metamod() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     MetaModel(inp)
-    
+
+
 #%% Test MetaModel.build_metamodel
 
 def test_build_metamodel_nosamples() -> None:
@@ -55,12 +59,12 @@ def test_build_metamodel_nosamples() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     with pytest.raises(AttributeError) as excinfo:
         mm.build_metamodel()
     assert str(excinfo.value) == 'Please provide samples to the metamodel before building it.'
-    
+
 
 def test_build_metamodel() -> None:
     """
@@ -69,11 +73,12 @@ def test_build_metamodel() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.CollocationPoints = [[0.2],[0.8]]
+    mm.CollocationPoints = np.array([[0.2], [0.8]])
     mm.build_metamodel()
-    
+
+
 def test_build_metamodel_ninitsamples() -> None:
     """
     Build MetaModel with n_init_samples
@@ -81,11 +86,12 @@ def test_build_metamodel_ninitsamples() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.CollocationPoints = [[0.2],[0.8]]
-    mm.build_metamodel(n_init_samples = 2)
-    
+    mm.CollocationPoints = np.array([[0.2], [0.8]])
+    mm.build_metamodel(n_init_samples=2)
+
+
 def test_build_metamodel_gpe() -> None:
     """
     Build MetaModel gpe
@@ -93,13 +99,13 @@ def test_build_metamodel_gpe() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.meta_model_type = 'gpe'
-    mm.CollocationPoints = [[0.2],[0.8]]
+    mm.CollocationPoints = np.array([[0.2], [0.8]])
     mm.build_metamodel()
-    
-    
+
+
 def test_build_metamodel_coldimerr() -> None:
     """
     Build MetaModel with wrong shape collocation samples
@@ -107,29 +113,31 @@ def test_build_metamodel_coldimerr() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.CollocationPoints = [[0.2,0.8]]
+    mm.CollocationPoints = [[0.2, 0.8]]
     with pytest.raises(AttributeError) as excinfo:
         mm.build_metamodel()
-    assert str(excinfo.value) == 'The given samples do not match the given number of priors. The samples should be a 2D array of size (#samples, #priors)'
+    assert str(
+        excinfo.value) == 'The second dimension of X should be the same size as the number of marginals in the InputObj'
 
 
 #%% Test MetaMod.generate_polynomials
 
-def test_generate_polynomials_noExp() -> None:
+def test_generate_polynomials_noexp() -> None:
     """
     Generate polynomials without ExpDeg
     """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     with pytest.raises(AttributeError) as excinfo:
         mm.generate_polynomials()
     assert str(excinfo.value) == 'Generate or add InputSpace before generating polynomials'
-    
+
+
 def test_generate_polynomials_nodeg() -> None:
     """
     Generate polynomials without max_deg
@@ -137,21 +145,21 @@ def test_generate_polynomials_nodeg() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    
+
     # Setup
     mm.InputSpace = InputSpace(inp)
     mm.InputSpace.n_init_samples = 2
     mm.InputSpace.init_param_space(np.max(mm.pce_deg))
     mm.ndim = mm.InputSpace.ndim
     mm.n_params = len(mm.input_obj.Marginals)
-        
+
     # Generate
     with pytest.raises(AttributeError) as excinfo:
         mm.generate_polynomials()
     assert str(excinfo.value) == 'MetaModel cannot generate polynomials in the given scenario!'
-    
+
 
 def test_generate_polynomials_deg() -> None:
     """
@@ -160,96 +168,100 @@ def test_generate_polynomials_deg() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    
+
     # Setup
     mm.InputSpace = InputSpace(inp)
     mm.InputSpace.n_init_samples = 2
     mm.InputSpace.init_param_space(np.max(mm.pce_deg))
     mm.ndim = mm.InputSpace.ndim
     mm.n_params = len(mm.input_obj.Marginals)
-        
+
     # Generate
     mm.generate_polynomials(4)
-    
-    
+
+
 #%% Test MetaMod.add_InputSpace
 
-def test_add_InputSpace() -> None:
+def test_add_inputspace() -> None:
     """
     Add InputSpace in MetaModel
     """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.add_InputSpace()
-    
+
+
 #%% Test MetaModel.fit
 # Faster without these
-if 0:
-    def test_fit() -> None:
-        """
-        Fit MetaModel 
-        """
-        inp = Input()
-        inp.add_marginals()
-        inp.Marginals[0].dist_type = 'normal'
-        inp.Marginals[0].parameters = [0,1]
-        mm = MetaModel(inp)
-        mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
-        
-    def test_fit_parallel() -> None:
-        """
-        Fit MetaModel in parallel
-        """
-        inp = Input()
-        inp.add_marginals()
-        inp.Marginals[0].dist_type = 'normal'
-        inp.Marginals[0].parameters = [0,1]
-        mm = MetaModel(inp)
-        mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]}, parallel = True)
-        
-    def test_fit_verbose() -> None:
-        """
-        Fit MetaModel verbose
-        """
-        inp = Input()
-        inp.add_marginals()
-        inp.Marginals[0].dist_type = 'normal'
-        inp.Marginals[0].parameters = [0,1]
-        mm = MetaModel(inp)
-        mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]}, verbose = True)
-        
-            
-    def test_fit_pca() -> None:
-        """
-        Fit MetaModel verbose and with pca
-        """
-        inp = Input()
-        inp.add_marginals()
-        inp.Marginals[0].dist_type = 'normal'
-        inp.Marginals[0].parameters = [0,1]
-        mm = MetaModel(inp)
-        mm.dim_red_method = 'pca'
-        mm.fit( [[0.2],[0.8]], {'Z':[[0.4,0.4],[0.5,0.6]]}, verbose = True)
-        
-    def test_fit_gpe() -> None:
-        """
-        Fit MetaModel 
-        """
-        inp = Input()
-        inp.add_marginals()
-        inp.Marginals[0].dist_type = 'normal'
-        inp.Marginals[0].parameters = [0,1]
-        mm = MetaModel(inp)
-        mm.meta_model_type = 'gpe'
-        mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
-        
+def test_fit() -> None:
+    """
+    Fit MetaModel
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mm = MetaModel(inp)
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
+
+
+def test_fit_parallel() -> None:
+    """
+    Fit MetaModel in parallel
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mm = MetaModel(inp)
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]}, parallel=True)
+
+
+def test_fit_verbose() -> None:
+    """
+    Fit MetaModel verbose
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mm = MetaModel(inp)
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]}, verbose=True)
+
+
+def test_fit_pca() -> None:
+    """
+    Fit MetaModel verbose and with pca
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mm = MetaModel(inp)
+    mm.dim_red_method = 'pca'
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4, 0.4], [0.5, 0.6]]}, verbose=True)
+
+
+def test_fit_gpe() -> None:
+    """
+    Fit MetaModel
+    """
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+    mm = MetaModel(inp)
+    mm.meta_model_type = 'gpe'
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
+
+
 #%% Test MetaModel.create_psi
-        
+
 def test_create_psi() -> None:
     """
     Create psi-matrix
@@ -257,16 +269,16 @@ def test_create_psi() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.2],[0.8]])
+    samples = np.array([[0.2], [0.8]])
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    mm.create_psi(BasisIndices, univ_bas)
-    
-    
+    create_psi(BasisIndices, univ_bas)
+
+
 #%% Test MetaModel.regression
 
 def test_regression() -> None:
@@ -276,19 +288,20 @@ def test_regression() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
     mm.regression(samples, outputs, psi)
-    
+
+
 def test_regression_ols() -> None:
     """
     Regression: ols
@@ -296,19 +309,20 @@ def test_regression_ols() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ols')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ols')
+
+
 def test_regression_olssparse() -> None:
     """
     Regression: ols and sparse
@@ -316,19 +330,20 @@ def test_regression_olssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ols', sparsity = True)
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ols', sparsity=True)
+
+
 def test_regression_ard() -> None:
     """
     Regression: ard
@@ -336,19 +351,20 @@ def test_regression_ard() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.2],[0.8]])
-    outputs = np.array([0.4,0.5])
-    
+    samples = np.array([[0.2], [0.8]])
+    outputs = np.array([0.4, 0.5])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ard')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ard')
+
+
 def test_regression_ardssparse() -> None:
     """
     Regression: ard and sparse
@@ -356,19 +372,20 @@ def test_regression_ardssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.2],[0.8]])
-    outputs = np.array([0.4,0.5])
-    
+    samples = np.array([[0.2], [0.8]])
+    outputs = np.array([0.4, 0.5])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ard', sparsity = True)
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ard', sparsity=True)
+
+
 def test_regression_fastard() -> None:
     """
     Regression: fastard
@@ -376,19 +393,20 @@ def test_regression_fastard() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'fastard')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='fastard')
+
+
 def test_regression_fastardssparse() -> None:
     """
     Regression: fastard and sparse
@@ -396,19 +414,20 @@ def test_regression_fastardssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'fastard', sparsity = True)
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='fastard', sparsity=True)
+
+
 def test_regression_brr() -> None:
     """
     Regression: brr
@@ -416,19 +435,20 @@ def test_regression_brr() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'brr')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='brr')
+
+
 def test_regression_brrssparse() -> None:
     """
     Regression: brr and sparse
@@ -436,19 +456,20 @@ def test_regression_brrssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'brr', sparsity = True)
-        
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='brr', sparsity=True)
+
+
 def test_regression_bcs() -> None:
     """
     Regression: bcs
@@ -456,19 +477,20 @@ def test_regression_bcs() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
     mm.pce_deg = 3
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(mm.pce_deg)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'bcs')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='bcs')
+
+
 def test_regression_bcsssparse() -> None:
     """
     Regression: bcs and sparse
@@ -476,20 +498,20 @@ def test_regression_bcsssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'bcs', sparsity = True)
-    
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='bcs', sparsity=True)
+
+
 def test_regression_lars() -> None:
     """
     Regression: lars
@@ -497,19 +519,20 @@ def test_regression_lars() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'lars')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='lars')
+
+
 def test_regression_larsssparse() -> None:
     """
     Regression: lars and sparse
@@ -517,19 +540,20 @@ def test_regression_larsssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],[0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8], [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'lars', sparsity = True)
-        
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='lars', sparsity=True)
+
+
 def test_regression_sgdr() -> None:
     """
     Regression: sgdr
@@ -537,19 +561,20 @@ def test_regression_sgdr() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'sgdr')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='sgdr')
+
+
 def test_regression_sgdrssparse() -> None:
     """
     Regression: sgdr and sparse
@@ -557,20 +582,20 @@ def test_regression_sgdrssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'sgdr', sparsity = True)
-        
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='sgdr', sparsity=True)
+
+
 def test_regression_omp() -> None:
     """
     Regression: omp
@@ -578,19 +603,20 @@ def test_regression_omp() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'omp')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='omp')
+
+
 def test_regression_ompssparse() -> None:
     """
     Regression: omp and sparse
@@ -598,20 +624,20 @@ def test_regression_ompssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'omp', sparsity = True)
-        
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='omp', sparsity=True)
+
+
 def test_regression_vbl() -> None:
     """
     Regression: vbl
@@ -619,19 +645,20 @@ def test_regression_vbl() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'vbl')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='vbl')
+
+
 def test_regression_vblssparse() -> None:
     """
     Regression: vbl and sparse
@@ -639,19 +666,20 @@ def test_regression_vblssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'vbl', sparsity = True)
-        
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='vbl', sparsity=True)
+
+
 def test_regression_ebl() -> None:
     """
     Regression: ebl
@@ -659,19 +687,20 @@ def test_regression_ebl() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ebl')
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ebl')
+
+
 def test_regression_eblssparse() -> None:
     """
     Regression: ebl and sparse
@@ -679,21 +708,20 @@ def test_regression_eblssparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.5])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi, reg_method = 'ebl', sparsity = True)
-        
-    
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    mm.regression(samples, outputs, psi, reg_method='ebl', sparsity=True)
+
+
 #%% Test Model.update_pce_coeffs
 
 # TODO: very linked to the actual training...
@@ -707,15 +735,16 @@ def test_univ_basis_vals() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.2],[0.8]])
+    samples = np.array([[0.2], [0.8]])
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     mm.univ_basis_vals(samples)
-    
+
+
 #%% Test MetaModel.adaptive_regression
-     
+
 def test_adaptive_regression_fewsamples() -> None:
     """
     Adaptive regression, no specific method, too few samples given
@@ -723,22 +752,25 @@ def test_adaptive_regression_fewsamples() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.8])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
 
     # Evaluate the univariate polynomials on InputSpace
     if mm.meta_model_type.lower() != 'gpe':
-       mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
-    
+        mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
+
     with pytest.raises(AttributeError) as excinfo:
-        mm.adaptive_regression(samples, outputs, 0)
-    assert str(excinfo.value) == 'There are too few samples for the corrected loo-cv error. Fit surrogate on at least as many samples as parameters to use this'
-        
+        mm.adaptive_regression(outputs, 0)
+    assert str(
+        excinfo.value) == ('There are too few samples for the corrected loo-cv error. Fit surrogate on at least as '
+                           'many samples as parameters to use this')
+
+
 def test_adaptive_regression() -> None:
     """
     Adaptive regression, no specific method
@@ -746,19 +778,20 @@ def test_adaptive_regression() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1]])
-    outputs = np.array([0.0,0.1])
-    
+    samples = np.array([[0.0], [0.1]])
+    outputs = np.array([0.0, 0.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
-    
+    mm.build_metamodel(n_init_samples=2)
+
     # Evaluate the univariate polynomials on InputSpace
     if mm.meta_model_type.lower() != 'gpe':
-       mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
-    mm.adaptive_regression(samples, outputs, 0)
-            
+        mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
+    mm.adaptive_regression(outputs, 0)
+
+
 def test_adaptive_regression_verbose() -> None:
     """
     Adaptive regression, no specific method, verbose output
@@ -766,19 +799,20 @@ def test_adaptive_regression_verbose() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1]])
-    outputs = np.array([0.0,0.1])
-    
+    samples = np.array([[0.0], [0.1]])
+    outputs = np.array([0.0, 0.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
-    
+    mm.build_metamodel(n_init_samples=2)
+
     # Evaluate the univariate polynomials on InputSpace
     if mm.meta_model_type.lower() != 'gpe':
-       mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
-    mm.adaptive_regression(samples, outputs, 0, True)
-        
+        mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
+    mm.adaptive_regression(outputs, 0, True)
+
+
 def test_adaptive_regression_ols() -> None:
     """
     Adaptive regression, ols
@@ -786,21 +820,22 @@ def test_adaptive_regression_ols() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],[0.8],
-                        [0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7], [0.8],
+                        [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
-    
+    mm.build_metamodel(n_init_samples=2)
+
     # Evaluate the univariate polynomials on InputSpace
     if mm.meta_model_type.lower() != 'gpe':
-       mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
+        mm.univ_p_val = mm.univ_basis_vals(mm.CollocationPoints)
     mm.pce_reg_method = 'ols'
-    mm.adaptive_regression(samples, outputs, 0)
-    
+    mm.adaptive_regression(outputs, 0)
+
+
 #%% Test MetaModel.corr_loocv_error
 
 def test_corr_loocv_error_nosparse() -> None:
@@ -810,22 +845,23 @@ def test_corr_loocv_error_nosparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],
-                        [0.8],[0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7],
+                        [0.8], [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    outs = mm.regression(samples, outputs, psi, reg_method = 'ebl')
-    mm.corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
-                                          outputs)
-        
+    psi = create_psi(BasisIndices, univ_bas)
+
+    outs = mm.regression(samples, outputs, psi, reg_method='ebl')
+    corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
+                     outputs)
+
+
 def test_corr_loocv_error_singley() -> None:
     """
     Corrected loocv error
@@ -833,21 +869,22 @@ def test_corr_loocv_error_singley() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     samples = np.array([[0.2]])
     outputs = np.array([0.1])
-    
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    outs = mm.regression(samples, outputs, psi, reg_method = 'ols')
-    mm.corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
-                                          outputs)
-        
+    psi = create_psi(BasisIndices, univ_bas)
+
+    outs = mm.regression(samples, outputs, psi, reg_method='ols')
+    corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
+                     outputs)
+
+
 def test_corr_loocv_error_sparse() -> None:
     """
     Corrected loocv error from sparse results
@@ -855,25 +892,26 @@ def test_corr_loocv_error_sparse() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    samples = np.array([[0.0],[0.1],[0.2],[0.3],[0.4],[0.5],[0.6],[0.7],
-                        [0.8],[0.9],[1.0]])
-    outputs = np.array([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.1])
-    
+    samples = np.array([[0.0], [0.1], [0.2], [0.3], [0.4], [0.5], [0.6], [0.7],
+                        [0.8], [0.9], [1.0]])
+    outputs = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1])
+
     mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
+    mm.build_metamodel(n_init_samples=2)
     BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
     univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    outs = mm.regression(samples, outputs, psi, reg_method = 'ebl',
-                         sparsity = True)
-    mm.corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
-                                          outputs)
-    
+    psi = create_psi(BasisIndices, univ_bas)
+
+    outs = mm.regression(samples, outputs, psi, reg_method='ebl',
+                         sparsity=True)
+    corr_loocv_error(outs['clf_poly'], outs['sparePsi'], outs['coeffs'],
+                     outputs)
+
+
 #%% Test MetaModel.pca_transformation
-   
+
 def test_pca_transformation() -> None:
     """
     Apply PCA
@@ -881,11 +919,12 @@ def test_pca_transformation() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    outputs = np.array([[0.4,0.4],[0.5,0.6]])
+    outputs = np.array([[0.4, 0.4], [0.5, 0.6]])
     mm.pca_transformation(outputs)
 
+
 def test_pca_transformation_verbose() -> None:
     """
     Apply PCA verbose
@@ -893,11 +932,12 @@ def test_pca_transformation_verbose() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    outputs = np.array([[0.4,0.4],[0.5,0.6]])
+    outputs = np.array([[0.4, 0.4], [0.5, 0.6]])
     mm.pca_transformation(outputs, True)
-    
+
+
 def test_pca_transformation_varcomp() -> None:
     """
     Apply PCA with set var_pca_threshold
@@ -905,12 +945,13 @@ def test_pca_transformation_varcomp() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    outputs = np.array([[0.4,0.4],[0.5,0.6]])
+    outputs = np.array([[0.4, 0.4], [0.5, 0.6]])
     mm.var_pca_threshold = 1
     mm.pca_transformation(outputs)
-    
+
+
 def test_pca_transformation_ncomp() -> None:
     """
     Apply PCA with set n_pca_components
@@ -918,9 +959,9 @@ def test_pca_transformation_ncomp() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    outputs = np.array([[0.4,0.4],[0.5,0.6]])
+    outputs = np.array([[0.4, 0.4], [0.5, 0.6]])
     mm.n_pca_components = 1
     mm.pca_transformation(outputs)
 
@@ -934,9 +975,9 @@ def test_gaussian_process_emulator() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    mm = MetaModel(inp)
-    mm.gaussian_process_emulator( [[0.2],[0.8]], [0.4,0.5])
+    inp.Marginals[0].parameters = [0, 1]
+    gaussian_process_emulator([[0.2], [0.8]], [0.4, 0.5])
+
 
 def test_gaussian_process_emulator_nug() -> None:
     """
@@ -945,9 +986,9 @@ def test_gaussian_process_emulator_nug() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    mm = MetaModel(inp)
-    mm.gaussian_process_emulator( [[0.2],[0.8]], [0.4,0.5],nug_term=1.0)
+    inp.Marginals[0].parameters = [0, 1]
+    gaussian_process_emulator([[0.2], [0.8]], [0.4, 0.5], nug_term=1.0)
+
 
 def test_gaussian_process_emulator_autosel() -> None:
     """
@@ -956,21 +997,21 @@ def test_gaussian_process_emulator_autosel() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    mm = MetaModel(inp)
-    mm.gaussian_process_emulator( [[0.2],[0.8]], [0.4,0.5],autoSelect=True)
+    inp.Marginals[0].parameters = [0, 1]
+    gaussian_process_emulator([[0.2], [0.8]], [0.4, 0.5], autoSelect=True)
 
-def test_gaussian_process_emulator_varIdx() -> None:
+
+def test_gaussian_process_emulator_varidx() -> None:
     """
     Create GPE with var_idx
     """
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    mm = MetaModel(inp)
-    mm.gaussian_process_emulator( [[0.2],[0.8]], [0.4,0.5],varIdx=1)
-    
+    inp.Marginals[0].parameters = [0, 1]
+    gaussian_process_emulator([[0.2], [0.8]], [0.4, 0.5], varIdx=1)
+
+
 #%% Test MetaModel.eval_metamodel
 
 def test_eval_metamodel() -> None:
@@ -980,12 +1021,13 @@ def test_eval_metamodel() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.out_names = ['Z']
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
     mm.eval_metamodel([[0.4]])
 
+
 def test_eval_metamodel_normalboots() -> None:
     """
     Eval trained MetaModel with normal bootstrap
@@ -993,13 +1035,14 @@ def test_eval_metamodel_normalboots() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.bootstrap_method = 'normal'
     mm.out_names = ['Z']
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
     mm.eval_metamodel([[0.4]])
 
+
 def test_eval_metamodel_highnormalboots() -> None:
     """
     Eval trained MetaModel with higher bootstrap-itrs
@@ -1007,13 +1050,14 @@ def test_eval_metamodel_highnormalboots() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.n_bootstrap_itrs = 2
     mm.out_names = ['Z']
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
     mm.eval_metamodel([[0.4]])
 
+
 def test_eval_metamodel_gpe() -> None:
     """
     Eval trained MetaModel - gpe
@@ -1021,14 +1065,14 @@ def test_eval_metamodel_gpe() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.meta_model_type = 'gpe'
     mm.out_names = ['Z']
-    mm.fit( [[0.2],[0.8]], {'Z':np.array([[0.4],[0.5]])})
+    mm.fit([[0.2], [0.8]], {'Z': np.array([[0.4], [0.5]])})
     mm.eval_metamodel([[0.4]])
 
- 
+
 def test_eval_metamodel_pca() -> None:
     """
     Eval trained MetaModel with pca
@@ -1036,13 +1080,14 @@ def test_eval_metamodel_pca() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.dim_red_method = 'pca'
     mm.out_names = ['Z']
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4,0.4],[0.5,0.6]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4, 0.4], [0.5, 0.6]]})
     mm.eval_metamodel([[0.4]])
-       
+
+
 #%% Test MetaModel.create_model_error
 # TODO: move model out of this function
 
@@ -1057,12 +1102,13 @@ def test_auto_vivification() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.auto_vivification()
-    
+
+
 #%% Test MetaModel.copy_meta_model_opts
-    
+
 def test_copy_meta_model_opts() -> None:
     """
     Copy the metamodel with just some stats
@@ -1070,11 +1116,12 @@ def test_copy_meta_model_opts() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.add_InputSpace()
     mm.copy_meta_model_opts()
-    
+
+
 #%% Test MetaModel.__select_degree
 
 #%% Test Engine._compute_pce_moments
@@ -1086,11 +1133,12 @@ def test__compute_pce_moments() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4],[0.5]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4], [0.5]]})
     mm._compute_pce_moments()
 
+
 def test__compute_pce_moments_pca() -> None:
     """
     Compute moments of a pce-surrogate with pca
@@ -1098,12 +1146,13 @@ def test__compute_pce_moments_pca() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.dim_red_method = 'pca'
-    mm.fit( [[0.2],[0.8]], {'Z':[[0.4,0.4],[0.5,0.6]]})
+    mm.fit([[0.2], [0.8]], {'Z': [[0.4, 0.4], [0.5, 0.6]]})
     mm._compute_pce_moments()
 
+
 def test__compute_pce_moments_gpe() -> None:
     """
     Compute moments of a gpe-surrogate
@@ -1111,30 +1160,13 @@ def test__compute_pce_moments_gpe() -> None:
     inp = Input()
     inp.add_marginals()
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
+    inp.Marginals[0].parameters = [0, 1]
     mm = MetaModel(inp)
     mm.meta_model_type = 'gpe'
     with pytest.raises(AttributeError) as excinfo:
         mm._compute_pce_moments()
     assert str(excinfo.value) == 'Moments can only be computed for pce-type surrogates'
-    
-#%% Test MetaModel.update_metamodel
-#TODO: taken from engine
 
-if __name__ == '__main__':
-    
-    inp = Input()
-    inp.add_marginals()
-    inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0,1]
-    mm = MetaModel(inp)
-    samples = np.array([[0.2]])
-    outputs = np.array([0.5])
-    
-    mm.CollocationPoints = samples
-    mm.build_metamodel(n_init_samples = 2)
-    BasisIndices = mm.allBasisIndices[str(1)][str(1.0)]
-    univ_bas = mm.univ_basis_vals(samples)
-    psi = mm.create_psi(BasisIndices, univ_bas)
-    
-    mm.regression(samples, outputs, psi)
+
+#%% Test MetaModel.update_metamodel
+# TODO: taken from engine