diff --git a/examples/user_guide/example_user_guide.py b/examples/user_guide/example_user_guide.py
index f118d05a6fb03a1268de0fdfea56b963ce3514a5..ea6ea060319bc6d289344157039e503f3ddb70cb 100644
--- a/examples/user_guide/example_user_guide.py
+++ b/examples/user_guide/example_user_guide.py
@@ -5,6 +5,7 @@ Code that goes along with the 'user guide' in the BVR docs.
 @author: Rebecca Kohlhaas
 """
 
+import copy
 import numpy as np
 import pandas as pd
 import sys
@@ -14,12 +15,7 @@ import matplotlib
 
 # Add BayesValidRox path
 sys.path.append("../../src/")
-from bayesvalidrox.surrogate_models.inputs import Input
-from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
-from bayesvalidrox.pylink.pylink import PyLinkForwardModel
-from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
-from bayesvalidrox.surrogate_models.engine import Engine
-from bayesvalidrox.post_processing.post_processing import PostProcessing
+from bayesvalidrox import (Input, ExpDesigns, PyLinkForwardModel, MetaModel,
+                           Engine, PostProcessing, Discrepancy, BayesInference)
 
 if __name__ == '__main__':
     #### Priors, input space and experimental design
@@ -47,13 +43,16 @@ if __name__ == '__main__':
     ExpDesign.plot_samples(samples)
     
     #### Models
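+    # x-grid on which the example model evaluates its outputs.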
+    x_values = np.arange(0, 1, 0.1)
     Model = PyLinkForwardModel()
     Model.link_type = 'Function'
     Model.py_file = 'model'
     Model.name = 'model'
-    Model.Output.names = ['A', 'B']
+    Model.Output.names = ['A']  # 'B' is used again in the model-comparison section below
+    Model.func_args = {'x_values': x_values}
+    Model.store = False
     
-    #output, samples = Model.run_model_parallel(samples, mp = True)
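+    # Evaluate the model at all design samples; mp=True runs the evaluations in parallel.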
+    output, samples = Model.run_model_parallel(samples, mp=True)
     
     #from model import model
     #out1 = model(samples)
@@ -63,11 +62,12 @@ if __name__ == '__main__':
     MetaMod.meta_model_type = 'aPCE'
     MetaMod.pce_reg_method = 'FastARD'
     MetaMod.pce_deg = 3
-    MetaMod.pce_q_norm = 0.85
+    MetaMod.pce_q_norm = 1
     
-    ExpDesign.n_init_samples = 10
-    ExpDesign.sampling_method = 'user'
-    ExpDesign.X = samples
+    # TODO: add a check in MetaModel/ExpDesign to ensure that n_init_samples matches the length of the given .X!
+    ExpDesign.n_init_samples = 30
+    ExpDesign.sampling_method = 'random'  # 'user'
+    #ExpDesign.X = samples
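+    # Draw 30 random training samples from the input marginals instead of reusing the user-given samples.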
     
     Engine_ = Engine(MetaMod, Model, ExpDesign)
     Engine_.start_engine()
@@ -77,22 +77,22 @@ if __name__ == '__main__':
     mean, stdev = Engine_.MetaModel.eval_metamodel(samples)
     
     #### Active learning
-    ExpDesign.n_new_samples = 1
-    ExpDesign.n_max_samples = 14
-    ExpDesign.mod_LOO_threshold = 1e-16
-    
-    ExpDesign.tradeoff_scheme = None
-    
-    ExpDesign.explore_method = 'random'
-    ExpDesign.n_canddidate = 1000
-    ExpDesign.n_cand_groups = 4
-    
-    ExpDesign.exploit_method = 'VarOptDesign'
-    ExpDesign.util_func = 'EIGF'
-    
-    Engine_ = Engine(MetaMod, Model, ExpDesign)
-    Engine_.start_engine()
-    Engine_.train_sequential()
+    if False:  # set to True to run the sequential (active-learning) training example
+        ExpDesign.n_new_samples = 1
+        ExpDesign.n_max_samples = 14
+        ExpDesign.mod_LOO_threshold = 1e-16
+        
+        ExpDesign.tradeoff_scheme = None
+        
+        ExpDesign.explore_method = 'random'
+        ExpDesign.n_canddidate = 1000
+        ExpDesign.n_cand_groups = 4
+        
+        ExpDesign.exploit_method = 'VarOptDesign'
+        ExpDesign.util_func = 'EIGF'
+        
+        Engine_ = Engine(copy.deepcopy(MetaMod), Model, copy.deepcopy(ExpDesign))
+        Engine_.train_sequential()
     
     #### Postprocessing
     PostProc = PostProcessing(Engine_)
@@ -100,4 +100,88 @@ if __name__ == '__main__':
     PostProc.check_accuracy(n_samples=10)
     PostProc.plot_moments()
     PostProc.sobol_indices()
-    PostProc.plot_seq_design_diagnostics()
\ No newline at end of file
+    # PostProc.plot_seq_design_diagnostics()  # only meaningful after sequential training
+    
+    # TODO: sanity check - evaluate the surrogate on its own training data; the residuals should be near zero
+    mean, stdev = Engine_.eval_metamodel(Engine_.ExpDesign.X)
+    print(mean['A'] - Engine_.ExpDesign.Y['A'])
+    
+    #### BayesInference
+    
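+    # Create a synthetic observation by running the forward model at an assumed true parameter value of 2.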
+    true_sample = [[2]]
+    observation = Model.run_model_parallel(true_sample)[0]
+    Model.observations = {}
+    for key in observation:
+        if key == 'x_values':
+            Model.observations[key] = observation[key]
+        else:
+            Model.observations[key] = observation[key][0]
+            
+    obsData = pd.DataFrame(Model.observations, columns=Model.Output.names)
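+    # Assume a Gaussian error model; its variance for each output is set to the squared observations.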
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = obsData**2
+    
+    BayesObj = BayesInference(Engine_)
+    BayesObj.emulator = True
+    BayesObj.Discrepancy = DiscrepancyOpts
+    BayesObj.plot_post_pred = True
+    
+    BayesObj.inference_method = 'MCMC'
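+    # Sampler options for emcee; KDEMove proposes walker updates from a kernel-density
+    # estimate of the complementary ensemble.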
+    import emcee
+    BayesObj.mcmc_params = {
+        'n_steps': 1e4,  # or 1e5 for longer, better-converged chains
+        'n_walkers': 30,
+        'moves': emcee.moves.KDEMove(),
+        'multiprocessing': False,
+        'verbose': False
+        }
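+    # Run the inference; plot_post_pred=True also generates posterior-predictive plots.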
+    BayesObj.create_inference()
+    
+    
+    #### Model Comparison
+    Model1 = PyLinkForwardModel()
+    Model1.link_type = 'Function'
+    Model1.py_file = 'model'
+    Model1.name = 'model'
+    Model1.Output.names = ['B']
+    Model1.func_args = {'x_values': x_values}
+    Model1.store = False
+    
+    MetaMod1 = MetaModel(Inputs)
+    MetaMod1.meta_model_type = 'aPCE'
+    MetaMod1.pce_reg_method = 'FastARD'
+    MetaMod1.pce_deg = 3
+    MetaMod1.pce_q_norm = 1
+    
+    ExpDesign1 = ExpDesigns(Inputs)
+    ExpDesign1.n_init_samples = 30
+    ExpDesign1.sampling_method = 'random'  # 'user'
+    #ExpDesign1.X = samples
+    
+    Engine_1 = Engine(MetaMod1, Model1, ExpDesign1)
+    Engine_1.train_normal()
+    # TODO: sanity checks for these two models look good, unlike the model above; perhaps an issue with its training data?
+    
+    
+    Model2 = PyLinkForwardModel()
+    Model2.link_type = 'Function'
+    Model2.py_file = 'model2'
+    Model2.name = 'model2'
+    Model2.Output.names = ['A']
+    Model2.func_args = {'x_values': x_values}
+    Model2.store = False
+    
+    MetaMod2 = MetaModel(Inputs)
+    MetaMod2.meta_model_type = 'aPCE'
+    MetaMod2.pce_reg_method = 'FastARD'
+    MetaMod2.pce_deg = 3
+    MetaMod2.pce_q_norm = 1
+    
+    ExpDesign2 = ExpDesigns(Inputs)
+    ExpDesign2.n_init_samples = 30
+    ExpDesign2.sampling_method = 'random'  # 'user'
+    #ExpDesign2.X = samples
+    
+    Engine_2 = Engine(MetaMod2, Model2, ExpDesign2)
+    Engine_2.train_normal()
\ No newline at end of file
diff --git a/examples/user_guide/model.py b/examples/user_guide/model.py
index 2bb1b011fae7c82bf299f4a92ba6787dd58a0f2b..0974cb1fa7981386b6b051bf1348d4315a915f41 100644
--- a/examples/user_guide/model.py
+++ b/examples/user_guide/model.py
@@ -6,8 +6,8 @@ A simple model example for the BVR user guide.
 """
 import numpy as np
 
-def model(samples):
-    samples = samples[0]
-    square = np.power(samples, 2)
-    outputs = {'A': samples, 'B': square, 'x_values': [0]}
+def model(samples, x_values):
+    # First output: the x-grid scaled by the (single) input sample.
+    linear = samples[0]*x_values
+    # Second output: the square of the scaled grid, weighted once more by x_values.
+    square = np.power(linear*x_values, 2)
+    outputs = {'A': linear, 'B': square, 'x_values': x_values}
     return outputs
\ No newline at end of file
diff --git a/examples/user_guide/model2.py b/examples/user_guide/model2.py
new file mode 100644
index 0000000000000000000000000000000000000000..47a0113b4e0dfb5692b9b27dfe45460bff5bf6e6
--- /dev/null
+++ b/examples/user_guide/model2.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+"""
+A simple model example for the BVR user guide.
+
+@author: Rebecca Kohlhaas
+"""
+import numpy as np
+
+def model2(samples, x_values):
+    # Cubic response: x^3 scaled by the (single) input sample.
+    poly = samples[0]*np.power(x_values, 3)
+    outputs = {'A': poly, 'x_values': x_values}
+    return outputs
\ No newline at end of file
diff --git a/src/bayesvalidrox/pylink/pylink.py b/src/bayesvalidrox/pylink/pylink.py
index 64ad87b7b35fe948b1c68221be12ebbe3ca8f91a..1f12df8a1001381970772faffa895f6e46b2960e 100644
--- a/src/bayesvalidrox/pylink/pylink.py
+++ b/src/bayesvalidrox/pylink/pylink.py
@@ -394,7 +394,6 @@ class PyLinkForwardModel(object):
             os.makedirs(newpath)
 
         # Copy the necessary files to the directories
-        print(self.input_template)
         for in_temp in self.input_template:
             # Input file(s) of the model
             shutil.copy2(in_temp, newpath)
@@ -426,7 +425,6 @@ class PyLinkForwardModel(object):
             self.exe_path = os.getcwd()
 
         # Run the model
-        print(new_command)
         output = self.run_command(new_command, self.Output.file_names)
 
         return output
diff --git a/src/bayesvalidrox/surrogate_models/engine.py b/src/bayesvalidrox/surrogate_models/engine.py
index ab0aaeea564c95f169fac2a0e47428bbd43c7491..6f14cf3e82ab32e3aa4fdbf991ec692627c8e994 100644
--- a/src/bayesvalidrox/surrogate_models/engine.py
+++ b/src/bayesvalidrox/surrogate_models/engine.py
@@ -266,8 +266,9 @@ class Engine:
         else:
             pce = False
         #mc_ref = True if bool(self.Model.mc_reference) else False
-        mc_ref = True if (self.Model.mc_reference is not None) else False
-        if mc_ref:
+        mc_ref = False
+        if self.Model.mc_reference is not None and self.Model.mc_reference != {}:
+            mc_ref = True
             self.Model.read_observation('mc_ref')
 
         # Get the parameters
@@ -1015,12 +1016,15 @@ class Engine:
             RMSE of the standard deviations
 
         """
+        if self.Model.mc_reference is None:
+            raise AttributeError(
+                'Model.mc_reference needs to be given to calculate the surrogate error!')
         # Compute the mean and std based on the MetaModel
         pce_means, pce_stds = self.MetaModel._compute_pce_moments()
 
         # Compute the root mean squared error
         for output in self.out_names:
             # Compute the error between mean and std of MetaModel and OrigModel
+            # TODO: write test that checks if mc_reference exists
             RMSE_Mean = mean_squared_error(
                 self.Model.mc_reference['mean'], pce_means[output], squared=False
             )
diff --git a/src/bayesvalidrox/surrogate_models/surrogate_models.py b/src/bayesvalidrox/surrogate_models/surrogate_models.py
index 157260d4b5432f847670a6804deac7dbfa8eb4bd..b71040b405769617ab33fbb68d97d652c2e1d2d2 100644
--- a/src/bayesvalidrox/surrogate_models/surrogate_models.py
+++ b/src/bayesvalidrox/surrogate_models/surrogate_models.py
@@ -1005,7 +1005,6 @@ class MetaModel:
                     break
 
             # Store the score in the scores list
-            print(qNormScores)
             best_q = np.nanargmax(qNormScores)
             scores[degIdx] = qNormScores[best_q]
 
@@ -1230,7 +1229,6 @@ class MetaModel:
                 mean = np.empty((len(samples), len(values)))
                 std = np.empty((len(samples), len(values)))
                 idx = 0
-                #print('Looping over ??')
                 for in_key, InIdxValues in values.items():
 
                     # Prediction with GPE
diff --git a/tests/test_engine.py b/tests/test_engine.py
index 780d64417f888a8d94e8fcce145f60d55561b437..820de5b79851c688c8d2adcb5a66ed790937e0db 100644
--- a/tests/test_engine.py
+++ b/tests/test_engine.py
@@ -31,6 +31,7 @@ Engine:
 import math
 import numpy as np
 import pandas as pd
+import pytest
 import sys
 
 sys.path.append("src/")
@@ -84,9 +85,9 @@ def test_start_engine() -> None:
 
 #%% Test Engine._error_Mean_Std
 
-def test__error_Mean_Std() -> None:
+def test__error_Mean_Std_mc() -> None:
     """
-    Compare moments of surrogate and reference
+    Compare moments of surrogate and reference when mc_reference is given
     """
     inp = Input()
     inp.add_marginals()
@@ -103,7 +104,17 @@ def test__error_Mean_Std() -> None:
     engine.start_engine()
     mean, std = engine._error_Mean_Std()
     assert mean < 0.01 and std < 0.01
-
+
+def test__error_Mean_Std_nomc() -> None:
+    """
+    Raise an AttributeError when Model.mc_reference is not given
+    """
+    mod = PL()
+    engine = Engine(None, mod, None)
+    engine.start_engine()
+    with pytest.raises(AttributeError) as excinfo:
+        engine._error_Mean_Std()
+    assert str(excinfo.value) == 'Model.mc_reference needs to be given to calculate the surrogate error!'
 
 #%% Test Engine._validError