diff --git a/examples/analytical-function/example_analytical_function_testSequential.py b/examples/analytical-function/example_analytical_function_testSequential.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcbf7dcc889b5a1a74c40a34fb5f40d599396285
--- /dev/null
+++ b/examples/analytical-function/example_analytical_function_testSequential.py
@@ -0,0 +1,422 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+This test performs a surrogate-assisted Bayesian calibration of a
+time-dependent analytical function.
+
+Author: Farid Mohammadi, M.Sc.
+E-Mail: farid.mohammadi@iws.uni-stuttgart.de
+Department of Hydromechanics and Modelling of Hydrosystems (LH2)
+Institute for Modelling Hydraulic and Environmental Systems (IWS), University
+of Stuttgart, www.iws.uni-stuttgart.de/lh2/
+Pfaffenwaldring 61
+70569 Stuttgart
+
+Created on Fri Aug 9 2019
+
+"""
+
+import numpy as np
+import pandas as pd
+import sys
+import joblib
+import matplotlib
+matplotlib.use('agg')
+
+# import bayesvalidrox
+# Add BayesValidRox path
+sys.path.append("../../src/")
+
+from bayesvalidrox.surrogate_models.inputs import Input
+from bayesvalidrox.surrogate_models.input_space import InputSpace
+from bayesvalidrox.pylink.pylink import PyLinkForwardModel
+from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
+from bayesvalidrox.surrogate_models.surrogate_models import MetaModel
+#from bayesvalidrox.surrogate_models.meta_model_engine import MetaModelEngine
+from bayesvalidrox.post_processing.post_processing import PostProcessing
+from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
+from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
+
+from bayesvalidrox.surrogate_models.engine import Engine
+
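+# NOTE: purely illustrative, hypothetical sketch of the interface that
+# PyLinkForwardModel expects for link_type = 'Function'. The real model lives
+# in analytical_function.py next to this script; the key names below
+# ('x_values', 'Z') and the toy response are assumptions for illustration.
+def _analytical_function_sketch(xx):
+    """Toy stand-in mapping an (n_samples, ndim) array to an output dict."""
+    t = np.arange(0, 10, 1.) / 9
+    z = np.array([np.sum(np.square(x)) * np.exp(-t)
+                  for x in np.atleast_2d(xx)])
+    return {'x_values': t, 'Z': z}
+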
+if __name__ == "__main__":
+
+    # Number of parameters
+    ndim = 2  # or 10
+
+    # =====================================================
+    # =============   COMPUTATIONAL MODEL  ================
+    # =====================================================
+    Model = PyLinkForwardModel()
+
+    Model.link_type = 'Function'
+    Model.py_file = 'analytical_function'
+    Model.name = 'AnalyticFunc'
+
+    Model.Output.names = ['Z']
+
+    # For Bayesian inversion: synthetic data generated with X = [0, 0]
+    Model.observations = {}
+    Model.observations['Time [s]'] = np.arange(0, 10, 1.) / 9
+    Model.observations['Z'] = np.repeat([2.], 10)
+
+    # For checking against the Monte Carlo reference
+    Model.mc_reference = {}
+    Model.mc_reference['Time [s]'] = np.arange(0, 10, 1.) / 9
+    Model.mc_reference['mean'] = np.load(f"data/mean_{ndim}.npy")
+    Model.mc_reference['std'] = np.load(f"data/std_{ndim}.npy")
+
+    # =====================================================
+    # =========   PROBABILISTIC INPUT MODEL  ==============
+    # =====================================================
+    # Define the uncertain parameters with their mean and
+    # standard deviation
+    Inputs = Input()
+
+    # Assuming dependent input variables
+    # Inputs.Rosenblatt = True
+
+    for i in range(ndim):
+        Inputs.add_marginals()
+        Inputs.Marginals[i].name = "$\\theta_{"+str(i+1)+"}$"
+        Inputs.Marginals[i].dist_type = 'uniform'
+        Inputs.Marginals[i].parameters = [-5, 5]
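+    # i.e. each parameter is distributed as theta_i ~ U(-5, 5); for
+    # dist_type = 'uniform', the two entries of `parameters` are the lower
+    # and upper bounds.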
+
+    # arbitrary polynomial chaos
+    # inputParams = np.load('data/InputParameters_{}.npy'.format(ndim))
+    # for i in range(ndim):
+    #     Inputs.add_marginals()
+    #     Inputs.Marginals[i].name = f'$X_{i+1}$'
+    #     Inputs.Marginals[i].input_data = inputParams[:, i]
+
+    # =====================================================
+    # ==========  DEFINITION OF THE METAMODEL  ============
+    # =====================================================
+    MetaModelOpts = MetaModel(Inputs)  # the Model is passed to the Engine below
+
+    # Select if you want to preserve the spatial/temporal dependencies
+    # MetaModelOpts.dim_red_method = 'PCA'
+    # MetaModelOpts.var_pca_threshold = 99.999
+    # MetaModelOpts.n_pca_components = 10
+
+    # Select your metamodel method
+    # 1) PCE (Polynomial Chaos Expansion) 2) aPCE (arbitrary PCE)
+    # 3) GPE (Gaussian Process Emulator)
+    MetaModelOpts.meta_model_type = 'aPCE'
+
+    # ------------------------------------------------
+    # ------------- PCE Specification ----------------
+    # ------------------------------------------------
+    # Select the sparse least-square minimization method for
+    # the PCE coefficients calculation:
+    # 1) OLS: Ordinary Least Squares  2) BRR: Bayesian Ridge Regression
+    # 3) LARS: Least Angle Regression  4) ARD: Bayesian ARD Regression
+    # 5) FastARD: Fast Bayesian ARD Regression
+    # 6) BCS: Bayesian Compressive Sensing
+    # 7) OMP: Orthogonal Matching Pursuit
+    # 8) VBL: Variational Bayesian Learning
+    # 9) EBL: Empirical Bayesian Learning
+    MetaModelOpts.pce_reg_method = 'FastARD'
+
+    # Bootstrapping
+    # 1) normal 2) fast
+    MetaModelOpts.bootstrap_method = 'fast'
+    MetaModelOpts.n_bootstrap_itrs = 1
+
+    # Specify the max degree to be compared by the adaptive algorithm:
+    # The degree with the lowest Leave-One-Out cross-validation (LOO)
+    # error (or the highest score = 1 - LOO) is chosen as the final
+    # metamodel. pce_deg accepts a scalar degree or a range.
+    MetaModelOpts.pce_deg = 12
+
+    # q-quasi-norm 0 < q <= 1 (default = 1)
+    MetaModelOpts.pce_q_norm = 0.85 if ndim < 5 else 0.5
+
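+    # The q-quasi-norm induces hyperbolic truncation of the polynomial basis:
+    # a multi-index alpha is kept if (sum_i alpha_i**q)**(1/q) <= pce_deg, so
+    # q < 1 prunes high-order interaction terms. The following is a purely
+    # illustrative sketch (not used by bayesvalidrox itself) counting the
+    # surviving 2D tensor-product terms under the settings above:
+    from itertools import product
+    _deg, _q = MetaModelOpts.pce_deg, MetaModelOpts.pce_q_norm
+    _n_kept = sum(
+        1 for _alpha in product(range(_deg + 1), repeat=2)
+        if sum(_a**_q for _a in _alpha)**(1 / _q) <= _deg
+    )
+    print(f"q-norm {_q} keeps {_n_kept} of {(_deg + 1)**2} 2D basis terms.")
+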
+    # Print summary of the regression results
+    # MetaModelOpts.verbose = True
+
+    # ------------------------------------------------
+    # ------ Experimental Design Configuration -------
+    # ------------------------------------------------
+    ExpDesign = ExpDesigns(Inputs)
+
+    # One-shot (normal) or Sequential Adaptive (sequential) Design
+    ExpDesign.method = 'sequential'
+    ExpDesign.n_init_samples = 140  # alternatives: 14000, 3*ndim
+
+    # Sampling methods
+    # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley
+    # 6) chebyshev(FT) 7) grid(FT) 8)user
+    ExpDesign.sampling_method = 'latin_hypercube'
+
+    # Provide the experimental design object with a hdf5 file
+    # ExpDesign.hdf5_file = 'ExpDesign_AnalyticFunc.hdf5'
+    
+    # Set the sampling parameters
+    ExpDesign.n_new_samples = 1
+    ExpDesign.n_max_samples = 141  # sum of initial + sequential samples
+    ExpDesign.mod_LOO_threshold = 1e-16
+
+    # ExpDesign.adapt_verbose = True
+    
+    # 1) None 2) 'equal' 3)'epsilon-decreasing' 4) 'adaptive'
+    ExpDesign.tradeoff_scheme = None
+    
+    # MetaModelOpts.ExpDesign.n_replication = 5
+    
+    # -------- Exploration ------
+    # 1)'Voronoi' 2)'random' 3)'latin_hypercube' 4)'LOOCV' 5)'dual annealing'
+    ExpDesign.explore_method = 'random'
+
+    # Use when 'dual annealing' chosen
+    ExpDesign.max_func_itr = 1000
+
+    # Use when 'Voronoi' or 'random' or 'latin_hypercube' chosen
+    ExpDesign.n_canddidate = 1000
+    ExpDesign.n_cand_groups = 4
+
+    # -------- Exploitation ------
+    # 1)'BayesOptDesign' 2)'BayesActDesign' 3)'VarOptDesign' 4)'alphabetic'
+    # 5)'Space-filling'
+    # ExpDesign.exploit_method = 'Space-filling'
+    ExpDesign.exploit_method = 'BayesActDesign'
+    ExpDesign.util_func = 'DKL'
+
+    # BayesOptDesign -> when data is available
+    # 1) MI (Mutual information)  2) ALC (Active learning Cohn)
+    # 3) DKL (Kullback-Leibler divergence)  4) DPP (D-posterior-precision)
+    # 5) APP (A-posterior-precision)  # ['DKL', 'BME', 'infEntropy']
+    # MetaModelOpts.ExpDesign.util_func = 'DKL'
+
+    # BayesActDesign -> when data is available
+    # 1) BME (Bayesian model evidence)  2) infEntropy (Information entropy)
+    # 3) DKL (Kullback-Leibler divergence)
+    # MetaModelOpts.ExpDesign.util_func = 'DKL'
+
+    # VarBasedOptDesign -> when data is not available
+    # 1) ALM  2) EIGF  3) LOOCV
+    # or a combination as a list
+    # MetaModelOpts.ExpDesign.util_func = 'EIGF'
+
+    # alphabetic
+    # 1) D-Opt (D-optimality)  2) A-Opt (A-optimality)
+    # 3) K-Opt (K-optimality), or a combination as a list
+    # MetaModelOpts.ExpDesign.util_func = 'D-Opt'
+
+    # Defining the measurement error, if it's known a priori
+    obsData = pd.DataFrame(Model.observations, columns=Model.Output.names)
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = obsData**2
+    MetaModelOpts.Discrepancy = DiscrepancyOpts
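+    # With the synthetic data above (Z = 2 at every time step), obsData**2
+    # gives a Gaussian error model with a constant variance of 4 per output
+    # point; in general, this choice makes the variance heteroscedastic and
+    # equal to the squared observation.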
+
+    # Plot the posterior snapshots for SeqDesign
+    ExpDesign.post_snapshot = False
+    ExpDesign.step_snapshot = 1
+    ExpDesign.max_a_post = [0] * ndim
+
+    # For calculation of validation error for SeqDesign
+    prior = np.load(f"data/Prior_{ndim}.npy")
+    prior_outputs = np.load(f"data/origModelOutput_{ndim}.npy")
+    likelihood = np.load(f"data/validLikelihoods_{ndim}.npy")
+    ExpDesign.valid_samples = prior[:500]
+    ExpDesign.valid_model_runs = {'Z': prior_outputs[:500]}
+
+    # Do the standard training of the surrogate and init of the engine
+    MetaModelOpts.ExpDesign = ExpDesign
+    engine = Engine(MetaModelOpts, Model, ExpDesign)
+    engine.start_engine()
+    engine.train_normal()
+    
+#%% Test combinations of the sequential strategies
+    """
+    This part of the code tests the available combinations of the 
+    sequential strategies.
+    The following combinations have remaining issues to be solved:
+        - exploitation = 'BayesOptDesign' has circular dependency with the engine class
+        - exploration = 'Voronoi' creates mismatching numbers of candidates and weights
+        - exploration = 'loocv' needs MetaModel.create_ModelError, which does not exist
+        - exploration = 'dual annealing' restricted in what it allows as exploitation methods
+        
+    The following combinations are running through:
+        - BayesActDesign quite fast
+        - VarOptDesign as well
+        
+    Performance notes:
+        - BayesOptDesign slow in the OptBayesianDesign iterations
+        
+    # TODO: user-defined options were left out in this test for both 
+    sampling-methods and exploitation schemes.
+    """
+    # Create a single new sample from each AL strategy
+    tradeoff_schemes = [None, 'equal', 'epsilon-decreasing', 'adaptive']
+    # Full option sets (not all of them run through yet, see above):
+    # sampling_method = ['random', 'latin-hypercube', 'sobol', 'halton',
+    #                    'hammersley', 'chebyshev', 'grid']
+    # exploration_schemes = ['Voronoi', 'random', 'latin-hypercube', 'LOOCV',
+    #                        'dual annealing']
+    # exploitation_schemes = ['BayesOptDesign', 'BayesActDesign',
+    #                         'VarOptDesign', 'alphabetic', 'Space-filling']
+    # Subsets currently exercised by this test:
+    exploration_schemes = ['random', 'dual annealing']
+    exploitation_schemes = ['BayesActDesign', 'alphabetic', 'Space-filling']
+    
+    for tradeoff in tradeoff_schemes:
+        for exploration in exploration_schemes:
+            for exploitation in exploitation_schemes:
+                if exploration == 'dual annealing':
+                    if exploitation not in ['BayesOptDesign','VarOptDesign']:
+                        continue
+                
+                # Utility function depends on exploitation type
+                if exploitation == 'BayesOptDesign':
+                    # 1) MI (Mutual information)  2) ALC (Active learning Cohn)
+                    # 3) DKL (Kullback-Leibler divergence)
+                    # 4) DPP (D-posterior-precision)
+                    # 5) APP (A-posterior-precision)  # ['DKL', 'BME', 'infEntropy']
+                    util_func_list = ['MI', 'ALC', 'DKL', 'DPP', 'APP', 
+                                      'BayesRisk', 'infEntropy']
+                elif exploitation == 'BayesActDesign':
+                    # 1) BME (Bayesian model evidence)
+                    # 2) infEntropy (Information entropy)
+                    # 3) DKL (Kullback-Leibler divergence)
+                    util_func_list = ['BME', 'DKL', 'infEntropy', 'BIC', 
+                                      'AIC', 'DIC']
+                elif exploitation == 'VarOptDesign':
+                    # 1) ALM  2) EIGF  3) LOOCV
+                    # or a combination as a list
+                    util_func_list = ['ALM', 'EIGF', 'LOOCV']
+                elif exploitation == 'alphabetic':
+                    # 1) D-Opt (D-optimality)  2) A-Opt (A-optimality)
+                    # 3) K-Opt (K-optimality), or a combination as a list
+                    util_func_list = ['D-Opt', 'A-Opt', 'K-Opt']
+                else:
+                    util_func_list = ['']
+                
+                for util in util_func_list:
+                    # Skip the combinations that are known not to work
+                    if exploitation == 'BayesActDesign' and util == 'DIC':
+                        continue
+                    # Reset the sample budget: one new sample per run
+                    engine.ExpDesign.n_max_samples = engine.ExpDesign.X.shape[0] + 1
+
+                    # Iteration-specific settings
+                    engine.ExpDesign.tradeoff_scheme = tradeoff
+                    engine.ExpDesign.sampling_method = 'latin_hypercube'
+                    engine.ExpDesign.explore_method = exploration
+                    engine.ExpDesign.exploit_method = exploitation
+                    engine.ExpDesign.util_func = util
+                
+                    # Do the Sequential training
+                    print('')
+                    print('*'*50)
+                    print('Current settings:')
+                    print(f' - tradeoff: {tradeoff}')
+                    print(f' - exploration: {exploration}')
+                    print(f' - exploitation: {exploitation}')
+                    print(f' - util_func: {util}')
+                    engine.train_sequential()
+    
+    # The loop above ran each working AL combination for one iteration to
+    # check its functionality.
+    # engine.train_sequential()
+
+    # Load the objects
+    # with open(f"PCEModel_{Model.name}.pkl", "rb") as input:
+    #     PCEModel = joblib.load(input)
+    
+#%%
+    # =====================================================
+    # =========  POST PROCESSING OF METAMODELS  ===========
+    # =====================================================
+    PostPCE = PostProcessing(engine)
+
+    # Plot to check validation visually.
+    PostPCE.valid_metamodel(n_samples=1)
+
+    # Compute and print RMSE error
+    PostPCE.check_accuracy(n_samples=300)
+
+    # Compute the moments and compare with the Monte-Carlo reference
+    if MetaModelOpts.meta_model_type != 'GPE':
+        PostPCE.plot_moments()
+
+    # Plot the evolution of the KLD, BME, and modified LOOCV error
+    if MetaModelOpts.ExpDesign.method == 'sequential':
+        refBME_KLD = np.load("data/refBME_KLD_"+str(ndim)+".npy")
+        PostPCE.plot_seq_design_diagnostics(refBME_KLD)
+
+    # Plot the Sobol indices
+    if MetaModelOpts.meta_model_type != 'GPE':
+        total_sobol = PostPCE.sobol_indices()
+
+    # =====================================================
+    # ========  Bayesian inference with Emulator ==========
+    # =====================================================
+    BayesOpts = BayesInference(engine)
+    BayesOpts.emulator = True
+    BayesOpts.plot_post_pred = True
+
+    # BayesOpts.selected_indices = [0, 3, 5,  7, 9]
+    # BME Bootstrap
+    BayesOpts.bootstrap = True
+    BayesOpts.n_bootstrap_itrs = 500
+    BayesOpts.bootstrap_noise = 100
+
+    # Bayesian cross validation
+    BayesOpts.bayes_loocv = True  # TODO: test what this does
+
+    # Select the inference method
+    import emcee
+    BayesOpts.inference_method = "MCMC"
+    # Set the MCMC parameters passed to self.mcmc_params
+    BayesOpts.mcmc_params = {
+        'n_steps': int(1e4),  # or 1e5
+        'n_walkers': 30,
+        'moves': emcee.moves.KDEMove(),
+        'multiprocessing': False,
+        'verbose': False
+        }
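+    # With 30 walkers and 1e4 steps, emcee draws 3e5 posterior samples before
+    # burn-in/thinning. KDEMove proposes from a kernel density estimate of
+    # the complementary ensemble, which often mixes better than the default
+    # StretchMove on correlated or multimodal posteriors.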
+
+    # ----- Define the discrepancy model -------
+    obsData = pd.DataFrame(Model.observations, columns=Model.Output.names)
+    BayesOpts.measurement_error = obsData
+
+    # -- (Option B) --
+    DiscrepancyOpts = Discrepancy('')
+    DiscrepancyOpts.type = 'Gaussian'
+    DiscrepancyOpts.parameters = obsData**2
+    BayesOpts.Discrepancy = DiscrepancyOpts
+
+    # -- (Option C) --
+    if 0:
+        DiscOutputOpts = Input()
+        # OutputName = 'Z'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[0].name = r'$\sigma^2_{\epsilon}$'
+        DiscOutputOpts.Marginals[0].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[0].parameters = [0, 10]
+        #BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
+        #                          'infer': Discrepancy(DiscOutputOpts)}
+    
+        BayesOpts.bias_inputs = {'Z': np.arange(0, 10, 1.).reshape(-1, 1) / 9}
+        
+        DiscOutputOpts = Input()
+        # OutputName = 'lambda'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[0].name = r'$\lambda$'
+        DiscOutputOpts.Marginals[0].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[0].parameters = [0, 1]
+    
+        # OutputName = 'sigma_f'
+        DiscOutputOpts.add_marginals()
+        DiscOutputOpts.Marginals[1].name = r'$\sigma_f$'
+        DiscOutputOpts.Marginals[1].dist_type = 'uniform'
+        DiscOutputOpts.Marginals[1].parameters = [0, 1e-4]
+        #BayesOpts.Discrepancy = Discrepancy(DiscOutputOpts)
+        BayesOpts.Discrepancy = {'known': DiscrepancyOpts,
+                                  'infer': Discrepancy(DiscOutputOpts)}
+    
+    # Start the calibration/inference
+    Bayes_PCE = BayesOpts.create_inference()
+
+    # Save class objects
+    with open(f'Bayes_{Model.name}.pkl', 'wb') as output:
+        joblib.dump(Bayes_PCE, output, 2)
diff --git a/src/bayesvalidrox/surrogate_models/engine.py b/src/bayesvalidrox/surrogate_models/engine.py
index 022fdb63c04c6782f49148dac9a81ccdc68fcf35..54f6bd888a379e1623280d78fa38f939b59a1b5e 100644
--- a/src/bayesvalidrox/surrogate_models/engine.py
+++ b/src/bayesvalidrox/surrogate_models/engine.py
@@ -486,6 +486,10 @@ class Engine:
 
                     # Optimal Bayesian Design
                     # self.MetaModel.ExpDesignFlag = 'sequential'
+                    # TODO: this is only a temporary workaround; remove it as
+                    #       soon as possible to avoid circular dependencies!
+                    if self.ExpDesign.exploit_method.lower() == 'bayesoptdesign':
+                        self.SeqDesign.engine = self
                     Xnew, updatedPrior = self.SeqDesign.choose_next_sample(TotalSigma2,
                                                                  n_canddidate,
                                                                  util_f)
diff --git a/src/bayesvalidrox/surrogate_models/sequential_design.py b/src/bayesvalidrox/surrogate_models/sequential_design.py
index 5f969c28f56cf61701da666495c332424f8b77b0..1a3591a6ca4aa204ebc933fa840ab010fa4cba98 100644
--- a/src/bayesvalidrox/surrogate_models/sequential_design.py
+++ b/src/bayesvalidrox/surrogate_models/sequential_design.py
@@ -23,6 +23,7 @@ from tqdm import tqdm
 from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
 from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
 from .exploration import Exploration
+from .surrogate_models import create_psi
 
 # TODO: These first two functions are direct copies from functions in .engine.
 #       Should they be called from there?
@@ -96,11 +97,13 @@ class SequentialDesign:
 
     """
 
-    def __init__(self, MetaMod, Model, ExpDes, parallel=False):
+    def __init__(self, MetaMod, Model, ExpDes, parallel=False, 
+                 out_dir='Outputs_SeqDesign'):
         self.MetaModel = MetaMod  # Surrogate should be trained
         self.Model = Model
         self.ExpDesign = ExpDes
         self.parallel = parallel
+        self.out_dir = out_dir
 
         # Init other parameters
         self.out_names = self.Model.Output.names
@@ -114,6 +117,9 @@ class SequentialDesign:
         None
 
         """
+        
+        # Create output path
+        os.makedirs(self.out_dir, exist_ok=True)
 
         # Read observations or MCReference
         # TODO: recheck the logic in this if statement 
@@ -127,6 +133,8 @@ class SequentialDesign:
             # TODO: TotalSigma2 not defined if not in this else???
             # TODO: no self.observations if in here
             TotalSigma2 = {}
 
     # -------------------------------------------------------------------------
     def choose_next_sample(self, sigma2=None, n_candidates=5, var='DKL'):
@@ -235,102 +243,13 @@ class SequentialDesign:
         print(f"\n Exploration weight={explore_w:0.3f} "
               f"Exploitation weight={exploit_w:0.3f}\n")
 
-        # Generate the candidate samples
-        # TODO: here use the sampling method provided by the expdesign?
-        # sampling_method = self.ExpDesign.sampling_method
-
-        # TODO: changed this from 'random' for LOOCV
-        # TODO: these are commented out as they are not used !?
-        # if explore_method == 'LOOCV':
-        # allCandidates = self.ExpDesign.generate_samples(n_candidates,
-        #                                                     sampling_method)
-        # else:
-        #     allCandidates, scoreExploration = explore.get_exploration_samples()
-
         # -----------------------------------------
         # ---------- EXPLORATION METHODS ----------
         # -----------------------------------------
-        # ToDo: Move this if/else into its own function called "do_exploration", which should select the
-        #       exploration samples, and assign exploration scores. We should send it explore_score, for if/else stmts
         # ToDo: Check if explore_scores can be nan, and remove them from any score normalization
-        if explore_method == 'LOOCV':
-            # -----------------------------------------------------------------
-            # TODO: LOOCV model construnction based on Feng et al. (2020)
-            # 'LOOCV':
-            # Initilize the ExploitScore array
-
-            # Generate random samples
-            allCandidates = self.ExpDesign.generate_samples(n_candidates,
-                                                            'random')
-
-            # Construct error model based on LCerror
-            errorModel = self.MetaModel.create_ModelError(old_EDX, self.LCerror)
-            self.errorModel.append(copy(errorModel))
-
-            # Evaluate the error models for allCandidates
-            eLCAllCands, _ = errorModel.eval_errormodel(allCandidates)
-            # Select the maximum as the representative error
-            eLCAllCands = np.dstack(eLCAllCands.values())
-            eLCAllCandidates = np.max(eLCAllCands, axis=1)[:, 0]
-
-            # Normalize the error w.r.t the maximum error
-            scoreExploration = eLCAllCandidates / np.sum(eLCAllCandidates)
-
-        else:
-            # ------- EXPLORATION: SPACE-FILLING DESIGN -------
-            # ToDo: Remove Exploration class and merge the functions into SequentialDesign class
-            # Generate candidate samples from Exploration class
-            explore = Exploration(self.ExpDesign, n_candidates)
-            explore.w = 100  # * ndim #500   # TODO: where does this value come from?
-            # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
-            explore.mc_criterion = 'mc-intersite-proj'
-            allCandidates, scoreExploration = explore.get_exploration_samples()
-
-            # Temp: ---- Plot all candidates -----
-            # ToDo: Make its own function, called inside of the select_exploration_samples function.
-            if ndim == 2:
-                def plotter(points, allCandidates, Method,
-                            scoreExploration=None):
-                    """
-                    unknown
-
-                    Parameters
-                    ----------
-                    points
-                    allCandidates
-                    Method
-                    scoreExploration
-
-                    Returns
-                    -------
-
-                    """
-                    if Method == 'Voronoi':
-                        from scipy.spatial import Voronoi, voronoi_plot_2d
-                        vor = Voronoi(points)
-                        fig = voronoi_plot_2d(vor)
-                        ax1 = fig.axes[0]
-                    else:
-                        fig = plt.figure()
-                        ax1 = fig.add_subplot(111)
-                    ax1.scatter(points[:, 0], points[:, 1], s=10, c='r',
-                                marker="s", label='Old Design Points')
-                    ax1.scatter(allCandidates[:, 0], allCandidates[:, 1], s=10,
-                                c='b', marker="o", label='Design candidates')
-                    for i in range(points.shape[0]):
-                        txt = 'p' + str(i + 1)
-                        ax1.annotate(txt, (points[i, 0], points[i, 1]))
-                    if scoreExploration is not None:
-                        for i in range(allCandidates.shape[0]):
-                            txt = str(round(scoreExploration[i], 5))
-                            ax1.annotate(txt, (allCandidates[i, 0],
-                                               allCandidates[i, 1]))
-
-                    plt.xlim(self.bound_tuples[0])
-                    plt.ylim(self.bound_tuples[1])
-                    # plt.show()
-                    plt.legend(loc='upper left')
-
+        candidates, scoreExploration = self.do_exploration(explore_method, 
+                                                           n_candidates, old_EDX)
+        
         # -----------------------------------------
         # --------- EXPLOITATION METHODS ----------
         # -----------------------------------------
@@ -348,16 +267,11 @@ class SequentialDesign:
                 MCsize = 15000
                 X_MC = self.ExpDesign.generate_samples(MCsize, 'random')
 
-                # ToDo: Get samples from the "do_exploration"
-                candidates = self.ExpDesign.generate_samples(
-                    n_candidates, 'latin_hypercube')
-
                 # Split the candidates in groups for multiprocessing
                 split_cand = np.array_split(
                     candidates, n_cand_groups, axis=0
                 )
-                # print(candidates)
-                # print(split_cand)
+                
                 if self.parallel:
                     results = Parallel(n_jobs=-1, backend='multiprocessing')(
                         delayed(self.run_util_func)(
@@ -369,60 +283,59 @@ class SequentialDesign:
                         results.append(self.run_util_func(exploit_method, split_cand[i], i, sigma2, var, X_MC))
 
                 # Retrieve the results and append them
-                # ToDo: Rename U_J_D (here and everyhwere) to something more representative
-                U_J_d = np.concatenate([results[NofE][1] for NofE in
+                scoreExploitation = np.concatenate([results[NofE][1] for NofE in
                                         range(n_cand_groups)])
 
                 # Check if all scores are inf
-                if np.isinf(U_J_d).all() or np.isnan(U_J_d).all():
-                    U_J_d = np.ones(len(U_J_d))
+                if np.isinf(scoreExploitation).all() or np.isnan(scoreExploitation).all():
+                    scoreExploitation = np.ones(len(scoreExploitation))
 
                 # Get the expected value (mean) of the Utility score
                 # for each cell
                 if explore_method == 'Voronoi':
-                    U_J_d = np.mean(U_J_d.reshape(-1, n_candidates), axis=1)
+                    scoreExploitation = np.mean(scoreExploitation.reshape(-1, n_candidates), axis=1)
 
                 # Normalize U_J_d
                 # ToDO: Check if this is working for the case where the util_func should be minimized (e.g. IE)
                 # norm_U_J_D = U_J_d / np.nansum(np.abs(U_J_d))  # Possible solution
-                norm_U_J_d = U_J_d / np.sum(U_J_d)
+                scoreExploitation = scoreExploitation / np.sum(scoreExploitation)
 
             else:
-                norm_U_J_d = np.zeros((len(scoreExploration)))
+                scoreExploitation = np.zeros((len(scoreExploration)))
 
             # ------- Calculate Total score -------
             # ToDo: This should be outside of the exploration/exploitation if/else part
             # ------- Trade off between EXPLORATION & EXPLOITATION -------
             # Accumulate the samples
             # ToDo: Stop assuming 2 sets of samples (should only be 1)
-            finalCandidates = np.concatenate((allCandidates, candidates), axis=0)  # ToDo: Remove
-            finalCandidates = np.unique(finalCandidates, axis=0)  # ToDo: Remove
+#            finalCandidates = np.concatenate((allCandidates, candidates), axis=0)  # ToDo: Remove
+#            finalCandidates = np.unique(finalCandidates, axis=0)  # ToDo: Remove
 
             # Calculations take into account both exploration and exploitation
             # samples without duplicates
-            totalScore = np.zeros(finalCandidates.shape[0])
+#            totalScore = np.zeros(finalCandidates.shape[0])
             # self.totalScore = totalScore
 
             # ToDo: Simplify (remove loop) for only one set of samples
             # final_weights = explore_score*explore_weights + exploit_score*exploit_weight
-            for cand_idx in range(finalCandidates.shape[0]):
+#            for cand_idx in range(finalCandidates.shape[0]):
                 # find candidate indices
-                idx1 = np.where(allCandidates == finalCandidates[cand_idx])[0]
-                idx2 = np.where(candidates == finalCandidates[cand_idx])[0]
+#                idx1 = np.where(allCandidates == finalCandidates[cand_idx])[0]
+#                idx2 = np.where(candidates == finalCandidates[cand_idx])[0]
 
                 # exploration
-                if idx1.shape[0] > 0:
-                    idx1 = idx1[0]
-                    totalScore[cand_idx] += explore_w * scoreExploration[idx1]
+#                if idx1.shape[0] > 0:
+#                    idx1 = idx1[0]
+#                    totalScore[cand_idx] += explore_w * scoreExploration[idx1]
 
                 # exploitation
-                if idx2.shape[0] > 0:
-                    idx2 = idx2[0]
-                    totalScore[cand_idx] += exploit_w * norm_U_J_d[idx2]
+#                if idx2.shape[0] > 0:
+#                    idx2 = idx2[0]
+#                    totalScore[cand_idx] += exploit_w * norm_U_J_d[idx2]
 
             # Total score
-            totalScore = exploit_w * norm_U_J_d
-            totalScore += explore_w * scoreExploration
+#            totalScore = exploit_w * norm_U_J_d
+#            totalScore += explore_w * scoreExploration
 
             # temp: Plot
             # dim = self.ExpDesign.X.shape[1]
@@ -432,46 +345,44 @@ class SequentialDesign:
             # ------- Select the best candidate -------
             # find an optimal point subset to add to the initial design by
             # maximization of the utility score and taking care of NaN values
-            temp = totalScore.copy()
-            temp[np.isnan(totalScore)] = -np.inf  # Since we are maximizing
-            sorted_idxtotalScore = np.argsort(temp)[::-1]
-            bestIdx = sorted_idxtotalScore[:n_new_samples]
+#            temp = totalScore.copy()
+#            temp[np.isnan(totalScore)] = -np.inf  # Since we are maximizing
+#            sorted_idxtotalScore = np.argsort(temp)[::-1]
+#            bestIdx = sorted_idxtotalScore[:n_new_samples]
 
             # select the requested number of samples
-            if explore_method == 'Voronoi':
-                Xnew = np.zeros((n_new_samples, ndim))
-                for i, idx in enumerate(bestIdx):
-                    X_can = explore.closestPoints[idx]
+            # TODO: should we also consider Voronoi as an extra case?
+#            if explore_method == 'Voronoi':
+#                Xnew = np.zeros((n_new_samples, ndim))
+#                for i, idx in enumerate(bestIdx):
+#                    X_can = explore.closestPoints[idx]
 
                     # Calculate the maxmin score for the region of interest
-                    newSamples, maxminScore = explore.get_mc_samples(X_can)
+#                    newSamples, maxminScore = explore.get_mc_samples(X_can)
 
                     # select the requested number of samples
-                    Xnew[i] = newSamples[np.argmax(maxminScore)]
-            else:
+#                    Xnew[i] = newSamples[np.argmax(maxminScore)]
+#            else:
                 # Changed this from allCandiates to full set of candidates
                 # TODO: still not changed for e.g. 'Voronoi'
-                Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]
+#                Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]
 
         elif exploit_method.lower() == 'varoptdesign':
             # ------- EXPLOITATION: VarOptDesign -------
-            UtilMethod = var
+            UtilMethod = var.lower()
 
             # ------- Calculate Exoploration weight -------
             # Compute exploration weight based on trade off scheme
-            explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
-                                                         old_EDX,
-                                                         old_EDY)
-            print(f"\nweightExploration={explore_w:0.3f} "
-                  f"weightExploitation={exploit_w:0.3f}")
+#            explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
+#                                                         old_EDX,
+#                                                         old_EDY)
 
             # Generate candidate samples from Exploration class
             nMeasurement = old_EDY[OutputNames[0]].shape[1]
 
-            # print(UtilMethod)
 
             # Find sensitive region
-            if UtilMethod == 'LOOCV':
+            if UtilMethod == 'loocv':
                 # TODO: why is this not inside the VarOptDesign function?
                 LCerror = self.MetaModel.LCerror
                 allModifiedLOO = np.zeros((len(old_EDX), len(OutputNames),
@@ -483,15 +394,15 @@ class SequentialDesign:
 
                 ExploitScore = np.max(np.max(allModifiedLOO, axis=1), axis=1)
 
-            elif UtilMethod in ['EIGF', 'ALM']:
+            elif UtilMethod in ['eigf', 'alm']:
                 # ToDo: Check the methods it actually can receive (ALC is missing from conditional list and code)
                 # ----- All other in  ['EIGF', 'ALM'] -----
                 # Initilize the ExploitScore array
                 # ExploitScore = np.zeros((len(old_EDX), len(OutputNames)))
 
                 # Split the candidates in groups for multiprocessing
-                if explore_method != 'Voronoi':
-                    split_cand = np.array_split(allCandidates,
+                if explore_method.lower() != 'voronoi':
+                    split_cand = np.array_split(candidates,
                                                 n_cand_groups,
                                                 axis=0)
                     goodSampleIdx = range(n_cand_groups)
@@ -519,82 +430,98 @@ class SequentialDesign:
 
                 # Retrieve the results and append them
                 if explore_method == 'Voronoi':
-                    ExploitScore = [np.mean(results[k][1]) for k in
+                    scoreExploitation = [np.mean(results[k][1]) for k in
                                     range(len(goodSampleIdx))]
                 else:
-                    ExploitScore = np.concatenate(
+                    scoreExploitation = np.concatenate(
                         [results[k][1] for k in range(len(goodSampleIdx))])
 
             else:
                 raise NameError('The requested utility function is not '
                                 'available.')
 
-            # print("ExploitScore:\n", ExploitScore)
 
             # find an optimal point subset to add to the initial design by
             # maximization of the utility score and taking care of NaN values
             # Total score
             # Normalize U_J_d
             # ToDo: MOve this out of the exploitation if/else part (same as with Bayesian approaches)
-            ExploitScore = ExploitScore / np.sum(ExploitScore)
-            totalScore = exploit_w * ExploitScore
-            # print(totalScore.shape)
-            # print(explore_w)
-            # print(scoreExploration.shape)
-            totalScore += explore_w * scoreExploration
-
-            temp = totalScore.copy()
-            sorted_idxtotalScore = np.argsort(temp, axis=0)[::-1]
-            bestIdx = sorted_idxtotalScore[:n_new_samples]
-
-            Xnew = np.zeros((n_new_samples, ndim))
-            if explore_method != 'Voronoi':
-                Xnew = allCandidates[bestIdx]
-            else:
-                for i, idx in enumerate(bestIdx.flatten()):
-                    X_can = explore.closest_points[idx]
+            scoreExploitation = scoreExploitation / np.sum(scoreExploitation)
+#            totalScore = exploit_w * ExploitScore
+#            totalScore += explore_w * scoreExploration
+
+#            temp = totalScore.copy()
+#            sorted_idxtotalScore = np.argsort(temp, axis=0)[::-1]
+#            bestIdx = sorted_idxtotalScore[:n_new_samples]
+
+#            Xnew = np.zeros((n_new_samples, ndim))
+#            if explore_method != 'Voronoi':
+#                Xnew = candidates[bestIdx]
+#            else:
+            # TODO: do we also need to consider voronoi as extra case when 
+            #       adding up the scores?
+#                for i, idx in enumerate(bestIdx.flatten()):
+#                    X_can = explore.closest_points[idx]
                     # plotter(self.ExpDesign.X, X_can, explore_method,
                     # scoreExploration=None)
 
                     # Calculate the maxmin score for the region of interest
-                    newSamples, maxminScore = explore.get_mc_samples(X_can)
+#                    newSamples, maxminScore = explore.get_mc_samples(X_can)
 
                     # select the requested number of samples
-                    Xnew[i] = newSamples[np.argmax(maxminScore)]
+#                    Xnew[i] = newSamples[np.argmax(maxminScore)]
 
-        # ToDo: For these 2 last methods, we should find better ways
+        # TODO: For these last two methods, we should find better ways
         elif exploit_method.lower() == 'alphabetic':
-            # ToDo: Check function to see what it does for scores/how it chooses points, so it gives as an output the
+            # TODO: Check function to see what it does for scores/how it chooses points, so it gives as an output the
             #       scores. See how it works with exploration_scores.
-            # Todo: Check if it is a minimization or maximization. (We think it is minimization)
+            # TODO: Check if it is a minimization or maximization. (We think it is minimization)
             # ------- EXPLOITATION: ALPHABETIC -------
-            Xnew = self.util_AlphOptDesign(allCandidates, var)
+            
+            scoreExploitation = -self.util_AlphOptDesign(candidates, var)
 
         elif exploit_method == 'Space-filling':
-            # ToDo: Set exploitation score to 0, so we can do tradeoff oustide of if/else
+            # TODO: Set exploitation score to 0, so we can do tradeoff oustide of if/else
             # ------- EXPLOITATION: SPACE-FILLING -------
-            totalScore = scoreExploration
+            scoreExploitation = scoreExploration
 
             # ------- Select the best candidate -------
             # find an optimal point subset to add to the initial design by
             # maximization of the utility score and taking care of NaN values
-            temp = totalScore.copy()
-            temp[np.isnan(totalScore)] = -np.inf
-            sorted_idxtotalScore = np.argsort(temp)[::-1]
+#            temp = totalScore.copy()
+#            temp[np.isnan(totalScore)] = -np.inf
+#            sorted_idxtotalScore = np.argsort(temp)[::-1]
 
             # select the requested number of samples
-            Xnew = allCandidates[sorted_idxtotalScore[:n_new_samples]]
+#            Xnew = candidates[sorted_idxtotalScore[:n_new_samples]]
 
         else:
             raise NameError('The requested design method is not available.')
 
+        # ------------- Select the best candidates ------------------
+        totalScore = explore_w * scoreExploration
+        totalScore += exploit_w * scoreExploitation
+
+        # TODO: this maximizes the scores, recheck that it works as expected
+        #       for all exploration and exploitation types!
+        # Treat NaNs as -inf (as the old code did): np.argsort places NaNs
+        # at the end, so after the [::-1] reversal they would be picked first
+        totalScore = np.asarray(totalScore, dtype=float)
+        totalScore[np.isnan(totalScore)] = -np.inf
+        sorted_scores = np.argsort(totalScore)[::-1]
+        Xnew = candidates[sorted_scores[:n_new_samples]]
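+        # Worked example (illustrative): with explore_w = 0.4, exploit_w = 0.6
+        # and two candidates scoring [0.7, 0.3] on exploration and [0.2, 0.8]
+        # on exploitation, the total scores are [0.40, 0.60], so the
+        # descending argsort picks the second candidate first.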
+
         print("\n")
         print("\nRun No. {}:".format(old_EDX.shape[0] + 1))
         print("Xnew:\n", Xnew)
+        
+        # Plot if wanted and the samples are 2D
+        # TODO: add a parameter that toggles the plots
+        ndim = self.ExpDesign.X.shape[1]
+        if ndim == 2:
+            self.plotter(self.ExpDesign.X, candidates,
+                         scoreExploration, scoreExploitation, Xnew)
+
 
         # TODO: why does it also return None?
         return Xnew, None
-
+    
     # -------------------------------------------------------------------------
     def tradeoff_weights(self, tradeoff_scheme, old_EDX, old_EDY):
         """
@@ -679,6 +606,189 @@ class SequentialDesign:
         exploitation_weight = 1 - exploration_weight
 
         return exploration_weight, exploitation_weight
+    
+    def do_exploration(self, explore_method, n_candidates, old_EDX=None):
+        """
+        Generate the sequential candidates and score them with the chosen
+        exploration scheme.
+
+        Parameters
+        ----------
+        explore_method : string
+            Exploration method.
+        n_candidates : int
+            Number of candidates.
+        old_EDX : np.array of shape (n_samples, n_params), optional
+            Previous training inputs, used to build the error model for the
+            'LOOCV' exploration scheme. The default is None.
+
+        Returns
+        -------
+        candidates : np.array of shape (n_candidates, n_params)
+            The candidates for the next chosen sample.
+        scoreExploration : np.array of shape (n_candidates)
+            The exploration score for each candidate.
+
+        """
+        explore_method = explore_method.lower()
+        if explore_method == 'random':
+            # Generate random samples
+            candidates = self.ExpDesign.generate_samples(n_candidates,
+                                                         'random')
+            # Uniform score for all samples
+            scoreExploration = np.ones(n_candidates) / n_candidates
+        elif explore_method in ('latin-hypercube', 'latin_hypercube'):
+            # Generate Latin hypercube samples
+            # NOTE: assuming generate_samples expects the underscore spelling
+            #       used elsewhere in this module
+            candidates = self.ExpDesign.generate_samples(n_candidates,
+                                                         'latin_hypercube')
+            # Uniform score for all samples
+            scoreExploration = np.ones(n_candidates) / n_candidates
+        elif explore_method == 'loocv':
+            # TODO: LOOCV model construction based on Feng et al. (2020)
+            # Construct error model based on LCerror
+            # TODO: there is no create_ModelError in the MetaModel class!?
+            errorModel = self.MetaModel.create_ModelError(old_EDX, self.LCerror)
+            self.errorModel.append(copy(errorModel))
+
+            # Generate samples with the configured sampling method
+            sampling_method = self.ExpDesign.sampling_method
+            candidates = self.ExpDesign.generate_samples(n_candidates,
+                                                         sampling_method)
+            # Evaluate the error model for all candidates
+            eLCAllCands, _ = errorModel.eval_errormodel(candidates)
+            # Select the maximum as the representative error
+            eLCAllCands = np.dstack(eLCAllCands.values())
+            eLCAllCandidates = np.max(eLCAllCands, axis=1)[:, 0]
+
+            # Normalize the errors so the scores sum to one
+            scoreExploration = eLCAllCandidates / np.sum(eLCAllCandidates)
+            
+        elif explore_method in ('global_mc', 'voronoi'):
+            explore = Exploration(self.ExpDesign, n_candidates)
+            # TODO: this seems to be some ratio of the number of candidates
+            #       to the number of exploration scores? Leads to issues in
+            #       the plots
+            explore.w = 100  # * ndim #500  # TODO: where does this value come from?
+
+            # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
+            explore.mc_criterion = 'mc-intersite-proj'
+            candidates, scoreExploration = explore.get_exploration_samples()
+        else:
+            raise AttributeError(
+                'The chosen exploration method is not available!')
+        
+        return candidates, scoreExploration
+
+    # -------------------------------------------------------------------------
+    def plotter(self, ED_X, candidates, scoreExploration, scoreExploitation,
+                XNew) -> None:
+        """
+        Creates 2D visualizations of the candidate samples and their scores.
+        # TODO: find a better visualization of the scores and combined scores
+
+        Parameters
+        ----------
+        ED_X : np.array
+            The points that are already in the ExpDesign.
+        candidates : np.array
+            The candidate samples.
+        scoreExploration : np.array
+            Exploration score of each candidate.
+        scoreExploitation : np.array
+            Exploitation score of each candidate.
+        XNew : np.array
+            The newly selected training point(s).
+
+        """
+        tradeoff = self.ExpDesign.tradeoff_scheme  # may be None, so not lowered
+        explore = self.ExpDesign.explore_method.lower()
+        exploit = self.ExpDesign.exploit_method.lower()
+        util_func = self.ExpDesign.util_func.lower()
+        
+        # Exploration first
+        if explore == 'voronoi':
+            from scipy.spatial import Voronoi, voronoi_plot_2d
+            vor = Voronoi(ED_X)
+            fig = voronoi_plot_2d(vor)
+            ax1 = fig.axes[0]
+            # TODO: this is just a fix since the Voronoi candidate size is off
+            candidates = candidates[:scoreExploration.shape[0]]
+        else:
+            fig = plt.figure()
+            ax1 = fig.add_subplot(111)
+            
+        ax1.scatter(ED_X[:, 0], ED_X[:, 1], marker='x',
+                    label='Old Design Points')
+        ax1.scatter(candidates[:, 0], candidates[:, 1], 
+                    c=scoreExploration/np.sum(scoreExploration),
+                    label='Design candidates')
+        ax1.scatter(XNew[0,0], XNew[0,1], marker='x', color='red', 
+                    label='New Point')
+        
+        plt.xlim(self.ExpDesign.bound_tuples[0])
+        plt.ylim(self.ExpDesign.bound_tuples[1])
+        plt.legend(loc='upper left')
+        plt.savefig(f'./{self.out_dir}/candidates_Exploration_{tradeoff}_{explore}_{exploit}_{util_func}.pdf', bbox_inches='tight')
+        plt.close()
+        
+        # Exploitation
+        if exploit == 'voronoi':
+            from scipy.spatial import Voronoi, voronoi_plot_2d
+            vor = Voronoi(ED_X)
+            fig = voronoi_plot_2d(vor)
+            ax1 = fig.axes[0]
+            # TODO: this is just a fix since the Voronoi candidate size is off
+            candidates = candidates[:scoreExploitation.shape[0]]
+        else:
+            fig = plt.figure()
+            ax1 = fig.add_subplot(111)
+            
+        ax1.scatter(ED_X[:, 0], ED_X[:, 1], marker='x',
+                    label='Old Design Points')
+        ax1.scatter(candidates[:, 0], candidates[:, 1], 
+                    c=scoreExploitation/np.sum(scoreExploitation),
+                    label='Design candidates')
+        ax1.scatter(XNew[0,0], XNew[0,1], marker='x', color='red', 
+                    label='New Point')
+        
+        plt.xlim(self.ExpDesign.bound_tuples[0])
+        plt.ylim(self.ExpDesign.bound_tuples[1])
+        plt.legend(loc='upper left')
+        plt.savefig(f'./{self.out_dir}/candidates_Exploitation_{tradeoff}_{explore}_{exploit}_{util_func}.pdf', bbox_inches='tight')
+        plt.close()
+
 
     # -------------------------------------------------------------------------
     def run_util_func(self, method, candidates, index, sigma2Dict=None,
@@ -730,13 +840,11 @@ class SequentialDesign:
                 y_hat = {key: items[idx] for key, items in y_can.items()}
                 std = {key: items[idx] for key, items in std_can.items()}
 
-                # print(y_hat)
-                # print(std)
                 U_J_d[idx] = self.util_BayesianActiveDesign(
                     y_hat, std, sigma2Dict, var)
 
         elif method.lower() == 'bayesoptdesign':
-            # ToDo: Create X_MC here, since it is not used in the other active learning approaches.
+            # TODO: Create X_MC here, since it is not used in the other active learning approaches.
             NCandidate = candidates.shape[0]
             U_J_d = np.zeros(NCandidate)
             for idx, X_can in tqdm(enumerate(candidates), ascii=True,
@@ -851,7 +959,6 @@ class SequentialDesign:
         logPriorLikelihoods = np.zeros(mc_size)
         for key in list(y_hat):
             cov = np.diag(std[key] ** 2)
-            print(key, y_hat[key], std[key])
             # TODO: added the allow_singular = True here
             rv = stats.multivariate_normal(mean=y_hat[key], cov=cov, allow_singular=True)
             Y_MC[key] = rv.rvs(size=mc_size)
@@ -949,6 +1056,11 @@ class SequentialDesign:
     def util_BayesianDesign(self, X_can, X_MC, sigma2Dict, var='DKL'):
         """
         Computes scores based on Bayesian sequential design criterion (var).
+
+        Requires `self.engine` to be set, as this computation now takes place
+        in the SequentialDesign class.
+        # TODO: Remove this requirement as soon as possible to avoid circular
+        #       dependencies!
 
         Parameters
         ----------
@@ -966,6 +1078,8 @@ class SequentialDesign:
             Score.
 
         """
+        # TODO: remove this need asap!
+        engine = self.engine
 
         # To avoid changes ub original aPCE object
         MetaModel = self.MetaModel
@@ -985,7 +1099,7 @@ class SequentialDesign:
         # Evaluate the PCE metamodels at that location ???
         Y_PC_can, Y_std_can = MetaModel.eval_metamodel(samples=X_can)
         PCE_Model_can = deepcopy(MetaModel)
-        engine_can = deepcopy(self)
+        engine_can = deepcopy(self.engine)
         # Add the candidate to the ExpDesign
         NewExpDesignX = np.vstack((oldExpDesignX, X_can))
 
@@ -1195,6 +1309,8 @@ class SequentialDesign:
         max_func_itr = self.ExpDesign.max_func_itr
 
         Res_Global = None
+        # TODO: Does dual-annealing work in the same way for the other
+        #       exploitation methods as well?
         if method.lower() == 'varoptdesign':
             Res_Global = opt.dual_annealing(self.util_VarBasedDesign,
                                             bounds=Bounds,
@@ -1206,6 +1322,8 @@ class SequentialDesign:
                                             bounds=Bounds,
                                             args=(Model, sigma2Dict, var),
                                             maxfun=max_func_itr)
+        else:
+            raise AttributeError('Dual annealing is currently not supported '
+                                 'with the chosen exploitation method.')
 
         if verbose:
             print(f"Global minimum: xmin = {Res_Global.x}, "
@@ -1265,12 +1383,12 @@ class SequentialDesign:
 
         # ------ Old Psi ------------
         univ_p_val = self.MetaModel.univ_basis_vals(oldExpDesignX)
-        Psi = self.MetaModel.create_psi(BasisIndices, univ_p_val)
+        Psi = create_psi(BasisIndices, univ_p_val)
 
         # ------ New candidates (Psi_c) ------------
         # Assemble Psi_c
         univ_p_val_c = self.MetaModel.univ_basis_vals(candidates)
-        Psi_c = self.MetaModel.create_psi(BasisIndices, univ_p_val_c)
+        Psi_c = create_psi(BasisIndices, univ_p_val_c)
 
         for idx in range(NCandidate):
 
@@ -1310,12 +1428,12 @@ class SequentialDesign:
 
         # find an optimal point subset to add to the initial design
         # by minimization of the Phi
-        sorted_idxtotalScore = np.argsort(Phi)
+#        sorted_idxtotalScore = np.argsort(Phi)
 
         # select the requested number of samples
-        Xnew = candidates[sorted_idxtotalScore[:n_new_samples]]
+#        Xnew = candidates[sorted_idxtotalScore[:n_new_samples]]
 
-        return Xnew
+        return Phi
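+    # NOTE: Phi is treated here as a minimization criterion (per the TODO in
+    # choose_next_sample, "We think it is minimization"), which is why the
+    # 'alphabetic' branch negates the returned Phi before combining it with
+    # the maximization-based exploration scores.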
 
     # -------------------------------------------------------------------------
     def _normpdf(self, y_hat_pce, std_pce, obs_data, total_sigma2s,
@@ -1750,7 +1868,7 @@ class SequentialDesign:
 
     def _select_indexes(self, prior_samples, collocation_points):
         """
-        ToDo: This function will be used to check the user-input exploration samples, remove training points that
+        TODO: This function will be used to check the user-input exploration samples, remove training points that
         were already used, and select the first mc_size samples that have not yet been used for training. It should also
         assign an exploration score of 0 to all samples.
         Args: