diff --git a/Outputs_Bayes_None_Calib/emcee_sampler.h5 b/Outputs_Bayes_None_Calib/emcee_sampler.h5
index fb19ab42bc20ce5c07901fed541bc29760bfc3d9..81f2373f9817399ab2c7fb908ad03de3f94a7d88 100644
Binary files a/Outputs_Bayes_None_Calib/emcee_sampler.h5 and b/Outputs_Bayes_None_Calib/emcee_sampler.h5 differ
diff --git a/src/bayesvalidrox/surrogate_models/sequential_design.py b/src/bayesvalidrox/surrogate_models/sequential_design.py
index 2885331fd04f227963ca78edcd1afc8b1ef4d88d..f4669a7f29f0cefefdd4ae18f2ea5f0eab0b2317 100644
--- a/src/bayesvalidrox/surrogate_models/sequential_design.py
+++ b/src/bayesvalidrox/surrogate_models/sequential_design.py
@@ -371,20 +371,13 @@ class SequentialDesign:
 
                 # Create a sample pool for rejection sampling
                 # ToDo: remove from here, add only to BayesOptDesign option
-                # TODO: reduced for testing
                 MCsize = 15000
                 X_MC = self.ExpDesign.generate_samples(MCsize, 'random')
 
-                # ToDo: Get samples from the "do_exploration"
-                #candidates = self.ExpDesign.generate_samples(
-                #    n_candidates, 'latin_hypercube')
-
                 # Split the candidates in groups for multiprocessing
                 split_cand = np.array_split(
                     allCandidates, n_cand_groups, axis=0
                 )
-                # print(candidates)
-                # print(split_cand)
                 if self.parallel:
                     results = Parallel(n_jobs=-1, backend='multiprocessing')(
                         delayed(self.run_util_func)(
@@ -411,52 +404,12 @@ class SequentialDesign:
                     U_J_d = np.mean(U_J_d.reshape(-1, n_candidates), axis=1)
 
                 # Normalize U_J_d
-                # ToDO: Check if this is working for the case where the util_func should be minimized (e.g. IE)
                 # norm_U_J_D = U_J_d / np.nansum(np.abs(U_J_d))  # Possible solution
                 norm_scoreExploitation = U_J_d / np.sum(U_J_d)
 
             else:
                 norm_scoreExploitation = np.zeros((len(scoreExploration)))
 
-            # ------- Calculate Total score -------
-            # ToDo: This should be outside of the exploration/exploitation if/else part
-            # ------- Trade off between EXPLORATION & EXPLOITATION -------
-            # Accumulate the samples
-            # ToDo: Stop assuming 2 sets of samples (should only be 1)
-            #finalCandidates = np.concatenate((allCandidates, candidates), axis=0)  # ToDo: Remove
-            finalCandidates = allCandidates#np.unique(finalCandidates, axis=0)                   # ToDo: Remove
-
-            # Calculations take into account both exploration and exploitation
-            # samples without duplicates
-            #totalScore = np.zeros(finalCandidates.shape[0])
-            # self.totalScore = totalScore
-
-            # ToDo: Simplify (remove loop) for only one set of samples
-            # final_weights = explore_score*explore_weights + exploit_score*exploit_weight
-            #for cand_idx in range(finalCandidates.shape[0]):
-            #    # find candidate indices
-            #    idx1 = np.where(allCandidates == finalCandidates[cand_idx])[0]
-            #    idx2 = np.where(candidates == finalCandidates[cand_idx])[0]#
-
-                # exploration
-            #    if idx1.shape[0] > 0:
-            #        idx1 = idx1[0]
-            #        totalScore[cand_idx] += explore_w * scoreExploration[idx1]
-
-            #    # exploitation
-            #    if idx2.shape[0] > 0:
-            #        idx2 = idx2[0]
-            #        totalScore[cand_idx] += exploit_w * norm_U_J_d[idx2]
-
-            # Total score
-            print('Shapes of components of the weights')
-            print(f'Exploration: {explore_w} * {scoreExploration.shape}')
-            print(f'Exploitation: {exploit_w} * {norm_scoreExploitation.shape}')
-            # TODO: for the combination none, voronoi, BOD+MI the exploration scores contain 10*more values than the exploitation scores
-            #       This seems to be an issue resulting from the voronoi sampling, not BODMI
-            
-            totalScore = exploit_w * norm_scoreExploitation + explore_w * scoreExploration
-
             # temp: Plot
             # dim = self.ExpDesign.X.shape[1]
             # if dim == 2:
@@ -478,8 +431,6 @@ class SequentialDesign:
             # Generate candidate samples from Exploration class
             nMeasurement = old_EDY[OutputNames[0]].shape[1]
 
-            # print(UtilMethod)
-
             # Find sensitive region
             if UtilMethod.lower() == 'loocv':
                 # TODO: why is this not inside the VarOptDesign function?
@@ -496,8 +447,6 @@ class SequentialDesign:
             elif UtilMethod in ['EIGF', 'ALM']:
                 # ToDo: Check the methods it actually can receive (ALC is missing from conditional list and code)
                 # ----- All other in  ['EIGF', 'ALM'] -----
-                # Initilize the ExploitScore array
-                # ExploitScore = np.zeros((len(old_EDX), len(OutputNames)))
 
                 # Split the candidates in groups for multiprocessing
                 if explore_method.lower() != 'voronoi':
@@ -538,69 +487,40 @@ class SequentialDesign:
             else:
                 raise NameError('The requested utility function is not '
                                 'available.')
-
-            # print("ExploitScore:\n", ExploitScore)
-
-            # find an optimal point subset to add to the initial design by
-            # maximization of the utility score and taking care of NaN values
-            # Total score
-            # Normalize U_J_d
-            # ToDo: MOve this out of the exploitation if/else part (same as with Bayesian approaches)
-            norm_scoreExploitation = ExploitScore / np.sum(ExploitScore)
-            totalScore = exploit_w * norm_scoreExploitation + explore_w * scoreExploration
-            
+
+            # Total score: normalize the exploitation score (U_J_d)
+            norm_scoreExploitation = ExploitScore / np.sum(ExploitScore)
             opt_type = 'maximization'
 
-            #temp = totalScore.copy()
-            #sorted_idxtotalScore = np.argsort(temp, axis=0)[::-1]
-            #bestIdx = sorted_idxtotalScore[:n_new_samples]
-
-            #Xnew = np.zeros((n_new_samples, ndim))
-            #if explore_method.lower() != 'voronoi':
-            #    Xnew = allCandidates[bestIdx]
-            #else:
-            #    for i, idx in enumerate(bestIdx.flatten()):
-            #        X_can = explore.closest_points[idx]
-                    # plotter(self.ExpDesign.X, X_can, explore_method,
-                    # scoreExploration=None)
-
-                    # Calculate the maxmin score for the region of interest
-            #        newSamples, maxminScore = explore.get_mc_samples(X_can)
-
-                    # select the requested number of samples
-            #        Xnew[i] = newSamples[np.argmax(maxminScore)]
-
-        # ToDo: For these 2 last methods, we should find better ways
         elif exploit_method.lower() == 'alphabetic':
             # ToDo: Check function to see what it does for scores/how it chooses points, so it gives as an output the
             #       scores. See how it works with exploration_scores.
-            # Todo: Check if it is a minimization or maximization. (We think it is minimization)
-            # TODO: Move the final accumulation of scores outside of this function to match the other methods
+            # TODO: Rethink the final accumulation of scores for this function
+            #       Should this be reworked to maximization as well?
             # ------- EXPLOITATION: ALPHABETIC -------
-            scoreExploitation = self.util_AlphOptDesign(allCandidates, var)
-            totalScore = exploit_w * scoreExploitation + explore_w * scoreExploration
+            norm_scoreExploitation = self.util_AlphOptDesign(allCandidates, var)
             opt_type = 'minimization'
 
         elif exploit_method.lower() == 'space-filling':
-            # ToDo: Set exploitation score to 0, so we can do tradeoff oustide of if/else
             # ------- EXPLOITATION: SPACE-FILLING -------
-            scoreExploitation = scoreExploration
-            totalScore = exploit_w * scoreExploitation + explore_w * scoreExploration
+            norm_scoreExploitation = scoreExploration
+            exploit_w = 0
             opt_type = 'maximization'
-
-            # ------- Select the best candidate -------
-            # find an optimal point subset to add to the initial design by
-            # maximization of the utility score and taking care of NaN values
-            #temp = totalScore.copy()
-            #temp[np.isnan(totalScore)] = -np.inf
-            #sorted_idxtotalScore = np.argsort(temp)[::-1]
-
-            # select the requested number of samples
-            #Xnew = allCandidates[sorted_idxtotalScore[:n_new_samples]]
-
+
         else:
             raise NameError('The requested design method is not available.')
             
+        # Accumulate all candidates and scores
+        # TODO: recheck that this weighted accumulation holds for all combinations of methods
+        # TODO: for the combination none, voronoi, BOD+MI, the exploration scores contain 10x more
+        #       values than the exploitation scores. This seems to be caused by the Voronoi sampling, not BOD+MI.
+        #print('Shapes of components of the weights')
+        #print(f'Exploration: {explore_w} * {scoreExploration.shape}')
+        #print(f'Exploitation: {exploit_w} * {norm_scoreExploitation.shape}')
+        finalCandidates = allCandidates
+        totalScore = exploit_w * norm_scoreExploitation + explore_w * scoreExploration
+
+        # Choose the new training samples
         # If the total weights should be maximized
         if opt_type == 'maximization':
             
@@ -630,12 +550,12 @@ class SequentialDesign:
                
         # If the total weights should be maximized
         elif opt_type == 'minimization':
-                # find an optimal point subset to add to the initial design
-                # by minimization of the Phi
-                sorted_idxtotalScore = np.argsort(totalScore)
+            # find an optimal point subset to add to the initial design
+            # by minimization of the Phi
+            sorted_idxtotalScore = np.argsort(totalScore)
 
-                # select the requested number of samples
-                Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]
+            # select the requested number of samples
+            Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]
             
 
         print("\n")