diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py
index e420eafefad716a6c365e05af2fdb1855dff994a..9d5a7e95c6ca454d30f6781def2443dde106dcf5 100644
--- a/BayesValidRox/BayesInference/BayesInference.py
+++ b/BayesValidRox/BayesInference/BayesInference.py
@@ -509,6 +509,7 @@ class BayesInference:
         NofPa = PCEModel.NofPa
         OutputNames = Model.Output.Names
         BootstrapItrNr = self.BootstrapItrNr
+        parNames = [PCEModel.Inputs.Marginals[i].Name for i in range(len(PCEModel.Inputs.Marginals))]
         
         
         # If the prior is set by the user, take it.
@@ -678,11 +679,15 @@ class BayesInference:
             MCMC_ = MCMC(self, initsamples=initsamples, nwalkers=nwalkers, verbose=self.MCMCverbose,
                          nsteps = nsteps, moves=self.MCMCmoves, multiprocessing=multiprocessing)
             self.Posterior_df = MCMC_.run_sampler(Data[0], TotalSigma2)
+        
+        elif self.Name.lower() == 'valid':
+            self.Posterior_df = pd.DataFrame(self.Samples, columns=parNames)
             
         else: # Rejection sampling
             self.Posterior_df = self.Rejection_Sampling()
-        
-        # Provide description
+
+
+        # Print a summary of the posterior
         print('\n')
         print('-'*15 + 'Posterior summary' + '-'*15)
         pd.options.display.max_columns = None
@@ -708,7 +713,6 @@ class BayesInference:
         if not os.path.exists(OutputDir): os.makedirs(OutputDir)
         
-        # -------- Posteior parameters -------- 
-        parNames = [self.PCEModel.Inputs.Marginals[i].Name for i in range(len(PCEModel.Inputs.Marginals))]
+        # -------- Posterior parameters --------
         if optSigma != "B":
             parNames.extend([self.Discrepancy.InputDisc.Marginals[i].Name for i in range(len(Model.Output.Names))])
             
@@ -743,7 +747,7 @@ class BayesInference:
         #         ax.plot(MAP_theta[0,xi], MAP_theta[0,yi], "sr")
 
         # plt.yticks(rotation=45, horizontalalignment='right')
-        figPosterior.set_size_inches((24,16)) 
+        figPosterior.set_size_inches((24,16))
         
         if self.emulator:
             plotname = '/Posterior_Dist_'+Model.Name+'_emulator'
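
The BayesInference changes above hoist parNames to the top of the method and add a third arm to the posterior dispatch: a run named 'valid' reuses the already-drawn samples as the posterior instead of sampling again. A minimal sketch of the resulting control flow; select_posterior and both guards are hypothetical stand-ins, since the actual MCMC condition sits above the hunk's context:

    import pandas as pd

    def select_posterior(method, name, samples, par_names,
                         run_mcmc, rejection_sampling):
        # Hypothetical guard; the real MCMC condition is not shown in the hunk.
        if method.lower() == 'mcmc':
            return run_mcmc()
        # A run named 'valid' reuses the calibration samples as the posterior.
        if name.lower() == 'valid':
            return pd.DataFrame(samples, columns=par_names)
        # Anything else falls back to rejection sampling.
        return rejection_sampling()
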
diff --git a/BayesValidRox/surrogate_models/ExpDesigns.py b/BayesValidRox/surrogate_models/ExpDesigns.py
index 8df252bf2616f466934b9f63503066dfad915e12..7f71e6d00fc38559744eaedf8512128e513fa32c 100644
--- a/BayesValidRox/surrogate_models/ExpDesigns.py
+++ b/BayesValidRox/surrogate_models/ExpDesigns.py
@@ -22,7 +22,7 @@ class ExpDesigns:
         self.MCSize = 10000
         self.Input_distributions = []
         self.BoundTuples = []
-        self.SamplingMethod = 'MC'
+        self.SamplingMethod = 'random'
         self.metaModel = metaModel
         self.TradeOffScheme = 'None'
         # TODO: This should be prescribed by TotalNSamples
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index dcd99dafafc80e96c771a478bd4403c44c787cbb..220586ff56069c456bcf727cb08bbfb743728777 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -1022,11 +1022,11 @@ class Metamodel:
             Y_PC_can, std_PC_can = self.eval_metamodel(samples=X_can)
             canPredVar = {key:std_PC_can[key]**2 for key in OutputNames}
             
-            varPCE = []
+            varPCE = np.zeros((len(OutputNames),X_can.shape[0]))
             for KeyIdx, key in enumerate(OutputNames):
-                varPCE.extend(np.max(canPredVar[key],axis=1))
-            score = np.array(varPCE)
-            
+                varPCE[KeyIdx] = np.max(canPredVar[key],axis=1)
+            score = np.max(varPCE, axis=0)
+
         elif UtilMethod == 'EIGF':
             # ----- Expected Improvement for Global fit -----
             # Eq (5) from Liu et al.(2018)
@@ -1035,13 +1035,13 @@
             predError = {key:Y_PC_can[key] for key in OutputNames}
             canPredVar = {key:std_PC_can[key]**2 for key in OutputNames}
             
-            EIGF_PCE = []
+            EIGF_PCE = np.zeros((len(OutputNames),X_can.shape[0]))
             for KeyIdx, key in enumerate(OutputNames):
                 residual = predError[key] - OutputDictY[key][index]
                 var = canPredVar[key]
-                EIGF_PCE.extend(np.max(residual**2 + var,axis=1))
-            score = np.array(EIGF_PCE)
-        
+                EIGF_PCE[KeyIdx] = np.max(residual**2 + var,axis=1)
+            score = np.max(EIGF_PCE, axis=0)
+            
         return -1 * score   # -1 is for minimization instead of maximization
     
     #--------------------------------------------------------------------------------------------------------
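
Both utility branches in the two hunks above fix the same shape bug: list.extend concatenated the per-output maxima into one flat vector of length n_outputs * n_candidates (and once the accumulator became an ndarray, .extend would raise AttributeError anyway), so the scores no longer lined up one per candidate. The rewrite stacks per-output scores row-wise and reduces over the output axis, keeping the candidate whose worst output is most uncertain. A standalone sketch of the pattern, with invented shapes:

    import numpy as np

    # Invented shapes: 3 outputs, 5 candidate points, 10 time steps each.
    rng = np.random.default_rng(0)
    can_pred_var = {key: rng.random((5, 10)) for key in ('A', 'B', 'C')}

    var_pce = np.zeros((len(can_pred_var), 5))
    for i, key in enumerate(can_pred_var):
        # Worst-case predictive variance over the time axis, per candidate
        var_pce[i] = np.max(can_pred_var[key], axis=1)

    score = np.max(var_pce, axis=0)   # one score per candidate, as intended
    assert score.shape == (5,)
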
@@ -2074,15 +2074,19 @@
         ModelOutputNames = self.ModelObj.Output.Names
         
         SampleSize, index = PCEOutputs[ModelOutputNames[0]].shape
-        if self.index is not None: index = self.index
-        
+        index = self.index if isinstance(self.index, list) else [self.index]*len(ModelOutputNames)
+            
         # Flatten the ObservationData
         TotalData = ObservationData[ModelOutputNames].to_numpy().flatten('F')
         
+        # Drop NaN entries from the observations and their variances
+        TotalData = TotalData[~np.isnan(TotalData)]
+        Sigma2s = Sigma2s[~np.isnan(Sigma2s)]
+        
         # Flatten the Output
         TotalOutputs = np.empty((SampleSize,0))
-        for key in ModelOutputNames:
-            TotalOutputs = np.hstack((TotalOutputs, PCEOutputs[key][:,:index]))
+        for idx, key in enumerate(ModelOutputNames):
+            TotalOutputs = np.hstack((TotalOutputs, PCEOutputs[key][:,:index[idx]]))
         
         # Covariance Matrix 
         covMatrix = np.zeros((Sigma2s.shape[0], Sigma2s.shape[0]), float)
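
The index handling above generalizes one truncation length to one per output: a scalar (or None) is broadcast to a list so the slice [:, :index[idx]] works uniformly (slicing with :None keeps every column, matching the old behavior), and the NaN filter drops unobserved entries from the flattened data together with the matching variance entries. A toy illustration of the per-output slicing, with invented shapes:

    import numpy as np

    # Invented: two outputs truncated to different numbers of time steps.
    pce_outputs = {'Q': np.ones((100, 8)), 'H': np.ones((100, 6))}
    index = [5, 3]   # keep 5 columns of 'Q' and 3 of 'H'

    total_outputs = np.empty((100, 0))
    for idx, key in enumerate(pce_outputs):
        total_outputs = np.hstack((total_outputs,
                                   pce_outputs[key][:, :index[idx]]))
    assert total_outputs.shape == (100, 8)   # 5 + 3 columns kept
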
@@ -2091,8 +2095,8 @@
         # Add the std of the PCE.
         covMatrix_PCE = np.zeros((Sigma2s.shape[0], Sigma2s.shape[0]), float)
         stdPCE = np.empty((SampleSize,0))
-        for key in ModelOutputNames:
-            stdPCE = np.hstack((stdPCE, std_PC_MC[key][:,:index]))
+        for idx, key in enumerate(ModelOutputNames):
+            stdPCE = np.hstack((stdPCE, std_PC_MC[key][:,:index[idx]]))
 
         # Expected value of variance (Assump: i.i.d stds)
         varPCE = np.mean(stdPCE**2, axis=0)
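
For reference, the covariance assembly these last lines feed: a diagonal matrix of measurement-error variances plus a second diagonal built from the surrogate's expected predictive variance (the "Expected value of variance" step, under the i.i.d. assumption stated in the comment). A minimal sketch with invented numbers; the final sum is an assumption, since the combination of the two matrices happens below the hunk:

    import numpy as np

    rng = np.random.default_rng(1)
    Sigma2s = np.array([0.10, 0.20, 0.15])   # per-observation error variances
    stdPCE = rng.random((1000, 3))           # surrogate stds over the MC sample

    varPCE = np.mean(stdPCE**2, axis=0)      # expected surrogate variance
    covMatrix = np.diag(Sigma2s)             # measurement-error covariance
    covMatrix_PCE = np.diag(varPCE)          # surrogate-uncertainty covariance
    totalCov = covMatrix + covMatrix_PCE     # assumed downstream combination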