diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py
index dcc4a1b0b0df2dc435d1912792924095a028aa2d..83b637cadf50f7a542b83f51125b24a710c59443 100644
--- a/BayesValidRox/BayesInference/BayesInference.py
+++ b/BayesValidRox/BayesInference/BayesInference.py
@@ -260,7 +260,7 @@ class BayesInference:
             if self.emulator:
                 stdPCE = std[out] if std is not None else np.mean(self._stdPCEPriorPred[out], axis=0)
                 # Expected value of variance (Assump: i.i.d stds)
-                covMatrix += stdPCE**3 #np.diag(stdPCE**3)
+                covMatrix += stdPCE**3
 
             # print(formatting_function(covMatrix))
             # Select the data points to compare
@@ -270,9 +270,9 @@ class BayesInference:
                 covMatrix = covMatrix[:,self.selectedIndices[out]]
 
             # Compute loglikelihood
-            if TotalOutputs.shape[0] == 1:
-                logLik += self._logpdf(TotalOutputs[0], data, covMatrix)
-            elif covMatrix.ndim == 1:
+            # if TotalOutputs.shape[0] == 1:
+            #     logLik += self._logpdf(TotalOutputs[0], data, np.diag(covMatrix))
+            if covMatrix.ndim == 1:
                 logLik += stats.multivariate_normal.logpdf(TotalOutputs, data, np.diag(covMatrix))
             else:
                 logliks = np.zeros(TotalOutputs.shape[0])
@@ -819,7 +819,7 @@ class BayesInference:
             self.sigma2s = self.Discrepancy.get_Sample(self.NrofSamples)
         except:
             pass
 
         # ---------------- Bootstrap & TOM --------------------
         if self.Bootstrap or self.BayesLOOCV:
             if len(self.perturbedData) == 0:
@@ -840,7 +840,7 @@ class BayesInference:
                     perturbedData[itrIdx] = data.T[~np.isnan(data.T)]
                 self.perturbedData = perturbedData
             
-            # TODO:-------- Model Discrepancy -----------
+            # -------- Model Discrepancy -----------
             if hasattr(self, 'errorModel') and self.errorModel \
                 and self.Name.lower()!='calib':
                 # Select posterior mean as MAP
@@ -849,10 +849,11 @@ class BayesInference:
                 
                 # Evaluate the (meta-)model at the MAP
                 y_MAP, y_std_MAP = PCEModel.eval_metamodel(samples=MAP_theta,name=self.Name)
-                
+
                 # Train a GPR meta-model using MAP
                 self.errorMetaModel = PCEModel.create_ModelError(self.BiasInputs,y_MAP,
                                                                       Name=self.Name)
+
             # -----------------------------------------------------
             # ----- Loop over the perturbed observation data ------
             # -----------------------------------------------------
@@ -983,21 +984,24 @@ class BayesInference:
         print(self.Posterior_df.describe())
         print('-'*50)
         
-        # TODO:-------- Model Discrepancy -----------
-        if hasattr(self, 'errorModel') and self.errorModel:# \
-            # and self.Name.lower()=='calib':
-            # Select posterior mean as MAP
-            Posterior_df = self.Posterior_df.to_numpy() if optSigma == "B" else self.Posterior_df.to_numpy()[:,:-len(Model.Output.Names)]
-            MAP_theta = Posterior_df.mean(axis=0).reshape((1,NofPa))
-            # MAP_theta = stats.mode(Posterior_df,axis=0)[0]
-            
-            # Evaluate the (meta-)model at the MAP
-            y_MAP, y_std_MAP = PCEModel.eval_metamodel(samples=MAP_theta,name=self.Name)
+        # -------- Model Discrepancy -----------
+        if hasattr(self, 'errorModel') and self.errorModel \
+            and self.Name.lower()=='calib':
+            if self.SamplingMethod.lower() == 'mcmc':
+                self.errorMetaModel = MCMC_.errorMetaModel
+            else:
+                # Select posterior mean as MAP
+                Posterior_df = self.Posterior_df.to_numpy() if optSigma == "B" else self.Posterior_df.to_numpy()[:,:-len(Model.Output.Names)]
+                MAP_theta = Posterior_df.mean(axis=0).reshape((1,NofPa))
+                # MAP_theta = stats.mode(Posterior_df,axis=0)[0]
+                
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = PCEModel.eval_metamodel(samples=MAP_theta,name=self.Name)
+                
+                # Train a GPR meta-model using MAP
+                self.errorMetaModel = PCEModel.create_ModelError(self.BiasInputs,y_MAP,
+                                                                  Name=self.Name)
             
-            # Train a GPR meta-model using MAP
-            self.errorMetaModel = PCEModel.create_ModelError(self.BiasInputs,y_MAP,
-                                                             Name=self.Name)
-        
         # -------- Posterior perdictive -----------
         self.PosteriorPredictive()