diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py
index a5ac810d0536e40327390ad6e490032200a1ad93..9618148a510b2796d378c7470d285cf4e6e6e72a 100644
--- a/BayesValidRox/BayesInference/BayesInference.py
+++ b/BayesValidRox/BayesInference/BayesInference.py
@@ -179,10 +179,10 @@ class BayesInference:
     
         """
         from sklearn.gaussian_process.kernels import RBF
-        formatting_function = np.vectorize(lambda f: format(f, '6.2E'))
+        formatting_function = np.vectorize(lambda f: format(f, '6.2e'))
         min_max_scaler = preprocessing.MinMaxScaler()
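         # Scale inputs to [0, 1] so the RBF length scale refers to the unit cube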
         X_minmax = min_max_scaler.fit_transform(X)
-        
+
         nparams = len(Sigma2)
         # characteristic length (0,1]
         Lambda =  Sigma2[0]
@@ -193,12 +193,10 @@ class BayesInference:
         
         rbf = RBF(length_scale=Lambda)
         cov_matrix = sigma2_f * rbf(X_minmax)
-
         if nparams > 2:
             # (unresolvable error) nugget term that is interpreted as random 
             # error that cannot be attributed to measurement error.
             sigma2_0 = Sigma2[2:]
-            
             # Add the summed nugget variance to the diagonal (vectorized)
             cov_matrix += np.sum(sigma2_0) * np.eye(cov_matrix.shape[0])
 
@@ -235,13 +233,13 @@ class BayesInference:
 
             if Sigma2 is not None:
                 # Check the type error term
-                if hasattr(self, 'BiasInputs') and not hasattr(self, 'discrepancy_GP'):
+                if hasattr(self, 'BiasInputs') and not hasattr(self, 'errorModel'):
                     # TODO: Infer a Bias model using Gaussian Process Regression
                     BiasInputs = np.hstack((self.BiasInputs[out],TotalOutputs.T))
                     # EDY = self.PCEModel.ExpDesign.Y[out]
                     # BiasInputs = np.hstack((self.BiasInputs[out],EDY.T))
-
-                    biasSigma2s = self.RBF_kernel(BiasInputs, Sigma2)
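+                    # Split the inferred hyperparameters per output: the first
+                    # three (length scale, variance, nugget) belong to the first
+                    # output, the remaining ones to the second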
+                    sigma2 = Sigma2[:3] if idx==0 else Sigma2[3:]
+                    biasSigma2s = self.RBF_kernel(BiasInputs, sigma2)
                     # biasSigma2s = self.RBF_kernel(self.BiasInputs[out], Sigma2)
                 else:
                     # Infer equal sigma2s
@@ -261,6 +259,7 @@ class BayesInference:
             # Add the std of the PCE is chosen as emulator.
             if self.emulator:
                 stdPCE = std[out] if std is not None else np.mean(self._stdPCEPriorPred[out], axis=0)
                 # Expected value of variance (Assump: i.i.d stds)
                 covMatrix += np.diag(stdPCE**2)
             # print(formatting_function(covMatrix))
@@ -554,10 +553,6 @@ class BayesInference:
         
         PCEModel = self.PCEModel
         Model = PCEModel.ModelObj
-        if hasattr(PCEModel, 'errorModel') and PCEModel.errorModel:
-            discrepancy = True 
-        else:
-            discrepancy = False
             
         # Make a directory to save the prior/posterior predictive
         OutputDir = (r'Outputs_Bayes_' + Model.Name + '_' + self.Name)
@@ -595,13 +590,18 @@ class BayesInference:
         if self.emulator:
             if self.SamplingMethod == 'rejection':
                 PriorPred = self.__meanPCEPriorPred
-            PosteriorPred, _ = PCEModel.eval_metamodel(samples=Posterior_df.to_numpy(),
-                                                       name=self.Name,discrepModel=discrepancy)
+            PosteriorPred, PosteriorPred_std = PCEModel.eval_metamodel(samples=Posterior_df.to_numpy(),
+                                                       name=self.Name)
         else:
             if self.SamplingMethod == 'rejection':
                 PriorPred = self.__ModelPriorPred
             PosteriorPred = self.eval_Model(Samples=Posterior_df.to_numpy(),key='PostPred')
         
+        # Correct the predictions with the model discrepancy (bias) model
+        if hasattr(self, 'errorModel') and self.errorModel:
+            PosteriorPred, PosteriorPred_std = self.errorMetaModel.eval_errormodel(self.BiasInputs,
+                                                                      PosteriorPred)
+        
         # Add discrepancy from likelihood Sample to the current posterior runs
         TotalSigma2 = self.Discrepancy.TotalSigma2
         for  varIdx, var in enumerate(Model.Output.Names):
@@ -615,12 +615,13 @@ class BayesInference:
                 # Check the type error term
                 if Sigma2Prior is not None:
                     # Inferred sigma2s
-                    if hasattr(self, 'BiasInputs') and not hasattr(self, 'discrepancy_GP'):
+                    if hasattr(self, 'BiasInputs') and not hasattr(self, 'errorModel'):
                         # TODO: Infer a Bias model using Gaussian Process Regression
                         EDY = self.PCEModel.ExpDesign.Y[var]
                         # BiasInputs = np.hstack((self.BiasInputs[var],EDY.T))
                         BiasInputs = np.hstack((self.BiasInputs[var],pred.reshape(-1,1)))
-                        cov = self.RBF_kernel(BiasInputs,sigma2s[i])
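+                        # Same per-output split of the kernel hyperparameters
+                        # as in the likelihood computation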
+                        sigma2 = sigma2s[i,:3] if varIdx==0 else sigma2s[i,3:]
+                        cov = self.RBF_kernel(BiasInputs,sigma2)
                         # cov = self.RBF_kernel(self.BiasInputs[var], sigma2s[i])
                     else:
                         # Infer equal sigma2s
@@ -633,6 +634,10 @@ class BayesInference:
                         # cov = np.diag(sigma2 * np.ones(len(pred)))
                         cov = np.diag( sigma2 * totalSigma2)
                 
+                # Add discrepancy model's covMatrix
+                if hasattr(self, 'errorMetaModel') and self.errorModel:
+                    cov += np.diag(PosteriorPred_std[var][i]**2)
+                
                 # Sample a multi-variate normal distribution with mean of prediction and variance of cov
                 PosteriorPred[var][i] = np.random.multivariate_normal(pred, cov, 1)  
                 
@@ -719,15 +724,6 @@ class BayesInference:
             self.MeasuredData = pd.DataFrame(self.MeasuredData)
         ObservationData = self.MeasuredData[OutputNames].to_numpy()
         
-        # TODO: Create a GPR based Bias model
-        if hasattr(self, 'BiasInputs'):
-            from .discrepancy_GP import Bias
-            self.discrepancy_GP = Bias()
-            self.discrepancy_GP.fit_bias(self.PCEModel.ExpDesign.X, 
-                                          self.PCEModel.ExpDesign.Y,
-                                          self.MeasuredData)
-        
-        
         nMeasurement, nOutputs = ObservationData.shape
         self.ntotMeasurement = ObservationData[~np.isnan(ObservationData)].shape[0]
         BootstrapItrNr = self.BootstrapItrNr if not self.BayesLOOCV else self.ntotMeasurement 
@@ -823,13 +819,8 @@ class BayesInference:
             
             # Evaluate the PCEModel
             if self.emulator:
-                if hasattr(PCEModel, 'errorModel') and PCEModel.errorModel:
-                    discrepancy = True 
-                else:
-                    discrepancy = False
-
                 self.__meanPCEPriorPred, self._stdPCEPriorPred = PCEModel.eval_metamodel(samples=self.Samples,
-                                                                                         name=self.Name,discrepModel=discrepancy)
+                                                                                         name=self.Name)
                 # Surrogate model's error using RMSE of test data
                 surrError = PCEModel.RMSE if hasattr(PCEModel,'RMSE') else None
             else:
@@ -940,6 +931,22 @@ class BayesInference:
         print(self.Posterior_df.describe())
         print('-'*50)
         
+        # -------- Model discrepancy -----------
+        # Note: trained only during calibration; reuse with new inputs in validation
+        if hasattr(self, 'errorModel') and self.errorModel \
+            and self.Name.lower()=='calib':
+            # Use the posterior mean as a proxy for the MAP estimate
+            Posterior_df = self.Posterior_df.to_numpy() if optSigma == "B" else self.Posterior_df.to_numpy()[:,:-len(Model.Output.Names)]
+            MAP_theta = Posterior_df.mean(axis=0).reshape((1,NofPa))
+            # MAP_theta = stats.mode(Posterior_df,axis=0)[0]
+            
+            # Evaluate the (meta-)model at the MAP
+            y_MAP, y_std_MAP = PCEModel.eval_metamodel(samples=MAP_theta,name=self.Name)
+            
+            # Train a GPR meta-model using MAP
+            self.errorMetaModel = PCEModel.create_ModelError(self.BiasInputs,y_MAP,
+                                                             Name=self.Name)
+        
         # -------- Posterior predictive -----------
         self.PosteriorPredictive()
         
diff --git a/BayesValidRox/BayesInference/MCMC.py b/BayesValidRox/BayesInference/MCMC.py
index e241c614aba5904fea5320c5785d20b60ef47eeb..432425826976ded790e5e91e5dbd7912c418ed4f 100755
--- a/BayesValidRox/BayesInference/MCMC.py
+++ b/BayesValidRox/BayesInference/MCMC.py
@@ -40,7 +40,8 @@ class MCMC():
         
     def run_sampler(self, Observation, TotalSigma2):
 
-        PCEModel = self.BayesOpts.PCEModel
+        BayesObj = self.BayesOpts
+        PCEModel = BayesObj.PCEModel
         Model = PCEModel.ModelObj
         Discrepancy = self.BayesOpts.Discrepancy
         nrCPUs = Model.nrCPUs
@@ -164,6 +165,13 @@ class MCMC():
                 if sampler.iteration % autocorreverynsteps:
                     continue
                 
+                # Train model discrepancy/error
+                if hasattr(BayesObj, 'errorModel') and BayesObj.errorModel:
+                    try:
+                        self.errorMetaModel = self.train_errorModel(sampler)
+                    except Exception:
+                        # Too few posterior samples to train the error model yet
+                        pass
+
                 # always print the current mean acceptance fraction
                 if self.verbose:
                     print("\nStep: {}".format(sampler.iteration))
@@ -407,17 +415,13 @@ class MCMC():
     
     def eval_model(self, theta):
         
-        BayesOpts = self.BayesOpts
-        PCEModel = BayesOpts.PCEModel
-        if hasattr(PCEModel, 'errorModel') and PCEModel.errorModel:
-            discrepancy = True 
-        else:
-            discrepancy = False
+        BayesObj = self.BayesOpts
+        PCEModel = BayesObj.PCEModel
         Model = PCEModel.ModelObj
-        if BayesOpts.emulator:
+
+        if BayesObj.emulator:
             # Evaluate the PCEModel
-            meanPred, stdPred = PCEModel.eval_metamodel(samples=theta, name=BayesOpts.Name,
-                                                        discrepModel=discrepancy)
+            meanPred, stdPred = PCEModel.eval_metamodel(samples=theta, name=BayesObj.Name)
         else:
             # Evaluate the origModel
             
@@ -445,9 +449,40 @@ class MCMC():
             
             # Add one to the counter
             self.counter += 1
-
+        
+        if hasattr(self, 'errorMetaModel') and BayesObj.errorModel:
+            meanPred, stdPred = self.errorMetaModel.eval_errormodel(BayesObj.BiasInputs, meanPred)
+        
         return meanPred, stdPred
     
+    def train_errorModel(self, sampler):
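+        """
+        Fit a GPR-based model error (bias) meta-model, using the posterior
+        mean of the current chain as a point estimate of the parameters.
+        """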
+        BayesObj = self.BayesOpts
+        PCEModel = BayesObj.PCEModel
+        
+        # Prepare the poster samples
+        try:
+            tau = sampler.get_autocorr_time(tol=0)
+        except emcee.autocorr.AutocorrError:
+            tau = 5
+        
+        if np.all(np.isnan(tau)): tau = 5
+        
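+        # emcee heuristic: discard ~2 autocorrelation times as burn-in and
+        # thin by ~half an autocorrelation time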
+        burnin = int(2*np.nanmax(tau))
+        thin = max(1, int(0.5*np.nanmin(tau)))
+        finalsamples = sampler.get_chain(discard=burnin, flat=True, thin=thin)
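+        # Keep only the model parameters; any inferred error terms follow them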
+        Posterior_df = finalsamples[:,:PCEModel.NofPa]
+        
+        # Use the posterior mean as a proxy for the MAP estimate
+        MAP_theta = Posterior_df.mean(axis=0).reshape((1,PCEModel.NofPa))
+
+        # Evaluate the (meta-)model at the MAP
+        y_MAP, y_std_MAP = PCEModel.eval_metamodel(samples=MAP_theta, name=BayesObj.Name)
+
+        # Train a GPR meta-model using MAP
+        errorMetaModel = PCEModel.create_ModelError(BayesObj.BiasInputs,
+                                                    y_MAP,Name='Calib')
+        return errorMetaModel
+        
     def Marginal_llk_emcee(self, sampler, nburn=None, logp=None, maxiter=1000):
         """
         The Bridge Sampling Estimator of the Marginal Likelihood.
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 77fcf3495a108441512e0b407a3f2375e93e066e..c8535ead07064c4d09e9453a7a00d7965ee8c375 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -747,9 +747,9 @@ class Metamodel:
         
         else:
             gp = GaussianProcessRegressor(kernel=kernels[0], n_restarts_optimizer=5,
-                                          alpha = 1e-15, normalize_y=False)
+                                          normalize_y=False)
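+            # sklearn's default alpha (1e-10) now supplies the diagonal jitter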
             gp.fit(X, y)
-        
+
         # Compute score
         if varIdx is not None:
             Score = gp.score(X, y)
@@ -885,7 +885,7 @@ class Metamodel:
         return self
 
     #--------------------------------------------------------------------------------------------------------
-    def create_ModelError(self,Name='Calib'):
+    def create_ModelError(self, X, y, Name='Calib'):
         """
         This function loops over the outputs and fits a GPR-based model error
         (bias) model: the inputs are the bias inputs X and the target is the
         difference between the measured data and the model predictions y.
@@ -899,21 +899,21 @@ class Metamodel:
         self.errorScoresDict = self.AutoVivification()
         self.errorclf_poly = self.AutoVivification()
         self.errorLCerror = self.AutoVivification()
-        self.errorScale = {}
+        self.errorScale = self.AutoVivification()
         # Original EDY
         EDY = self.ExpDesign.Y
         
         # Prepare the input matrix
         from sklearn.preprocessing import MinMaxScaler
-        self.errorScale = {}
         
         # Read data
         if Name.lower() == 'calib':
             MeasuredData = Model.read_Observation()
         else:
             MeasuredData = Model.read_ObservationValid()
-                
-        for out in tqdm(outputNames, ascii=True, desc ="Fitting {} based bias model".format(self.errorRegMethod)):
+
+        # Progress bar disabled; was:
+        # for out in tqdm(outputNames, ascii=True, desc="Fitting {} based bias model".format(self.errorRegMethod)):
+        for out in outputNames:
             # Select data
             try:
                 data = MeasuredData[out].to_numpy()[~np.isnan(MeasuredData[out])]
@@ -921,36 +921,39 @@ class Metamodel:
                 data = MeasuredData[out][~np.isnan(MeasuredData[out])]
             
             # Prepare the input matrix
-            self.errorScale[out] = MinMaxScaler()
-            Expected_Y = np.mean(EDY[out],axis=0).reshape(-1,1)
-            BiasInputs = np.hstack((self.BiasInputs[out],Expected_Y))
-            X_S = self.errorScale[out].fit_transform(BiasInputs)
+            scaler = MinMaxScaler()
+            # Bias (model error): measured data minus the MAP prediction
+            delta = data - y[out][0]
+            BiasInputs = X[out]  # np.hstack((X[out], y[out].reshape(-1,1)))
+            X_S = scaler.fit_transform(BiasInputs)
             
-            self.errorclf_poly[out]["y_1"] =  self.GaussianProcessEmulator(X_S, data,nugTerm=np.var(data))
-
+            self.errorScale[out]["y_1"] = scaler
+            self.errorclf_poly[out]["y_1"] = self.GaussianProcessEmulator(X_S, delta, nugTerm=np.var(delta))
+        
         return self
     
     #--------------------------------------------------------------------------------------------------------
-    def eval_errormodel(self,predictions):
+    def eval_errormodel(self, X, y_pred):
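+        """
+        Evaluate the fitted error model at the bias inputs X: predict the
+        bias, add it to the predictions y_pred, and return the corrected
+        means together with the GP standard deviations.
+        """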
         meanPCEOutputs = {}
         stdPCEOutputs = {}
         
         for Outkey, ValuesDict in self.errorclf_poly.items():
             
-            PCEOutputs_mean = np.zeros_like(predictions[Outkey])
-            PCEOutputs_std = np.zeros_like(predictions[Outkey])
+            PCEOutputs_mean = np.zeros_like(y_pred[Outkey])
+            PCEOutputs_std = np.zeros_like(y_pred[Outkey])
             
             for Inkey, InIdxValues in ValuesDict.items():
                 
                 gp = self.errorclf_poly[Outkey][Inkey]
+                scaler = self.errorScale[Outkey][Inkey]
                 
                 # Transform Samples using scaler
-                for j,pred in enumerate(predictions[Outkey]):
-                    BiasInputs = np.hstack((self.BiasInputs[Outkey],pred.reshape(-1,1)))
-                    Samples_S = self.errorScale[Outkey].transform(BiasInputs)
-                    
+                for j, pred in enumerate(y_pred[Outkey]):
+                    BiasInputs = X[Outkey]  # np.hstack((X[Outkey], pred.reshape(-1,1)))
+                    Samples_S = scaler.transform(BiasInputs)
+
                     PCEOutputs_mean[j], PCEOutputs_std[j] = gp.predict(Samples_S, return_std=True)
-                    
+                    # Corrected output = model prediction + predicted bias
+                    PCEOutputs_mean[j] += pred
+
             meanPCEOutputs[Outkey] = PCEOutputs_mean
             stdPCEOutputs[Outkey] = PCEOutputs_std
         
@@ -2069,7 +2072,7 @@ class Metamodel:
         
         # Evaluate error model
         if discrepModel:
-            meanPCEOutputs, stdPCEOutputs =  self.eval_errormodel(meanPCEOutputs)
+            meanPCEOutputs, stdPCEOutputs = self.eval_errormodel(self.BiasInputs, meanPCEOutputs)
         
         if return_samples:
             return meanPCEOutputs, stdPCEOutputs, samples