diff --git a/BayesValidRox/surrogate_models/RegressionFastARD.py b/BayesValidRox/surrogate_models/RegressionFastARD.py
index 10509108938cd8e91cda41f8ed59a784ce121952..418ea0c0f87377d49f01f592fb75b3f50948895f 100755
--- a/BayesValidRox/surrogate_models/RegressionFastARD.py
+++ b/BayesValidRox/surrogate_models/RegressionFastARD.py
@@ -173,13 +173,20 @@ class RegressionFastARD(LinearModel,RegressorMixin):
             X_mean = np.average(X,axis = 0)
             y_mean = np.average(y,axis = 0)
             X     -= X_mean
-            y      = y - y_mean
+            y     -= y_mean
+            # only centering is applied to y in this branch, so the scale stays at one
+            Y_std  = 1. if y.ndim == 1 else np.ones(y.shape[1], dtype=X.dtype)
         else:
             X_mean = np.zeros(X.shape[1],dtype = X.dtype)
-            # y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
-            y_mean = np.average(y,axis = 0)
-            y     /= y_mean
-        return X,y, X_mean, y_mean, X_std
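+            # without centering, y is left untouched: a zero mean and unit scale make the back-transform in predict() a no-op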
+            y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
+            Y_std  = 1. if y.ndim == 1 else np.ones(y.shape[1], dtype=X.dtype)
+            # TODO: optionally standardize X and y (zero mean, unit variance); kept commented out for now:
+            # X_mean = np.average(X,axis = 0)
+            # X_std = np.std(X,axis = 0)
+            # X_std[0]=np.finfo(np.float32).eps
+            # X     = (X - X_mean) / X_std
+            # y_mean = np.average(y,axis = 0)
+            # Y_std = np.std(y,axis = 0)
+            # y     = (y - y_mean) / Y_std
+        return X,y, X_mean, y_mean, X_std, Y_std
         
         
     def fit(self,X,y):
@@ -202,10 +209,11 @@ class RegressionFastARD(LinearModel,RegressorMixin):
         X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
         n_samples, n_features = X.shape
         
-        X, y, X_mean, y_mean, X_std = self._center_data(X, y)
+        X, y, X_mean, y_mean, X_std, Y_std = self._center_data(X, y)
         self._x_mean_ = X_mean
         self._y_mean  = y_mean
         self._x_std   = X_std
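+        # keep the y scale so predict() can map results back to the original units of y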
+        self._y_std   = Y_std
 
         #  precompute X'*Y , X'*X for faster iterations & allocate memory for
         #  sparsity & quality vectors
@@ -283,7 +291,7 @@ class RegressionFastARD(LinearModel,RegressorMixin):
                 break
             beta    = n_samples - np.sum(active) + np.sum(Aa * Sdiag )
             beta   /= ( rss + np.finfo(np.float32).eps )
-            
+            if self.verbose:
+                print("Active terms: {0} Beta: {1:.3e} rss: {2:.3e}".format(
+                      np.sum(active), beta, rss))
             # update precision parameters of coefficients
             A,converged  = update_precisions(Q,S,q,s,A,active,self.tol,
                                              n_samples,False)
@@ -296,8 +304,10 @@ class RegressionFastARD(LinearModel,RegressorMixin):
                         'in the model: {1}').format(i,np.sum(active)))
 
             if converged or i == self.n_iter - 1:
-                if converged and self.verbose:
+                if converged:
                     print('Algorithm converged !')
+                else:
+                    print('Algorithm did NOT converge!')
                 break
 
         # after last update of alpha & beta update parameters
@@ -359,13 +369,13 @@ class RegressionFastARD(LinearModel,RegressorMixin):
         [1] Bishop, C. M. (2006). Pattern recognition and machine learning. springer.
         '''
         x         = (X - self._x_mean_) / self._x_std
-        # y_hat     = np.dot(x,self.coef_) + self._y_mean
-        y_hat     = np.dot(x,self.coef_) * self._y_mean
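+        # undo the y standardization: rescale by the stored std and shift by the stored mean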
+        y_hat     = np.dot(x,self.coef_) * self._y_std + self._y_mean
         
         if return_std:
             var_hat   = 1./self.alpha_
             var_hat  += np.sum( np.dot(x[:,self.active_],self.sigma_) * x[:,self.active_], axis = 1)
-            var_hat  *= self._y_mean**2
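+            # the predictive variance scales with the square of the y standard deviation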
+            var_hat  *= self._y_std**2
             std_hat = np.sqrt(var_hat)
             return y_hat, std_hat
         else:
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 9805d050cdf9fcd5b1b7834d99b8ebdd928a23b5..881d5ca2964a7f06ca8ebcc8611889f773ba34df 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -347,7 +347,7 @@ class Metamodel:
                 
             elif RegMethod.lower() == 'ebl':
                 clf_poly = EBLinearRegression(fit_intercept=False,optimizer = 'em')
-            
+
             # Fit
             clf_poly.fit(PSI, ModelOutput)