From 1484344706aa598f5535c730c2c9ecdfc8907dcb Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Sat, 29 Feb 2020 19:00:24 +0100
Subject: [PATCH] [surrogate] Add debug log-likelihood plots for BRR/ARD regression

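The BRR and ARD regressors now get explicit hyper-prior and convergence
settings, and for RegMethod in ['BRR', 'ARD'] the evolution of the marginal
log-likelihood (clf_poly.scores_) is plotted after each fit, annotated with
the final alpha_, lambda_ and score values. The analytical test case is
switched to BRR with PCE degrees 4-10 and a Bayesian optimal design
(DKL utility, NCandidate = 3).

A minimal sketch of how the diagnostic is triggered from the test script
(only the relevant option is shown; the remaining setup is unchanged):

    # Selecting a Bayesian regressor enables the log-likelihood plot
    MetaModelOpts.RegMethod = 'BRR'   # or 'ARD'
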
---
 .../surrogate_models/surrogate_models.py      | 36 +++++++++++++++----
 .../AnalyticalFunction_Test.py                | 16 ++++-----
 2 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 2d88abcaf..12a6da1da 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -250,14 +250,19 @@ class aPCE:
         if self.RegMethod != 'AaPCE':
             if self.RegMethod == 'BRR':
                 clf_poly = linear_model.BayesianRidge(n_iter=1000, tol=1e-7,
-                                                      fit_intercept = False, compute_score=True)
-            
+                                                      fit_intercept=False,
+                                                      compute_score=True,
+                                                      alpha_1=1e-20,
+                                                      alpha_2=1e-20,
+                                                      lambda_1=1e-7,
+                                                      lambda_2=1e-7)
             elif self.RegMethod == 'ARD':
-                clf_poly = ARDRegression(fit_intercept = False,compute_score=True)#,
-#                                         n_iter=1000, tol= 1e-7,
-#                                         alpha_1=1e-05, alpha_2=1e-05, 
-#                                         lambda_1=1e-9, lambda_2=1e-9)
-            
+                clf_poly = ARDRegression(fit_intercept=False,
+                                         compute_score=True,
+                                         n_iter=500, tol=0.001,
+                                         alpha_1=1e-06, alpha_2=1e-06,
+                                         lambda_1=1e-1, lambda_2=1e-1)
+
             elif self.RegMethod == 'LARS':
                 clf_poly = linear_model.Lars(fit_intercept=False)
             
@@ -265,6 +270,7 @@ class aPCE:
             clf_poly.fit(PSI, ModelOutput)
             #print("R^2:\n", clf_poly.score(PSI, ModelOutput))
             
+
             # Select the nonzero entries of coefficients
             # The first column must be kept (For mean calculations)
             nnz_idx = np.nonzero(clf_poly.coef_)[0]
@@ -404,6 +410,22 @@ class aPCE:
             print("Sparsity index:", round(len(PolynomialDegrees)/P, 3))
             print("Best Indices:\n", PolynomialDegrees)
             
+            # Debug plot: evolution of the marginal log-likelihood over iterations
+            if self.RegMethod in ['BRR', 'ARD']:
+                plt.figure(figsize=(12, 10))
+                plt.title("Marginal log-likelihood")
+                plt.plot(clf_poly.scores_, color='navy', linewidth=2)
+                plt.ylabel("Score")
+                plt.xlabel("Iterations")
+                try:
+                    text = "$\\alpha={:.1f}$\n$\\lambda={:.3f}$\n$L={:.1f}$".format(
+                        clf_poly.alpha_, clf_poly.lambda_, clf_poly.scores_[-1])
+                except (AttributeError, TypeError):
+                    # lambda_ is an array (ARD) or unavailable; omit it from the label
+                    text = "$\\alpha={:.1f}$\n$L={:.1f}$".format(
+                        clf_poly.alpha_, clf_poly.scores_[-1])
+                plt.text(0.05, -1.0, text, fontsize=18)
+                plt.show()
             print ('='*80)
         
         
diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
index 41e101459..2a74d4539 100644
--- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
+++ b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py
@@ -76,7 +76,7 @@ if __name__ == "__main__":
     Inputs.Marginals[1].Name = 'x2'
     Inputs.Marginals[1].DistType = 'unif'
     Inputs.Marginals[1].Parameters =  [-5, 5]
-
+    
     #=====================================================
     #======  POLYNOMIAL CHAOS EXPANSION METAMODELS  ======
     #=====================================================    
@@ -86,8 +86,8 @@ if __name__ == "__main__":
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score=1-LOO)estimator is chosen as the final 
     # metamodel.
-    MetaModelOpts.MinPceDegree = 2 #2
-    MetaModelOpts.MaxPceDegree = 8 #8
+    MetaModelOpts.MinPceDegree = 4 #2
+    MetaModelOpts.MaxPceDegree = 10 #8
     
     # q-quasi-norm 0<q<1 (default=1)
     MetaModelOpts.q = 1 #0.75
@@ -97,7 +97,7 @@ if __name__ == "__main__":
     # 1)AaPCE: Adaptive aPCE  2)BRR: Bayesian Ridge Regression 
     # 3)LARS: Least angle regression  4)ARD: Bayesian ARD Regression (Non-degree Adaptive)
     
-    MetaModelOpts.RegMethod = 'ARD'
+    MetaModelOpts.RegMethod = 'BRR'
     
     # Print summary of the regression results
     #MetaModelOpts.DisplayFlag = True
@@ -143,21 +143,21 @@ if __name__ == "__main__":
     MetaModelOpts.ExpDesign.MaxFunItr = 200
     
     # Use when 'Voronoi' or 'MC' or 'LHS' chosen
-    MetaModelOpts.ExpDesign.NCandidate = 5
+    MetaModelOpts.ExpDesign.NCandidate = 3
     MetaModelOpts.ExpDesign.NrofCandGroups = 4
     
     # -------- Exploitation ------
     # 1)'BayesOptDesign' 2)'VarOptDesign' 3)'alphabetic' 4)'Space-filling'
-    MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign'
+    MetaModelOpts.ExpDesign.ExploitMethod = 'BayesOptDesign'
     
     # BayesOptDesign -> when data is available
     # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision)
     # 3)APP (A-Posterior-percision) 
-    #MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
+    MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP']
     
     # VarBasedOptDesign -> when data is not available
     # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)ALM, 4)LOOCV
-    MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'#['EIGF', 'Entropy', 'LOOCV']
+    #MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'#['EIGF', 'Entropy', 'LOOCV']
     
     # alphabetic
     # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality)
-- 
GitLab