Commit 06e86b92 authored by Farid Mohammadi

[examples][pollution] add the bootstrapping arguments.

parent 3063c96d
Merge request !1: Resolve "Justifiability analysis"
@@ -118,6 +118,11 @@ if __name__ == "__main__":
     # 7) EBL: Empirical Bayesian Learning
     MetaModelOpts.pce_reg_method = 'FastARD'

+    # Bootstrapping
+    # 1) normal 2) fast
+    MetaModelOpts.bootstrap_method = 'fast'
+    MetaModelOpts.n_bootstrap_itrs = 100
+
     # Specify the max degree to be compared by the adaptive algorithm:
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score=1-LOO) estimator is chosen as the final
...
@@ -97,9 +97,9 @@ if __name__ == "__main__":
     MetaModelOpts = MetaModel(Inputs)

     # Select if you want to preserve the spatial/temporal dependencies
-    # MetaModelOpts.dim_red_method = 'PCA'
+    MetaModelOpts.dim_red_method = 'PCA'
     # MetaModelOpts.var_pca_threshold = 99.999
-    # MetaModelOpts.n_pca_components = 12
+    # MetaModelOpts.n_pca_components = 5

     # Select your metamodel method
     # 1) PCE (Polynomial Chaos Expansion) 2) aPCE (arbitrary PCE)
@@ -118,6 +118,11 @@ if __name__ == "__main__":
     # 7) EBL: Empirical Bayesian Learning
     MetaModelOpts.pce_reg_method = 'FastARD'

+    # Bootstrapping
+    # 1) normal 2) fast
+    MetaModelOpts.bootstrap_method = 'fast'
+    MetaModelOpts.n_bootstrap_itrs = 100
+
     # Specify the max degree to be compared by the adaptive algorithm:
     # The degree with the lowest Leave-One-Out cross-validation (LOO)
     # error (or the highest score=1-LOO) estimator is chosen as the final
@@ -170,7 +175,7 @@ if __name__ == "__main__":

     # Plot the Sobol indices
     if MetaModelOpts.meta_model_type != 'GPE':
-        sobol_cell, total_sobol = PostPCE.sobol_indices()
+        total_sobol = PostPCE.sobol_indices()

     # Compute and print RMSE error
     valid_samples = np.load("data/validSet.npy")
@@ -205,7 +210,7 @@ if __name__ == "__main__":
     DiscOutputOpts.add_marginals()
     DiscOutputOpts.Marginals[0].name = '$\\sigma^2_{\\epsilon}$'
     DiscOutputOpts.Marginals[0].dist_type = 'uniform'
-    DiscOutputOpts.Marginals[0].parameters = [0.0, 5.0]
+    DiscOutputOpts.Marginals[0].parameters = [0.0, 0.1]
     BayesOpts.Discrepancy = Discrepancy(DiscOutputOpts)

     # Start the inference
...
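
For context, the sketch below shows how the newly added bootstrapping arguments fit into the surrogate setup used by these examples. Only the option names (dim_red_method, pce_reg_method, bootstrap_method, n_bootstrap_itrs) and their values come from this commit; the import path, the Input() construction, and the explanatory comments are assumptions and may differ from the actual example scripts.

# Minimal sketch, not the actual pollution example script.
# The import path and the Input() setup are assumptions; only the option
# names and values below appear in this commit's diff.
from bayesvalidrox import Input, MetaModel   # assumed package layout

Inputs = Input()                     # parameter marginals would be defined here (omitted)
MetaModelOpts = MetaModel(Inputs)

# Optional dimensionality reduction of the model outputs (second example)
MetaModelOpts.dim_red_method = 'PCA'
# MetaModelOpts.n_pca_components = 5

# Sparse regression method for the PCE coefficients (unchanged by this commit)
MetaModelOpts.pce_reg_method = 'FastARD'

# New in this commit: bootstrapping of the surrogate training
# 1) normal 2) fast
MetaModelOpts.bootstrap_method = 'fast'    # choice between 'normal' and 'fast'
MetaModelOpts.n_bootstrap_itrs = 100       # number of bootstrap iterations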