diff --git a/src/bayesvalidrox/surrogate_models/engine.py b/src/bayesvalidrox/surrogate_models/engine.py
index 0c4f15263973aa2329e601960740015e1c6f630e..a6d2447c6cae42413261361d2560feb678b928f7 100644
--- a/src/bayesvalidrox/surrogate_models/engine.py
+++ b/src/bayesvalidrox/surrogate_models/engine.py
@@ -439,7 +439,7 @@ class Engine:
         if itr_no > 1:
             pc_model = prev_meta_model_dict[itr_no - 1]
             self.SeqDes._y_hat_prev, _ = pc_model.eval_metamodel(
-                samples=x_full[-1].reshape(1, -1))
+                x_full[-1].reshape(1, -1))
             del prev_meta_model_dict[itr_no - 1]
         if itr_no == 1 and self.ExpDesign.tradeoff_scheme == 'adaptive':
             # TODO: this was added just as a fix, needs to be reworked
@@ -448,7 +448,7 @@ class Engine:
             # print(x_prev.shape)
             pc_model = prev_meta_model_dict[itr_no]
             self.SeqDes._y_hat_prev, _ = pc_model.eval_metamodel(
-                samples=x_prev)
+                x_prev)
 
         # Optimal Bayesian Design
         # self.MetaModel.ExpDesignFlag = 'sequential'
@@ -474,7 +474,7 @@ class Engine:
         if self.ExpDesign.adapt_verbose:
             from .adaptPlot import adaptPlot
             y_hat, std_hat = self.MetaModel.eval_metamodel(
-                samples=x_new
+                x_new
             )
             adaptPlot(
                 self.MetaModel, y_new, y_hat, std_hat,
@@ -701,7 +701,7 @@ class Engine:
         output_names = self.out_names
 
         # TODO: Evaluate MetaModel on the experimental design and ValidSet
-        OutputRS, _ = metamod.eval_metamodel(samples=samples)
+        OutputRS, _ = metamod.eval_metamodel(samples)
 
         logLik_data = np.zeros(n_samples)
         logLik_model = np.zeros(n_samples)
@@ -861,7 +861,7 @@ class Engine:
             )
 
         # Monte Carlo simulation for the candidate design
-        Y_MC, std_MC = self.MetaModel.eval_metamodel(samples=X_MC)
+        Y_MC, std_MC = self.MetaModel.eval_metamodel(X_MC)
 
         # Likelihood computation (Comparison of data and
         # simulation results via PCE with candidate design)
@@ -1000,7 +1000,7 @@ class Engine:
 
         # Run the PCE model with the generated samples
         valid_PCE_runs, _ = self.MetaModel.eval_metamodel(
-            samples=self.ExpDesign.valid_samples)
+            self.ExpDesign.valid_samples)
 
         rms_error = {}
         valid_error = {}
diff --git a/src/bayesvalidrox/surrogate_models/sequential_design.py b/src/bayesvalidrox/surrogate_models/sequential_design.py
index 2fb70febc0c1fc88bc73f7d12022d7a41f7d5b5f..a8039dd6194ae8e1d0398ec3255feea4cb83b71a 100644
--- a/src/bayesvalidrox/surrogate_models/sequential_design.py
+++ b/src/bayesvalidrox/surrogate_models/sequential_design.py
@@ -548,7 +548,7 @@ class SequentialDesign:
             # New adaptive trade-off according to Liu et al. (2017)
             # Mean squared error for last design point
             last_EDX = old_EDX[-1].reshape(1, -1)
-            lastPCEY, _ = self.MetaModel.eval_metamodel(samples=last_EDX)
+            lastPCEY, _ = self.MetaModel.eval_metamodel(last_EDX)
             pce_y = np.array(list(lastPCEY.values()))[:, 0]
             y = np.array(list(old_EDY.values()))[:, -1, :]
             mseError = mean_squared_error(pce_y, y)
@@ -607,7 +607,7 @@ class SequentialDesign:
         NCandidate = candidates.shape[0]
         U_J_d = np.zeros(NCandidate)
         # Evaluate all candidates
-        y_can, std_can = self.MetaModel.eval_metamodel(samples=candidates)
+        y_can, std_can = self.MetaModel.eval_metamodel(candidates)
         # loop through candidates
         for idx, X_can in tqdm(enumerate(candidates), ascii=True,
                                desc="BAL Design"):
@@ -659,7 +659,7 @@ class SequentialDesign:
 
         # Run the Metamodel for the candidate
         X_can = X_can.reshape(1, -1)
-        Y_PC_can, std_PC_can = MetaModel.eval_metamodel(samples=X_can)
+        Y_PC_can, std_PC_can = MetaModel.eval_metamodel(X_can)
 
         score = None
         if util_func.lower() == 'alm':
@@ -876,14 +876,14 @@ class SequentialDesign:
         # Compute the mean and std based on the MetaModel
         # pce_means, pce_stds = self._compute_pce_moments(MetaModel)
         if var.lower() == 'alc':
-            Y_MC, Y_MC_std = MetaModel.eval_metamodel(samples=X_MC)
+            Y_MC, Y_MC_std = MetaModel.eval_metamodel(X_MC)
 
         # Old Experimental design
         oldExpDesignX = self.ExpDesign.X
         oldExpDesignY = self.ExpDesign.Y
 
         # Evaluate the PCE metamodels at that location ???
-        Y_PC_can, Y_std_can = MetaModel.eval_metamodel(samples=X_can)
+        Y_PC_can, Y_std_can = MetaModel.eval_metamodel(X_can)
         PCE_Model_can = deepcopy(MetaModel)
         # TODO: this is really not clean, create a workaround for this issue!
         engine_can = deepcopy(self.engine)
@@ -916,7 +916,7 @@ class SequentialDesign:
         if var.lower() == 'mi':
             # Mutual information based on Krause et al.
             # Adapted from Beck & Guillas (MICE) paper
-            _, std_PC_can = engine_can.MetaModel.eval_metamodel(samples=X_can)
+            _, std_PC_can = engine_can.MetaModel.eval_metamodel(X_can)
             std_can = {key: std_PC_can[key] for key in out_names}
 
             std_old = {key: Y_std_can[key] for key in out_names}
@@ -934,8 +934,7 @@ class SequentialDesign:
             # metrics, 51 (2009), pp. 130–145.
 
             # Evaluate the MetaModel at the given samples
-            _, Y_MC_std_can = engine_can.MetaModel.eval_metamodel(
-                samples=X_MC)
+            _, Y_MC_std_can = engine_can.MetaModel.eval_metamodel(X_MC)
 
             # Compute the score
             score = []
@@ -962,7 +961,7 @@ class SequentialDesign:
             )
 
             # Evaluate the MetaModel at the given samples
-            Y_MC, std_MC = PCE_Model_can.eval_metamodel(samples=X_MC)
+            Y_MC, std_MC = PCE_Model_can.eval_metamodel(X_MC)
 
             # Likelihood computation (Comparison of data and simulation
             # results via PCE with candidate design)
@@ -1299,7 +1298,7 @@ class SequentialDesign:
         output_names = self.out_names
 
         # TODO: Evaluate MetaModel on the experimental design and ValidSet
-        OutputRS, _ = MetaModel.eval_metamodel(samples=samples)
+        OutputRS, _ = MetaModel.eval_metamodel(samples)
 
         logLik_data = np.zeros(n_samples)
         logLik_model = np.zeros(n_samples)
@@ -1396,7 +1395,7 @@ class SequentialDesign:
             )
 
         # Monte Carlo simulation for the candidate design
-        Y_MC, std_MC = self.MetaModel.eval_metamodel(samples=X_MC)
+        Y_MC, std_MC = self.MetaModel.eval_metamodel(X_MC)
 
         # Likelihood computation (Comparison of data and
         # simulation results via PCE with candidate design)
@@ -1534,8 +1533,7 @@ class SequentialDesign:
         valid_model_runs = self.ExpDesign.valid_model_runs
 
         # Run the PCE model with the generated samples
-        valid_PCE_runs, _ = self.MetaModel.eval_metamodel(
-            samples=self.ExpDesign.valid_samples)
+        valid_PCE_runs, _ = self.MetaModel.eval_metamodel(self.ExpDesign.valid_samples)
 
         rms_error = {}
         valid_error = {}