diff --git a/BayesValidRox/BayesInference/BayesInference.py b/BayesValidRox/BayesInference/BayesInference.py index 801cd352345ce4aab89bbf8aa3134515025ac359..1b291e03f8983316af2a2c06f78d08eae01f59b5 100644 --- a/BayesValidRox/BayesInference/BayesInference.py +++ b/BayesValidRox/BayesInference/BayesInference.py @@ -59,7 +59,8 @@ class BayesInference: self.perturbedData = [] self.MeasuredData = None self.MeasurementError = [] - + self.selectedIndices = None + self.ModelPriorPred = [] self.ModelPosteriorPred = [] self.PCEAllPred_df = [] @@ -183,12 +184,18 @@ class BayesInference: np.fill_diagonal(covMatrix_PCE, varPCE) covMatrix += covMatrix_PCE + # Select the data points to compare + if self.selectedIndices is not None: + TotalOutputs = TotalOutputs[:,self.selectedIndices] + Data = Data[self.selectedIndices] + covMatrix = covMatrix[np.ix_(self.selectedIndices,self.selectedIndices)] + # Compute loglikelihood try: logL = multivariate_normal.logpdf(TotalOutputs, mean=Data, cov=covMatrix) except: logL = -np.inf - # print("logL: {0:.3f}".format(logL)) + return logL #-------------------------------------------------------------------------------------------------------- @@ -719,9 +726,9 @@ class BayesInference: figPosterior.set_size_inches((24,16)) if self.emulator: - plotname = '/Poisterior_Dist_'+Model.Name+'_emulator' + plotname = '/Posterior_Dist_'+Model.Name+'_emulator' else: - plotname = '/Poisterior_Dist_'+Model.Name + plotname = '/Posterior_Dist_'+Model.Name figPosterior.savefig('./'+OutputDir+ plotname + '.pdf', bbox_inches='tight') diff --git a/BayesValidRox/BayesInference/MCMC.py b/BayesValidRox/BayesInference/MCMC.py index 11b7fef81a3b8aa3631e19fee31678d64af56215..96f4afee65bd0653bfac4ec0c1fe869bb48fd4ae 100755 --- a/BayesValidRox/BayesInference/MCMC.py +++ b/BayesValidRox/BayesInference/MCMC.py @@ -64,10 +64,8 @@ class MCMC(): initsamples = priorDist.sample(self.nwalkers).T except: # when aPCE selected - gaussian kernel distribution - # initsamples = 
priorDist.resample(self.nwalkers).T - randIdx = np.random.randint(0, len(PCEModel.ExpDesign.Input_distributions[0]), - self.nwalkers) - initsamples = PCEModel.ExpDesign.Input_distributions.T[randIdx] + theta = PCEModel.ExpDesign.Input_distributions.mean(axis=1) + initsamples = [theta + 1e-1*np.multiply(np.random.randn(ndim), theta) for i in range(self.nwalkers)] else: # Pick samples based on a uniform dist between min and max of each dim @@ -82,7 +80,7 @@ class MCMC(): PCEModel.ExpDesign.BoundTuples = BoundTuples - # TODO: Check if bias term needs to be inferred + # Check if bias term needs to be inferred if Discrepancy.optSigma != 'B': sigma2Samples = Discrepancy.get_Sample(self.nwalkers) @@ -135,7 +133,7 @@ class MCMC(): # Burn-in print("\n Burn-in period is starting:") pos = sampler.run_mcmc(initsamples, self.nburn, progress=True) - + # Reset sampler sampler.reset() @@ -328,7 +326,7 @@ class MCMC(): # Evaluate Model/PCEModel at theta meanPCEPriorPred, BayesOpts.stdPCEPriorPred = self.eval_model(theta) - + # Likelihood return BayesOpts.normpdf(meanPCEPriorPred, self.Observation, self.TotalSigma2, Sigma2) @@ -361,7 +359,7 @@ class MCMC(): else: # Compute log prior log_prior = self.log_prior(theta) - # Compute loLikelihood + # Compute log Likelihood log_likelihood = self.log_likelihood(theta) return log_prior + log_likelihood else: