From 267ce3f21e8ca94a51bec2250d31dbb28cf03c8e Mon Sep 17 00:00:00 2001
From: Farid Mohammadi <farid.mohammadi@iws.uni-stuttgart.de>
Date: Thu, 19 Mar 2020 15:29:10 +0100
Subject: [PATCH] [ExpDesign] fixed the special case of the lognormal
 distribution and fixed the Beam test.

---
 BayesValidRox/surrogate_models/ExpDesigns.py |  29 +++---
 .../surrogate_models/surrogate_models.py     |  57 ++++++-----
 .../AnalyticalFunction_Test.py               |   4 +-
 .../tests/BeamTest/SSBeam_Deflection.inp     |   4 +-
 BayesValidRox/tests/BeamTest/Test_Beam.py    |  93 +++++++++++-------
 BayesValidRox/tests/BeamTest/myBeam9points   | Bin 19528 -> 19528 bytes
 .../tests/BeamTest/myBeam9points.cpp         |   4 +-
 7 files changed, 111 insertions(+), 80 deletions(-)

diff --git a/BayesValidRox/surrogate_models/ExpDesigns.py b/BayesValidRox/surrogate_models/ExpDesigns.py
index 95b05ebac..4cb853f86 100644
--- a/BayesValidRox/surrogate_models/ExpDesigns.py
+++ b/BayesValidRox/surrogate_models/ExpDesigns.py
@@ -77,15 +77,14 @@ class ExpDesigns:
 
         ## Sample the distribution of parameters
         if len(Inputs.Marginals[0].InputValues) == 0 and self.SamplingMethod != 'user':
-            ## Case I
-            # polytype not arbitrary
+            ## Case I = polytype not arbitrary
 
             # Create ExpDesign in the actual space
-            X = chaospy.generate_samples(NrSamples, domain=self.JDist, rule=SamplingMethod).T
+            X = chaospy.generate_samples(NrSamples, domain=self.JDist , rule=SamplingMethod).T
 
         else:
-            # Case II
+            # Case II:
             # polytype arbitrary or Input values are directly prescribed by the user.
 
             # Generate the samples based on requested method
 
@@ -112,6 +111,7 @@ class ExpDesigns:
 
         origJoints = []
         Polytypes = []
+
        for parIdx in range(NofPa):
            DistType = Inputs.Marginals[parIdx].DistType
            params = Inputs.Marginals[parIdx].Parameters
@@ -119,35 +119,40 @@ class ExpDesigns:
 
            if DistType == 'unif':
                polytype = 'legendre'
                Dist = chaospy.Uniform(lower=params[0],upper=params[1])
-
+
            elif DistType == 'norm':
                polytype = 'hermite'
                Dist = chaospy.Normal(mu=params[0],sigma=params[1])
-
+
            elif DistType == 'gamma':
                polytype = 'laguerre'
                Dist = chaospy.Gamma(shape=params[0],scale=params[1],shift=params[2])
-
+
            elif DistType == 'beta':
                polytype = 'jacobi'
                Dist = chaospy.Beta(alpha=params[0],beta=params[1], lower=params[2], upper=params[3])
-
+
            elif DistType == 'lognormal':
-               polytype = 'arbitrary'
-               Dist = chaospy.LogNormal(mu=params[0],sigma=params[1])
+               polytype = 'hermite'
+
+               Mu = np.log(params[0]**2 / np.sqrt(params[0]**2 + params[1]**2))
+               Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+
+               Dist = chaospy.LogNormal(mu=Mu,sigma=Sigma)
 
            elif DistType == 'exponential':
                polytype = 'arbitrary'
                Dist = chaospy.Exponential(scale=params[0],shift=params[1])
-
+
            elif DistType == 'weibull':
                polytype = 'arbitrary'
                Dist = chaospy.Weibull(shape=params[0],scale=params[1],shift=params[2])
-
+
            elif DistType is None:
                polytype = 'arbitrary'
                Dist = None
+
            else:
                raise ValueError('DistType %s for parameter %s is not available.'%(DistType,parIdx+1))
 
diff --git a/BayesValidRox/surrogate_models/surrogate_models.py b/BayesValidRox/surrogate_models/surrogate_models.py
index 94f6303cc..c89d06f32 100644
--- a/BayesValidRox/surrogate_models/surrogate_models.py
+++ b/BayesValidRox/surrogate_models/surrogate_models.py
@@ -178,7 +178,7 @@ class aPCE:
         if parIdx_or_parms is None:
             print('%s polynomials are parametrically defined! 
\ Please provide them as an input argument.',polytype) - parms = parIdx_or_parms + Parms = parIdx_or_parms ## Arbitrary polynomials @@ -202,21 +202,30 @@ class aPCE: sqrt_b0 = 1 sqrt_bn = lambda n : np.sqrt(n) bounds = [-np.inf,np.inf] - + polyType = 'Normal' + params = [0, 1] + Dist = chaospy.Normal(mu=params[0], sigma=params[1]) + # Uniform distribution elif polytype == 'legendre': an = lambda n : np.zeros((n+1)) sqrt_b0 = 1 sqrt_bn = lambda n : np.sqrt(1./(4-(1/np.square(n)))) bounds = [-1,1] - + polyType = 'Uniform' + params = [-1, 1] + Dist = chaospy.Uniform(params[0], params[1]) + # Gamma distribution elif polytype == 'laguerre': - an = lambda n : 2*n + parms[1] + an = lambda n : 2*n + Parms[1] sqrt_b0 = 1 - sqrt_bn = lambda n : -1*np.sqrt(np.multiply(n ,(n+parms[1]-1))) + sqrt_bn = lambda n : -1*np.sqrt(np.multiply(n ,(n+Parms[1]-1))) bounds = [0,np.inf] - + polyType = 'Gamma' + params = self.Inputs.Marginals[parIdx_or_parms].Parameters + Dist = chaospy.Gamma(shape=params[0],scale=params[1],shift=params[2]) + # elif polytype == 'jacobi': # Beta distribution # # in order to avoid zeros on the denominator some special # # cancelation cases are hard-coded. @@ -243,6 +252,8 @@ class aPCE: # sqrt_bn = @(n) sqrt( 4 .* n .* (n+a) .* (n+b) .* (n+bpa)./((2 .* n + bpa ).^2 .* (2.*n + bpa + 1).*(2.*n + bpa -1)) ) .* 0.5; # # bounds = [0,1]; + # polyType = 'Beta' + # prams = [-1, 1] elif polytype == 'zero': #case 'zero' # the very special case of constant - recurrence terms are zero: @@ -250,15 +261,16 @@ class aPCE: an = lambda n: np.zeros((1, len(n))) #@(n) zeros(1,length(n)); sqrt_bn = lambda n: np.zeros((1, len(n))) #@(n) zeros(1,length(n)); bounds = [-np.inf,np.inf] - + polyType = 'Constant' + prams = [-1, 1] + else: raise ValueError('Unknown polynomial type!') ## Assemble the recurrence coefficients into the output - AB['recurrence terms'] = np.vstack((an(n_max) , np.hstack((sqrt_b0, sqrt_bn(range(1,n_max+1)))))).T - AB['bounds'] = bounds + AB = np.vstack((an(n_max) , np.hstack((sqrt_b0, sqrt_bn(range(1,n_max+1)))))).T - return AB + return AB, Dist def eval_rec_rule(self,normX,AB,nonrecursive=False): """ @@ -337,20 +349,18 @@ class aPCE: # ---------------- else: # Only for Uniform, Normal, Gamma, Beta - - # Forward Rosenblatt transformation - u = origSpaceDist.fwd(ExpDesignX) for parIdx in range(NofPa): - AB = self.poly_rec_coeffs(n_max, polytypes[parIdx], parIdx) - bounds = AB['bounds'] + AB, dist = self.poly_rec_coeffs(n_max, polytypes[parIdx], parIdx) + + # Forward Rosenblatt transformation + u = origSpaceDist[parIdx].fwd(ExpDesignX[:,parIdx]) # Rosenblatt inverse Transformation based the bounds given by poly_rec_coeffs - dist = chaospy.Uniform(bounds[0], bounds[1]) - normX = dist.inv(u[:,parIdx]) + U = dist.inv(u) - univ_vals[:,parIdx,:] = self.eval_rec_rule(normX,AB['recurrence terms'],nonrecursive=False) - + univ_vals[:,parIdx,:] = self.eval_rec_rule(U,AB,nonrecursive=False) + return univ_vals #-------------------------------------------------------------------------------------------------------- def PCE_create_Psi(self,BasisIndices,univ_p_val): @@ -777,12 +787,11 @@ class aPCE: UtilMethod = var OutputDictY = self.ExpDesign.Y OutputNames = list(OutputDictY.keys())[1:] - print("UtilMethod:", UtilMethod) + if UtilMethod == 'Entropy': # ----- Entropy/MMSE ----- # Compute perdiction variance of the old model Y_PC_can, std_PC_can = self.eval_PCEmodel(X_can) - print("Y_PC_can:\n", Y_PC_can) canPredVar = {key:std_PC_can[key]**2 for key in OutputNames} @@ -941,7 +950,6 @@ class aPCE: def 
Utility_runner(self, method, Model, allCandidates,index, sigma2Dict=None, var=None): if method == 'VarOptDesign': - print("\nNumber of samples to explore:", len(allCandidates)) U_J_d = self.util_VarBasedDesign(allCandidates, index, var) elif method == 'BayesOptDesign': @@ -1229,6 +1237,7 @@ class aPCE: else: split_allCandidates = exploration.closestPoints + # Split the candidates in groups for multiprocessing args = [(ExploitMethod, Model, split_allCandidates[index], index, sigma2Dict , var) for index in goodSampleIdx] #args = [(ExploitMethod, Model, exploration.closestPoints[index], index, sigma2Dict , var) for index in goodSampleIdx] @@ -1429,10 +1438,8 @@ class aPCE: # Perdiction try: # with error bar clf_poly = self.clf_poly[Outkey][Inkey] - print("clf_poly:", clf_poly) - print("PSI_Val:\n",PSI_Val) y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True) - print("y_mean:\n",y_mean) + PCEOutputs_mean[:, idx] = y_mean PCEOutputs_std[:, idx] = y_std diff --git a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py index 7949a8f3d..7b0386265 100644 --- a/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py +++ b/BayesValidRox/tests/AnalyticalFunction/AnalyticalFunction_Test.py @@ -100,7 +100,7 @@ if __name__ == "__main__": MetaModelOpts.RegMethod = 'BRR' # Print summary of the regression results - MetaModelOpts.DisplayFlag = True + #MetaModelOpts.DisplayFlag = True # ------ Experimental Design -------- # Generate an experimental design of size NrExpDesign based on a latin @@ -118,7 +118,7 @@ if __name__ == "__main__": MetaModelOpts.ExpDesign.SamplingMethod = 'halton' # Sequential experimental design (needed only for sequential ExpDesign) - MetaModelOpts.ExpDesign.NrofNewSample = 1 + MetaModelOpts.ExpDesign.NrofNewSample = 2 MetaModelOpts.ExpDesign.MaxNSamples = 50 MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-6 diff --git a/BayesValidRox/tests/BeamTest/SSBeam_Deflection.inp b/BayesValidRox/tests/BeamTest/SSBeam_Deflection.inp index d9949aca2..00b170341 100644 --- a/BayesValidRox/tests/BeamTest/SSBeam_Deflection.inp +++ b/BayesValidRox/tests/BeamTest/SSBeam_Deflection.inp @@ -2,5 +2,5 @@ 0.15 % b in m 0.3 % h in m 5 % L in m -30 % E in Pa -10 % p in N/m +30000e+6 % E in Pa +10000 % p in N/m diff --git a/BayesValidRox/tests/BeamTest/Test_Beam.py b/BayesValidRox/tests/BeamTest/Test_Beam.py index c78313df3..c3e254fbe 100644 --- a/BayesValidRox/tests/BeamTest/Test_Beam.py +++ b/BayesValidRox/tests/BeamTest/Test_Beam.py @@ -62,23 +62,23 @@ if __name__ == "__main__": Inputs.addMarginals() Inputs.Marginals[0].Name = 'Beam width' - Inputs.Marginals[0].DistType = 'lognorm' - Inputs.Marginals[0].Moments = [0.15, 0.0075] + Inputs.Marginals[0].DistType = 'lognormal' + Inputs.Marginals[0].Parameters = [0.15, 0.0075] Inputs.addMarginals() Inputs.Marginals[1].Name = 'Beam height' - Inputs.Marginals[1].DistType = 'lognorm' - Inputs.Marginals[1].Moments = [0.3, 0.015] + Inputs.Marginals[1].DistType = 'lognormal' + Inputs.Marginals[1].Parameters = [0.3, 0.015] Inputs.addMarginals() Inputs.Marginals[2].Name = 'Youngs modulus' - Inputs.Marginals[2].DistType = 'norm' #'lognorm' - Inputs.Marginals[2].Moments = [30, 4.5] #[30000e+6, 4500e+6] + Inputs.Marginals[2].DistType = 'lognormal' + Inputs.Marginals[2].Parameters = [30000e+6, 4500e+6] Inputs.addMarginals() Inputs.Marginals[3].Name = 'Uniform load' - Inputs.Marginals[3].DistType = 'norm' #'lognorm' - Inputs.Marginals[3].Moments = [10, 2] + 
Inputs.Marginals[3].DistType = 'lognormal' + Inputs.Marginals[3].Parameters = [1e4, 2e3] #===================================================== #====== POLYNOMIAL CHAOS EXPANSION METAMODELS ====== @@ -107,49 +107,68 @@ if __name__ == "__main__": # hypercube sampling of the input model or user-defined values of X and/or Y: MetaModelOpts.addExpDesign() + # One-shot (normal) or Sequential Adaptive (sequential) Design + MetaModelOpts.ExpDesign.Method = 'normal' MetaModelOpts.ExpDesign.NrSamples = 20 - MetaModelOpts.ExpDesign.SamplingMethod = 'MC' # 1)MC 2)LHS 3)PCM 4)LSCM 5)user - MetaModelOpts.ExpDesign.Method = 'sequential' # 1) normal 2) sequential - #MetaModelOpts.ExpDesign.X = np.load('CollocationPoints.npy') + + # Sampling methods + # 1) random 2) latin_hypercube 3) sobol 4) halton 5) hammersley 6) chebyshev(FT) + # 7) korobov 8) grid(FT) 9) nested_grid(FT) 10)user + MetaModelOpts.ExpDesign.SamplingMethod = 'halton' + + #MetaModelOpts.ExpDesign.X = np.load('EDX_Beam9points.npy') # Sequential experimental design (needed only for sequential ExpDesign) - MetaModelOpts.ExpDesign.MaxNSamples = 50 #150 - MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-3 + MetaModelOpts.ExpDesign.NrofNewSample = 2 + MetaModelOpts.ExpDesign.MaxNSamples = 50 + MetaModelOpts.ExpDesign.ModifiedLOOThreshold = 1e-6 + + DiscrepancyOpts = Discrepancy('') + DiscrepancyOpts.Type = 'Gaussian' + DiscrepancyOpts.Parameters = np.square([1e-8, 1e-2, 1e-2, 1e-3, 4e-3, 5e-3, 1e-3, + 1e-2, 1e-2, 1e-2, 1e-2, 1e-9]) + MetaModelOpts.Discrepancy = DiscrepancyOpts # Plot the posterior snapshots for SeqDesign MetaModelOpts.ExpDesign.PostSnapshot = True MetaModelOpts.ExpDesign.stepSnapshot = 1 - MetaModelOpts.ExpDesign.MAP = (0.150064, 0.299698, 30.763206, 10.164872) + MetaModelOpts.ExpDesign.MAP = (0.150064, 0.299698, 30.763206e9, 10.164872e2) MetaModelOpts.ExpDesign.parNames = ['Beam width', 'Beam height', 'Youngs modulus', 'Uniform load'] - - # -------- Optimality criteria: Optimization ------ - # 1)'dual annealing' 2)'minimization' 3)'BayesOptDesign' - MetaModelOpts.ExpDesign.SeqOptimMethod = 'BayesOptDesign' - MetaModelOpts.ExpDesign.ExplorationMethod = 'Voronoi' #1)'Voronoi' 2)'LHS' 3) 'MC' + + # ------------------------------------------------ + # ------- Sequential Design configuration -------- + # ------------------------------------------------ + # 1) 'None' 2) 'epsilon-decreasing' + MetaModelOpts.ExpDesign.TradeOffScheme = 'None' + #MetaModelOpts.ExpDesign.nReprications = 2 + # -------- Exploration ------ + #1)'Voronoi' 2)'MC' 3)'LHS' 4)'dual annealing' + MetaModelOpts.ExpDesign.ExploreMethod = 'Voronoi' + + # Use when 'dual annealing' chosen + MetaModelOpts.ExpDesign.MaxFunItr = 200 + + # Use when 'Voronoi' or 'MC' or 'LHS' chosen + MetaModelOpts.ExpDesign.NCandidate = 1000 + MetaModelOpts.ExpDesign.NrofCandGroups = 4 - MetaModelOpts.ExpDesign.MaxFunItr = 100 + # -------- Exploitation ------ + # 1)'BayesOptDesign' 2)'VarOptDesign' 3)'alphabetic' 4)'Space-filling' + MetaModelOpts.ExpDesign.ExploitMethod = 'VarOptDesign' - MetaModelOpts.ExpDesign.NCandidate = 1000 # 5000 - MetaModelOpts.ExpDesign.NrofCandGroups = 4 + # BayesOptDesign -> when data is available # 1)DKL (Kullback-Leibler Divergence) 2)DPP (D-Posterior-percision) # 3)APP (A-Posterior-percision) - MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' - -# DiscrepancyOpts = Discrepancy('') -# DiscrepancyOpts.Type = 'Gaussian' -# DiscrepancyOpts.Parameters = [1e-8, 1e-2, 1e-2, 1e-3, 4e-3, 5e-3, 1e-3, -# 1e-2, 1e-2, 1e-2, 1e-2, 1e-9] #1e-5 -# MetaModelOpts.Discrepancy = 
DiscrepancyOpts + #MetaModelOpts.ExpDesign.UtilityFunction = 'DKL' #['DKL', 'DPP'] - # -------- Optimality criteria: alphabetic ------ -# MetaModelOpts.ExpDesign.SeqOptimMethod = 'alphabetic' -# MetaModelOpts.ExpDesign.NCandidate = 5000 -# -# # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality) -# # 3)K-Opt (K-Optimality) -# MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' -# + # VarBasedOptDesign -> when data is not available + # Only with Vornoi >>> 1)Entropy 2)EIGF, 3)ALM, 4)LOOCV + MetaModelOpts.ExpDesign.UtilityFunction = 'Entropy'#['EIGF', 'Entropy', 'LOOCV'] + # alphabetic + # 1)D-Opt (D-Optimality) 2)A-Opt (A-Optimality) + # 3)K-Opt (K-Optimality) + #MetaModelOpts.ExpDesign.UtilityFunction = 'D-Opt' #['D-Opt', 'A-Opt', 'K-Opt'] # >>>>>>>>>>>>>>>>>>>>>> Build Surrogate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< # Adaptive sparse arbitrary polynomial chaos expansion diff --git a/BayesValidRox/tests/BeamTest/myBeam9points b/BayesValidRox/tests/BeamTest/myBeam9points index ff025184b64135d4af02fdbe06ff57e9f6251e4b..559357bf494373c8ebc0a02892f79831dd70313d 100755 GIT binary patch delta 1936 zcmZ8ie@s(X6u!5mP_RHN{XtitmDWKKN}*JNteYK1UzItX*ku`GgZ&63n>wd%Zbn$J z%ooy4cjoM03pf(z786WemJA~@H$kJwvMf$q3@%xC3&tTJn@rZ7`(9s>z2tHFeeXNx ze&^i#cta676rqPkQm;6OcwISsy<Zo}mp~%I33@~CNjbSm^TFvo=K}ka)|?KE+)!4h zM<WXN62+V~dMRC<tiGfngbSx{iH>F!QrZVfLlKRD%V4J+@Qk5`J`JB5UZ7@}F}z5x zLY?sp4S>#6NUNdRWT*MiV0x!DB>WwVO=aprhs3B2O~!EqBNq;hJix#Y3U}^{a1#p% zOXnE$9!q5od^FoegnFbVf=IqBk+)4qu46j>JFYXdgVp0NaQro)TuR=EItMFemqn@; z%!!NgknE>87@-ahXWN|936XO_bW?QY)k`bB#8&Ks(QKRN%Y=H&m?(KfQr}dIrcF$} zQ|yQNI|%(|qq8xQcXT1IR?7QA${WT9f)idh+nm`6^~MFYQc~}f)FGxGgRjjt<5hVr zm1;r#ElijT9%JIE%!UEB5nXIhovMC=<U8zRYC#-d*gKVi-f?7`#c49%UCg&}b)xp3 zcplDq1-$zRPkBhP6wnqZw!Ej9Plgi~AN>r*EmnF5QgU*w?!VYhhDO9z28Cs_B79z~ zKPc>knw;U*w?y)YNOp!LE1_OdGR)kZzqcr9mjp#Y*n2NYvfLmR@lG%p_KhsXrdBZF z|4)ZeCRO#QVW(j|_M$`ftPPJtzm}be_Z5Wj=h3(1Vo-Q<hLswKcfyZyyy$F>(F@&m z!f5WAnvTQ@XLQmExBx5A9%{y>=)_kKrCdZ_Jl1N-_;pMXDxfh>-)a?$do4<-K`n!< z8&z<j>=GoUW9-LYedry#PfYs_u|lGW52r=9@=LZ|g7PcqM#%rdbv`A$!g(~y3t=WN zhgN{jn&o4UCiZaRMC6^!VVqLp<n3%`+{VRR9W+|AxZa1<@=PyJEwXbGp<Gb*)WRfa z3J<|qtRR@-jJ3pG&t|k1F&8!zXmP}0Z-9-YQ<cgPiuZ!XR^ofjm1#Y|MnW>JYY6c_ z>kkI_YyPeNy4vvapvFeFa^aE&j9dKte1M;=b^gw^-&S)2^Gab|beqknfKT$>PZs_e zi*;d<?7Okp5XM4`J1~}`IUD{9G0&qDj7=DuF}{s)44+3Z9z?AE7n1Eov=_?krL+wi z?FD9TG8V&kGI8bLuX?;cf^K`!<FBG6i$#;7K7`p0q}iQ@tB$7;lJpG%e}=Y0QInp` zwJVjQX{aQGmG~a-i;(KD(kXB{tP1r5SnpU%-Ei1pd7{ridX-yF>&fw?FzwQlXtD~y z&&d~(@X6;Hs$cbFAVW2zC&OCRq@GM`_eKn4R`&xvU?4vkRMESR<0&66e>Cl!9#_F@ z&I-qhgS01lzu2WTd0p-j&+ew(+ji|P+h11ca#uKQt6W}K?RG?)+(Alv;Kwq9QcHR< zn#$yFkyJ|ht0YUwWc*vTn9m|@QFHlGO3%S5PbobGw>&2NOu?MTMfZWTqLfCW&sC&Q zoN=SKm5#$L?{+k{d1a;on{TY_r(LkD%0x4vs_GwF3EFC7bVGG+2K^0MxYE=fo3vaH zbaSh8y>TQ*0scUZY&ZhT0;|$wo0Iv_6j+(s#x@AAwrF=?kWwcMlxM-d4VV7|oybOx delta 1918 zcmY*ae@qj16u)oFLHQwVX|Dwe_M~i3l=8D81{j@;9Mc$`phFagbBS9r#cY_lP0^S- zE~FdmZH_FORJY6-V}dK0BrXxq37g_D8l31KxMi+_M6JvL4!ifg>kqi(`uaZa^WOX1 zd-r3YgA8<#wxQ^WbVfQR+HalLcBjcs$ihi-TYqEp<PT+Ebldke-x<17Ub21D%xSl$ zLy;S+NZ>ucE>K0PM&cMoa4mS#Gos5R<T5C^Eb=|rI14!puW>8L7C6jpCRUi_wvfM} z+VB&p1+6iYyb8;W7LpJ3#t)iZ-YLJ|r`E7XV;IJ#uI=@ywazvfb3F>g#5u7J!~~af z=sz@Kzjt_Aa@SCUu5pIC_OGL<veNrTp7B;7G`=3eIvKn{!5qpH`-GBKXCo~rUJ}Hc z-eftuk`l34Y=G3tuEm@HClbN)1PA6Vfjx<Q!R-)98zx<nRN3R?jsorYjij(oXm##~ z`Xqz(^HAP^Ayj;?oHwkJ<ohw;-3YNsytOLCZVs~xWwu*p|47+ds75vuVlNG|$ECs9 z-;vqxQuY8GP2vq_gUZ+mE~Ml*Tu;i#{}9POb-lC{H_dNlPXsuwQp)zeE>|6kLd7_; zVpx*Q8!|%`>%$dwur*mE?eHKugX{vm=_3Uj1?{F%atMAinaMCbG^Lso|E2qL4oMf# z?~Q&cxpmTt`f(PEQ?8Oam`O2jJN3`J&?edKl;uoB$Z{&BeEXl&b(MA}pkN&(;^yZq 
z{`q78WGj#p*!1)y>`h&mQ7grI1FFOkLA*+@^bZP$mA(gksTIvTg2mZywV^t+z(MDB zR7S>6%}F_fxO8JOvb<hQ@uo=K`j9sIoNAKp%P6UrU8j_<YU)W)ZCSX>6AH>VrWNVu z=p#L+S&dY}Da{Af@1|^*Gx*qSklo-zl?a0i7h6*h99IhBg*n7TitU!M;F2O>ky+Qg zhDKJ)k>I{cDE8ufcE_BmK{Z{zd1}5yH`q8RX)UFQzthQohZVd8e}n@U2k^eD+kBx$ z0FQa0rGie-Mk6jxt-%Gj?0IxTt?Xkp8q95gNprScZQf7E<JjO>UnSmj)HteFxn8J@ z<Cz-4mAxA6I)^x0B|cqc{YyCbKqd6fD!oOXdfuRjduwpeL35e%&&r`Ot?*^lL%-jR z{?sGCe*mrOvESc_HUS0m_@2<tq8zjv(QZe(6YVI*ThO+kKQ#;Y(y~Y!Xe~M94RBa8 zk}|OrK6s%c1^;cr^Ak8=$trsV^_fg@QK~k~rmt~&j$w))-;xn$(Q(9U4k=bFh!hSg zmA)8cG7K$o5zk(@Z!wcGh)y>vrk+7odL`juSGwt?>m1XmTz5s!oQQCdZas53l0|1C z@=^pw&c(BndgfL<JE~{yY1kHyY1QuO<d{zrekMmaW}IU^*V0c%#q{X01s(}J9AC1Y zba|$i*p#M1TW)q<)7GXP?>FV`%Ufp4EwtrX`63(QaE+vSw&hk6(goM@IHiW^L2D%4 zK=L_L0;93W1?T=M86D&HB;|ia$Pk<<$RWSL-2x+ilwhX7Mvj2BFo$$|Y73(X>464& zGx5V+`%V<IcBxuH%AsM|dD01sij5=#ii@9-BG4>1cvdb?jVC^MTgZv-;^lT-a7-xD v_5{43or3$w34baVRTaeqWoj(2wQ6a!n{EUTx93<@KOu{uH(v)Iue|a<pR-8g diff --git a/BayesValidRox/tests/BeamTest/myBeam9points.cpp b/BayesValidRox/tests/BeamTest/myBeam9points.cpp index 2ad6b9e6e..546428bd6 100644 --- a/BayesValidRox/tests/BeamTest/myBeam9points.cpp +++ b/BayesValidRox/tests/BeamTest/myBeam9points.cpp @@ -66,8 +66,8 @@ int main(int argc, char* argv[]) { beam_width = params[0]; beam_height = params[1]; beam_span = params[2]; - youngs_modulus = params[3] * 1e+9; - load = params[4] * 1e+3; + youngs_modulus = params[3]; + load = params[4]; inputfile.close(); } else { -- GitLab
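
Note on the lognormal change in ExpDesigns.py: the marginal is now specified by its mean and standard deviation, and the parameters of the underlying normal are derived from them before chaospy.LogNormal is built. The snippet below is a standalone sanity check, not part of the commit; it assumes numpy, scipy and chaospy are installed and uses the 'Beam width' marginal from Test_Beam.py as the example input.

import numpy as np
import chaospy
from scipy.stats import lognorm

# Moments of the lognormal as given in Test_Beam.py for 'Beam width'
mean, std = 0.15, 0.0075

# Same conversion as in ExpDesigns.py: parameters of the underlying normal
mu    = np.log(mean**2 / np.sqrt(mean**2 + std**2))
sigma = np.sqrt(np.log(1 + std**2 / mean**2))

dist = chaospy.LogNormal(mu=mu, sigma=sigma)
print(float(dist.mom(1)))         # first raw moment, ~0.15

# Cross-check against scipy's parametrisation (s = sigma, scale = exp(mu))
rv = lognorm(s=sigma, scale=np.exp(mu))
print(rv.mean(), rv.std())        # ~0.15, ~0.0075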
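
Note on the eval_univ_basis change: the joint Rosenblatt transformation is replaced by a per-parameter mapping. Each column of the experimental design is pushed through the CDF of its original marginal and then mapped into the standard space of its polynomial family via the inverse CDF of the distribution returned by poly_rec_coeffs. A minimal sketch for a single lognormal ('hermite') parameter follows; the variable names are illustrative, not the actual class attributes, and the marginal values are roughly the converted 'Beam width' parameters.

import chaospy

# Original marginal of one parameter (illustrative values)
orig_dist = chaospy.LogNormal(mu=-1.898, sigma=0.05)
# Standard distribution of the Hermite family, as returned by poly_rec_coeffs
std_dist = chaospy.Normal(mu=0, sigma=1)

x = orig_dist.sample(5, rule="halton")   # one column of ExpDesignX
u = orig_dist.fwd(x)                     # forward CDF transform, u in [0, 1]
x_std = std_dist.inv(u)                  # standard-normal points fed to eval_rec_rule
print(x_std)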
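
Note on the Beam test units: SSBeam_Deflection.inp now gives the Young's modulus and the distributed load in SI units (Pa and N/m), so myBeam9points.cpp no longer rescales them by 1e+9 and 1e+3. As a rough plausibility check, the textbook midspan deflection of a simply supported beam under a uniform load can be evaluated with the same input values; this closed form is only a sketch and not necessarily what the myBeam9points solver computes at its nine points.

# Values from SSBeam_Deflection.inp (SI units)
b, h, L = 0.15, 0.3, 5.0      # beam width, height, span in m
E = 30000e+6                  # Young's modulus in Pa
q = 10000.0                   # uniform load in N/m

I = b * h**3 / 12.0           # second moment of area in m^4
w_mid = 5.0 * q * L**4 / (384.0 * E * I)
print(w_mid)                  # ~8.0e-3 m, i.e. a deflection of a few millimetres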