From c3292a5e9fa8e736ed3ee4b160f4fb2ff5eaa1e6 Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Wed, 19 Jan 2022 09:26:51 +0100
Subject: [PATCH] [PA-A] update reference generator scripts.

---
 BayesValidRox/tests/PA-A/ref_stokesdarcy.py | 101 ++++++++++----------
 BayesValidRox/tests/PA-A/ref_stokespnm.py   |  97 ++++++++++---------
 2 files changed, 105 insertions(+), 93 deletions(-)

diff --git a/BayesValidRox/tests/PA-A/ref_stokesdarcy.py b/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
index 8e4e1a904..47ae8572d 100755
--- a/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
+++ b/BayesValidRox/tests/PA-A/ref_stokesdarcy.py
@@ -6,42 +6,44 @@ Created on Thu Aug 13 09:53:11 2020
 @author: Farid Mohammadi, M.Sc.
     Email: farid.mohammadi@iws.uni-stuttgart.de
 """
-import sys, os
+import sys
+import os
 import numpy as np
 import scipy.stats as stats
 import matplotlib
-import chaospy
+import chaospy as cp
+import h5py
+import pandas as pd
 
 matplotlib.use('agg')
-try:
-    import cPickle as pickle
-except ModuleNotFoundError:
-    import pickle
 
 # Add BayesValidRox path
-sys.path.insert(0,'./../../../BayesValidRox/')
+sys.path.insert(0, './../../../BayesValidRox/')
 
 from PyLink.PyLinkForwardModel import PyLinkForwardModel
 
-#=====================================================
-#============   COMPUTATIONAL MODEL   ================
-#=====================================================
+# =====================================================
+# ============   COMPUTATIONAL MODEL   ================
+# =====================================================
 modelDir = './models/stokesdarcy/'
 
-inclusion = 'squared' # squared or circular
-couplingcond = 'ER' # ER or BJ
-inletLoc = 'top' # left or top
+inclusion = 'squared'  # squared or circular
+couplingcond = 'ER'  # ER or BJ
+inletLoc = 'top'  # left or top
 
 
 Model = PyLinkForwardModel()
-Model.nrCPUs = 4 #8
+Model.nrCPUs = 4
 Model.Type = 'PyLink'
-Model.Name = 'ffpm-stokesdarcy{}_{}_inclusion_{}Inflow-testset'.format(couplingcond,inclusion,inletLoc)
-Model.InputFile = modelDir+'params_{}_{}_inclusion_{}Inflow.input'.format(couplingcond,inclusion,inletLoc)
-Model.InputTemplate = modelDir+'params_{}_{}_inclusion_{}Inflow.tpl.input'.format(couplingcond,inclusion,inletLoc)
+Model.Name = f'ffpm-stokesdarcy{couplingcond}_{inclusion}_inclusion_'\
+    f'{inletLoc}Inflow-testset'
+Model.InputFile = f'{modelDir}params_{couplingcond}_{inclusion}_inclusion_'\
+    f'{inletLoc}Inflow.input'
+Model.InputTemplate = f'{modelDir}params_{couplingcond}_{inclusion}_'\
+    f'inclusion_{inletLoc}Inflow.tpl.input'
 
 
-Model.Command = "model_exe.py stokesdarcy -o {} {}".format(inclusion, Model.InputFile)
+Model.Command = f"model_exe.py stokesdarcy -o {inclusion} {Model.InputFile}"
 Model.ExecutionPath = os.getcwd()
 Model.Output.Parser = 'read_ffpm'
 Model.Output.Names = ['velocity [m/s]', 'p']
@@ -49,45 +51,46 @@ Model.Output.FileNames = ["ffpm_stokesdarcy_velocity_final.csv",
                           "ffpm_stokesdarcy_p_final.csv"]
 
 
-#=====================================================
-#=========   PROBABILISTIC INPUT MODEL  ==============
-#=====================================================
-NrSamples = 150
+# =====================================================
+# =========   PROBABILISTIC INPUT MODEL  ==============
+# =====================================================
+n_samples = 150
 
-# l = Microscopic length scale >>>> l/4 nach oben und l/2 nach unten.
-l = 0.5e-3
 if couplingcond == 'BJ':
-    params = [4.9*1e-3, 5.1*1e-3] #[0.005-0.5*l, 0.005+0.25*l]
+    params = [4.9*1e-3, 5.1*1e-3]
 else:
-    params = [5.0*1e-3, 5.1*1e-3]#[5.025*1e-3, 5.1*1e-3] #[0.004975, 0.005+0.25*l]
+    params = [5.0*1e-3, 5.1*1e-3]
 
-allParams = [(5e-4, 1.5e-3), # 'VyMaxTop'
-             (params[0], params[1]), # '$\\Gamma$'
-             (1.5e-09, 1.0e-9) # '$K$'
+allParams = [(5e-4, 1.5e-3),  # 'VyMaxTop'
+             (params[0], params[1]),  # '$\\Gamma$'
+             (1.5e-09, 1.0e-9)  # '$K$'
              ]
 
 if couplingcond == 'BJ':
-    allParams.append((0.1 , 4.0)) # '$\\alpha_{BJ}$'
+    allParams.append((0.1, 4.0))  # '$\\alpha_{BJ}$'
 
-parametersets = np.zeros((NrSamples,len(allParams)))
+parametersets = np.zeros((n_samples, len(allParams)))
 for parIdx, params in enumerate(allParams):
     if parIdx != 2:
-        parametersets[:,parIdx] = stats.uniform(loc=params[0], scale=params[1]-params[0]).rvs(size=NrSamples)
+        x = stats.uniform(loc=params[0],
+                          scale=params[1]-params[0]).rvs(size=n_samples)
+        parametersets[:, parIdx] = x
     else:
         Mu = np.log(params[0]**2 / np.sqrt(params[0]**2 + params[1]**2))
-        Sigma  = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+        Sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
 
-        parametersets[:,parIdx] = chaospy.LogNormal(mu=Mu,sigma=Sigma).sample(NrSamples)
+        parametersets[:, parIdx] = cp.LogNormal(mu=Mu,
+                                                sigma=Sigma).sample(n_samples)
 
-#=====================================================
-#=============  RUN THE SIMULATIONS  =================
-#=====================================================
+# =====================================================
+# =============  RUN THE SIMULATIONS  =================
+# =====================================================
 # Run the models for the samples
 OutputMatrix, _ = Model.Run_Model_Parallel(parametersets)
 
 
 # Cleanup
-#Zip the subdirectories
+# Zip the subdirectories
 try:
     dir_name = Model.Name + '_ValidSet'
     key = Model.Name + '_'
@@ -95,12 +98,10 @@ try:
 except:
     pass
 
-#=====================================================
-#==========  SLICE VALIDSET HDF5 File  ===============
-#=====================================================
+# =====================================================
+# ==========  SLICE VALIDSET HDF5 File  ===============
+# =====================================================
 # Prepare two separate hdf5 for calibration and validation
-import h5py
-import pandas as pd
 ValidSets = h5py.File("ExpDesign_"+Model.Name+".hdf5", 'r+')
 validSamples = np.array(ValidSets["EDX/init_"])
 OutputNames = ['velocity [m/s]', 'p']
@@ -117,20 +118,24 @@ for case in ['Calibration', 'Validation']:
     sorted_indices = {}
     Velocity = pd.read_csv("models/velocity_points.csv")
     Pressure = pd.read_csv("models/pressure_points.csv")
-    sorted_indices[OutputNames[0]] = list(range(0,10)) if case=='Calibration' else list(range(10,20))
-    sorted_indices[OutputNames[1]] = list(range(0,3)) if case=='Calibration' else list(range(3,5))
+    if case == 'Calibration':
+        sorted_indices[OutputNames[0]] = list(range(0, 10))
+        sorted_indices[OutputNames[1]] = list(range(0, 3))
+    else:
+        sorted_indices[OutputNames[0]] = list(range(10, 20))
+        sorted_indices[OutputNames[1]] = list(range(3, 5))
 
     # Extract x values
     grp_x_values = file.create_group("x_values/")
     validModelRuns = dict()
     for varIdx, var in enumerate(OutputNames):
-        validModelRuns[var] = np.array(ValidSets["x_values/"+var])#[sorted_indices[var]]
+        validModelRuns[var] = np.array(ValidSets[f"x_values/{var}"])
         grp_x_values.create_dataset(var, data=validModelRuns[var])
 
     # Extract Y values
     for varIdx, var in enumerate(OutputNames):
-        validModelRuns[var] = OutputMatrix[var][:,sorted_indices[var]]
-        grpY = file.create_group("EDY/"+var)
+        validModelRuns[var] = OutputMatrix[var][:, sorted_indices[var]]
+        grpY = file.create_group(f"EDY/{var}")
         grpY.create_dataset("init_", data=validModelRuns[var])
 
     # close
diff --git a/BayesValidRox/tests/PA-A/ref_stokespnm.py b/BayesValidRox/tests/PA-A/ref_stokespnm.py
index 2db815b8d..066a25174 100755
--- a/BayesValidRox/tests/PA-A/ref_stokespnm.py
+++ b/BayesValidRox/tests/PA-A/ref_stokespnm.py
@@ -5,73 +5,78 @@ Created on Thu Aug 13 09:53:11 2020
 
 @author: farid
 """
-import sys, os
+import sys
+import os
 import numpy as np
 import scipy.stats as stats
-import chaospy
+import chaospy as cp
+import h5py
+import pandas as pd
 import matplotlib
 matplotlib.use('agg')
-try:
-    import cPickle as pickle
-except ModuleNotFoundError:
-    import pickle
 
 # Add BayesValidRox path
-sys.path.insert(0,'./../../../BayesValidRox/')
+sys.path.insert(0, './../../../BayesValidRox/')
 
 from PyLink.PyLinkForwardModel import PyLinkForwardModel
 
-#=====================================================
-#============   COMPUTATIONAL MODEL   ================
-#=====================================================
+# =====================================================
+# ============   COMPUTATIONAL MODEL   ================
+# =====================================================
 modelDir = './models/stokespnm/'
 
-averaging = True # True or False
-inletLoc = 'top' # left or top
-inclusion = 'squared' # only squared
+averaging = True  # True or False
+inletLoc = 'top'  # left or top
+inclusion = 'squared'  # only squared
 
 Model = PyLinkForwardModel()
-#Model.nrCPUs = 4
 Model.Type = 'PyLink'
 
 if averaging:
-    Model.Name = 'ffpm-stokespnm_{}_inclusion_{}Inflow-testset'.format(inclusion,inletLoc)
+    Model.Name = f'ffpm-stokespnm_{inclusion}_inclusion_'\
+        f'{inletLoc}Inflow-testset'
 else:
-    Model.Name = 'ffpm-stokespnmNA_{}_inclusion_{}Inflow-testset'.format(inclusion,inletLoc)
+    Model.Name = f'ffpm-stokespnmNA_{inclusion}_inclusion_'\
+        f'{inletLoc}Inflow-testset'
 
-Model.InputFile = modelDir+'params_{}_inclusion_{}Inflow.input'.format(inclusion,inletLoc)
-Model.InputTemplate = modelDir+'params_{}_inclusion_{}Inflow.tpl.input'.format(inclusion,inletLoc)
+Model.InputFile = f'{modelDir}params_{inclusion}_inclusion_'\
+    f'{inletLoc}Inflow.input'
+Model.InputTemplate = f'{modelDir}params_{inclusion}_inclusion_'\
+    f'{inletLoc}Inflow.tpl.input'
 
 
-Model.Command = "model_exe.py stokespnm {} --averaging {}".format(Model.InputFile,averaging)
+Model.Command = f"model_exe.py stokespnm {Model.InputFile} --averaging "\
+    f"{averaging}"
 Model.ExecutionPath = os.getcwd()
 Model.Output.Parser = 'read_ffpm'
 Model.Output.Names = ['velocity [m/s]', 'p']
-Model.Output.FileNames = ["ffpm_stokespnm_velocity_final.csv","ffpm_stokespnm_p_final.csv"]
-
-#=====================================================
-#=========   PROBABILISTIC INPUT MODEL  ==============
-#=====================================================
-allParams = [(5e-4, 1.5e-3), # 'VyMaxTop'
-             (1.0e-07,1.0e-05), # 'TransmissibilityTotal'
+Model.Output.FileNames = ["ffpm_stokespnm_velocity_final.csv",
+                          "ffpm_stokespnm_p_final.csv"]
+
+# =====================================================
+# =========   PROBABILISTIC INPUT MODEL  ==============
+# =====================================================
+allParams = [(5e-4, 1.5e-3),  # 'VyMaxTop'
+             (1.0e-07, 1.0e-05),  # 'TransmissibilityTotal'
              # (1.0e-07,1.0e-04), # 'TransmissibilityThroat'
              # (1.0e-07,1.0e-04), # 'TransmissibilityHalfPore'
              (1.0e3, 1.0e5)  # 'beta'
-    ]
+             ]
 
 origJoints = []
 for params in allParams:
-    origJoints.append(chaospy.Uniform(lower=params[0],upper=params[1]))
-origSpaceDist = chaospy.J(*origJoints)
+    origJoints.append(cp.Uniform(lower=params[0], upper=params[1]))
+origSpaceDist = cp.J(*origJoints)
 
-#=====================================================
-#=============  RUN THE SIMULATIONS  =================
-#=====================================================
+# =====================================================
+# =============  RUN THE SIMULATIONS  =================
+# =====================================================
 SamplingMethod = 'random'
 NrSamples = 150
 
 # Generate samples with chaospy
-parametersets = chaospy.generate_samples(NrSamples, domain=origSpaceDist , rule=SamplingMethod).T
+parametersets = cp.generate_samples(NrSamples, domain=origSpaceDist,
+                                    rule=SamplingMethod).T
 
 
 # Run the models for the samples
@@ -79,7 +84,7 @@ OutputMatrix, _ = Model.Run_Model_Parallel(parametersets)
 
 
 # Cleanup
-#Zip the subdirectories
+# Zip the subdirectories
 try:
     dir_name = Model.Name + '_ValidSet'
     key = Model.Name + '_'
@@ -87,12 +92,11 @@ try:
 except:
     pass
 
-#=====================================================
-#==========  SLICE VALIDSET HDF5 File  ===============
-#=====================================================
+# =====================================================
+# ==========  SLICE VALIDSET HDF5 File  ===============
+# =====================================================
 # Prepare two separate hdf5 for calibration and validation
-import h5py
-import pandas as pd
+
 ValidSets = h5py.File("ExpDesign_"+Model.Name+".hdf5", 'r+')
 validSamples = np.array(ValidSets["EDX/init_"])
 OutputNames = ['velocity [m/s]', 'p']
@@ -109,20 +113,23 @@ for case in ['Calibration', 'Validation']:
     sorted_indices = {}
     Velocity = pd.read_csv("models/velocity_points.csv")
     Pressure = pd.read_csv("models/pressure_points.csv")
-    sorted_indices[OutputNames[0]] = list(range(0,10)) if case=='Calibration' else list(range(10,20))
-    sorted_indices[OutputNames[1]] = list(range(0,3)) if case=='Calibration' else list(range(3,5))
-
+    if case == 'Calibration':
+        sorted_indices[OutputNames[0]] = list(range(0, 10))
+        sorted_indices[OutputNames[1]] = list(range(0, 3))
+    else:
+        sorted_indices[OutputNames[0]] = list(range(10, 20))
+        sorted_indices[OutputNames[1]] = list(range(3, 5))
     # Extract x values
     grp_x_values = file.create_group("x_values/")
     validModelRuns = dict()
     for varIdx, var in enumerate(OutputNames):
-        validModelRuns[var] = np.array(ValidSets["x_values/"+var])[sorted_indices[var]]
+        validModelRuns[var] = np.array(ValidSets[f"x_values/{var}"])[sorted_indices[var]]
         grp_x_values.create_dataset(var, data=validModelRuns[var])
 
     # Extract Y values
     for varIdx, var in enumerate(OutputNames):
-        validModelRuns[var] = OutputMatrix[var][:,sorted_indices[var]]
-        grpY = file.create_group("EDY/"+var)
+        validModelRuns[var] = OutputMatrix[var][:, sorted_indices[var]]
+        grpY = file.create_group(f"EDY/{var}")
         grpY.create_dataset("init_", data=validModelRuns[var])
 
     # close
-- 
GitLab