From e5e464aed25302fc34cbafb5dd396999845cf0fa Mon Sep 17 00:00:00 2001
From: farid <farid.mohammadi@iws.uni-stuttgart.de>
Date: Tue, 1 Feb 2022 09:23:26 +0100
Subject: [PATCH] [PyLink] fixed some styling issues.

---
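Note for reviewers: a minimal sketch of how the patched Run_Model_Parallel is
driven. The module name 'AnalyticalFunction', the output name 'Z', and the
parameter ranges are placeholders, not part of this patch; the constructor
signature is not visible in the hunks below, so the setup lines are
illustrative only.

    import numpy as np
    from FuncForwardModel import FuncForwardModel

    Model = FuncForwardModel()
    Model.Name = 'AnalyticalFunction'  # resolved as AnalyticalFunction.py via self.pyFile
    Model.Output.Names = ['Z']         # one 'EDY/<name>' group is written per entry
    Model.outMin, Model.outMax = -1e5, 1e5  # optional range filter (see within_range)

    # 10 collocation points in a 2-dimensional parameter space; the model
    # function receives one point as np.array([theta]) and must return
    # [x_values, output_for_'Z', ...] in the order of Output.Names.
    CollocationPoints = np.random.uniform(-5, 5, size=(10, 2))
    OutputMatrix, kept = Model.Run_Model_Parallel(CollocationPoints, keyString='iter1')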
 BayesValidRox/PyLink/FuncForwardModel.py | 69 ++++++++++++------------
 1 file changed, 34 insertions(+), 35 deletions(-)
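The HDF5 layout written by this code is unchanged by the patch; for reference,
after a first run (prevRun_No == 0) the experimental design can be inspected
like this (file and dataset names follow the diff, with the same placeholder
names as in the sketch above):

    import h5py

    with h5py.File('ExpDesign_AnalyticalFunction.hdf5', 'r') as f:
        x_values = f['x_values'][()]       # time steps / x-values of the model
        X = f['EDX/init_iter1'][()]        # collocation points of the first run
        Y = f['EDY/Z/New_init_iter1'][()]  # outputs with out-of-range runs removed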

diff --git a/BayesValidRox/PyLink/FuncForwardModel.py b/BayesValidRox/PyLink/FuncForwardModel.py
index 2a0aa16bf..08b54b6e6 100644
--- a/BayesValidRox/PyLink/FuncForwardModel.py
+++ b/BayesValidRox/PyLink/FuncForwardModel.py
@@ -21,10 +21,7 @@ import tqdm
 from functools import reduce
 import multiprocessing
 import h5py
-try:
-    import cPickle as pickle
-except ModuleNotFoundError:
-    import pickle
+
 
 class FuncForwardModel:
 
@@ -35,54 +32,57 @@ class FuncForwardModel:
         self.pyFile = self.Name
         self.outMin = None
         self.outMax = None
-        self.Observations  = {}
+        self.Observations = {}
         self.ObservationsValid = {}
         self.OutputMatrix = {}
         self.MeasurementFile = None
         self.MCReference = {}
+
     # Nested class
     class Output:
         def __init__(self):
             self.Names = None
-    
+
     def run_forwardmodel(self, xx):
         Function, theta = xx
         return Function(np.array([theta]))
-    
+
     def Run_Model_Parallel(self, CollocationPoints, prevRun_No=0, keyString=''):
-        
+
         # Create the hdf5 metadata file
         hdf5file = 'ExpDesign'+'_'+self.Name+'.hdf5'
         hdf5_exist = os.path.exists(hdf5file)
         file = h5py.File(hdf5file, 'a')
-        
+
         # Initialization
         P = len(CollocationPoints)
         OutputNames = self.Output.Names
         TotalOutputs = {}
-        
+
         # Prepare the function
         Filename = self.pyFile
         Function = getattr(__import__(Filename), Filename)
-        
+
         # Parallel runs with multiprocessing
         with multiprocessing.Pool() as p:
-            group_results = list(tqdm.tqdm(p.imap(self.run_forwardmodel, zip([Function]*P, CollocationPoints)),
-                                           total=P, desc='Running forward model'))
+            group_results = list(tqdm.tqdm(
+                p.imap(self.run_forwardmodel, zip([Function]*P, CollocationPoints)),
+                total=P, desc='Running forward model'))
         print("")
         # Save time steps or x-values
         x_values = group_results[0][0]
         TotalOutputs["x_values"] = x_values
-        if not hdf5_exist: file.create_dataset("x_values", data=x_values)
-        
+        if not hdf5_exist:
+            file.create_dataset("x_values", data=x_values)
+
         # Save each output in its corresponding array
         outRangeIdx = []
         for varIdx, var in enumerate(OutputNames):
-            
+
             grpY = file.create_group("EDY/"+var) if not hdf5_exist else file.get("EDY/"+var)
-            
+
             Outputs = np.asarray([item[varIdx+1] for item in group_results], dtype=np.float64)
-            
+
             if prevRun_No == 0:
                 grpY.create_dataset("init_"+keyString, data=Outputs)
             else:
@@ -93,15 +93,15 @@ class FuncForwardModel:
                 except:
                     data = Outputs
                 grpY.create_dataset('adaptive_'+keyString, data=data)
-                
+
             # Check if all outputs lie between the provided min and max
             if self.outMin is not None and P > 1:
                 for outIdx, out in enumerate(Outputs):
                     if not self.within_range(out, self.outMin, self.outMax):
                         outRangeIdx.append(outIdx)
-            
+
             TotalOutputs[var] = np.delete(Outputs, outRangeIdx, axis=0)
-            
+
             if prevRun_No == 0:
                 grpY.create_dataset("New_init_"+keyString, data=TotalOutputs[var])
             else:
@@ -112,27 +112,27 @@ class FuncForwardModel:
                 except:
                     data = TotalOutputs[var]
                 grpY.create_dataset('New_adaptive_'+keyString, data=data)
-                
+
         # Print the collocation points whose simulations crashed
         if len(outRangeIdx) != 0:
             print('\n')
             print('*'*20)
-            print("\nThe following parametersets have been removed:\n", 
+            print("\nThe following parameter sets have been removed:\n",
                   CollocationPoints[outRangeIdx])
             print("\n")
             print('*'*20)
-            
-        # Pass it to the attribute   
+
+        # Store the filtered outputs in the attribute
         NewCollocationPoint = np.delete(CollocationPoints, outRangeIdx, axis=0)
         self.OutputMatrix = TotalOutputs
-        
+
         # Save CollocationPoints
         grpX = file.create_group("EDX") if not hdf5_exist else file.get("EDX")
         if prevRun_No == 0:
             grpX.create_dataset("init_"+keyString, data=CollocationPoints)
             if len(outRangeIdx) != 0:
                 grpX.create_dataset("New_init_"+keyString, data=NewCollocationPoint)
-                  
+
         else:
             try:
                 oldCollocationPoints = np.array(file['EDX/'+'adaptive_'+keyString])
@@ -141,7 +141,7 @@ class FuncForwardModel:
             except:
                 data = NewCollocationPoint
             grpX.create_dataset('adaptive_'+keyString, data=data)
-            
+
             if len(outRangeIdx) != 0:
                 try:
                     oldCollocationPoints = np.array(file['EDX/New_'+'adaptive_'+keyString])
@@ -150,28 +150,27 @@ class FuncForwardModel:
                 except:
                     data = NewCollocationPoint
                 grpX.create_dataset('New_adaptive_'+keyString, data=data)
-        
+
         # Close h5py file
         file.close()
-        
+
         return self.OutputMatrix, NewCollocationPoint
-    
+
     def within_range(self, out, minout, maxout):
         inside = False
         if (out > minout).all() and (out < maxout).all():
             inside = True
         return inside
-                           
+
     def read_Observation(self):
         obsDataFrame = pd.DataFrame.from_dict(self.Observations)
         self.nObs = reduce(lambda x, y: x*y, obsDataFrame[self.Output.Names].shape)
         return obsDataFrame
-    
+
     def read_ObservationValid(self):
         obsDataFrame = pd.DataFrame.from_dict(self.ObservationsValid)
         self.nObsValid = reduce(lambda x, y: x*y, obsDataFrame[self.Output.Names].shape)
         return obsDataFrame
-    
+
     def read_MCReference(self):
-        
         return pd.DataFrame.from_dict(self.MCReference)
-- 
GitLab