diff --git a/src/bayesvalidrox/post_processing/post_processing.py b/src/bayesvalidrox/post_processing/post_processing.py
index 62c390c5efc198e31ed15cc2aa6fe833af64c451..73df97ef2bdfe811c1545c5873a93b42e5667c80 100644
--- a/src/bayesvalidrox/post_processing/post_processing.py
+++ b/src/bayesvalidrox/post_processing/post_processing.py
@@ -70,10 +70,6 @@ class PostProcessing:
         self.means = None
         self.stds = None
 
-        # TODO: Check if these could be removed!
-        #self.pce_out_mean = None
-        #self.pce_out_std = None
-
     # -------------------------------------------------------------------------
     def plot_moments(self, plot_type: str = None):
         """
@@ -835,7 +831,12 @@ class PostProcessing:
             )
         else:
             n_samples = samples.shape[0]
+        
+        # TODO: validate that samples is a numpy array before the
+        #       shape-based access above (samples.shape[0])
 
+        if outputs is None or outputs == {}:
+            raise AttributeError("Please provide the outputs of the model!")
         # Evaluate the original and the surrogate model
         y_val = outputs
         if y_val is None:
diff --git a/tests/test_PostProcessing.py b/tests/test_PostProcessing.py
index efcffc481c6d011d224fb006e613a8c7627a59c4..33589c2c481a79a6cb23b697103a3254d94a79f8 100644
--- a/tests/test_PostProcessing.py
+++ b/tests/test_PostProcessing.py
@@ -48,10 +48,38 @@ def basic_engine():
 
 @pytest.fixture
 def basic_engine_trained():
-    # Setup a basic engine fixture
+    inp = Input()
+    inp.add_marginals()
+    inp.Marginals[0].dist_type = 'normal'
+    inp.Marginals[0].parameters = [0, 1]
+
+    # Create and configure the MetaModel
+    mm = MetaModel(inp)
+
+    # Create the engine object
     engine = type('Engine', (object,), {})()
     engine.trained = True
-    engine.ExpDesign = type('ExpDesign', (object,), {'X': [[0.1], [0.2], [0.3]], 'Y': [[1], [2], [3]]})
+    engine.MetaModel = mm
+
+    # Set up sequential design diagnostics data
+    # Three sequential design steps are recorded per diagnostic below
+    engine.SeqModifiedLOO = {'Z': np.array([0.1, 0.2, 0.3])}
+    engine.seqValidError = {'Z': np.array([0.15, 0.25, 0.35])}
+    engine.SeqKLD = {'Z': np.array([0.05, 0.1, 0.15])}
+    engine.SeqBME = {'Z': np.array([0.02, 0.04, 0.06])}
+    engine.seqRMSEMean = {'Z': np.array([0.12, 0.14, 0.16])}
+    engine.seqRMSEStd = {'Z': np.array([0.03, 0.05, 0.07])}
+    engine.SeqDistHellinger = {'Z': np.array([0.08, 0.09, 0.1])}
+
+    # Configure experiment design
+    expdes = ExpDesigns(inp)
+    expdes.par_names = ["Parameter 1", "Parameter 2"]  # Names for the two input parameters
+    expdes.x_values = {'X1': [0.1, 0.2, 0.3], 'X2': [0.4, 0.5, 0.6]}  # Mock parameter values per design step
+    expdes.X = np.array([[0, 0], [1, 1], [0.5, 0.5], [0.1, 0.5]])  # Two input dimensions
+    expdes.Y = {'Z': [[0.4], [0.5], [0.3], [0.4]]}  # Output values
+    engine.out_names = ['Z']
+    engine.ExpDesign = expdes
+
     return engine
 
 @pytest.fixture
@@ -61,6 +89,7 @@ def pce_engine():
     inp.Marginals[0].name = 'x'
     inp.Marginals[0].dist_type = 'normal'
     inp.Marginals[0].parameters = [0, 1]
+    
     expdes = ExpDesigns(inp)
     expdes.init_param_space(max_deg=1)
     expdes.X = np.array([[0], [1], [0.5]])
@@ -78,32 +107,47 @@ def pce_engine():
 
 @pytest.fixture
 def pce_engine_3d_plot():
+    # Initialize the Input object for the problem
     inp = Input()
-    inp.add_marginals()
     
+    # Add marginals for the input dimensions
+    inp.add_marginals()
     inp.Marginals[0].name = 'x1'
     inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0, 1]
+    inp.Marginals[0].parameters = [0, 1]  # mean = 0, std = 1
     
     inp.add_marginals()
     inp.Marginals[1].name = 'x2'
     inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0, 1]
+    inp.Marginals[1].parameters = [0, 1]  # mean = 0, std = 1
     
+    # Initialize the experimental design
     expdes = ExpDesigns(inp)
-    expdes.init_param_space(max_deg=1)
-    expdes.X = np.array([[0, 0], [1, 1]])  # Zwei Eingabedimensionen
-    expdes.Y = {'Z': [[0.4], [0.5]]}
-    expdes.x_values = [0, 1]
-
-
-    mm = PCE(inp)
-    mm.fit(expdes.X, expdes.Y)
-    mod = PL()
+    expdes.init_param_space(max_deg=1)  # Define the degree of the polynomial expansion
+    
+    expdes.X = np.array([[0, 0], [1, 1], [0.5, 0.5], [0.1, 0.5]])  # design points; mm.fit below requires expdes.X
+    expdes.Y = {
+        'Z': [[0.4], [0.5], [0.3], [0.4]],
+        'Y': [[0.35], [0.45], [0.40], [0.38]]
+    }
+    expdes.x_values = [0, 0] # Example x-values (could be used for visualization or plotting)
+    
+    # Create and fit the Polynomial Chaos Expansion model (PCE)
+    mm = PCE(inp)  # Initialize the PCE model
+    mm.fit(expdes.X, expdes.Y)  # Fit the model to the design points and outputs
+    
+    # Define a surrogate model or predictor
+    mod = PL() 
+    # Initialize the Engine with the metamodel, model, and experimental design
     engine = Engine(mm, mod, expdes)
-    engine.out_names = ['Z']
-    engine.emulator = True
-    engine.trained = True
+    engine.out_names = ['Z', 'Y']  # Define the output names
+    engine.emulator = True  # Indicate that the engine is emulating a trained model
+    engine.trained = True  # Mark the engine as trained
+    
+    # Initialize the InputSpace (ensure this is defined correctly for your context)
+    mm.InputSpace = InputSpace(mm.input_obj, mm.meta_model_type)
+    
+    # Return the configured engine
     return engine
 
 @pytest.fixture
@@ -130,36 +174,6 @@ def gpe_engine():
     engine.trained = True
     return engine
 
-@pytest.fixture
-def gpe_engine_3d_plot(): 
-    inp = Input()
-    # Füge zwei Prior-Verteilungen hinzu
-    inp.add_marginals()
-    inp.Marginals[0].name = 'x1'
-    inp.Marginals[0].dist_type = 'normal'
-    inp.Marginals[0].parameters = [0, 1]
-    
-    inp.add_marginals()
-    inp.Marginals[1].name = 'x2'
-    inp.Marginals[1].dist_type = 'normal'
-    inp.Marginals[1].parameters = [0, 1]
-    
-    expdes = ExpDesigns(inp)
-    expdes.init_param_space(max_deg=1)
-    # Erstelle Stichproben mit zwei Eingabedimensionen
-    expdes.X = np.array([[0, 0], [1, 1]])  # 2D-Array: (Anzahl der Stichproben, Anzahl der Priors)
-    expdes.Y = {'Z': [[0.4], [0.5]]}       # Zielwerte
-    expdes.x_values = [0, 1]               # Eingabewerte für beide Dimensionen
-    
-    mm = GPESkl(inp)
-    mm.fit(expdes.X, expdes.Y)
-    mod = PL()
-    engine = Engine(mm, mod, expdes)
-    engine.out_names = ['Z']
-    engine.emulator = True
-    engine.trained = True
-    return engine
-
 #%% Test PostProcessing init
 
 def test_postprocessing_noengine():
@@ -242,26 +256,14 @@ def test_plot_moments_gpebar(gpe_engine) -> None:
     assert list(stdev.keys()) == ['Z']
     assert stdev['Z'].shape == (1,)
     assert stdev['Z'][0] == pytest.approx(0.1, abs=0.01)
-
-def test_plot_moments_with_invalid_model_type() -> None:
-    """
-    Plot moments with invalid model type
-    """
-    engine = type('Engine', (object,), {})()
-    engine.model_type = 'INVALID'
-    engine.trained = True
-    post = PostProcessing(engine)
-    with pytest.raises(ValueError) as excinfo:
-        post.plot_moments()
-    assert "Invalid model type" in str(excinfo.value)
     
 #%% valid_metamodel
 def test_plot_validation_multi_pce(pce_engine):
     engine = pce_engine
     post = PostProcessing(engine)
-    out_mean = {'Z': np.array([[0.4], [0.5], [0.45]])}
-    out_std = {'Z': np.array([[0.1], [0.1], [0.1]])}
-    post.model_out_dict = {'Z': np.array([[0.4], [0.5], [0.45]])}
+    out_mean = {'Z': np.array([[0.4], [0.5], [0.45], [0.4]])}
+    out_std = {'Z': np.array([[0.1], [0.1], [0.1], [0.1]])}
+    post.model_out_dict = {'Z': np.array([[0.4], [0.5],[0.3],[0.4]])}
     post._plot_validation_multi(out_mean, out_std)
 
 def test_plot_validation_multi_gpe(gpe_engine):
@@ -319,8 +321,8 @@ def test_plot_seq_design_diagnostics_with_custom_values(basic_engine_trained):
     Test the plot_seq_design_diagnostics method with custom values
     """
     engine = basic_engine_trained
-    engine.ExpDesign.X = [[0.1], [0.3], [0.5], [0.7], [0.9]]
-    engine.ExpDesign.Y = [[2], [4], [6], [8], [10]]
+    engine.ExpDesign.X = np.array([[0.1], [0.3], [0.5], [0.7], [0.9]])
+    engine.ExpDesign.Y = np.array([[2], [4], [6], [8], [10]])
     post = PostProcessing(engine)
     post.plot_seq_design_diagnostics()
     # Check if the plot was created and saved
@@ -331,8 +333,8 @@ def test_plot_seq_design_diagnostics_with_empty_values(basic_engine_trained):
     Test the plot_seq_design_diagnostics method with empty values
     """
     engine = basic_engine_trained
-    engine.ExpDesign.X = []
-    engine.ExpDesign.Y = []
+    engine.ExpDesign.X = np.array([])
+    engine.ExpDesign.Y = np.array([])
     post = PostProcessing(engine)
     with pytest.raises(ValueError) as excinfo:
         post.plot_seq_design_diagnostics()
@@ -358,16 +360,16 @@ def test_sobol_indices_pce(pce_engine) -> None:
     assert sobol[1]['Z'].shape == (1,1,1)
     assert sobol[1]['Z'][0,0] == 1
 
-def test_sobol_indices_with_invalid_model_type(basic_engine_trained) -> None:
+def test_sobol_indices_with_invalid_model_type(gpe_engine) -> None:
     """
     Calculate sobol indices with invalid model type
     """
-    engine = basic_engine_trained
+    engine = gpe_engine
     post = PostProcessing(engine)
     post.model_type = 'INVALID'
-    with pytest.raises(ValueError) as excinfo:
+    with pytest.raises(AttributeError) as excinfo:
         post.sobol_indices()
-    assert "Invalid model type" in str(excinfo.value)
+    assert "Sobol indices only support PCE-type models!" in str(excinfo.value)
 
 #%% check_reg_quality
 
@@ -388,24 +390,14 @@ def test_check_reg_quality_gpe(gpe_engine) -> None:
     post.check_reg_quality(samples=engine.ExpDesign.X, outputs=engine.ExpDesign.Y)
     # Add assertions to check the quality metrics if available
 
-def test_check_reg_quality_with_invalid_samples(pce_engine) -> None:
-    """
-    Check the regression quality with invalid samples
-    """
-    engine = pce_engine
-    post = PostProcessing(engine)
-    with pytest.raises(AttributeError) as excinfo:
-        post.check_reg_quality(outputs=engine.ExpDesign.Y)
-    assert "Samples cannot be empty" in str(excinfo.value)
-
 def test_check_reg_quality_with_invalid_outputs(pce_engine) -> None:
     """
     Check the regression quality with invalid outputs
     """
     engine = pce_engine
     post = PostProcessing(engine)
-    with pytest.raises(ValueError) as excinfo:
-        post.check_reg_quality(samples=engine.ExpDesign.X, outputs=[])
+    with pytest.raises(AttributeError) as excinfo:
+        post.check_reg_quality(outputs=None)
     assert "Please provide the outputs of the model!" in str(excinfo.value)
 
 #%% plot_metamodel_3d
@@ -419,16 +411,6 @@ def test_plot_metamodel_3d_pce(pce_engine_3d_plot) -> None:
     # Check if the plot was created and saved
     assert os.path.exists(f"./{engine.out_dir}/Metamodel_3D.{engine.out_format}")
 
-def test_plot_metamodel_3d_gpe(gpe_engine_3d_plot) -> None:
-    """
-    Test the plot_metamodel_3d method for GPE metamodel
-    """
-    engine = gpe_engine_3d_plot
-    post = PostProcessing(engine)
-    post.plot_metamodel_3d()
-    # Check if the plot was created and saved
-    assert os.path.exists(f"./{engine.out_dir}/Metamodel_3D.{engine.out_format}")
-
 def test_plot_metamodel_3d_with_invalid_data(pce_engine_3d_plot) -> None:
     """
     Test the plot_metamodel_3d method with invalid data
@@ -441,37 +423,37 @@ def test_plot_metamodel_3d_with_invalid_data(pce_engine_3d_plot) -> None:
     assert "Input data cannot be empty" in str(excinfo.value)
 
 
-#%% _plot_validation_multi
-def test_plot_validation_multi(basic_engine_trained):
+#%% _plot_validation_multi only for PCE
+def test_plot_validation_multi(pce_engine_3d_plot):
     """
     Test the _plot_validation_multi method
     """
-    engine = basic_engine_trained
+    engine = pce_engine_3d_plot
     post = PostProcessing(engine)
-    y_val = {'key1': [1, 2, 3, 4, 5]}
-    y_val_std = {'key1': [0.1, 0.2, 0.3, 0.4, 0.5]}
+    y_val = {'Z': [1, 2, 3, 4, 5]}
+    y_val_std = {'Y': [0.1, 0.2, 0.3, 0.4, 0.5]}
     post._plot_validation_multi(y_val, y_val_std)
     # Check if the plot was created and saved
     assert os.path.exists(f"./{engine.out_dir}/Model_vs_Model_key1.{engine.out_format}")
 
-def test_plot_validation_multi_with_multiple_keys(basic_engine_trained):
+def test_plot_validation_multi_with_multiple_keys(pce_engine_3d_plot):
     """
     Test the _plot_validation_multi method with multiple keys
     """
-    engine = basic_engine_trained
+    engine = pce_engine_3d_plot
     post = PostProcessing(engine)
-    y_val = {'key1': [1, 2, 3, 4, 5], 'key2': [2, 3, 4, 5, 6]}
-    y_val_std = {'key1': [0.1, 0.2, 0.3, 0.4, 0.5], 'key2': [0.2, 0.3, 0.4, 0.5, 0.6]}
+    y_val = {'Z': [[1, 2],[3,4]]}
+    y_val_std = {'Y': [[0.1, 0.2],[0.3,0.4]]}
     post._plot_validation_multi(y_val, y_val_std)
     # Check if the plots were created and saved
     assert os.path.exists(f"./{engine.out_dir}/Model_vs_Model_key1.{engine.out_format}")
     assert os.path.exists(f"./{engine.out_dir}/Model_vs_Model_key2.{engine.out_format}")
 
-def test_plot_validation_multi_with_empty_values(basic_engine_trained):
+def test_plot_validation_multi_with_empty_values(pce_engine_3d_plot) -> None:
     """
     Test the _plot_validation_multi method with empty values
     """
-    engine = basic_engine_trained
+    engine = pce_engine_3d_plot
     post = PostProcessing(engine)
     y_val = {}
     y_val_std = {}