From ad9121a576bbfc035d82663036491acc2443d485 Mon Sep 17 00:00:00 2001
From: kohlhaasrebecca <rebecca.kohlhaas@outlook.com>
Date: Fri, 15 Mar 2024 11:57:20 +0100
Subject: [PATCH] Built package

---
 build/lib/bayesvalidrox/__init__.py           |   25 +
 .../bayesvalidrox/bayes_inference/__init__.py |    9 +
 .../bayes_inference/bayes_inference.py        | 1532 ++++++++++++
 .../bayes_inference/bayes_model_comparison.py |  654 +++++
 .../bayes_inference/discrepancy.py            |  106 +
 .../lib/bayesvalidrox/bayes_inference/mcmc.py |  909 +++++++
 .../lib/bayesvalidrox/bayesvalidrox.mplstyle  |   16 +
 .../bayesvalidrox/post_processing/__init__.py |    7 +
 .../post_processing/post_processing.py        | 1338 ++++++++++
 build/lib/bayesvalidrox/pylink/__init__.py    |    7 +
 build/lib/bayesvalidrox/pylink/pylink.py      |  803 ++++++
 .../surrogate_models/__init__.py              |    7 +
 .../surrogate_models/adaptPlot.py             |  109 +
 .../surrogate_models/apoly_construction.py    |  124 +
 .../surrogate_models/bayes_linear.py          |  523 ++++
 .../bayesvalidrox/surrogate_models/engine.py  | 2225 +++++++++++++++++
 .../surrogate_models/eval_rec_rule.py         |  197 ++
 .../surrogate_models/exp_designs.py           |  479 ++++
 .../surrogate_models/exploration.py           |  367 +++
 .../surrogate_models/glexindex.py             |  161 ++
 .../surrogate_models/input_space.py           |  398 +++
 .../bayesvalidrox/surrogate_models/inputs.py  |   79 +
 .../orthogonal_matching_pursuit.py            |  366 +++
 .../surrogate_models/reg_fast_ard.py          |  475 ++++
 .../surrogate_models/reg_fast_laplace.py      |  452 ++++
 .../surrogate_models/surrogate_models.py      | 1576 ++++++++++++
 dist/bayesvalidrox-1.0.0-py3-none-any.whl     |  Bin 0 -> 130371 bytes
 dist/bayesvalidrox-1.0.0.tar.gz               |  Bin 0 -> 128377 bytes
 src/bayesvalidrox.egg-info/PKG-INFO           |    6 +-
 29 files changed, 12947 insertions(+), 3 deletions(-)
 create mode 100644 build/lib/bayesvalidrox/__init__.py
 create mode 100644 build/lib/bayesvalidrox/bayes_inference/__init__.py
 create mode 100644 build/lib/bayesvalidrox/bayes_inference/bayes_inference.py
 create mode 100644 build/lib/bayesvalidrox/bayes_inference/bayes_model_comparison.py
 create mode 100644 build/lib/bayesvalidrox/bayes_inference/discrepancy.py
 create mode 100644 build/lib/bayesvalidrox/bayes_inference/mcmc.py
 create mode 100644 build/lib/bayesvalidrox/bayesvalidrox.mplstyle
 create mode 100644 build/lib/bayesvalidrox/post_processing/__init__.py
 create mode 100644 build/lib/bayesvalidrox/post_processing/post_processing.py
 create mode 100644 build/lib/bayesvalidrox/pylink/__init__.py
 create mode 100644 build/lib/bayesvalidrox/pylink/pylink.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/__init__.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/adaptPlot.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/apoly_construction.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/bayes_linear.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/engine.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/eval_rec_rule.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/exp_designs.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/exploration.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/glexindex.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/input_space.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/inputs.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/orthogonal_matching_pursuit.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/reg_fast_ard.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/reg_fast_laplace.py
 create mode 100644 build/lib/bayesvalidrox/surrogate_models/surrogate_models.py
 create mode 100644 dist/bayesvalidrox-1.0.0-py3-none-any.whl
 create mode 100644 dist/bayesvalidrox-1.0.0.tar.gz

diff --git a/build/lib/bayesvalidrox/__init__.py b/build/lib/bayesvalidrox/__init__.py
new file mode 100644
index 000000000..8e865af80
--- /dev/null
+++ b/build/lib/bayesvalidrox/__init__.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+__version__ = "0.0.5"  # NOTE(review): dist artifacts in this patch are versioned 1.0.0 — confirm which is correct
+
+from .pylink.pylink import PyLinkForwardModel
+from .surrogate_models.surrogate_models import MetaModel
+#from .surrogate_models.meta_model_engine import MetaModelEngine
+from .surrogate_models.engine import Engine
+from .surrogate_models.inputs import Input
+from .post_processing.post_processing import PostProcessing
+from .bayes_inference.bayes_inference import BayesInference
+from .bayes_inference.bayes_model_comparison import BayesModelComparison
+from .bayes_inference.discrepancy import Discrepancy
+
+__all__ = [  # names exported via `from bayesvalidrox import *`
+    "__version__",
+    "PyLinkForwardModel",
+    "Input",
+    "Discrepancy",
+    "MetaModel",
+    #"MetaModelEngine",  # disabled together with the commented-out import
+    "Engine",
+    "PostProcessing",
+    "BayesInference",
+    "BayesModelComparison"
+    ]
diff --git a/build/lib/bayesvalidrox/bayes_inference/__init__.py b/build/lib/bayesvalidrox/bayes_inference/__init__.py
new file mode 100644
index 000000000..df8d93568
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayes_inference/__init__.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+
+from .bayes_inference import BayesInference
+from .mcmc import MCMC
+
+__all__ = [  # public names of the bayes_inference subpackage
+    "BayesInference",
+    "MCMC"
+    ]
diff --git a/build/lib/bayesvalidrox/bayes_inference/bayes_inference.py b/build/lib/bayesvalidrox/bayes_inference/bayes_inference.py
new file mode 100644
index 000000000..1898a8ae6
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayes_inference/bayes_inference.py
@@ -0,0 +1,1532 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import os
+import copy
+import pandas as pd
+from tqdm import tqdm
+from scipy import stats
+import scipy.linalg as spla
+import joblib
+import seaborn as sns
+import corner
+import h5py
+import multiprocessing
+import gc
+from sklearn.metrics import mean_squared_error, r2_score
+from sklearn import preprocessing
+from matplotlib.patches import Patch
+import matplotlib.lines as mlines
+from matplotlib.backends.backend_pdf import PdfPages
+import matplotlib.pylab as plt
+
+from .mcmc import MCMC
+
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           '../', 'bayesvalidrox.mplstyle'))
+
+
+class BayesInference:
+    """
+    A class to perform Bayesian Analysis.
+
+
+    Attributes
+    ----------
+    MetaModel : obj
+        Meta model object.
+    discrepancy : obj
+        The discrepancy object for the sigma2s, i.e. the diagonal entries
+        of the variance matrix for a multivariate normal likelihood.
+    name : str, optional
+        The type of analysis, either calibration (`Calib`) or validation
+        (`Valid`). The default is `'Calib'`.
+    emulator : bool, optional
+        Analysis with emulator (MetaModel). The default is `True`.
+    bootstrap : bool, optional
+        Bootstrap the analysis. The default is `False`.
+    req_outputs : list, optional
+        The list of requested output to be used for the analysis.
+        The default is `None`. If None, all the defined outputs for the model
+        object is used.
+    selected_indices : dict, optional
+        A dictionary with the selected indices of each model output. The
+        default is `None`. If `None`, all measurement points are used in the
+        analysis.
+    samples : array of shape (n_samples, n_params), optional
+        The samples to be used in the analysis. The default is `None`. If
+        None the samples are drawn from the probabilistic input parameter
+        object of the MetaModel object.
+    n_samples : int, optional
+        Number of samples to be used in the analysis. The default is `100000`.
+        If samples is not `None`, this argument will be assigned based on the
+        number of samples given.
+    measured_data : dict, optional
+        A dictionary containing the observation data. The default is `None`.
+        if `None`, the observation defined in the Model object of the
+        MetaModel is used.
+    inference_method : str, optional
+        A method for approximating the posterior distribution in the Bayesian
+        inference step. The default is `'rejection'`, which stands for
+        rejection sampling. A Markov Chain Monte Carlo sampler can be simply
+        selected by passing `'MCMC'`.
+    mcmc_params : dict, optional
+        A dictionary with args required for the Bayesian inference with
+        `MCMC`. The default is `None`.
+
+        Pass the mcmc_params like the following:
+
+            >>> mcmc_params:{
+                'init_samples': None,  # initial samples
+                'n_walkers': 100,  # number of walkers (chain)
+                'n_steps': 100000,  # number of maximum steps
+                'n_burn': 200,  # number of burn-in steps
+                'moves': None,  # Moves for the emcee sampler
+                'multiprocessing': False,  # multiprocessing
+                'verbose': False # verbosity
+                }
+        The items shown above are the default values. If any parameter is
+        not defined, the default value will be assigned to it.
+    bayes_loocv : bool, optional
+        Bayesian Leave-one-out Cross Validation. The default is `False`. If
+        `True`, the LOOCV procedure is used to estimate the Bayesian Model
+        Evidence (BME).
+    n_bootstrap_itrs : int, optional
+        Number of bootstrap iteration. The default is `1`. If bayes_loocv is
+        `True`, this is equal to the total length of the observation data
+        set.
+    perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
+        User defined perturbed data. The default is `[]`.
+    bootstrap_noise : float, optional
+        A noise level to perturb the data set. The default is `0.05`.
+    just_analysis : bool, optional
+        Justifiability analysis. The default is False.
+    valid_metrics : list, optional
+        List of the validation metrics. The following metrics are supported:
+
+        1. log_BME : logarithm of the Bayesian model evidence
+        2. KLD : Kullback-Leibler Divergence
+        3. inf_entropy: Information entropy
+        The default is `['BME']`.
+    plot_post_pred : bool, optional
+        Plot posterior predictive plots. The default is `True`.
+    plot_map_pred : bool, optional
+        Plot the model outputs vs the metamodel predictions for the maximum
+        a posteriori (defined as `max_a_posteriori`) parameter set. The
+        default is `False`.
+    max_a_posteriori : str, optional
+        Maximum a posteriori. `'mean'` and `'mode'` are available. The default
+        is `'mean'`.
+    corner_title_fmt : str, optional
+        Title format for the posterior distribution plot with python
+        package `corner`. The default is `'.2e'`.
+
+    """
+
+    def __init__(self, engine, MetaModel = None, discrepancy=None, emulator=True,
+                 name='Calib', bootstrap=False, req_outputs=None,
+                 selected_indices=None, samples=None, n_samples=100000,
+                 measured_data=None, inference_method='rejection',
+                 mcmc_params=None, bayes_loocv=False, n_bootstrap_itrs=1,
+                 perturbed_data=[], bootstrap_noise=0.05, just_analysis=False,  # NOTE(review): mutable default argument `[]`
+                 valid_metrics=['BME'], plot_post_pred=True,  # NOTE(review): mutable default argument `['BME']`
+                 plot_map_pred=False, max_a_posteriori='mean',
+                 corner_title_fmt='.2e'):
+
+        self.engine = engine
+        self.MetaModel = engine.MetaModel  # NOTE(review): the `MetaModel` argument is ignored in favor of engine.MetaModel — confirm intent
+        self.Discrepancy = discrepancy
+        self.emulator = emulator
+        self.name = name
+        self.bootstrap = bootstrap
+        self.req_outputs = req_outputs
+        self.selected_indices = selected_indices
+        self.samples = samples
+        self.n_samples = n_samples
+        self.measured_data = measured_data
+        self.inference_method = inference_method
+        self.mcmc_params = mcmc_params
+        self.perturbed_data = perturbed_data
+        self.bayes_loocv = bayes_loocv
+        self.n_bootstrap_itrs = n_bootstrap_itrs
+        self.bootstrap_noise = bootstrap_noise
+        self.just_analysis = just_analysis
+        self.valid_metrics = valid_metrics
+        self.plot_post_pred = plot_post_pred
+        self.plot_map_pred = plot_map_pred
+        self.max_a_posteriori = max_a_posteriori
+        self.corner_title_fmt = corner_title_fmt
+
+    # -------------------------------------------------------------------------
+    def create_inference(self):
+        """
+        Starts the inference.
+
+        Returns
+        -------
+        BayesInference : obj
+            The Bayes inference object.
+
+        """
+
+        # Set some variables
+        MetaModel = self.MetaModel
+        Model = self.engine.Model
+        n_params = MetaModel.n_params
+        output_names = Model.Output.names
+        par_names = self.engine.ExpDesign.par_names
+
+        # If the prior is set by the user, take it.
+        if self.samples is None:
+            self.samples = self.engine.ExpDesign.generate_samples(
+                self.n_samples, 'random')
+        else:
+            try:
+                samples = self.samples.values
+            except AttributeError:
+                samples = self.samples
+
+            # Take care of an additional Sigma2s
+            self.samples = samples[:, :n_params]
+
+            # Update number of samples
+            self.n_samples = self.samples.shape[0]
+
+        # ---------- Preparation of observation data ----------
+        # Read observation data and perturb it if requested.
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+        # Convert measured_data to a data frame
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # Extract the total number of measurement points
+        if self.name.lower() == 'calib':
+            self.n_tot_measurement = Model.n_obs
+        else:
+            self.n_tot_measurement = Model.n_obs_valid
+
+        # Find measurement error (if not given) for post predictive plot
+        if not hasattr(self, 'measurement_error'):
+            if isinstance(self.Discrepancy, dict):
+                Disc = self.Discrepancy['known']
+            else:
+                Disc = self.Discrepancy
+            if isinstance(Disc.parameters, dict):
+                self.measurement_error = {k: np.sqrt(Disc.parameters[k]) for k
+                                          in Disc.parameters.keys()}
+            else:
+                try:
+                    self.measurement_error = np.sqrt(Disc.parameters)
+                except TypeError:
+                    pass
+
+        # ---------- Preparation of variance for covariance matrix ----------
+        # Independent and identically distributed
+        total_sigma2 = dict()
+        opt_sigma_flag = isinstance(self.Discrepancy, dict)
+        opt_sigma = None
+        for key_idx, key in enumerate(output_names):
+
+            # Find opt_sigma
+            if opt_sigma_flag and opt_sigma is None:
+                # Option A: known error with unknown bias term
+                opt_sigma = 'A'
+                known_discrepancy = self.Discrepancy['known']
+                self.Discrepancy = self.Discrepancy['infer']
+                sigma2 = np.array(known_discrepancy.parameters[key])
+
+            elif opt_sigma == 'A' or self.Discrepancy.parameters is not None:
+                # Option B: The sigma2 is known (no bias term)
+                if opt_sigma == 'A':
+                    sigma2 = np.array(known_discrepancy.parameters[key])
+                else:
+                    opt_sigma = 'B'
+                    sigma2 = np.array(self.Discrepancy.parameters[key])
+
+            elif not isinstance(self.Discrepancy.InputDisc, str):
+                # Option C: The sigma2 is unknown (bias term including error)
+                opt_sigma = 'C'
+                self.Discrepancy.opt_sigma = opt_sigma
+                n_measurement = self.measured_data[key].values.shape
+                sigma2 = np.zeros((n_measurement[0]))
+
+            total_sigma2[key] = sigma2
+
+            self.Discrepancy.opt_sigma = opt_sigma
+            self.Discrepancy.total_sigma2 = total_sigma2
+
+        # If inferred sigma2s obtained from e.g. calibration are given
+        try:
+            self.sigma2s = self.Discrepancy.get_sample(self.n_samples)
+        except:  # NOTE(review): bare except silently swallows every error here — confirm intentional best-effort
+            pass
+
+        # ---------------- Bootstrap & TOM --------------------
+        if self.bootstrap or self.bayes_loocv or self.just_analysis:
+            if len(self.perturbed_data) == 0:
+                # Add zero-mean noise to the observation data
+                self.perturbed_data = self._perturb_data(
+                    self.measured_data, output_names
+                    )
+            else:
+                self.n_bootstrap_itrs = len(self.perturbed_data)
+
+            # -------- Model Discrepancy -----------
+            if hasattr(self, 'error_model') and self.error_model \
+               and self.name.lower() != 'calib':
+                # Select posterior mean as MAP
+                MAP_theta = self.samples.mean(axis=0).reshape((1, n_params))
+                # MAP_theta = stats.mode(self.samples,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=MAP_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+            # -----------------------------------------------------
+            # ----- Loop over the perturbed observation data ------
+            # -----------------------------------------------------
+            # Initialize arrays
+            logLikelihoods = np.zeros((self.n_samples, self.n_bootstrap_itrs),
+                                      dtype=np.float16)  # NOTE(review): float16 severely limits log-likelihood precision — confirm
+            BME_Corr = np.zeros((self.n_bootstrap_itrs))
+            log_BME = np.zeros((self.n_bootstrap_itrs))
+            KLD = np.zeros((self.n_bootstrap_itrs))
+            inf_entropy = np.zeros((self.n_bootstrap_itrs))
+
+            # Compute the prior predictions
+            # Evaluate the MetaModel
+            if self.emulator:
+                y_hat, y_std = MetaModel.eval_metamodel(samples=self.samples)
+                self.__mean_pce_prior_pred = y_hat
+                self._std_pce_prior_pred = y_std
+
+                # Correct the predictions with Model discrepancy
+                if hasattr(self, 'error_model') and self.error_model:
+                    y_hat_corr, y_std = self.error_MetaModel.eval_model_error(
+                        self.bias_inputs, self.__mean_pce_prior_pred
+                        )
+                    self.__mean_pce_prior_pred = y_hat_corr
+                    self._std_pce_prior_pred = y_std
+
+                # Surrogate model's error using RMSE of test data
+                if hasattr(MetaModel, 'rmse'):
+                    surrError = MetaModel.rmse
+                else:
+                    surrError = None
+
+            else:
+                # Evaluate the original model
+                self.__model_prior_pred = self._eval_model(
+                    samples=self.samples, key='PriorPred'
+                    )
+                surrError = None
+
+            # Start the likelihood-BME computations for the perturbed data
+            for itr_idx, data in tqdm(
+                    enumerate(self.perturbed_data),
+                    total=self.n_bootstrap_itrs,
+                    desc="Bootstrapping the BME calculations", ascii=True
+                    ):
+
+                # ---------------- Likelihood calculation ----------------
+                if self.emulator:
+                    model_evals = self.__mean_pce_prior_pred
+                else:
+                    model_evals = self.__model_prior_pred
+
+                # Leave one out
+                if self.bayes_loocv or self.just_analysis:
+                    self.selected_indices = np.nonzero(data)[0]  # non-zero entries = retained points (LOOCV zeroes one)
+
+                # Prepare data dataframe
+                nobs = list(self.measured_data.count().values[1:])  # NOTE(review): drops the first column (index/time?) — confirm
+                numbers = list(np.cumsum(nobs))
+                indices = list(zip([0] + numbers, numbers))
+                data_dict = {
+                    output_names[i]: data[j:k] for i, (j, k) in
+                    enumerate(indices)
+                    }
+                #print(output_names)
+                #print(indices)
+                #print(numbers)
+                #print(nobs)
+                #print(self.measured_data)
+                #for i, (j, k) in enumerate(indices):
+                #    print(i,j,k)
+                #print(data)
+                #print(data_dict)
+                #stop
+
+                # Unknown sigma2
+                if opt_sigma == 'C' or hasattr(self, 'sigma2s'):
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        sigma2=self.sigma2s, std=surrError
+                        )
+                else:
+                    # known sigma2
+                    logLikelihoods[:, itr_idx] = self.normpdf(
+                        model_evals, data_dict, total_sigma2,
+                        std=surrError
+                        )
+
+                # ---------------- BME Calculations ----------------
+                # BME (log)
+                log_BME[itr_idx] = np.log(
+                    np.nanmean(np.exp(logLikelihoods[:, itr_idx],
+                                      dtype=np.longdouble))  # extended precision to curb overflow in exp
+                    )
+
+                # BME correction when using Emulator
+                if self.emulator:
+                    BME_Corr[itr_idx] = self.__corr_factor_BME(
+                        data_dict, total_sigma2, log_BME[itr_idx]
+                        )
+
+                # Rejection Step
+                if 'kld' in list(map(str.lower, self.valid_metrics)) and\
+                   'inf_entropy' in list(map(str.lower, self.valid_metrics)):  # NOTE(review): KLD is only computed when BOTH metrics are requested — confirm
+                    # Random numbers between 0 and 1
+                    unif = np.random.rand(1, self.n_samples)[0]
+
+                    # Reject the poorly performed prior
+                    Likelihoods = np.exp(logLikelihoods[:, itr_idx],
+                                         dtype=np.float64)
+                    accepted = (Likelihoods/np.max(Likelihoods)) >= unif
+                    posterior = self.samples[accepted]
+
+                    # Posterior-based expectation of likelihoods
+                    postExpLikelihoods = np.mean(
+                        logLikelihoods[:, itr_idx][accepted]
+                        )
+
+                    # Calculate Kullback-Leibler Divergence
+                    KLD[itr_idx] = postExpLikelihoods - log_BME[itr_idx]
+
+                # Posterior-based expectation of prior densities
+                if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):  # NOTE(review): uses `posterior`/`postExpLikelihoods` defined only in the branch above — NameError if 'kld' absent
+                    n_thread = int(0.875 * multiprocessing.cpu_count())
+                    with multiprocessing.Pool(n_thread) as p:
+                        postExpPrior = np.mean(np.concatenate(
+                            p.map(
+                                self.engine.ExpDesign.JDist.pdf,
+                                np.array_split(posterior.T, n_thread, axis=1))
+                            )
+                            )
+                    # Information Entropy based on Entropy paper Eq. 38
+                    inf_entropy[itr_idx] = log_BME[itr_idx] - postExpPrior - \
+                        postExpLikelihoods
+
+                # Clear memory
+                gc.collect(generation=2)
+
+            # ---------- Store metrics for perturbed data set ----------------
+            # Likelihoods (Size: n_samples, n_bootstrap_itr)
+            self.log_likes = logLikelihoods
+
+            # BME (log), KLD, infEntropy (Size: 1,n_bootstrap_itr)
+            self.log_BME = log_BME
+
+            # BMECorrFactor (log) (Size: 1,n_bootstrap_itr)
+            if self.emulator:
+                self.log_BME_corr_factor = BME_Corr
+
+            if 'kld' in list(map(str.lower, self.valid_metrics)):
+                self.KLD = KLD
+            if 'inf_entropy' in list(map(str.lower, self.valid_metrics)):
+                self.inf_entropy = inf_entropy
+
+            # BME = BME + BMECorrFactor
+            if self.emulator:
+                self.log_BME += self.log_BME_corr_factor
+
+        # ---------------- Parameter Bayesian inference ----------------
+        if self.inference_method.lower() == 'mcmc':
+            # Instantiate the MCMC object
+            MCMC_Obj = MCMC(self)
+            self.posterior_df = MCMC_Obj.run_sampler(
+                self.measured_data, total_sigma2
+                )
+
+        elif self.name.lower() == 'valid':
+            # Convert to a dataframe if samples are provided after calibration.
+            self.posterior_df = pd.DataFrame(self.samples, columns=par_names)
+
+        else:
+            # Rejection sampling
+            self.posterior_df = self._rejection_sampling()
+
+        # Provide posterior's summary
+        print('\n')
+        print('-'*15 + 'Posterior summary' + '-'*15)
+        pd.options.display.max_columns = None
+        pd.options.display.max_rows = None
+        print(self.posterior_df.describe())
+        print('-'*50)
+
+        # -------- Model Discrepancy -----------
+        if hasattr(self, 'error_model') and self.error_model \
+           and self.name.lower() == 'calib':
+            if self.inference_method.lower() == 'mcmc':
+                self.error_MetaModel = MCMC_Obj.error_MetaModel
+            else:
+                # Select posterior mean as MAP
+                if opt_sigma == "B":
+                    posterior_df = self.posterior_df.values
+                else:
+                    posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+
+                # Select posterior mean as Maximum a posteriori
+                map_theta = posterior_df.mean(axis=0).reshape((1, n_params))
+                # map_theta = stats.mode(Posterior_df,axis=0)[0]
+
+                # Evaluate the (meta-)model at the MAP
+                y_MAP, y_std_MAP = MetaModel.eval_metamodel(samples=map_theta)
+
+                # Train a GPR meta-model using MAP
+                self.error_MetaModel = MetaModel.create_model_error(
+                    self.bias_inputs, y_MAP, Name=self.name
+                    )
+
+        # -------- Posterior predictive -----------
+        self._posterior_predictive()
+
+        # -----------------------------------------------------
+        # ------------------ Visualization --------------------
+        # -----------------------------------------------------
+        # Create Output directory, if it doesn't exist already.
+        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
+        os.makedirs(out_dir, exist_ok=True)
+
+        # -------- Posterior parameters --------
+        if opt_sigma != "B":
+            par_names.extend(  # NOTE(review): mutates the shared ExpDesign.par_names list in place — confirm
+                [self.Discrepancy.InputDisc.Marginals[i].name for i
+                 in range(len(self.Discrepancy.InputDisc.Marginals))]
+                )
+        # Plot with the corner package
+        figPosterior = corner.corner(self.posterior_df.to_numpy(),
+                                     labels=par_names,
+                                     quantiles=[0.15, 0.5, 0.85],
+                                     show_titles=True,
+                                     title_fmt=self.corner_title_fmt,
+                                     labelpad=0.2,
+                                     use_math_text=True,
+                                     title_kwargs={"fontsize": 28},
+                                     plot_datapoints=False,
+                                     plot_density=False,
+                                     fill_contours=True,
+                                     smooth=0.5,
+                                     smooth1d=0.5)
+
+        # Loop over axes and set x limits
+        if opt_sigma == "B":
+            axes = np.array(figPosterior.axes).reshape(
+                (len(par_names), len(par_names))
+                )
+            for yi in range(len(par_names)):
+                ax = axes[yi, yi]
+                ax.set_xlim(self.engine.ExpDesign.bound_tuples[yi])
+                for xi in range(yi):
+                    ax = axes[yi, xi]
+                    ax.set_xlim(self.engine.ExpDesign.bound_tuples[xi])
+        plt.close()
+
+        # Turn off gridlines
+        for ax in figPosterior.axes:
+            ax.grid(False)
+
+        if self.emulator:
+            plotname = f'/Posterior_Dist_{Model.name}_emulator'
+        else:
+            plotname = f'/Posterior_Dist_{Model.name}'
+
+        figPosterior.set_size_inches((24, 16))
+        figPosterior.savefig(f'./{out_dir}{plotname}.pdf',
+                             bbox_inches='tight')
+
+        # -------- Plot MAP --------
+        if self.plot_map_pred:
+            self._plot_max_a_posteriori()
+
+        # -------- Plot log_BME dist --------
+        if self.bootstrap:
+
+            # Computing the TOM performance
+            self.log_BME_tom = stats.chi2.rvs(
+                self.n_tot_measurement, size=self.log_BME.shape[0]
+                )
+
+            fig, ax = plt.subplots()
+            sns.kdeplot(self.log_BME_tom, ax=ax, color="green", shade=True)  # NOTE(review): `shade` is deprecated in newer seaborn (use `fill`)
+            sns.kdeplot(
+                self.log_BME, ax=ax, color="blue", shade=True,
+                label='Model BME')
+
+            ax.set_xlabel('log$_{10}$(BME)')  # NOTE(review): label says log10 but values come from np.log (natural log) — confirm
+            ax.set_ylabel('Probability density')
+
+            legend_elements = [
+                Patch(facecolor='green', edgecolor='green', label='TOM BME'),
+                Patch(facecolor='blue', edgecolor='blue', label='Model BME')
+                ]
+            ax.legend(handles=legend_elements)
+
+            if self.emulator:
+                plotname = f'/BME_hist_{Model.name}_emulator'
+            else:
+                plotname = f'/BME_hist_{Model.name}'
+
+            plt.savefig(f'./{out_dir}{plotname}.pdf', bbox_inches='tight')
+            plt.show()
+            plt.close()
+
+        # -------- Posterior predictives --------
+        if self.plot_post_pred:
+            # Plot the posterior predictive
+            self._plot_post_predictive()
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def _perturb_data(self, data, output_names):
+        """
+        Returns an array with n_bootstrap_itrs rows of perturbed data.
+        The first row includes the original observation data.
+        If `self.bayes_loocv` is True, a 2d-array will be returned with
+        repeated rows and zero diagonal entries.
+
+        Parameters
+        ----------
+        data : pandas DataFrame
+            Observation data.
+        output_names : list
+            List of the output names.
+
+        Returns
+        -------
+        final_data : array
+            Perturbed data set.
+
+        """
+        noise_level = self.bootstrap_noise
+        obs_data = data[output_names].values
+        n_measurement, n_outs = obs_data.shape
+        self.n_tot_measurement = obs_data[~np.isnan(obs_data)].shape[0]
+        # Number of bootstrap iterations
+        if self.bayes_loocv:
+            self.n_bootstrap_itrs = self.n_tot_measurement  # one LOOCV iteration per scalar observation
+
+        # Pass loocv dataset
+        if self.bayes_loocv:
+            obs = obs_data.T[~np.isnan(obs_data.T)]
+            final_data = np.repeat(np.atleast_2d(obs), self.n_bootstrap_itrs,
+                                   axis=0)
+            np.fill_diagonal(final_data, 0)  # zero out one observation per row: the left-out point
+            return final_data
+
+        else:
+            final_data = np.zeros(
+                (self.n_bootstrap_itrs, self.n_tot_measurement)
+                )
+            final_data[0] = obs_data.T[~np.isnan(obs_data.T)]  # first row keeps the unperturbed observations
+            for itrIdx in range(1, self.n_bootstrap_itrs):
+                data = np.zeros((n_measurement, n_outs))
+                for idx in range(len(output_names)):
+                    std = np.nanstd(obs_data[:, idx])
+                    if std == 0:
+                        std = 0.001  # floor so constant outputs still get non-zero noise
+                    noise = std * noise_level
+                    data[:, idx] = np.add(
+                        obs_data[:, idx],
+                        np.random.normal(0, 1, obs_data.shape[0]) * noise,
+                    )
+
+                final_data[itrIdx] = data.T[~np.isnan(data.T)]
+
+            return final_data
+
+    # -------------------------------------------------------------------------
+    def _logpdf(self, x, mean, cov):
+        """
+        computes the likelihood based on a multivariate normal distribution.
+
+        Parameters
+        ----------
+        x : TYPE
+            DESCRIPTION.
+        mean : array_like
+            Observation data.
+        cov : 2d array
+            Covariance matrix of the distribution.
+
+        Returns
+        -------
+        log_lik : float
+            Log likelihood.
+
+        """
+        n = len(mean)
+        L = spla.cholesky(cov, lower=True)
+        beta = np.sum(np.log(np.diag(L)))
+        dev = x - mean
+        alpha = dev.dot(spla.cho_solve((L, True), dev))
+        log_lik = -0.5 * alpha - beta - n / 2. * np.log(2 * np.pi)
+        return log_lik
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key='MAP'):
+        """
+        Evaluates Forward Model.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets. The default is None.
+        key : str, optional
+            Key string to be passed to the run_model_parallel method.
+            The default is 'MAP'.
+
+        Returns
+        -------
+        model_outputs : dict
+            Model outputs.
+
+        """
+        MetaModel = self.MetaModel
+        Model = self.engine.Model
+
+        if samples is None:
+            self.samples = self.engine.ExpDesign.generate_samples(
+                self.n_samples, 'random')
+        else:
+            self.samples = samples
+            self.n_samples = len(samples)
+
+        model_outputs, _ = Model.run_model_parallel(
+            self.samples, key_str=key+self.name)
+
+        # Clean up
+        # Zip the subdirectories
+        try:
+            dir_name = f'{Model.name}MAP{self.name}'
+            key = dir_name + '_'
+            Model.zip_subdirs(dir_name, key)
+        except:
+            pass
+
+        return model_outputs
+
+    # -------------------------------------------------------------------------
+    def _kernel_rbf(self, X, hyperparameters):
+        """
+        Isotropic squared exponential kernel.
+
+        Higher l values lead to smoother functions and therefore to coarser
+        approximations of the training data. Lower l values make functions
+        more wiggly with wide uncertainty regions between training data points.
+
+        sigma_f controls the marginal variance of b(x)
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples_X, n_features)
+
+        hyperparameters : Dict
+            Lambda characteristic length
+            sigma_f controls the marginal variance of b(x)
+            sigma_0 unresolvable error nugget term, interpreted as random
+                    error that cannot be attributed to measurement error.
+        Returns
+        -------
+        var_cov_matrix : ndarray of shape (n_samples_X,n_samples_X)
+            Kernel k(X, X).
+
+        """
+        from sklearn.gaussian_process.kernels import RBF
+        min_max_scaler = preprocessing.MinMaxScaler()
+        X_minmax = min_max_scaler.fit_transform(X)
+
+        nparams = len(hyperparameters)
+        # characteristic length (0,1]
+        Lambda = hyperparameters[0]
+        # sigma_f controls the marginal variance of b(x)
+        sigma2_f = hyperparameters[1]
+
+        # cov_matrix = sigma2_f*rbf_kernel(X_minmax, gamma = 1/Lambda**2)
+
+        rbf = RBF(length_scale=Lambda)
+        cov_matrix = sigma2_f * rbf(X_minmax)
+        if nparams > 2:
+            # (unresolvable error) nugget term that is interpreted as random
+            # error that cannot be attributed to measurement error.
+            sigma2_0 = hyperparameters[2:]
+            for i, j in np.ndindex(cov_matrix.shape):
+                cov_matrix[i, j] += np.sum(sigma2_0) if i == j else 0
+
+        return cov_matrix
+
    # -------------------------------------------------------------------------
    def normpdf(self, outputs, obs_data, total_sigma2s, sigma2=None, std=None):
        """
        Calculates the likelihood of simulation outputs compared with
        observation data.

        Parameters
        ----------
        outputs : dict
            A dictionary containing the simulation outputs as array of shape
            (n_samples, n_measurement) for each model output.
        obs_data : dict
            A dictionary/dataframe containing the observation data.
        total_sigma2s : dict
            A dictionary with known values of the covariance diagonal entries,
            a.k.a sigma^2.
        sigma2 : array, optional
            An array of the sigma^2 samples, when the covariance diagonal
            entries are unknown and are being jointly inferred. The default is
            None.
        std : dict, optional
            A dictionary containing the root mean squared error as array of
            shape (n_samples, n_measurement) for each model output. The default
            is None.

        Returns
        -------
        logLik : array of shape (n_samples)
            Likelihoods.

        """
        Model = self.engine.Model
        logLik = 0.0

        # Extract the requested model outputs for likelihood calculation
        if self.req_outputs is None:
            req_outputs = Model.Output.names
        else:
            req_outputs = list(self.req_outputs)

        # Loop over the outputs; log-likelihoods accumulate across outputs
        for idx, out in enumerate(req_outputs):

            # (Meta)Model Output
            nsamples, nout = outputs[out].shape

            # Prepare data and remove NaN
            try:
                data = obs_data[out].values[~np.isnan(obs_data[out])]
            except AttributeError:
                data = obs_data[out][~np.isnan(obs_data[out])]

            # Prepare sigma2s
            non_nan_indices = ~np.isnan(total_sigma2s[out])
            tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout]

            # Add the std of the PCE if chosen as emulator.
            # (tot_sigma2s is a boolean-indexed copy, so this does not
            # mutate the caller's total_sigma2s.)
            if self.emulator:
                if std is not None:
                    tot_sigma2s += std[out]**2

            # Covariance Matrix (diagonal, from the known sigma2s)
            covMatrix = np.diag(tot_sigma2s)

            # Select the data points to compare
            # NOTE(review): bare except assumes selected_indices may be
            # missing or not indexable by `out` -- narrow if possible.
            try:
                indices = self.selected_indices[out]
            except:
                indices = list(range(nout))
            covMatrix = np.diag(covMatrix[indices, indices])

            # If sigma2 is not given, use given total_sigma2s.
            # Fast path: one vectorized logpdf call over all samples.
            if sigma2 is None:
                logLik += stats.multivariate_normal.logpdf(
                    outputs[out][:, indices], data[indices], covMatrix)
                continue

            # Loop over each run/sample and calculate logLikelihood
            logliks = np.zeros(nsamples)
            for s_idx in range(nsamples):

                # Simulation run
                tot_outputs = outputs[out]

                # Covariance Matrix, rebuilt per sample: it may be replaced
                # by a GP kernel or augmented by a per-sample sigma2 below
                covMatrix = np.diag(tot_sigma2s)

                if sigma2 is not None:
                    # Check the type error term
                    if hasattr(self, 'bias_inputs') and \
                       not hasattr(self, 'error_model'):
                        # Infer a Bias model using Gaussian Process Regression
                        bias_inputs = np.hstack(
                            (self.bias_inputs[out],
                             tot_outputs[s_idx].reshape(-1, 1)))

                        # Three hyperparameters (Lambda, sigma2_f, sigma2_0)
                        # per output are expected in the sigma2 sample
                        params = sigma2[s_idx, idx*3:(idx+1)*3]
                        covMatrix = self._kernel_rbf(bias_inputs, params)
                    else:
                        # Infer equal sigma2s
                        try:
                            sigma_2 = sigma2[s_idx, idx]
                        except TypeError:
                            sigma_2 = 0.0

                        covMatrix += sigma_2 * np.eye(nout)
                        # covMatrix = np.diag(sigma2 * total_sigma2s)

                # Select the data points to compare
                try:
                    indices = self.selected_indices[out]
                except:
                    indices = list(range(nout))
                covMatrix = np.diag(covMatrix[indices, indices])

                # Compute loglikelihood
                logliks[s_idx] = self._logpdf(
                    tot_outputs[s_idx, indices], data[indices], covMatrix
                    )

            logLik += logliks
        return logLik
+
    # -------------------------------------------------------------------------
    def _corr_factor_BME_old(self, Data, total_sigma2s, posterior):
        """
        Calculates the correction factor for BMEs.

        Legacy variant kept for reference; see `__corr_factor_BME` for the
        current implementation.

        Parameters
        ----------
        Data : array
            Flattened observation data (NaNs are removed below).
        total_sigma2s : array
            Known covariance diagonal entries (sigma^2), may contain NaNs.
        posterior : array of shape (n_posterior_samples, n_params)
            Posterior samples used to build a Gaussian KDE.

        Returns
        -------
        logweight : float
            Accumulated log correction weight.
        """
        MetaModel = self.MetaModel
        OrigModelOutput = self.engine.ExpDesign.Y
        Model = self.engine.Model

        # Posterior with Gaussian KDE likelihood
        postDist = stats.gaussian_kde(posterior.T)

        # Remove NaN
        Data = Data[~np.isnan(Data)]
        total_sigma2s = total_sigma2s[~np.isnan(total_sigma2s)]

        # Covariance Matrix
        covMatrix = np.diag(total_sigma2s[:self.n_tot_measurement])

        # Extract the requested model outputs for likelihood calculation
        if self.req_outputs is None:
            OutputType = Model.Output.names
        else:
            OutputType = list(self.req_outputs)

        # SampleSize = OrigModelOutput[OutputType[0]].shape[0]


        # Flatten the OutputType for OrigModel
        TotalOutputs = np.concatenate([OrigModelOutput[x] for x in OutputType], 1)

        NrofBayesSamples = self.n_samples
        # Evaluate MetaModel on the experimental design
        Samples = self.engine.ExpDesign.X
        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=Samples)

        # Reset the NrofSamples to NrofBayesSamples
        self.n_samples = NrofBayesSamples

        # Flatten the OutputType for MetaModel
        TotalPCEOutputs = np.concatenate([OutputRS[x] for x in OutputRS], 1)
        TotalPCEstdOutputRS= np.concatenate([stdOutputRS[x] for x in stdOutputRS], 1)

        logweight = 0
        for i, sample in enumerate(Samples):
            # Compute likelihood output vs RS, with the surrogate std
            # taken as the covariance of the surrogate error
            covMatrix = np.diag(TotalPCEstdOutputRS[i]**2)
            logLik = self._logpdf(TotalOutputs[i], TotalPCEOutputs[i], covMatrix)
            # Compute posterior likelihood of the collocation points
            logpostLik = np.log(postDist.pdf(sample[:, None]))[0]
            # Samples with -inf posterior density (outside KDE support)
            # are skipped
            if logpostLik != -np.inf:
                logweight += logLik + logpostLik
        return logweight
+
+    # -------------------------------------------------------------------------
+    def __corr_factor_BME(self, obs_data, total_sigma2s, logBME):
+        """
+        Calculates the correction factor for BMEs.
+        """
+        MetaModel = self.MetaModel
+        samples = self.engine.ExpDesign.X
+        model_outputs = self.engine.ExpDesign.Y
+        Model = self.engine.Model
+        n_samples = samples.shape[0]
+
+        # Extract the requested model outputs for likelihood calulation
+        output_names = Model.Output.names
+
+        # Evaluate MetaModel on the experimental design and ValidSet
+        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=samples)
+
+        logLik_data = np.zeros((n_samples))
+        logLik_model = np.zeros((n_samples))
+        # Loop over the outputs
+        for idx, out in enumerate(output_names):
+
+            # (Meta)Model Output
+            nsamples, nout = model_outputs[out].shape
+
+            # Prepare data and remove NaN
+            try:
+                data = obs_data[out].values[~np.isnan(obs_data[out])]
+            except AttributeError:
+                data = obs_data[out][~np.isnan(obs_data[out])]
+
+            # Prepare sigma2s
+            non_nan_indices = ~np.isnan(total_sigma2s[out])
+            tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout]
+
+            # Covariance Matrix
+            covMatrix_data = np.diag(tot_sigma2s)
+
+            for i, sample in enumerate(samples):
+
+                # Simulation run
+                y_m = model_outputs[out][i]
+
+                # Surrogate prediction
+                y_m_hat = OutputRS[out][i]
+
+                # CovMatrix with the surrogate error
+                covMatrix = np.eye(len(y_m)) * 1/(2*np.pi)
+
+                # Select the data points to compare
+                try:
+                    indices = self.selected_indices[out]
+                except:
+                    indices = list(range(nout))
+                covMatrix = np.diag(covMatrix[indices, indices])
+                covMatrix_data = np.diag(covMatrix_data[indices, indices])
+
+                # Compute likelilhood output vs data
+                logLik_data[i] += self._logpdf(
+                    y_m_hat[indices], data[indices],
+                    covMatrix_data
+                    )
+
+                # Compute likelilhood output vs surrogate
+                logLik_model[i] += self._logpdf(
+                    y_m_hat[indices], y_m[indices],
+                    covMatrix
+                    )
+
+        # Weight
+        logLik_data -= logBME
+        weights = np.mean(np.exp(logLik_model+logLik_data))
+
+        return np.log(weights)
+
+    # -------------------------------------------------------------------------
+    def _rejection_sampling(self):
+        """
+        Performs rejection sampling to update the prior distribution on the
+        input parameters.
+
+        Returns
+        -------
+        posterior : pandas.dataframe
+            Posterior samples of the input parameters.
+
+        """
+
+        MetaModel = self.MetaModel
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except:
+            sigma2_prior = None
+
+        # Check if the discrepancy is defined as a distribution:
+        samples = self.samples
+
+        if sigma2_prior is not None:
+            samples = np.hstack((samples, sigma2_prior))
+
+        # Take the first column of Likelihoods (Observation data without noise)
+        if self.just_analysis or self.bayes_loocv:
+            index = self.n_tot_measurement-1
+            likelihoods = np.exp(self.log_likes[:, index], dtype=np.longdouble)#np.float128)
+        else:
+            likelihoods = np.exp(self.log_likes[:, 0], dtype=np.longdouble)#np.float128)
+
+        n_samples = len(likelihoods)
+        norm_ikelihoods = likelihoods / np.max(likelihoods)
+
+        # Normalize based on min if all Likelihoods are zero
+        if all(likelihoods == 0.0):
+            likelihoods = self.log_likes[:, 0]
+            norm_ikelihoods = likelihoods / np.min(likelihoods)
+
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, n_samples)[0]
+
+        # Reject the poorly performed prior
+        accepted_samples = samples[norm_ikelihoods >= unif]
+
+        # Output the Posterior
+        par_names = self.engine.ExpDesign.par_names
+        if sigma2_prior is not None:
+            for name in self.Discrepancy.name:
+                par_names.append(name)
+
+        return pd.DataFrame(accepted_samples, columns=sigma2_prior)
+
+    # -------------------------------------------------------------------------
+    def _posterior_predictive(self):
+        """
+        Stores the prior- and posterior predictive samples, i.e. model
+        evaluations using the samples, into hdf5 files.
+
+        priorPredictive.hdf5 : Prior predictive samples.
+        postPredictive_wo_noise.hdf5 : Posterior predictive samples without
+        the additive noise.
+        postPredictive.hdf5 : Posterior predictive samples with the additive
+        noise.
+
+        Returns
+        -------
+        None.
+
+        """
+
+        MetaModel = self.MetaModel
+        Model = self.engine.Model
+
+        # Make a directory to save the prior/posterior predictive
+        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
+        os.makedirs(out_dir, exist_ok=True)
+
+        # Read observation data and perturb it if requested
+        if self.measured_data is None:
+            self.measured_data = Model.read_observation(case=self.name)
+
+        if not isinstance(self.measured_data, pd.DataFrame):
+            self.measured_data = pd.DataFrame(self.measured_data)
+
+        # X_values
+        x_values = self.engine.ExpDesign.x_values
+
+        try:
+            sigma2_prior = self.Discrepancy.sigma2_prior
+        except:
+            sigma2_prior = None
+
+        # Extract posterior samples
+        posterior_df = self.posterior_df
+
+        # Take care of the sigma2
+        if sigma2_prior is not None:
+            try:
+                sigma2s = posterior_df[self.Discrepancy.name].values
+                posterior_df = posterior_df.drop(
+                    labels=self.Discrepancy.name, axis=1
+                    )
+            except:
+                sigma2s = self.sigma2s
+
+        # Posterior predictive
+        if self.emulator:
+            if self.inference_method == 'rejection':
+                prior_pred = self.__mean_pce_prior_pred
+            if self.name.lower() != 'calib':
+                post_pred = self.__mean_pce_prior_pred
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred, post_pred_std = MetaModel.eval_metamodel(
+                    samples=posterior_df.values
+                    )
+
+        else:
+            if self.inference_method == 'rejection':
+                prior_pred = self.__model_prior_pred
+            if self.name.lower() != 'calib':
+                post_pred = self.__mean_pce_prior_pred,
+                post_pred_std = self._std_pce_prior_pred
+            else:
+                post_pred = self._eval_model(
+                    samples=posterior_df.values, key='PostPred'
+                    )
+        # Correct the predictions with Model discrepancy
+        if hasattr(self, 'error_model') and self.error_model:
+            y_hat, y_std = self.error_MetaModel.eval_model_error(
+                self.bias_inputs, post_pred
+                )
+            post_pred, post_pred_std = y_hat, y_std
+
+        # Add discrepancy from likelihood Sample to the current posterior runs
+        total_sigma2 = self.Discrepancy.total_sigma2
+        post_pred_withnoise = copy.deepcopy(post_pred)
+        for varIdx, var in enumerate(Model.Output.names):
+            for i in range(len(post_pred[var])):
+                pred = post_pred[var][i]
+
+                # Known sigma2s
+                clean_sigma2 = total_sigma2[var][~np.isnan(total_sigma2[var])]
+                tot_sigma2 = clean_sigma2[:len(pred)]
+                cov = np.diag(tot_sigma2)
+
+                # Check the type error term
+                if sigma2_prior is not None:
+                    # Inferred sigma2s
+                    if hasattr(self, 'bias_inputs') and \
+                       not hasattr(self, 'error_model'):
+                        # TODO: Infer a Bias model usig GPR
+                        bias_inputs = np.hstack((
+                            self.bias_inputs[var], pred.reshape(-1, 1)))
+                        params = sigma2s[i, varIdx*3:(varIdx+1)*3]
+                        cov = self._kernel_rbf(bias_inputs, params)
+                    else:
+                        # Infer equal sigma2s
+                        try:
+                            sigma2 = sigma2s[i, varIdx]
+                        except TypeError:
+                            sigma2 = 0.0
+
+                        # Convert biasSigma2s to a covMatrix
+                        cov += sigma2 * np.eye(len(pred))
+
+                if self.emulator:
+                    if hasattr(MetaModel, 'rmse') and \
+                       MetaModel.rmse is not None:
+                        stdPCE = MetaModel.rmse[var]
+                    else:
+                        stdPCE = post_pred_std[var][i]
+                    # Expected value of variance (Assump: i.i.d stds)
+                    cov += np.diag(stdPCE**2)
+
+                # Sample a multivariate normal distribution with mean of
+                # prediction and variance of cov
+                post_pred_withnoise[var][i] = np.random.multivariate_normal(
+                    pred, cov, 1
+                    )
+
+        # ----- Prior Predictive -----
+        if self.inference_method.lower() == 'rejection':
+            # Create hdf5 metadata
+            hdf5file = f'{out_dir}/priorPredictive.hdf5'
+            hdf5_exist = os.path.exists(hdf5file)
+            if hdf5_exist:
+                os.remove(hdf5file)
+            file = h5py.File(hdf5file, 'a')
+
+            # Store x_values
+            if type(x_values) is dict:
+                grp_x_values = file.create_group("x_values/")
+                for varIdx, var in enumerate(Model.Output.names):
+                    grp_x_values.create_dataset(var, data=x_values[var])
+            else:
+                file.create_dataset("x_values", data=x_values)
+
+            # Store posterior predictive
+            grpY = file.create_group("EDY/")
+            for varIdx, var in enumerate(Model.Output.names):
+                grpY.create_dataset(var, data=prior_pred[var])
+
+        # ----- Posterior Predictive only model evaluations -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+'/postPredictive_wo_noise.hdf5'
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, 'a')
+
+        # Store x_values
+        if type(x_values) is dict:
+            grp_x_values = file.create_group("x_values/")
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset("x_values", data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group("EDY/")
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred[var])
+
+        # ----- Posterior Predictive with noise -----
+        # Create hdf5 metadata
+        hdf5file = out_dir+'/postPredictive.hdf5'
+        hdf5_exist = os.path.exists(hdf5file)
+        if hdf5_exist:
+            os.remove(hdf5file)
+        file = h5py.File(hdf5file, 'a')
+
+        # Store x_values
+        if type(x_values) is dict:
+            grp_x_values = file.create_group("x_values/")
+            for varIdx, var in enumerate(Model.Output.names):
+                grp_x_values.create_dataset(var, data=x_values[var])
+        else:
+            file.create_dataset("x_values", data=x_values)
+
+        # Store posterior predictive
+        grpY = file.create_group("EDY/")
+        for varIdx, var in enumerate(Model.Output.names):
+            grpY.create_dataset(var, data=post_pred_withnoise[var])
+
+        return
+
+    # -------------------------------------------------------------------------
+    def _plot_max_a_posteriori(self):
+        """
+        Plots the response of the model output against that of the metamodel at
+        the maximum a posteriori sample (mean or mode of posterior.)
+
+        Returns
+        -------
+        None.
+
+        """
+
+        MetaModel = self.MetaModel
+        Model = self.engine.Model
+        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
+        opt_sigma = self.Discrepancy.opt_sigma
+
+        # -------- Find MAP and run MetaModel and origModel --------
+        # Compute the MAP
+        if self.max_a_posteriori.lower() == 'mean':
+            if opt_sigma == "B":
+                Posterior_df = self.posterior_df.values
+            else:
+                Posterior_df = self.posterior_df.values[:, :-Model.n_outputs]
+            map_theta = Posterior_df.mean(axis=0).reshape(
+                (1, MetaModel.n_params))
+        else:
+            map_theta = stats.mode(Posterior_df.values, axis=0)[0]
+        # Prin report
+        print("\nPoint estimator:\n", map_theta[0])
+
+        # Run the models for MAP
+        # MetaModel
+        map_metamodel_mean, map_metamodel_std = MetaModel.eval_metamodel(
+            samples=map_theta)
+        self.map_metamodel_mean = map_metamodel_mean
+        self.map_metamodel_std = map_metamodel_std
+
+        # origModel
+        map_orig_model = self._eval_model(samples=map_theta)
+        self.map_orig_model = map_orig_model
+
+        # Extract slicing index
+        x_values = map_orig_model['x_values']
+
+        # List of markers and colors
+        Color = ['k', 'b', 'g', 'r']
+        Marker = 'x'
+
+        # Create a PdfPages object
+        pdf = PdfPages(f'./{out_dir}MAP_PCE_vs_Model_{self.name}.pdf')
+        fig = plt.figure()
+        for i, key in enumerate(Model.Output.names):
+
+            y_val = map_orig_model[key]
+            y_pce_val = map_metamodel_mean[key]
+            y_pce_val_std = map_metamodel_std[key]
+
+            plt.plot(x_values, y_val, color=Color[i], marker=Marker,
+                     lw=2.0, label='$Y_{MAP}^{M}$')
+
+            plt.plot(
+                x_values, y_pce_val[i], color=Color[i], lw=2.0,
+                marker=Marker, linestyle='--', label='$Y_{MAP}^{PCE}$'
+                )
+            # plot the confidence interval
+            plt.fill_between(
+                x_values, y_pce_val[i] - 1.96*y_pce_val_std[i],
+                y_pce_val[i] + 1.96*y_pce_val_std[i],
+                color=Color[i], alpha=0.15
+                )
+
+            # Calculate the adjusted R_squared and RMSE
+            R2 = r2_score(y_pce_val.reshape(-1, 1), y_val.reshape(-1, 1))
+            rmse = np.sqrt(mean_squared_error(y_pce_val, y_val))
+
+            plt.ylabel(key)
+            plt.xlabel("Time [s]")
+            plt.title(f'Model vs MetaModel {key}')
+
+            ax = fig.axes[0]
+            leg = ax.legend(loc='best', frameon=True)
+            fig.canvas.draw()
+            p = leg.get_window_extent().inverse_transformed(ax.transAxes)
+            ax.text(
+                p.p0[1]-0.05, p.p1[1]-0.25,
+                f'RMSE = {rmse:.3f}\n$R^2$ = {R2:.3f}',
+                transform=ax.transAxes, color='black',
+                bbox=dict(facecolor='none', edgecolor='black',
+                          boxstyle='round,pad=1'))
+
+            plt.show()
+
+            # save the current figure
+            pdf.savefig(fig, bbox_inches='tight')
+
+            # Destroy the current plot
+            plt.clf()
+
+        pdf.close()
+
    # -------------------------------------------------------------------------
    def _plot_post_predictive(self):
        """
        Plots the posterior predictives against the observation data.

        Reads the predictive samples from the hdf5 files written by
        `_posterior_predictive` and saves one figure per model output.

        Returns
        -------
        None.

        """

        Model = self.engine.Model
        out_dir = f'Outputs_Bayes_{Model.name}_{self.name}'
        # Plot the posterior predictive
        for out_idx, out_name in enumerate(Model.Output.names):
            fig, ax = plt.subplots()
            with sns.axes_style("ticks"):
                # First column of the observation dataframe holds the x axis
                x_key = list(self.measured_data)[0]

                # --- Read prior and posterior predictive ---
                if self.inference_method == 'rejection' and \
                   self.name.lower() != 'valid':
                    #  --- Prior ---
                    # Load posterior predictive
                    f = h5py.File(
                        f'{out_dir}/priorPredictive.hdf5', 'r+')

                    # x_values are stored per output only when written as
                    # a dict
                    try:
                        x_coords = np.array(f[f"x_values/{out_name}"])
                    except:
                        x_coords = np.array(f["x_values"])

                    # Cap the visualized prior samples at 10000
                    X_values = np.repeat(x_coords, 10000)

                    prior_pred_df = {}
                    prior_pred_df[x_key] = X_values
                    prior_pred_df[out_name] = np.array(
                        f[f"EDY/{out_name}"])[:10000].flatten('F')
                    prior_pred_df = pd.DataFrame(prior_pred_df)

                    tags_post = ['prior'] * len(prior_pred_df)
                    prior_pred_df.insert(
                        len(prior_pred_df.columns), "Tags", tags_post,
                        True)
                    f.close()

                    # --- Posterior ---
                    f = h5py.File(f"{out_dir}/postPredictive.hdf5", 'r+')

                    X_values = np.repeat(
                        x_coords, np.array(f[f"EDY/{out_name}"]).shape[0])

                    post_pred_df = {}
                    post_pred_df[x_key] = X_values
                    post_pred_df[out_name] = np.array(
                        f[f"EDY/{out_name}"]).flatten('F')

                    post_pred_df = pd.DataFrame(post_pred_df)

                    tags_post = ['posterior'] * len(post_pred_df)
                    post_pred_df.insert(
                        len(post_pred_df.columns), "Tags", tags_post, True)
                    f.close()
                    # Concatenate two dataframes based on x_values
                    frames = [prior_pred_df, post_pred_df]
                    all_pred_df = pd.concat(frames)

                    # --- Plot posterior predictive ---
                    # NOTE(review): the positional first argument to
                    # violinplot is deprecated in newer seaborn -- confirm
                    # the pinned seaborn version accepts it.
                    sns.violinplot(
                        x_key, y=out_name, data=all_pred_df, hue="Tags",
                        legend=False, ax=ax, split=True, inner=None,
                        color=".8")

                    # --- Plot Data ---
                    # Find the x,y coordinates for each point
                    x_coords = np.arange(x_coords.shape[0])
                    first_header = list(self.measured_data)[0]
                    obs_data = self.measured_data.round({first_header: 6})
                    sns.pointplot(
                        x=first_header, y=out_name, color='g', markers='x',
                        linestyles='', capsize=16, data=obs_data, ax=ax)

                    # 95% error bars from the measurement error
                    ax.errorbar(
                        x_coords, obs_data[out_name].values,
                        yerr=1.96*self.measurement_error[out_name],
                        ecolor='g', fmt=' ', zorder=-1)

                    # Add labels to the legend
                    handles, labels = ax.get_legend_handles_labels()
                    labels.append('Data')

                    data_marker = mlines.Line2D(
                        [], [], color='lime', marker='+', linestyle='None',
                        markersize=10)
                    handles.append(data_marker)

                    # Add legend
                    ax.legend(handles=handles, labels=labels, loc='best',
                              fontsize='large', frameon=True)

                else:
                    # Load posterior predictive
                    f = h5py.File(f"{out_dir}/postPredictive.hdf5", 'r+')

                    try:
                        x_coords = np.array(f[f"x_values/{out_name}"])
                    except:
                        x_coords = np.array(f["x_values"])

                    # Mean and std over the predictive samples
                    mu = np.mean(np.array(f[f"EDY/{out_name}"]), axis=0)
                    std = np.std(np.array(f[f"EDY/{out_name}"]), axis=0)

                    # --- Plot posterior predictive ---
                    plt.plot(
                        x_coords, mu, marker='o', color='b',
                        label='Mean Post. Predictive')
                    # 95% credible band around the mean
                    plt.fill_between(
                        x_coords, mu-1.96*std, mu+1.96*std, color='b',
                        alpha=0.15)

                    # --- Plot Data ---
                    ax.plot(
                        x_coords, self.measured_data[out_name].values,
                        'ko', label='data', markeredgecolor='w')

                    # --- Plot ExpDesign ---
                    orig_ED_Y = self.engine.ExpDesign.Y[out_name]
                    for output in orig_ED_Y:
                        plt.plot(
                            x_coords, output, color='grey', alpha=0.15
                            )

                    # Add labels for axes
                    plt.xlabel('Time [s]')
                    plt.ylabel(out_name)

                    # Add labels to the legend
                    handles, labels = ax.get_legend_handles_labels()

                    patch = Patch(color='b', alpha=0.15)
                    handles.insert(1, patch)
                    labels.insert(1, '95 $\\%$ CI')

                    # Add legend
                    ax.legend(handles=handles, labels=labels, loc='best',
                              frameon=True)

                # Save figure in pdf format
                if self.emulator:
                    plotname = f'/Post_Prior_Perd_{Model.name}_emulator'
                else:
                    plotname = f'/Post_Prior_Perd_{Model.name}'

                fig.savefig(f'./{out_dir}{plotname}_{out_name}.pdf',
                            bbox_inches='tight')
diff --git a/build/lib/bayesvalidrox/bayes_inference/bayes_model_comparison.py b/build/lib/bayesvalidrox/bayes_inference/bayes_model_comparison.py
new file mode 100644
index 000000000..828613556
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayes_inference/bayes_model_comparison.py
@@ -0,0 +1,654 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import os
+from scipy import stats
+import seaborn as sns
+import matplotlib.patches as patches
+import matplotlib.colors as mcolors
+import matplotlib.pylab as plt
+from .bayes_inference import BayesInference
+
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           '../', 'bayesvalidrox.mplstyle'))
+
+
+class BayesModelComparison:
+    """
+    A class to perform Bayesian Analysis.
+
+
+    Attributes
+    ----------
+    justifiability : bool, optional
+        Whether to perform the justifiability analysis. The default is
+        `True`.
+    perturbed_data : array of shape (n_bootstrap_itrs, n_obs), optional
+        User defined perturbed data. The default is `None`.
+    n_bootstarp : int
+        Number of bootstrap iteration. The default is `1000`.
+    data_noise_level : float
+        A noise level to perturb the data set. The default is `0.01`.
+    just_n_meas : int
+        Number of measurements considered for visualization of the
+        justifiability results.
+
+    """
+
+    def __init__(self, justifiability=True, perturbed_data=None,
+                 n_bootstarp=1000, data_noise_level=0.01, just_n_meas=2):
+
+        self.justifiability = justifiability
+        self.perturbed_data = perturbed_data
+        self.n_bootstarp = n_bootstarp
+        self.data_noise_level = data_noise_level
+        self.just_n_meas = just_n_meas
+
+    # --------------------------------------------------------------------------
+    def create_model_comparison(self, model_dict, opts_dict):
+        """
+        Starts the two-stage model comparison.
+        Stage I: Compare models using Bayes factors.
+        Stage II: Compare models via justifiability analysis.
+
+        Parameters
+        ----------
+        model_dict : dict
+            A dictionary including the metamodels.
+        opts_dict : dict
+            A dictionary given the `BayesInference` options.
+
+            Example:
+
+                >>> opts_bootstrap = {
+                    "bootstrap": True,
+                    "n_samples": 10000,
+                    "Discrepancy": DiscrepancyOpts,
+                    "emulator": True,
+                    "plot_post_pred": True
+                    }
+
+        Returns
+        -------
+        output : dict
+            A dictionary containing the objects and the model weights for the
+            comparison using Bayes factors and justifiability analysis.
+
+        """
+
+        # Bayes factor
+        bayes_dict_bf, model_weights_dict_bf = self.compare_models(
+            model_dict, opts_dict
+            )
+
+        output = {
+            'Bayes objects BF': bayes_dict_bf,
+            'Model weights BF': model_weights_dict_bf
+            }
+
+        # Justifiability analysis
+        if self.justifiability:
+            bayes_dict_ja, model_weights_dict_ja = self.compare_models(
+                model_dict, opts_dict, justifiability=True
+                )
+
+            output['Bayes objects JA'] = bayes_dict_ja
+            output['Model weights JA'] = model_weights_dict_ja
+
+        return output
+
+    # --------------------------------------------------------------------------
    def compare_models(self, model_dict, opts_dict, justifiability=False):
        """
        Instantiates the `BayesInference` class for each model, forwards the
        options from `opts_dict` to it and starts the computations.
        It also creates a folder and saves the diagrams, e.g., Bayes factor
        plot, confusion matrix, etc.

        Parameters
        ----------
        model_dict : dict
            A dictionary including the metamodels.
        opts_dict : dict
            A dictionary given the `BayesInference` options.
        justifiability : bool, optional
            Whether to perform the justifiability analysis. The default is
            `False`.

        Returns
        -------
        bayes_dict : dict
            A dictionary with `BayesInference` objects.
        model_weights_dict : dict
            A dictionary containing the model weights.

        """

        if not isinstance(model_dict, dict):
            raise Exception("To run model comparsion, you need to pass a "
                            "dictionary of models.")

        # Extract model names
        self.model_names = [*model_dict]

        # Compute total number of the measurement points.
        # read_observation() populates Model.n_obs as a side effect.
        Engine = list(model_dict.items())[0][1]
        Engine.Model.read_observation()
        self.n_meas = Engine.Model.n_obs

        # ----- Generate data -----
        # Find n_bootstrap: user-provided perturbed data overrides the
        # requested number of bootstrap iterations.
        if self.perturbed_data is None:
            n_bootstarp = self.n_bootstarp
        else:
            n_bootstarp = self.perturbed_data.shape[0]

        # Create dataset
        justData = self.generate_dataset(
            model_dict, justifiability, n_bootstarp=n_bootstarp)

        # Run create Interface for each model
        bayes_dict = {}
        for model in model_dict.keys():
            print("-"*20)
            print("Bayesian inference of {}.\n".format(model))

            BayesOpts = BayesInference(model_dict[model])

            # Set BayesInference options: only keys that already exist as
            # attributes on the BayesInference object are forwarded.
            for key, value in opts_dict.items():
                if key in BayesOpts.__dict__.keys():
                    if key == "Discrepancy" and isinstance(value, dict):
                        # Per-model discrepancy objects are keyed by model.
                        setattr(BayesOpts, key, value[model])
                    else:
                        setattr(BayesOpts, key, value)

            # Pass justifiability data as perturbed data
            BayesOpts.perturbed_data = justData
            BayesOpts.just_analysis = justifiability

            bayes_dict[model] = BayesOpts.create_inference()
            print("-"*20)

        # Compute model weights. longdouble guards against overflow of
        # exp() for large log-BME values.
        BME_Dict = dict()
        for modelName, bayesObj in bayes_dict.items():
            BME_Dict[modelName] = np.exp(bayesObj.log_BME, dtype=np.longdouble)#float128)

        # BME correction in BayesInference class
        model_weights = self.cal_model_weight(
            BME_Dict, justifiability, n_bootstarp=n_bootstarp)

        # Plot model weights
        if justifiability:
            # NOTE(review): insert() mutates self.model_names in place, so
            # 'Observation' stays prepended on the instance afterwards; the
            # plotting helpers appear to rely on this ordering — verify
            # before changing.
            model_names = self.model_names
            model_names.insert(0, 'Observation')

            # Split the model weights and save in a dict.
            # NOTE(review): shape[1]/self.n_meas is a float; newer NumPy
            # requires an integer section count for np.split — confirm.
            list_ModelWeights = np.split(
                model_weights, model_weights.shape[1]/self.n_meas, axis=1)
            model_weights_dict = {key: weights for key, weights in
                                  zip(model_names, list_ModelWeights)}

            #self.plot_just_analysis(model_weights_dict)
        else:
            # Create box plot for model weights
            self.plot_model_weights(model_weights, 'model_weights')

            # Create kde plot for bayes factors
            self.plot_bayes_factor(BME_Dict, 'kde_plot')

            # Store model weights in a dict
            model_weights_dict = {key: weights for key, weights in
                                  zip(self.model_names, model_weights)}

        return bayes_dict, model_weights_dict
+
+    # -------------------------------------------------------------------------
    def generate_dataset(self, model_dict, justifiability=False,
                         n_bootstarp=1):
        """
        Generates the perturbed data set for the Bayes factor calculations and
        the data set for the justifiability analysis.

        Parameters
        ----------
        model_dict : dict
            A dictionary including the metamodels.
        justifiability : bool, optional
            Whether to perform the justifiability analysis. The default is
            `False`.
        n_bootstarp : int, optional
            Number of bootstrap iterations. The default is `1`.

        Returns
        -------
        all_just_data: array
            Created data set.

        """
        # Compute some variables
        all_just_data = []
        Engine = list(model_dict.items())[0][1]
        out_names = Engine.Model.Output.names

        # Perturb observations for Bayes Factor. Cached on the instance so
        # both comparison stages reuse the same perturbed data.
        if self.perturbed_data is None:
            self.perturbed_data = self.__perturb_data(
                    Engine.Model.observations, out_names, n_bootstarp,
                    noise_level=self.data_noise_level)

        # Only for Bayes Factor
        if not justifiability:
            return self.perturbed_data

        # Evaluate metamodel
        runs = {}
        for key, metaModel in model_dict.items():
            y_hat, _ = metaModel.eval_metamodel(nsamples=n_bootstarp)
            runs[key] = y_hat

        # Generate data
        for i in range(n_bootstarp):
            y_data = self.perturbed_data[i].reshape(1, -1)
            # np.tril(np.repeat(...)) builds one row per measurement point
            # with later points zeroed — presumably so weights can be
            # evaluated as a function of the number of measurements
            # (TODO confirm against the justifiability plotting).
            justData = np.tril(np.repeat(y_data, y_data.shape[1], axis=0))
            # Use surrogate runs for data-generating process
            for key, metaModel in model_dict.items():
                model_data = np.array(
                    [runs[key][out][i] for out in out_names]).reshape(y_data.shape)
                justData = np.vstack((
                    justData,
                    np.tril(np.repeat(model_data, model_data.shape[1], axis=0))
                    ))
            # Save in a list
            all_just_data.append(justData)

        # Squeeze the array: interleave bootstrap iterations so rows of the
        # same measurement index end up adjacent.
        all_just_data = np.array(all_just_data).transpose(1, 0, 2).reshape(
            -1, np.array(all_just_data).shape[2]
            )

        return all_just_data
+
+    # -------------------------------------------------------------------------
+    def __perturb_data(self, data, output_names, n_bootstrap, noise_level):
+        """
+        Returns an array with n_bootstrap_itrs rowsof perturbed data.
+        The first row includes the original observation data.
+        If `self.bayes_loocv` is True, a 2d-array will be returned with
+        repeated rows and zero diagonal entries.
+
+        Parameters
+        ----------
+        data : pandas DataFrame
+            Observation data.
+        output_names : list
+            List of the output names.
+
+        Returns
+        -------
+        final_data : array
+            Perturbed data set.
+
+        """
+        obs_data = data[output_names].values
+        n_measurement, n_outs = obs_data.shape
+        n_tot_measurement = obs_data[~np.isnan(obs_data)].shape[0]
+        final_data = np.zeros(
+            (n_bootstrap, n_tot_measurement)
+            )
+        final_data[0] = obs_data.T[~np.isnan(obs_data.T)]
+        for itrIdx in range(1, n_bootstrap):
+            data = np.zeros((n_measurement, n_outs))
+            for idx in range(len(output_names)):
+                std = np.nanstd(obs_data[:, idx])
+                if std == 0:
+                    std = 0.001
+                noise = std * noise_level
+                data[:, idx] = np.add(
+                    obs_data[:, idx],
+                    np.random.normal(0, 1, obs_data.shape[0]) * noise,
+                )
+
+            final_data[itrIdx] = data.T[~np.isnan(data.T)]
+
+        return final_data
+
+    # -------------------------------------------------------------------------
+    def cal_model_weight(self, BME_Dict, justifiability=False, n_bootstarp=1):
+        """
+        Normalize the BME (Asumption: Model Prior weights are equal for models)
+
+        Parameters
+        ----------
+        BME_Dict : dict
+            A dictionary containing the BME values.
+
+        Returns
+        -------
+        model_weights : array
+            Model weights.
+
+        """
+        # Stack the BME values for all models
+        all_BME = np.vstack(list(BME_Dict.values()))
+
+        if justifiability:
+            # Compute expected log_BME for justifiabiliy analysis
+            all_BME = all_BME.reshape(
+                all_BME.shape[0], -1, n_bootstarp).mean(axis=2)
+
+        # Model weights
+        model_weights = np.divide(all_BME, np.nansum(all_BME, axis=0))
+
+        return model_weights
+
+    # -------------------------------------------------------------------------
    def plot_just_analysis(self, model_weights_dict):
        """
        Visualizes the confusion matrix and the model weights for the
        justifiability analysis.

        Parameters
        ----------
        model_weights_dict : dict
            Model weights.

        Returns
        -------
        None.

        """

        directory = 'Outputs_Comparison/'
        os.makedirs(directory, exist_ok=True)
        Color = [*mcolors.TABLEAU_COLORS]
        names = [*model_weights_dict]

        # '$-$' renders underscores safely in math-mode labels.
        model_names = [model.replace('_', '$-$') for model in self.model_names]
        # One weight-vs-measurement-count figure per data-generating model.
        # NOTE(review): assumes self.model_names[0] is 'Observation' (as
        # prepended by compare_models) since model_names[1:] is used here.
        for name in names:
            fig, ax = plt.subplots()
            for i, model in enumerate(model_names[1:]):
                plt.plot(list(range(1, self.n_meas+1)),
                         model_weights_dict[name][i],
                         color=Color[i], marker='o',
                         ms=10, linewidth=2, label=model
                         )

            plt.title(f"Data generated by: {name.replace('_', '$-$')}")
            plt.ylabel("Weights")
            plt.xlabel("No. of measurement points")
            ax.set_xticks(list(range(1, self.n_meas+1)))
            plt.legend(loc="best")
            fig.savefig(
                f'{directory}modelWeights_{name}.svg', bbox_inches='tight'
                )
            plt.close()

        # Confusion matrix for some measurement points
        # (every self.just_n_meas-th point; epsilon makes the last point
        # reachable when the stride is > 1).
        epsilon = 1 if self.just_n_meas != 1 else 0
        for index in range(0, self.n_meas+epsilon, self.just_n_meas):
            weights = np.array(
                [model_weights_dict[key][:, index] for key in model_weights_dict]
                )
            g = sns.heatmap(
                weights.T, annot=True, cmap='Blues', xticklabels=model_names,
                yticklabels=model_names[1:], annot_kws={"size": 24}
                )

            # x axis on top
            g.xaxis.tick_top()
            g.xaxis.set_label_position('top')
            g.set_xlabel(r"\textbf{Data generated by:}", labelpad=15)
            g.set_ylabel(r"\textbf{Model weight for:}", labelpad=15)
            g.figure.savefig(
                f"{directory}confusionMatrix_ND_{index+1}.pdf",
                bbox_inches='tight'
                )
            plt.close()
+
+    # -------------------------------------------------------------------------
    def plot_model_weights(self, model_weights, plot_name):
        """
        Visualizes the model weights resulting from BMS via the observation
        data as a box plot (one box per model, outliers hidden), saved to
        ``Outputs_Comparison/<plot_name>.pdf``.

        Parameters
        ----------
        model_weights : array
            Model weights, one row per model.
        plot_name : str
            Plot name.

        Returns
        -------
        None.

        """
        font_size = 40
        # mkdir for plots
        directory = 'Outputs_Comparison/'
        os.makedirs(directory, exist_ok=True)

        # Create figure
        fig, ax = plt.subplots()

        # Filter data using np.isnan: boxplot cannot handle NaNs, so each
        # model's row is reduced to its non-NaN entries.
        mask = ~np.isnan(model_weights.T)
        filtered_data = [d[m] for d, m in zip(model_weights, mask.T)]

        # Create the boxplot
        bp = ax.boxplot(filtered_data, patch_artist=True, showfliers=False)

        # change outline color, fill color and linewidth of the boxes
        for box in bp['boxes']:
            # change outline color
            box.set(color='#7570b3', linewidth=4)
            # change fill color
            box.set(facecolor='#1b9e77')

        # change color and linewidth of the whiskers
        for whisker in bp['whiskers']:
            whisker.set(color='#7570b3', linewidth=2)

        # change color and linewidth of the caps
        for cap in bp['caps']:
            cap.set(color='#7570b3', linewidth=2)

        # change color and linewidth of the medians
        for median in bp['medians']:
            median.set(color='#b2df8a', linewidth=2)

        # change the style of fliers and their fill
        # for flier in bp['fliers']:
        #     flier.set(marker='o', color='#e7298a', alpha=0.75)

        # Custom x-axis labels ('$-$' renders underscores in math mode)
        model_names = [model.replace('_', '$-$') for model in self.model_names]
        ax.set_xticklabels(model_names)

        ax.set_ylabel('Weight', fontsize=font_size)

        # Title
        plt.title('Posterior Model Weights')

        # Set y lim (weights live in [0, 1]; small margin for readability)
        ax.set_ylim((-0.05, 1.05))

        # Set size of the ticks
        for t in ax.get_xticklabels():
            t.set_fontsize(font_size)
        for t in ax.get_yticklabels():
            t.set_fontsize(font_size)

        # Save the figure
        fig.savefig(
            f'./{directory}{plot_name}.pdf', bbox_inches='tight'
            )

        plt.close()
+
+    # -------------------------------------------------------------------------
    def plot_bayes_factor(self, BME_Dict, plot_name=''):
        """
        Plots the Bayes factor distributions in a :math:`N_m \\times N_m`
        matrix, where :math:`N_m` is the number of the models.

        Off-diagonal tiles show a normalized KDE of
        :math:`\\log_{10}` BF(model_i / model_j); diagonal tiles carry the
        model name. The figure is saved to
        ``Outputs_Comparison/Bayes_Factor<plot_name>.pdf``.

        Parameters
        ----------
        BME_Dict : dict
            A dictionary containing the BME values of the models.
        plot_name : str, optional
            Plot name. The default is ''.

        Returns
        -------
        None.

        """

        font_size = 40

        # mkdir for plots
        directory = 'Outputs_Comparison/'
        os.makedirs(directory, exist_ok=True)

        Colors = ["blue", "green", "gray", "brown"]

        model_names = list(BME_Dict.keys())
        nModels = len(model_names)

        # Plots
        fig, axes = plt.subplots(
            nrows=nModels, ncols=nModels, sharex=True, sharey=True
            )

        for i, key_i in enumerate(model_names):

            for j, key_j in enumerate(model_names):
                ax = axes[i, j]
                # Set size of the ticks
                for t in ax.get_xticklabels():
                    t.set_fontsize(font_size)
                for t in ax.get_yticklabels():
                    t.set_fontsize(font_size)

                if j != i:

                    # Null hypothesis: key_j is the better model
                    BayesFactor = np.log10(
                        np.divide(BME_Dict[key_i], BME_Dict[key_j])
                        )

                    # sns.kdeplot(BayesFactor, ax=ax, color=Colors[i], shade=True)
                    # sns.histplot(BayesFactor, ax=ax, stat="probability",
                    #              kde=True, element='step',
                    #              color=Colors[j])

                    # taken from seaborn's source code (utils.py and
                    # distributions.py)
                    def seaborn_kde_support(data, bw, gridsize, cut, clip):
                        # Grid over the data range widened by bw*cut on each
                        # side, truncated to the optional clip interval.
                        if clip is None:
                            clip = (-np.inf, np.inf)
                        support_min = max(data.min() - bw * cut, clip[0])
                        support_max = min(data.max() + bw * cut, clip[1])
                        return np.linspace(support_min, support_max, gridsize)

                    kde_estim = stats.gaussian_kde(
                        BayesFactor, bw_method='scott'
                        )

                    # manual linearization of data
                    # linearized = np.linspace(
                    #     quotient.min(), quotient.max(), num=500)

                    # or better: mimic seaborn's internal stuff
                    bw = kde_estim.scotts_factor() * np.std(BayesFactor)
                    linearized = seaborn_kde_support(
                        BayesFactor, bw, 100, 3, None)

                    # computes values of the estimated function on the
                    # estimated linearized inputs
                    Z = kde_estim.evaluate(linearized)

                    # https://stackoverflow.com/questions/29661574/normalize-
                    # numpy-array-columns-in-python
                    def normalize(x):
                        # Rescale to [0, 1] (min-max normalization).
                        return (x - x.min(0)) / x.ptp(0)

                    # normalize so it is between 0;1
                    Z2 = normalize(Z)
                    ax.plot(linearized, Z2, "-", color=Colors[i], linewidth=4)
                    ax.fill_between(
                        linearized, 0, Z2, color=Colors[i], alpha=0.25
                        )

                    # Draw BF significant levels according to Jeffreys 1961
                    # Strong evidence for both models
                    ax.axvline(
                        x=np.log10(3), ymin=0, linewidth=4, color='dimgrey'
                        )
                    # Strong evidence for one model
                    ax.axvline(
                        x=np.log10(10), ymin=0, linewidth=4, color='orange'
                        )
                    # Decisive evidence for one model
                    ax.axvline(
                        x=np.log10(100), ymin=0, linewidth=4, color='r'
                        )

                    # legend
                    BF_label = key_i.replace('_', '$-$') + \
                        '/' + key_j.replace('_', '$-$')
                    legend_elements = [
                        patches.Patch(facecolor=Colors[i], edgecolor=Colors[i],
                                      label=f'BF({BF_label})')
                        ]
                    ax.legend(
                        loc='upper left', handles=legend_elements,
                        fontsize=font_size-(nModels+1)*5
                        )

                elif j == i:
                    # build a rectangle in axes coords
                    left, width = 0, 1
                    bottom, height = 0, 1

                    # axes coordinates are 0,0 is bottom left and 1,1 is upper
                    # right
                    p = patches.Rectangle(
                        (left, bottom), width, height, color='white',
                        fill=True, transform=ax.transAxes, clip_on=False
                        )
                    ax.grid(False)
                    ax.add_patch(p)
                    # ax.text(0.5*(left+right), 0.5*(bottom+top), key_i,
                    fsize = font_size+20 if nModels < 4 else font_size
                    ax.text(0.5, 0.5, key_i.replace('_', '$-$'),
                            horizontalalignment='center',
                            verticalalignment='center',
                            fontsize=fsize, color=Colors[i],
                            transform=ax.transAxes)

        # Defining custom 'ylim' values.
        custom_ylim = (0, 1.05)

        # Setting the values for all axes.
        plt.setp(axes, ylim=custom_ylim)

        # set labels
        for i in range(nModels):
            axes[-1, i].set_xlabel('log$_{10}$(BF)', fontsize=font_size)
            axes[i, 0].set_ylabel('Probability', fontsize=font_size)

        # Adjust subplots
        plt.subplots_adjust(wspace=0.2, hspace=0.1)

        plt.savefig(
            f'./{directory}Bayes_Factor{plot_name}.pdf', bbox_inches='tight'
            )

        plt.close()
diff --git a/build/lib/bayesvalidrox/bayes_inference/discrepancy.py b/build/lib/bayesvalidrox/bayes_inference/discrepancy.py
new file mode 100644
index 000000000..fff32a250
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayes_inference/discrepancy.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import scipy.stats as stats
+from bayesvalidrox.surrogate_models.exp_designs import ExpDesigns
+
+
class Discrepancy:
    """
    Discrepancy class for Bayesian inference method.
    We define the reference or reality to be equal to what we can model and a
    discrepancy term \\( \\epsilon \\). We consider the following format:

    $$\\textbf{y}_{\\text{reality}} = \\mathcal{M}(\\theta) + \\epsilon,$$

    where \\( \\epsilon \\in R^{N_{out}} \\) represents the effects of
    measurement error and model inaccuracy. For simplicity, it can be defined
    as an additive Gaussian discrepancy with zero mean and given covariance
    matrix \\( \\Sigma \\):

    $$\\epsilon \\sim \\mathcal{N}(\\epsilon|0, \\Sigma). $$

    In the context of model inversion or calibration, an observation point
    \\( \\textbf{y}_i \\in \\mathcal{y} \\) is a realization of a Gaussian
    distribution with mean value of \\(\\mathcal{M}(\\theta) \\) and covariance
    matrix of \\( \\Sigma \\).

    $$ p(\\textbf{y}|\\theta) = \\mathcal{N}(\\textbf{y}|\\mathcal{M}
                                             (\\theta))$$

    The following options are available:

    * Option A: With known residual covariance matrix \\(\\Sigma\\) for
    independent measurements.

    * Option B: With unknown residual covariance matrix \\(\\Sigma\\),
    parametrized as \\(\\Sigma(\\theta_{\\epsilon})=\\sigma^2 \\textbf{I}_
    {N_{out}}\\) with unknown residual variances \\(\\sigma^2\\).
    This term will be jointly inferred with the uncertain input parameters.
    For the inversion, you need to define a prior marginal via `Input` class.
    Note that \\(\\sigma^2\\) is only a single scalar multiplier for the
    diagonal entries of the covariance matrix \\(\\Sigma\\).

    Attributes
    ----------
    InputDisc : obj
        Input object. When the \\(\\sigma^2\\) is expected to be inferred
        jointly with the parameters (`Option B`). If multiple output groups
        are defined by `Model.Output.names`, each model output needs to have
        a prior marginal using the `Input` class. The default is `''`.
    disc_type : str
        Type of the noise definition. `'Gaussian'` is only supported so far.
    parameters : dict or pandas.DataFrame
        Known residual variance \\(\\sigma^2\\), i.e. diagonal entry of the
        covariance matrix of the multivariate normal likelihood in case of
        `Option A`.

    """

    def __init__(self, InputDisc='', disc_type='Gaussian', parameters=None):
        self.InputDisc = InputDisc
        self.disc_type = disc_type
        self.parameters = parameters

    # -------------------------------------------------------------------------
    def get_sample(self, n_samples: int):
        """
        Generate samples for the \\(\\sigma^2\\), i.e. the diagonal entries of
        the variance-covariance matrix in the multivariate normal distribution.

        Parameters
        ----------
        n_samples : int
            Number of samples (parameter sets).

        Raises
        ------
        AttributeError
            If no input distributions (`InputDisc`) were provided.

        Returns
        -------
        sigma2_prior: array of shape (n_samples, n_params)
            \\(\\sigma^2\\) samples.

        """
        self.n_samples = n_samples # TODO: not used again in here - needed from the outside?

        if self.InputDisc == '':
            raise AttributeError('Cannot create new samples, please provide input distributions')

        # Create and store BoundTuples
        self.ExpDesign = ExpDesigns(self.InputDisc)
        self.ExpDesign.sampling_method = 'random'
        self.ExpDesign.generate_ED(
            n_samples, max_pce_deg=1
            )
        # TODO: need to recheck the following line
        # This used to simply be the return from the call above
        self.sigma2_prior = self.ExpDesign.X

        # Naive approach: Fit a gaussian kernel to the provided data
        # NOTE(review): overwrites ExpDesign.JDist with a KDE of the raw
        # data — confirm downstream consumers expect this.
        self.ExpDesign.JDist = stats.gaussian_kde(self.ExpDesign.raw_data)

        # Save the names of sigmas
        if len(self.InputDisc.Marginals) != 0:
            self.name = []
            for Marginalidx in range(len(self.InputDisc.Marginals)):
                self.name.append(self.InputDisc.Marginals[Marginalidx].name)

        return self.sigma2_prior
diff --git a/build/lib/bayesvalidrox/bayes_inference/mcmc.py b/build/lib/bayesvalidrox/bayes_inference/mcmc.py
new file mode 100644
index 000000000..fe22a152f
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayes_inference/mcmc.py
@@ -0,0 +1,909 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""MCMC sampling via the emcee Affine Invariant Ensemble sampler."""

import os
import numpy as np
import emcee
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import multiprocessing
import scipy.stats as st
from scipy.linalg import cholesky as chol
import warnings
import shutil
# Pin OpenMP/BLAS to a single thread so emcee's own multiprocessing pool
# is not oversubscribed by native thread pools.
os.environ["OMP_NUM_THREADS"] = "1"
+
+
class MCMC:
    """
    A class for bayesian inference via a Markov-Chain Monte-Carlo (MCMC)
    Sampler to approximate the posterior distribution of the Bayes theorem:
    $$p(\\theta|\\mathcal{y}) = \\frac{p(\\mathcal{y}|\\theta) p(\\theta)}
                                         {p(\\mathcal{y})}.$$

    This class makes inference with the emcee package [1] using an Affine
    Invariant Ensemble sampler (AIES) [2].

    [1] Foreman-Mackey, D., Hogg, D.W., Lang, D. and Goodman, J., 2013.emcee:
        the MCMC hammer. Publications of the Astronomical Society of the
        Pacific, 125(925), p.306. https://emcee.readthedocs.io/en/stable/

    [2] Goodman, J. and Weare, J., 2010. Ensemble samplers with affine
        invariance. Communications in applied mathematics and computational
        science, 5(1), pp.65-80.


    Attributes
    ----------
    BayesOpts : obj
        Bayes object holding the engine, the discrepancy model and the
        MCMC settings (``mcmc_params``).
    """

    def __init__(self, BayesOpts):
        """
        Parameters
        ----------
        BayesOpts : obj
            The Bayes inference object; its ``engine``, ``Discrepancy`` and
            ``mcmc_params`` attributes are read by ``run_sampler``.
        """
        self.BayesOpts = BayesOpts
+
    def run_sampler(self, observation, total_sigma2):
        """
        Run the emcee ensemble sampler and return posterior samples.

        Sets up the walkers' initial positions (from the prior, from a MAP
        point, or from user-provided samples), optionally appends inferred
        sigma2 dimensions, runs burn-in plus a production phase (serial with
        convergence monitoring, or parallel), and writes diagnostics/plots
        into ``Outputs_Bayes_<model>_<name>``.

        Parameters
        ----------
        observation : dict
            Observation data, forwarded to the likelihood
            (``BayesOpts.normpdf``) via ``self.observation``.
        total_sigma2 : dict
            Total variance of the observations, forwarded via
            ``self.total_sigma2``.

        Returns
        -------
        Posterior_df : pandas.DataFrame
            Burned-in, thinned, flattened chain; one column per parameter
            (plus sigma2 columns when they are inferred).
        """

        BayesObj = self.BayesOpts
        MetaModel = BayesObj.engine.MetaModel
        Model = BayesObj.engine.Model
        Discrepancy = self.BayesOpts.Discrepancy
        n_cpus = Model.n_cpus
        priorDist = BayesObj.engine.ExpDesign.JDist
        ndim = MetaModel.n_params
        # counter for model runs in eval_model (non-emulator path)
        self.counter = 0
        output_dir = f'Outputs_Bayes_{Model.name}_{self.BayesOpts.name}'
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        self.observation = observation
        self.total_sigma2 = total_sigma2

        # Unpack mcmc parameters given to BayesObj.mcmc_params;
        # values below are the defaults for any key not provided.
        self.initsamples = None
        self.nwalkers = 100
        self.nburn = 200
        self.nsteps = 100000
        self.moves = None
        self.mp = False
        self.verbose = False

        # Extract initial samples
        if 'init_samples' in BayesObj.mcmc_params:
            self.initsamples = BayesObj.mcmc_params['init_samples']
            if isinstance(self.initsamples, pd.DataFrame):
                self.initsamples = self.initsamples.values

        # Extract number of steps per walker
        if 'n_steps' in BayesObj.mcmc_params:
            self.nsteps = int(BayesObj.mcmc_params['n_steps'])
        # Extract number of walkers (chains)
        if 'n_walkers' in BayesObj.mcmc_params:
            self.nwalkers = int(BayesObj.mcmc_params['n_walkers'])
        # Extract moves
        if 'moves' in BayesObj.mcmc_params:
            self.moves = BayesObj.mcmc_params['moves']
        # Extract multiprocessing
        if 'multiprocessing' in BayesObj.mcmc_params:
            self.mp = BayesObj.mcmc_params['multiprocessing']
        # Extract verbose
        if 'verbose' in BayesObj.mcmc_params:
            self.verbose = BayesObj.mcmc_params['verbose']

        # Set initial samples
        # NOTE(review): fixed seed makes every run reproducible but also
        # identical across repetitions — confirm this is intended.
        np.random.seed(0)
        if self.initsamples is None:
            try:
                initsamples = priorDist.sample(self.nwalkers).T
            except:
                # NOTE(review): bare except — presumably catches the case
                # where JDist is a gaussian_kde (aPCE) without .sample();
                # consider narrowing to AttributeError.
                # when aPCE selected - gaussian kernel distribution
                inputSamples = MetaModel.ExpDesign.raw_data.T
                random_indices = np.random.choice(
                    len(inputSamples), size=self.nwalkers, replace=False
                    )
                initsamples = inputSamples[random_indices]

        else:
            if self.initsamples.ndim == 1:
                # When MAL is given: jitter the single point by 10% noise
                theta = self.initsamples
                initsamples = [theta + 1e-1*np.multiply(
                    np.random.randn(ndim), theta) for i in
                               range(self.nwalkers)]
            else:
                # Pick samples based on a uniform dist between min and max of
                # each dim
                initsamples = np.zeros((self.nwalkers, ndim))
                bound_tuples = []
                for idx_dim in range(ndim):
                    lower = np.min(self.initsamples[:, idx_dim])
                    upper = np.max(self.initsamples[:, idx_dim])
                    bound_tuples.append((lower, upper))
                    dist = st.uniform(loc=lower, scale=upper-lower)
                    initsamples[:, idx_dim] = dist.rvs(size=self.nwalkers)

                # Update lower and upper
                MetaModel.ExpDesign.bound_tuples = bound_tuples

        # Check if sigma^2 needs to be inferred
        if Discrepancy.opt_sigma != 'B':
            sigma2_samples = Discrepancy.get_sample(self.nwalkers)

            # Update initsamples
            initsamples = np.hstack((initsamples, sigma2_samples))

            # Update ndim
            ndim = initsamples.shape[1]

            # Discrepancy bound
            disc_bound_tuple = Discrepancy.ExpDesign.bound_tuples

            # Update bound_tuples
            # NOTE(review): this mutates the shared ExpDesign in place; a
            # second run_sampler call would append the bounds again.
            BayesObj.engine.ExpDesign.bound_tuples += disc_bound_tuple

        print("\n>>>> Bayesian inference with MCMC for "
              f"{self.BayesOpts.name} started. <<<<<<")

        # Set up the backend
        filename = f"{output_dir}/emcee_sampler.h5"
        backend = emcee.backends.HDFBackend(filename)
        # Clear the backend in case the file already exists
        backend.reset(self.nwalkers, ndim)

        # Define emcee sampler
        # Here we'll set up the computation. emcee combines multiple "walkers",
        # each of which is its own MCMC chain. The number of trace results will
        # be nwalkers * nsteps.
        if self.mp:
            # Run in parallel
            if n_cpus is None:
                n_cpus = multiprocessing.cpu_count()

            with multiprocessing.Pool(n_cpus) as pool:
                sampler = emcee.EnsembleSampler(
                    self.nwalkers, ndim, self.log_posterior, moves=self.moves,
                    pool=pool, backend=backend
                    )

                # Check if a burn-in phase is needed!
                if self.initsamples is None:
                    # Burn-in
                    print("\n Burn-in period is starting:")
                    pos = sampler.run_mcmc(
                        initsamples, self.nburn, progress=True
                        )

                    # Reset sampler
                    sampler.reset()
                    pos = pos.coords
                else:
                    pos = initsamples

                # Production run
                print("\n Production run is starting:")
                pos, prob, state = sampler.run_mcmc(
                    pos, self.nsteps, progress=True
                    )

        else:
            # Run in series and monitor the convergence
            sampler = emcee.EnsembleSampler(
                self.nwalkers, ndim, self.log_posterior, moves=self.moves,
                backend=backend, vectorize=True
                )

            # Check if a burn-in phase is needed!
            if self.initsamples is None:
                # Burn-in
                print("\n Burn-in period is starting:")
                pos = sampler.run_mcmc(
                    initsamples, self.nburn, progress=True
                    )

                # Reset sampler
                sampler.reset()
                pos = pos.coords
            else:
                pos = initsamples

            # Production run
            print("\n Production run is starting:")

            # Track how the average autocorrelation time estimate changes
            autocorrIdx = 0
            autocorr = np.empty(self.nsteps)
            tauold = np.inf
            autocorreverynsteps = 50

            # sample step by step using the generator sampler.sample
            for sample in sampler.sample(pos,
                                         iterations=self.nsteps,
                                         tune=True,
                                         progress=True):

                # only check convergence every autocorreverynsteps steps
                if sampler.iteration % autocorreverynsteps:
                    continue

                # Train model discrepancy/error
                # NOTE(review): parsed as (iteration % 3) * autocorreverynsteps;
                # for iterations that are multiples of 50 this happens to equal
                # iteration % 150 == 0, but confirm the intended period.
                if hasattr(BayesObj, 'errorModel') and BayesObj.errorModel \
                   and not sampler.iteration % 3 * autocorreverynsteps:
                    try:
                        self.error_MetaModel = self.train_error_model(sampler)
                    except:
                        # NOTE(review): silently ignores any training failure
                        pass

                # Print the current mean acceptance fraction
                if self.verbose:
                    print("\nStep: {}".format(sampler.iteration))
                    acc_fr = np.mean(sampler.acceptance_fraction)
                    print(f"Mean acceptance fraction: {acc_fr:.3f}")

                # compute the autocorrelation time so far
                # using tol=0 means that we'll always get an estimate even if
                # it isn't trustworthy
                tau = sampler.get_autocorr_time(tol=0)
                # average over walkers
                autocorr[autocorrIdx] = np.nanmean(tau)
                autocorrIdx += 1

                # output current autocorrelation estimate
                if self.verbose:
                    print(f"Mean autocorr. time estimate: {np.nanmean(tau):.3f}")
                    list_gr = np.round(self.gelman_rubin(sampler.chain), 3)
                    print("Gelman-Rubin Test*: ", list_gr)

                # check convergence: chain much longer than tau, tau stable
                # within 1%, and Gelman-Rubin below 1.1
                converged = np.all(tau*autocorreverynsteps < sampler.iteration)
                converged &= np.all(np.abs(tauold - tau) / tau < 0.01)
                converged &= np.all(self.gelman_rubin(sampler.chain) < 1.1)

                if converged:
                    break
                tauold = tau

        # Posterior diagnostics
        try:
            tau = sampler.get_autocorr_time(tol=0)
        except emcee.autocorr.AutocorrError:
            tau = 5

        if all(np.isnan(tau)):
            tau = 5

        # burn-in = 2*max(tau), thinning = max(1, 0.5*min(tau))
        burnin = int(2*np.nanmax(tau))
        thin = int(0.5*np.nanmin(tau)) if int(0.5*np.nanmin(tau)) != 0 else 1
        finalsamples = sampler.get_chain(discard=burnin, flat=True, thin=thin)
        acc_fr = np.nanmean(sampler.acceptance_fraction)
        list_gr = np.round(self.gelman_rubin(sampler.chain[:, burnin:]), 3)

        # Print summary
        print('\n')
        print('-'*15 + 'Posterior diagnostics' + '-'*15)
        print(f"Mean auto-correlation time: {np.nanmean(tau):.3f}")
        print(f"Thin: {thin}")
        print(f"Burn-in: {burnin}")
        print(f"Flat chain shape: {finalsamples.shape}")
        print(f"Mean acceptance fraction*: {acc_fr:.3f}")
        print("Gelman-Rubin Test**: ", list_gr)

        print("\n* This value must lay between 0.234 and 0.5.")
        print("** These values must be smaller than 1.1.")
        print('-'*50)

        print(f"\n>>>> Bayesian inference with MCMC for {self.BayesOpts.name} "
              "successfully completed. <<<<<<\n")

        # Extract parameter names and their prior ranges
        # NOTE(review): appending to par_names mutates the shared ExpDesign
        # list in place — verify this is acceptable for repeated calls.
        par_names = self.BayesOpts.engine.ExpDesign.par_names

        if Discrepancy.opt_sigma != 'B':
            for i in range(len(Discrepancy.InputDisc.Marginals)):
                par_names.append(Discrepancy.InputDisc.Marginals[i].name)

        params_range = self.BayesOpts.engine.ExpDesign.bound_tuples

        # Plot traces
        if self.verbose and self.nsteps < 10000:
            pdf = PdfPages(output_dir+'/traceplots.pdf')
            fig = plt.figure()
            for parIdx in range(ndim):
                # Set up the axes with gridspec
                fig = plt.figure()
                grid = plt.GridSpec(4, 4, hspace=0.2, wspace=0.2)
                main_ax = fig.add_subplot(grid[:-1, :3])
                y_hist = fig.add_subplot(grid[:-1, -1], xticklabels=[],
                                         sharey=main_ax)

                for i in range(self.nwalkers):
                    samples = sampler.chain[i, :, parIdx]
                    main_ax.plot(samples, '-')

                    # histogram on the attached axes
                    y_hist.hist(samples[burnin:], 40, histtype='stepfilled',
                                orientation='horizontal', color='gray')

                main_ax.set_ylim(params_range[parIdx])
                main_ax.set_title('traceplot for ' + par_names[parIdx])
                main_ax.set_xlabel('step number')

                # save the current figure
                pdf.savefig(fig, bbox_inches='tight')

                # Destroy the current plot
                plt.clf()

            pdf.close()

        # plot development of autocorrelation estimate
        if not self.mp:
            fig1 = plt.figure()
            steps = autocorreverynsteps*np.arange(1, autocorrIdx+1)
            taus = autocorr[:autocorrIdx]
            plt.plot(steps, steps / autocorreverynsteps, "--k")
            plt.plot(steps, taus)
            plt.xlim(0, steps.max())
            plt.ylim(0, np.nanmax(taus)+0.1*(np.nanmax(taus)-np.nanmin(taus)))
            plt.xlabel("number of steps")
            plt.ylabel(r"mean $\hat{\tau}$")
            fig1.savefig(f"{output_dir}/autocorrelation_time.pdf",
                         bbox_inches='tight')

        # logml_dict = self.marginal_llk_emcee(sampler, self.nburn, logp=None,
        # maxiter=5000)
        # print('\nThe Bridge Sampling Estimation is "
        #       f"{logml_dict['logml']:.5f}.')

        # # Posterior-based expectation of posterior probablity
        # postExpPostLikelihoods = np.mean(sampler.get_log_prob(flat=True)
        # [self.nburn*self.nwalkers:])

        # # Posterior-based expectation of prior densities
        # postExpPrior = np.mean(self.log_prior(emcee_trace.T))

        # # Posterior-based expectation of likelihoods
        # postExpLikelihoods_emcee = postExpPostLikelihoods - postExpPrior

        # # Calculate Kullback-Leibler Divergence
        # KLD_emcee = postExpLikelihoods_emcee - logml_dict['logml']
        # print("Kullback-Leibler divergence: %.5f"%KLD_emcee)

        # # Information Entropy based on Entropy paper Eq. 38
        # infEntropy_emcee = logml_dict['logml'] - postExpPrior -
        #                    postExpLikelihoods_emcee
        # print("Information Entropy: %.5f" %infEntropy_emcee)

        Posterior_df = pd.DataFrame(finalsamples, columns=par_names)

        return Posterior_df
+
+    # -------------------------------------------------------------------------
+    def log_prior(self, theta):
+        """
+        Calculates the log prior likelihood \\( p(\\theta)\\) for the given
+        parameter set(s) \\( \\theta \\).
+
+        Parameters
+        ----------
+        theta : array of shape (n_samples, n_params)
+            Parameter sets, i.e. proposals of MCMC chains.
+
+        Returns
+        -------
+        logprior: float or array of shape n_samples
+            Log prior likelihood. If theta has only one row, a single value is
+            returned otherwise an array.
+
+        """
+
+        MetaModel = self.BayesOpts.MetaModel
+        Discrepancy = self.BayesOpts.Discrepancy
+
+        # Find the number of sigma2 parameters
+        if Discrepancy.opt_sigma != 'B':
+            disc_bound_tuples = Discrepancy.ExpDesign.bound_tuples
+            disc_marginals = Discrepancy.ExpDesign.InputObj.Marginals
+            disc_prior_space = Discrepancy.ExpDesign.prior_space
+            n_sigma2 = len(disc_bound_tuples)
+        else:
+            n_sigma2 = -len(theta)
+        prior_dist = self.BayesOpts.engine.ExpDesign.prior_space
+        params_range = self.BayesOpts.engine.ExpDesign.bound_tuples
+        theta = theta if theta.ndim != 1 else theta.reshape((1, -1))
+        nsamples = theta.shape[0]
+        logprior = -np.inf*np.ones(nsamples)
+
+        for i in range(nsamples):
+            # Check if the sample is within the parameters' range
+            if self._check_ranges(theta[i], params_range):
+                # Check if all dists are uniform, if yes priors are equal.
+                if all(MetaModel.input_obj.Marginals[i].dist_type == 'uniform'
+                       for i in range(MetaModel.n_params)):
+                    logprior[i] = 0.0
+                else:
+                    logprior[i] = np.log(
+                        prior_dist.pdf(theta[i, :-n_sigma2].T)
+                        )
+
+                # Check if bias term needs to be inferred
+                if Discrepancy.opt_sigma != 'B':
+                    if self._check_ranges(theta[i, -n_sigma2:],
+                                          disc_bound_tuples):
+                        if all('unif' in disc_marginals[i].dist_type for i in
+                               range(Discrepancy.ExpDesign.ndim)):
+                            logprior[i] = 0.0
+                        else:
+                            logprior[i] += np.log(
+                                disc_prior_space.pdf(theta[i, -n_sigma2:])
+                                )
+
+        if nsamples == 1:
+            return logprior[0]
+        else:
+            return logprior
+
+    # -------------------------------------------------------------------------
+    def log_likelihood(self, theta):
+        """
+        Computes likelihood \\( p(\\mathcal{Y}|\\theta)\\) of the performance
+        of the (meta-)model in reproducing the observation data.
+
+        Parameters
+        ----------
+        theta : array of shape (n_samples, n_params)
+            Parameter set, i.e. proposals of the MCMC chains.
+
+        Returns
+        -------
+        log_like : array of shape (n_samples)
+            Log likelihood.
+
+        """
+
+        BayesOpts = self.BayesOpts
+        MetaModel = BayesOpts.MetaModel
+        Discrepancy = self.BayesOpts.Discrepancy
+
+        # Find the number of sigma2 parameters
+        if Discrepancy.opt_sigma != 'B':
+            disc_bound_tuples = Discrepancy.ExpDesign.bound_tuples
+            n_sigma2 = len(disc_bound_tuples)
+        else:
+            n_sigma2 = -len(theta)
+        # Check if bias term needs to be inferred
+        if Discrepancy.opt_sigma != 'B':
+            sigma2 = theta[:, -n_sigma2:]
+            theta = theta[:, :-n_sigma2]
+        else:
+            sigma2 = None
+        theta = theta if theta.ndim != 1 else theta.reshape((1, -1))
+
+        # Evaluate Model/MetaModel at theta
+        mean_pred, BayesOpts._std_pce_prior_pred = self.eval_model(theta)
+
+        # Surrogate model's error using RMSE of test data
+        surrError = MetaModel.rmse if hasattr(MetaModel, 'rmse') else None
+
+        # Likelihood
+        log_like = BayesOpts.normpdf(
+            mean_pred, self.observation, self.total_sigma2, sigma2,
+            std=surrError
+            )
+        return log_like
+
+    # -------------------------------------------------------------------------
+    def log_posterior(self, theta):
+        """
+        Computes the posterior likelihood \\(p(\\theta| \\mathcal{Y})\\) for
+        the given parameterset.
+
+        Parameters
+        ----------
+        theta : array of shape (n_samples, n_params)
+            Parameter set, i.e. proposals of the MCMC chains.
+
+        Returns
+        -------
+        log_like : array of shape (n_samples)
+            Log posterior likelihood.
+
+        """
+
+        nsamples = 1 if theta.ndim == 1 else theta.shape[0]
+
+        if nsamples == 1:
+            if self.log_prior(theta) == -np.inf:
+                return -np.inf
+            else:
+                # Compute log prior
+                log_prior = self.log_prior(theta)
+                # Compute log Likelihood
+                log_likelihood = self.log_likelihood(theta)
+
+                return log_prior + log_likelihood
+        else:
+            # Compute log prior
+            log_prior = self.log_prior(theta)
+
+            # Initialize log_likelihood
+            log_likelihood = -np.inf*np.ones(nsamples)
+
+            # find the indices for -inf sets
+            non_inf_idx = np.where(log_prior != -np.inf)[0]
+
+            # Compute loLikelihoods
+            if non_inf_idx.size != 0:
+                log_likelihood[non_inf_idx] = self.log_likelihood(
+                    theta[non_inf_idx]
+                    )
+
+            return log_prior + log_likelihood
+
+    # -------------------------------------------------------------------------
+    def eval_model(self, theta):
+        """
+        Evaluates the (meta-) model at the given theta.
+
+        Parameters
+        ----------
+        theta : array of shape (n_samples, n_params)
+            Parameter set, i.e. proposals of the MCMC chains.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean model prediction.
+        std_pred : dict
+            Std of model prediction.
+
+        """
+
+        BayesObj = self.BayesOpts
+        MetaModel = BayesObj.MetaModel
+        Model = BayesObj.engine.Model
+
+        if BayesObj.emulator:
+            # Evaluate the MetaModel
+            mean_pred, std_pred = MetaModel.eval_metamodel(samples=theta)
+        else:
+            # Evaluate the origModel
+            mean_pred, std_pred = dict(), dict()
+
+            model_outs, _ = Model.run_model_parallel(
+                theta, prevRun_No=self.counter,
+                key_str='_MCMC', mp=False, verbose=False)
+
+            # Save outputs in respective dicts
+            for varIdx, var in enumerate(Model.Output.names):
+                mean_pred[var] = model_outs[var]
+                std_pred[var] = np.zeros((mean_pred[var].shape))
+
+            # Remove the folder
+            if Model.link_type.lower() != 'function':
+                shutil.rmtree(f"{Model.name}_MCMC_{self.counter+1}")
+
+            # Add one to the counter
+            self.counter += 1
+
+        if hasattr(self, 'error_MetaModel') and BayesObj.error_model:
+            meanPred, stdPred = self.error_MetaModel.eval_model_error(
+                BayesObj.BiasInputs, mean_pred
+                )
+
+        return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    def train_error_model(self, sampler):
+        """
+        Trains an error model using a Gaussian Process Regression.
+
+        Parameters
+        ----------
+        sampler : obj
+            emcee sampler.
+
+        Returns
+        -------
+        error_MetaModel : obj
+            A error model.
+
+        """
+        BayesObj = self.BayesOpts
+        MetaModel = BayesObj.MetaModel
+
+        # Prepare the poster samples
+        try:
+            tau = sampler.get_autocorr_time(tol=0)
+        except emcee.autocorr.AutocorrError:
+            tau = 5
+
+        if all(np.isnan(tau)):
+            tau = 5
+
+        burnin = int(2*np.nanmax(tau))
+        thin = int(0.5*np.nanmin(tau)) if int(0.5*np.nanmin(tau)) != 0 else 1
+        finalsamples = sampler.get_chain(discard=burnin, flat=True, thin=thin)
+        posterior = finalsamples[:, :MetaModel.n_params]
+
+        # Select posterior mean as MAP
+        map_theta = posterior.mean(axis=0).reshape((1, MetaModel.n_params))
+        # MAP_theta = st.mode(Posterior_df,axis=0)[0]
+
+        # Evaluate the (meta-)model at the MAP
+        y_map, y_std_map = MetaModel.eval_metamodel(samples=map_theta)
+
+        # Train a GPR meta-model using MAP
+        error_MetaModel = MetaModel.create_model_error(
+            BayesObj.BiasInputs, y_map, name='Calib')
+
+        return error_MetaModel
+
+    # -------------------------------------------------------------------------
+    def gelman_rubin(self, chain, return_var=False):
+        """
+        The potential scale reduction factor (PSRF) defined by the variance
+        within one chain, W, with the variance between chains B.
+        Both variances are combined in a weighted sum to obtain an estimate of
+        the variance of a parameter \\( \\theta \\).The square root of the
+        ratio of this estimates variance to the within chain variance is called
+        the potential scale reduction.
+        For a well converged chain it should approach 1. Values greater than
+        1.1 typically indicate that the chains have not yet fully converged.
+
+        Source: http://joergdietrich.github.io/emcee-convergence.html
+
+        https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py
+
+        Parameters
+        ----------
+        chain : array (n_walkers, n_steps, n_params)
+            The emcee ensamples.
+
+        Returns
+        -------
+        R_hat : float
+            The Gelman-Robin values.
+
+        """
+        m_chains, n_iters = chain.shape[:2]
+
+        # Calculate between-chain variance
+        θb = np.mean(chain, axis=1)
+        θbb = np.mean(θb, axis=0)
+        B_over_n = ((θbb - θb)**2).sum(axis=0)
+        B_over_n /= (m_chains - 1)
+
+        # Calculate within-chain variances
+        ssq = np.var(chain, axis=1, ddof=1)
+        W = np.mean(ssq, axis=0)
+
+        # (over) estimate of variance
+        var_θ = W * (n_iters - 1) / n_iters + B_over_n
+
+        if return_var:
+            return var_θ
+        else:
+            # The square root of the ratio of this estimates variance to the
+            # within chain variance
+            R_hat = np.sqrt(var_θ / W)
+            return R_hat
+
+    # -------------------------------------------------------------------------
+    def marginal_llk_emcee(self, sampler, nburn=None, logp=None, maxiter=1000):
+        """
+        The Bridge Sampling Estimator of the Marginal Likelihood based on
+        https://gist.github.com/junpenglao/4d2669d69ddfe1d788318264cdcf0583
+
+        Parameters
+        ----------
+        sampler : TYPE
+            MultiTrace, result of MCMC run.
+        nburn : int, optional
+            Number of burn-in step. The default is None.
+        logp : TYPE, optional
+            Model Log-probability function. The default is None.
+        maxiter : int, optional
+            Maximum number of iterations. The default is 1000.
+
+        Returns
+        -------
+        marg_llk : dict
+            Estimated Marginal log-Likelihood.
+
+        """
+        r0, tol1, tol2 = 0.5, 1e-10, 1e-4
+
+        if logp is None:
+            logp = sampler.log_prob_fn
+
+        # Split the samples into two parts
+        # Use the first 50% for fiting the proposal distribution
+        # and the second 50% in the iterative scheme.
+        if nburn is None:
+            mtrace = sampler.chain
+        else:
+            mtrace = sampler.chain[:, nburn:, :]
+
+        nchain, len_trace, nrofVars = mtrace.shape
+
+        N1_ = len_trace // 2
+        N1 = N1_*nchain
+        N2 = len_trace*nchain - N1
+
+        samples_4_fit = np.zeros((nrofVars, N1))
+        samples_4_iter = np.zeros((nrofVars, N2))
+        effective_n = np.zeros((nrofVars))
+
+        # matrix with already transformed samples
+        for var in range(nrofVars):
+
+            # for fitting the proposal
+            x = mtrace[:, :N1_, var]
+
+            samples_4_fit[var, :] = x.flatten()
+            # for the iterative scheme
+            x2 = mtrace[:, N1_:, var]
+            samples_4_iter[var, :] = x2.flatten()
+
+            # effective sample size of samples_4_iter, scalar
+            effective_n[var] = self._my_ESS(x2)
+
+        # median effective sample size (scalar)
+        neff = np.median(effective_n)
+
+        # get mean & covariance matrix and generate samples from proposal
+        m = np.mean(samples_4_fit, axis=1)
+        V = np.cov(samples_4_fit)
+        L = chol(V, lower=True)
+
+        # Draw N2 samples from the proposal distribution
+        gen_samples = m[:, None] + np.dot(
+            L, st.norm.rvs(0, 1, size=samples_4_iter.shape)
+            )
+
+        # Evaluate proposal distribution for posterior & generated samples
+        q12 = st.multivariate_normal.logpdf(samples_4_iter.T, m, V)
+        q22 = st.multivariate_normal.logpdf(gen_samples.T, m, V)
+
+        # Evaluate unnormalized posterior for posterior & generated samples
+        q11 = logp(samples_4_iter.T)
+        q21 = logp(gen_samples.T)
+
+        # Run iterative scheme:
+        tmp = self._iterative_scheme(
+            N1, N2, q11, q12, q21, q22, r0, neff, tol1, maxiter, 'r'
+            )
+        if ~np.isfinite(tmp['logml']):
+            warnings.warn(
+                "Logml could not be estimated within maxiter, rerunning with "
+                "adjusted starting value. Estimate might be more variable than"
+                " usual.")
+            # use geometric mean as starting value
+            r0_2 = np.sqrt(tmp['r_vals'][-2]*tmp['r_vals'][-1])
+            tmp = self._iterative_scheme(
+                q11, q12, q21, q22, r0_2, neff, tol2, maxiter, 'logml'
+                )
+
+        marg_llk = dict(
+            logml=tmp['logml'], niter=tmp['niter'], method="normal",
+            q11=q11, q12=q12, q21=q21, q22=q22
+            )
+        return marg_llk
+
+    # -------------------------------------------------------------------------
+    def _iterative_scheme(self, N1, N2, q11, q12, q21, q22, r0, neff, tol,
+                          maxiter, criterion):
+        """
+        Iterative scheme as proposed in Meng and Wong (1996) to estimate the
+        marginal likelihood
+
+        """
+        l1 = q11 - q12
+        l2 = q21 - q22
+        # To increase numerical stability,
+        # subtracting the median of l1 from l1 & l2 later
+        lstar = np.median(l1)
+        s1 = neff/(neff + N2)
+        s2 = N2/(neff + N2)
+        r = r0
+        r_vals = [r]
+        logml = np.log(r) + lstar
+        criterion_val = 1 + tol
+
+        i = 0
+        while (i <= maxiter) & (criterion_val > tol):
+            rold = r
+            logmlold = logml
+            numi = np.exp(l2 - lstar)/(s1 * np.exp(l2 - lstar) + s2 * r)
+            deni = 1/(s1 * np.exp(l1 - lstar) + s2 * r)
+            if np.sum(~np.isfinite(numi))+np.sum(~np.isfinite(deni)) > 0:
+                warnings.warn(
+                    """Infinite value in iterative scheme, returning NaN.
+                     Try rerunning with more samples.""")
+            r = (N1/N2) * np.sum(numi)/np.sum(deni)
+            r_vals.append(r)
+            logml = np.log(r) + lstar
+            i += 1
+            if criterion == 'r':
+                criterion_val = np.abs((r - rold)/r)
+            elif criterion == 'logml':
+                criterion_val = np.abs((logml - logmlold)/logml)
+
+        if i >= maxiter:
+            return dict(logml=np.NaN, niter=i, r_vals=np.asarray(r_vals))
+        else:
+            return dict(logml=logml, niter=i)
+
+    # -------------------------------------------------------------------------
+    def _my_ESS(self, x):
+        """
+        Compute the effective sample size of estimand of interest.
+        Vectorised implementation.
+        https://github.com/jwalton3141/jwalton3141.github.io/blob/master/assets/posts/ESS/rwmh.py
+
+
+        Parameters
+        ----------
+        x : array of shape (n_walkers, n_steps)
+            MCMC Samples.
+
+        Returns
+        -------
+        int
+            Effective sample size.
+
+        """
+        m_chains, n_iters = x.shape
+
+        def variogram(t):
+            variogram = ((x[:, t:] - x[:, :(n_iters - t)])**2).sum()
+            variogram /= (m_chains * (n_iters - t))
+            return variogram
+
+        post_var = self.gelman_rubin(x, return_var=True)
+
+        t = 1
+        rho = np.ones(n_iters)
+        negative_autocorr = False
+
+        # Iterate until the sum of consecutive estimates of autocorrelation is
+        # negative
+        while not negative_autocorr and (t < n_iters):
+            rho[t] = 1 - variogram(t) / (2 * post_var)
+
+            if not t % 2:
+                negative_autocorr = sum(rho[t-1:t+1]) < 0
+
+            t += 1
+
+        return int(m_chains*n_iters / (1 + 2*rho[1:t].sum()))
+
+    # -------------------------------------------------------------------------
+    def _check_ranges(self, theta, ranges):
+        """
+        This function checks if theta lies in the given ranges.
+
+        Parameters
+        ----------
+        theta : array
+            Proposed parameter set.
+        ranges : nested list
+            List of the praremeter ranges.
+
+        Returns
+        -------
+        c : bool
+            If it lies in the given range, it return True else False.
+
+        """
+        c = True
+        # traverse in the list1
+        for i, bounds in enumerate(ranges):
+            x = theta[i]
+            # condition check
+            if x < bounds[0] or x > bounds[1]:
+                c = False
+                return c
+        return c
diff --git a/build/lib/bayesvalidrox/bayesvalidrox.mplstyle b/build/lib/bayesvalidrox/bayesvalidrox.mplstyle
new file mode 100644
index 000000000..1f31c01f2
--- /dev/null
+++ b/build/lib/bayesvalidrox/bayesvalidrox.mplstyle
@@ -0,0 +1,16 @@
+figure.titlesize : 30
+axes.titlesize : 30
+axes.labelsize : 30
+axes.linewidth : 3
+axes.grid : True
+lines.linewidth : 3
+lines.markersize : 10
+xtick.labelsize : 30
+ytick.labelsize : 30
+legend.fontsize : 30
+font.family : serif
+font.serif : Arial
+font.size : 30
+text.usetex : True
+grid.linestyle : -
+figure.figsize : 24, 16
diff --git a/build/lib/bayesvalidrox/post_processing/__init__.py b/build/lib/bayesvalidrox/post_processing/__init__.py
new file mode 100644
index 000000000..81c982542
--- /dev/null
+++ b/build/lib/bayesvalidrox/post_processing/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+
+from .post_processing import PostProcessing
+
+__all__ = [
+    "PostProcessing"
+    ]
diff --git a/build/lib/bayesvalidrox/post_processing/post_processing.py b/build/lib/bayesvalidrox/post_processing/post_processing.py
new file mode 100644
index 000000000..6520a40f9
--- /dev/null
+++ b/build/lib/bayesvalidrox/post_processing/post_processing.py
@@ -0,0 +1,1338 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import math
+import os
+from itertools import combinations, cycle
+import pandas as pd
+import scipy.stats as stats
+from sklearn.linear_model import LinearRegression
+from sklearn.metrics import mean_squared_error, r2_score
+import matplotlib.pyplot as plt
+import matplotlib.ticker as ticker
+from matplotlib.offsetbox import AnchoredText
+from matplotlib.patches import Patch
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           '../', 'bayesvalidrox.mplstyle'))
+
+
+class PostProcessing:
+    """
+    This class provides many helper functions to post-process the trained
+    meta-model.
+
+    Attributes
+    ----------
+    MetaModel : obj
+        MetaModel object to do postprocessing on.
+    name : str
+        Type of the anaylsis. The default is `'calib'`. If a validation is
+        expected to be performed change this to `'valid'`.
+    """
+
+    def __init__(self, engine, name='calib'):
+        self.engine = engine
+        self.MetaModel = engine.MetaModel
+        self.ExpDesign = engine.ExpDesign
+        self.ModelObj = engine.Model
+        self.name = name
+
+    # -------------------------------------------------------------------------
    def plot_moments(self, xlabel='Time [s]', plot_type=None):
        """
        Plots the moments in a pdf format in the directory
        `Outputs_PostProcessing`.

        Parameters
        ----------
        xlabel : str, optional
            String to be displayed as x-label. The default is `'Time [s]'`.
        plot_type : str, optional
            Options: bar or line. The default is `None`.

        Returns
        -------
        pce_means: dict
            Mean of the model outputs.
        pce_stds: dict
            Standard deviation of the model outputs.

        """

        bar_plot = True if plot_type == 'bar' else False
        meta_model_type = self.MetaModel.meta_model_type
        Model = self.ModelObj

        # Read Monte-Carlo reference
        # NOTE(review): presumably returns None when no 'mc_ref' observation
        # exists — the plotting below only adds the reference when it is not
        # None. Confirm against read_observation's implementation.
        self.mc_reference = Model.read_observation('mc_ref')

        # Set the x values
        x_values_orig = self.engine.ExpDesign.x_values

        # Compute the moments with the PCEModel object
        self.pce_means, self.pce_stds = self.compute_pce_moments()

        # Get the variables
        out_names = Model.Output.names

        # Open a pdf for the plots
        newpath = (f'Outputs_PostProcessing_{self.name}/')
        if not os.path.exists(newpath):
            os.makedirs(newpath)

        # Plot the best fit line, set the linewidth (lw), color and
        # transparency (alpha) of the line
        # One figure per output key: left axis = mean, right axis = std.
        for key in out_names:
            fig, ax = plt.subplots(nrows=1, ncols=2)

            # Extract mean and std
            mean_data = self.pce_means[key]
            std_data = self.pce_stds[key]

            # Extract a list of x values
            # x_values may be shared across outputs or keyed per output.
            if type(x_values_orig) is dict:
                x = x_values_orig[key]
            else:
                x = x_values_orig

            # Plot: bar plot or line plot
            if bar_plot:
                ax[0].bar(list(map(str, x)), mean_data, color='b',
                          width=0.25)
                ax[1].bar(list(map(str, x)), std_data, color='b',
                          width=0.25)
                ax[0].legend(labels=[meta_model_type])
                ax[1].legend(labels=[meta_model_type])
            else:
                ax[0].plot(x, mean_data, lw=3, color='k', marker='x',
                           label=meta_model_type)
                ax[1].plot(x, std_data, lw=3, color='k', marker='x',
                           label=meta_model_type)

            # Overlay the Monte-Carlo reference (red), if available.
            if self.mc_reference is not None:
                if bar_plot:
                    ax[0].bar(list(map(str, x)), self.mc_reference['mean'],
                              color='r', width=0.25)
                    ax[1].bar(list(map(str, x)), self.mc_reference['std'],
                              color='r', width=0.25)
                    ax[0].legend(labels=[meta_model_type])
                    ax[1].legend(labels=[meta_model_type])
                else:
                    ax[0].plot(x, self.mc_reference['mean'], lw=3, marker='x',
                               color='r', label='Ref.')
                    ax[1].plot(x, self.mc_reference['std'], lw=3, marker='x',
                               color='r', label='Ref.')

            # Label the axes and provide a title
            ax[0].set_xlabel(xlabel)
            ax[1].set_xlabel(xlabel)
            ax[0].set_ylabel(key)
            ax[1].set_ylabel(key)

            # Provide a title
            ax[0].set_title('Mean of ' + key)
            ax[1].set_title('Std of ' + key)

            if not bar_plot:
                ax[0].legend(loc='best')
                ax[1].legend(loc='best')

            plt.tight_layout()

            # save the current figure
            fig.savefig(
                f'./{newpath}Mean_Std_PCE_{key}.pdf',
                bbox_inches='tight'
                )

        return self.pce_means, self.pce_stds
+
+    # -------------------------------------------------------------------------
+    def valid_metamodel(self, n_samples=1, samples=None, model_out_dict=None,
+                        x_axis='Time [s]'):
+        """
+        Evaluates and plots the meta model and the PCEModel outputs for the
+        given number of samples or the given samples.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples to be evaluated. The default is 1.
+        samples : array of shape (n_samples, n_params), optional
+            Samples to be evaluated. The default is None.
+        model_out_dict: dict
+            The model runs using the samples provided.
+        x_axis : str, optional
+            Label of x axis. The default is `'Time [s]'`.
+
+        Returns
+        -------
+        None.
+
+        """
+        MetaModel = self.MetaModel
+        Model = self.ModelObj
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Extract x_values
+        x_values = self.engine.ExpDesign.x_values
+
+        if model_out_dict is not None:
+            self.model_out_dict = model_out_dict
+        else:
+            self.model_out_dict = self._eval_model(samples, key_str='valid')
+        self.pce_out_mean, self.pce_out_std = MetaModel.eval_metamodel(samples)
+
+        try:
+            key = Model.Output.names[1]
+        except IndexError:
+            key = Model.Output.names[0]
+
+        n_obs = self.model_out_dict[key].shape[1]
+
+        if n_obs == 1:
+            self._plot_validation()
+        else:
+            self._plot_validation_multi(x_values=x_values, x_axis=x_axis)
+
+    # -------------------------------------------------------------------------
+    def check_accuracy(self, n_samples=None, samples=None, outputs=None):
+        """
+        Checks accuracy of the metamodel by computing the root mean square
+        error and validation error for all outputs.
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples. The default is None.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to be checked. The default is None.
+        outputs : dict, optional
+            Output dictionary with model outputs for all given output types in
+            `Model.Output.names`. The default is None.
+
+        Raises
+        ------
+        Exception
+            When neither n_samples nor samples are provided.
+
+        Returns
+        -------
+        rmse: dict
+            Root mean squared error for each output.
+        valid_error : dict
+            Validation error for each output.
+
+        """
+        MetaModel = self.MetaModel
+        Model = self.ModelObj
+
+        # Set the number of samples
+        if n_samples:
+            self.n_samples = n_samples
+        elif samples is not None:
+            self.n_samples = samples.shape[0]
+        else:
+            raise Exception("Please provide either samples or pass the number"
+                            " of samples!")
+
+        # Generate random samples if necessary
+        Samples = self._get_sample() if samples is None else samples
+
+        # Run the original model with the generated samples
+        if outputs is None:
+            outputs = self._eval_model(Samples, key_str='validSet')
+
+        # Run the PCE model with the generated samples
+        pce_outputs, _ = MetaModel.eval_metamodel(samples=Samples)
+
+        self.rmse = {}
+        self.valid_error = {}
+        # Loop over the keys and compute RMSE error.
+        for key in Model.Output.names:
+            # Root mena square
+            self.rmse[key] = mean_squared_error(outputs[key], pce_outputs[key],
+                                                squared=False,
+                                                multioutput='raw_values')
+            # Validation error
+            self.valid_error[key] = (self.rmse[key]**2) / \
+                np.var(outputs[key], ddof=1, axis=0)
+
+            # Print a report table
+            print("\n>>>>> Errors of {} <<<<<".format(key))
+            print("\nIndex  |  RMSE   |  Validation Error")
+            print('-'*35)
+            print('\n'.join(f'{i+1}  |  {k:.3e}  |  {j:.3e}' for i, (k, j)
+                            in enumerate(zip(self.rmse[key],
+                                             self.valid_error[key]))))
+        # Save error dicts in PCEModel object
+        self.MetaModel.rmse = self.rmse
+        self.MetaModel.valid_error = self.valid_error
+
+        return
+
+    # -------------------------------------------------------------------------
    def plot_seq_design_diagnostics(self, ref_BME_KLD=None):
        """
        Plots the Bayesian Model Evidence (BME) and Kullback-Leibler divergence
        (KLD) for the sequential design.

        One figure is produced per diagnostic (Modified LOO error, validation
        error, KLD, BME, RMSE of mean/std, Hellinger distance). When replicated
        sequential runs are detected (`*_rep_k` keys with k > 1), box plots
        over the replications are drawn; otherwise single convergence curves.
        The figures and the underlying arrays are written to
        `Outputs_PostProcessing_<name>/seq_design_diagnostics/`.

        Parameters
        ----------
        ref_BME_KLD : array, optional
            Reference BME and KLD . The default is `None`.

        Returns
        -------
        None.

        """
        engine = self.engine
        PCEModel = self.MetaModel  # NOTE(review): unused below; kept as-is
        n_init_samples = engine.ExpDesign.n_init_samples
        n_total_samples = engine.ExpDesign.X.shape[0]

        newpath = f'Outputs_PostProcessing_{self.name}/seq_design_diagnostics/'
        if not os.path.exists(newpath):
            os.makedirs(newpath)

        # Diagnostic names and the matching per-diagnostic dicts on the engine
        # (keys are '<util_func>_rep_<k>').
        plotList = ['Modified LOO error', 'Validation error', 'KLD', 'BME',
                    'RMSEMean', 'RMSEStd', 'Hellinger distance']
        seqList = [engine.SeqModifiedLOO, engine.seqValidError,
                   engine.SeqKLD, engine.SeqBME, engine.seqRMSEMean,
                   engine.seqRMSEStd, engine.SeqDistHellinger]

        markers = ('x', 'o', 'd', '*', '+')
        colors = ('k', 'darkgreen', 'b', 'navy', 'darkred')

        # Plot the evolution of the diagnostic criteria of the
        # Sequential Experimental Design.
        for plotidx, plot in enumerate(plotList):
            fig, ax = plt.subplots()
            seq_dict = seqList[plotidx]
            name_util = list(seq_dict.keys())

            # Skip diagnostics that were never recorded.
            if len(name_util) == 0:
                continue

            # Box plot when Replications have been detected.
            if any(int(name.split("rep_", 1)[1]) > 1 for name in name_util):
                # Extract the values from dict
                sorted_seq_opt = {}
                # Number of replications
                n_reps = engine.ExpDesign.n_replication

                # Get the list of utility function names
                # Handle if only one UtilityFunction is provided
                if not isinstance(engine.ExpDesign.util_func, list):
                    util_funcs = [engine.ExpDesign.util_func]
                else:
                    util_funcs = engine.ExpDesign.util_func

                # Regroup per utility function: for each sequential iteration,
                # collect the mean diagnostic value of every replication.
                for util in util_funcs:
                    sortedSeq = {}
                    # min number of runs available from reps
                    n_runs = min([seq_dict[f'{util}_rep_{i+1}'].shape[0]
                                 for i in range(n_reps)])

                    for runIdx in range(n_runs):
                        values = []
                        for key in seq_dict.keys():
                            if util in key:
                                values.append(seq_dict[key][runIdx].mean())
                        sortedSeq['SeqItr_'+str(runIdx)] = np.array(values)
                    sorted_seq_opt[util] = sortedSeq

                # BoxPlot
                def draw_plot(data, labels, edge_color, fill_color, idx):
                    # Shift positions per utility function so the boxes of
                    # different utilities sit side by side.
                    pos = labels - (idx-1)
                    bp = plt.boxplot(data, positions=pos, labels=labels,
                                     patch_artist=True, sym='', widths=0.75)
                    elements = ['boxes', 'whiskers', 'fliers', 'means',
                                'medians', 'caps']
                    for element in elements:
                        plt.setp(bp[element], color=edge_color[idx])

                    for patch in bp['boxes']:
                        patch.set(facecolor=fill_color[idx])

                # step1: spacing of the x-tick labels; step2: thinning of the
                # plotted iterations.
                # NOTE(review): when n_new_samples != 1 the labels advance by
                # step1 while the data is thinned with step2 == 1 — confirm
                # these stay in sync for all sequential designs.
                if engine.ExpDesign.n_new_samples != 1:
                    step1 = engine.ExpDesign.n_new_samples
                    step2 = 1
                else:
                    step1 = 5
                    step2 = 5
                edge_color = ['red', 'blue', 'green']
                fill_color = ['tan', 'cyan', 'lightgreen']
                plot_label = plot
                # Plot for different Utility Functions
                for idx, util in enumerate(util_funcs):
                    all_errors = np.empty((n_reps, 0))

                    for key in list(sorted_seq_opt[util].keys()):
                        errors = sorted_seq_opt.get(util, {}).get(key)[:, None]
                        all_errors = np.hstack((all_errors, errors))

                    # Special cases for BME and KLD
                    if plot == 'KLD' or plot == 'BME':
                        # BME convergence if refBME is provided
                        if ref_BME_KLD is not None:
                            if plot == 'BME':
                                refValue = ref_BME_KLD[0]
                                plot_label = r'BME/BME$^{Ref.}$'
                            if plot == 'KLD':
                                refValue = ref_BME_KLD[1]
                                plot_label = '$D_{KL}[p(\\theta|y_*),p(\\theta)]'\
                                    ' / D_{KL}^{Ref.}[p(\\theta|y_*), '\
                                    'p(\\theta)]$'

                            # Difference between BME/KLD and the ref. values
                            all_errors = np.divide(all_errors,
                                                   np.full((all_errors.shape),
                                                           refValue))

                            # Plot baseline for zero, i.e. no difference
                            plt.axhline(y=1.0, xmin=0, xmax=1, c='green',
                                        ls='--', lw=2)

                    # Plot each UtilFuncs
                    labels = np.arange(n_init_samples, n_total_samples+1, step1)
                    draw_plot(all_errors[:, ::step2], labels, edge_color,
                              fill_color, idx)

                plt.xticks(labels, labels)
                # Set the major and minor locators
                ax.xaxis.set_major_locator(ticker.AutoLocator())
                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
                ax.xaxis.grid(True, which='major', linestyle='-')
                ax.xaxis.grid(True, which='minor', linestyle='--')

                # Legend
                legend_elements = []
                for idx, util in enumerate(util_funcs):
                    legend_elements.append(Patch(facecolor=fill_color[idx],
                                                 edgecolor=edge_color[idx],
                                                 label=util))
                plt.legend(handles=legend_elements[::-1], loc='best')

                # Ratio plots (BME/KLD vs. reference) stay on a linear scale.
                if plot != 'BME' and plot != 'KLD':
                    plt.yscale('log')
                plt.autoscale(True)
                plt.xlabel('\\# of training samples')
                plt.ylabel(plot_label)
                plt.title(plot)

                # save the current figure
                plot_name = plot.replace(' ', '_')
                fig.savefig(
                    f'./{newpath}/seq_{plot_name}.pdf',
                    bbox_inches='tight'
                    )
                # Destroy the current plot
                plt.clf()
                # Save arrays into files
                f = open(f'./{newpath}/seq_{plot_name}.txt', 'w')
                f.write(str(sorted_seq_opt))
                f.close()
            else:
                # No replications: one convergence curve per utility function.
                for idx, name in enumerate(name_util):
                    seq_values = seq_dict[name]
                    if engine.ExpDesign.n_new_samples != 1:
                        step = engine.ExpDesign.n_new_samples
                    else:
                        step = 1
                    x_idx = np.arange(n_init_samples, n_total_samples+1, step)
                    # Make sure the final design size appears on the x axis.
                    if n_total_samples not in x_idx:
                        x_idx = np.hstack((x_idx, n_total_samples))

                    if plot == 'KLD' or plot == 'BME':
                        # BME convergence if refBME is provided
                        if ref_BME_KLD is not None:
                            if plot == 'BME':
                                refValue = ref_BME_KLD[0]
                                plot_label = r'BME/BME$^{Ref.}$'
                            if plot == 'KLD':
                                refValue = ref_BME_KLD[1]
                                plot_label = '$D_{KL}[p(\\theta|y_*),p(\\theta)]'\
                                    ' / D_{KL}^{Ref.}[p(\\theta|y_*), '\
                                    'p(\\theta)]$'

                            # Difference between BME/KLD and the ref. values
                            values = np.divide(seq_values,
                                               np.full((seq_values.shape),
                                                       refValue))

                            # Plot baseline for zero, i.e. no difference
                            plt.axhline(y=1.0, xmin=0, xmax=1, c='green',
                                        ls='--', lw=2)

                            # Set the limits
                            plt.ylim([1e-1, 1e1])

                            # Create the plots
                            plt.semilogy(x_idx, values, marker=markers[idx],
                                         color=colors[idx], ls='--', lw=2,
                                         label=name.split("_rep", 1)[0])
                        else:
                            plot_label = plot

                            # Create the plots
                            plt.plot(x_idx, seq_values, marker=markers[idx],
                                     color=colors[idx], ls='--', lw=2,
                                     label=name.split("_rep", 1)[0])

                    else:
                        plot_label = plot
                        # Guard against NaNs from failed iterations before
                        # taking the log-scale mean.
                        seq_values = np.nan_to_num(seq_values)

                        # Plot the error evolution for each output
                        plt.semilogy(x_idx, seq_values.mean(axis=1),
                                     marker=markers[idx], ls='--', lw=2,
                                     color=colors[idx],
                                     label=name.split("_rep", 1)[0])

                # Set the major and minor locators
                ax.xaxis.set_major_locator(ticker.AutoLocator())
                ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
                ax.xaxis.grid(True, which='major', linestyle='-')
                ax.xaxis.grid(True, which='minor', linestyle='--')

                ax.tick_params(axis='both', which='major', direction='in',
                               width=3, length=10)
                ax.tick_params(axis='both', which='minor', direction='in',
                               width=2, length=8)
                plt.xlabel('Number of runs')
                plt.ylabel(plot_label)
                plt.title(plot)
                plt.legend(frameon=True)

                # save the current figure
                plot_name = plot.replace(' ', '_')
                fig.savefig(
                    f'./{newpath}/seq_{plot_name}.pdf',
                    bbox_inches='tight'
                    )
                # Destroy the current plot
                plt.clf()

                # ---------------- Saving arrays into files ---------------
                np.save(f'./{newpath}/seq_{plot_name}.npy', seq_values)

        return
+
+    # -------------------------------------------------------------------------
+    def sobol_indices(self, xlabel='Time [s]', plot_type=None):
+        """
+        Provides Sobol indices as a sensitivity measure to infer the importance
+        of the input parameters. See Eq. 27 in [1] for more details. For the
+        case with Principal component analysis refer to [2].
+
+        [1] Global sensitivity analysis: A flexible and efficient framework
+        with an example from stochastic hydrogeology S. Oladyshkin, F.P.
+        de Barros, W. Nowak  https://doi.org/10.1016/j.advwatres.2011.11.001
+
+        [2] Nagel, J.B., Rieckermann, J. and Sudret, B., 2020. Principal
+        component analysis and sparse polynomial chaos expansions for global
+        sensitivity analysis and model calibration: Application to urban
+        drainage simulation. Reliability Engineering & System Safety, 195,
+        p.106737.
+
+        Parameters
+        ----------
+        xlabel : str, optional
+            Label of the x-axis. The default is `'Time [s]'`.
+        plot_type : str, optional
+            Plot type. The default is `None`. This corresponds to line plot.
+            Bar chart can be selected by `bar`.
+
+        Returns
+        -------
+        sobol_cell: dict
+            Sobol indices.
+        total_sobol: dict
+            Total Sobol indices.
+
+        """
+        # Extract the necessary variables
+        PCEModel = self.MetaModel
+        basis_dict = PCEModel.basis_dict
+        coeffs_dict = PCEModel.coeffs_dict
+        n_params = PCEModel.n_params
+        max_order = np.max(PCEModel.pce_deg)
+        sobol_cell_b = {}
+        total_sobol_b = {}
+        cov_Z_p_q = np.zeros((n_params))
+
+        for b_i in range(PCEModel.n_bootstrap_itrs):
+
+            sobol_cell_, total_sobol_ = {}, {}
+
+            for output in self.ModelObj.Output.names:
+
+                n_meas_points = len(coeffs_dict[f'b_{b_i+1}'][output])
+
+                # Initialize the (cell) array containing the (total) Sobol indices.
+                sobol_array = dict.fromkeys(range(1, max_order+1), [])
+                sobol_cell_array = dict.fromkeys(range(1, max_order+1), [])
+
+                for i_order in range(1, max_order+1):
+                    n_comb = math.comb(n_params, i_order)
+
+                    sobol_cell_array[i_order] = np.zeros((n_comb, n_meas_points))
+
+                total_sobol_array = np.zeros((n_params, n_meas_points))
+
+                # Initialize the cell to store the names of the variables
+                TotalVariance = np.zeros((n_meas_points))
+                # Loop over all measurement points and calculate sobol indices
+                for pIdx in range(n_meas_points):
+
+                    # Extract the basis indices (alpha) and coefficients
+                    Basis = basis_dict[f'b_{b_i+1}'][output][f'y_{pIdx+1}']
+
+                    try:
+                        clf_poly = PCEModel.clf_poly[f'b_{b_i+1}'][output][f'y_{pIdx+1}']
+                        PCECoeffs = clf_poly.coef_
+                    except:
+                        PCECoeffs = coeffs_dict[f'b_{b_i+1}'][output][f'y_{pIdx+1}']
+
+                    # Compute total variance
+                    TotalVariance[pIdx] = np.sum(np.square(PCECoeffs[1:]))
+
+                    nzidx = np.where(PCECoeffs != 0)[0]
+                    # Set all the Sobol indices equal to zero in the presence of a
+                    # null output.
+                    if len(nzidx) == 0:
+                        # This is buggy.
+                        for i_order in range(1, max_order+1):
+                            sobol_cell_array[i_order][:, pIdx] = 0
+
+                    # Otherwise compute them by summing well-chosen coefficients
+                    else:
+                        nz_basis = Basis[nzidx]
+                        for i_order in range(1, max_order+1):
+                            idx = np.where(np.sum(nz_basis > 0, axis=1) == i_order)
+                            subbasis = nz_basis[idx]
+                            Z = np.array(list(combinations(range(n_params), i_order)))
+
+                            for q in range(Z.shape[0]):
+                                Zq = Z[q]
+                                subsubbasis = subbasis[:, Zq]
+                                subidx = np.prod(subsubbasis, axis=1) > 0
+                                sum_ind = nzidx[idx[0][subidx]]
+                                if TotalVariance[pIdx] == 0.0:
+                                    sobol_cell_array[i_order][q, pIdx] = 0.0
+                                else:
+                                    sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                                    sobol /= TotalVariance[pIdx]
+                                    sobol_cell_array[i_order][q, pIdx] = sobol
+
+                        # Compute the TOTAL Sobol indices.
+                        for ParIdx in range(n_params):
+                            idx = nz_basis[:, ParIdx] > 0
+                            sum_ind = nzidx[idx]
+
+                            if TotalVariance[pIdx] == 0.0:
+                                total_sobol_array[ParIdx, pIdx] = 0.0
+                            else:
+                                sobol = np.sum(np.square(PCECoeffs[sum_ind]))
+                                sobol /= TotalVariance[pIdx]
+                                total_sobol_array[ParIdx, pIdx] = sobol
+
+                    # ----- if PCA selected: Compute covariance -----
+                    if PCEModel.dim_red_method.lower() == 'pca':
+                        # Extract the basis indices (alpha) and coefficients for
+                        # next component
+                        if pIdx < n_meas_points-1:
+                            nextBasis = basis_dict[f'b_{b_i+1}'][output][f'y_{pIdx+2}']
+                            if PCEModel.bootstrap_method != 'fast' or b_i == 0:
+                                clf_poly = PCEModel.clf_poly[f'b_{b_i+1}'][output][f'y_{pIdx+2}']
+                                nextPCECoeffs = clf_poly.coef_
+                            else:
+                                nextPCECoeffs = coeffs_dict[f'b_{b_i+1}'][output][f'y_{pIdx+2}']
+
+                            # Choose the common non-zero basis
+                            mask = (Basis[:, None] == nextBasis).all(-1).any(-1)
+                            n_mask = (nextBasis[:, None] == Basis).all(-1).any(-1)
+
+                            # Compute the covariance in Eq 17.
+                            for ParIdx in range(n_params):
+                                idx = (mask) & (Basis[:, ParIdx] > 0)
+                                n_idx = (n_mask) & (nextBasis[:, ParIdx] > 0)
+                                try:
+                                    cov_Z_p_q[ParIdx] += np.sum(np.dot(
+                                        PCECoeffs[idx], nextPCECoeffs[n_idx])
+                                        )
+                                except:
+                                    pass
+
+                # Compute the sobol indices according to Ref. 2
+                if PCEModel.dim_red_method.lower() == 'pca':
+                    n_c_points = self.engine.ExpDesign.Y[output].shape[1]
+                    PCA = PCEModel.pca[f'b_{b_i+1}'][output]
+                    compPCA = PCA.components_
+                    nComp = compPCA.shape[0]
+                    var_Z_p = PCA.explained_variance_
+
+                    # Extract the sobol index of the components
+                    for i_order in range(1, max_order+1):
+                        n_comb = math.comb(n_params, i_order)
+                        sobol_array[i_order] = np.zeros((n_comb, n_c_points))
+                        Z = np.array(list(combinations(range(n_params), i_order)))
+
+                        # Loop over parameters
+                        for q in range(Z.shape[0]):
+                            S_Z_i = sobol_cell_array[i_order][q]
+
+                            for tIdx in range(n_c_points):
+                                var_Y_t = np.var(
+                                    self.engine.ExpDesign.Y[output][:, tIdx])
+                                if var_Y_t == 0.0:
+                                    term1, term2 = 0.0, 0.0
+                                else:
+                                    # Eq. 17
+                                    term1 = 0.0
+                                    for i in range(nComp):
+                                        a = S_Z_i[i] * var_Z_p[i]
+                                        a *= compPCA[i, tIdx]**2
+                                        term1 += a
+
+                                    # TODO: Term 2
+                                    # term2 = 0.0
+                                    # for i in range(nComp-1):
+                                    #     term2 += cov_Z_p_q[q] * compPCA[i, tIdx]
+                                    #     term2 *= compPCA[i+1, tIdx]
+                                    # term2 *= 2
+
+                                sobol_array[i_order][q, tIdx] = term1 #+ term2
+
+                                # Devide over total output variance Eq. 18
+                                sobol_array[i_order][q, tIdx] /= var_Y_t
+
+                    # Compute the TOTAL Sobol indices.
+                    total_sobol = np.zeros((n_params, n_c_points))
+                    for ParIdx in range(n_params):
+                        S_Z_i = total_sobol_array[ParIdx]
+
+                        for tIdx in range(n_c_points):
+                            var_Y_t = np.var(self.engine.ExpDesign.Y[output][:, tIdx])
+                            if var_Y_t == 0.0:
+                                term1, term2 = 0.0, 0.0
+                            else:
+                                term1 = 0
+                                for i in range(nComp):
+                                    term1 += S_Z_i[i] * var_Z_p[i] * \
+                                        (compPCA[i, tIdx]**2)
+
+                                # Term 2
+                                term2 = 0
+                                for i in range(nComp-1):
+                                    term2 += cov_Z_p_q[ParIdx] * compPCA[i, tIdx] \
+                                        * compPCA[i+1, tIdx]
+                                term2 *= 2
+
+                            total_sobol[ParIdx, tIdx] = term1 #+ term2
+
+                            # Devide over total output variance Eq. 18
+                            total_sobol[ParIdx, tIdx] /= var_Y_t
+
+                    sobol_cell_[output] = sobol_array
+                    total_sobol_[output] = total_sobol
+                else:
+                    sobol_cell_[output] = sobol_cell_array
+                    total_sobol_[output] = total_sobol_array
+
+            # Save for each bootsrtap iteration
+            sobol_cell_b[b_i] = sobol_cell_
+            total_sobol_b[b_i] = total_sobol_
+
+        # Average total sobol indices
+        total_sobol_all = {}
+        for i in sorted(total_sobol_b):
+            for k, v in total_sobol_b[i].items():
+                if k not in total_sobol_all:
+                    total_sobol_all[k] = [None] * len(total_sobol_b)
+                total_sobol_all[k][i] = v
+
+        self.total_sobol = {}
+        for output in self.ModelObj.Output.names:
+            self.total_sobol[output] = np.mean(total_sobol_all[output], axis=0)
+
+        # ---------------- Plot -----------------------
+        par_names = self.engine.ExpDesign.par_names
+        x_values_orig = self.engine.ExpDesign.x_values
+
+        newpath = (f'Outputs_PostProcessing_{self.name}/')
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        fig = plt.figure()
+
+        for outIdx, output in enumerate(self.ModelObj.Output.names):
+
+            # Extract total Sobol indices
+            total_sobol = self.total_sobol[output]
+
+            # Compute quantiles
+            q_5 = np.quantile(total_sobol_all[output], q=0.05, axis=0)
+            q_97_5 = np.quantile(total_sobol_all[output], q=0.975, axis=0)
+
+            # Extract a list of x values
+            if type(x_values_orig) is dict:
+                x = x_values_orig[output]
+            else:
+                x = x_values_orig
+
+            if plot_type == 'bar':
+                ax = fig.add_axes([0, 0, 1, 1])
+                dict1 = {xlabel: x}
+                dict2 = {param: sobolIndices for param, sobolIndices
+                         in zip(par_names, total_sobol)}
+
+                df = pd.DataFrame({**dict1, **dict2})
+                df.plot(x=xlabel, y=par_names, kind="bar", ax=ax, rot=0,
+                        colormap='Dark2', yerr=q_97_5-q_5)
+                ax.set_ylabel('Total Sobol indices, $S^T$')
+
+            else:
+                for i, sobolIndices in enumerate(total_sobol):
+                    plt.plot(x, sobolIndices, label=par_names[i],
+                             marker='x', lw=2.5)
+                    plt.fill_between(x, q_5[i], q_97_5[i], alpha=0.15)
+
+                plt.ylabel('Total Sobol indices, $S^T$')
+                plt.xlabel(xlabel)
+
+            plt.title(f'Sensitivity analysis of {output}')
+            if plot_type != 'bar':
+                plt.legend(loc='best', frameon=True)
+
+            # Save indices
+            np.savetxt(f'./{newpath}totalsobol_' +
+                       output.replace('/', '_') + '.csv',
+                       total_sobol.T, delimiter=',',
+                       header=','.join(par_names), comments='')
+
+            # save the current figure
+            fig.savefig(
+                f'./{newpath}Sobol_indices_{output}.pdf',
+                bbox_inches='tight'
+                )
+
+            # Destroy the current plot
+            plt.clf()
+
+        return self.total_sobol
+
+    # -------------------------------------------------------------------------
+    def check_reg_quality(self, n_samples=1000, samples=None):
+        """
+        Checks the quality of the metamodel for single output models based on:
+        https://towardsdatascience.com/how-do-you-check-the-quality-of-your-regression-model-in-python-fa61759ff685
+
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of parameter sets to use for the check. The default is 1000.
+        samples : array of shape (n_samples, n_params), optional
+            Parameter sets to use for the check. The default is None.
+
+        Returns
+        -------
+        None.
+
+        """
+        MetaModel = self.MetaModel
+
+        if samples is None:
+            self.n_samples = n_samples
+            samples = self._get_sample()
+        else:
+            self.n_samples = samples.shape[0]
+
+        # Evaluate the original and the surrogate model
+        y_val = self._eval_model(samples, key_str='valid')
+        y_pce_val, _ = MetaModel.eval_metamodel(samples=samples)
+
+        # Open a pdf for the plots
+        newpath = f'Outputs_PostProcessing_{self.name}/'
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+            residuals = y_val_ - y_pce_val_
+
+            # ------ Residuals vs. predicting variables ------
+            # Check the assumptions of linearity and independence
+            fig1 = plt.figure()
+            for i, par in enumerate(self.engine.ExpDesign.par_names):
+                plt.title(f"{key}: Residuals vs. {par}")
+                plt.scatter(
+                    x=samples[:, i], y=residuals, color='blue', edgecolor='k')
+                plt.grid(True)
+                xmin, xmax = min(samples[:, i]), max(samples[:, i])
+                plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color='red',
+                           lw=3, linestyle='--')
+                plt.xlabel(par)
+                plt.ylabel('Residuals')
+                plt.show()
+
+                # save the current figure
+                fig1.savefig(f'./{newpath}/Residuals_vs_Par_{i+1}.pdf',
+                             bbox_inches='tight')
+                # Destroy the current plot
+                plt.clf()
+
+            # ------ Fitted vs. residuals ------
+            # Check the assumptions of linearity and independence
+            fig2 = plt.figure()
+            plt.title(f"{key}: Residuals vs. fitted values")
+            plt.scatter(x=y_pce_val_, y=residuals, color='blue', edgecolor='k')
+            plt.grid(True)
+            xmin, xmax = min(y_val_), max(y_val_)
+            plt.hlines(y=0, xmin=xmin*0.9, xmax=xmax*1.1, color='red', lw=3,
+                       linestyle='--')
+            plt.xlabel(key)
+            plt.ylabel('Residuals')
+            plt.show()
+
+            # save the current figure
+            fig2.savefig(f'./{newpath}/Fitted_vs_Residuals.pdf',
+                         bbox_inches='tight')
+            # Destroy the current plot
+            plt.clf()
+
+            # ------ Histogram of normalized residuals ------
+            fig3 = plt.figure()
+            resid_pearson = residuals / (max(residuals)-min(residuals))
+            plt.hist(resid_pearson, bins=20, edgecolor='k')
+            plt.ylabel('Count')
+            plt.xlabel('Normalized residuals')
+            plt.title(f"{key}: Histogram of normalized residuals")
+
+            # Normality (Shapiro-Wilk) test of the residuals
+            ax = plt.gca()
+            _, p = stats.shapiro(residuals)
+            if p < 0.01:
+                annText = "The residuals seem to come from a Gaussian Process."
+            else:
+                annText = "The normality assumption may not hold."
+            at = AnchoredText(annText, prop=dict(size=30), frameon=True,
+                              loc='upper left')
+            at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
+            ax.add_artist(at)
+
+            plt.show()
+
+            # save the current figure
+            fig3.savefig(f'./{newpath}/Hist_NormResiduals.pdf',
+                         bbox_inches='tight')
+            # Destroy the current plot
+            plt.clf()
+
+            # ------ Q-Q plot of the normalized residuals ------
+            plt.figure()
+            stats.probplot(residuals[:, 0], plot=plt)
+            plt.xticks()
+            plt.yticks()
+            plt.xlabel("Theoretical quantiles")
+            plt.ylabel("Sample quantiles")
+            plt.title(f"{key}: Q-Q plot of normalized residuals")
+            plt.grid(True)
+            plt.show()
+
+            # save the current figure
+            plt.savefig(f'./{newpath}/QQPlot_NormResiduals.pdf',
+                        bbox_inches='tight')
+            # Destroy the current plot
+            plt.clf()
+
+    # -------------------------------------------------------------------------
+    def eval_pce_model_3d(self):
+
+        self.n_samples = 1000
+
+        PCEModel = self.MetaModel
+        Model = self.ModelObj
+        n_samples = self.n_samples
+
+        # Create 3D-Grid
+        # TODO: Make it general
+        x = np.linspace(-5, 10, n_samples)
+        y = np.linspace(0, 15, n_samples)
+
+        X, Y = np.meshgrid(x, y)
+        PCE_Z = np.zeros((self.n_samples, self.n_samples))
+        Model_Z = np.zeros((self.n_samples, self.n_samples))
+
+        for idxMesh in range(self.n_samples):
+            sample_mesh = np.vstack((X[:, idxMesh], Y[:, idxMesh])).T
+
+            univ_p_val = PCEModel.univ_basis_vals(sample_mesh)
+
+            for Outkey, ValuesDict in PCEModel.coeffs_dict.items():
+
+                pce_out_mean = np.zeros((len(sample_mesh), len(ValuesDict)))
+                pce_out_std = np.zeros((len(sample_mesh), len(ValuesDict)))
+                model_outs = np.zeros((len(sample_mesh), len(ValuesDict)))
+
+                for Inkey, InIdxValues in ValuesDict.items():
+                    idx = int(Inkey.split('_')[1]) - 1
+                    basis_deg_ind = PCEModel.basis_dict[Outkey][Inkey]
+                    clf_poly = PCEModel.clf_poly[Outkey][Inkey]
+
+                    PSI_Val = PCEModel.create_psi(basis_deg_ind, univ_p_val)
+
+                    # Perdiction with error bar
+                    y_mean, y_std = clf_poly.predict(PSI_Val, return_std=True)
+
+                    pce_out_mean[:, idx] = y_mean
+                    pce_out_std[:, idx] = y_std
+
+                    # Model evaluation
+                    model_out_dict, _ = Model.run_model_parallel(sample_mesh,
+                                                                 key_str='Valid3D')
+                    model_outs[:, idx] = model_out_dict[Outkey].T
+
+                PCE_Z[:, idxMesh] = y_mean
+                Model_Z[:, idxMesh] = model_outs[:, 0]
+
+        # ---------------- 3D plot for PCEModel -----------------------
+        fig_PCE = plt.figure()
+        ax = plt.axes(projection='3d')
+        ax.plot_surface(X, Y, PCE_Z, rstride=1, cstride=1,
+                        cmap='viridis', edgecolor='none')
+        ax.set_title('PCEModel')
+        ax.set_xlabel('$x_1$')
+        ax.set_ylabel('$x_2$')
+        ax.set_zlabel('$f(x_1,x_2)$')
+
+        plt.grid()
+        plt.show()
+
+        #  Saving the figure
+        newpath = f'Outputs_PostProcessing_{self.name}/'
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # save the figure to file
+        fig_PCE.savefig(f'./{newpath}/3DPlot_PCEModel.pdf',
+                        bbox_inches='tight')
+        plt.close(fig_PCE)
+
+        # ---------------- 3D plot for Model -----------------------
+        fig_Model = plt.figure()
+        ax = plt.axes(projection='3d')
+        ax.plot_surface(X, Y, PCE_Z, rstride=1, cstride=1,
+                        cmap='viridis', edgecolor='none')
+        ax.set_title('Model')
+        ax.set_xlabel('$x_1$')
+        ax.set_ylabel('$x_2$')
+        ax.set_zlabel('$f(x_1,x_2)$')
+
+        plt.grid()
+        plt.show()
+
+        # Save the figure
+        fig_Model.savefig(f'./{newpath}/3DPlot_Model.pdf',
+                          bbox_inches='tight')
+        plt.close(fig_Model)
+
+        return
+
+    # -------------------------------------------------------------------------
+    def compute_pce_moments(self):
+        """
+        Computes the first two moments using the PCE-based meta-model.
+
+        Returns
+        -------
+        pce_means: dict
+            The first moments (mean) of outpust.
+        pce_means: dict
+            The first moments (mean) of outpust.
+
+        """
+
+        MetaModel = self.MetaModel
+        outputs = self.ModelObj.Output.names
+        pce_means_b = {}
+        pce_stds_b = {}
+
+        # Loop over bootstrap iterations
+        for b_i in range(MetaModel.n_bootstrap_itrs):
+            # Loop over the metamodels
+            coeffs_dicts = MetaModel.coeffs_dict[f'b_{b_i+1}'].items()
+            means = {}
+            stds = {}
+            for output, coef_dict in coeffs_dicts:
+
+                pce_mean = np.zeros((len(coef_dict)))
+                pce_var = np.zeros((len(coef_dict)))
+
+                for index, values in coef_dict.items():
+                    idx = int(index.split('_')[1]) - 1
+                    coeffs = MetaModel.coeffs_dict[f'b_{b_i+1}'][output][index]
+
+                    # Mean = c_0
+                    if coeffs[0] != 0:
+                        pce_mean[idx] = coeffs[0]
+                    else:
+                        clf_poly = MetaModel.clf_poly[f'b_{b_i+1}'][output]
+                        pce_mean[idx] = clf_poly[index].intercept_
+                    # Var = sum(coeffs[1:]**2)
+                    pce_var[idx] = np.sum(np.square(coeffs[1:]))
+
+                # Save predictions for each output
+                if MetaModel.dim_red_method.lower() == 'pca':
+                    PCA = MetaModel.pca[f'b_{b_i+1}'][output]
+                    means[output] = PCA.inverse_transform(pce_mean)
+                    stds[output] = np.sqrt(np.dot(pce_var,
+                                                  PCA.components_**2))
+                else:
+                    means[output] = pce_mean
+                    stds[output] = np.sqrt(pce_var)
+
+            # Save predictions for each bootstrap iteration
+            pce_means_b[b_i] = means
+            pce_stds_b[b_i] = stds
+
+        # Change the order of nesting
+        mean_all = {}
+        for i in sorted(pce_means_b):
+            for k, v in pce_means_b[i].items():
+                if k not in mean_all:
+                    mean_all[k] = [None] * len(pce_means_b)
+                mean_all[k][i] = v
+        std_all = {}
+        for i in sorted(pce_stds_b):
+            for k, v in pce_stds_b[i].items():
+                if k not in std_all:
+                    std_all[k] = [None] * len(pce_stds_b)
+                std_all[k][i] = v
+
+        # Back transformation if PCA is selected.
+        pce_means, pce_stds = {}, {}
+        for output in outputs:
+            pce_means[output] = np.mean(mean_all[output], axis=0)
+            pce_stds[output] = np.mean(std_all[output], axis=0)
+
+            # Print a report table
+            print("\n>>>>> Moments of {} <<<<<".format(output))
+            print("\nIndex  |  Mean   |  Std. deviation")
+            print('-'*35)
+            print('\n'.join(f'{i+1}  |  {k:.3e}  |  {j:.3e}' for i, (k, j)
+                            in enumerate(zip(pce_means[output],
+                                             pce_stds[output]))))
+        print('-'*40)
+
+        return pce_means, pce_stds
+
+    # -------------------------------------------------------------------------
+    def _get_sample(self, n_samples=None):
+        """
+        Generates random samples taken from the input parameter space.
+
+        Returns
+        -------
+        samples : array of shape (n_samples, n_params)
+            Generated samples.
+
+        """
+        if n_samples is None:
+            n_samples = self.n_samples
+        self.samples = self.ExpDesign.generate_samples(
+            n_samples,
+            sampling_method='random')
+        return self.samples
+
+    # -------------------------------------------------------------------------
+    def _eval_model(self, samples=None, key_str='Valid'):
+        """
+        Evaluates Forward Model for the given number of self.samples or given
+        samples.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate the model at. The default is None.
+        key_str : str, optional
+            Key string pass to the model. The default is 'Valid'.
+
+        Returns
+        -------
+        model_outs : dict
+            Dictionary of results.
+
+        """
+        Model = self.ModelObj
+
+        if samples is None:
+            samples = self._get_sample()
+            self.samples = samples
+        else:
+            self.n_samples = len(samples)
+
+        model_outs, _ = Model.run_model_parallel(samples, key_str=key_str)
+
+        return model_outs
+
+    # -------------------------------------------------------------------------
+    def _plot_validation(self):
+        """
+        Plots outputs for visual comparison of metamodel outputs with that of
+        the (full) original model.
+
+        Returns
+        -------
+        None.
+
+        """
+        PCEModel = self.MetaModel
+
+        # get the samples
+        x_val = self.samples
+        y_pce_val = self.pce_out_mean
+        y_val = self.model_out_dict
+
+        # Open a pdf for the plots
+        newpath = f'Outputs_PostProcessing_{self.name}/'
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        fig = plt.figure()
+        # Fit the data(train the model)
+        for key in y_pce_val.keys():
+
+            y_pce_val_ = y_pce_val[key]
+            y_val_ = y_val[key]
+
+            regression_model = LinearRegression()
+            regression_model.fit(y_pce_val_, y_val_)
+
+            # Predict
+            x_new = np.linspace(np.min(y_pce_val_), np.max(y_val_), 100)
+            y_predicted = regression_model.predict(x_new[:, np.newaxis])
+
+            plt.scatter(y_pce_val_, y_val_, color='gold', linewidth=2)
+            plt.plot(x_new, y_predicted, color='k')
+
+            # Calculate the adjusted R_squared and RMSE
+            # the total number of explanatory variables in the model
+            # (not including the constant term)
+            length_list = []
+            for key, value in PCEModel.coeffs_dict['b_1'][key].items():
+                length_list.append(len(value))
+            n_predictors = min(length_list)
+            n_samples = x_val.shape[0]
+
+            R2 = r2_score(y_pce_val_, y_val_)
+            AdjR2 = 1 - (1 - R2) * (n_samples - 1) / \
+                (n_samples - n_predictors - 1)
+            rmse = mean_squared_error(y_pce_val_, y_val_, squared=False)
+
+            plt.annotate(f'RMSE = {rmse:.3f}\n Adjusted $R^2$ = {AdjR2:.3f}',
+                         xy=(0.05, 0.85), xycoords='axes fraction')
+
+            plt.ylabel("Original Model")
+            plt.xlabel("PCE Model")
+            plt.grid()
+            plt.show()
+
+            # save the current figure
+            plot_name = key.replace(' ', '_')
+            fig.savefig(f'./{newpath}/Model_vs_PCEModel_{plot_name}.pdf',
+                        bbox_inches='tight')
+
+            # Destroy the current plot
+            plt.clf()
+
+    # -------------------------------------------------------------------------
+    def _plot_validation_multi(self, x_values=[], x_axis="x [m]"):
+        """
+        Plots outputs for visual comparison of metamodel outputs with that of
+        the (full) multioutput original model
+
+        Parameters
+        ----------
+        x_values : list or array, optional
+            List of x values. The default is [].
+        x_axis : str, optional
+            Label of the x axis. The default is "x [m]".
+
+        Returns
+        -------
+        None.
+
+        """
+        Model = self.ModelObj
+
+        newpath = f'Outputs_PostProcessing_{self.name}/'
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # List of markers and colors
+        color = cycle((['b', 'g', 'r', 'y', 'k']))
+        marker = cycle(('x', 'd', '+', 'o', '*'))
+
+        fig = plt.figure()
+        # Plot the model vs PCE model
+        for keyIdx, key in enumerate(Model.Output.names):
+
+            y_pce_val = self.pce_out_mean[key]
+            y_pce_val_std = self.pce_out_std[key]
+            y_val = self.model_out_dict[key]
+            try:
+                x = self.model_out_dict['x_values'][key]
+            except (TypeError, IndexError):
+                x = x_values
+
+            for idx in range(y_val.shape[0]):
+                Color = next(color)
+                Marker = next(marker)
+
+                plt.plot(x, y_val[idx], color=Color, marker=Marker,
+                         label='$Y_{%s}^M$'%(idx+1))
+
+                plt.plot(x, y_pce_val[idx], color=Color, marker=Marker,
+                         linestyle='--',
+                         label='$Y_{%s}^{PCE}$'%(idx+1))
+                plt.fill_between(x, y_pce_val[idx]-1.96*y_pce_val_std[idx],
+                                 y_pce_val[idx]+1.96*y_pce_val_std[idx],
+                                 color=Color, alpha=0.15)
+
+            # Calculate the RMSE
+            rmse = mean_squared_error(y_pce_val, y_val, squared=False)
+            R2 = r2_score(y_pce_val[idx].reshape(-1, 1),
+                          y_val[idx].reshape(-1, 1))
+
+            plt.annotate(f'RMSE = {rmse:.3f}\n $R^2$ = {R2:.3f}',
+                         xy=(0.85, 0.1), xycoords='axes fraction')
+
+            plt.ylabel(key)
+            plt.xlabel(x_axis)
+            plt.legend(loc='best')
+            plt.grid()
+
+            # save the current figure
+            plot_name = key.replace(' ', '_')
+            fig.savefig(f'./{newpath}/Model_vs_PCEModel_{plot_name}.pdf',
+                        bbox_inches='tight')
+
+            # Destroy the current plot
+            plt.clf()
+
+        # Zip the subdirectories
+        Model.zip_subdirs(f'{Model.name}valid', f'{Model.name}valid_')
diff --git a/build/lib/bayesvalidrox/pylink/__init__.py b/build/lib/bayesvalidrox/pylink/__init__.py
new file mode 100644
index 000000000..4bd81739f
--- /dev/null
+++ b/build/lib/bayesvalidrox/pylink/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+
+from .pylink import PyLinkForwardModel
+
+__all__ = [
+    "PyLinkForwardModel"
+    ]
diff --git a/build/lib/bayesvalidrox/pylink/pylink.py b/build/lib/bayesvalidrox/pylink/pylink.py
new file mode 100644
index 000000000..227a51ab3
--- /dev/null
+++ b/build/lib/bayesvalidrox/pylink/pylink.py
@@ -0,0 +1,803 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Calls to the model and evaluations
+"""
+
+from dataclasses import dataclass
+
+import os
+import shutil
+import h5py
+import numpy as np
+import time
+import zipfile
+import pandas as pd
+import multiprocessing
+from functools import partial
+import tqdm
+
+#from multiprocessing import get_context
+from multiprocess import get_context
+
+
+
def within_range(out, minout, maxout):
    """
    Checks if all the values in out lie strictly between minout and maxout

    Parameters
    ----------
    out : array or list
        Data to check against range
    minout : int
        Lower bound of the range
    maxout : int
        Upper bound of the range

    Returns
    -------
    inside : bool
        True if all values in out are in the specified (open) range;
        values equal to a bound count as outside.

    Raises
    ------
    AttributeError
        If `out` cannot be interpreted as a 1D array.
    ValueError
        If `minout` is larger than `maxout`.

    """
    # Narrowed from a bare `except:`; keep the original error type so
    # existing callers still catch AttributeError.
    try:
        out = np.array(out)
    except Exception as exc:
        raise AttributeError(
            'The given values should be a 1D array, but are not') from exc
    if out.ndim != 1:
        raise AttributeError('The given values should be a 1D array, but are not')

    if minout > maxout:
        raise ValueError('The lower and upper bounds do not form a valid range,'
                         ' they might be switched')

    inside = False
    # Strict inequalities: boundary values are treated as outside the range
    if (out > minout).all() and (out < maxout).all():
        inside = True
    return inside
+
+
+class PyLinkForwardModel(object):
+    """
+    A forward model binder
+
+    This class serves as a code wrapper. This wrapper allows the execution of
+    a third-party software/solver within the scope of BayesValidRox.
+
+    Attributes
+    ----------
+    link_type : str
+        The type of the wrapper. The default is `'pylink'`. This runs the
+        third-party software or an executable using a shell command with given
+        input files.
+        Second option is `'function'` which assumed that model can be run using
+        a function written separately in a Python script.
+    name : str
+        Name of the model.
+    py_file : str
+        Python file name without `.py` extension to be run for the `'function'`
+        wrapper. Note that the name of the python file and that of the function
+        must be similar. This function must receive the parameters in an array
+        of shape `(n_samples, n_params)` and returns a dictionary with the
+        x_values and output arrays for given output names.
+    func_args : dict
+        Additional arguments for the python file. The default is `{}`.
+    shell_command : str
+        Shell command to be executed for the `'pylink'` wrapper.
+    input_file : str or list
+        The input file to be passed to the `'pylink'` wrapper.
+    input_template : str or list
+        A template input file to be passed to the `'pylink'` wrapper. This file
+        must be a copy of `input_file` with `<Xi>` place holder for the input
+        parameters defined using `inputs` class, with i being the number of
+        parameter. The file name ending should include `.tpl` before the actual
+        extension of the input file, for example, `params.tpl.input`.
+    aux_file : str or list
+        The list of auxiliary files needed for the `'pylink'` wrapper.
+    exe_path : str
+        Execution path if you wish to run the model for the `'pylink'` wrapper
+        in another directory. The default is `None`, which corresponds to the
+        current working directory.
+    output_file_names : list of str
+        List of the name of the model output text files for the `'pylink'`
+        wrapper.
+    output_names : list of str
+        List of the model outputs to be used for the analysis.
+    output_parser : str
+        Name of the model parser file (without `.py` extension) that receives
+        the `output_file_names` and returns a 2d-array with the first row being
+        the x_values, e.g. x coordinates or time and the rest of rows pass the
+        simulation output for each model output defined in `output_names`. Note
+        that again here the name of the file and that of the function must be
+        the same.
+    multi_process: bool
+        Whether the model runs to be executed in parallel for the `'pylink'`
+        wrapper. The default is `True`.
+    n_cpus: int
+        The number of cpus to be used for the parallel model execution for the
+        `'pylink'` wrapper. The default is `None`, which corresponds to all
+        available cpus.
+    meas_file : str
+        The name of the measurement text-based file. This file must contain
+        x_values as the first column and one column for each model output. The
+        default is `None`. Only needed for the Bayesian Inference.
+    meas_file_valid : str
+        The name of the measurement text-based file for the validation. The
+        default is `None`. Only needed for the validation with Bayesian
+        Inference.
+    mc_ref_file : str
+        The name of the text file for the Monte-Carlo reference (mean and
+        standard deviation) values. It must contain `x_values` as the first
+        column, `mean` as the second column and `std` as the third. It can be
+        used to compare the estimated moments using meta-model in the post-
+        processing step. This is only available for one output.
+    obs_dict : dict
+        A dictionary containing the measurement text-based file. It must
+        contain `x_values` as the first item and one item for each model output
+        . The default is `{}`. Only needed for the Bayesian Inference.
+    obs_dict_valid : dict
+        A dictionary containing the validation measurement text-based file. It
+        must contain `x_values` as the first item and one item for each model
+        output. The default is `{}`.
+    mc_ref_dict : dict
+        A dictionary containing the Monte-Carlo reference (mean and standard
+        deviation) values. It must contain `x_values` as the first item and
+        `mean` as the second item and `std` as the third. The default is `{}`.
+        This is only available for one output.
+    """
+
    # Nested class
    @dataclass
    class OutputData(object):
        # Container for the model-output specification used by the wrapper.
        # parser: name of the parser script (without .py) for output files
        parser: str = ""
        # names: list of model output names used in the analysis
        # (default is None, not an empty list — set in __init__)
        names: list = None
        # file_names: list of model output text files to be parsed
        file_names: list = None
+
+    def __init__(self, link_type='pylink', name=None, py_file=None,
+                 func_args={}, shell_command='', input_file=None,
+                 input_template=None, aux_file=None, exe_path='',
+                 output_file_names=[], output_names=[], output_parser='',
+                 multi_process=True, n_cpus=None, meas_file=None,
+                 meas_file_valid=None, mc_ref_file=None, obs_dict={},
+                 obs_dict_valid={}, mc_ref_dict={}):
+        self.link_type = link_type
+        self.name = name
+        self.shell_command = shell_command
+        self.py_file = py_file
+        self.func_args = func_args
+        self.input_file = input_file
+        self.input_template = input_template
+        self.aux_file = aux_file
+        self.exe_path = exe_path
+        self.multi_process = multi_process
+        self.n_cpus = n_cpus
+        self.Output = self.OutputData(
+            parser=output_parser,
+            names=output_names,
+            file_names=output_file_names,
+        )
+        self.n_outputs = len(self.Output.names)
+        self.meas_file = meas_file
+        self.meas_file_valid = meas_file_valid
+        self.mc_ref_file = mc_ref_file
+        self.observations = obs_dict
+        self.observations_valid = obs_dict_valid
+        self.mc_reference = mc_ref_dict
+
+    # -------------------------------------------------------------------------
+    def read_observation(self, case='calib'):
+        """
+        Reads/prepare the observation/measurement data for
+        calibration.
+        
+        Parameters
+        ----------
+        case : str
+            The type of observation to read in. Can be either 'calib',
+            'valid' or 'mc_ref'
+
+        Returns
+        -------
+        DataFrame
+            A dataframe with the calibration data.
+
+        """
+        # TOOD: check that what is read in/transformed matches the expected form of data/reference
+        if case.lower() == 'calib':
+            if isinstance(self.observations, dict) and bool(self.observations):
+                self.observations = pd.DataFrame.from_dict(self.observations)
+            elif self.meas_file is not None:
+                file_path = os.path.join(os.getcwd(), self.meas_file)
+                self.observations = pd.read_csv(file_path, delimiter=',')
+            elif isinstance(self.observations, pd.DataFrame):
+                self.observations = self.observations
+            else:
+                raise Exception("Please provide the observation data as a "
+                                "dictionary via observations attribute or pass"
+                                " the csv-file path to MeasurementFile "
+                                "attribute")
+            # Compute the number of observation
+            self.n_obs = self.observations[self.Output.names].notnull().sum().values.sum()
+            return self.observations
+            
+        elif case.lower() == 'valid':
+            if isinstance(self.observations_valid, dict) and \
+              bool(self.observations_valid):
+                self.observations_valid = pd.DataFrame.from_dict(self.observations_valid)
+            elif self.meas_file_valid is not None:
+                file_path = os.path.join(os.getcwd(), self.meas_file_valid)
+                self.observations_valid = pd.read_csv(file_path, delimiter=',')
+            elif isinstance(self.observations_valid, pd.DataFrame):
+                self.observations_valid = self.observations_valid
+            else:
+                raise Exception("Please provide the observation data as a "
+                                "dictionary via observations attribute or pass"
+                                " the csv-file path to MeasurementFile "
+                                "attribute")
+            # Compute the number of observation
+            self.n_obs_valid = self.observations_valid[self.Output.names].notnull().sum().values.sum()
+            return self.observations_valid
+                
+        elif case.lower() == 'mc_ref':
+            if self.mc_ref_file is None and \
+               isinstance(self.mc_reference, pd.DataFrame):
+                return self.mc_reference
+            elif isinstance(self.mc_reference, dict) and bool(self.mc_reference):
+                self.mc_reference = pd.DataFrame.from_dict(self.mc_reference)
+            elif self.mc_ref_file is not None:
+                file_path = os.path.join(os.getcwd(), self.mc_ref_file)
+                self.mc_reference = pd.read_csv(file_path, delimiter=',')
+            else:
+                self.mc_reference = None
+            return self.mc_reference
+
+
+    # -------------------------------------------------------------------------
+    def read_output(self):
+        """
+        Reads the the parser output file and returns it as an
+         executable function. It is required when the models returns the
+         simulation outputs in csv files.
+
+        Returns
+        -------
+        Output : func
+            Output parser function.
+
+        """
+        output_func_name = self.Output.parser
+
+        output_func = getattr(__import__(output_func_name), output_func_name)
+
+        file_names = []
+        for File in self.Output.file_names:
+            file_names.append(os.path.join(self.exe_path, File))
+        try:
+            output = output_func(self.name, file_names)
+        except TypeError:
+            output = output_func(file_names)
+        return output
+
+    # -------------------------------------------------------------------------
+    def update_input_params(self, new_input_file, param_set):
+        """
+        Finds this pattern with <X1> in the new_input_file and replace it with
+         the new value from the array param_sets.
+
+        Parameters
+        ----------
+        new_input_file : list
+            List of the input files with the adapted names.
+        param_set : array of shape (n_params)
+            Parameter set.
+
+        Returns
+        -------
+        None.
+
+        """
+        NofPa = param_set.shape[0]
+        text_to_search_list = [f'<X{i+1}>' for i in range(NofPa)]
+
+        for filename in new_input_file:
+            # Read in the file
+            with open(filename, 'r') as file:
+                filedata = file.read()
+
+            # Replace the target string
+            for text_to_search, params in zip(text_to_search_list, param_set):
+                filedata = filedata.replace(text_to_search, f'{params:0.4e}')
+
+            # Write the file out again
+            with open(filename, 'w') as file:
+                file.write(filedata)
+
+    # -------------------------------------------------------------------------
+    def run_command(self, command, output_file_names):
+        """
+        Runs the execution command given by the user to run the given model.
+        It checks if the output files have been generated. If yes, the jobe is
+        done and it extracts and returns the requested output(s). Otherwise,
+        it executes the command again.
+
+        Parameters
+        ----------
+        command : str
+            The shell command to be executed.
+        output_file_names : list
+            Name of the output file names.
+
+        Returns
+        -------
+        simulation_outputs : array of shape (n_obs, n_outputs)
+            Simulation outputs.
+
+        """
+
+        # Check if simulation is finished
+        while True:
+            time.sleep(3)
+            files = os.listdir(".")
+            if all(elem in files for elem in output_file_names):
+                break
+            else:
+                # Run command
+                Process = os.system(f'./../{command}')
+                if Process != 0:
+                    print('\nMessage 1:')
+                    print(f'\tIf the value of \'{Process}\' is a non-zero value'
+                          ', then compilation problems occur \n' % Process)          
+        os.chdir("..")
+
+        # Read the output
+        simulation_outputs = self.read_output()
+
+        return simulation_outputs
+
+    # -------------------------------------------------------------------------
+    def run_forwardmodel(self, xx):
+        """
+        This function creates subdirectory for the current run and copies the
+        necessary files to this directory and renames them. Next, it executes
+        the given command.
+
+        Parameters
+        ----------
+        xx : tuple
+            A tuple including parameter set, simulation number and key string.
+
+        Returns
+        -------
+        output : array of shape (n_outputs+1, n_obs)
+            An array passed by the output paraser containing the x_values as
+            the first row and the simulations results stored in the the rest of
+            the array.
+
+        """
+        c_points, run_no, key_str = xx
+
+        # Handle if only one imput file is provided
+        if not isinstance(self.input_template, list):
+            self.input_template = [self.input_template]
+        if not isinstance(self.input_file, list):
+            self.input_file = [self.input_file]
+
+        new_input_file = []
+        # Loop over the InputTemplates:
+        for in_temp in self.input_template:
+            if '/' in in_temp:
+                in_temp = in_temp.split('/')[-1]
+            new_input_file.append(in_temp.split('.tpl')[0] + key_str +
+                                  f"_{run_no+1}" + in_temp.split('.tpl')[1])
+
+        # Create directories
+        newpath = self.name + key_str + f'_{run_no+1}'
+        if not os.path.exists(newpath):
+            os.makedirs(newpath)
+
+        # Copy the necessary files to the directories
+        print(self.input_template)
+        for in_temp in self.input_template:
+            # Input file(s) of the model
+            shutil.copy2(in_temp, newpath)
+        # Auxiliary file
+        if self.aux_file is not None:
+            shutil.copy2(self.aux_file, newpath)  # Auxiliary file
+
+        # Rename the Inputfile and/or auxiliary file
+        os.chdir(newpath)
+        for input_tem, input_file in zip(self.input_template, new_input_file):
+            if '/' in input_tem:
+                input_tem = input_tem.split('/')[-1]
+            os.rename(input_tem, input_file)
+
+        # Update the parametrs in Input file
+        self.update_input_params(new_input_file, c_points)
+
+        # Update the user defined command and the execution path
+        try:
+            new_command = self.shell_command.replace(self.input_file[0],
+                                                     new_input_file[0])
+            new_command = new_command.replace(self.input_file[1],
+                                              new_input_file[1])
+        except:
+            new_command = self.shell_command.replace(self.input_file[0],
+                                                     new_input_file[0])
+        # Set the exe path if not provided
+        if not bool(self.exe_path):
+            self.exe_path = os.getcwd()
+
+        # Run the model
+        print(new_command)
+        output = self.run_command(new_command, self.Output.file_names)
+
+        return output
+
+    # -------------------------------------------------------------------------
    def run_model_parallel(self, c_points, prevRun_No=0, key_str='',
                           mp=True, verbose=True):
        """
        Runs model simulations. If mp is true (default), then the simulations
         are started in parallel.

        Parameters
        ----------
        c_points : array of shape (n_samples, n_params)
            Collocation points (training set).
        prevRun_No : int, optional
            Previous run number, in case the sequential design is selected.
            The default is `0`.
        key_str : str, optional
            A descriptive string for validation runs. The default is `''`.
        mp : bool, optional
            Multiprocessing. The default is `True`.
        verbose: bool, optional
            Verbosity. The default is `True`.

        Returns
        -------
        all_outputs : dict
            A dictionary with x values (time step or point id) and all outputs.
            Each key contains an array of the shape `(n_samples, n_obs)`.
        new_c_points : array
            Updated collocation points (training set). If a simulation does not
            executed successfully, the parameter set is removed.

        Notes
        -----
        NOTE(review): if `self.Output.names` is empty, `NaN_idx` and
        `new_c_points` are never assigned and the statements after the
        NaN check raise a NameError -- confirm at least one output name
        is always configured before this is called.
        """

        # Initilization
        n_c_points = len(c_points)
        all_outputs = {}
        
        # If the link type is UM-Bridge, then no parallel needs to be started from here
        if self.link_type.lower() == 'umbridge':
            import umbridge 
            if not hasattr(self, 'x_values'):
                raise AttributeError('For model type `umbridge` the attribute `x_values` needs to be set for the model!')
            # Init model
            #model = umbridge.HTTPModel('http://localhost:4242', 'forward')
            self.model = umbridge.HTTPModel(self.host, 'forward') # TODO: is this always forward?
            Function = self.uMBridge_model

        # Extract the function
        if self.link_type.lower() == 'function':
            # Prepare the function
            Function = getattr(__import__(self.py_file), self.py_file)
        # ---------------------------------------------------------------
        # -------------- Multiprocessing with Pool Class ----------------
        # ---------------------------------------------------------------
        # Start a pool with the number of CPUs
        if self.n_cpus is None:
            n_cpus = multiprocessing.cpu_count()
        else:
            n_cpus = self.n_cpus

        # Run forward model.
        # Sequential path: a single sample, or multiprocessing disabled.
        if n_c_points == 1 or not mp:
            if n_c_points== 1:
                if self.link_type.lower() == 'function' or self.link_type.lower() == 'umbridge':
                    group_results = Function(c_points, **self.func_args)
                else:
                    group_results = self.run_forwardmodel(
                        (c_points[0], prevRun_No, key_str)
                        )
            else:
                # Several samples, run one after the other and collect the
                # per-sample results in lists keyed by output name.
                for i in range(c_points.shape[0]):
                    if i == 0:
                        if self.link_type.lower() == 'function' or self.link_type.lower() == 'umbridge':
                            group_results = Function(np.array([c_points[0]]), **self.func_args)
                        else:
                            group_results = self.run_forwardmodel(
                                (c_points[0], prevRun_No, key_str)
                                )
                        # Wrap the first result in lists so later samples
                        # can be appended.
                        for key in group_results:
                            if key != 'x_values':
                                group_results[key] = [group_results[key]]
                    else: 
                        if self.link_type.lower() == 'function' or self.link_type.lower() == 'umbridge':
                            res = Function(np.array([c_points[i]]), **self.func_args)
                        else:
                            res = self.run_forwardmodel(
                                (c_points[i], prevRun_No, key_str)
                                )
                        for key in res:
                            if key != 'x_values':
                                group_results[key].append(res[key])
        
                # Stack the collected per-sample lists into arrays.
                for key in group_results:
                    if key != 'x_values':
                        group_results[key]= np.array(group_results[key])

        # Parallel path: distribute the samples over a process pool.
        elif self.multi_process or mp:
            with get_context('spawn').Pool(n_cpus) as p:
            #with multiprocessing.Pool(n_cpus) as p:
                
                if self.link_type.lower() == 'function' or self.link_type.lower() == 'umbridge':
                    imap_var = p.imap(partial(Function, **self.func_args),
                                      c_points[:, np.newaxis])
                else:
                    args = zip(c_points,
                               [prevRun_No+i for i in range(n_c_points)],
                               [key_str]*n_c_points)
                    imap_var = p.imap(self.run_forwardmodel, args)

                if verbose:
                    desc = f'Running forward model {key_str}'
                    group_results = list(tqdm.tqdm(imap_var, total=n_c_points,
                                                   desc=desc))
                else:
                    group_results = list(imap_var)

        # Check for NaN: drop every sample whose simulation produced a NaN
        # in any output variable.
        for var_i, var in enumerate(self.Output.names):
            # If results are given as one dictionary
            if isinstance(group_results, dict):
                Outputs = np.asarray(group_results[var])
            # If results are given as list of dictionaries
            elif isinstance(group_results, list):
                Outputs = np.asarray([item[var] for item in group_results],
                                     dtype=np.float64)
            NaN_idx = np.unique(np.argwhere(np.isnan(Outputs))[:, 0])
            new_c_points = np.delete(c_points, NaN_idx, axis=0)
            all_outputs[var] = np.atleast_2d(
                np.delete(Outputs, NaN_idx, axis=0)
                )

        # Print the collocation points whose simulations crashed
        if len(NaN_idx) != 0:
            print('\n')
            print('*'*20)
            print("\nThe following parameter sets have been removed:\n",
                  c_points[NaN_idx])
            print("\n")
            print('*'*20)

        # Save time steps or x-values
        if isinstance(group_results, dict):
            all_outputs["x_values"] = group_results["x_values"]
        elif any(isinstance(i, dict) for i in group_results):
            all_outputs["x_values"] = group_results[0]["x_values"]

        # Store simulations in a hdf5 file
        self._store_simulations(
            c_points, all_outputs, NaN_idx, key_str, prevRun_No
            )

        return all_outputs, new_c_points
+    
+    def uMBridge_model(self, params):
+        """
+        Function that calls a UMBridge model and transforms its output into the 
+        shape expected for the surrogate.
+    
+        Parameters
+        ----------
+        params : 2d np.array, shape (#samples, #params)
+            The parameter values for which the model is run.
+    
+        Returns
+        -------
+        dict
+            The transformed model outputs.
+    
+        """
+        # Run the model
+        #out = np.array(model(np.ndarray.tolist(params), {'level':0}))
+        out = np.array(self.model(np.ndarray.tolist(params), self.modelparams))
+        
+        # Sort into dict
+        out_dict = {}
+        cnt = 0
+        for key in self.Output.names:
+        #    # If needed resort into single-value outputs
+        #    if self.output_type == 'single-valued':
+        #        if out.shape[1]>1:  # TODO: this doesn't fully seem correct??
+        #            for i in range(out[:,key]): # TODO: this doesn't fully seem correct??
+        #                new_key = key+str(i)
+        #                if new_key not in self.Output.names:
+        #                    self.Output.names.append(new_key)
+        #                    if i == 0:
+        #                        self.Ouptut.names.remove(key)
+        #                out_dict[new_key] = out[:,cnt,i] # TODO: not sure about this, need to test
+        #        else: 
+        #            out_dict[key] = out[:,cnt]
+        #            
+        #        
+        #    else:
+            out_dict[key] = out[:,cnt]
+            cnt += 1
+        
+            
+        ## TODO: how to deal with the x-values?
+        #if self.output_type == 'single-valued':
+        #    out_dict['x_values'] = [0]
+        #else:
+        #    out_dict['x_values'] = np.arange(0,out[:,0].shape[0],1)
+        out_dict['x_values'] = self.x_values
+        
+        #return {'T1':out[:,0], 'T2':out[:,1], 'H1':out[:,2], 'H2':out[:,3], 
+       #         'x_values':[0]}
+        return out_dict
+
+    # -------------------------------------------------------------------------
    def _store_simulations(self, c_points, all_outputs, NaN_idx, key_str,
                           prevRun_No):
        """
        Stores the collocation points and the model outputs in an HDF5 file.

        The file is named `ExpDesign_<name>.hdf5` when `key_str` is empty
        and `ValidSet_<name>.hdf5` otherwise. Datasets prefixed with `New_`
        exclude the samples listed in `NaN_idx`.

        Parameters
        ----------
        c_points : array of shape (n_samples, n_params)
            Collocation points used for the simulations.
        all_outputs : dict
            Simulation outputs per output name plus an 'x_values' entry.
        NaN_idx : array
            Row indices of failed (NaN) simulations; removed from the
            `New_*` datasets.
        key_str : str
            Descriptive string of the run; empty for the experimental design.
        prevRun_No : int
            Previous run number; `0` together with an empty `key_str` marks
            the initial design.

        Returns
        -------
        None.

        """

        # Create hdf5 metadata
        if key_str == '':
            hdf5file = f'ExpDesign_{self.name}.hdf5'
        else:
            hdf5file = f'ValidSet_{self.name}.hdf5'
        hdf5_exist = os.path.exists(hdf5file)
        file = h5py.File(hdf5file, 'a')

        # ---------- Save time steps or x-values ----------
        # x_values may be shared (array) or output-specific (dict of arrays).
        if not hdf5_exist:
            if type(all_outputs["x_values"]) is dict:
                grp_x_values = file.create_group("x_values/")
                for varIdx, var in enumerate(self.Output.names):
                    grp_x_values.create_dataset(
                        var, data=all_outputs["x_values"][var]
                        )
            else:
                file.create_dataset("x_values", data=all_outputs["x_values"])

        # ---------- Save outputs ----------
        for varIdx, var in enumerate(self.Output.names):
            if not hdf5_exist:
                grpY = file.create_group("EDY/"+var)
            else:
                grpY = file.get("EDY/"+var)

            if prevRun_No == 0 and key_str == '':
                grpY.create_dataset(f'init_{key_str}', data=all_outputs[var])
            else:
                # Append to an existing adaptive dataset: read the old data,
                # delete the dataset and re-create it with the stacked data
                # (h5py datasets are not resized in place here).
                try:
                    oldEDY = np.array(file[f'EDY/{var}/adaptive_{key_str}'])
                    del file[f'EDY/{var}/adaptive_{key_str}']
                    data = np.vstack((oldEDY, all_outputs[var]))
                except KeyError:
                    # First adaptive iteration: nothing to append to.
                    data = all_outputs[var]
                grpY.create_dataset('adaptive_'+key_str, data=data)

            if prevRun_No == 0 and key_str == '':
                grpY.create_dataset(f"New_init_{key_str}",
                                    data=all_outputs[var])
            else:
                try:
                    name = f'EDY/{var}/New_adaptive_{key_str}'
                    oldEDY = np.array(file[name])
                    del file[f'EDY/{var}/New_adaptive_{key_str}']
                    data = np.vstack((oldEDY, all_outputs[var]))
                except KeyError:
                    data = all_outputs[var]
                grpY.create_dataset(f'New_adaptive_{key_str}', data=data)

        # ---------- Save CollocationPoints ----------
        # `new_c_points` has the NaN-producing parameter sets removed.
        new_c_points = np.delete(c_points, NaN_idx, axis=0)
        grpX = file.create_group("EDX") if not hdf5_exist else file.get("EDX")
        if prevRun_No == 0 and key_str == '':
            grpX.create_dataset("init_"+key_str, data=c_points)
            if len(NaN_idx) != 0:
                grpX.create_dataset("New_init_"+key_str, data=new_c_points)

        else:
            try:
                name = f'EDX/adaptive_{key_str}'
                oldCollocationPoints = np.array(file[name])
                del file[f'EDX/adaptive_{key_str}']
                data = np.vstack((oldCollocationPoints, new_c_points))
            except KeyError:
                data = new_c_points
            grpX.create_dataset('adaptive_'+key_str, data=data)

            if len(NaN_idx) != 0:
                try:
                    name = f'EDX/New_adaptive_{key_str}'
                    oldCollocationPoints = np.array(file[name])
                    del file[f'EDX/New_adaptive_{key_str}']
                    data = np.vstack((oldCollocationPoints, new_c_points))
                except KeyError:
                    data = new_c_points
                grpX.create_dataset('New_adaptive_'+key_str, data=data)

        # Close h5py file
        file.close()
+
+    # -------------------------------------------------------------------------
+    def zip_subdirs(self, dir_name, key):
+        """
+        Zips all the files containing the key(word).
+
+        Parameters
+        ----------
+        dir_name : str
+            Directory name.
+        key : str
+            Keyword to search for.
+
+        Returns
+        -------
+        None.
+
+        """
+        # setup file paths variable
+        dir_list = []
+        file_paths = []
+
+        # Read all directory, subdirectories and file lists
+        dir_path = os.getcwd()
+
+        for root, directories, files in os.walk(dir_path):
+            for directory in directories:
+                # Create the full filepath by using os module.
+                if key in directory:
+                    folderPath = os.path.join(dir_path, directory)
+                    dir_list.append(folderPath)
+
+        # Loop over the identified directories to store the file paths
+        for direct_name in dir_list:
+            for root, directories, files in os.walk(direct_name):
+                for filename in files:
+                    # Create the full filepath by using os module.
+                    filePath = os.path.join(root, filename)
+                    file_paths.append('.'+filePath.split(dir_path)[1])
+
+        # writing files to a zipfile
+        if len(file_paths) != 0:
+            zip_file = zipfile.ZipFile(dir_name+'.zip', 'w')
+            with zip_file:
+                # writing each file one by one
+                for file in file_paths:
+                    zip_file.write(file)
+
+            file_paths = [path for path in os.listdir('.') if key in path]
+
+            for path in file_paths:
+                shutil.rmtree(path)
+
+            print("\n")
+            print(f'{dir_name}.zip has been created successfully!\n')
+
+        return
diff --git a/build/lib/bayesvalidrox/surrogate_models/__init__.py b/build/lib/bayesvalidrox/surrogate_models/__init__.py
new file mode 100644
index 000000000..70bfb20f5
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+
+from .surrogate_models import MetaModel
+
+__all__ = [
+    "MetaModel"
+    ]
diff --git a/build/lib/bayesvalidrox/surrogate_models/adaptPlot.py b/build/lib/bayesvalidrox/surrogate_models/adaptPlot.py
new file mode 100644
index 000000000..102f0373c
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/adaptPlot.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Aug 13 13:46:24 2020
+
+@author: farid
+"""
+import os
+from sklearn.metrics import mean_squared_error, r2_score
+from itertools import cycle
+from matplotlib.backends.backend_pdf import PdfPages
+import matplotlib.pyplot as plt
+
+
def adaptPlot(PCEModel, Y_Val, Y_PC_Val, Y_PC_Val_std, x_values=[],
              plotED=False, SaveFig=True):
    """
    Plots the model evaluations against the metamodel (PCE) predictions
    with a 95% credible band for the current sequential-design iteration
    and annotates each output with RMSE and R^2.

    Parameters
    ----------
    PCEModel : obj
        Metamodel object; `ExpDesign` and `ModelObj.Output.names` are read.
    Y_Val : dict
        Model evaluations per output name (may also carry an 'x_values' key).
    Y_PC_Val : dict
        Metamodel mean predictions per output name.
    Y_PC_Val_std : dict
        Metamodel standard deviations per output name.
    x_values : list or dict, optional
        X values to plot against when `Y_Val` has no 'x_values' entry.
        The default is `[]` (read-only here, never mutated).
    plotED : bool, optional
        Additionally plot the experimental-design outputs in grey.
        The default is `False`.
    SaveFig : bool, optional
        Save the plots into `adaptivePlots/*.pdf`. The default is `True`.

    """
    NrofSamples = PCEModel.ExpDesign.n_new_samples
    initNSamples = PCEModel.ExpDesign.n_init_samples
    # Iteration number inferred from how much the design has grown
    itrNr = 1 + (PCEModel.ExpDesign.X.shape[0] - initNSamples)//NrofSamples

    oldEDY = PCEModel.ExpDesign.Y

    if SaveFig:
        newpath = 'adaptivePlots'
        os.makedirs(newpath, exist_ok=True)

        # create a PdfPages object
        pdf = PdfPages(f'./{newpath}/Model_vs_PCEModel_itr_{itrNr}.pdf')

    # List of markers and colors
    color = cycle((['b', 'g', 'r', 'y', 'k']))
    marker = cycle(('x', 'd', '+', 'o', '*'))

    OutNames = list(Y_Val.keys())
    x_axis = 'Time [s]'

    if len(OutNames) == 1:
        OutNames.insert(0, x_axis)
    # Prefer the x values delivered with the model evaluations
    try:
        x_values = Y_Val['x_values']
    except KeyError:
        x_values = x_values

    fig = plt.figure(figsize=(24, 16))

    # Plot the model vs PCE model
    for keyIdx, key in enumerate(PCEModel.ModelObj.Output.names):
        Y_PC_Val_ = Y_PC_Val[key]
        Y_PC_Val_std_ = Y_PC_Val_std[key]
        Y_Val_ = Y_Val[key]
        if Y_Val_.ndim == 1:
            Y_Val_ = Y_Val_.reshape(1, -1)
        old_EDY = oldEDY[key]
        if isinstance(x_values, dict):
            x = x_values[key]
        else:
            x = x_values

        for idx, y in enumerate(Y_Val_):
            Color = next(color)
            Marker = next(marker)

            plt.plot(
                x, y, color=Color, marker=Marker,
                lw=2.0, label='$Y_{%s}^{M}$'%(idx+itrNr)
                )

            plt.plot(
                x, Y_PC_Val_[idx], color=Color, marker=Marker,
                lw=2.0, linestyle='--', label='$Y_{%s}^{PCE}$'%(idx+itrNr)
                )
            # 95% credible interval around the metamodel mean
            plt.fill_between(
                x, Y_PC_Val_[idx]-1.96*Y_PC_Val_std_[idx],
                Y_PC_Val_[idx]+1.96*Y_PC_Val_std_[idx], color=Color,
                alpha=0.15
                )

            if plotED:
                for output in old_EDY:
                    plt.plot(x, output, color='grey', alpha=0.1)

        # Calculate the RMSE
        RMSE = mean_squared_error(Y_PC_Val_, Y_Val_, squared=False)
        R2 = r2_score(Y_PC_Val_.reshape(-1, 1), Y_Val_.reshape(-1, 1))

        plt.ylabel(key)
        plt.xlabel(x_axis)
        plt.title(key)

        ax = fig.axes[0]
        ax.legend(loc='best', frameon=True)
        fig.canvas.draw()
        ax.text(0.65, 0.85,
                f'RMSE = {round(RMSE, 3)}\n$R^2$ = {round(R2, 3)}',
                transform=ax.transAxes, color='black',
                bbox=dict(facecolor='none',
                          edgecolor='black',
                          boxstyle='round,pad=1')
                )
        plt.grid()

        if SaveFig:
            # save the current figure
            pdf.savefig(fig, bbox_inches='tight')

            # Destroy the current plot
            plt.clf()

    # BUGFIX: only close the pdf if one was opened; the original called
    # pdf.close() unconditionally and raised NameError for SaveFig=False.
    if SaveFig:
        pdf.close()
diff --git a/build/lib/bayesvalidrox/surrogate_models/apoly_construction.py b/build/lib/bayesvalidrox/surrogate_models/apoly_construction.py
new file mode 100644
index 000000000..40830fe8a
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/apoly_construction.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import numpy as np
+
+
def apoly_construction(Data, degree):
    """
    Construction of Data-driven Orthonormal Polynomial Basis
    Author: Dr.-Ing. habil. Sergey Oladyshkin
    Department of Stochastic Simulation and Safety Research for Hydrosystems
    Institute for Modelling Hydraulic and Environmental Systems
    Universitaet Stuttgart, Pfaffenwaldring 5a, 70569 Stuttgart
    E-mail: Sergey.Oladyshkin@iws.uni-stuttgart.de
    http://www.iws-ls3.uni-stuttgart.de
    The current script is based on definition of arbitrary polynomial chaos
    expansion (aPC), which is presented in the following manuscript:
    Oladyshkin, S. and W. Nowak. Data-driven uncertainty quantification using
    the arbitrary polynomial chaos expansion. Reliability Engineering & System
    Safety, Elsevier, V. 106, P.  179-190, 2012.
    DOI: 10.1016/j.ress.2012.05.002.

    Parameters
    ----------
    Data : array
        Raw data.
    degree : int
        Maximum polynomial degree.

    Returns
    -------
    Polynomial : array
        The coefficients of the univariate orthonormal polynomials.

    """
    if Data.ndim != 1:
        raise AttributeError('Data should be a 1D array')

    # Initialization
    dd = degree + 1
    nsamples = len(Data)

    # Forward linear transformation (Avoiding numerical issues)
    MeanOfData = np.mean(Data)
    Data = Data/MeanOfData

    # Compute raw moments of input data
    raw_moments = [np.sum(np.power(Data, p))/nsamples for p in range(2*dd+2)]

    # Main Loop for Polynomial with degree up to dd.
    # NOTE: the loop variable used to shadow the `degree` argument; it is
    # renamed to `deg` here (behavior unchanged).
    PolyCoeff_NonNorm = np.empty((0, 1))
    Polynomial = np.zeros((dd+1, dd+1))

    for deg in range(dd+1):
        Mm = np.zeros((deg+1, deg+1))
        Vc = np.zeros((deg+1))

        # Define Moments Matrix Mm
        for i in range(deg+1):
            for j in range(deg+1):
                if (i < deg):
                    Mm[i, j] = raw_moments[i+j]

                elif (i == deg) and (j == deg):
                    Mm[i, j] = 1

            # Numerical Optimization for Matrix Solver
            Mm[i] = Mm[i] / max(abs(Mm[i]))

        # Definition of the right-hand side orthogonality conditions: Vc
        for i in range(deg+1):
            Vc[i] = 1 if i == deg else 0

        # Solution: Coefficients of Non-Normal Orthogonal Polynomial: Vp Eq.(4)
        # BUGFIX: the bare `except:` is narrowed to the singular-matrix case;
        # any other failure should propagate instead of being masked.
        try:
            Vp = np.linalg.solve(Mm, Vc)
        except np.linalg.LinAlgError:
            inv_Mm = np.linalg.pinv(Mm)
            Vp = np.dot(inv_Mm, Vc.T)

        if deg == 0:
            PolyCoeff_NonNorm = np.append(PolyCoeff_NonNorm, Vp)

        if deg != 0:
            if deg == 1:
                zero = [0]
            else:
                zero = np.zeros((deg, 1))
            # Pad the stored lower-degree coefficients with a zero column
            # before stacking the new coefficient row.
            PolyCoeff_NonNorm = np.hstack((PolyCoeff_NonNorm, zero))

            PolyCoeff_NonNorm = np.vstack((PolyCoeff_NonNorm, Vp))

        if 100*abs(sum(abs(np.dot(Mm, Vp)) - abs(Vc))) > 0.5:
            print('\n---> Attention: Computational Error too high !')
            print('\n---> Problem: Convergence of Linear Solver')

        # Numerical normalization of the coefficients with the empirical
        # norm so the basis is orthonormal w.r.t. the data distribution.
        # Note: Polynomial[i, j] corresponds to coefficient number "j-1"
        # of polynomial degree "i-1".
        P_norm = 0
        for i in range(nsamples):
            Poly = 0
            for k in range(deg+1):
                if deg == 0:
                    Poly += PolyCoeff_NonNorm[k] * (Data[i]**k)
                else:
                    Poly += PolyCoeff_NonNorm[deg, k] * (Data[i]**k)

            P_norm += Poly**2 / nsamples

        P_norm = np.sqrt(P_norm)

        for k in range(deg+1):
            if deg == 0:
                Polynomial[deg, k] = PolyCoeff_NonNorm[k]/P_norm
            else:
                Polynomial[deg, k] = PolyCoeff_NonNorm[deg, k]/P_norm

    # Backward linear transformation to the real data space
    Data *= MeanOfData
    for k in range(len(Polynomial)):
        Polynomial[:, k] = Polynomial[:, k] / (MeanOfData**(k))

    return Polynomial
diff --git a/build/lib/bayesvalidrox/surrogate_models/bayes_linear.py b/build/lib/bayesvalidrox/surrogate_models/bayes_linear.py
new file mode 100644
index 000000000..3bd827ac0
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/bayes_linear.py
@@ -0,0 +1,523 @@
+import numpy as np
+from sklearn.base import RegressorMixin
+from sklearn.linear_model._base import LinearModel
+from sklearn.utils import check_X_y, check_array, as_float_array
+from sklearn.utils.validation import check_is_fitted
+from scipy.linalg import svd
+import warnings
+from sklearn.preprocessing import normalize as f_normalize
+
+
+
class BayesianLinearRegression(RegressorMixin,LinearModel):
    '''
    Base class shared by the Empirical Bayes and Variational Bayes
    implementations of the Bayesian Linear Regression model. Holds the
    common hyperparameters and the centering / convergence helpers.
    '''
    def __init__(self, n_iter, tol, fit_intercept,copy_X, verbose):
        # Maximum number of optimization iterations.
        self.n_iter = n_iter
        # Whether a bias term is estimated.
        self.fit_intercept = fit_intercept
        # Copy X before in-place centering if True.
        self.copy_X = copy_X
        # Print per-iteration progress if True.
        self.verbose = verbose
        # Elementwise convergence threshold on the posterior mean.
        self.tol = tol

    def _check_convergence(self, mu, mu_old):
        '''
        True when no component of the posterior mean of the weights moved
        by more than ``tol`` since the previous iteration.
        '''
        return not np.any(np.abs(mu - mu_old) > self.tol)

    def _center_data(self,X,y):
        '''
        Center X and y (when ``fit_intercept`` is set). No scaling is
        performed here: X_std is always a vector of ones.
        '''
        X = as_float_array(X,copy = self.copy_X)
        # normalisation should be done in preprocessing!
        X_std = np.ones(X.shape[1], dtype = X.dtype)
        if not self.fit_intercept:
            X_mean = np.zeros(X.shape[1],dtype = X.dtype)
            y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
            return X, y, X_mean, y_mean, X_std
        X_mean = np.average(X,axis = 0)
        y_mean = np.average(y,axis = 0)
        X     -= X_mean          # in-place: X may already be a copy (copy_X)
        y      = y - y_mean
        return X, y, X_mean, y_mean, X_std

    def predict_dist(self,X):
        '''
        Calculates  mean and variance of predictive distribution for each data 
        point of test set.(Note predictive distribution for each data point is 
        Gaussian, therefore it is uniquely determined by mean and variance)                    
                    
        Parameters
        ----------
        x: array-like of size (n_test_samples, n_features)
            Set of features for which corresponding responses should be predicted

        Returns
        -------
        :list of two numpy arrays [mu_pred, var_pred]
        
            mu_pred : numpy array of size (n_test_samples,)
                      Mean of predictive distribution
                      
            var_pred: numpy array of size (n_test_samples,)
                      Variance of predictive distribution        
        '''
        # check_array / check_is_fitted happen inside _decision_function.
        mu_pred = self._decision_function(X)
        noise_var = 1. / self.beta_
        # Model uncertainty: project onto eigenbasis of the posterior
        # covariance and weight the squared projections by the eigenvalues.
        projections = np.dot(X, self.eigvecs_)
        model_var = np.sum(projections ** 2 * self.eigvals_, 1)
        return [mu_pred, noise_var + model_var]
+    
+        
+        
+
class EBLinearRegression(BayesianLinearRegression):
    '''
    Bayesian Regression with type II maximum likelihood (Empirical Bayes)
    
    Parameters:
    -----------  
    n_iter: int, optional (DEFAULT = 300)
       Maximum number of iterations
         
    tol: float, optional (DEFAULT = 1e-3)
       Threshold for convergence
       
    optimizer: str, optional (DEFAULT = 'fp')
       Method for optimization , either Expectation Maximization or 
       Fixed Point Gull-MacKay {'em','fp'}. Fixed point iterations are
       faster, but can be numerically unstable (especially in case of near perfect fit).
       
    fit_intercept: bool, optional (DEFAULT = True)
       If True includes bias term in model

    normalize: bool, optional (DEFAULT = True)
       If True, the columns of X are scaled to unit l2-norm before fitting.

    perfect_fit_tol: float (DEFAULT = 1e-6)
       Prevents overflow of precision parameters (this is smallest value RSS can have).
       ( !!! Note if using EM instead of fixed-point, try smaller values
       of perfect_fit_tol, for better estimates of variance of predictive distribution )

    alpha: float (DEFAULT = 1)
       Initial value of precision parameter for coefficients ( by default we define 
       very broad distribution )
       
    copy_X : boolean, optional (DEFAULT = True)
        If True, X will be copied, otherwise it may be overwritten in place.
        
    verbose: bool, optional (Default = False)
       If True at each iteration progress report is printed out
    
    Attributes
    ----------
    coef_  : array, shape = (n_features)
        Coefficients of the regression model (mean of posterior distribution)
        
    intercept_: float
        Value of bias term (if fit_intercept is False, then intercept_ = 0)
        
    alpha_ : float
        Estimated precision of coefficients
       
    beta_  : float 
        Estimated precision of noise
        
    eigvals_ : array, shape = (n_features, )
        Eigenvalues of covariance matrix (from posterior distribution of weights)
        
    eigvecs_ : array, shape = (n_features, n_featues)
        Eigenvectors of covariance matrix (from posterior distribution of weights)

    '''
    
    def __init__(self, n_iter=300, tol=1e-3, optimizer='fp', fit_intercept=True,
                 normalize=True, perfect_fit_tol=1e-6, alpha=1, copy_X=True,
                 verbose=False):
        super(EBLinearRegression, self).__init__(n_iter, tol, fit_intercept,
                                                 copy_X, verbose)
        if optimizer not in ['em', 'fp']:
            raise ValueError('Optimizer can be either "em" or "fp" ')
        self.optimizer = optimizer
        self.alpha = alpha
        self.perfect_fit = False
        # Bug fix: this was hard-coded to True, silently ignoring the
        # `normalize` argument passed by the caller.
        self.normalize = normalize
        # np.NINF was removed in NumPy 2.0; -np.inf is the portable spelling.
        self.scores_ = [-np.inf]
        self.perfect_fit_tol = perfect_fit_tol

    # _check_convergence is inherited unchanged from BayesianLinearRegression;
    # the previous identical re-definition here was redundant.

    def _center_data(self, X, y):
        '''
        Center X and y and, when ``normalize`` is set, scale the columns of X
        to unit l2-norm. Returns the transformed data plus the means and
        column norms needed to map coefficients back to the original scale.
        '''
        X = as_float_array(X, copy=self.copy_X)
        X_std = np.ones(X.shape[1], dtype=X.dtype)
        if self.fit_intercept:
            X_mean = np.average(X, axis=0)
            X -= X_mean
            if self.normalize:
                X, X_std = f_normalize(X, axis=0, copy=False,
                                       return_norm=True)
            else:
                X_std = np.ones(X.shape[1], dtype=X.dtype)
            y_mean = np.average(y, axis=0)
            y = y - y_mean
        else:
            X_mean = np.zeros(X.shape[1], dtype=X.dtype)
            y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
        return X, y, X_mean, y_mean, X_std

    def fit(self, X, y):
        '''
        Fits Bayesian Linear Regression using Empirical Bayes
        
        Parameters
        ----------
        X: array-like of size [n_samples,n_features]
           Matrix of explanatory variables (should not include bias term)
       
        y: array-like of size [n_features]
           Vector of dependent variables.
           
        Returns
        -------
        object: self
          self
    
        '''
        # preprocess data
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        X, y, X_mean, y_mean, X_std = self._center_data(X, y)
        self._x_mean_ = X_mean
        self._y_mean = y_mean
        self._x_std = X_std

        # initial precision of noise & coefficients
        alpha = self.alpha
        var_y = np.var(y)
        # Guard against constant targets: zero variance would make the
        # initial noise precision infinite.
        if var_y == 0:
            beta = 1e-2
        else:
            beta = 1. / var_y

        # SVD is computed once and reused in every iteration below.
        u, d, vt = svd(X, full_matrices=False)
        Uy = np.dot(u.T, y)
        dsq = d ** 2
        mu = 0

        for i in range(self.n_iter):

            # find mean for posterior of w (for EM this is the E-step)
            mu_old = mu
            if n_samples > n_features:
                mu = vt.T * d / (dsq + alpha / beta)
            else:
                # clever use of SVD here, faster for large n_features
                mu = u * 1. / (dsq + alpha / beta)
                mu = np.dot(X.T, mu)
            mu = np.dot(mu, Uy)

            # precompute errors, since both methods use it in estimation
            error = y - np.dot(X, mu)
            sqdErr = np.sum(error ** 2)

            if sqdErr / n_samples < self.perfect_fit_tol:
                self.perfect_fit = True
                warnings.warn(('Almost perfect fit!!! Estimated values of variance '
                               'for predictive distribution are computed using only RSS'))
                break

            if self.optimizer == "fp":
                gamma = np.sum(beta * dsq / (beta * dsq + alpha))
                # use updated mu and gamma parameters to update alpha and beta
                # !!! made computation numerically stable for perfect fit case
                alpha = gamma / (np.sum(mu ** 2) + np.finfo(np.float32).eps)
                beta = (n_samples - gamma) / (sqdErr + np.finfo(np.float32).eps)
            else:
                # M-step, update parameters alpha and beta to maximize ML TYPE II
                eigvals = 1. / (beta * dsq + alpha)
                alpha = n_features / (np.sum(mu ** 2) + np.sum(1 / eigvals))
                beta = n_samples / (sqdErr + np.sum(dsq / eigvals))

            # if converged or exceeded maximum number of iterations => terminate
            converged = self._check_convergence(mu_old, mu)
            if self.verbose:
                print("Iteration {0} completed".format(i))
                if converged is True:
                    print("Algorithm converged after {0} iterations".format(i))
            if converged or i == self.n_iter - 1:
                break

        # Final posterior quantities from the last alpha/beta estimates.
        eigvals = 1. / (beta * dsq + alpha)
        self.coef_ = beta * np.dot(vt.T * d * eigvals, Uy)
        self._set_intercept(X_mean, y_mean, X_std)
        self.beta_ = beta
        self.alpha_ = alpha
        self.eigvals_ = eigvals
        self.eigvecs_ = vt.T

        # set intercept_ (NOTE(review): this re-derives coef_/intercept_ after
        # _set_intercept above — confirm the double rescaling is intended).
        if self.fit_intercept:
            self.coef_ = self.coef_ / X_std
            self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
        else:
            self.intercept_ = 0.

        return self

    def predict(self, X, return_std=False):
        '''
        Computes predictive distribution for test set.
        Predictive distribution for each data point is one dimensional
        Gaussian and therefore is characterised by mean and variance.
        
        Parameters
        -----------
        X: {array-like, sparse} (n_samples_test, n_features)
           Test data, matrix of explanatory variables
           
        Returns
        -------
        : y_hat, or list of length two [y_hat, std_hat] if return_std is True
        
             y_hat: numpy array of size (n_samples_test,)
                    Estimated values of targets on test set (i.e. mean of predictive
                    distribution)
           
             std_hat: numpy array of size (n_samples_test,)
                    Standard deviation of predictive distribution
        '''
        y_hat = np.dot(X, self.coef_) + self.intercept_

        if return_std:
            # Variance is computed in the (centered/normalized) training space.
            if self.normalize:
                X = (X - self._x_mean_) / self._x_std
            data_noise = 1. / self.beta_
            model_noise = np.sum(np.dot(X, self.eigvecs_) ** 2 * self.eigvals_, 1)
            var_pred = data_noise + model_noise
            std_hat = np.sqrt(var_pred)
            return y_hat, std_hat
        else:
            return y_hat
+            
+            
+# ==============================  VBLR  =========================================
+
def gamma_mean(a, b):
    '''
    Mean of a Gamma distribution parameterized by shape and rate.

    Parameters
    ----------
    a: float
      Shape parameter of Gamma distribution

    b: float
      Rate parameter of Gamma distribution

    Returns
    -------
    : float
      Mean of Gamma distribution, i.e. a / b
    '''
    shape = float(a)
    return shape / b
+    
+
+
class VBLinearRegression(BayesianLinearRegression):
    '''
    Implements Bayesian Linear Regression using mean-field approximation.
    Assumes gamma prior on precision parameters of coefficients and noise.

    Parameters:
    -----------
    n_iter: int, optional (DEFAULT = 100)
       Maximum number of iterations for KL minimization

    tol: float, optional (DEFAULT = 1e-4)
       Convergence threshold
       
    fit_intercept: bool, optional (DEFAULT = True)
       If True will use bias term in model fitting

    a: float, optional (Default = 1e-4)
       Shape parameter of Gamma prior for precision of coefficients
       
    b: float, optional (Default = 1e-4)
       Rate parameter of Gamma prior for precision coefficients
       
    c: float, optional (Default = 1e-4)
       Shape parameter of  Gamma prior for precision of noise
       
    d: float, optional (Default = 1e-4)
       Rate parameter of  Gamma prior for precision of noise
       
    verbose: bool, optional (Default = False)
       If True at each iteration progress report is printed out
       
    Attributes
    ----------
    coef_  : array, shape = (n_features)
        Coefficients of the regression model (mean of posterior distribution)
        
    intercept_: float
        Value of bias term (if fit_intercept is False, then intercept_ = 0)
        
    alpha_ : float
        Mean of precision of coefficients
       
    beta_  : float 
        Mean of precision of noise

    eigvals_ : array, shape = (n_features, )
        Eigenvalues of covariance matrix (from posterior distribution of weights)
        
    eigvecs_ : array, shape = (n_features, n_featues)
        Eigenvectors of covariance matrix (from posterior distribution of weights)

    '''
    
    def __init__(self, n_iter = 100, tol =1e-4, fit_intercept = True, 
                 a = 1e-4, b = 1e-4, c = 1e-4, d = 1e-4, copy_X = True,
                 verbose = False):
        super(VBLinearRegression,self).__init__(n_iter, tol, fit_intercept, copy_X,
                                                verbose)
        # (a, b): shape/rate of the Gamma prior on the weight precision;
        # (c, d): shape/rate of the Gamma prior on the noise precision.
        self.a,self.b   =  a, b
        self.c,self.d   =  c, d

        
    def fit(self,X,y):
        '''
        Fits Variational Bayesian Linear Regression Model
        
        Parameters
        ----------
        X: array-like of size [n_samples,n_features]
           Matrix of explanatory variables (should not include bias term)
       
        Y: array-like of size [n_features]
           Vector of dependent variables.
           
        Returns
        -------
        object: self
          self
        '''
        # preprocess data: validate and center (no scaling in the base class)
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        X, y, X_mean, y_mean, X_std = self._center_data(X, y)
        self._x_mean_ = X_mean
        self._y_mean  = y_mean
        self._x_std   = X_std
        
        # SVD decomposition, done once , reused at each iteration
        u,D,vt = svd(X, full_matrices = False)
        dsq    = D**2
        UY     = np.dot(u.T,y)
        
        # some parameters of Gamma distribution have closed form solution
        # (the shape parameters a and c do not change during the iterations)
        a      = self.a + 0.5 * n_features
        c      = self.c + 0.5 * n_samples
        b,d    = self.b,  self.d
        
        # initial mean of posterior for coefficients
        mu     = 0
                
        for i in range(self.n_iter):
            
            # update parameters of distribution Q(weights)
            # using the current expected precisions E[beta] and E[alpha]
            e_beta       = gamma_mean(c,d)
            e_alpha      = gamma_mean(a,b)
            mu_old       = np.copy(mu)
            mu,eigvals   = self._posterior_weights(e_beta,e_alpha,UY,dsq,u,vt,D,X)
            
            # update parameters of distribution Q(precision of weights) 
            b            = self.b + 0.5*( np.sum(mu**2) + np.sum(eigvals))
            
            # update parameters of distribution Q(precision of likelihood)
            sqderr       = np.sum((y - np.dot(X,mu))**2)
            xsx          = np.sum(dsq*eigvals)
            d            = self.d + 0.5*(sqderr + xsx)
 
            # check convergence 
            converged = self._check_convergence(mu,mu_old)
            if self.verbose is True:
                print("Iteration {0} is completed".format(i))
                if converged is True:
                    print("Algorithm converged after {0} iterations".format(i))
               
            # terminate if convergence or maximum number of iterations are achieved
            if converged or i==(self.n_iter-1):
                break
            
        # save necessary parameters    
        self.beta_   = gamma_mean(c,d)
        self.alpha_  = gamma_mean(a,b)
        self.coef_, self.eigvals_ = self._posterior_weights(self.beta_, self.alpha_, UY,
                                                            dsq, u, vt, D, X)
        self._set_intercept(X_mean,y_mean,X_std)
        self.eigvecs_ = vt.T
        return self
        

    def _posterior_weights(self, e_beta, e_alpha, UY, dsq, u, vt, d, X):
        '''
        Calculates parameters of approximate posterior distribution 
        of weights: the mean vector and the eigenvalues of the
        posterior covariance (in the right-singular-vector basis).
        '''
        # eigenvalues of covariance matrix
        sigma = 1./ (e_beta*dsq + e_alpha)
        
        # mean of approximate posterior distribution
        # (two equivalent SVD forms; the second is cheaper when
        # n_features > n_samples)
        n_samples, n_features = X.shape
        if n_samples > n_features:
             mu =  vt.T *  d/(dsq + e_alpha/e_beta)# + np.finfo(np.float64).eps) 
        else:
             mu =  u * 1./(dsq + e_alpha/e_beta)# + np.finfo(np.float64).eps)
             mu =  np.dot(X.T,mu)
        mu =  np.dot(mu,UY)
        return mu,sigma
        
    def predict(self,X, return_std=False):
        '''
        Computes predictive distribution for test set.
        Predictive distribution for each data point is one dimensional
        Gaussian and therefore is characterised by mean and variance.
        
        Parameters
        -----------
        X: {array-like, sparse} (n_samples_test, n_features)
           Test data, matrix of explanatory variables
           
        Returns
        -------
        : y_hat, or the pair (y_hat, std_hat) if return_std is True
        
             y_hat: numpy array of size (n_samples_test,)
                    Estimated values of targets on test set (i.e. mean of predictive
                    distribution)
           
             std_hat: numpy array of size (n_samples_test,)
                    Standard deviation of predictive distribution
        '''
        # Mean is evaluated in the centered training space.
        x         = (X - self._x_mean_) / self._x_std
        y_hat     = np.dot(x,self.coef_) + self._y_mean
        
        if return_std:
            data_noise  = 1./self.beta_
            # NOTE(review): model_noise uses the raw X while y_hat uses the
            # centered x above — confirm this asymmetry is intended.
            model_noise = np.sum(np.dot(X,self.eigvecs_)**2 * self.eigvals_,1)
            var_pred    =  data_noise + model_noise
            std_hat = np.sqrt(var_pred)
            return y_hat, std_hat
        else:
            return y_hat
\ No newline at end of file
diff --git a/build/lib/bayesvalidrox/surrogate_models/engine.py b/build/lib/bayesvalidrox/surrogate_models/engine.py
new file mode 100644
index 000000000..42307d477
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/engine.py
@@ -0,0 +1,2225 @@
+# -*- coding: utf-8 -*-
+"""
+Engine to train the surrogate
+
+"""
+import copy
+from copy import deepcopy, copy
+import h5py
+import joblib
+import numpy as np
+import os
+
+from scipy import stats, signal, linalg, sparse
+from scipy.spatial import distance
+from tqdm import tqdm
+import scipy.optimize as opt
+from sklearn.metrics import mean_squared_error
+import multiprocessing
+import matplotlib.pyplot as plt
+import pandas as pd
+import sys
+import seaborn as sns
+from joblib import Parallel, delayed
+
+
+from bayesvalidrox.bayes_inference.bayes_inference import BayesInference
+from bayesvalidrox.bayes_inference.discrepancy import Discrepancy
+from .exploration import Exploration
+import pathlib
+
+#from .inputs import Input
+#from .exp_designs import ExpDesigns
+#from .surrogate_models import MetaModel
+#from bayesvalidrox.post_processing.post_processing import PostProcessing
+
def hellinger_distance(P, Q):
    """
    Hellinger distance between two continuous distributions.

    Each sample is summarized by its mean and standard deviation, and the
    closed-form Hellinger distance between the two resulting normal
    distributions is returned. The maximum distance 1 is achieved when P
    assigns probability zero to every set to which Q assigns a positive
    probability, and vice versa: 0 (identical) and 1 (maximally different).

    Parameters
    ----------
    P : array
        Reference likelihood.
    Q : array
        Estimated likelihood.

    Returns
    -------
    float
        Hellinger distance of two distributions.

    """
    ref = np.array(P)
    est = np.array(Q)

    mean_ref, std_ref = ref.mean(), np.std(ref)
    mean_est, std_est = est.mean(), np.std(est)

    var_sum = std_ref**2 + std_est**2

    # Bhattacharyya coefficient of two normals, split into the scale
    # (spread) factor and the location (mean shift) factor.
    bc_scale = np.sqrt(2 * std_ref * std_est / var_sum)
    bc_loc = np.exp(-.25 * (mean_ref - mean_est)**2 / var_sum)

    return np.sqrt(1 - bc_scale * bc_loc)
+
+
def logpdf(x, mean, cov):
    """
    Computes the likelihood based on a multivariate normal distribution.

    Parameters
    ----------
    x : array_like
        Point at which the log-density is evaluated.
    mean : array_like
        Observation data (mean of the distribution).
    cov : 2d array
        Covariance matrix of the distribution (must be positive definite).

    Returns
    -------
    log_lik : float
        Log likelihood.

    """
    dim = len(mean)
    # Cholesky factor gives both the log-determinant and a fast solve.
    chol = linalg.cholesky(cov, lower=True)
    half_log_det = np.sum(np.log(np.diag(chol)))
    residual = x - mean
    mahalanobis = residual.dot(linalg.cho_solve((chol, True), residual))
    return -0.5 * mahalanobis - half_log_det - 0.5 * dim * np.log(2 * np.pi)
+
def subdomain(Bounds, n_new_samples):
    """
    Divides a domain defined by Bounds into sub domains.

    Parameters
    ----------
    Bounds : list of tuples
        List of lower and upper bounds, one (low, high) pair per parameter.
    n_new_samples : int
        Number of samples to divide the domain for.

    Returns
    -------
    Subdomains : List of tuples of tuples
        Each tuple of tuples divides one set of bounds into n_new_samples parts.

    """
    n_params = len(Bounds)
    n_edges = n_new_samples + 1

    # One row of evenly spaced edge points per parameter.
    grid = np.zeros((n_params, n_edges))
    for p, (low, high) in enumerate(Bounds):
        grid[p] = np.linspace(start=low, stop=high, num=n_edges)

    # Pair consecutive edges into per-parameter intervals, one tuple of
    # intervals per subdomain.
    return [
        tuple((grid[p, j], grid[p, j + 1]) for p in range(n_params))
        for j in range(n_edges - 1)
    ]
+
+class Engine():
+    
+    
    def __init__(self, MetaMod, Model, ExpDes):
        """
        Collect the three collaborating objects the engine trains with.

        Parameters
        ----------
        MetaMod : object
            Surrogate (metamodel) to be trained.
        Model : object
            Forward model wrapper that is evaluated at design points.
        ExpDes : object
            Experimental design providing/receiving training samples.
        """
        self.MetaModel = MetaMod
        self.Model = Model
        self.ExpDesign = ExpDes
        # Parallel model evaluation is disabled until a training routine
        # switches it on.
        self.parallel = False
+        
+    def start_engine(self) -> None:
+        """
+        Do all the preparations that need to be run before the actual training
+
+        Returns
+        -------
+        None
+
+        """
+        self.out_names = self.Model.Output.names
+        self.MetaModel.out_names = self.out_names
+        
+        
    def train_normal(self, parallel = False, verbose = False, save = False) -> None:
        """
        Trains surrogate on static samples only.
        Samples are taken from the experimental design and the specified 
        model is run on them.
        Alternatively the samples can be read in from a provided hdf5 file.

        Parameters
        ----------
        parallel : bool, optional
            If True, the forward model is evaluated in parallel. The default
            is False.
        verbose : bool, optional
            Verbosity switch passed to the metamodel fit. The default is
            False.
        save : bool, optional
            If True, the trained surrogate is pickled to
            ``surrogates/surrogate_<model name>.pk1`` and (for pylink models
            with non-user sampling) the model run directories are zipped.
            The default is False.

        Returns
        -------
        None

        """
            
        ExpDesign = self.ExpDesign
        MetaModel = self.MetaModel
        
        # Read ExpDesign (training and targets) from the provided hdf5
        if ExpDesign.hdf5_file is not None:
            # TODO: need to run 'generate_ED' as well after this or not?
            ExpDesign.read_from_file(self.out_names)
        else:
            # Check if an old hdf5 file exists: if yes, delete it so a fresh
            # design file can be written.
            hdf5file = f'ExpDesign_{self.Model.name}.hdf5'
            if os.path.exists(hdf5file):
           #     os.rename(hdf5file, 'old_'+hdf5file)
                file = pathlib.Path(hdf5file)
                file.unlink()

        # Prepare X samples 
        # For training the surrogate use ExpDesign.X_tr, ExpDesign.X is for the model to run on 
        ExpDesign.generate_ED(ExpDesign.n_init_samples,
                                              transform=True,
                                              max_pce_deg=np.max(MetaModel.pce_deg))
        
        # Run simulations at X (only if no outputs were provided/read in)
        if not hasattr(ExpDesign, 'Y') or ExpDesign.Y is None:
            print('\n Now the forward model needs to be run!\n')
            ED_Y, up_ED_X = self.Model.run_model_parallel(ExpDesign.X, mp = parallel)
            ExpDesign.Y = ED_Y
        else:
            # Check if a dict has been passed.
            if not type(ExpDesign.Y) is dict:
                raise Exception('Please provide either a dictionary or a hdf5'
                                'file to ExpDesign.hdf5_file argument.')
                
        # Separate output dict and x-values
        if 'x_values' in ExpDesign.Y:
            ExpDesign.x_values = ExpDesign.Y['x_values']
            del ExpDesign.Y['x_values']
        else:
            print('No x_values are given, this might lead to issues during PostProcessing')
        
        
        # Fit the surrogate
        MetaModel.fit(ExpDesign.X, ExpDesign.Y, parallel, verbose)
        
        # Save what there is to save
        if save:
            # Save surrogate
            with open(f'surrogates/surrogate_{self.Model.name}.pk1', 'wb') as output:
                joblib.dump(MetaModel, output, 2)
                    
            # Zip the model run directories
            if self.Model.link_type.lower() == 'pylink' and\
               self.ExpDesign.sampling_method.lower() != 'user':
                self.Model.zip_subdirs(self.Model.name, f'{self.Model.name}_')
+                
+            
+    def train_sequential(self, parallel = False, verbose = False) -> None:
+        """
+        Train the surrogate in a sequential manner.
+        First build and train evereything on the static samples, then iterate
+        choosing more samples and refitting the surrogate on them.
+
+        Returns
+        -------
+        None
+
+        """
+        #self.train_normal(parallel, verbose)
+        self.parallel = parallel
+        self.train_seq_design(parallel, verbose)
+        
+        
+    # -------------------------------------------------------------------------
+    def eval_metamodel(self, samples=None, nsamples=None,
+                       sampling_method='random', return_samples=False):
+        """
+        Evaluates meta-model at the requested samples. One can also generate
+        nsamples.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params), optional
+            Samples to evaluate meta-model at. The default is None.
+        nsamples : int, optional
+            Number of samples to generate, if no `samples` is provided. The
+            default is None.
+        sampling_method : str, optional
+            Type of sampling, if no `samples` is provided. The default is
+            'random'.
+        return_samples : bool, optional
+            Retun samples, if no `samples` is provided. The default is False.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean of the predictions.
+        std_pred : dict
+            Standard deviatioon of the predictions.
+        """
+        # Generate or transform (if need be) samples
+        if samples is None:
+            # Generate
+            samples = self.ExpDesign.generate_samples(
+                nsamples,
+                sampling_method
+                )
+
+        # Transformation to other space is to be done in the MetaModel
+        # TODO: sort the transformations better
+        mean_pred, std_pred = self.MetaModel.eval_metamodel(samples)
+
+        if return_samples:
+            return mean_pred, std_pred, samples
+        else:
+            return mean_pred, std_pred
+        
+        
+    # -------------------------------------------------------------------------
+    def train_seq_design(self, parallel = False, verbose = False):
+        """
+        Starts the adaptive sequential design for refining the surrogate model
+        by selecting training points in a sequential manner.
+
+        Returns
+        -------
+        MetaModel : object
+            Meta model object.
+
+        """
+        self.parallel = parallel
+        
+        # Initialization
+        self.SeqModifiedLOO = {}
+        self.seqValidError = {}
+        self.SeqBME = {}
+        self.SeqKLD = {}
+        self.SeqDistHellinger = {}
+        self.seqRMSEMean = {}
+        self.seqRMSEStd = {}
+        self.seqMinDist = []
+        
+        if not hasattr(self.MetaModel, 'valid_samples'):
+            self.ExpDesign.valid_samples = []
+            self.ExpDesign.valid_model_runs = []
+            self.valid_likelihoods = []
+        
+        validError = None
+
+
+        # Determine the metamodel type
+        if self.MetaModel.meta_model_type.lower() != 'gpe':
+            pce = True
+        else:
+            pce = False
+        mc_ref = True if bool(self.Model.mc_reference) else False
+        if mc_ref:
+            self.Model.read_observation('mc_ref')
+
+        # Get the parameters
+        max_n_samples = self.ExpDesign.n_max_samples
+        mod_LOO_threshold = self.ExpDesign.mod_LOO_threshold
+        n_canddidate = self.ExpDesign.n_canddidate
+        post_snapshot = self.ExpDesign.post_snapshot
+        n_replication = self.ExpDesign.n_replication
+        util_func = self.ExpDesign.util_func
+        output_name = self.out_names
+        
+        # Handle if only one UtilityFunctions is provided
+        if not isinstance(util_func, list):
+            util_func = [self.ExpDesign.util_func]
+
+        # Read observations or MCReference
+        # TODO: recheck the logic in this if statement
+        if (len(self.Model.observations) != 0 or self.Model.meas_file is not None) and hasattr(self.MetaModel, 'Discrepancy'):
+            self.observations = self.Model.read_observation()
+            obs_data = self.observations
+        else:
+            obs_data = []
+            # TODO: TotalSigma2 not defined if not in this else???
+            # TODO: no self.observations if in here
+            TotalSigma2 = {}
+            
+        # ---------- Initial self.MetaModel ----------
+        self.train_normal(parallel = parallel, verbose=verbose)
+        
+        initMetaModel = deepcopy(self.MetaModel)
+
+        # Validation error if validation set is provided.
+        if self.ExpDesign.valid_model_runs:
+            init_rmse, init_valid_error = self._validError(initMetaModel)
+            init_valid_error = list(init_valid_error.values())
+        else:
+            init_rmse = None
+
+        # Check if discrepancy is provided
+        if len(obs_data) != 0 and hasattr(self.MetaModel, 'Discrepancy'):
+            TotalSigma2 = self.MetaModel.Discrepancy.parameters
+
+            # Calculate the initial BME
+            out = self._BME_Calculator(
+                obs_data, TotalSigma2, init_rmse)
+            init_BME, init_KLD, init_post, init_likes, init_dist_hellinger = out
+            print(f"\nInitial BME: {init_BME:.2f}")
+            print(f"Initial KLD: {init_KLD:.2f}")
+
+            # Posterior snapshot (initial)
+            if post_snapshot:
+                parNames = self.ExpDesign.par_names
+                print('Posterior snapshot (initial) is being plotted...')
+                self.__posteriorPlot(init_post, parNames, 'SeqPosterior_init')
+
+        # Check the convergence of the Mean & Std
+        if mc_ref and pce:
+            init_rmse_mean, init_rmse_std = self._error_Mean_Std()
+            print(f"Initial Mean and Std error: {init_rmse_mean:.2f},"
+                  f" {init_rmse_std:.2f}")
+
+        # Read the initial experimental design
+        Xinit = self.ExpDesign.X
+        init_n_samples = len(self.ExpDesign.X)
+        initYprev = self.ExpDesign.Y#initMetaModel.ModelOutputDict
+        #self.MetaModel.ModelOutputDict = self.ExpDesign.Y
+        initLCerror = initMetaModel.LCerror
+        n_itrs = max_n_samples - init_n_samples
+
+        ## Get some initial statistics
+        # Read the initial ModifiedLOO
+        if pce:
+            Scores_all, varExpDesignY = [], []
+            for out_name in output_name:
+                y = self.ExpDesign.Y[out_name]
+                Scores_all.append(list(
+                    self.MetaModel.score_dict['b_1'][out_name].values()))
+                if self.MetaModel.dim_red_method.lower() == 'pca':
+                    pca = self.MetaModel.pca['b_1'][out_name]
+                    components = pca.transform(y)
+                    varExpDesignY.append(np.var(components, axis=0))
+                else:
+                    varExpDesignY.append(np.var(y, axis=0))
+
+            Scores = [item for sublist in Scores_all for item in sublist]
+            weights = [item for sublist in varExpDesignY for item in sublist]
+            init_mod_LOO = [np.average([1-score for score in Scores],
+                                       weights=weights)]
+
+        prevMetaModel_dict = {}
+        #prevExpDesign_dict = {}
+        # Can run sequential design multiple times for comparison
+        for repIdx in range(n_replication):
+            print(f'\n>>>> Replication: {repIdx+1}<<<<')
+
+            # util_func: the function to use inside the type of exploitation
+            for util_f in util_func:
+                print(f'\n>>>> Utility Function: {util_f} <<<<')
+                # To avoid changes ub original aPCE object
+                self.ExpDesign.X = Xinit
+                self.ExpDesign.Y = initYprev
+                self.ExpDesign.LCerror = initLCerror
+
+                # Set the experimental design
+                Xprev = Xinit
+                total_n_samples = init_n_samples
+                Yprev = initYprev
+
+                Xfull = []
+                Yfull = []
+
+                # Store the initial ModifiedLOO
+                if pce:
+                    print("\nInitial ModifiedLOO:", init_mod_LOO)
+                    SeqModifiedLOO = np.array(init_mod_LOO)
+
+                if len(self.ExpDesign.valid_model_runs) != 0:
+                    SeqValidError = np.array(init_valid_error)
+
+                # Check if data is provided
+                if len(obs_data) != 0 and hasattr(self.MetaModel, 'Discrepancy'):
+                    SeqBME = np.array([init_BME])
+                    SeqKLD = np.array([init_KLD])
+                    SeqDistHellinger = np.array([init_dist_hellinger])
+
+                if mc_ref and pce:
+                    seqRMSEMean = np.array([init_rmse_mean])
+                    seqRMSEStd = np.array([init_rmse_std])
+
+                # ------- Start Sequential Experimental Design -------
+                postcnt = 1
+                for itr_no in range(1, n_itrs+1):
+                    print(f'\n>>>> Iteration number {itr_no} <<<<')
+
+                    # Save the metamodel prediction before updating
+                    prevMetaModel_dict[itr_no] = deepcopy(self.MetaModel)
+                    #prevExpDesign_dict[itr_no] = deepcopy(self.ExpDesign)
+                    if itr_no > 1:
+                        pc_model = prevMetaModel_dict[itr_no-1]
+                        self._y_hat_prev, _ = pc_model.eval_metamodel(
+                            samples=Xfull[-1].reshape(1, -1))
+                        del prevMetaModel_dict[itr_no-1]
+
+                    # Optimal Bayesian Design
+                    #self.MetaModel.ExpDesignFlag = 'sequential'
+                    Xnew, updatedPrior = self.choose_next_sample(TotalSigma2,
+                                                            n_canddidate,
+                                                            util_f)
+                    S = np.min(distance.cdist(Xinit, Xnew, 'euclidean'))
+                    self.seqMinDist.append(S)
+                    print(f"\nmin Dist from OldExpDesign: {S:2f}")
+                    print("\n")
+
+                    # Evaluate the full model response at the new sample
+                    Ynew, _ = self.Model.run_model_parallel(
+                        Xnew, prevRun_No=total_n_samples
+                        )
+                    total_n_samples += Xnew.shape[0]
+
+                    # ------ Plot the surrogate model vs Origninal Model ------
+                    if hasattr(self.ExpDesign, 'adapt_verbose') and \
+                       self.ExpDesign.adapt_verbose:
+                        from .adaptPlot import adaptPlot
+                        y_hat, std_hat = self.MetaModel.eval_metamodel(
+                            samples=Xnew
+                            )
+                        adaptPlot(
+                            self.MetaModel, Ynew, y_hat, std_hat,
+                            plotED=False
+                            )
+
+                    # -------- Retrain the surrogate model -------
+                    # Extend new experimental design
+                    Xfull = np.vstack((Xprev, Xnew))
+
+                    # Updating experimental design Y
+                    for out_name in output_name:
+                        Yfull = np.vstack((Yprev[out_name], Ynew[out_name]))
+                        self.ExpDesign.Y[out_name] = Yfull
+
+                    # Pass new design to the metamodel object
+                    self.ExpDesign.sampling_method = 'user'
+                    self.ExpDesign.X = Xfull
+                    #self.ExpDesign.Y = self.MetaModel.ModelOutputDict
+
+                    # Save the Experimental Design for next iteration
+                    Xprev = Xfull
+                    Yprev = self.ExpDesign.Y 
+
+                    # Pass the new prior as the input
+                    # TODO: another look at this - no difference apc to pce to gpe?
+                    self.MetaModel.input_obj.poly_coeffs_flag = False
+                    if updatedPrior is not None:
+                        self.MetaModel.input_obj.poly_coeffs_flag = True
+                        print("updatedPrior:", updatedPrior.shape)
+                        # Arbitrary polynomial chaos
+                        for i in range(updatedPrior.shape[1]):
+                            self.MetaModel.input_obj.Marginals[i].dist_type = None
+                            x = updatedPrior[:, i]
+                            self.MetaModel.input_obj.Marginals[i].raw_data = x
+
+                    # Train the surrogate model for new ExpDesign
+                    self.train_normal(parallel=False)
+
+                    # -------- Evaluate the retrained surrogate model -------
+                    # Extract Modified LOO from Output
+                    if pce:
+                        Scores_all, varExpDesignY = [], []
+                        for out_name in output_name:
+                            y = self.ExpDesign.Y[out_name]
+                            Scores_all.append(list(
+                                self.MetaModel.score_dict['b_1'][out_name].values()))
+                            if self.MetaModel.dim_red_method.lower() == 'pca':
+                                pca = self.MetaModel.pca['b_1'][out_name]
+                                components = pca.transform(y)
+                                varExpDesignY.append(np.var(components,
+                                                            axis=0))
+                            else:
+                                varExpDesignY.append(np.var(y, axis=0))
+                        Scores = [item for sublist in Scores_all for item
+                                  in sublist]
+                        weights = [item for sublist in varExpDesignY for item
+                                   in sublist]
+                        ModifiedLOO = [np.average(
+                            [1-score for score in Scores], weights=weights)]
+
+                        print('\n')
+                        print(f"Updated ModifiedLOO {util_f}:\n", ModifiedLOO)
+                        print('\n')
+
+                    # Compute the validation error
+                    if self.ExpDesign.valid_model_runs:
+                        rmse, validError = self._validError(self.MetaModel)
+                        ValidError = list(validError.values())
+                    else:
+                        rmse = None
+
+                    # Store updated ModifiedLOO
+                    if pce:
+                        SeqModifiedLOO = np.vstack(
+                            (SeqModifiedLOO, ModifiedLOO))
+                        if len(self.ExpDesign.valid_model_runs) != 0:
+                            SeqValidError = np.vstack(
+                                (SeqValidError, ValidError))
+                    # -------- Caclulation of BME as accuracy metric -------
+                    # Check if data is provided
+                    if len(obs_data) != 0:
+                        # Calculate the initial BME
+                        out = self._BME_Calculator(obs_data, TotalSigma2, rmse)
+                        BME, KLD, Posterior, likes, DistHellinger = out
+                        print('\n')
+                        print(f"Updated BME: {BME:.2f}")
+                        print(f"Updated KLD: {KLD:.2f}")
+                        print('\n')
+
+                        # Plot some snapshots of the posterior
+                        step_snapshot = self.ExpDesign.step_snapshot
+                        if post_snapshot and postcnt % step_snapshot == 0:
+                            parNames = self.ExpDesign.par_names
+                            print('Posterior snapshot is being plotted...')
+                            self.__posteriorPlot(Posterior, parNames,
+                                                 f'SeqPosterior_{postcnt}')
+                        postcnt += 1
+
+                    # Check the convergence of the Mean&Std
+                    if mc_ref and pce:
+                        print('\n')
+                        RMSE_Mean, RMSE_std = self._error_Mean_Std()
+                        print(f"Updated Mean and Std error: {RMSE_Mean:.2f}, "
+                              f"{RMSE_std:.2f}")
+                        print('\n')
+
+                    # Store the updated BME & KLD
+                    # Check if data is provided
+                    if len(obs_data) != 0:
+                        SeqBME = np.vstack((SeqBME, BME))
+                        SeqKLD = np.vstack((SeqKLD, KLD))
+                        SeqDistHellinger = np.vstack((SeqDistHellinger,
+                                                      DistHellinger))
+                    if mc_ref and pce:
+                        seqRMSEMean = np.vstack((seqRMSEMean, RMSE_Mean))
+                        seqRMSEStd = np.vstack((seqRMSEStd, RMSE_std))
+
+                    if pce and any(LOO < mod_LOO_threshold
+                                   for LOO in ModifiedLOO):
+                        break
+
+                    # Clean up
+                    if len(obs_data) != 0:
+                        del out
+                    print()
+                    print('-'*50)
+                    print()
+
+                # Store updated ModifiedLOO and BME in dictonary
+                strKey = f'{util_f}_rep_{repIdx+1}'
+                if pce:
+                    self.SeqModifiedLOO[strKey] = SeqModifiedLOO
+                if len(self.ExpDesign.valid_model_runs) != 0:
+                    self.seqValidError[strKey] = SeqValidError
+
+                # Check if data is provided
+                if len(obs_data) != 0:
+                    self.SeqBME[strKey] = SeqBME
+                    self.SeqKLD[strKey] = SeqKLD
+                if hasattr(self.MetaModel, 'valid_likelihoods') and \
+                   self.valid_likelihoods:
+                    self.SeqDistHellinger[strKey] = SeqDistHellinger
+                if mc_ref and pce:
+                    self.seqRMSEMean[strKey] = seqRMSEMean
+                    self.seqRMSEStd[strKey] = seqRMSEStd
+
+        # return self.MetaModel
+
+    # -------------------------------------------------------------------------
+    def util_VarBasedDesign(self, X_can, index, util_func='Entropy'):
+        """
+        Computes the exploitation scores based on:
+        active learning MacKay(ALM) and active learning Cohn (ALC)
+        Paper: Sequential Design with Mutual Information for Computer
+        Experiments (MICE): Emulation of a Tsunami Model by Beck and Guillas
+        (2016)
+
+        Parameters
+        ----------
+        X_can : array of shape (n_samples, n_params)
+            Candidate samples.
+        index : int
+            Model output index.
+        UtilMethod : string, optional
+            Exploitation utility function. The default is 'Entropy'.
+
+        Returns
+        -------
+        float
+            Score.
+
+        """
+        MetaModel = self.MetaModel
+        ED_X = self.ExpDesign.X
+        out_dict_y = self.ExpDesign.Y
+        out_names = self.out_names
+
+        # Run the Metamodel for the candidate
+        X_can = X_can.reshape(1, -1)
+        Y_PC_can, std_PC_can = MetaModel.eval_metamodel(samples=X_can)
+
+        if util_func.lower() == 'alm':
+            # ----- Entropy/MMSE/active learning MacKay(ALM)  -----
+            # Compute perdiction variance of the old model
+            canPredVar = {key: std_PC_can[key]**2 for key in out_names}
+
+            varPCE = np.zeros((len(out_names), X_can.shape[0]))
+            for KeyIdx, key in enumerate(out_names):
+                varPCE[KeyIdx] = np.max(canPredVar[key], axis=1)
+            score = np.max(varPCE, axis=0)
+
+        elif util_func.lower() == 'eigf':
+            # ----- Expected Improvement for Global fit -----
+            # Find closest EDX to the candidate
+            distances = distance.cdist(ED_X, X_can, 'euclidean')
+            index = np.argmin(distances)
+
+            # Compute perdiction error and variance of the old model
+            predError = {key: Y_PC_can[key] for key in out_names}
+            canPredVar = {key: std_PC_can[key]**2 for key in out_names}
+
+            # Compute perdiction error and variance of the old model
+            # Eq (5) from Liu et al.(2018)
+            EIGF_PCE = np.zeros((len(out_names), X_can.shape[0]))
+            for KeyIdx, key in enumerate(out_names):
+                residual = predError[key] - out_dict_y[key][int(index)]
+                var = canPredVar[key]
+                EIGF_PCE[KeyIdx] = np.max(residual**2 + var, axis=1)
+            score = np.max(EIGF_PCE, axis=0)
+
+        return -1 * score   # -1 is for minimization instead of maximization
+
+    # -------------------------------------------------------------------------
+    def util_BayesianActiveDesign(self, y_hat, std, sigma2Dict, var='DKL'):
+        """
+        Computes scores based on Bayesian active design criterion (var).
+
+        It is based on the following paper:
+        Oladyshkin, Sergey, Farid Mohammadi, Ilja Kroeker, and Wolfgang Nowak.
+        "Bayesian3 active learning for the gaussian process emulator using
+        information theory." Entropy 22, no. 8 (2020): 890.
+
+        Parameters
+        ----------
+        X_can : array of shape (n_samples, n_params)
+            Candidate samples.
+        sigma2Dict : dict
+            A dictionary containing the measurement errors (sigma^2).
+        var : string, optional
+            BAL design criterion. The default is 'DKL'.
+
+        Returns
+        -------
+        float
+            Score.
+
+        """
+
+        # Get the data
+        obs_data = self.observations
+        # TODO: this should be optimizable to be calculated explicitly
+        if hasattr(self.Model, 'n_obs'):
+            n_obs = self.Model.n_obs
+        else:
+            n_obs = self.n_obs
+        mc_size = 10000
+
+        # Sample a distribution for a normal dist
+        # with Y_mean_can as the mean and Y_std_can as std.
+        Y_MC, std_MC = {}, {}
+        logPriorLikelihoods = np.zeros((mc_size))
+       # print(y_hat)
+       # print(list[y_hat])
+        for key in list(y_hat):
+            cov = np.diag(std[key]**2)
+           # print(y_hat[key], cov)
+            # TODO: added the allow_singular = True here
+            rv = stats.multivariate_normal(mean=y_hat[key], cov=cov,)
+            Y_MC[key] = rv.rvs(size=mc_size)
+            logPriorLikelihoods += rv.logpdf(Y_MC[key])
+            std_MC[key] = np.zeros((mc_size, y_hat[key].shape[0]))
+
+        #  Likelihood computation (Comparison of data and simulation
+        #  results via PCE with candidate design)
+        likelihoods = self._normpdf(Y_MC, std_MC, obs_data, sigma2Dict)
+        
+        # Rejection Step
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, mc_size)[0]
+
+        # Reject the poorly performed prior
+        accepted = (likelihoods/np.max(likelihoods)) >= unif
+
+        # Prior-based estimation of BME
+        logBME = np.log(np.nanmean(likelihoods), dtype=np.longdouble)#float128)
+
+        # Posterior-based expectation of likelihoods
+        postLikelihoods = likelihoods[accepted]
+        postExpLikelihoods = np.mean(np.log(postLikelihoods))
+
+        # Posterior-based expectation of prior densities
+        postExpPrior = np.mean(logPriorLikelihoods[accepted])
+
+        # Utility function Eq.2 in Ref. (2)
+        # Posterior covariance matrix after observing data y
+        # Kullback-Leibler Divergence (Sergey's paper)
+        if var == 'DKL':
+
+            # TODO: Calculate the correction factor for BME
+            # BMECorrFactor = self.BME_Corr_Weight(PCE_SparseBayes_can,
+            #                                      ObservationData, sigma2Dict)
+            # BME += BMECorrFactor
+            # Haun et al implementation
+            # U_J_d = np.mean(np.log(Likelihoods[Likelihoods!=0])- logBME)
+            U_J_d = postExpLikelihoods - logBME
+
+        # Marginal log likelihood
+        elif var == 'BME':
+            U_J_d = np.nanmean(likelihoods)
+
+        # Entropy-based information gain
+        elif var == 'infEntropy':
+            logBME = np.log(np.nanmean(likelihoods))
+            infEntropy = logBME - postExpPrior - postExpLikelihoods
+            U_J_d = infEntropy * -1  # -1 for minimization
+
+        # Bayesian information criterion
+        elif var == 'BIC':
+            coeffs = self.MetaModel.coeffs_dict.values()
+            nModelParams = max(len(v) for val in coeffs for v in val.values())
+            maxL = np.nanmax(likelihoods)
+            U_J_d = -2 * np.log(maxL) + np.log(n_obs) * nModelParams
+
+        # Akaike information criterion
+        elif var == 'AIC':
+            coeffs = self.MetaModel.coeffs_dict.values()
+            nModelParams = max(len(v) for val in coeffs for v in val.values())
+            maxlogL = np.log(np.nanmax(likelihoods))
+            AIC = -2 * maxlogL + 2 * nModelParams
+            # 2 * nModelParams * (nModelParams+1) / (n_obs-nModelParams-1)
+            penTerm = 0
+            U_J_d = 1*(AIC + penTerm)
+
+        # Deviance information criterion
+        elif var == 'DIC':
+            # D_theta_bar = np.mean(-2 * Likelihoods)
+            N_star_p = 0.5 * np.var(np.log(likelihoods[likelihoods != 0]))
+            Likelihoods_theta_mean = self._normpdf(
+                y_hat, std, obs_data, sigma2Dict
+                )
+            DIC = -2 * np.log(Likelihoods_theta_mean) + 2 * N_star_p
+
+            U_J_d = DIC
+
+        else:
+            print('The algorithm you requested has not been implemented yet!')
+
+        # Handle inf and NaN (replace by zero)
+        if np.isnan(U_J_d) or U_J_d == -np.inf or U_J_d == np.inf:
+            U_J_d = 0.0
+
+        # Clear memory
+        del likelihoods
+        del Y_MC
+        del std_MC
+
+        return -1 * U_J_d   # -1 is for minimization instead of maximization
+
+    # -------------------------------------------------------------------------
+    def util_BayesianDesign(self, X_can, X_MC, sigma2Dict, var='DKL'):
+        """
+        Computes scores based on Bayesian sequential design criterion (var).
+
+        Parameters
+        ----------
+        X_can : array of shape (n_samples, n_params)
+            Candidate samples.
+        sigma2Dict : dict
+            A dictionary containing the measurement errors (sigma^2).
+        var : string, optional
+            Bayesian design criterion. The default is 'DKL'.
+
+        Returns
+        -------
+        float
+            Score.
+
+        """
+
+        # To avoid changes ub original aPCE object
+        MetaModel = self.MetaModel
+        out_names = self.out_names
+        if X_can.ndim == 1:
+            X_can = X_can.reshape(1, -1)
+
+        # Compute the mean and std based on the MetaModel
+        # pce_means, pce_stds = self._compute_pce_moments(MetaModel)
+        if var == 'ALC':
+            Y_MC, Y_MC_std = MetaModel.eval_metamodel(samples=X_MC)
+
+        # Old Experimental design
+        oldExpDesignX = self.ExpDesign.X
+        oldExpDesignY = self.ExpDesign.Y
+
+        # Evaluate the PCE metamodels at that location ???
+        Y_PC_can, Y_std_can = MetaModel.eval_metamodel(samples=X_can)
+        PCE_Model_can = deepcopy(MetaModel)
+        engine_can = deepcopy(self)
+        # Add the candidate to the ExpDesign
+        NewExpDesignX = np.vstack((oldExpDesignX, X_can))
+
+        NewExpDesignY = {}
+        for key in oldExpDesignY.keys():
+            NewExpDesignY[key] = np.vstack(
+                (oldExpDesignY[key], Y_PC_can[key])
+                )
+
+        engine_can.ExpDesign.sampling_method = 'user'
+        engine_can.ExpDesign.X = NewExpDesignX
+        #engine_can.ModelOutputDict = NewExpDesignY
+        engine_can.ExpDesign.Y = NewExpDesignY
+
+        # Train the model for the observed data using x_can
+        engine_can.MetaModel.input_obj.poly_coeffs_flag = False
+        engine_can.start_engine()
+        engine_can.train_normal(parallel=False)
+        engine_can.MetaModel.fit(NewExpDesignX, NewExpDesignY)
+#        engine_can.train_norm_design(parallel=False)
+
+        # Set the ExpDesign to its original values
+        engine_can.ExpDesign.X = oldExpDesignX
+        engine_can.ModelOutputDict = oldExpDesignY
+        engine_can.ExpDesign.Y = oldExpDesignY
+
+        if var.lower() == 'mi':
+            # Mutual information based on Krause et al
+            # Adapted from Beck & Guillas (MICE) paper
+            _, std_PC_can = engine_can.MetaModel.eval_metamodel(samples=X_can)
+            std_can = {key: std_PC_can[key] for key in out_names}
+
+            std_old = {key: Y_std_can[key] for key in out_names}
+
+            varPCE = np.zeros((len(out_names)))
+            for i, key in enumerate(out_names):
+                varPCE[i] = np.mean(std_old[key]**2/std_can[key]**2)
+            score = np.mean(varPCE)
+
+            return -1 * score
+
+        elif var.lower() == 'alc':
+            # Active learning based on Gramyc and Lee
+            # Adaptive design and analysis of supercomputer experiments Techno-
+            # metrics, 51 (2009), pp. 130–145.
+
+            # Evaluate the MetaModel at the given samples
+            Y_MC_can, Y_MC_std_can = engine_can.MetaModel.eval_metamodel(samples=X_MC)
+
+            # Compute the score
+            score = []
+            for i, key in enumerate(out_names):
+                pce_var = Y_MC_std_can[key]**2
+                pce_var_can = Y_MC_std[key]**2
+                score.append(np.mean(pce_var-pce_var_can, axis=0))
+            score = np.mean(score)
+
+            return -1 * score
+
+        # ---------- Inner MC simulation for computing Utility Value ----------
+        # Estimation of the integral via Monte Varlo integration
+        MCsize = X_MC.shape[0]
+        ESS = 0
+
+        while ((ESS > MCsize) or (ESS < 1)):
+
+            # Enriching Monte Carlo samples if need be
+            if ESS != 0:
+                X_MC = self.ExpDesign.generate_samples(
+                    MCsize, 'random'
+                    )
+
+            # Evaluate the MetaModel at the given samples
+            Y_MC, std_MC = PCE_Model_can.eval_metamodel(samples=X_MC)
+
+            # Likelihood computation (Comparison of data and simulation
+            # results via PCE with candidate design)
+            likelihoods = self._normpdf(
+                Y_MC, std_MC, self.observations, sigma2Dict
+                )
+
+            # Check the Effective Sample Size (1<ESS<MCsize)
+            ESS = 1 / np.sum(np.square(likelihoods/np.sum(likelihoods)))
+
+            # Enlarge sample size if it doesn't fulfill the criteria
+            if ((ESS > MCsize) or (ESS < 1)):
+                print("--- increasing MC size---")
+                MCsize *= 10
+                ESS = 0
+
+        # Rejection Step
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, MCsize)[0]
+
+        # Reject the poorly performed prior
+        accepted = (likelihoods/np.max(likelihoods)) >= unif
+
+        # -------------------- Utility functions --------------------
+        # Utility function Eq.2 in Ref. (2)
+        # Kullback-Leibler Divergence (Sergey's paper)
+        if var == 'DKL':
+
+            # Prior-based estimation of BME
+            logBME = np.log(np.nanmean(likelihoods, dtype=np.longdouble))#float128))
+
+            # Posterior-based expectation of likelihoods
+            postLikelihoods = likelihoods[accepted]
+            postExpLikelihoods = np.mean(np.log(postLikelihoods))
+
+            # Haun et al implementation
+            U_J_d = np.mean(np.log(likelihoods[likelihoods != 0]) - logBME)
+
+            # U_J_d = np.sum(G_n_m_all)
+            # Ryan et al (2014) implementation
+            # importanceWeights = Likelihoods[Likelihoods!=0]/np.sum(Likelihoods[Likelihoods!=0])
+            # U_J_d = np.mean(importanceWeights*np.log(Likelihoods[Likelihoods!=0])) - logBME
+
+            # U_J_d = postExpLikelihoods - logBME
+
+        # Marginal likelihood
+        elif var == 'BME':
+
+            # Prior-based estimation of BME
+            logBME = np.log(np.nanmean(likelihoods))
+            U_J_d = logBME
+
+        # Bayes risk likelihood
+        elif var == 'BayesRisk':
+
+            U_J_d = -1 * np.var(likelihoods)
+
+        # Entropy-based information gain
+        elif var == 'infEntropy':
+            # Prior-based estimation of BME
+            logBME = np.log(np.nanmean(likelihoods))
+
+            # Posterior-based expectation of likelihoods
+            postLikelihoods = likelihoods[accepted]
+            postLikelihoods /= np.nansum(likelihoods[accepted])
+            postExpLikelihoods = np.mean(np.log(postLikelihoods))
+
+            # Posterior-based expectation of prior densities
+            postExpPrior = np.mean(logPriorLikelihoods[accepted])
+
+            infEntropy = logBME - postExpPrior - postExpLikelihoods
+
+            U_J_d = infEntropy * -1  # -1 for minimization
+
+        # D-Posterior-precision
+        elif var == 'DPP':
+            X_Posterior = X_MC[accepted]
+            # covariance of the posterior parameters
+            U_J_d = -np.log(np.linalg.det(np.cov(X_Posterior)))
+
+        # A-Posterior-precision
+        elif var == 'APP':
+            X_Posterior = X_MC[accepted]
+            # trace of the posterior parameters
+            U_J_d = -np.log(np.trace(np.cov(X_Posterior)))
+
+        else:
+            print('The algorithm you requested has not been implemented yet!')
+
+        # Clear memory
+        del likelihoods
+        del Y_MC
+        del std_MC
+
+        return -1 * U_J_d   # -1 is for minimization instead of maximization
+
+
+    # -------------------------------------------------------------------------
+    def run_util_func(self, method, candidates, index, sigma2Dict=None,
+                      var=None, X_MC=None):
+        """
+        Runs the utility function based on the given method.
+
+        Parameters
+        ----------
+        method : string
+            Exploitation method: `VarOptDesign`, `BayesActDesign` and
+            `BayesOptDesign`.
+        candidates : array of shape (n_samples, n_params)
+            All candidate parameter sets.
+        index : int
+            ExpDesign index.
+        sigma2Dict : dict, optional
+            A dictionary containing the measurement errors (sigma^2). The
+            default is None.
+        var : string, optional
+            Utility function. The default is None.
+        X_MC : TYPE, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        index : TYPE
+            DESCRIPTION.
+        List
+            Scores.
+
+        """
+
+        if method.lower() == 'varoptdesign':
+            # U_J_d = self.util_VarBasedDesign(candidates, index, var)
+            U_J_d = np.zeros((candidates.shape[0]))
+            for idx, X_can in tqdm(enumerate(candidates), ascii=True,
+                                   desc="varoptdesign"):
+                U_J_d[idx] = self.util_VarBasedDesign(X_can, index, var)
+
+        elif method.lower() == 'bayesactdesign':
+            NCandidate = candidates.shape[0]
+            U_J_d = np.zeros((NCandidate))
+            # Evaluate all candidates
+            y_can, std_can = self.MetaModel.eval_metamodel(samples=candidates)
+            # loop through candidates
+            for idx, X_can in tqdm(enumerate(candidates), ascii=True,
+                                   desc="BAL Design"):
+                y_hat = {key: items[idx] for key, items in y_can.items()}
+                std = {key: items[idx] for key, items in std_can.items()}
+                
+               # print(y_hat)
+               # print(std)
+                U_J_d[idx] = self.util_BayesianActiveDesign(
+                    y_hat, std, sigma2Dict, var)
+
+        elif method.lower() == 'bayesoptdesign':
+            NCandidate = candidates.shape[0]
+            U_J_d = np.zeros((NCandidate))
+            for idx, X_can in tqdm(enumerate(candidates), ascii=True,
+                                   desc="OptBayesianDesign"):
+                U_J_d[idx] = self.util_BayesianDesign(X_can, X_MC, sigma2Dict,
+                                                      var)
+        return (index, -1 * U_J_d)
+
+    # -------------------------------------------------------------------------
+    def dual_annealing(self, method, Bounds, sigma2Dict, var, Run_No,
+                       verbose=False):
+        """
+        Exploration algorithm to find the optimum parameter space.
+
+        Parameters
+        ----------
+        method : string
+            Exploitation method: `VarOptDesign`, `BayesActDesign` and
+            `BayesOptDesign`.
+        Bounds : list of tuples
+            List of lower and upper boundaries of parameters.
+        sigma2Dict : dict
+            A dictionary containing the measurement errors (sigma^2).
+        Run_No : int
+            Run number.
+        verbose : bool, optional
+            Print out a summary. The default is False.
+
+        Returns
+        -------
+        Run_No : int
+            Run number.
+        array
+            Optimial candidate.
+
+        """
+
+        Model = self.Model
+        max_func_itr = self.ExpDesign.max_func_itr
+
+        if method == 'VarOptDesign':
+            Res_Global = opt.dual_annealing(self.util_VarBasedDesign,
+                                            bounds=Bounds,
+                                            args=(Model, var),
+                                            maxfun=max_func_itr)
+
+        elif method == 'BayesOptDesign':
+            Res_Global = opt.dual_annealing(self.util_BayesianDesign,
+                                            bounds=Bounds,
+                                            args=(Model, sigma2Dict, var),
+                                            maxfun=max_func_itr)
+
+        if verbose:
+            print(f"Global minimum: xmin = {Res_Global.x}, "
+                  f"f(xmin) = {Res_Global.fun:.6f}, nfev = {Res_Global.nfev}")
+
+        return (Run_No, Res_Global.x)
+
+    # -------------------------------------------------------------------------
+    def tradeoff_weights(self, tradeoff_scheme, old_EDX, old_EDY):
+        """
+        Calculates weights for exploration scores based on the requested
+        scheme: `None`, `equal`, `epsilon-decreasing` and `adaptive`.
+
+        `None`: No exploration.
+        `equal`: Same weights for exploration and exploitation scores.
+        `epsilon-decreasing`: Start with more exploration and increase the
+            influence of exploitation along the way with a exponential decay
+            function
+        `adaptive`: An adaptive method based on:
+            Liu, Haitao, Jianfei Cai, and Yew-Soon Ong. "An adaptive sampling
+            approach for Kriging metamodeling by maximizing expected prediction
+            error." Computers & Chemical Engineering 106 (2017): 171-182.
+
+        Parameters
+        ----------
+        tradeoff_scheme : string
+            Trade-off scheme for exloration and exploitation scores.
+        old_EDX : array (n_samples, n_params)
+            Old experimental design (training points).
+        old_EDY : dict
+            Old model responses (targets).
+
+        Returns
+        -------
+        exploration_weight : float
+            Exploration weight.
+        exploitation_weight: float
+            Exploitation weight.
+
+        """
+        if tradeoff_scheme is None:
+            exploration_weight = 0
+
+        elif tradeoff_scheme == 'equal':
+            exploration_weight = 0.5
+
+        elif tradeoff_scheme == 'epsilon-decreasing':
+            # epsilon-decreasing scheme
+            # Start with more exploration and increase the influence of
+            # exploitation along the way with a exponential decay function
+            initNSamples = self.ExpDesign.n_init_samples
+            n_max_samples = self.ExpDesign.n_max_samples
+
+            itrNumber = (self.ExpDesign.X.shape[0] - initNSamples)
+            itrNumber //= self.ExpDesign.n_new_samples
+
+            tau2 = -(n_max_samples-initNSamples-1) / np.log(1e-8)
+            exploration_weight = signal.exponential(n_max_samples-initNSamples,
+                                                    0, tau2, False)[itrNumber]
+
+        elif tradeoff_scheme == 'adaptive':
+
+            # Extract itrNumber
+            initNSamples = self.ExpDesign.n_init_samples
+            n_max_samples = self.ExpDesign.n_max_samples
+            itrNumber = (self.ExpDesign.X.shape[0] - initNSamples)
+            itrNumber //= self.ExpDesign.n_new_samples
+
+            if itrNumber == 0:
+                exploration_weight = 0.5
+            else:
+                # New adaptive trade-off according to Liu et al. (2017)
+                # Mean squared error for last design point
+                last_EDX = old_EDX[-1].reshape(1, -1)
+                lastPCEY, _ = self.MetaModel.eval_metamodel(samples=last_EDX)
+                pce_y = np.array(list(lastPCEY.values()))[:, 0]
+                y = np.array(list(old_EDY.values()))[:, -1, :]
+                mseError = mean_squared_error(pce_y, y)
+
+                # Mean squared CV - error for last design point
+                pce_y_prev = np.array(list(self._y_hat_prev.values()))[:, 0]
+                mseCVError = mean_squared_error(pce_y_prev, y)
+
+                exploration_weight = min([0.5*mseError/mseCVError, 1])
+
+        # Exploitation weight
+        exploitation_weight = 1 - exploration_weight
+
+        return exploration_weight, exploitation_weight
+
+    # -------------------------------------------------------------------------
    def choose_next_sample(self, sigma2=None, n_candidates=5, var='DKL'):
        """
        Runs optimal sequential design.

        Selects the next training point(s) by combining an exploration
        score (space filling) with an exploitation score (utility
        function), according to the configured `explore_method`,
        `exploit_method` and `tradeoff_scheme` of the experimental design.

        Parameters
        ----------
        sigma2 : dict, optional
            A dictionary containing the measurement errors (sigma^2). The
            default is None.
        n_candidates : int, optional
            Number of candidate samples. The default is 5.
        var : string, optional
            Utility function. The default is 'DKL'.

        Raises
        ------
        NameError
            Wrong utility function.

        Returns
        -------
        Xnew : array (n_samples, n_params)
            Selected new training point(s).
        None
            Placeholder second return value; only the user-defined
            exploitation scheme returns filtered samples here.
        """

        # Initialization: pull the sequential-design settings off ExpDesign
        Bounds = self.ExpDesign.bound_tuples
        n_new_samples = self.ExpDesign.n_new_samples
        explore_method = self.ExpDesign.explore_method
        exploit_method = self.ExpDesign.exploit_method
        n_cand_groups = self.ExpDesign.n_cand_groups
        tradeoff_scheme = self.ExpDesign.tradeoff_scheme

        old_EDX = self.ExpDesign.X
        old_EDY = self.ExpDesign.Y.copy()
        ndim = self.ExpDesign.X.shape[1]
        OutputNames = self.out_names

        # -----------------------------------------
        # ----------- CUSTOMIZED METHODS ----------
        # -----------------------------------------
        # Utility function exploit_method provided by user
        if exploit_method.lower() == 'user':
            if not hasattr(self.ExpDesign, 'ExploitFunction'):
                raise AttributeError('Function `ExploitFunction` not given to the ExpDesign, thus cannor run user-defined sequential scheme')
            # TODO: syntax does not fully match the rest - can test this??
            Xnew, filteredSamples = self.ExpDesign.ExploitFunction(self)

            print("\n")
            print("\nXnew:\n", Xnew)

            return Xnew, filteredSamples


        # Dual-Annealing works differently from the rest, so deal with this first
        # Here exploration and exploitation are performed simulataneously
        if explore_method == 'dual annealing':
            # ------- EXPLORATION: OPTIMIZATION -------
            import time
            start_time = time.time()

            # Divide the domain to subdomains, one per requested new sample
            subdomains = subdomain(Bounds, n_new_samples)

            # Multiprocessing: one dual_annealing run per subdomain
            if self.parallel:
                args = []
                for i in range(n_new_samples):
                    args.append((exploit_method, subdomains[i], sigma2, var, i))
                pool = multiprocessing.Pool(multiprocessing.cpu_count())

                # With Pool.starmap_async()
                results = pool.starmap_async(self.dual_annealing, args).get()

                # Close the pool
                pool.close()
            # Without multiprocessing
            else:
                results = []
                for i in range(n_new_samples):
                    results.append(self.dual_annealing(exploit_method, subdomains[i], sigma2, var, i))

            # New sample: each result is (Run_No, optimal candidate)
            Xnew = np.array([results[i][1] for i in range(n_new_samples)])
            print("\nXnew:\n", Xnew)

            # Computational cost
            elapsed_time = time.time() - start_time
            print("\n")
            print(f"Elapsed_time: {round(elapsed_time,2)} sec.")
            print('-'*20)

            return Xnew, None

        # Generate needed Exploration class
        # NOTE(review): this Exploration setup and candidate generation is
        # repeated in the exploration branch below — presumably redundant,
        # TODO confirm before removing either copy.
        explore = Exploration(self.ExpDesign, n_candidates)
        explore.w = 100  # * ndim #500  # TODO: where does this value come from?

        # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
        explore.mc_criterion = 'mc-intersite-proj'

        # Generate the candidate samples
        # TODO: here use the sampling method provided by the expdesign?
        sampling_method = self.ExpDesign.sampling_method

        # TODO: changed this from 'random' for LOOCV
        if explore_method == 'LOOCV':
            allCandidates = self.ExpDesign.generate_samples(n_candidates,
                                                            sampling_method)
        else:
            allCandidates, scoreExploration = explore.get_exploration_samples()

        # -----------------------------------------
        # ---------- EXPLORATION METHODS ----------
        # -----------------------------------------
        if explore_method == 'LOOCV':
            # -----------------------------------------------------------------
            # TODO: LOOCV model construnction based on Feng et al. (2020)
            # 'LOOCV':
            # Initilize the ExploitScore array

            # Generate random samples
            # NOTE(review): overwrites the candidates generated above with
            # `sampling_method` — TODO confirm which set is intended.
            allCandidates = self.ExpDesign.generate_samples(n_candidates,
                                                                'random')

            # Construct error model based on LCerror
            errorModel = self.MetaModel.create_ModelError(old_EDX, self.LCerror)
            self.errorModel.append(copy(errorModel))

            # Evaluate the error models for allCandidates
            eLCAllCands, _ = errorModel.eval_errormodel(allCandidates)
            # Select the maximum as the representative error
            eLCAllCands = np.dstack(eLCAllCands.values())
            eLCAllCandidates = np.max(eLCAllCands, axis=1)[:, 0]

            # Normalize the error w.r.t the maximum error
            scoreExploration = eLCAllCandidates / np.sum(eLCAllCandidates)

        else:
            # ------- EXPLORATION: SPACE-FILLING DESIGN -------
            # Generate candidate samples from Exploration class
            explore = Exploration(self.ExpDesign, n_candidates)
            explore.w = 100  # * ndim #500
            # Select criterion (mc-intersite-proj-th, mc-intersite-proj)
            explore.mc_criterion = 'mc-intersite-proj'
            allCandidates, scoreExploration = explore.get_exploration_samples()

            # Temp: ---- Plot all candidates -----
            # NOTE(review): `plotter` is defined but never called, and it
            # reads self.bound_tuples, which presumably should be
            # self.ExpDesign.bound_tuples — verify before enabling.
            if ndim == 2:
                def plotter(points, allCandidates, Method,
                            scoreExploration=None):
                    # Scatter old design points vs candidates, optionally
                    # annotated with their exploration scores.
                    if Method == 'Voronoi':
                        from scipy.spatial import Voronoi, voronoi_plot_2d
                        vor = Voronoi(points)
                        fig = voronoi_plot_2d(vor)
                        ax1 = fig.axes[0]
                    else:
                        fig = plt.figure()
                        ax1 = fig.add_subplot(111)
                    ax1.scatter(points[:, 0], points[:, 1], s=10, c='r',
                                marker="s", label='Old Design Points')
                    ax1.scatter(allCandidates[:, 0], allCandidates[:, 1], s=10,
                                c='b', marker="o", label='Design candidates')
                    for i in range(points.shape[0]):
                        txt = 'p'+str(i+1)
                        ax1.annotate(txt, (points[i, 0], points[i, 1]))
                    if scoreExploration is not None:
                        for i in range(allCandidates.shape[0]):
                            txt = str(round(scoreExploration[i], 5))
                            ax1.annotate(txt, (allCandidates[i, 0],
                                               allCandidates[i, 1]))

                    plt.xlim(self.bound_tuples[0])
                    plt.ylim(self.bound_tuples[1])
                    # plt.show()
                    plt.legend(loc='upper left')

        # -----------------------------------------
        # --------- EXPLOITATION METHODS ----------
        # -----------------------------------------
        if exploit_method == 'BayesOptDesign' or\
           exploit_method == 'BayesActDesign':

            # ------- Calculate Exoploration weight -------
            # Compute exploration weight based on trade off scheme
            explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
                                                        old_EDX,
                                                        old_EDY)
            print(f"\n Exploration weight={explore_w:0.3f} "
                  f"Exploitation weight={exploit_w:0.3f}\n")

            # ------- EXPLOITATION: BayesOptDesign & ActiveLearning -------
            if explore_w != 1.0:
                # Check if all needed properties are set
                if not hasattr(self.ExpDesign, 'max_func_itr'):
                    raise AttributeError('max_func_itr not given to the experimental design')

                # Create a sample pool for rejection sampling
                MCsize = 15000
                X_MC = self.ExpDesign.generate_samples(MCsize, 'random')
                candidates = self.ExpDesign.generate_samples(
                    n_candidates, 'latin_hypercube')

                # Split the candidates in groups for multiprocessing
                split_cand = np.array_split(
                    candidates, n_cand_groups, axis=0
                    )
                if self.parallel:
                    results = Parallel(n_jobs=-1, backend='multiprocessing')(
                        delayed(self.run_util_func)(
                            exploit_method, split_cand[i], i, sigma2, var, X_MC)
                        for i in range(n_cand_groups))
                else:
                    results = []
                    for i in range(n_cand_groups):
                        results.append(self.run_util_func(exploit_method, split_cand[i], i, sigma2, var, X_MC))

                # Retrieve the results and append them
                U_J_d = np.concatenate([results[NofE][1] for NofE in
                                        range(n_cand_groups)])

                # Check if all scores are inf; fall back to uniform scores
                if np.isinf(U_J_d).all() or np.isnan(U_J_d).all():
                    U_J_d = np.ones(len(U_J_d))

                # Get the expected value (mean) of the Utility score
                # for each cell
                if explore_method == 'Voronoi':
                    U_J_d = np.mean(U_J_d.reshape(-1, n_candidates), axis=1)

                # Normalize U_J_d
                norm_U_J_d = U_J_d / np.sum(U_J_d)
            else:
                norm_U_J_d = np.zeros((len(scoreExploration)))

            # ------- Calculate Total score -------
            # ------- Trade off between EXPLORATION & EXPLOITATION -------
            # Accumulate the samples
            # NOTE(review): `candidates` is only assigned in the
            # explore_w != 1.0 branch above; if explore_w == 1.0 this line
            # raises NameError — TODO confirm intended control flow.
            finalCandidates = np.concatenate((allCandidates, candidates), axis = 0)
            finalCandidates = np.unique(finalCandidates, axis = 0)

            # Calculations take into account both exploration and exploitation
            # samples without duplicates
            totalScore = np.zeros(finalCandidates.shape[0])
            #self.totalScore = totalScore

            for cand_idx in range(finalCandidates.shape[0]):
                # find candidate indices
                # NOTE(review): np.where on a broadcast row comparison; and
                # `idx1 != []` compares an ndarray with a list, which relies
                # on ambiguous truthiness — `len(idx1) > 0` would be safer.
                idx1 = np.where(allCandidates == finalCandidates[cand_idx])[0]
                idx2 = np.where(candidates == finalCandidates[cand_idx])[0]

                # exploration
                if idx1 != []:
                    idx1 = idx1[0]
                    totalScore[cand_idx] += explore_w * scoreExploration[idx1]

                # exploitation
                if idx2 != []:
                    idx2 = idx2[0]
                    totalScore[cand_idx] += exploit_w * norm_U_J_d[idx2]


            # Total score
            # NOTE(review): this overwrites the per-candidate totals just
            # accumulated in the loop above (dead code), and norm_U_J_d /
            # scoreExploration need not have the same length as
            # finalCandidates, which is indexed by these scores below —
            # TODO confirm which scoring is intended.
            totalScore = exploit_w * norm_U_J_d
            totalScore += explore_w * scoreExploration

            # temp: Plot
            # dim = self.ExpDesign.X.shape[1]
            # if dim == 2:
            #     plotter(self.ExpDesign.X, allCandidates, explore_method)

            # ------- Select the best candidate -------
            # find an optimal point subset to add to the initial design by
            # maximization of the utility score and taking care of NaN values
            temp = totalScore.copy()
            temp[np.isnan(totalScore)] = -np.inf
            sorted_idxtotalScore = np.argsort(temp)[::-1]
            bestIdx = sorted_idxtotalScore[:n_new_samples]

            # select the requested number of samples
            if explore_method == 'Voronoi':
                Xnew = np.zeros((n_new_samples, ndim))
                for i, idx in enumerate(bestIdx):
                    # NOTE(review): other branches use
                    # explore.closest_points (underscore) — confirm the
                    # attribute name here.
                    X_can = explore.closestPoints[idx]

                    # Calculate the maxmin score for the region of interest
                    newSamples, maxminScore = explore.get_mc_samples(X_can)

                    # select the requested number of samples
                    Xnew[i] = newSamples[np.argmax(maxminScore)]
            else:
                # Changed this from allCandiates to full set of candidates
                # TODO: still not changed for e.g. 'Voronoi'
                Xnew = finalCandidates[sorted_idxtotalScore[:n_new_samples]]


        elif exploit_method == 'VarOptDesign':
            # ------- EXPLOITATION: VarOptDesign -------
            UtilMethod = var

            # ------- Calculate Exoploration weight -------
            # Compute exploration weight based on trade off scheme
            explore_w, exploit_w = self.tradeoff_weights(tradeoff_scheme,
                                                        old_EDX,
                                                        old_EDY)
            print(f"\nweightExploration={explore_w:0.3f} "
                  f"weightExploitation={exploit_w:0.3f}")

            # Generate candidate samples from Exploration class
            nMeasurement = old_EDY[OutputNames[0]].shape[1]

            # Find sensitive region
            if UtilMethod == 'LOOCV':
                # Score each training point by its largest leave-one-out
                # error across outputs and measurement points.
                LCerror = self.MetaModel.LCerror
                allModifiedLOO = np.zeros((len(old_EDX), len(OutputNames),
                                           nMeasurement))
                for y_idx, y_key in enumerate(OutputNames):
                    for idx, key in enumerate(LCerror[y_key].keys()):
                        allModifiedLOO[:, y_idx, idx] = abs(
                            LCerror[y_key][key])

                ExploitScore = np.max(np.max(allModifiedLOO, axis=1), axis=1)

            elif UtilMethod in ['EIGF', 'ALM']:
                # ----- All other in  ['EIGF', 'ALM'] -----
                # Initilize the ExploitScore array
                ExploitScore = np.zeros((len(old_EDX), len(OutputNames)))

                # Split the candidates in groups for multiprocessing
                if explore_method != 'Voronoi':
                    split_cand = np.array_split(allCandidates,
                                                n_cand_groups,
                                                axis=0)
                    goodSampleIdx = range(n_cand_groups)
                else:
                    # Find indices of the Vornoi cells with samples
                    goodSampleIdx = []
                    for idx in range(len(explore.closest_points)):
                        if len(explore.closest_points[idx]) != 0:
                            goodSampleIdx.append(idx)
                    split_cand = explore.closest_points

                # Split the candidates in groups for multiprocessing
                args = []
                for index in goodSampleIdx:
                    args.append((exploit_method, split_cand[index], index,
                                 sigma2, var))

                # Multiprocessing
                pool = multiprocessing.Pool(multiprocessing.cpu_count())
                # With Pool.starmap_async()
                results = pool.starmap_async(self.run_util_func, args).get()

                # Close the pool
                pool.close()

                # Retrieve the results and append them
                if explore_method == 'Voronoi':
                    ExploitScore = [np.mean(results[k][1]) for k in
                                    range(len(goodSampleIdx))]
                else:
                    ExploitScore = np.concatenate(
                        [results[k][1] for k in range(len(goodSampleIdx))])

            else:
                raise NameError('The requested utility function is not '
                                'available.')

            # find an optimal point subset to add to the initial design by
            # maximization of the utility score and taking care of NaN values
            # Total score
            # Normalize U_J_d
            ExploitScore = ExploitScore / np.sum(ExploitScore)
            totalScore = exploit_w * ExploitScore
            totalScore += explore_w * scoreExploration

            temp = totalScore.copy()
            sorted_idxtotalScore = np.argsort(temp, axis=0)[::-1]
            bestIdx = sorted_idxtotalScore[:n_new_samples]

            Xnew = np.zeros((n_new_samples, ndim))
            if explore_method != 'Voronoi':
                Xnew = allCandidates[bestIdx]
            else:
                for i, idx in enumerate(bestIdx.flatten()):
                    X_can = explore.closest_points[idx]
                    # plotter(self.ExpDesign.X, X_can, explore_method,
                    # scoreExploration=None)

                    # Calculate the maxmin score for the region of interest
                    newSamples, maxminScore = explore.get_mc_samples(X_can)

                    # select the requested number of samples
                    Xnew[i] = newSamples[np.argmax(maxminScore)]

        elif exploit_method == 'alphabetic':
            # ------- EXPLOITATION: ALPHABETIC -------
            Xnew = self.util_AlphOptDesign(allCandidates, var)

        elif exploit_method == 'Space-filling':
            # ------- EXPLOITATION: SPACE-FILLING -------
            # Pure exploration: the exploration score is the total score.
            totalScore = scoreExploration

            # ------- Select the best candidate -------
            # find an optimal point subset to add to the initial design by
            # maximization of the utility score and taking care of NaN values
            temp = totalScore.copy()
            temp[np.isnan(totalScore)] = -np.inf
            sorted_idxtotalScore = np.argsort(temp)[::-1]

            # select the requested number of samples
            Xnew = allCandidates[sorted_idxtotalScore[:n_new_samples]]

        else:
            raise NameError('The requested design method is not available.')

        print("\n")
        print("\nRun No. {}:".format(old_EDX.shape[0]+1))
        print("Xnew:\n", Xnew)

        # TODO: why does it also return None?
        return Xnew, None
+
+    # -------------------------------------------------------------------------
+    def util_AlphOptDesign(self, candidates, var='D-Opt'):
+        """
+        Enriches the Experimental design with the requested alphabetic
+        criterion based on exploring the space with number of sampling points.
+
+        Ref: Hadigol, M., & Doostan, A. (2018). Least squares polynomial chaos
+        expansion: A review of sampling strategies., Computer Methods in
+        Applied Mechanics and Engineering, 332, 382-407.
+
+        Arguments
+        ---------
+        NCandidate : int
+            Number of candidate points to be searched
+
+        var : string
+            Alphabetic optimality criterion
+
+        Returns
+        -------
+        X_new : array of shape (1, n_params)
+            The new sampling location in the input space.
+        """
+        MetaModelOrig = self # TODO: this doesn't fully seem correct?
+        n_new_samples = MetaModelOrig.ExpDesign.n_new_samples
+        NCandidate = candidates.shape[0]
+
+        # TODO: Loop over outputs
+        OutputName = self.out_names[0]
+
+        # To avoid changes ub original aPCE object
+        MetaModel = deepcopy(MetaModelOrig)
+
+        # Old Experimental design
+        oldExpDesignX = self.ExpDesign.X
+
+        # TODO: Only one psi can be selected.
+        # Suggestion: Go for the one with the highest LOO error
+        # TODO: this is just a patch, need to look at again!
+        Scores = list(self.MetaModel.score_dict['b_1'][OutputName].values())
+        #print(Scores)
+        #print(self.MetaModel.score_dict)
+        #print(self.MetaModel.score_dict.values())
+        #print(self.MetaModel.score_dict['b_1'].values())
+        #print(self.MetaModel.score_dict['b_1'][OutputName].values())
+        ModifiedLOO = [1-score for score in Scores]
+        outIdx = np.argmax(ModifiedLOO)
+
+        # Initialize Phi to save the criterion's values
+        Phi = np.zeros((NCandidate))
+
+        # TODO: also patched here
+        BasisIndices = self.MetaModel.basis_dict['b_1'][OutputName]["y_"+str(outIdx+1)]
+        P = len(BasisIndices)
+
+        # ------ Old Psi ------------
+        univ_p_val = self.MetaModel.univ_basis_vals(oldExpDesignX)
+        Psi = self.MetaModel.create_psi(BasisIndices, univ_p_val)
+
+        # ------ New candidates (Psi_c) ------------
+        # Assemble Psi_c
+        univ_p_val_c = self.MetaModel.univ_basis_vals(candidates)
+        Psi_c = self.MetaModel.create_psi(BasisIndices, univ_p_val_c)
+
+        for idx in range(NCandidate):
+
+            # Include the new row to the original Psi
+            Psi_cand = np.vstack((Psi, Psi_c[idx]))
+
+            # Information matrix
+            PsiTPsi = np.dot(Psi_cand.T, Psi_cand)
+            M = PsiTPsi / (len(oldExpDesignX)+1)
+
+            if np.linalg.cond(PsiTPsi) > 1e-12 \
+               and np.linalg.cond(PsiTPsi) < 1 / sys.float_info.epsilon:
+                # faster
+                invM = linalg.solve(M, sparse.eye(PsiTPsi.shape[0]).toarray())
+            else:
+                # stabler
+                invM = np.linalg.pinv(M)
+
+            # ---------- Calculate optimality criterion ----------
+            # Optimality criteria according to Section 4.5.1 in Ref.
+
+            # D-Opt
+            if var.lower() == 'd-opt':
+                Phi[idx] = (np.linalg.det(invM)) ** (1/P)
+
+            # A-Opt
+            elif var.lower() == 'a-opt':
+                Phi[idx] = np.trace(invM)
+
+            # K-Opt
+            elif var.lower() == 'k-opt':
+                Phi[idx] = np.linalg.cond(M)
+
+            else:
+               # print(var.lower())
+                raise Exception('The optimality criterion you requested has '
+                      'not been implemented yet!')
+
+        # find an optimal point subset to add to the initial design
+        # by minimization of the Phi
+        sorted_idxtotalScore = np.argsort(Phi)
+
+        # select the requested number of samples
+        Xnew = candidates[sorted_idxtotalScore[:n_new_samples]]
+
+        return Xnew
+
+    # -------------------------------------------------------------------------
+    def _normpdf(self, y_hat_pce, std_pce, obs_data, total_sigma2s,
+                  rmse=None):
+        """
+        Calculated gaussian likelihood for given y+std based on given obs+sigma
+        # TODO: is this understanding correct?
+        
+        Parameters
+        ----------
+        y_hat_pce : dict of 2d np arrays
+            Mean output of the surrogate.
+        std_pce : dict of 2d np arrays
+            Standard deviation output of the surrogate.
+        obs_data : dict of 1d np arrays
+            Observed data.
+        total_sigma2s : pandas dataframe, matches obs_data
+            Estimated uncertainty for the observed data.
+        rmse : dict, optional
+            RMSE values from validation of the surrogate. The default is None.
+
+        Returns
+        -------
+        likelihoods : dict of float
+            The likelihood for each surrogate eval in y_hat_pce compared to the
+            observations (?).
+
+        """
+
+        likelihoods = 1.0
+
+        # Loop over the outputs
+        for idx, out in enumerate(self.out_names):
+
+            # (Meta)Model Output
+           # print(y_hat_pce[out])
+            nsamples, nout = y_hat_pce[out].shape
+
+            # Prepare data and remove NaN
+            try:
+                data = obs_data[out].values[~np.isnan(obs_data[out])]
+            except AttributeError:
+                data = obs_data[out][~np.isnan(obs_data[out])]
+
+            # Prepare sigma2s
+            non_nan_indices = ~np.isnan(total_sigma2s[out])
+            tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout].values
+
+            # Surrogate error if valid dataset is given.
+            if rmse is not None:
+                tot_sigma2s += rmse[out]**2
+            else:
+                tot_sigma2s += np.mean(std_pce[out])**2
+
+            likelihoods *= stats.multivariate_normal.pdf(
+                y_hat_pce[out], data, np.diag(tot_sigma2s),
+                allow_singular=True)
+
+        # TODO: remove this here
+        self.Likelihoods = likelihoods
+
+        return likelihoods
+
+    # -------------------------------------------------------------------------
+    def _corr_factor_BME(self, obs_data, total_sigma2s, logBME):
+        """
+        Calculates the correction factor for BMEs.
+        """
+        MetaModel = self.MetaModel
+        samples = self.ExpDesign.X  # valid_samples
+        model_outputs = self.ExpDesign.Y  # valid_model_runs
+        n_samples = samples.shape[0]
+
+        # Extract the requested model outputs for likelihood calulation
+        output_names = self.out_names
+
+        # TODO: Evaluate MetaModel on the experimental design and ValidSet
+        OutputRS, stdOutputRS = MetaModel.eval_metamodel(samples=samples)
+
+        logLik_data = np.zeros((n_samples))
+        logLik_model = np.zeros((n_samples))
+        # Loop over the outputs
+        for idx, out in enumerate(output_names):
+
+            # (Meta)Model Output
+            nsamples, nout = model_outputs[out].shape
+
+            # Prepare data and remove NaN
+            try:
+                data = obs_data[out].values[~np.isnan(obs_data[out])]
+            except AttributeError:
+                data = obs_data[out][~np.isnan(obs_data[out])]
+
+            # Prepare sigma2s
+            non_nan_indices = ~np.isnan(total_sigma2s[out])
+            tot_sigma2s = total_sigma2s[out][non_nan_indices][:nout]
+
+            # Covariance Matrix
+            covMatrix_data = np.diag(tot_sigma2s)
+
+            for i, sample in enumerate(samples):
+
+                # Simulation run
+                y_m = model_outputs[out][i]
+
+                # Surrogate prediction
+                y_m_hat = OutputRS[out][i]
+
+                # CovMatrix with the surrogate error
+                # covMatrix = np.diag(stdOutputRS[out][i]**2)
+                covMatrix = np.diag((y_m-y_m_hat)**2)
+                covMatrix = np.diag(
+                    np.mean((model_outputs[out]-OutputRS[out]), axis=0)**2
+                    )
+
+                # Compute likelilhood output vs data
+                logLik_data[i] += logpdf(
+                    y_m_hat, data, covMatrix_data
+                    )
+
+                # Compute likelilhood output vs surrogate
+                logLik_model[i] += logpdf(y_m_hat, y_m, covMatrix)
+
+        # Weight
+        logLik_data -= logBME
+        weights = np.exp(logLik_model+logLik_data)
+
+        return np.log(np.mean(weights))
+
+    # -------------------------------------------------------------------------
+    def _posteriorPlot(self, posterior, par_names, key):
+        """
+        Plot the posterior of a specific key as a corner plot
+
+        Parameters
+        ----------
+        posterior : 2d np.array
+            Samples of the posterior.
+        par_names : list of strings
+            List of the parameter names.
+        key : string
+            Output key that this posterior belongs to.
+
+        Returns
+        -------
+        figPosterior : corner.corner
+            Plot of the posterior.
+
+        """
+
+        # Initialization
+        newpath = (r'Outputs_SeqPosteriorComparison/posterior')
+        os.makedirs(newpath, exist_ok=True)
+
+        bound_tuples = self.ExpDesign.bound_tuples
+        n_params = len(par_names)
+        font_size = 40
+        if n_params == 2:
+
+            figPosterior, ax = plt.subplots(figsize=(15, 15))
+
+            sns.kdeplot(x=posterior[:, 0], y=posterior[:, 1],
+                        fill=True, ax=ax, cmap=plt.cm.jet,
+                        clip=bound_tuples)
+            # Axis labels
+            plt.xlabel(par_names[0], fontsize=font_size)
+            plt.ylabel(par_names[1], fontsize=font_size)
+
+            # Set axis limit
+            plt.xlim(bound_tuples[0])
+            plt.ylim(bound_tuples[1])
+
+            # Increase font size
+            plt.xticks(fontsize=font_size)
+            plt.yticks(fontsize=font_size)
+
+            # Switch off the grids
+            plt.grid(False)
+
+        else:
+            import corner
+            figPosterior = corner.corner(posterior, labels=par_names,
+                                         title_fmt='.2e', show_titles=True,
+                                         title_kwargs={"fontsize": 12})
+
+        figPosterior.savefig(f'./{newpath}/{key}.pdf', bbox_inches='tight')
+        plt.close()
+
+        # Save the posterior as .npy
+        np.save(f'./{newpath}/{key}.npy', posterior)
+
+        return figPosterior
+
+    
+    # -------------------------------------------------------------------------
+    def _BME_Calculator(self, obs_data, sigma2Dict, rmse=None):
+        """
+        This function computes the Bayesian model evidence (BME) via Monte
+        Carlo integration.
+
+        Parameters
+        ----------
+        obs_data : dict of 1d np arrays
+            Observed data.
+        sigma2Dict : pandas dataframe, matches obs_data
+            Estimated uncertainty for the observed data.
+        rmse : dict of floats, optional
+            RMSE values for each output-key. The dafault is None.
+
+        Returns
+        -------
+        (logBME, KLD, X_Posterior, Likelihoods, distHellinger)
+        
+        """
+        # Initializations
+        if hasattr(self, 'valid_likelihoods'):
+            valid_likelihoods = self.valid_likelihoods
+        else:
+            valid_likelihoods = []
+        valid_likelihoods = np.array(valid_likelihoods)
+
+        post_snapshot = self.ExpDesign.post_snapshot
+        if post_snapshot or valid_likelihoods.shape[0] != 0:
+            newpath = (r'Outputs_SeqPosteriorComparison/likelihood_vs_ref')
+            os.makedirs(newpath, exist_ok=True)
+
+        SamplingMethod = 'random'
+        MCsize = 10000
+        ESS = 0
+
+        # Estimation of the integral via Monte Varlo integration
+        while (ESS > MCsize) or (ESS < 1):
+
+            # Generate samples for Monte Carlo simulation
+            X_MC = self.ExpDesign.generate_samples(
+                MCsize, SamplingMethod
+                )
+
+            # Monte Carlo simulation for the candidate design
+            Y_MC, std_MC = self.MetaModel.eval_metamodel(samples=X_MC)
+
+            # Likelihood computation (Comparison of data and
+            # simulation results via PCE with candidate design)
+            Likelihoods = self._normpdf(
+                Y_MC, std_MC, obs_data, sigma2Dict, rmse
+                )
+
+            # Check the Effective Sample Size (1000<ESS<MCsize)
+            ESS = 1 / np.sum(np.square(Likelihoods/np.sum(Likelihoods)))
+
+            # Enlarge sample size if it doesn't fulfill the criteria
+            if (ESS > MCsize) or (ESS < 1):
+                print(f'ESS={ESS} MC size should be larger.')
+                MCsize *= 10
+                ESS = 0
+
+        # Rejection Step
+        # Random numbers between 0 and 1
+        unif = np.random.rand(1, MCsize)[0]
+
+        # Reject the poorly performed prior
+        accepted = (Likelihoods/np.max(Likelihoods)) >= unif
+        X_Posterior = X_MC[accepted]
+
+        # ------------------------------------------------------------
+        # --- Kullback-Leibler Divergence & Information Entropy ------
+        # ------------------------------------------------------------
+        # Prior-based estimation of BME
+        logBME = np.log(np.nanmean(Likelihoods))
+
+        # TODO: Correction factor
+        # log_weight = self.__corr_factor_BME(obs_data, sigma2Dict, logBME)
+
+        # Posterior-based expectation of likelihoods
+        postExpLikelihoods = np.mean(np.log(Likelihoods[accepted]))
+
+        # Posterior-based expectation of prior densities
+        postExpPrior = np.mean(
+            np.log(self.ExpDesign.JDist.pdf(X_Posterior.T))
+            )
+
+        # Calculate Kullback-Leibler Divergence
+        # KLD = np.mean(np.log(Likelihoods[Likelihoods!=0])- logBME)
+        KLD = postExpLikelihoods - logBME
+
+        # Information Entropy based on Entropy paper Eq. 38
+        infEntropy = logBME - postExpPrior - postExpLikelihoods
+
+        # If post_snapshot is True, plot likelihood vs refrence
+        if post_snapshot or valid_likelihoods:
+            # Hellinger distance
+            valid_likelihoods = np.array(valid_likelihoods)
+            ref_like = np.log(valid_likelihoods[(valid_likelihoods > 0)])
+            est_like = np.log(Likelihoods[Likelihoods > 0])
+            distHellinger = hellinger_distance(ref_like, est_like)
+            
+            idx = len([name for name in os.listdir(newpath) if 'Likelihoods_'
+                       in name and os.path.isfile(os.path.join(newpath, name))])
+            
+            fig, ax = plt.subplots()
+            try:
+                sns.kdeplot(np.log(valid_likelihoods[valid_likelihoods > 0]),
+                            shade=True, color="g", label='Ref. Likelihood')
+                sns.kdeplot(np.log(Likelihoods[Likelihoods > 0]), shade=True,
+                            color="b", label='Likelihood with PCE')
+            except:
+                pass
+
+            text = f"Hellinger Dist.={distHellinger:.3f}\n logBME={logBME:.3f}"
+            "\n DKL={KLD:.3f}"
+
+            plt.text(0.05, 0.75, text, bbox=dict(facecolor='wheat',
+                                                 edgecolor='black',
+                                                 boxstyle='round,pad=1'),
+                     transform=ax.transAxes)
+
+            fig.savefig(f'./{newpath}/Likelihoods_{idx}.pdf',
+                        bbox_inches='tight')
+            plt.close()
+
+        else:
+            distHellinger = 0.0
+
+        # Bayesian inference with Emulator only for 2D problem
+        if post_snapshot and self.MetaModel.n_params == 2 and not idx % 5:
+            BayesOpts = BayesInference(self)
+
+            BayesOpts.emulator = True
+            BayesOpts.plot_post_pred = False
+
+            # Select the inference method
+            import emcee
+            BayesOpts.inference_method = "MCMC"
+            # Set the MCMC parameters passed to self.mcmc_params
+            BayesOpts.mcmc_params = {
+                'n_steps': 1e5,
+                'n_walkers': 30,
+                'moves': emcee.moves.KDEMove(),
+                'verbose': False
+                }
+
+            # ----- Define the discrepancy model -------
+            # TODO: check with Farid if this first line is how it should be
+            BayesOpts.measured_data = obs_data
+            obs_data = pd.DataFrame(obs_data, columns=self.out_names)
+            BayesOpts.measurement_error = obs_data
+            # TODO: shouldn't the uncertainty be sigma2Dict instead of obs_data?
+
+            # # -- (Option B) --
+            DiscrepancyOpts = Discrepancy('')
+            DiscrepancyOpts.type = 'Gaussian'
+            DiscrepancyOpts.parameters = obs_data**2
+            BayesOpts.Discrepancy = DiscrepancyOpts
+            # Start the calibration/inference
+            Bayes_PCE = BayesOpts.create_inference()
+            X_Posterior = Bayes_PCE.posterior_df.values
+
+        return (logBME, KLD, X_Posterior, Likelihoods, distHellinger)
+
+    # -------------------------------------------------------------------------
+    def _validError(self):
+        """
+        Evaluate the metamodel on the validation samples and calculate the
+        error against the corresponding model runs
+
+        Returns
+        -------
+        rms_error : dict
+            RMSE for each validation run.
+        valid_error : dict
+            Normed (?)RMSE for each validation run.
+
+        """
+        # Extract the original model with the generated samples
+        valid_model_runs = self.ExpDesign.valid_model_runs
+
+        # Run the PCE model with the generated samples
+        valid_PCE_runs, _ = self.MetaModel.eval_metamodel(samples=self.ExpDesign.valid_samples)
+
+        rms_error = {}
+        valid_error = {}
+        # Loop over the keys and compute RMSE error.
+        for key in self.out_names:
+            rms_error[key] = mean_squared_error(
+                valid_model_runs[key], valid_PCE_runs[key],
+                multioutput='raw_values',
+                sample_weight=None,
+                squared=False)
+            # Validation error
+            valid_error[key] = (rms_error[key]**2)
+            valid_error[key] /= np.var(valid_model_runs[key], ddof=1, axis=0)
+
+            # Print a report table
+            print("\n>>>>> Updated Errors of {} <<<<<".format(key))
+            print("\nIndex  |  RMSE   |  Validation Error")
+            print('-'*35)
+            print('\n'.join(f'{i+1}  |  {k:.3e}  |  {j:.3e}' for i, (k, j)
+                            in enumerate(zip(rms_error[key],
+                                             valid_error[key]))))
+
+        return rms_error, valid_error
+
+    # -------------------------------------------------------------------------
    def _error_Mean_Std(self):
        """
        Calculates the error in the overall mean and std approximation of the
        surrogate against the mc-reference provided to the model.
        This can only be applied to metamodels of polynomial type

        Returns
        -------
        RMSE_Mean : float
            RMSE of the means 
        RMSE_std : float
            RMSE of the standard deviations

        """
        # Compute the mean and std based on the MetaModel
        pce_means, pce_stds = self.MetaModel._compute_pce_moments()

        # Compute the root mean squared error
        # NOTE(review): RMSE_Mean and RMSE_std are overwritten on every loop
        # iteration, so only the errors of the LAST key in self.out_names are
        # returned -- confirm whether a per-output dict was intended instead.
        for output in self.out_names:

            # Compute the error between mean and std of MetaModel and OrigModel
            RMSE_Mean = mean_squared_error(
                self.Model.mc_reference['mean'], pce_means[output], squared=False
                )
            RMSE_std = mean_squared_error(
                self.Model.mc_reference['std'], pce_stds[output], squared=False
                )

        return RMSE_Mean, RMSE_std
diff --git a/build/lib/bayesvalidrox/surrogate_models/eval_rec_rule.py b/build/lib/bayesvalidrox/surrogate_models/eval_rec_rule.py
new file mode 100644
index 000000000..b583c7eb2
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/eval_rec_rule.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+
+
+Based on the implementation in UQLab [1].
+
+References:
+1. S. Marelli, and B. Sudret, UQLab: A framework for uncertainty quantification
+in Matlab, Proc. 2nd Int. Conf. on Vulnerability, Risk Analysis and Management
+(ICVRAM2014), Liverpool, United Kingdom, 2014, 2554-2563.
+
+2. S. Marelli, N. Lüthen, B. Sudret, UQLab user manual – Polynomial chaos
+expansions, Report # UQLab-V1.4-104, Chair of Risk, Safety and Uncertainty
+Quantification, ETH Zurich, Switzerland, 2021.
+
+Author: Farid Mohammadi, M.Sc.
+E-Mail: farid.mohammadi@iws.uni-stuttgart.de
+Department of Hydromechanics and Modelling of Hydrosystems (LH2)
+Institute for Modelling Hydraulic and Environmental Systems (IWS), University
+of Stuttgart, www.iws.uni-stuttgart.de/lh2/
+Pfaffenwaldring 61
+70569 Stuttgart
+
+Created on Fri Jan 14 2022
+"""
+import numpy as np
+from numpy.polynomial.polynomial import polyval
+
+
def poly_rec_coeffs(n_max, poly_type, params=None):
    """
    Computes the recurrence coefficients for classical Wiener-Askey orthogonal
    polynomials.

    Parameters
    ----------
    n_max : int
        Maximum polynomial degree.
    poly_type : string
        Polynomial type: `'legendre'`, `'hermite'` or `'laguerre'`.
    params : list, optional
        Parameters required for `laguerre` poly type. The default is None.

    Returns
    -------
    AB : dict
        The 3 term recursive coefficients and the applicable ranges.

    """

    if poly_type == 'legendre':

        def an(n):
            return np.zeros((n+1, 1))

        def sqrt_bn(n):
            sq_bn = np.zeros((n+1, 1))
            sq_bn[0, 0] = 1
            for i in range(1, n+1):
                sq_bn[i, 0] = np.sqrt(1./(4-i**-2))
            return sq_bn

        bounds = [-1, 1]

    elif poly_type == 'hermite':

        def an(n):
            return np.zeros((n+1, 1))

        def sqrt_bn(n):
            sq_bn = np.zeros((n+1, 1))
            sq_bn[0, 0] = 1
            for i in range(1, n+1):
                sq_bn[i, 0] = np.sqrt(i)
            return sq_bn

        bounds = [-np.inf, np.inf]

    elif poly_type == 'laguerre':

        def an(n):
            a = np.zeros((n+1, 1))
            for i in range(1, n+1):
                # BUGFIX: the alpha-coefficient depends on the row index i
                # (a_i = 2i + params[1]); the original used the loop-invariant
                # maximum degree n, filling every row with the same value.
                a[i] = 2*i + params[1]
            # NOTE(review): row 0 is left at a[0] = 0; for the generalized
            # Laguerre recurrence a_0 is usually nonzero -- confirm the
            # intended indexing convention against the UQLab reference.
            return a

        def sqrt_bn(n):
            sq_bn = np.zeros((n+1, 1))
            sq_bn[0, 0] = 1
            for i in range(1, n+1):
                sq_bn[i, 0] = -np.sqrt(i * (i+params[1]-1))
            return sq_bn

        bounds = [0, np.inf]

    AB = {'alpha_beta': np.concatenate((an(n_max), sqrt_bn(n_max)), axis=1),
          'bounds': bounds}

    return AB
+
+
def eval_rec_rule(x, max_deg, poly_type):
    """
    Evaluates the polynomial that corresponds to the Jacobi matrix defined
    from the AB.

    Parameters
    ----------
    x : array (n_samples)
        Points where the polynomials are evaluated.
    max_deg : int
        Maximum degree.
    poly_type : string
        Polynomial type.

    Returns
    -------
    values : array of shape (n_samples, max_deg+1)
        Polynomials corresponding to the Jacobi matrix.

    """
    # Recurrence coefficients (alpha_k, sqrt(beta_k)) for this family
    ab = poly_rec_coeffs(max_deg, poly_type)['alpha_beta']
    n_rows = ab.shape[0]

    # Column 0 is a zero padding column ("P_{-1}"); column 1 holds P_0.
    values = np.zeros((len(x), n_rows + 1))
    values[:, 1] = 1 / ab[0, 1]

    # Three-term recurrence:
    # P_{k+1} = ((x - alpha_k) P_k - sqrt(beta_k) P_{k-1}) / sqrt(beta_{k+1})
    for k in range(n_rows - 1):
        values[:, k + 2] = (
            (x - ab[k, 0]) * values[:, k + 1] - ab[k, 1] * values[:, k]
            ) / ab[k + 1, 1]

    # Drop the padding column
    return values[:, 1:]
+
+
def eval_rec_rule_arbitrary(x, max_deg, poly_coeffs):
    """
    Evaluates the polynomial at sample array x.

    Parameters
    ----------
    x : array (n_samples)
        Points where the polynomials are evaluated.
    max_deg : int
        Maximum degree.
    poly_coeffs : dict
        Polynomial coefficients computed based on moments.

    Returns
    -------
    values : array of shape (n_samples, max_deg+1)
        Univariate Polynomials evaluated at samples.

    """
    # Evaluate one polynomial per degree; coefficients are in ascending
    # power order, as expected by numpy's polyval.
    columns = [polyval(x, poly_coeffs[deg]) for deg in range(max_deg + 1)]

    values = np.zeros((len(x), max_deg + 1))
    for deg, col in enumerate(columns):
        values[:, deg] = col

    return values
+
+
def eval_univ_basis(x, max_deg, poly_types, apoly_coeffs=None):
    """
    Evaluates univariate regressors along input directions.

    Parameters
    ----------
    x : array of shape (n_samples, n_params)
        Training samples.
    max_deg : int
        Maximum polynomial degree.
    poly_types : list of strings
        List of polynomial types for all parameters.
    apoly_coeffs : dict , optional
        Polynomial coefficients computed based on moments. The default is None.

    Returns
    -------
    univ_vals : array of shape (n_samples, n_params, max_deg+1)
        Univariate polynomials for all degrees and parameters evaluated at x.

    """
    n_samples, n_params = x.shape
    univ_vals = np.zeros((n_samples, n_params, max_deg + 1))

    # Evaluate each input direction with its own polynomial family
    for par_i in range(n_params):
        p_type = poly_types[par_i]
        if p_type == 'arbitrary':
            # Data-driven (aPC) polynomials use precomputed coefficients
            univ_vals[:, par_i] = eval_rec_rule_arbitrary(
                x[:, par_i], max_deg, apoly_coeffs[f'p_{par_i+1}'])
        else:
            univ_vals[:, par_i] = eval_rec_rule(x[:, par_i], max_deg, p_type)

    return univ_vals
diff --git a/build/lib/bayesvalidrox/surrogate_models/exp_designs.py b/build/lib/bayesvalidrox/surrogate_models/exp_designs.py
new file mode 100644
index 000000000..fa03fe17d
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/exp_designs.py
@@ -0,0 +1,479 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Experimental design with associated sampling methods
+"""
+
+import numpy as np
+import math
+import itertools
+import chaospy
+import scipy.stats as st
+from tqdm import tqdm
+import h5py
+import os
+
+from .apoly_construction import apoly_construction
+from .input_space import InputSpace
+
+# -------------------------------------------------------------------------
def check_ranges(theta, ranges):
    """
    This function checks if theta lies in the given ranges.

    Parameters
    ----------
    theta : array
        Proposed parameter set.
    ranges : nested list
        List of the parameter ranges.

    Returns
    -------
    c : bool
        If it lies in the given range, it returns True, else False.

    """
    # theta is inside the ranges iff every coordinate is within its bounds
    # (idiomatic all() instead of the manual flag-and-early-return loop).
    return all(
        bounds[0] <= theta[i] <= bounds[1] for i, bounds in enumerate(ranges)
        )
+
+
+class ExpDesigns(InputSpace):
+    """
+    This class generates samples from the prescribed marginals for the model
+    parameters using the `Input` object.
+
+    Attributes
+    ----------
+    Input : obj
+        Input object containing the parameter marginals, i.e. name,
+        distribution type and distribution parameters or available raw data.
+    meta_Model_type : str
+        Type of the meta_Model_type.
+    sampling_method : str
+        Name of the sampling method for the experimental design. The following
+        sampling method are supported:
+
+        * random
+        * latin_hypercube
+        * sobol
+        * halton
+        * hammersley
+        * chebyshev(FT)
+        * grid(FT)
+        * user
+    hdf5_file : str
+        Name of the hdf5 file that contains the experimental design.
+    n_new_samples : int
+        Number of (initial) training points.
+    n_max_samples : int
+        Number of maximum training points.
+    mod_LOO_threshold : float
+        The modified leave-one-out cross validation threshold where the
+        sequential design stops.
+    tradeoff_scheme : str
+        Trade-off scheme to assign weights to the exploration and exploitation
+        scores in the sequential design.
+    n_canddidate : int
+        Number of candidate training sets to calculate the scores for.
+    explore_method : str
+        Type of the exploration method for the sequential design. The following
+        methods are supported:
+
+        * Voronoi
+        * random
+        * latin_hypercube
+        * LOOCV
+        * dual annealing
+    exploit_method : str
+        Type of the exploitation method for the sequential design. The
+        following methods are supported:
+
+        * BayesOptDesign
+        * BayesActDesign
+        * VarOptDesign
+        * alphabetic
+        * Space-filling
+    util_func : str or list
+        The utility function to be specified for the `exploit_method`. For the
+        available utility functions see Note section.
+    n_cand_groups : int
+        Number of candidate groups. Each group of candidate training sets will
+        be evaluated separately in parallel.
+    n_replication : int
+        Number of replications. Only for comparison. The default is 1.
+    post_snapshot : int
+        Whether to plot the posterior in the sequential design. The default is
+        `True`.
+    step_snapshot : int
+        The number of steps to plot the posterior in the sequential design. The
+        default is 1.
+    max_a_post : list or array
+        Maximum a posteriori of the posterior distribution, if known. The
+        default is `[]`.
+    adapt_verbose : bool
+        Whether to plot the model response vs that of metamodel for the new
+        training point in the sequential design.
+
+    Note
+    ----------
+    The following utility functions for the **exploitation** methods are
+    supported:
+
+    #### BayesOptDesign (when data is available)
+    - DKL (Kullback-Leibler Divergence)
+    - DPP (D-Posterior-precision)
+    - APP (A-Posterior-precision)
+
+    #### VarBasedOptDesign -> when data is not available
+    - Entropy (Entropy/MMSE/active learning)
+    - EIGF (Expected Improvement for Global fit)
+    - LOOCV (Leave-one-out Cross Validation)
+
+    #### alphabetic
+    - D-Opt (D-Optimality)
+    - A-Opt (A-Optimality)
+    - K-Opt (K-Optimality)
+    """
+
+    def __init__(self, Input, meta_Model_type='pce',
+                 sampling_method='random', hdf5_file=None,
+                 n_new_samples=1, n_max_samples=None, mod_LOO_threshold=1e-16,
+                 tradeoff_scheme=None, n_canddidate=1, explore_method='random',
+                 exploit_method='Space-filling', util_func='Space-filling',
+                 n_cand_groups=4, n_replication=1, post_snapshot=False,
+                 step_snapshot=1, max_a_post=[], adapt_verbose=False, max_func_itr=1):
+
+        self.InputObj = Input
+        self.meta_Model_type = meta_Model_type
+        self.sampling_method = sampling_method
+        self.hdf5_file = hdf5_file
+        self.n_new_samples = n_new_samples
+        self.n_max_samples = n_max_samples
+        self.mod_LOO_threshold = mod_LOO_threshold
+        self.explore_method = explore_method
+        self.exploit_method = exploit_method
+        self.util_func = util_func
+        self.tradeoff_scheme = tradeoff_scheme
+        self.n_canddidate = n_canddidate
+        self.n_cand_groups = n_cand_groups
+        self.n_replication = n_replication
+        self.post_snapshot = post_snapshot
+        self.step_snapshot = step_snapshot
+        self.max_a_post = max_a_post
+        self.adapt_verbose = adapt_verbose
+        self.max_func_itr = max_func_itr
+        
+        # Other 
+        self.apce = None
+        self.ndim = None
+        
+        # Init 
+        self.check_valid_inputs()
+        
+    # -------------------------------------------------------------------------
+    def generate_samples(self, n_samples, sampling_method='random',
+                         transform=False):
+        """
+        Generates samples with given sampling method
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+        sampling_method : str, optional
+            Sampling method. The default is `'random'`.
+        transform : bool, optional
+            Transformation via an isoprobabilistic transformation method. The
+            default is `False`.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            Generated samples from defined model input object.
+
+        """
+        try:
+            samples = chaospy.generate_samples(
+                int(n_samples), domain=self.origJDist, rule=sampling_method
+                )
+        except:
+            samples = self.random_sampler(int(n_samples)).T
+
+        return samples.T
+
+
+            
+    # -------------------------------------------------------------------------
+    def generate_ED(self, n_samples, transform=False,
+                    max_pce_deg=None):
+        """
+        Generates experimental designs (training set) with the given method.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested training points.
+        sampling_method : str, optional
+            Sampling method. The default is `'random'`.
+        transform : bool, optional
+            Isoprobabilistic transformation. The default is `False`.
+        max_pce_deg : int, optional
+            Maximum PCE polynomial degree. The default is `None`.
+            
+        Returns
+        -------
+        None
+
+        """
+        if n_samples <0:
+            raise ValueError('A negative number of samples cannot be created. Please provide positive n_samples')
+        n_samples = int(n_samples)
+        
+        if not hasattr(self, 'n_init_samples'):
+            self.n_init_samples = n_samples
+
+        # Generate the samples based on requested method
+        self.init_param_space(max_pce_deg)
+
+        sampling_method = self.sampling_method
+        # Pass user-defined samples as ED
+        if sampling_method == 'user':
+            if not hasattr(self, 'X'):
+                raise AttributeError('User-defined sampling cannot proceed as no samples provided. Please add them to this class as attribute X')
+            if not self.X.ndim == 2:
+                raise AttributeError('The provided samples shuld have 2 dimensions')
+            samples = self.X
+            self.n_samples = len(samples)
+
+        # Sample the distribution of parameters
+        elif self.input_data_given:
+            # Case II: Input values are directly given by the user.
+
+            if sampling_method == 'random':
+                samples = self.random_sampler(n_samples)
+
+            elif sampling_method == 'PCM' or \
+                    sampling_method == 'LSCM':
+                samples = self.pcm_sampler(n_samples, max_pce_deg)
+
+            else:
+                # Create ExpDesign in the actual space using chaospy
+                try:
+                    samples = chaospy.generate_samples(n_samples,
+                                                       domain=self.JDist,
+                                                       rule=sampling_method).T
+                except:
+                    samples = self.JDist.resample(n_samples).T
+
+        elif not self.input_data_given:
+            # Case I = User passed known distributions
+            samples = chaospy.generate_samples(n_samples, domain=self.JDist,
+                                               rule=sampling_method).T
+
+        self.X = samples
+            
+    def read_from_file(self, out_names):
+        """
+        Reads in the ExpDesign from a provided h5py file and saves the results.
+
+        Parameters
+        ----------
+        out_names : list of strings
+            The keys that are in the outputs (y) saved in the provided file.
+
+        Returns
+        -------
+        None.
+
+        """
+        if self.hdf5_file == None:
+            raise AttributeError('ExpDesign cannot be read in, please provide hdf5 file first')
+
+        # Read hdf5 file
+        f = h5py.File(self.hdf5_file, 'r+')
+
+        # Read EDX and pass it to ExpDesign object
+        try:
+            self.X = np.array(f["EDX/New_init_"])
+        except KeyError:
+            self.X = np.array(f["EDX/init_"])
+
+        # Update number of initial samples
+        self.n_init_samples = self.X.shape[0]
+
+        # Read EDX and pass it to ExpDesign object
+        self.Y = {}
+
+        # Extract x values
+        try:
+            self.Y["x_values"] = dict()
+            for varIdx, var in enumerate(out_names):
+                x = np.array(f[f"x_values/{var}"])
+                self.Y["x_values"][var] = x
+        except KeyError:
+            self.Y["x_values"] = np.array(f["x_values"])
+
+        # Store the output
+        for varIdx, var in enumerate(out_names):
+            try:
+                y = np.array(f[f"EDY/{var}/New_init_"])
+            except KeyError:
+                y = np.array(f[f"EDY/{var}/init_"])
+            self.Y[var] = y
+        f.close()
+        print(f'Experimental Design is read in from file {self.hdf5_file}')
+        print('')
+        
+    
+
+    # -------------------------------------------------------------------------
+    def random_sampler(self, n_samples, max_deg = None):
+        """
+        Samples the given raw data randomly.
+
+        Parameters
+        ----------
+        n_samples : int
+            Number of requested samples.
+            
+        max_deg : int, optional
+            Maximum degree. The default is `None`.
+            This will be used to run init_param_space, if it has not been done
+            until now.
+
+        Returns
+        -------
+        samples: array of shape (n_samples, n_params)
+            The sampling locations in the input space.
+
+        """
+        if not hasattr(self, 'raw_data'):
+            self.init_param_space(max_deg)
+        else:
+            if np.array(self.raw_data).ndim !=2:
+                raise AttributeError('The given raw data for sampling should have two dimensions')
+        samples = np.zeros((n_samples, self.ndim))
+        sample_size = self.raw_data.shape[1]
+
+        # Use a combination of raw data
+        if n_samples < sample_size:
+            for pa_idx in range(self.ndim):
+                # draw random indices
+                rand_idx = np.random.randint(0, sample_size, n_samples)
+                # store the raw data with given random indices
+                samples[:, pa_idx] = self.raw_data[pa_idx, rand_idx]
+        else:
+            try:
+                samples = self.JDist.resample(int(n_samples)).T
+            except AttributeError:
+                samples = self.JDist.sample(int(n_samples)).T
+            # Check if all samples are in the bound_tuples
+            for idx, param_set in enumerate(samples):
+                if not check_ranges(param_set, self.bound_tuples):
+                    try:
+                        proposed_sample = chaospy.generate_samples(
+                            1, domain=self.JDist, rule='random').T[0]
+                    except:
+                        proposed_sample = self.JDist.resample(1).T[0]
+                    while not check_ranges(proposed_sample,
+                                                 self.bound_tuples):
+                        try:
+                            proposed_sample = chaospy.generate_samples(
+                                1, domain=self.JDist, rule='random').T[0]
+                        except:
+                            proposed_sample = self.JDist.resample(1).T[0]
+                    samples[idx] = proposed_sample
+
+        return samples
+
+    # -------------------------------------------------------------------------
    def pcm_sampler(self, n_samples, max_deg):
        """
        Generates collocation points based on the root of the polynomial
        degrees.

        Parameters
        ----------
        n_samples : int
            Number of requested samples.
        max_deg : int
            Maximum degree defined by user. Will also be used to run 
            init_param_space if that has not been done beforehand.

        Returns
        -------
        opt_col_points: array of shape (n_samples, n_params)
            Collocation points.

        """
        
        # Make sure raw_data exists before using it below.
        if not hasattr(self, 'raw_data'):
            self.init_param_space(max_deg)

        raw_data = self.raw_data

        # Guess the closest degree to self.n_samples
        def M_uptoMax(deg):
            # Number of multivariate polynomial terms up to each total
            # degree d: binomial(ndim + d, d), for d = 1..deg.
            result = []
            for d in range(1, deg+1):
                result.append(math.factorial(self.ndim+d) //
                              (math.factorial(self.ndim) * math.factorial(d)))
            return np.array(result)
        #print(M_uptoMax(max_deg))
        #print(np.where(M_uptoMax(max_deg) > n_samples)[0])

        # Smallest degree whose basis size exceeds the requested sample count.
        # NOTE(review): raises IndexError if max_deg is too small to reach
        # n_samples terms -- verify callers guarantee this.
        guess_Deg = np.where(M_uptoMax(max_deg) > n_samples)[0][0]

        c_points = np.zeros((guess_Deg+1, self.ndim))

        def PolynomialPa(parIdx):
            # Data-driven (aPC) orthonormal polynomial coefficients for one
            # parameter, built from its raw data.
            return apoly_construction(self.raw_data[parIdx], max_deg)

        for i in range(self.ndim):
            # Roots of the (guess_Deg+1)-th orthogonal polynomial serve as
            # 1D collocation points (Gauss-type abscissae).
            # NOTE(review): np.trim_zeros may yield fewer than guess_Deg+1
            # roots, which would break this fixed-size assignment -- verify.
            poly_coeffs = PolynomialPa(i)[guess_Deg+1][::-1]
            c_points[:, i] = np.trim_zeros(np.roots(poly_coeffs))

        #  Construction of optimal integration points
        # The filter keeps all non-empty tuples, i.e. effectively the full
        # tensor product of 1D point indices.
        Prod = itertools.product(np.arange(1, guess_Deg+2), repeat=self.ndim)
        sort_dig_unique_combos = np.array(list(filter(lambda x: x, Prod)))

        # Ranking relatively mean: distance of each 1D point from the mean
        # of the corresponding parameter's raw data.
        Temp = np.empty(shape=[0, guess_Deg+1])
        for j in range(self.ndim):
            s = abs(c_points[:, j]-np.mean(raw_data[j]))
            Temp = np.append(Temp, [s], axis=0)
        temp = Temp.T

        # NOTE(review): np.sort returns sorted *values* (floats), which are
        # then used as fancy indices below; np.argsort appears to be the
        # intent and modern NumPy rejects float indices -- verify.
        index_CP = np.sort(temp, axis=0)
        sort_cpoints = np.empty((0, guess_Deg+1))

        for j in range(self.ndim):
            #print(index_CP[:, j])
            sort_cp = c_points[index_CP[:, j], j]
            sort_cpoints = np.vstack((sort_cpoints, sort_cp))

        # Mapping of Combination to Cpoint Combination
        sort_unique_combos = np.empty(shape=[0, self.ndim])
        for i in range(len(sort_dig_unique_combos)):
            sort_un_comb = []
            for j in range(self.ndim):
                SortUC = sort_cpoints[j, sort_dig_unique_combos[i, j]-1]
                sort_un_comb.append(SortUC)
                sort_uni_comb = np.asarray(sort_un_comb)
            sort_unique_combos = np.vstack((sort_unique_combos, sort_uni_comb))

        # Output the collocation points
        # NOTE(review): the truncated branch uses self.n_samples rather than
        # the n_samples argument -- confirm they always agree.
        if self.sampling_method.lower() == 'lscm':
            opt_col_points = sort_unique_combos
        else:
            opt_col_points = sort_unique_combos[0:self.n_samples]

        return opt_col_points
diff --git a/build/lib/bayesvalidrox/surrogate_models/exploration.py b/build/lib/bayesvalidrox/surrogate_models/exploration.py
new file mode 100644
index 000000000..6abb652f1
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/exploration.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Exploration for sequential training of metamodels
+"""
+
+import numpy as np
+from scipy.spatial import distance
+
+
class Exploration:
    """
    Created based on the Surrogate Modeling Toolbox (SUMO) [1].

    [1] Gorissen, D., Couckuyt, I., Demeester, P., Dhaene, T. and Crombecq, K.,
        2010. A surrogate modeling and adaptive sampling toolbox for computer
        based design. Journal of machine learning research.-Cambridge, Mass.,
        11, pp.2051-2055. sumo@sumo.intec.ugent.be - http://sumo.intec.ugent.be

    Attributes
    ----------
    ExpDesign : obj
        ExpDesign object.
    n_candidate : int
        Number of candidate samples.
    mc_criterion : str
        Selection criterion. The default is `'mc-intersite-proj-th'`. Another
        option is `'mc-intersite-proj'`.
    w : int
        Number of random points in the domain for each sample of the
        training set.
    """

    def __init__(self, ExpDesign, n_candidate,
                 mc_criterion='mc-intersite-proj-th'):
        self.ExpDesign = ExpDesign
        self.n_candidate = n_candidate
        self.mc_criterion = mc_criterion
        # Monte-Carlo points per training sample used by approximate_voronoi
        self.w = 100

    def get_exploration_samples(self):
        """
        This function generates candidates to be selected as new design and
        their associated exploration scores.

        Dispatches to the Voronoi-based or the plain Monte-Carlo strategy
        depending on `ExpDesign.explore_method`.

        Returns
        -------
        all_candidates : array of shape (n_candidate, n_params)
            A list of samples.
        exploration_scores: arrays of shape (n_candidate)
            Exploration scores.
        """
        explore_method = self.ExpDesign.explore_method

        print("\n")
        print(f' The {explore_method}-Method is selected as the exploration '
              'method.')
        print("\n")

        if explore_method == 'Voronoi':
            # Generate samples using the Voronoi method
            all_candidates, exploration_scores = self.get_vornoi_samples()
        else:
            # Generate samples using the MC method
            all_candidates, exploration_scores = self.get_mc_samples()

        return all_candidates, exploration_scores

    # -------------------------------------------------------------------------
    def get_vornoi_samples(self):
        """
        This function generates samples based on voronoi cells and their
        corresponding scores.

        One candidate is kept per (non-empty) Voronoi cell of the existing
        design; the cell's estimated relative volume serves as its
        exploration score.

        Returns
        -------
        new_samples : array of shape (n_candidate, n_params)
            A list of samples.
        exploration_scores: arrays of shape (n_candidate)
            Exploration scores.
        """

        mc_criterion = self.mc_criterion
        n_candidate = self.n_candidate
        # Get the Old ExpDesign #samples
        old_ED_X = self.ExpDesign.X
        ndim = old_ED_X.shape[1]

        # calculate error #averageErrors
        # approximate_voronoi also fills self.closest_points (candidates
        # grouped by nearest design point)
        error_voronoi, all_candidates = self.approximate_voronoi(
            self.w, old_ED_X
            )

        # Pick the best candidate point in the voronoi cell
        # for each best sample
        selected_samples = np.empty((0, ndim))
        bad_samples = []

        for index in range(len(error_voronoi)):

            # get candidate new samples from voronoi tesselation
            candidates = self.closest_points[index]

            # get total number of candidates
            n_new_samples = candidates.shape[0]

            # still no candidate samples around this one, skip it!
            if n_new_samples == 0:
                print('The following sample has been skipped because there '
                      'were no candidate samples around it...')
                print(old_ED_X[index])
                bad_samples.append(index)
                continue

            # find candidate that is farthest away from any existing sample
            # NOTE(review): max_min_distance/best_candidate tracked in the
            # loop below are overwritten by the argsort selection afterwards
            # (dead state) -- presumably legacy code; verify.
            max_min_distance = 0
            best_candidate = 0
            min_intersite_dist = np.zeros((n_new_samples))
            min_projected_dist = np.zeros((n_new_samples))

            for j in range(n_new_samples):

                # existing design plus the candidates accepted so far
                new_samples = np.vstack((old_ED_X, selected_samples))

                # find min distorted distance from all other samples
                euclidean_dist = self._build_dist_matrix_point(
                    new_samples, candidates[j], do_sqrt=True)
                min_euclidean_dist = np.min(euclidean_dist)
                min_intersite_dist[j] = min_euclidean_dist

                # Check if this is the maximum minimum distance from all other
                # samples
                if min_euclidean_dist >= max_min_distance:
                    max_min_distance = min_euclidean_dist
                    best_candidate = j

                # Projected distance (Chebyshev = max coordinate-wise gap)
                projected_dist = distance.cdist(
                    new_samples, [candidates[j]], 'chebyshev')
                min_projected_dist[j] = np.min(projected_dist)

            if mc_criterion == 'mc-intersite-proj':
                # Weighted sum of intersite and projected distances
                weight_euclidean_dist = 0.5 * ((n_new_samples+1)**(1/ndim) - 1)
                weight_projected_dist = 0.5 * (n_new_samples+1)
                total_dist_scores = weight_euclidean_dist * min_intersite_dist
                total_dist_scores += weight_projected_dist * min_projected_dist

            elif mc_criterion == 'mc-intersite-proj-th':
                # Threshold variant: drop candidates whose projected distance
                # falls below d_min, then rank by intersite distance only.
                alpha = 0.5  # chosen (tradeoff)
                d_min = 2 * alpha / n_new_samples
                if any(min_projected_dist < d_min):
                    # NOTE(review): np.delete with a boolean array wrapped in
                    # a list relies on numpy-version-dependent behavior --
                    # verify against the pinned numpy version.
                    candidates = np.delete(
                        candidates, [min_projected_dist < d_min], axis=0
                        )
                    total_dist_scores = np.delete(
                        min_intersite_dist, [min_projected_dist < d_min],
                        axis=0
                        )
                else:
                    total_dist_scores = min_intersite_dist
            else:
                raise NameError(
                    'The MC-Criterion you requested is not available.'
                    )

            # Add the best candidate to the list of new samples
            best_candidate = np.argsort(total_dist_scores)[::-1][:n_candidate]
            selected_samples = np.vstack(
                (selected_samples, candidates[best_candidate])
                )

        self.new_samples = selected_samples
        # Scores are the cell volume estimates, minus the skipped cells
        self.exploration_scores = np.delete(error_voronoi, bad_samples, axis=0)

        return self.new_samples, self.exploration_scores

    # -------------------------------------------------------------------------
    def get_mc_samples(self, all_candidates=None):
        """
        This function generates random samples based on Global Monte Carlo
        methods and their corresponding scores, based on [1].

        [1] Crombecq, K., Laermans, E. and Dhaene, T., 2011. Efficient
            space-filling and non-collapsing sequential design strategies for
            simulation-based modeling. European Journal of Operational Research
            , 214(3), pp.683-696.
            DOI: https://doi.org/10.1016/j.ejor.2011.05.032

        Implemented methods to compute scores:
            1) mc-intersite-proj
            2) mc-intersite-proj-th

        Arguments
        ---------
        all_candidates : array, optional
            Samples to compute the scores for. The default is `None`. In this
            case, samples will be generated by defined model input marginals.

        Returns
        -------
        new_samples : array of shape (n_candidate, n_params)
            A list of samples.
        exploration_scores: arrays of shape (n_candidate)
            Exploration scores, normalized to sum to 1 (ignoring NaNs).
        """
        explore_method = self.ExpDesign.explore_method
        mc_criterion = self.mc_criterion
        if all_candidates is None:
            n_candidate = self.n_candidate
        else:
            n_candidate = all_candidates.shape[0]

        # Get the Old ExpDesign #samples
        old_ED_X = self.ExpDesign.X
        ndim = old_ED_X.shape[1]

        # ----- Compute the number of random points -----
        if all_candidates is None:
            # Generate MC Samples
            all_candidates = self.ExpDesign.generate_samples(
                self.n_candidate, explore_method
                )
        self.all_candidates = all_candidates

        # initialization
        min_intersite_dist = np.zeros((n_candidate))
        min_projected_dist = np.zeros((n_candidate))

        for i, candidate in enumerate(all_candidates):

            # find candidate that is farthest away from any existing sample
            # NOTE(review): maxMinDistance is written but never read after
            # the loop -- dead state kept for parity with the Voronoi path.
            maxMinDistance = 0

            # find min distorted distance from all other samples
            euclidean_dist = self._build_dist_matrix_point(
                old_ED_X, candidate, do_sqrt=True
                )
            min_euclidean_dist = np.min(euclidean_dist)
            min_intersite_dist[i] = min_euclidean_dist

            # Check if this is the maximum minimum distance from all other
            # samples
            if min_euclidean_dist >= maxMinDistance:
                maxMinDistance = min_euclidean_dist

            # Projected distance (Chebyshev metric)
            projected_dist = self._build_dist_matrix_point(
                old_ED_X, candidate, 'chebyshev'
                )
            min_projected_dist[i] = np.min(projected_dist)

        if mc_criterion == 'mc-intersite-proj':
            # Weighted combination of both distance measures
            weight_euclidean_dist = ((n_candidate+1)**(1/ndim) - 1) * 0.5
            weight_projected_dist = (n_candidate+1) * 0.5
            total_dist_scores = weight_euclidean_dist * min_intersite_dist
            total_dist_scores += weight_projected_dist * min_projected_dist

        elif mc_criterion == 'mc-intersite-proj-th':
            # Threshold variant: discard candidates too close (in projection)
            # to the existing design, then score by intersite distance.
            alpha = 0.5  # chosen (tradeoff)
            d_min = 2 * alpha / n_candidate
            if any(min_projected_dist < d_min):
                # NOTE(review): boolean array wrapped in a list passed to
                # np.delete -- numpy-version-dependent; verify.
                all_candidates = np.delete(
                    all_candidates, [min_projected_dist < d_min], axis=0
                    )
                total_dist_scores = np.delete(
                    min_intersite_dist, [min_projected_dist < d_min], axis=0
                    )
            else:
                total_dist_scores = min_intersite_dist
        else:
            raise NameError('The MC-Criterion you requested is not available.')

        self.new_samples = all_candidates
        # Normalize scores so they sum to one (unlike the Voronoi path)
        self.exploration_scores = total_dist_scores
        self.exploration_scores /= np.nansum(total_dist_scores)

        return self.new_samples, self.exploration_scores

    # -------------------------------------------------------------------------
    def approximate_voronoi(self, w, samples):
        """
        An approximate (monte carlo) version of Matlab's voronoi command.

        Arguments
        ---------
        w : int
            Number of random points to draw per sample of the design.
        samples : array
            Old experimental design to be used as center points for voronoi
            cells.

        Returns
        -------
        areas : array
            An approximation of the voronoi cells' areas (fractions of the
            random points falling into each cell; they sum to 1).
        all_candidates: list of arrays
            A list of samples in each voronoi cell.
        """
        n_samples = samples.shape[0]
        ndim = samples.shape[1]

        # Compute the number of random points
        n_points = w * samples.shape[0]
        # Generate w random points in the domain for each sample
        points = self.ExpDesign.generate_samples(n_points, 'random')
        self.all_candidates = points

        # Calculate the nearest sample to each point
        self.areas = np.zeros((n_samples))
        self.closest_points = [np.empty((0, ndim)) for i in range(n_samples)]

        # Compute the minimum distance from all the samples of old_ED_X for
        # each test point
        for idx in range(n_points):
            # calculate the minimum distance
            distances = self._build_dist_matrix_point(
                samples, points[idx], do_sqrt=True
                )
            closest_sample = np.argmin(distances)

            # Add to the voronoi list of the closest sample
            self.areas[closest_sample] = self.areas[closest_sample] + 1
            prev_closest_points = self.closest_points[closest_sample]
            self.closest_points[closest_sample] = np.vstack(
                (prev_closest_points, points[idx])
                )

        # Divide by the amount of points to get the estimated volume of each
        # voronoi cell
        self.areas /= n_points

        # Largest cell volume in percent (diagnostic)
        self.perc = np.max(self.areas * 100)

        # Cell volumes double as the exploration "errors"
        self.errors = self.areas

        return self.areas, self.all_candidates

    # -------------------------------------------------------------------------
    def _build_dist_matrix_point(self, samples, point, method='euclidean',
                                 do_sqrt=False):
        """
        Calculates the intersite distance of all points in samples from point.

        Parameters
        ----------
        samples : array of shape (n_samples, n_params)
            The old experimental design.
        point : array
            A candidate point.
        method : str
            Distance method (any metric accepted by scipy cdist).
        do_sqrt : bool, optional
            If `True`, return the plain cdist distances; if `False`, return
            the squared distances. The default is `False`.

        Returns
        -------
        distances : array
            Distances (or squared distances) of shape (n_samples, 1).

        """
        distances = distance.cdist(samples, np.array([point]), method)

        # do square root?
        # NOTE: cdist already returns non-squared distances, so do_sqrt=True
        # simply passes them through; do_sqrt=False squares them.
        if do_sqrt:
            return distances
        else:
            return distances**2
+
diff --git a/build/lib/bayesvalidrox/surrogate_models/glexindex.py b/build/lib/bayesvalidrox/surrogate_models/glexindex.py
new file mode 100644
index 000000000..90877331e
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/glexindex.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Multi indices for monomial exponents.
+Credit: Jonathan Feinberg
+https://github.com/jonathf/numpoly/blob/master/numpoly/utils/glexindex.py
+"""
+
+import numpy
+import numpy.typing
+
+
def glexindex(start, stop=None, dimensions=1, cross_truncation=1.,
              graded=False, reverse=False):
    """
    Generate graded lexicographical multi-indices for the monomial exponents.
    Args:
        start (Union[int, numpy.ndarray]):
            The lower order of the indices. If array of int, counts as lower
            bound for each axis.
        stop (Union[int, numpy.ndarray, None]):
            The maximum shape included. If omitted: stop <- start; start <- 0
            If int is provided, set as largest total order. If array of int,
            set as upper bound for each axis.
        dimensions (int):
            The number of dimensions in the expansion.
        cross_truncation (float, Tuple[float, float]):
            Use hyperbolic cross truncation scheme to reduce the number of
            terms in expansion. If two values are provided, first is low bound
            truncation, while the latter upper bound. If only one value, upper
            bound is assumed.
        graded (bool):
            Graded sorting, meaning the indices are always sorted by the index
            sum. E.g. ``(2, 2, 2)`` has a sum of 6, and will therefore be
            consider larger than both ``(3, 1, 1)`` and ``(1, 1, 3)``.
        reverse (bool):
            Reversed lexicographical sorting meaning that ``(1, 3)`` is
            considered smaller than ``(3, 1)``, instead of the opposite.
    Returns:
        list:
            Order list of indices.
    Examples:
        >>> numpoly.glexindex(4).tolist()
        [[0], [1], [2], [3]]
        >>> numpoly.glexindex(2, dimensions=2).tolist()
        [[0, 0], [1, 0], [0, 1]]
        >>> numpoly.glexindex(start=2, stop=3, dimensions=2).tolist()
        [[2, 0], [1, 1], [0, 2]]
        >>> numpoly.glexindex([1, 2, 3]).tolist()
        [[0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]]
        >>> numpoly.glexindex([1, 2, 3], cross_truncation=numpy.inf).tolist()
        [[0, 0, 0], [0, 1, 0], [0, 0, 1], [0, 1, 1], [0, 0, 2], [0, 1, 2]]
    """
    # Normalize start/stop to per-axis integer arrays of equal length.
    if stop is None:
        start, stop = 0, start
    start = numpy.array(start, dtype=int).flatten()
    stop = numpy.array(stop, dtype=int).flatten()
    start, stop, _ = numpy.broadcast_arrays(start, stop, numpy.empty(dimensions))

    # Single value -> same truncation norm for lower and upper bound.
    cross_truncation = cross_truncation*numpy.ones(2)
    
    # Moved here from _glexindex
    bound = stop.max()
    # NOTE: `dimensions` is superseded by the broadcast length of start/stop.
    dimensions = len(start)
    start = numpy.clip(start, a_min=0, a_max=None)
    # Compact dtype keeps the intermediate index table small.
    dtype = numpy.uint8 if bound < 256 else numpy.uint16
    range_ = numpy.arange(bound, dtype=dtype)
    indices = range_[:, numpy.newaxis]

    # Grow the index table one dimension at a time.
    for idx in range(dimensions-1):

        # Truncate at each step to keep memory usage low
        if idx:
            indices = indices[cross_truncate(indices, bound-1, cross_truncation[1])]

        # Repeats the current set of indices.
        # e.g. [0,1,2] -> [0,1,2,0,1,2,...,0,1,2]
        indices = numpy.tile(indices, (bound, 1))

        # Stretches ranges over the new dimension.
        # e.g. [0,1,2] -> [0,0,...,0,1,1,...,1,2,2,...,2]
        front = range_.repeat(len(indices)//bound)[:, numpy.newaxis]

        # Puts them two together.
        indices = numpy.column_stack((front, indices))

    # Complete the truncation scheme
    if dimensions == 1:
        indices = indices[(indices >= start) & (indices < bound)]
    else:
        # XOR keeps indices inside the upper truncation but outside the
        # lower one, i.e. the shell between the two bounds.
        lower = cross_truncate(indices, start-1, cross_truncation[0])
        upper = cross_truncate(indices, stop-1, cross_truncation[1])
        indices = indices[lower ^ upper]

    indices = numpy.array(indices, dtype=int).reshape(-1, dimensions)
    if indices.size:
        # moved here from glexsort: lexicographic sort over the axes,
        # optionally reversed and/or re-ordered by total degree (graded).
        keys = indices.T
        keys_ = numpy.atleast_2d(keys)
        if reverse:
            keys_ = keys_[::-1]
    
        indices_sort = numpy.array(numpy.lexsort(keys_))
        if graded:
            indices_sort = indices_sort[numpy.argsort(
                numpy.sum(keys_[:, indices_sort], axis=0))].T
        
        indices = indices[indices_sort]
    return indices
+
def cross_truncate(indices, bound, norm):
    r"""
    Truncate of indices using L_p norm.
    .. math:
        L_p(x) = \sum_i |x_i/b_i|^p ^{1/p} \leq 1
    where :math:`b_i` are bounds that each :math:`x_i` should follow.
    Args:
        indices (Sequence[int]):
            Indices to be truncated.
        bound (int, Sequence[int]):
            The bound function for witch the indices can not be larger than.
        norm (float, Sequence[float]):
            The `p` in the `L_p`-norm. Support includes both `L_0` and `L_inf`.
    Returns:
        Boolean indices to ``indices`` with True for each index where the
        truncation criteria holds.
    Examples:
        >>> indices = numpy.array(numpy.mgrid[:10, :10]).reshape(2, -1).T
        >>> indices[cross_truncate(indices, 2, norm=0)].T
        array([[0, 0, 0, 1, 2],
               [0, 1, 2, 0, 0]])
        >>> indices[cross_truncate(indices, 2, norm=1)].T
        array([[0, 0, 0, 1, 1, 2],
               [0, 1, 2, 0, 1, 0]])
        >>> indices[cross_truncate(indices, [0, 1], norm=1)].T
        array([[0, 0],
               [0, 1]])
    """
    assert norm >= 0, "negative L_p norm not allowed"
    # Broadcast the bound(s) to one float per axis.
    # `numpy.asfarray` was removed in NumPy 2.0; use asarray with dtype=float.
    bound = numpy.asarray(bound, dtype=float).flatten()*numpy.ones(indices.shape[1])

    # Any negative bound rules out every index.
    if numpy.any(bound < 0):
        return numpy.zeros((len(indices),), dtype=bool)

    # Zero bounds force the corresponding axes to zero; recurse on the rest.
    if numpy.any(bound == 0):
        out = numpy.all(indices[:, bound == 0] == 0, axis=-1)
        if numpy.any(bound):
            out &= cross_truncate(indices[:, bound != 0], bound[bound != 0], norm=norm)
        return out

    if norm == 0:
        # L_0: at most one non-zero component, each within its bound.
        out = numpy.sum(indices > 0, axis=-1) <= 1
        out[numpy.any(indices > bound, axis=-1)] = False
    elif norm == numpy.inf:
        # L_inf: every component within its bound.
        out = numpy.max(indices/bound, axis=-1) <= 1
    else:
        # General L_p ball membership.
        out = numpy.sum((indices/bound)**norm, axis=-1)**(1./norm) <= 1

    # The all-zero index must always survive truncation.
    assert numpy.all(out[numpy.all(indices == 0, axis=-1)])

    return out
diff --git a/build/lib/bayesvalidrox/surrogate_models/input_space.py b/build/lib/bayesvalidrox/surrogate_models/input_space.py
new file mode 100644
index 000000000..4e010d66f
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/input_space.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Input space built from set prior distributions
+"""
+
+import numpy as np
+import chaospy
+import scipy.stats as st
+
+
+class InputSpace:
+    """
+    This class generates the input space for the metamodel from the
+    distributions provided using the `Input` object.
+
+    Attributes
+    ----------
+    Input : obj
+        Input object containing the parameter marginals, i.e. name,
+        distribution type and distribution parameters or available raw data.
+    meta_Model_type : str
+        Type of the meta_Model_type.
+
+    """
+
+    def __init__(self, Input, meta_Model_type='pce'):
+        self.InputObj = Input
+        self.meta_Model_type = meta_Model_type
+        
+        # Other 
+        self.apce = None
+        self.ndim = None
+        
+        # Init 
+        self.check_valid_inputs()
+        
+        
+    def check_valid_inputs(self)-> None:
+        """
+        Check if the given InputObj is valid to use for further calculations:
+            Has some Marginals
+            Marginals have valid priors
+            All Marginals given as the same type (samples vs dist)
+
+        Returns
+        -------
+        None
+
+        """
+        Inputs = self.InputObj
+        self.ndim = len(Inputs.Marginals)
+        
+        # Check if PCE or aPCE metamodel is selected.
+        # TODO: test also for 'pce'??
+        if self.meta_Model_type.lower() == 'apce':
+            self.apce = True
+        else:
+            self.apce = False
+
+        # check if marginals given 
+        if not self.ndim >=1:
+            raise AssertionError('Cannot build distributions if no marginals are given')
+            
+        # check that each marginal is valid
+        for marginals in Inputs.Marginals:
+            if len(marginals.input_data) == 0:
+                if marginals.dist_type == None:
+                    raise AssertionError('Not all marginals were provided priors')
+                    break
+            if np.array(marginals.input_data).shape[0] and (marginals.dist_type != None):
+                raise AssertionError('Both samples and distribution type are given. Please choose only one.')
+                break
+                
+        # Check if input is given as dist or input_data.
+        self.input_data_given = -1
+        for marg in Inputs.Marginals:
+            #print(self.input_data_given)
+            size = np.array(marg.input_data).shape[0]
+            #print(f'Size: {size}')
+            if size and abs(self.input_data_given) !=1:
+                self.input_data_given = 2
+                break
+            if (not size) and self.input_data_given > 0:
+                self.input_data_given = 2
+                break
+            if not size:
+                self.input_data_given = 0
+            if size:
+                self.input_data_given = 1
+                
+        if self.input_data_given == 2:
+            raise AssertionError('Distributions cannot be built as the priors have different types')
+            
+    
+        # Get the bounds if input_data are directly defined by user:
+        if self.input_data_given:
+            for i in range(self.ndim):
+                low_bound = np.min(Inputs.Marginals[i].input_data)
+                up_bound = np.max(Inputs.Marginals[i].input_data)
+                Inputs.Marginals[i].parameters = [low_bound, up_bound]
+
+  
+
+    # -------------------------------------------------------------------------
+    def init_param_space(self, max_deg=None):
+        """
+        Initializes parameter space.
+
+        Parameters
+        ----------
+        max_deg : int, optional
+            Maximum degree. The default is `None`.
+
+        Creates
+        -------
+        raw_data : array of shape (n_params, n_samples)
+            Raw data.
+        bound_tuples : list of tuples
+            A list containing lower and upper bounds of parameters.
+
+        """
+        # Recheck all before running!
+        self.check_valid_inputs()
+        
+        Inputs = self.InputObj
+        ndim = self.ndim
+        rosenblatt_flag = Inputs.Rosenblatt
+        mc_size = 50000
+
+        # Save parameter names
+        self.par_names = []
+        for parIdx in range(ndim):
+            self.par_names.append(Inputs.Marginals[parIdx].name)
+
+        # Create a multivariate probability distribution
+        # TODO: change this to make max_deg obligatory? at least in some specific cases?
+        if max_deg is not None:
+            JDist, poly_types = self.build_polytypes(rosenblatt=rosenblatt_flag)
+            self.JDist, self.poly_types = JDist, poly_types
+
+        if self.input_data_given:
+            self.MCSize = len(Inputs.Marginals[0].input_data)
+            self.raw_data = np.zeros((ndim, self.MCSize))
+
+            for parIdx in range(ndim):
+                # Save parameter names
+                try:
+                    self.raw_data[parIdx] = np.array(
+                        Inputs.Marginals[parIdx].input_data)
+                except:
+                    self.raw_data[parIdx] = self.JDist[parIdx].sample(mc_size)
+
+        else:
+            # Generate random samples based on parameter distributions
+            self.raw_data = chaospy.generate_samples(mc_size,
+                                                     domain=self.JDist)
+
+        # Extract moments
+        for parIdx in range(ndim):
+            mu = np.mean(self.raw_data[parIdx])
+            std = np.std(self.raw_data[parIdx])
+            self.InputObj.Marginals[parIdx].moments = [mu, std]
+
+        # Generate the bounds based on given inputs for marginals
+        bound_tuples = []
+        for i in range(ndim):
+            if Inputs.Marginals[i].dist_type == 'unif':
+                low_bound = Inputs.Marginals[i].parameters[0]
+                up_bound = Inputs.Marginals[i].parameters[1]
+            else:
+                low_bound = np.min(self.raw_data[i])
+                up_bound = np.max(self.raw_data[i])
+
+            bound_tuples.append((low_bound, up_bound))
+
+        self.bound_tuples = tuple(bound_tuples)
+
+    # -------------------------------------------------------------------------
+    def build_polytypes(self, rosenblatt):
+        """
+        Creates the polynomial types to be passed to univ_basis_vals method of
+        the MetaModel object.
+
+        Parameters
+        ----------
+        rosenblatt : bool
+            Rosenblatt transformation flag.
+
+        Returns
+        -------
+        orig_space_dist : object
+            A chaospy JDist object or a gaussian_kde object.
+        poly_types : list
+            List of polynomial types for the parameters.
+
+        """
+        Inputs = self.InputObj
+        
+        all_data = []
+        all_dist_types = []
+        orig_joints = []
+        poly_types = []
+        
+        for parIdx in range(self.ndim):
+
+            if Inputs.Marginals[parIdx].dist_type is None:
+                data = Inputs.Marginals[parIdx].input_data
+                all_data.append(data)
+                dist_type = None
+            else:
+                dist_type = Inputs.Marginals[parIdx].dist_type
+                params = Inputs.Marginals[parIdx].parameters
+
+            if rosenblatt:
+                polytype = 'hermite'
+                dist = chaospy.Normal()
+
+            elif dist_type is None:
+                polytype = 'arbitrary'
+                dist = None
+
+            elif 'unif' in dist_type.lower():
+                polytype = 'legendre'
+                if not np.array(params).shape[0]>=2:
+                    raise AssertionError('Distribution has too few parameters!')
+                dist = chaospy.Uniform(lower=params[0], upper=params[1])
+
+            elif 'norm' in dist_type.lower() and \
+                 'log' not in dist_type.lower():
+                if not np.array(params).shape[0]>=2:
+                    raise AssertionError('Distribution has too few parameters!')
+                polytype = 'hermite'
+                dist = chaospy.Normal(mu=params[0], sigma=params[1])
+
+            elif 'gamma' in dist_type.lower():
+                polytype = 'laguerre'
+                if not np.array(params).shape[0]>=3:
+                    raise AssertionError('Distribution has too few parameters!')
+                dist = chaospy.Gamma(shape=params[0],
+                                     scale=params[1],
+                                     shift=params[2])
+
+            elif 'beta' in dist_type.lower():
+                if not np.array(params).shape[0]>=4:
+                    raise AssertionError('Distribution has too few parameters!')
+                polytype = 'jacobi'
+                dist = chaospy.Beta(alpha=params[0], beta=params[1],
+                                    lower=params[2], upper=params[3])
+
+            elif 'lognorm' in dist_type.lower():
+                polytype = 'hermite'
+                if not np.array(params).shape[0]>=2:
+                    raise AssertionError('Distribution has too few parameters!')
+                mu = np.log(params[0]**2/np.sqrt(params[0]**2 + params[1]**2))
+                sigma = np.sqrt(np.log(1 + params[1]**2 / params[0]**2))
+                dist = chaospy.LogNormal(mu, sigma)
+                # dist = chaospy.LogNormal(mu=params[0], sigma=params[1])
+
+            elif 'expon' in dist_type.lower():
+                polytype = 'exponential'
+                if not np.array(params).shape[0]>=2:
+                    raise AssertionError('Distribution has too few parameters!')
+                dist = chaospy.Exponential(scale=params[0], shift=params[1])
+
+            elif 'weibull' in dist_type.lower():
+                polytype = 'weibull'
+                if not np.array(params).shape[0]>=3:
+                    raise AssertionError('Distribution has too few parameters!')
+                dist = chaospy.Weibull(shape=params[0], scale=params[1],
+                                       shift=params[2])
+
+            else:
+                message = (f"DistType {dist_type} for parameter"
+                           f"{parIdx+1} is not available.")
+                raise ValueError(message)
+
+            if self.input_data_given or self.apce:
+                polytype = 'arbitrary'
+
+            # Store dists and poly_types
+            orig_joints.append(dist)
+            poly_types.append(polytype)
+            all_dist_types.append(dist_type)
+
+        # Prepare final output to return
+        if None in all_dist_types:
+            # Naive approach: Fit a gaussian kernel to the provided data
+            Data = np.asarray(all_data)
+            try:
+                orig_space_dist = st.gaussian_kde(Data)
+            except:
+                raise ValueError('The samples provided to the Marginals should be 1D only')
+            self.prior_space = orig_space_dist
+        else:
+            orig_space_dist = chaospy.J(*orig_joints)
+            try:
+                self.prior_space = st.gaussian_kde(orig_space_dist.sample(10000))
+            except:
+                raise ValueError('Parameter values are not valid, please set differently')
+
+        return orig_space_dist, poly_types
+
+    # -------------------------------------------------------------------------
+    def transform(self, X, params=None, method=None):
+        """
+        Transforms the samples via either a Rosenblatt or an isoprobabilistic
+        transformation.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples,n_params)
+            Samples to be transformed.
+        method : string
+            If transformation method is 'user' transform X, else just pass X.
+
+        Returns
+        -------
+        tr_X: array of shape (n_samples,n_params)
+            Transformed samples.
+
+        """
+        # Check for built JDist
+        if not hasattr(self, 'JDist'):
+            raise AttributeError('Call function init_param_space first to create JDist')
+            
+        # Check if X is 2d
+        if X.ndim != 2:
+            raise AttributeError('X should have two dimensions')
+            
+        # Check if size of X matches Marginals
+        if X.shape[1]!= self.ndim:
+            raise AttributeError('The second dimension of X should be the same size as the number of marginals in the InputObj')
+        
+        if self.InputObj.Rosenblatt:
+            self.origJDist, _ = self.build_polytypes(False)
+            if method == 'user':
+                tr_X = self.JDist.inv(self.origJDist.fwd(X.T)).T
+            else:
+                # Inverse to original spcace -- generate sample ED
+                tr_X = self.origJDist.inv(self.JDist.fwd(X.T)).T
+        else:
+            # Transform samples via an isoprobabilistic transformation
+            n_samples, n_params = X.shape
+            Inputs = self.InputObj
+            origJDist = self.JDist
+            poly_types = self.poly_types
+
+            disttypes = []
+            for par_i in range(n_params):
+                disttypes.append(Inputs.Marginals[par_i].dist_type)
+
+            # Pass non-transformed X, if arbitrary PCE is selected.
+            if None in disttypes or self.input_data_given or self.apce:
+                return X
+
+            cdfx = np.zeros((X.shape))
+            tr_X = np.zeros((X.shape))
+
+            for par_i in range(n_params):
+
+                # Extract the parameters of the original space
+                disttype = disttypes[par_i]
+                if disttype is not None:
+                    dist = origJDist[par_i]
+                else:
+                    dist = None
+                polytype = poly_types[par_i]
+                cdf = np.vectorize(lambda x: dist.cdf(x))
+
+                # Extract the parameters of the transformation space based on
+                # polyType
+                if polytype == 'legendre' or disttype == 'uniform':
+                    # Generate Y_Dists based
+                    params_Y = [-1, 1]
+                    dist_Y = st.uniform(loc=params_Y[0],
+                                        scale=params_Y[1]-params_Y[0])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == 'hermite' or disttype == 'norm':
+                    params_Y = [0, 1]
+                    dist_Y = st.norm(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                elif polytype == 'laguerre' or disttype == 'gamma':
+                    if params == None:
+                        raise AttributeError('Additional parameters have to be set for the gamma distribution!')
+                    params_Y = [1, params[1]]
+                    dist_Y = st.gamma(loc=params_Y[0], scale=params_Y[1])
+                    inv_cdf = np.vectorize(lambda x: dist_Y.ppf(x))
+
+                # Compute CDF_x(X)
+                cdfx[:, par_i] = cdf(X[:, par_i])
+
+                # Compute invCDF_y(cdfx)
+                tr_X[:, par_i] = inv_cdf(cdfx[:, par_i])
+
+        return tr_X
diff --git a/build/lib/bayesvalidrox/surrogate_models/inputs.py b/build/lib/bayesvalidrox/surrogate_models/inputs.py
new file mode 100644
index 000000000..094e1066f
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/inputs.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Inputs and related marginal distributions
+"""
+
class Input:
    """
    Container for the uncertain input parameters of a model.

    Attributes
    ----------
    Marginals : list
        The collected marginal objects, see `inputs.Marginal`.
    Rosenblatt : bool
        Whether a Rosenblatt transformation should be applied to handle
        dependent input parameters.

    Examples
    --------
    A marginal with a named distribution:

    >>> Inputs.add_marginals()
    >>> Inputs.Marginals[0].name = 'X_1'
    >>> Inputs.Marginals[0].dist_type = 'uniform'
    >>> Inputs.Marginals[0].parameters = [-5, 5]

    A data-driven marginal, when no distribution is known:

    >>> Inputs.add_marginals()
    >>> Inputs.Marginals[0].name = 'X_1'
    >>> Inputs.Marginals[0].input_data = input_data
    """

    # Class-level switch shared by all Input instances.
    poly_coeffs_flag = True

    def __init__(self):
        self.Marginals = []       # one Marginal per uncertain parameter
        self.Rosenblatt = False   # dependent inputs need this set to True

    def add_marginals(self):
        """
        Append a fresh `Marginal` object to `self.Marginals`.

        Returns
        -------
        None.
        """
        self.Marginals.append(Marginal())
+
+
+# Nested class
class Marginal:
    """
    Specification of one uncertain parameter's marginal distribution.

    Attributes
    ----------
    name : string
        Name of the parameter. The default is `'$x_1$'`.
    dist_type : string
        Name of the distribution. The default is `None`.
    parameters : list
        Parameters matching the chosen distribution type. The default is
        `None`.
    input_data : array
        Raw samples for a data-driven marginal. The default is `[]`.
    moments : list
        List of the moments. The default is `None`.
    """

    def __init__(self):
        # All defaults describe an "empty" marginal; the caller fills in
        # either (dist_type, parameters) or input_data afterwards.
        self.name = '$x_1$'
        self.dist_type = None
        self.parameters = None
        self.input_data = []
        self.moments = None
diff --git a/build/lib/bayesvalidrox/surrogate_models/orthogonal_matching_pursuit.py b/build/lib/bayesvalidrox/surrogate_models/orthogonal_matching_pursuit.py
new file mode 100644
index 000000000..96ef9c1d5
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/orthogonal_matching_pursuit.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jul 15 14:08:59 2022
+
+@author: farid
+"""
+import numpy as np
+from sklearn.base import RegressorMixin
+from sklearn.linear_model._base import LinearModel
+from sklearn.utils import check_X_y
+
+
def corr(x, y):
    """Return |<x, y>| scaled by the Euclidean norm of `x`."""
    inner = np.dot(x, y)
    norm_x = np.linalg.norm(x)
    return np.abs(inner) / norm_x
+
+
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    '''
    Regression with Orthogonal Matching Pursuit [1].

    Parameters
    ----------
    fit_intercept : boolean, optional (DEFAULT = True)
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional (DEFAULT = False)
        If True, the regressors are scaled to unit standard deviation before
        fitting (only takes effect together with `fit_intercept`).

    copy_X : boolean, optional (DEFAULT = True)
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional (DEFAULT = FALSE)
        Verbose mode when fitting the model

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of posterior distribution)

    active_ : array, dtype = np.bool, shape = (n_features)
       True for non-zero coefficients, False otherwise

    References
    ----------
    [1] Pati, Y., Rezaiifar, R., Krishnaprasad, P. (1993). Orthogonal matching
        pursuit: recursive function approximation with application to wavelet
        decomposition. Proceedings of 27th Asilomar Conference on Signals,
        Systems and Computers, 40-44.
    '''

    def __init__(self, fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def _preprocess_data(self, X, y):
        """Center and scale data.

        Centers data to have mean zero along axis 0. If fit_intercept=False
        no centering is done, but normalization can still be applied. The
        function returns the statistics necessary to reconstruct the input
        data, which are X_offset, y_offset, X_scale, such that the output
            X = (X - X_offset) / X_scale
        X_scale is the per-column standard deviation of X - X_offset (ones
        when `normalize` is False).

        Returns
        -------
        X, y, X_offset, y_offset, X_scale
        """
        if self.copy_X:
            X = X.copy(order='K')

        y = np.asarray(y, dtype=X.dtype)

        if self.fit_intercept:
            X_offset = np.average(X, axis=0)
            X -= X_offset
            if self.normalize:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)
                std = np.sqrt(np.sum(X**2, axis=0)/(len(X)-1))
                # Leave zero-variance columns unscaled to avoid division by 0.
                X_scale[std != 0] = std[std != 0]
                X /= X_scale
            else:
                X_scale = np.ones(X.shape[1], dtype=X.dtype)
            y_offset = np.mean(y)
            y = y - y_offset
        else:
            X_offset = np.zeros(X.shape[1], dtype=X.dtype)
            X_scale = np.ones(X.shape[1], dtype=X.dtype)
            if y.ndim == 1:
                y_offset = X.dtype.type(0)
            else:
                y_offset = np.zeros(y.shape[1], dtype=X.dtype)

        return X, y, X_offset, y_offset, X_scale

    def fit(self, X, y):
        '''
        Fits Regression with Orthogonal Matching Pursuit Algorithm.

        Parameters
        ----------
        X: {array-like, sparse matrix} of size (n_samples, n_features)
           Training data, matrix of explanatory variables

        y: array-like of size [n_samples, n_features]
           Target values

        Returns
        -------
        self : object
            Returns self.
        '''
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape

        X, y, X_mean, y_mean, X_std = self._preprocess_data(X, y)
        self._x_mean_ = X_mean
        self._y_mean = y_mean
        self._x_std = X_std

        # Normalize columns of Psi, so that each column has norm = 1
        norm_X = np.linalg.norm(X, axis=0)
        X_norm = X/norm_X

        # Initialize residual vector to full model response and normalize
        R = y
        norm_y = np.sqrt(np.dot(y, y))
        r = y/norm_y

        # Check for constant regressors (columns with no variation).
        const_indices = np.where(~np.diff(X, axis=0).any(axis=0))[0]
        # BUGFIX: the original `not const_indices` inverted the check and
        # raised on multi-element arrays; we want True iff a constant
        # column exists (the comment below relies on that meaning).
        bool_const = const_indices.size > 0

        # Start regression using OMP algorithm
        precision = 0        # Set precision criterion to precision of program
        early_stop = True
        cond_early = True    # Initialize condition for early stop
        ind = []
        iindx = []           # index of selected columns
        indtot = np.arange(n_features)  # Full index set for remaining columns
        kmax = min(n_samples, n_features)  # Maximum number of iterations
        # np.PINF was removed in NumPy 2.0; np.inf is equivalent.
        LOO = np.inf * np.ones(kmax)   # Store LOO error at each iteration
        LOOmin = np.inf                # Initialize minimum value of LOO
        coeff = np.zeros((n_features, kmax))
        count = 0
        k = 0.1                # Percentage of iteration history for early stop

        # Begin iteration over regressors set (Matrix X)
        # NOTE(review): the third term mixes `or`, `^` and `~` on Python
        # bools; kept verbatim to preserve the original stopping behavior.
        while (np.linalg.norm(R) > precision) and (count <= kmax-1) and \
              ((cond_early or early_stop) ^ ~cond_early):

            # Update index set of columns yet to select
            if count != 0:
                indtot = np.delete(indtot, iindx)

            # Find column of X that is most correlated with residual
            h = abs(np.dot(r, X_norm))
            iindx = np.argmax(h[indtot])
            indx = indtot[iindx]

            # initialize with the constant regressor, if it exists in the basis
            if (count == 0) and bool_const:
                # overwrite values for iindx and indx
                iindx = const_indices[0]
                indx = indtot[iindx]

            # Invert the information matrix at the first iteration, later only
            # update its value on the basis of the previously inverted one,
            if count == 0:
                M = 1 / np.dot(X[:, indx], X[:, indx])
            else:
                # renamed from `r`/`x` to avoid shadowing the residual vector
                cross = np.dot(X[:, ind].T, X[:, indx])
                d_new = np.dot(X[:, indx], X[:, indx])
                M = self.blockwise_inverse(M, cross, cross.T, d_new)

            # Add newly found index to the selected indexes set
            ind.append(indx)

            # Select regressors subset (Projection subspace)
            Xpro = X[:, ind]

            # Obtain coefficient by performing OLS
            TT = np.dot(y, Xpro)
            beta = np.dot(M, TT)
            coeff[ind, count] = beta

            # Compute LOO error
            LOO[count] = self.loo_error(Xpro, M, y, beta)

            # Compute new residual due to new projection
            R = y - np.dot(Xpro, beta)

            # Normalize residual
            norm_R = np.sqrt(np.dot(R, R))
            r = R / norm_R

            # Update counters and early-stop criterions
            countinf = max(0, int(count-k*kmax))
            LOOmin = min(LOOmin, LOO[count])

            if count == 0:
                cond_early = (LOO[0] <= LOOmin)
            else:
                cond_early = (min(LOO[countinf:count+1]) <= LOOmin)

            if self.verbose:
                print(f'Iteration: {count+1}, mod. LOOCV error : '
                      f'{LOO[count]:.2e}')

            # Update counter
            count += 1

        # Select projection with smallest cross-validation error.
        # BUGFIX: `LOO[:-1]` silently dropped the final iteration when
        # count == kmax; only the `count` computed entries are candidates.
        countmin = np.argmin(LOO[:count]) if count else 0
        self.coef_ = coeff[:, countmin]
        self.active = coeff[:, countmin] != 0.0
        # Alias matching the documented attribute name (kept both for
        # backward compatibility with callers using `.active`).
        self.active_ = self.active

        # set intercept_
        if self.fit_intercept:
            self.coef_ = self.coef_ / X_std
            self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
        else:
            self.intercept_ = 0.

        return self

    def predict(self, X):
        '''
        Computes predictive distribution for test set.

        Parameters
        ----------
        X: {array-like, sparse} (n_samples_test, n_features)
           Test data, matrix of explanatory variables

        Returns
        -------
        y_hat: numpy array of size (n_samples_test,)
               Estimated values of targets on test set (i.e. mean of
               predictive distribution)
        '''
        y_hat = np.dot(X, self.coef_) + self.intercept_

        return y_hat

    def loo_error(self, psi, inv_inf_matrix, y, coeffs):
        """
        Calculates the corrected LOO error for regression on regressor
        matrix `psi` that generated the coefficients based on [1] and [2].

        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
            uncertainty propagation and sensitivity analysis (Doctoral
            dissertation, Clermont-Ferrand 2).

        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
            expansion based on least angle regression. Journal of computational
            Physics, 230(6), pp.2345-2367.

        Parameters
        ----------
        psi : array of shape (n_samples, n_feature)
            Orthogonal bases evaluated at the samples.
        inv_inf_matrix : array
            Inverse of the information matrix.
        y : array of shape (n_samples, )
            Targets.
        coeffs : array
            Computed regresssor cofficients.

        Returns
        -------
        loo_error : float
            Modified LOOCV error.
        """
        # NrEvaluation (Size of experimental design)
        N, P = psi.shape

        # h factor (the full matrix is not calculated explicitly,
        # only the trace is, to save memory)
        PsiM = np.dot(psi, inv_inf_matrix)
        h = np.sum(np.multiply(PsiM, psi), axis=1, dtype=np.longdouble)

        # Residuals
        residual = np.dot(psi, coeffs) - y

        # Variance
        varY = np.var(y)

        # Degenerate response: nothing to explain.
        # BUGFIX: returning early avoids 0 * inf = nan for N <= P.
        if varY == 0:
            return 0

        loo_error = np.mean(np.square(residual / (1-h))) / varY

        # if there are NaNs, just return an infinite LOO error (this
        # happens, e.g., when a strongly underdetermined problem is solved)
        if np.isnan(loo_error):
            loo_error = np.inf

        # Corrected Error for over-determined system
        tr_M = np.trace(np.atleast_2d(inv_inf_matrix))
        if tr_M < 0 or abs(tr_M) > 1e6:
            tr_M = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))

        # Over-determined system of Equation
        if N > P:
            T_factor = N/(N-P) * (1 + tr_M)
        # Under-determined system of Equation
        else:
            T_factor = np.inf

        return loo_error * T_factor

    def blockwise_inverse(self, Ainv, B, C, D):
        """
        Inverse of a non-singular square matrix M defined as
        M = [[A B]; [C D]], given the inverse of its submatrix A.
        B, C and D can have any dimension, provided their combination defines
        a square matrix M.

        Parameters
        ----------
        Ainv : float or array
            inverse of the square-submatrix A.
        B : float or array
            Information matrix with all new regressor.
        C : float or array
            Transpose of B.
        D : float or array
            Information matrix with all selected regressors.

        Returns
        -------
        M : array
            Inverse of the information matrix.
        """
        if np.isscalar(D):
            # Schur complement of A (scalar case, the one used by `fit`)
            SCinv = 1/(D - np.dot(C, np.dot(Ainv, B[:, None])))[0]
        else:
            # BUGFIX: `np.eye(D.shape)` and element-wise `C*Ainv*B` were
            # invalid; use the matrix dimension and real matrix products.
            SCinv = np.linalg.solve(D - np.dot(C, np.dot(Ainv, B)),
                                    np.eye(D.shape[0]))

        T1 = np.dot(Ainv, np.dot(B[:, None], SCinv))
        T2 = np.dot(C, Ainv)

        # Assemble the inverse matrix
        M = np.vstack((
            np.hstack((Ainv+T1*T2, -T1)),
            np.hstack((-(SCinv)*T2, SCinv))
            ))
        return M
diff --git a/build/lib/bayesvalidrox/surrogate_models/reg_fast_ard.py b/build/lib/bayesvalidrox/surrogate_models/reg_fast_ard.py
new file mode 100644
index 000000000..e6883a3ed
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/reg_fast_ard.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Mar 24 19:41:45 2020
+
+@author: farid
+"""
+import numpy as np
+from scipy.linalg import solve_triangular
+from numpy.linalg import LinAlgError
+from sklearn.base import RegressorMixin
+from sklearn.linear_model._base import LinearModel
+import warnings
+from sklearn.utils import check_X_y
+from scipy.linalg import pinvh
+
+
def update_precisions(Q, S, q, s, A, active, tol, n_samples, clf_bias):
    '''
    Selects one feature to be added/recomputed/deleted to model based on
    effect it will have on value of log marginal likelihood.

    Parameters
    ----------
    Q, S : arrays of shape (n_features,)
        Quality and sparsity factors of all features.
    q, s : arrays of shape (n_features,)
        Quality and sparsity factors with the candidate feature excluded.
    A : array of shape (n_features,)
        Precision parameters of the weights (updated in place).
    active : bool array of shape (n_features,)
        True for features currently in the model (updated in place).
    tol : float
        Convergence threshold on the change of precision parameters.
    n_samples : int
        Number of training samples (normalizes the likelihood change).
    clf_bias : bool
        True if feature 0 is a classification bias term that must never be
        removed.

    Returns
    -------
    [A, converged] : list
        Updated precisions and a convergence flag.
    '''
    # initialise vector holding changes in log marginal likelihood
    deltaL = np.zeros(Q.shape[0])

    # identify features that can be added , recomputed and deleted in model
    theta     = q**2 - s
    add       = (theta > 0) * (active == False)
    recompute = (theta > 0) * (active == True)
    delete    = ~(add | recompute)

    # compute sparsity & quality parameters corresponding to features in
    # three groups identified above
    Qadd, Sadd       = Q[add], S[add]
    Qrec, Srec, Arec = Q[recompute], S[recompute], A[recompute]
    Qdel, Sdel, Adel = Q[delete], S[delete], A[delete]

    # compute new alpha's (precision parameters) for features that are
    # currently in model and will be recomputed
    Anew        = s[recompute]**2 / (theta[recompute] + np.finfo(np.float32).eps)
    delta_alpha = (1./Anew - 1./Arec)

    # compute change in log marginal likelihood
    deltaL[add]       = (Qadd**2 - Sadd) / Sadd + np.log(Sadd/Qadd**2)
    deltaL[recompute] = Qrec**2 / (Srec + 1. / delta_alpha) - np.log(1 + Srec*delta_alpha)
    deltaL[delete]    = Qdel**2 / (Sdel - Adel) - np.log(1 - Sdel / Adel)
    deltaL            = deltaL / n_samples

    # find feature which caused largest change in likelihood
    feature_index = np.argmax(deltaL)

    # no deletions or additions
    same_features = np.sum(theta[~recompute] > 0) == 0

    # changes in precision for features already in model is below threshold
    no_delta = np.sum(abs(Anew - Arec) > tol) == 0

    # check convergence: if no features to add or delete and small change in
    # precision for current features then terminate
    converged = False
    if same_features and no_delta:
        converged = True
        return [A, converged]

    # if not converged update precision parameter of weights and return
    if theta[feature_index] > 0:
        A[feature_index] = s[feature_index]**2 / theta[feature_index]
        if active[feature_index] == False:
            active[feature_index] = True
    else:
        # at least two active features
        if active[feature_index] == True and np.sum(active) >= 2:
            # do not remove bias term in classification
            # (in regression it is factored in through centering)
            if not (feature_index == 0 and clf_bias):
                active[feature_index] = False
                # np.PINF was removed in NumPy 2.0; np.inf is equivalent
                A[feature_index] = np.inf

    return [A, converged]
+
+
+class RegressionFastARD(LinearModel, RegressorMixin):
+    '''
+    Regression with Automatic Relevance Determination (Fast Version uses
+    Sparse Bayesian Learning)
+    https://github.com/AmazaspShumik/sklearn-bayes/blob/master/skbayes/rvm_ard_models/fast_rvm.py
+
+    Parameters
+    ----------
+    n_iter: int, optional (DEFAULT = 100)
+        Maximum number of iterations
+
+    start: list, optional (DEFAULT = None)
+        Initial selected features.
+
+    tol: float, optional (DEFAULT = 1e-3)
+        If absolute change in precision parameter for weights is below threshold
+        algorithm terminates.
+
+    fit_intercept : boolean, optional (DEFAULT = True)
+        whether to calculate the intercept for this model. If set
+        to false, no intercept will be used in calculations
+        (e.g. data is expected to be already centered).
+
+    copy_X : boolean, optional (DEFAULT = True)
+        If True, X will be copied; else, it may be overwritten.
+
+    compute_score : bool, default=False
+        If True, compute the log marginal likelihood at each iteration of the
+        optimization.
+
+    verbose : boolean, optional (DEFAULT = FALSE)
+        Verbose mode when fitting the model
+
+    Attributes
+    ----------
+    coef_ : array, shape = (n_features)
+        Coefficients of the regression model (mean of posterior distribution)
+
+    alpha_ : float
+       estimated precision of the noise
+
+    active_ : array, dtype = np.bool, shape = (n_features)
+       True for non-zero coefficients, False otherwise
+
+    lambda_ : array, shape = (n_features)
+       estimated precisions of the coefficients
+
+    sigma_ : array, shape = (n_features, n_features)
+        estimated covariance matrix of the weights, computed only
+        for non-zero coefficients
+
+    scores_ : array-like of shape (n_iter_+1,)
+        If computed_score is True, value of the log marginal likelihood (to be
+        maximized) at each iteration of the optimization.
+
+    References
+    ----------
+    [1] Fast marginal likelihood maximisation for sparse Bayesian models
+    (Tipping & Faul 2003) (http://www.miketipping.com/papers/met-fastsbl.pdf)
+    [2] Analysis of sparse Bayesian learning (Tipping & Faul 2001)
+        (http://www.miketipping.com/abstracts.htm#Faul:NIPS01)
+    '''
+
+    def __init__(self, n_iter=300, start=None, tol=1e-3, fit_intercept=True,
+                 normalize=False, copy_X=True, compute_score=False, verbose=False):
+        self.n_iter          = n_iter
+        self.start           = start
+        self.tol             = tol
+        self.scores_         = list()
+        self.fit_intercept   = fit_intercept
+        self.normalize       = normalize
+        self.copy_X          = copy_X
+        self.compute_score   = compute_score
+        self.verbose         = verbose
+
+    def _preprocess_data(self, X, y):
+        """Center and scale data.
+        Centers data to have mean zero along axis 0. If fit_intercept=False or
+        if the X is a sparse matrix, no centering is done, but normalization
+        can still be applied. The function returns the statistics necessary to
+        reconstruct the input data, which are X_offset, y_offset, X_scale, such
+        that the output
+            X = (X - X_offset) / X_scale
+        X_scale is the L2 norm of X - X_offset.
+        """
+
+        if self.copy_X:
+            X = X.copy(order='K')
+
+        y = np.asarray(y, dtype=X.dtype)
+
+        if self.fit_intercept:
+            X_offset = np.average(X, axis=0)
+            X -= X_offset
+            if self.normalize:
+                X_scale = np.ones(X.shape[1], dtype=X.dtype)
+                std = np.sqrt(np.sum(X**2, axis=0)/(len(X)-1))
+                X_scale[std != 0] = std[std != 0]
+                X /= X_scale
+            else:
+                X_scale = np.ones(X.shape[1], dtype=X.dtype)
+            y_offset = np.mean(y)
+            y = y - y_offset
+        else:
+            X_offset = np.zeros(X.shape[1], dtype=X.dtype)
+            X_scale = np.ones(X.shape[1], dtype=X.dtype)
+            if y.ndim == 1:
+                y_offset = X.dtype.type(0)
+            else:
+                y_offset = np.zeros(y.shape[1], dtype=X.dtype)
+
+        return X, y, X_offset, y_offset, X_scale
+
+    def fit(self, X, y):
+        '''
+        Fits ARD Regression with Sequential Sparse Bayes Algorithm.
+
+        Parameters
+        -----------
+        X: {array-like, sparse matrix} of size (n_samples, n_features)
+           Training data, matrix of explanatory variables
+
+        y: array-like of size [n_samples, n_features]
+           Target values
+
+        Returns
+        -------
+        self : object
+            Returns self.
+        '''
+        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
+        n_samples, n_features = X.shape
+
+        X, y, X_mean, y_mean, X_std = self._preprocess_data(X, y)
+        self._x_mean_ = X_mean
+        self._y_mean = y_mean
+        self._x_std = X_std
+
+        #  precompute X'*Y , X'*X for faster iterations & allocate memory for
+        #  sparsity & quality vectors
+        XY = np.dot(X.T, y)
+        XX = np.dot(X.T, X)
+        XXd = np.diag(XX)
+
+        #  initialise precision of noise & and coefficients
+        var_y = np.var(y)
+
+        # check that variance is non zero !!!
+        if var_y == 0:
+            beta = 1e-2
+            self.var_y = True
+        else:
+            beta = 1. / np.var(y)
+            self.var_y = False
+
+        A = np.PINF * np.ones(n_features)
+        active = np.zeros(n_features, dtype=np.bool)
+
+        if self.start is not None and not hasattr(self, 'active_'):
+            start = self.start
+            # start from a given start basis vector
+            proj = XY**2 / XXd
+            active[start] = True
+            A[start] = XXd[start]/(proj[start] - var_y)
+
+        else:
+            # in case of almost perfect multicollinearity between some features
+            # start from feature 0
+            if np.sum(XXd - X_mean**2 < np.finfo(np.float32).eps) > 0:
+                A[0] = np.finfo(np.float16).eps
+                active[0] = True
+
+            else:
+                # start from a single basis vector with largest projection on
+                # targets
+                proj = XY**2 / XXd
+                start = np.argmax(proj)
+                active[start] = True
+                A[start] = XXd[start]/(proj[start] - var_y +
+                                       np.finfo(np.float32).eps)
+
+        warning_flag = 0
+        scores_ = []
+        for i in range(self.n_iter):
+            # Handle variance zero
+            if self.var_y:
+                A[0] = y_mean
+                active[0] = True
+                converged = True
+                break
+
+            XXa = XX[active, :][:, active]
+            XYa = XY[active]
+            Aa = A[active]
+
+            # mean & covariance of posterior distribution
+            Mn, Ri, cholesky = self._posterior_dist(Aa, beta, XXa, XYa)
+            if cholesky:
+                Sdiag = np.sum(Ri**2, 0)
+            else:
+                Sdiag = np.copy(np.diag(Ri))
+                warning_flag += 1
+
+            # raise warning in case cholesky fails
+            if warning_flag == 1:
+                warnings.warn(("Cholesky decomposition failed! Algorithm uses "
+                               "pinvh, which is significantly slower. If you "
+                               "use RVR it is advised to change parameters of "
+                               "the kernel!"))
+
+            # compute quality & sparsity parameters
+            s, q, S, Q = self._sparsity_quality(XX, XXd, XY, XYa, Aa, Ri,
+                                                active, beta, cholesky)
+
+            # update precision parameter for noise distribution
+            rss = np.sum((y - np.dot(X[:, active], Mn))**2)
+
+            # if near perfect fit , then terminate
+            if (rss / n_samples/var_y) < self.tol:
+                warnings.warn('Early termination due to near perfect fit')
+                converged = True
+                break
+            beta = n_samples - np.sum(active) + np.sum(Aa * Sdiag)
+            beta /= rss
+            # beta /= (rss + np.finfo(np.float32).eps)
+
+            # update precision parameters of coefficients
+            A, converged = update_precisions(Q, S, q, s, A, active, self.tol,
+                                             n_samples, False)
+
+            if self.compute_score:
+                scores_.append(self.log_marginal_like(XXa, XYa, Aa, beta))
+
+            if self.verbose:
+                print(('Iteration: {0}, number of features '
+                       'in the model: {1}').format(i, np.sum(active)))
+
+            if converged or i == self.n_iter - 1:
+                if converged and self.verbose:
+                    print('Algorithm converged!')
+                break
+
+        # after last update of alpha & beta update parameters
+        # of posterior distribution
+        XXa, XYa, Aa = XX[active, :][:, active], XY[active], A[active]
+        Mn, Sn, cholesky = self._posterior_dist(Aa, beta, XXa, XYa, True)
+        self.coef_ = np.zeros(n_features)
+        self.coef_[active] = Mn
+        self.sigma_ = Sn
+        self.active_ = active
+        self.lambda_ = A
+        self.alpha_ = beta
+        self.converged = converged
+        if self.compute_score:
+            self.scores_ = np.array(scores_)
+
+        # set intercept_
+        if self.fit_intercept:
+            self.coef_ = self.coef_ / X_std
+            self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
+        else:
+            self.intercept_ = 0.
+        return self
+
+    def log_marginal_like(self, XXa, XYa, Aa, beta):
+        """Computes the log of the marginal likelihood."""
+        N, M = XXa.shape
+        A = np.diag(Aa)
+
+        Mn, sigma_, cholesky = self._posterior_dist(Aa, beta, XXa, XYa,
+                                                    full_covar=True)
+
+        C = sigma_ + np.dot(np.dot(XXa.T, np.linalg.pinv(A)), XXa)
+
+        score = np.dot(np.dot(XYa.T, np.linalg.pinv(C)), XYa) +\
+            np.log(np.linalg.det(C)) + N * np.log(2 * np.pi)
+
+        return -0.5 * score
+
+    def predict(self, X, return_std=False):
+        '''
+        Computes predictive distribution for test set.
+        Predictive distribution for each data point is one dimensional
+        Gaussian and therefore is characterised by mean and variance based on
+        Ref.[1] Section 3.3.2.
+
+        Parameters
+        -----------
+        X: {array-like, sparse} (n_samples_test, n_features)
+           Test data, matrix of explanatory variables
+
+        Returns
+        -------
+        : list of length two [y_hat, var_hat]
+
+             y_hat: numpy array of size (n_samples_test,)
+                    Estimated values of targets on test set (i.e. mean of
+                    predictive distribution)
+
+                var_hat: numpy array of size (n_samples_test,)
+                    Variance of predictive distribution
+        References
+        ----------
+        [1] Bishop, C. M. (2006). Pattern recognition and machine learning.
+        springer.
+        '''
+
+        y_hat = np.dot(X, self.coef_) + self.intercept_
+
+        if return_std:
+            # Handle the zero variance case
+            if self.var_y:
+                return y_hat, np.zeros_like(y_hat)
+
+            if self.normalize:
+                X -= self._x_mean_[self.active_]
+                X /= self._x_std[self.active_]
+            var_hat = 1./self.alpha_
+            var_hat += np.sum(X.dot(self.sigma_) * X, axis=1)
+            std_hat = np.sqrt(var_hat)
+            return y_hat, std_hat
+        else:
+            return y_hat
+
+    def _posterior_dist(self, A, beta, XX, XY, full_covar=False):
+        '''
+        Calculates mean and covariance matrix of posterior distribution
+        of coefficients.
+        '''
+        # compute precision matrix for active features
+        Sinv = beta * XX
+        np.fill_diagonal(Sinv, np.diag(Sinv) + A)
+        cholesky = True
+
+        # try cholesky, if it fails go back to pinvh
+        try:
+            # find posterior mean : R*R.T*mean = beta*X.T*Y
+            # solve(R*z = beta*X.T*Y) =>find z=> solve(R.T*mean = z)=>find mean
+            R = np.linalg.cholesky(Sinv)
+            Z = solve_triangular(R, beta*XY, check_finite=True, lower=True)
+            Mn = solve_triangular(R.T, Z, check_finite=True, lower=False)
+
+            # invert lower triangular matrix from cholesky decomposition
+            Ri = solve_triangular(R, np.eye(A.shape[0]), check_finite=False,
+                                  lower=True)
+            if full_covar:
+                Sn = np.dot(Ri.T, Ri)
+                return Mn, Sn, cholesky
+            else:
+                return Mn, Ri, cholesky
+        except LinAlgError:
+            cholesky = False
+            Sn = pinvh(Sinv)
+            Mn = beta*np.dot(Sinv, XY)
+            return Mn, Sn, cholesky
+
+    def _sparsity_quality(self, XX, XXd, XY, XYa, Aa, Ri, active, beta, cholesky):
+        '''
+        Calculates sparsity and quality parameters for each feature
+
+        Theoretical Note:
+        -----------------
+        Here we used Woodbury Identity for inverting covariance matrix
+        of target distribution
+        C    = 1/beta + 1/alpha * X' * X
+        C^-1 = beta - beta^2 * X * Sn * X'
+        '''
+        bxy = beta*XY
+        bxx = beta*XXd
+        if cholesky:
+            # here Ri is inverse of lower triangular matrix obtained from
+            # cholesky decomp
+            xxr = np.dot(XX[:, active], Ri.T)
+            rxy = np.dot(Ri, XYa)
+            S = bxx - beta**2 * np.sum(xxr**2, axis=1)
+            Q = bxy - beta**2 * np.dot(xxr, rxy)
+        else:
+            # here Ri is covariance matrix
+            XXa = XX[:, active]
+            XS = np.dot(XXa, Ri)
+            S = bxx - beta**2 * np.sum(XS*XXa, 1)
+            Q = bxy - beta**2 * np.dot(XS, XYa)
+        # Use following:
+        # (EQ 1) q = A*Q/(A - S) ; s = A*S/(A-S)
+        # so if A = np.PINF q = Q, s = S
+        qi = np.copy(Q)
+        si = np.copy(S)
+        # If A is not np.PINF, then it should be 'active' feature => use (EQ 1)
+        Qa, Sa = Q[active], S[active]
+        qi[active] = Aa * Qa / (Aa - Sa)
+        si[active] = Aa * Sa / (Aa - Sa)
+
+        return [si, qi, S, Q]
diff --git a/build/lib/bayesvalidrox/surrogate_models/reg_fast_laplace.py b/build/lib/bayesvalidrox/surrogate_models/reg_fast_laplace.py
new file mode 100644
index 000000000..7fdcb5cf6
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/reg_fast_laplace.py
@@ -0,0 +1,452 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import numpy as np
+from sklearn.utils import as_float_array
+from sklearn.model_selection import KFold
+
+
class RegressionFastLaplace():
    '''
    Sparse regression with Bayesian Compressive Sensing as described in Alg. 1
    (Fast Laplace) of Ref.[1], which updated formulas from [2].

    sigma2: noise precision (sigma^2)
    nu fixed to 0

    uqlab/lib/uq_regression/BCS/uq_bsc.m

    Parameters
    ----------
    n_iter: int, optional (DEFAULT = 1000)
        Maximum number of iterations

    tol: float, optional (DEFAULT = 1e-7)
        If absolute change in precision parameter for weights is below
        threshold algorithm terminates.

    fit_intercept : boolean, optional (DEFAULT = True)
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    copy_X : boolean, optional (DEFAULT = True)
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional (DEFAULT = FALSE)
        Verbose mode when fitting the model

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of posterior distribution)

    alpha_ : float
       estimated precision of the noise

    active_ : array, dtype = bool, shape = (n_features)
       True for non-zero coefficients, False otherwise

    lambda_ : array, shape = (n_features)
       estimated precisions of the coefficients

    sigma_ : array, shape = (n_features, n_features)
        estimated covariance matrix of the weights, computed only
        for non-zero coefficients

    References
    ----------
    [1] Babacan, S. D., Molina, R., & Katsaggelos, A. K. (2009). Bayesian
        compressive sensing using Laplace priors. IEEE Transactions on image
        processing, 19(1), 53-63.
    [2] Fast marginal likelihood maximisation for sparse Bayesian models
        (Tipping & Faul 2003).
        (http://www.miketipping.com/papers/met-fastsbl.pdf)
    '''

    def __init__(self, n_iter=1000, n_Kfold=10, tol=1e-7, fit_intercept=False,
                 bias_term=True, copy_X=True, verbose=False):
        self.n_iter = n_iter
        self.n_Kfold = n_Kfold
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.bias_term = bias_term
        self.copy_X = copy_X
        self.verbose = verbose

    def _center_data(self, X, y):
        '''
        Centers data, i.e. removes the column means when ``fit_intercept``
        is set. Normalisation is expected to happen in preprocessing, so
        the returned scale is always a vector of ones.

        Returns
        -------
        X, y, X_mean, y_mean, X_std
        '''
        X = as_float_array(X, copy = self.copy_X)

        # normalisation should be done in preprocessing!
        X_std = np.ones(X.shape[1], dtype=X.dtype)
        if self.fit_intercept:
            X_mean = np.average(X, axis=0)
            y_mean = np.average(y, axis=0)
            X -= X_mean
            y -= y_mean
        else:
            X_mean = np.zeros(X.shape[1], dtype=X.dtype)
            y_mean = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
        return X, y, X_mean, y_mean, X_std

    def fit(self, X, y):
        '''
        Select the noise variance by k-fold cross-validation and fit the
        final model with the best candidate.

        The candidate grid spans N*Var(y)*10^[-16, -1]; for each candidate
        the normalised mean squared CV error is recorded and the winner is
        used for the final fit on the full data set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
        '''
        k_fold = KFold(n_splits=self.n_Kfold)

        # guard against a constant target (zero variance)
        varY = np.var(y, ddof=1) if np.var(y, ddof=1) != 0 else 1.0
        sigma2s = len(y)*varY*(10**np.linspace(-16, -1, self.n_Kfold))

        errors = np.zeros((len(sigma2s), self.n_Kfold))
        for s, sigma2 in enumerate(sigma2s):
            for k, (train, test) in enumerate(k_fold.split(X, y)):
                self.fit_(X[train], y[train], sigma2)
                errors[s, k] = np.linalg.norm(
                    y[test] - self.predict(X[test])
                    )**2/len(test)

        # normalised k-fold CV error per sigma2 candidate
        KfCVerror = np.sum(errors, axis=1)/self.n_Kfold/varY
        i_minCV = np.argmin(KfCVerror)

        self.kfoldCVerror = np.min(KfCVerror)

        return self.fit_(X, y, sigma2s[i_minCV])

    def fit_(self, X, y, sigma2):
        '''
        Fit the sparse Bayesian model for a fixed noise variance ``sigma2``
        with the fast Laplace algorithm (Alg. 1 of Ref.[1], update formulas
        of Ref.[2]). One regressor is added, re-estimated or deleted per
        iteration, whichever maximises the marginal-likelihood gain.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Regression matrix (Psi).
        y : array-like of shape (n_samples,)
            Target values.
        sigma2 : float
            Fixed noise variance.

        Returns
        -------
        self : object
        '''
        N, P = X.shape
        # n_samples, n_features = X.shape

        X, y, X_mean, y_mean, X_std = self._center_data(X, y)
        self._x_mean_ = X_mean
        self._y_mean = y_mean
        self._x_std = X_std

        # check that variance is non zero !!!
        if np.var(y) == 0:
            self.var_y = True
        else:
            self.var_y = False
        beta = 1./sigma2

        #  precompute X'*Y , X'*X for faster iterations & allocate memory for
        #  sparsity & quality vectors X=Psi
        PsiTY = np.dot(X.T, y)
        PsiTPsi = np.dot(X.T, X)
        XXd = np.diag(PsiTPsi)

        # initialize with constant regressor, or if that one does not exist,
        # with the one that has the largest correlation with Y
        ind_global_to_local = np.zeros(P, dtype=np.int32)

        # identify constant regressors
        # NOTE(review): this flags any column with at least one repeated
        # consecutive value, not only truly constant columns -- verify
        # against the intended uq_bsc.m behaviour
        constidx = np.where(~np.diff(X, axis=0).all(axis=0))[0]

        if self.bias_term and constidx.size != 0:
            ind_start = constidx[0]
            ind_global_to_local[ind_start] = True
        else:
            # start from a single basis vector with largest projection on
            # targets
            proj = np.divide(np.square(PsiTY), XXd)
            ind_start = np.argmax(proj)
            ind_global_to_local[ind_start] = True

        num_active = 1
        active_indices = [ind_start]
        deleted_indices = []
        bcs_path = [ind_start]
        gamma = np.zeros(P)
        # for the initial value of gamma(ind_start), use the RVM formula
        #   gamma = (q^2 - s) / (s^2)
        # and the fact that initially s = S = beta*Psi_i'*Psi_i and q = Q =
        # beta*Psi_i'*Y
        gamma[ind_start] = np.square(PsiTY[ind_start])
        gamma[ind_start] -= sigma2 * PsiTPsi[ind_start, ind_start]
        gamma[ind_start] /= np.square(PsiTPsi[ind_start, ind_start])

        Sigma = 1. / (beta * PsiTPsi[ind_start, ind_start]
                      + 1./gamma[ind_start])

        mu = Sigma * PsiTY[ind_start] * beta
        tmp1 = beta * PsiTPsi[ind_start]
        S = beta * np.diag(PsiTPsi).T - Sigma * np.square(tmp1)
        Q = beta * PsiTY.T - mu*(tmp1)

        tmp2 = np.ones(P)  # alternative computation for the initial s,q
        q0tilde = PsiTY[ind_start]
        s0tilde = PsiTPsi[ind_start, ind_start]
        tmp2[ind_start] = s0tilde / (q0tilde**2) / beta
        s = np.divide(S, tmp2)
        q = np.divide(Q, tmp2)
        Lambda = 2*(num_active - 1) / np.sum(gamma)

        Delta_L_max = []
        for i in range(self.n_iter):
            # Handle variance zero: the best model for a constant target is
            # just its mean
            if self.var_y:
                mu = np.mean(y)
                break

            if self.verbose:
                print('    lambda = {0:.6e}\n'.format(Lambda))

            # Calculate the potential updated value of each gamma[i]
            if Lambda == 0.0:  # RVM
                gamma_potential = np.multiply((
                    (q**2 - s) > Lambda),
                    np.divide(q**2 - s, s**2)
                    )
            else:
                a = Lambda * s**2
                b = s**2 + 2*Lambda*s
                c = Lambda + s - q**2
                gamma_potential = np.multiply(
                    (c < 0), np.divide(
                        -b + np.sqrt(b**2 - 4*np.multiply(a, c)), 2*a)
                    )

            l_gamma = - np.log(np.absolute(1 + np.multiply(gamma, s)))
            l_gamma += np.divide(np.multiply(q**2, gamma),
                                 (1 + np.multiply(gamma, s)))
            l_gamma -= Lambda*gamma  # omitted the factor 1/2

            # Contribution of each updated gamma(i) to L(gamma)
            l_gamma_potential = - np.log(
                np.absolute(1 + np.multiply(gamma_potential, s))
                )
            l_gamma_potential += np.divide(
                np.multiply(q**2, gamma_potential),
                (1 + np.multiply(gamma_potential, s))
                )
            # omitted the factor 1/2
            l_gamma_potential -= Lambda*gamma_potential

            # Check how L(gamma) would change if we replaced gamma(i) by the
            # updated gamma_potential(i), for each i separately
            Delta_L_potential = l_gamma_potential - l_gamma

            # deleted indices should not be chosen again
            if len(deleted_indices) != 0:
                values = -np.inf * np.ones(len(deleted_indices))
                Delta_L_potential[deleted_indices] = values

            Delta_L_max.append(np.nanmax(Delta_L_potential))
            ind_L_max = np.nanargmax(Delta_L_potential)

            # in case there is only 1 regressor in the model and it would now
            # be deleted
            if len(active_indices) == 1 and ind_L_max == active_indices[0] \
               and gamma_potential[ind_L_max] == 0.0:
                Delta_L_potential[ind_L_max] = -np.inf
                Delta_L_max[i] = np.max(Delta_L_potential)
                ind_L_max = np.argmax(Delta_L_potential)

            # If L did not change significantly anymore, break
            if Delta_L_max[i] <= 0.0 or\
                    (i > 0 and all(np.absolute(Delta_L_max[i-1:])
                                   < sum(Delta_L_max)*self.tol)) or \
                    (i > 0 and all(np.diff(bcs_path)[i-1:] == 0.0)):
                if self.verbose:
                    print('Increase in L: {0:.6e} (eta = {1:.3e})\
                          -- break\n'.format(Delta_L_max[i], self.tol))
                break

            # Print information
            if self.verbose:
                print('    Delta L = {0:.6e} \n'.format(Delta_L_max[i]))

            # +1: add, 0: re-estimate, -1: delete
            what_changed = int(gamma[ind_L_max] == 0.0)
            what_changed -= int(gamma_potential[ind_L_max] == 0.0)

            # Print information
            if self.verbose:
                if what_changed < 0:
                    print(f'{i+1} - Remove regressor #{ind_L_max+1}..\n')
                elif what_changed == 0:
                    print(f'{i+1} - Recompute regressor #{ind_L_max+1}..\n')
                else:
                    print(f'{i+1} - Add regressor #{ind_L_max+1}..\n')

            # --- Update all quantities ----
            if what_changed == 1:
                # adding a regressor

                # update gamma
                gamma[ind_L_max] = gamma_potential[ind_L_max]

                Sigma_ii = 1.0 / (1.0/gamma[ind_L_max] + S[ind_L_max])
                try:
                    x_i = np.matmul(
                        Sigma, PsiTPsi[active_indices, ind_L_max].reshape(-1, 1)
                        )
                except ValueError:
                    # Sigma is still a scalar (single active regressor)
                    x_i = Sigma * PsiTPsi[active_indices, ind_L_max]
                tmp_1 = - (beta * Sigma_ii) * x_i
                Sigma = np.vstack(
                    (np.hstack(((beta**2 * Sigma_ii) * np.dot(x_i, x_i.T)
                                + Sigma, tmp_1)), np.append(tmp_1.T, Sigma_ii))
                    )
                mu_i = Sigma_ii * Q[ind_L_max]
                mu = np.vstack((mu - (beta * mu_i) * x_i, mu_i))

                tmp2_1 = PsiTPsi[:, ind_L_max] - beta * np.squeeze(
                    np.matmul(PsiTPsi[:, active_indices], x_i)
                    )
                # NOTE(review): halving the first entry only on the first
                # iteration mirrors the UQLab implementation -- verify
                # against uq_bsc.m
                if i == 0:
                    tmp2_1[0] /= 2
                tmp2 = beta * tmp2_1.T
                S = S - Sigma_ii * np.square(tmp2)
                Q = Q - mu_i * tmp2

                num_active += 1
                ind_global_to_local[ind_L_max] = num_active
                active_indices.append(ind_L_max)
                bcs_path.append(ind_L_max)

            elif what_changed == 0:
                # recomputation
                # zero if regressor has not been chosen yet
                if not ind_global_to_local[ind_L_max]:
                    raise Exception('Cannot recompute index{0} -- not yet\
                                    part of the model!'.format(ind_L_max))
                Sigma = np.atleast_2d(Sigma)
                mu = np.atleast_2d(mu)
                gamma_i_new = gamma_potential[ind_L_max]
                gamma_i_old = gamma[ind_L_max]
                # update gamma
                gamma[ind_L_max] = gamma_potential[ind_L_max]

                # index of regressor in Sigma
                local_ind = ind_global_to_local[ind_L_max]-1

                kappa_i = (1.0/gamma_i_new - 1.0/gamma_i_old)
                kappa_i = 1.0 / kappa_i
                kappa_i += Sigma[local_ind, local_ind]
                kappa_i = 1 / kappa_i
                Sigma_i_col = Sigma[:, local_ind]

                # rank-1 downdate of the posterior covariance.
                # Bug fix: Sigma_i_col * Sigma_i_col.T on 1-D arrays is an
                # elementwise square, not the outer product the update
                # formula of Ref.[2] requires.
                Sigma = Sigma - kappa_i * np.outer(Sigma_i_col, Sigma_i_col)
                mu_i = mu[local_ind]
                mu = mu - (kappa_i * mu_i) * Sigma_i_col[:, None]

                tmp1 = beta * np.dot(
                    Sigma_i_col.reshape(1, -1), PsiTPsi[active_indices])[0]
                S = S + kappa_i * np.square(tmp1)
                Q = Q + (kappa_i * mu_i) * tmp1

                # no change in active_indices or ind_global_to_local
                bcs_path.append(ind_L_max + 0.1)

            elif what_changed == -1:
                # deleting a regressor
                gamma[ind_L_max] = 0

                # index of regressor in Sigma
                local_ind = ind_global_to_local[ind_L_max]-1

                Sigma_ii_inv = 1. / Sigma[local_ind, local_ind]
                Sigma_i_col = Sigma[:, local_ind]
                # posterior mean of the coefficient being deleted; captured
                # BEFORE mu is updated.
                # Bug fix: the Q update below previously used a stale mu_i
                # left over from an earlier add/recompute branch.
                mu_i = mu[local_ind]

                # rank-1 downdate (bug fix: np.outer instead of the
                # elementwise product of 1-D arrays, cf. recompute branch)
                Sigma = Sigma - Sigma_ii_inv * np.outer(Sigma_i_col,
                                                        Sigma_i_col)

                Sigma = np.delete(
                    np.delete(Sigma, local_ind, axis=0), local_ind, axis=1)

                mu = mu - (mu_i * Sigma_ii_inv) * Sigma_i_col[:, None]
                mu = np.delete(mu, local_ind, axis=0)

                tmp1 = beta * np.dot(Sigma_i_col, PsiTPsi[active_indices])
                S = S + Sigma_ii_inv * np.square(tmp1)
                Q = Q + (mu_i * Sigma_ii_inv) * tmp1

                num_active -= 1
                ind_global_to_local[ind_L_max] = 0.0
                # shift the local indices of the regressors stored after the
                # deleted one
                v = ind_global_to_local[ind_global_to_local > local_ind] - 1
                ind_global_to_local[ind_global_to_local > local_ind] = v
                del active_indices[local_ind]
                deleted_indices.append(ind_L_max)
                # and therefore ineligible
                bcs_path.append(-ind_L_max)

            # same for all three cases
            tmp3 = 1 - np.multiply(gamma, S)
            s = np.divide(S, tmp3)
            q = np.divide(Q, tmp3)

            # Update lambda
            Lambda = 2*(num_active - 1) / np.sum(gamma)

        # Prepare the result object
        self.coef_ = np.zeros(P)
        self.coef_[active_indices] = np.squeeze(mu)
        self.sigma_ = Sigma
        self.active_ = active_indices
        self.gamma = gamma
        self.Lambda = Lambda
        self.beta = beta
        self.bcs_path = bcs_path

        # set intercept_ (undo the centering of _center_data)
        if self.fit_intercept:
            self.coef_ = self.coef_ / X_std
            self.intercept_ = y_mean - np.dot(X_mean, self.coef_.T)
        else:
            self.intercept_ = 0.

        return self

    def predict(self, X, return_std=False):
        '''
        Computes predictive distribution for test set.
        Predictive distribution for each data point is one dimensional
        Gaussian and therefore is characterised by mean and variance based on
        Ref.[1] Section 3.3.2.

        Parameters
        -----------
        X: {array-like, sparse} (n_samples_test, n_features)
           Test data, matrix of explanatory variables

        return_std: bool, optional (DEFAULT = False)
           If True, also return the standard deviation of the predictive
           distribution.

        Returns
        -------
        : list of length two [y_hat, var_hat]

             y_hat: numpy array of size (n_samples_test,)
                    Estimated values of targets on test set (i.e. mean of
                    predictive distribution)

                var_hat: numpy array of size (n_samples_test,)
                    Variance of predictive distribution

        References
        ----------
        [1] Bishop, C. M. (2006). Pattern recognition and machine learning.
        springer.
        '''
        y_hat = np.dot(X, self.coef_) + self.intercept_

        if return_std:
            # Handle the zero variance case
            if self.var_y:
                return y_hat, np.zeros_like(y_hat)

            # self.sigma_ only covers the active regressors, so the variance
            # term must be evaluated on the active columns of X (bug fix:
            # the full X cannot be multiplied with the reduced covariance).
            Xa = np.asarray(X)[:, self.active_]
            var_hat = 1./self.beta
            var_hat += np.sum(Xa.dot(self.sigma_) * Xa, axis=1)
            std_hat = np.sqrt(var_hat)
            return y_hat, std_hat
        else:
            return y_hat
+
+# l2norm = 0.0
+# for idx in range(10):
+#     sigma2 = np.genfromtxt('./test/sigma2_{0}.csv'.format(idx+1), delimiter=',')
+#     Psi_train = np.genfromtxt('./test/Psi_train_{0}.csv'.format(idx+1), delimiter=',')
+#     Y_train = np.genfromtxt('./test/Y_train_{0}.csv'.format(idx+1))
+#     Psi_test = np.genfromtxt('./test/Psi_test_{0}.csv'.format(idx+1), delimiter=',')
+#     Y_test = np.genfromtxt('./test/Y_test_{0}.csv'.format(idx+1))
+
+#     clf = RegressionFastLaplace(verbose=True)
+#     clf.fit_(Psi_train, Y_train, sigma2)
+#     coeffs_fold = np.genfromtxt('./test/coeffs_fold_{0}.csv'.format(idx+1))
+#     print("coeffs error: {0:.4g}".format(np.linalg.norm(clf.coef_ - coeffs_fold)))
+#     l2norm += np.linalg.norm(Y_test - clf.predict(Psi_test))**2/len(Y_test)
+#     print("l2norm error: {0:.4g}".format(l2norm))
diff --git a/build/lib/bayesvalidrox/surrogate_models/surrogate_models.py b/build/lib/bayesvalidrox/surrogate_models/surrogate_models.py
new file mode 100644
index 000000000..ca902f26b
--- /dev/null
+++ b/build/lib/bayesvalidrox/surrogate_models/surrogate_models.py
@@ -0,0 +1,1576 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
Implementation of the metamodel as either PCE, aPCE or GPE
+"""
+
+import warnings
+import numpy as np
+import math
+import h5py
+import matplotlib.pyplot as plt
+from sklearn.preprocessing import MinMaxScaler
+import scipy as sp
+from scipy.optimize import minimize, NonlinearConstraint, LinearConstraint
+from tqdm import tqdm
+from sklearn.decomposition import PCA as sklearnPCA
+import sklearn.linear_model as lm
+from sklearn.gaussian_process import GaussianProcessRegressor
+import sklearn.gaussian_process.kernels as kernels
+import os
+from joblib import Parallel, delayed
+import copy
+
+from .input_space import InputSpace
+from .glexindex import glexindex
+from .eval_rec_rule import eval_univ_basis
+from .reg_fast_ard import RegressionFastARD
+from .reg_fast_laplace import RegressionFastLaplace
+from .orthogonal_matching_pursuit import OrthogonalMatchingPursuit
+from .bayes_linear import VBLinearRegression, EBLinearRegression
+from .apoly_construction import apoly_construction
+warnings.filterwarnings("ignore")
+# Load the mplstyle
+plt.style.use(os.path.join(os.path.split(__file__)[0],
+                           '../', 'bayesvalidrox.mplstyle'))
+
+
+class MetaModel():
+    """
+    Meta (surrogate) model
+
+    This class trains a surrogate model. It accepts an input object (input_obj)
+    containing the specification of the distributions for uncertain parameters
+    and a model object with instructions on how to run the computational model.
+
+    Attributes
+    ----------
+    input_obj : obj
+        Input object with the information on the model input parameters.
+    meta_model_type : str
+        Surrogate model types. Three surrogate model types are supported:
+        polynomial chaos expansion (`PCE`), arbitrary PCE (`aPCE`) and
+        Gaussian process regression (`GPE`). Default is PCE.
+    pce_reg_method : str
+        PCE regression method to compute the coefficients. The following
+        regression methods are available:
+
+        1. OLS: Ordinary Least Square method
+        2. BRR: Bayesian Ridge Regression
+        3. LARS: Least angle regression
+        4. ARD: Bayesian ARD Regression
+        5. FastARD: Fast Bayesian ARD Regression
+        6. VBL: Variational Bayesian Learning
        7. EBL: Empirical Bayesian Learning
+        Default is `OLS`.
+    bootstrap_method : str
+        Bootstraping method. Options are `'normal'` and `'fast'`. The default
+        is `'fast'`. It means that in each iteration except the first one, only
+        the coefficent are recalculated with the ordinary least square method.
+    n_bootstrap_itrs : int
+        Number of iterations for the bootstrap sampling. The default is `1`.
+    pce_deg : int or list of int
+        Polynomial degree(s). If a list is given, an adaptive algorithm is used
+        to find the best degree with the lowest Leave-One-Out cross-validation
+        (LOO) error (or the highest score=1-LOO). Default is `1`.
+    pce_q_norm : float
+        Hyperbolic (or q-norm) truncation for multi-indices of multivariate
+        polynomials. Default is `1.0`.
+    dim_red_method : str
+        Dimensionality reduction method for the output space. The available
+        method is based on principal component analysis (PCA). The Default is
+        `'no'`. There are two ways to select number of components: use
+        percentage of the explainable variance threshold (between 0 and 100)
+        (Option A) or direct prescription of components' number (Option B):
+
+            >>> MetaModelOpts.dim_red_method = 'PCA'
+            >>> MetaModelOpts.var_pca_threshold = 99.999  # Option A
+            >>> MetaModelOpts.n_pca_components = 12 # Option B
+    apply_constraints : bool
+        If set to true constraints will be applied during training. 
+        In this case the training uses OLS. In this version the constraints 
+        need to be set explicitly in this class.
+
+    verbose : bool
+        Prints summary of the regression results. Default is `False`.
+
+    Note
+    -------
+    To define the sampling methods and the training set, an experimental design
+    instance shall be defined. This can be done by:
+
+    >>> MetaModelOpts.add_InputSpace()
+
+    Two experimental design schemes are supported: one-shot (`normal`) and
+    adaptive sequential (`sequential`) designs.
+    For experimental design refer to `InputSpace`.
+
+    """
+
+    def __init__(self, input_obj, meta_model_type='PCE',
+                 pce_reg_method='OLS', bootstrap_method='fast',
+                 n_bootstrap_itrs=1, pce_deg=1, pce_q_norm=1.0,
+                 dim_red_method='no', apply_constraints = False, 
+                 verbose=False):
+
+        self.input_obj = input_obj
+        self.meta_model_type = meta_model_type
+        self.pce_reg_method = pce_reg_method
+        self.bootstrap_method = bootstrap_method
+        self.n_bootstrap_itrs = n_bootstrap_itrs
+        self.pce_deg = pce_deg
+        self.pce_q_norm = pce_q_norm
+        self.dim_red_method = dim_red_method
+        self.apply_constraints = apply_constraints
+        self.verbose = verbose
+ 
+    def build_metamodel(self, n_init_samples = None) -> None:
+        """
+        Builds the parts for the metamodel (polynomes,...) that are neede before fitting.
+
+        Returns
+        -------
+        None
+            DESCRIPTION.
+
+        """
+        
+        # Generate general warnings
+        if self.apply_constraints or self.pce_reg_method.lower() == 'ols':
+            print('There are no estimations of surrogate uncertainty available'
+                  ' for the chosen regression options. This might lead to issues'
+                  ' in later steps.')
+        
+        # Add InputSpace to MetaModel if it does not have any
+        if not hasattr(self, 'InputSpace'):
+            self.InputSpace = InputSpace(self.input_obj)
+            self.InputSpace.n_init_samples = n_init_samples
+            self.InputSpace.init_param_space(np.max(self.pce_deg))
+            
+        self.ndim = self.InputSpace.ndim
+        
+        if not hasattr(self, 'CollocationPoints'):
+            raise AttributeError('Please provide samples to the metamodel before building it.')
+            
+        # Transform input samples
+        # TODO: this is probably not yet correct! Make 'method' variable
+        self.CollocationPoints = self.InputSpace.transform(self.CollocationPoints, method='user') 
+
+        
+        self.n_params = len(self.input_obj.Marginals)
+        
+        # Generate polynomials
+        if self.meta_model_type.lower() != 'gpe':
+            self.generate_polynomials(np.max(self.pce_deg))
+
+        # Initialize the nested dictionaries
+        if self.meta_model_type.lower() == 'gpe':
+            self.gp_poly = self.auto_vivification()
+            self.x_scaler = self.auto_vivification()
+            self.LCerror = self.auto_vivification()
+        else:
+            self.deg_dict = self.auto_vivification()
+            self.q_norm_dict = self.auto_vivification()
+            self.coeffs_dict = self.auto_vivification()
+            self.basis_dict = self.auto_vivification()
+            self.score_dict = self.auto_vivification()
+            self.clf_poly = self.auto_vivification()
+            self.LCerror = self.auto_vivification()
+        if self.dim_red_method.lower() == 'pca':
+            self.pca = self.auto_vivification()
+
+        # Define an array containing the degrees
+        self.CollocationPoints = np.array(self.CollocationPoints)
+        self.n_samples, ndim = self.CollocationPoints.shape
+        if self.ndim != ndim:
+            raise AttributeError('The given samples do not match the given number of priors. The samples should be a 2D array of size (#samples, #priors)')
+            
+        self.deg_array = self.__select_degree(ndim, self.n_samples)
+
+        # Generate all basis indices
+        self.allBasisIndices = self.auto_vivification()
+        for deg in self.deg_array:
+            keys = self.allBasisIndices.keys()
+            if deg not in np.fromiter(keys, dtype=float):
+                # Generate the polynomial basis indices
+                for qidx, q in enumerate(self.pce_q_norm):
+                    basis_indices = glexindex(start=0, stop=deg+1,
+                                              dimensions=self.n_params,
+                                              cross_truncation=q,
+                                              reverse=False, graded=True)
+                    self.allBasisIndices[str(deg)][str(q)] = basis_indices
+
+        
+        
+    def fit(self, X, y, parallel = True, verbose = False):
+        """
+        Fits the surrogate to the given data (samples X, outputs y).
+        Note here that the samples X should be the transformed samples provided
+        by the experimental design if the transformation is used there.
+
+        Parameters
+        ----------
+        X : 2D list or np.array of shape (#samples, #dim)
+            The parameter value combinations that the model was evaluated at.
+        y : dict of 2D lists or arrays of shape (#samples, #timesteps)
+            The respective model evaluations.
+
+        Returns
+        -------
+        None.
+
+        """
+        X = np.array(X)
+        for key in y.keys():
+            y_val = np.array(y[key])
+            if y_val.ndim !=2:
+                raise ValueError('The given outputs y should be 2D')
+            y[key] = np.array(y[key])
+        
+        # Output names are the same as the keys in y
+        self.out_names = list(y.keys())
+        
+        # Build the MetaModel on the static samples
+        self.CollocationPoints = X
+        
+        # TODO: other option: rebuild every time
+        if not hasattr(self, 'deg_array'):
+            self.build_metamodel(n_init_samples = X.shape[1])
+            
+        # Evaluate the univariate polynomials on InputSpace
+        if self.meta_model_type.lower() != 'gpe':
+           self.univ_p_val = self.univ_basis_vals(self.CollocationPoints)
+        
+        # --- Loop through data points and fit the surrogate ---
+        if verbose:
+            print(f"\n>>>> Training the {self.meta_model_type} metamodel "
+                  "started. <<<<<<\n")
+
+        # --- Bootstrap sampling ---
+        # Correct number of bootstrap if PCA transformation is required.
+        if self.dim_red_method.lower() == 'pca' and self.n_bootstrap_itrs == 1:
+            self.n_bootstrap_itrs = 100
+
+        # Check if fast version (update coeffs with OLS) is selected.
+        if self.bootstrap_method.lower() == 'fast':
+            fast_bootstrap = True
+            first_out = {}
+            n_comp_dict = {}
+        else:
+            fast_bootstrap = False
+
+        # Prepare tqdm iteration maessage
+        if verbose and self.n_bootstrap_itrs > 1:
+            enum_obj = tqdm(range(self.n_bootstrap_itrs),
+                            total=self.n_bootstrap_itrs,
+                            desc="Bootstrapping the metamodel",
+                            ascii=True)
+        else:
+            enum_obj = range(self.n_bootstrap_itrs)
+
+        # Loop over the bootstrap iterations
+        for b_i in enum_obj:
+            if b_i > 0:
+                b_indices = np.random.randint(self.n_samples, size=self.n_samples)
+            else:
+                b_indices = np.arange(len(X))
+
+            X_train_b = X[b_indices]
+
+            if verbose and self.n_bootstrap_itrs == 1:
+                items = tqdm(y.items(), desc="Fitting regression")
+            else:
+                items = y.items()
+
+            # For loop over the components/outputs
+            for key, Output in items:
+
+                # Dimensionality reduction with PCA, if specified
+                if self.dim_red_method.lower() == 'pca':
+
+                    # Use the stored n_comp for fast bootsrtrapping
+                    if fast_bootstrap and b_i > 0:
+                        self.n_pca_components = n_comp_dict[key]
+
+                    # Start transformation
+                    pca, target, n_comp = self.pca_transformation(
+                        Output[b_indices], verbose=False
+                        )
+                    self.pca[f'b_{b_i+1}'][key] = pca
+                    # Store the number of components for fast bootsrtrapping
+                    if fast_bootstrap and b_i == 0:
+                        n_comp_dict[key] = n_comp
+                else:
+                    target = Output[b_indices]
+
+                # Parallel fit regression
+                if self.meta_model_type.lower() == 'gpe':
+                    # Prepare the input matrix
+                    scaler = MinMaxScaler()
+                    X_S = scaler.fit_transform(X_train_b)
+
+                    self.x_scaler[f'b_{b_i+1}'][key] = scaler
+                    if parallel:
+                        out = Parallel(n_jobs=-1, backend='multiprocessing')(
+                            delayed(self.gaussian_process_emulator)(
+                                X_S, target[:, idx]) for idx in
+                            range(target.shape[1]))
+                    else:
+                        results = map(self.gaussian_process_emulator,
+                                      [X_train_b]*target.shape[1],
+                                      [target[:, idx] for idx in
+                                       range(target.shape[1])]
+                                      )
+                        out = list(results)
+
+                    for idx in range(target.shape[1]):
+                        self.gp_poly[f'b_{b_i+1}'][key][f"y_{idx+1}"] = out[idx]
+
+                else:
+                    self.univ_p_val = self.univ_p_val[b_indices]
+                    if parallel and (not fast_bootstrap or b_i == 0):
+                        out = Parallel(n_jobs=-1, backend='multiprocessing')(
+                            delayed(self.adaptive_regression)(X_train_b,
+                                                              target[:, idx],
+                                                              idx)
+                            for idx in range(target.shape[1]))
+                    elif not parallel and (not fast_bootstrap or b_i == 0):
+                        results = map(self.adaptive_regression,
+                                      [X_train_b]*target.shape[1],
+                                      [target[:, idx] for idx in
+                                       range(target.shape[1])],
+                                      range(target.shape[1]))
+                        out = list(results)
+
+                    # Store the first out dictionary
+                    if fast_bootstrap and b_i == 0:
+                        first_out[key] = copy.deepcopy(out)
+
+                    if b_i > 0 and fast_bootstrap:
+
+                        # fast bootstrap
+                        out = self.update_pce_coeffs(
+                            X_train_b, target, first_out[key])
+
+                    for i in range(target.shape[1]):
+                        # Create a dict to pass the variables
+                        self.deg_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['degree']
+                        self.q_norm_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['qnorm']
+                        self.coeffs_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['coeffs']
+                        self.basis_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['multi_indices']
+                        self.score_dict[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['LOOCVScore']
+                        self.clf_poly[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['clf_poly']
+                        #self.LCerror[f'b_{b_i+1}'][key][f"y_{i+1}"] = out[i]['LCerror']
+
+        if verbose:
+            print(f"\n>>>> Training the {self.meta_model_type} metamodel"
+                  " sucessfully completed. <<<<<<\n")
+
+    # -------------------------------------------------------------------------
+    def update_pce_coeffs(self, X, y, out_dict = None):
+        """
+        Updates the PCE coefficents using only the ordinary least square method
+        for the fast version of the bootstrapping.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_params)
+            Training set.
+        y : array of shape (n_samples, n_outs)
+            The (transformed) model responses.
+        out_dict : dict
+            The training output dictionary of the first iteration, i.e.
+            the surrogate model for the original experimental design.
+
+        Returns
+        -------
+        final_out_dict : dict
+            The updated training output dictionary.
+
+        """
+        # Make a copy
+        final_out_dict = copy.deepcopy(out_dict)
+
+        # Loop over the points
+        for i in range(y.shape[1]):
+
+                    
+            # Extract nonzero basis indices
+            nnz_idx = np.nonzero(out_dict[i]['coeffs'])[0]
+            if len(nnz_idx) != 0:
+                basis_indices = out_dict[i]['multi_indices']
+
+                # Evaluate the multivariate polynomials on CollocationPoints
+                psi = self.create_psi(basis_indices, self.univ_p_val)
+
+                # Calulate the cofficients of surrogate model
+                updated_out = self.regression(
+                    psi, y[:, i], basis_indices, reg_method='OLS',
+                    sparsity=False
+                    )
+
+                # Update coeffs in out_dict
+                final_out_dict[i]['coeffs'][nnz_idx] = updated_out['coeffs']
+
+        return final_out_dict
+
+    # -------------------------------------------------------------------------
+    def add_InputSpace(self):
+        """
+        Instanciates experimental design object.
+
+        Returns
+        -------
+        None.
+
+        """
+        self.InputSpace = InputSpace(self.input_obj,
+                                    meta_Model_type=self.meta_model_type)
+
+    # -------------------------------------------------------------------------
+    def univ_basis_vals(self, samples, n_max=None):
+        """
+        Evaluates univariate regressors along input directions.
+
+        Parameters
+        ----------
+        samples : array of shape (n_samples, n_params)
+            Samples.
+        n_max : int, optional
+            Maximum polynomial degree. The default is `None`.
+
+        Returns
+        -------
+        univ_basis: array of shape (n_samples, n_params, n_max+1)
+            All univariate regressors up to n_max.
+        """
+        # Extract information
+        poly_types = self.InputSpace.poly_types
+        if samples.ndim != 2:
+            samples = samples.reshape(1, len(samples))
+        n_max = np.max(self.pce_deg) if n_max is None else n_max
+
+        # Extract poly coeffs
+        if self.InputSpace.input_data_given or self.InputSpace.apce:
+            apolycoeffs = self.polycoeffs
+        else:
+            apolycoeffs = None
+
+        # Evaluate univariate basis
+        univ_basis = eval_univ_basis(samples, n_max, poly_types, apolycoeffs)
+
+        return univ_basis
+
+    # -------------------------------------------------------------------------
+    def create_psi(self, basis_indices, univ_p_val):
+        """
+        This function assemble the design matrix Psi from the given basis index
+        set INDICES and the univariate polynomial evaluations univ_p_val.
+
+        Parameters
+        ----------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        univ_p_val : array of (n_samples, n_params, n_max+1)
+            All univariate regressors up to `n_max`.
+
+        Raises
+        ------
+        ValueError
+            n_terms in arguments do not match.
+
+        Returns
+        -------
+        psi : array of shape (n_samples, n_terms)
+            Multivariate regressors.
+
+        """
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+        if sparsity:
+            basis_indices = basis_indices.toarray()
+
+        # Initialization and consistency checks
+        # number of input variables
+        n_params = univ_p_val.shape[1]
+
+        # Size of the experimental design
+        n_samples = univ_p_val.shape[0]
+
+        # number of basis terms
+        n_terms = basis_indices.shape[0]
+
+        # check that the variables have consistent sizes
+        if n_params != basis_indices.shape[1]:
+            raise ValueError(
+                f"The shapes of basis_indices ({basis_indices.shape[1]}) and "
+                f"univ_p_val ({n_params}) don't match!!"
+                )
+
+        # Preallocate the Psi matrix for performance
+        psi = np.ones((n_samples, n_terms))
+        # Assemble the Psi matrix
+        for m in range(basis_indices.shape[1]):
+            aa = np.where(basis_indices[:, m] > 0)[0]
+            try:
+                basisIdx = basis_indices[aa, m]
+                bb = univ_p_val[:, m, basisIdx].reshape(psi[:, aa].shape)
+                psi[:, aa] = np.multiply(psi[:, aa], bb)
+            except ValueError as err:
+                raise err
+        return psi
+
+    # -------------------------------------------------------------------------
+    def regression(self, X, y, basis_indices, reg_method=None, sparsity=True):
+        """
+        Fit regression using the regression method provided.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_features)
+            Training vector, where n_samples is the number of samples and
+            n_features is the number of features.
+        y : array of shape (n_samples,)
+            Target values.
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        reg_method : str, optional
+            DESCRIPTION. The default is None.
+
+        Returns
+        -------
+        return_out_dict : Dict
+            Fitted estimator, spareMulti-Index, sparseX and coefficients.
+
+        """
+        if reg_method is None:
+            reg_method = self.pce_reg_method
+
+        bias_term = self.dim_red_method.lower() != 'pca'
+
+        compute_score = True if self.verbose else False
+
+        #  inverse of the observed variance of the data
+        if np.var(y) != 0:
+            Lambda = 1 / np.var(y)
+        else:
+            Lambda = 1e-6
+
+        # Bayes sparse adaptive aPCE
+        if reg_method.lower() == 'ols':
+            clf_poly = lm.LinearRegression(fit_intercept=False)
+        elif reg_method.lower() == 'brr':
+            clf_poly = lm.BayesianRidge(n_iter=1000, tol=1e-7,
+                                        fit_intercept=False,
+                                        #normalize=True,
+                                        compute_score=compute_score,
+                                        alpha_1=1e-04, alpha_2=1e-04,
+                                        lambda_1=Lambda, lambda_2=Lambda)
+            clf_poly.converged = True
+
+        elif reg_method.lower() == 'ard':
+            if X.shape[0]<2:
+                raise ValueError('Regression with ARD can only be performed for more than 2 samples')
+            clf_poly = lm.ARDRegression(fit_intercept=False,
+                                        #normalize=True,
+                                        compute_score=compute_score,
+                                        n_iter=1000, tol=0.0001,
+                                        alpha_1=1e-3, alpha_2=1e-3,
+                                        lambda_1=Lambda, lambda_2=Lambda)
+
+        elif reg_method.lower() == 'fastard':
+            clf_poly = RegressionFastARD(fit_intercept=False,
+                                         normalize=True,
+                                         compute_score=compute_score,
+                                         n_iter=300, tol=1e-10)
+
+        elif reg_method.lower() == 'bcs':
+            if X.shape[0]<10:
+                raise ValueError('Regression with BCS can only be performed for more than 10 samples')
+            clf_poly = RegressionFastLaplace(fit_intercept=False,
+                                         bias_term=bias_term,
+                                         n_iter=1000, tol=1e-7)
+
+        elif reg_method.lower() == 'lars':
+            if X.shape[0]<10:
+                raise ValueError('Regression with LARS can only be performed for more than 5 samples')
+            clf_poly = lm.LassoLarsCV(fit_intercept=False)
+
+        elif reg_method.lower() == 'sgdr':
+            clf_poly = lm.SGDRegressor(fit_intercept=False,
+                                       max_iter=5000, tol=1e-7)
+
+        elif reg_method.lower() == 'omp':
+            clf_poly = OrthogonalMatchingPursuit(fit_intercept=False)
+
+        elif reg_method.lower() == 'vbl':
+            clf_poly = VBLinearRegression(fit_intercept=False)
+
+        elif reg_method.lower() == 'ebl':
+            clf_poly = EBLinearRegression(optimizer='em')
+            
+        
+        # Training with constraints automatically uses L2
+        if self.apply_constraints:       
+            # TODO: set the constraints here
+            # Define the nonlin. constraint     
+            nlc = NonlinearConstraint(lambda x: np.matmul(X,x),-1,1.1)
+            self.nlc = nlc
+            
+            fun = lambda x: (np.linalg.norm(np.matmul(X, x)-y, ord = 2))**2
+            if self.init_type =='zeros':
+                res = minimize(fun, np.zeros(X.shape[1]), method = 'trust-constr', constraints  = self.nlc) 
+            if self.init_type == 'nonpi':
+                clf_poly.fit(X, y)
+                coeff = clf_poly.coef_
+                res = minimize(fun, coeff, method = 'trust-constr', constraints  = self.nlc)
+            
+            coeff = np.array(res.x)
+            clf_poly.coef_ = coeff
+            clf_poly.X = X
+            clf_poly.y = y
+            clf_poly.intercept_ = 0
+            
+        # Training without constraints uses chosen regression method
+        else:
+            clf_poly.fit(X, y)
+
+        # Select the nonzero entries of coefficients
+        if sparsity:
+            nnz_idx = np.nonzero(clf_poly.coef_)[0]
+        else:
+            nnz_idx = np.arange(clf_poly.coef_.shape[0])
+
+        # This is for the case where all outputs are zero, thereby
+        # all coefficients are zero
+        if (y == 0).all():
+            nnz_idx = np.insert(np.nonzero(clf_poly.coef_)[0], 0, 0)
+
+        sparse_basis_indices = basis_indices[nnz_idx]
+        sparse_X = X[:, nnz_idx]
+        coeffs = clf_poly.coef_[nnz_idx]
+        clf_poly.coef_ = coeffs
+
+        # Create a dict to pass the outputs
+        return_out_dict = dict()
+        return_out_dict['clf_poly'] = clf_poly
+        return_out_dict['spareMulti-Index'] = sparse_basis_indices
+        return_out_dict['sparePsi'] = sparse_X
+        return_out_dict['coeffs'] = coeffs
+        return return_out_dict
+    
+    # -------------------------------------------------------------------------
+    def create_psi(self, basis_indices, univ_p_val):
+        """
+        This function assemble the design matrix Psi from the given basis index
+        set INDICES and the univariate polynomial evaluations univ_p_val.
+
+        Parameters
+        ----------
+        basis_indices : array of shape (n_terms, n_params)
+            Multi-indices of multivariate polynomials.
+        univ_p_val : array of (n_samples, n_params, n_max+1)
+            All univariate regressors up to `n_max`.
+
+        Raises
+        ------
+        ValueError
+            n_terms in arguments do not match.
+
+        Returns
+        -------
+        psi : array of shape (n_samples, n_terms)
+            Multivariate regressors.
+
+        """
+        # Check if BasisIndices is a sparse matrix
+        sparsity = sp.sparse.issparse(basis_indices)
+        if sparsity:
+            basis_indices = basis_indices.toarray()
+
+        # Initialization and consistency checks
+        # number of input variables
+        n_params = univ_p_val.shape[1]
+
+        # Size of the experimental design
+        n_samples = univ_p_val.shape[0]
+
+        # number of basis terms
+        n_terms = basis_indices.shape[0]
+
+        # check that the variables have consistent sizes
+        if n_params != basis_indices.shape[1]:
+            raise ValueError(
+                f"The shapes of basis_indices ({basis_indices.shape[1]}) and "
+                f"univ_p_val ({n_params}) don't match!!"
+                )
+
+        # Preallocate the Psi matrix for performance
+        psi = np.ones((n_samples, n_terms))
+        # Assemble the Psi matrix
+        for m in range(basis_indices.shape[1]):
+            aa = np.where(basis_indices[:, m] > 0)[0]
+            try:
+                basisIdx = basis_indices[aa, m]
+                bb = univ_p_val[:, m, basisIdx].reshape(psi[:, aa].shape)
+                psi[:, aa] = np.multiply(psi[:, aa], bb)
+            except ValueError as err:
+                raise err
+        return psi
+
+    # --------------------------------------------------------------------------------------------------------
+    def adaptive_regression(self, ED_X, ED_Y, varIdx, verbose=False):
+        """
+        Adaptively fits the PCE model by comparing the scores of different
+        degrees and q-norm.
+
+        Parameters
+        ----------
+        ED_X : array of shape (n_samples, n_params)
+            Experimental design.
+        ED_Y : array of shape (n_samples,)
+            Target values, i.e. simulation results for the Experimental design.
+        varIdx : int
+            Index of the output.
+        verbose : bool, optional
+            Print out summary. The default is False.
+
+        Returns
+        -------
+        returnVars : Dict
+            Fitted estimator, best degree, best q-norm, LOOCVScore and
+            coefficients.
+
+        """
+
+        n_samples, n_params = ED_X.shape
+        # Initialization
+        qAllCoeffs, AllCoeffs = {}, {}
+        qAllIndices_Sparse, AllIndices_Sparse = {}, {}
+        qAllclf_poly, Allclf_poly = {}, {}
+        qAllnTerms, AllnTerms = {}, {}
+        qAllLCerror, AllLCerror = {}, {}
+
+        # Extract degree array and qnorm array
+        deg_array = np.array([*self.allBasisIndices], dtype=int)
+        qnorm = [*self.allBasisIndices[str(int(deg_array[0]))]]
+
+        # Some options for EarlyStop
+        errorIncreases = False
+        # Stop degree, if LOO error does not decrease n_checks_degree times
+        n_checks_degree = 3
+        # Stop qNorm, if criterion isn't fulfilled n_checks_qNorm times
+        n_checks_qNorm = 2
+        nqnorms = len(qnorm)
+        qNormEarlyStop = True
+        if nqnorms < n_checks_qNorm+1:
+            qNormEarlyStop = False
+
+        # =====================================================================
+        # basis adaptive polynomial chaos: repeat the calculation by increasing
+        # polynomial degree until the highest accuracy is reached
+        # =====================================================================
+        # For each degree check all q-norms and choose the best one
+        scores = -np.inf * np.ones(deg_array.shape[0])
+        qNormScores = -np.inf * np.ones(nqnorms)
+
+        for degIdx, deg in enumerate(deg_array):
+
+            for qidx, q in enumerate(qnorm):
+
+                # Extract the polynomial basis indices from the pool of
+                # allBasisIndices
+                BasisIndices = self.allBasisIndices[str(deg)][str(q)]
+
+                # Assemble the Psi matrix
+                Psi = self.create_psi(BasisIndices, self.univ_p_val)
+
+                # Calulate the cofficients of the meta model
+                outs = self.regression(Psi, ED_Y, BasisIndices)
+
+                # Calculate and save the score of LOOCV
+                score, LCerror = self.corr_loocv_error(outs['clf_poly'],
+                                                       outs['sparePsi'],
+                                                       outs['coeffs'],
+                                                       ED_Y)
+
+                # Check the convergence of noise for FastARD
+                if self.pce_reg_method == 'FastARD' and \
+                   outs['clf_poly'].alpha_ < np.finfo(np.float32).eps:
+                    score = -np.inf
+
+                qNormScores[qidx] = score
+                qAllCoeffs[str(qidx+1)] = outs['coeffs']
+                qAllIndices_Sparse[str(qidx+1)] = outs['spareMulti-Index']
+                qAllclf_poly[str(qidx+1)] = outs['clf_poly']
+                qAllnTerms[str(qidx+1)] = BasisIndices.shape[0]
+                qAllLCerror[str(qidx+1)] = LCerror
+
+                # EarlyStop check
+                # if there are at least n_checks_qNorm entries after the
+                # best one, we stop
+                if qNormEarlyStop and \
+                   sum(np.isfinite(qNormScores)) > n_checks_qNorm:
+                    # If the error has increased the last two iterations, stop!
+                    qNormScores_nonInf = qNormScores[np.isfinite(qNormScores)]
+                    deltas = np.sign(np.diff(qNormScores_nonInf))
+                    if sum(deltas[-n_checks_qNorm+1:]) == 2:
+                        # stop the q-norm loop here
+                        break
+                if np.var(ED_Y) == 0:
+                    break
+
+            # Store the score in the scores list
+            best_q = np.nanargmax(qNormScores)
+            scores[degIdx] = qNormScores[best_q]
+
+            AllCoeffs[str(degIdx+1)] = qAllCoeffs[str(best_q+1)]
+            AllIndices_Sparse[str(degIdx+1)] = qAllIndices_Sparse[str(best_q+1)]
+            Allclf_poly[str(degIdx+1)] = qAllclf_poly[str(best_q+1)]
+            AllnTerms[str(degIdx+1)] = qAllnTerms[str(best_q+1)]
+            AllLCerror[str(degIdx+1)] = qAllLCerror[str(best_q+1)]
+
+            # Check the direction of the error (on average):
+            # if it increases consistently stop the iterations
+            if len(scores[scores != -np.inf]) > n_checks_degree:
+                scores_nonInf = scores[scores != -np.inf]
+                ss = np.sign(scores_nonInf - np.max(scores_nonInf))
+                # ss<0 error decreasing
+                errorIncreases = np.sum(np.sum(ss[-2:])) <= -1*n_checks_degree
+
+            if errorIncreases:
+                break
+
+            # Check only one degree, if target matrix has zero variance
+            if np.var(ED_Y) == 0:
+                break
+
+        # ------------------ Summary of results ------------------
+        # Select the one with the best score and save the necessary outputs
+        best_deg = np.nanargmax(scores)+1
+        coeffs = AllCoeffs[str(best_deg)]
+        basis_indices = AllIndices_Sparse[str(best_deg)]
+        clf_poly = Allclf_poly[str(best_deg)]
+        LOOCVScore = np.nanmax(scores)
+        P = AllnTerms[str(best_deg)]
+        LCerror = AllLCerror[str(best_deg)]
+        degree = deg_array[np.nanargmax(scores)]
+        qnorm = float(qnorm[best_q])
+
+        # ------------------ Print out Summary of results ------------------
+        if self.verbose:
+            # Create PSI_Sparse by removing redundent terms
+            nnz_idx = np.nonzero(coeffs)[0]
+            BasisIndices_Sparse = basis_indices[nnz_idx]
+
+            print(f'Output variable {varIdx+1}:')
+            print('The estimation of PCE coefficients converged at polynomial '
+                  f'degree {deg_array[best_deg-1]} with '
+                  f'{len(BasisIndices_Sparse)} terms (Sparsity index = '
+                  f'{round(len(BasisIndices_Sparse)/P, 3)}).')
+
+            print(f'Final ModLOO error estimate: {1-max(scores):.3e}')
+            print('\n'+'-'*50)
+
+        if verbose:
+            print('='*50)
+            print(' '*10 + ' Summary of results ')
+            print('='*50)
+
+            print("Scores:\n", scores)
+            print("Degree of best score:", self.deg_array[best_deg-1])
+            print("No. of terms:", len(basis_indices))
+            print("Sparsity index:", round(len(basis_indices)/P, 3))
+            print("Best Indices:\n", basis_indices)
+
+            if self.pce_reg_method in ['BRR', 'ARD']:
+                fig, ax = plt.subplots(figsize=(12, 10))
+                plt.title("Marginal log-likelihood")
+                plt.plot(clf_poly.scores_, color='navy', linewidth=2)
+                plt.ylabel("Score")
+                plt.xlabel("Iterations")
+                if self.pce_reg_method.lower() == 'bbr':
+                    text = f"$\\alpha={clf_poly.alpha_:.1f}$\n"
+                    f"$\\lambda={clf_poly.lambda_:.3f}$\n"
+                    f"$L={clf_poly.scores_[-1]:.1f}$"
+                else:
+                    text = f"$\\alpha={clf_poly.alpha_:.1f}$\n$"
+                    f"\\L={clf_poly.scores_[-1]:.1f}$"
+
+                plt.text(0.75, 0.5, text, fontsize=18, transform=ax.transAxes)
+                plt.show()
+            print('='*80)
+
+        # Create a dict to pass the outputs
+        returnVars = dict()
+        returnVars['clf_poly'] = clf_poly
+        returnVars['degree'] = degree
+        returnVars['qnorm'] = qnorm
+        returnVars['coeffs'] = coeffs
+        returnVars['multi_indices'] = basis_indices
+        returnVars['LOOCVScore'] = LOOCVScore
+        returnVars['LCerror'] = LCerror
+
+        return returnVars
+
+    # -------------------------------------------------------------------------
+    def corr_loocv_error(self, clf, psi, coeffs, y):
+        """
+        Calculates the corrected LOO error for regression on regressor
+        matrix `psi` that generated the coefficients based on [1] and [2].
+
+        [1] Blatman, G., 2009. Adaptive sparse polynomial chaos expansions for
+            uncertainty propagation and sensitivity analysis (Doctoral
+            dissertation, Clermont-Ferrand 2).
+
+        [2] Blatman, G. and Sudret, B., 2011. Adaptive sparse polynomial chaos
+            expansion based on least angle regression. Journal of computational
+            Physics, 230(6), pp.2345-2367.
+
+        Parameters
+        ----------
+        clf : object
+            Fitted estimator.
+        psi : array of shape (n_samples, n_features)
+            The multivariate orthogonal polynomials (regressor).
+        coeffs : array-like of shape (n_features,)
+            Estimated cofficients.
+        y : array of shape (n_samples,)
+            Target values.
+
+        Returns
+        -------
+        R_2 : float
+            LOOCV Validation score (1-LOOCV erro).
+        residual : array of shape (n_samples,)
+            Residual values (y - predicted targets).
+
+        """
+        psi = np.array(psi, dtype=float)
+
+        # Create PSI_Sparse by removing redundent terms
+        nnz_idx = np.nonzero(coeffs)[0]
+        if len(nnz_idx) == 0:
+            nnz_idx = [0]
+        psi_sparse = psi[:, nnz_idx]
+
+        # NrCoeffs of aPCEs
+        P = len(nnz_idx)
+        # NrEvaluation (Size of experimental design)
+        N = psi.shape[0]
+
+        # Build the projection matrix
+        PsiTPsi = np.dot(psi_sparse.T, psi_sparse)
+
+        if np.linalg.cond(PsiTPsi) > 1e-12: #and \
+           # np.linalg.cond(PsiTPsi) < 1/sys.float_info.epsilon:
+            # faster
+            try:
+                M = sp.linalg.solve(PsiTPsi,
+                                sp.sparse.eye(PsiTPsi.shape[0]).toarray())
+            except:
+                raise AttributeError('There are too few samples for the corrected loo-cv error. Fit surrogate on at least as many samples as parameters to use this')
+        else:
+            # stabler
+            M = np.linalg.pinv(PsiTPsi)
+
+        # h factor (the full matrix is not calculated explicitly,
+        # only the trace is, to save memory)
+        PsiM = np.dot(psi_sparse, M)
+
+        h = np.sum(np.multiply(PsiM, psi_sparse), axis=1, dtype=np.longdouble)#float128)
+
+        # ------ Calculate Error Loocv for each measurement point ----
+        # Residuals
+        try:
+            residual = clf.predict(psi) - y
+        except:
+            residual = np.dot(psi, coeffs) - y
+
+        # Variance
+        var_y = np.var(y)
+
+        if var_y == 0:
+            norm_emp_error = 0
+            loo_error = 0
+            LCerror = np.zeros((y.shape))
+            return 1-loo_error, LCerror
+        else:
+            norm_emp_error = np.mean(residual**2)/var_y
+
+            # LCerror = np.divide(residual, (1-h))
+            LCerror = residual / (1-h)
+            loo_error = np.mean(np.square(LCerror)) / var_y
+            # if there are NaNs, just return an infinite LOO error (this
+            # happens, e.g., when a strongly underdetermined problem is solved)
+            if np.isnan(loo_error):
+                loo_error = np.inf
+
+        # Corrected Error for over-determined system
+        tr_M = np.trace(M)
+        if tr_M < 0 or abs(tr_M) > 1e6:
+            tr_M = np.trace(np.linalg.pinv(np.dot(psi.T, psi)))
+
+        # Over-determined system of Equation
+        if N > P:
+            T_factor = N/(N-P) * (1 + tr_M)
+
+        # Under-determined system of Equation
+        else:
+            T_factor = np.inf
+
+        corrected_loo_error = loo_error * T_factor
+
+        R_2 = 1 - corrected_loo_error
+
+        return R_2, LCerror
+
+    # -------------------------------------------------------------------------
+    def pca_transformation(self, target, verbose=False):
+        """
+        Transforms the targets (outputs) via Principal Component Analysis
+
+        Parameters
+        ----------
+        target : array of shape (n_samples,)
+            Target values.
+
+        Returns
+        -------
+        pca : obj
+            Fitted sklearnPCA object.
+        OutputMatrix : array of shape (n_samples,)
+            Transformed target values.
+        n_pca_components : int
+            Number of selected principal components.
+
+        """
+        # Transform via Principal Component Analysis
+        if hasattr(self, 'var_pca_threshold'):
+            var_pca_threshold = self.var_pca_threshold
+        else:
+            var_pca_threshold = 100.0
+        n_samples, n_features = target.shape
+
+        if hasattr(self, 'n_pca_components'):
+            n_pca_components = self.n_pca_components
+        else:
+            # Instantiate and fit sklearnPCA object
+            covar_matrix = sklearnPCA(n_components=None)
+            covar_matrix.fit(target)
+            var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_,
+                                     decimals=5)*100)
+            # Find the number of components to explain self.varPCAThreshold of
+            # variance
+            try:
+                n_components = np.where(var >= var_pca_threshold)[0][0] + 1
+            except IndexError:
+                n_components = min(n_samples, n_features)
+
+            n_pca_components = min(n_samples, n_features, n_components)
+
+        # Print out a report
+        if verbose:
+            print()
+            print('-' * 50)
+            print(f"PCA transformation is performed with {n_pca_components}"
+                  " components.")
+            print('-' * 50)
+            print()
+
+        # Fit and transform with the selected number of components
+        pca = sklearnPCA(n_components=n_pca_components, svd_solver='arpack')
+        scaled_target = pca.fit_transform(target)
+
+        return pca, scaled_target, n_pca_components
+
+    # -------------------------------------------------------------------------
+    def gaussian_process_emulator(self, X, y, nug_term=None, autoSelect=False,
+                                  varIdx=None):
+        """
+        Fits a Gaussian Process Emulator to the target given the training
+         points.
+
+        Parameters
+        ----------
+        X : array of shape (n_samples, n_params)
+            Training points.
+        y : array of shape (n_samples,)
+            Target values.
+        nug_term : float, optional
+            Nugget term. The default is None, i.e. variance of y.
+        autoSelect : bool, optional
+            Loop over some kernels and select the best. The default is False.
+        varIdx : int, optional
+            The index number. The default is None.
+
+        Returns
+        -------
+        gp : object
+            Fitted estimator.
+
+        """
+
+        nug_term = nug_term if nug_term else np.var(y)
+
+        Kernels = [nug_term * kernels.RBF(length_scale=1.0,
+                                          length_scale_bounds=(1e-25, 1e15)),
+                   nug_term * kernels.RationalQuadratic(length_scale=0.2,
+                                                        alpha=1.0),
+                   nug_term * kernels.Matern(length_scale=1.0,
+                                             length_scale_bounds=(1e-15, 1e5),
+                                             nu=1.5)]
+
+        # Automatic selection of the kernel
+        if autoSelect:
+            gp = {}
+            BME = []
+            for i, kernel in enumerate(Kernels):
+                gp[i] = GaussianProcessRegressor(kernel=kernel,
+                                                 n_restarts_optimizer=3,
+                                                 normalize_y=False)
+
+                # Fit to data using Maximum Likelihood Estimation
+                gp[i].fit(X, y)
+
+                # Store the MLE as BME score
+                BME.append(gp[i].log_marginal_likelihood())
+
+            gp = gp[np.argmax(BME)]
+
+        else:
+            gp = GaussianProcessRegressor(kernel=Kernels[0],
+                                          n_restarts_optimizer=3,
+                                          normalize_y=False)
+            gp.fit(X, y)
+
+        # Compute score
+        if varIdx is not None:
+            Score = gp.score(X, y)
+            print('-'*50)
+            print(f'Output variable {varIdx}:')
+            print('The estimation of GPE coefficients converged,')
+            print(f'with the R^2 score: {Score:.3f}')
+            print('-'*50)
+
+        return gp
+
+    # -------------------------------------------------------------------------
    def eval_metamodel(self, samples):
        """
        Evaluates the meta-model (PCE or GPE) at the requested samples.

        Parameters
        ----------
        samples : array of shape (n_samples, n_params)
            Samples to evaluate the meta-model at, given in the original
            (user) space. A list is accepted and converted to an array.

        Returns
        -------
        mean_pred : dict
            Mean of the predictions per output variable, averaged over
            the bootstrap iterations.
        std_pred : dict
            Standard deviation of the predictions per output variable
            over the bootstrap iterations (or the per-model std when
            only one iteration is used).
        """
        # Transform into np array - can also be given as list
        samples = np.array(samples)

        # Map the samples into the independent (transformed) space
        samples = self.InputSpace.transform(
            samples,
            method='user'
            )
        # Compute univariate basis values for the given samples
        # (only needed for PCE-type surrogates)
        if self.meta_model_type.lower() != 'gpe':
            univ_p_val = self.univ_basis_vals(
                samples,
                n_max=np.max(self.pce_deg)
                )

        mean_pred_b = {}
        std_pred_b = {}
        # Loop over bootstrap iterations
        for b_i in range(self.n_bootstrap_itrs):

            # Extract model dictionary for this bootstrap iteration
            if self.meta_model_type.lower() == 'gpe':
                model_dict = self.gp_poly[f'b_{b_i+1}']
            else:
                model_dict = self.coeffs_dict[f'b_{b_i+1}']

            # Loop over outputs
            mean_pred = {}
            std_pred = {}
            for output, values in model_dict.items():

                mean = np.empty((len(samples), len(values)))
                std = np.empty((len(samples), len(values)))
                idx = 0
                for in_key, InIdxValues in values.items():

                    # Prediction with GPE
                    if self.meta_model_type.lower() == 'gpe':
                        X_T = self.x_scaler[f'b_{b_i+1}'][output].transform(samples)
                        gp = self.gp_poly[f'b_{b_i+1}'][output][in_key]
                        y_mean, y_std = gp.predict(X_T, return_std=True)

                    else:
                        # Prediction with PCE
                        # Assemble Psi matrix
                        basis = self.basis_dict[f'b_{b_i+1}'][output][in_key]
                        psi = self.create_psi(basis, univ_p_val)

                        # Prediction
                        if self.bootstrap_method != 'fast' or b_i == 0:
                            # with error bar, i.e. use clf_poly
                            clf_poly = self.clf_poly[f'b_{b_i+1}'][output][in_key]
                            try:
                                y_mean, y_std = clf_poly.predict(
                                    psi, return_std=True
                                    )
                            except TypeError:
                                # Estimator does not support return_std
                                y_mean = clf_poly.predict(psi)
                                y_std = np.zeros_like(y_mean)
                        else:
                            # without error bar
                            coeffs = self.coeffs_dict[f'b_{b_i+1}'][output][in_key]
                            y_mean = np.dot(psi, coeffs)
                            y_std = np.zeros_like(y_mean)

                    mean[:, idx] = y_mean
                    std[:, idx] = y_std
                    idx += 1

                # Save predictions for each output
                if self.dim_red_method.lower() == 'pca':
                    # NOTE(review): only the mean is back-transformed from
                    # the PCA space; the std is discarded and set to zero.
                    PCA = self.pca[f'b_{b_i+1}'][output]
                    mean_pred[output] = PCA.inverse_transform(mean)
                    std_pred[output] = np.zeros(mean.shape)
                else:
                    mean_pred[output] = mean
                    std_pred[output] = std

            # Save predictions for each bootstrap iteration
            mean_pred_b[b_i] = mean_pred
            std_pred_b[b_i] = std_pred

        # Change the order of nesting: output -> list over bootstrap runs
        mean_pred_all = {}
        for i in sorted(mean_pred_b):
            for k, v in mean_pred_b[i].items():
                if k not in mean_pred_all:
                    mean_pred_all[k] = [None] * len(mean_pred_b)
                mean_pred_all[k][i] = v

        # Compute the moments of predictions over the predictions
        for output in self.out_names:
            # Only use bootstraps with finite values
            finite_rows = np.isfinite(
                mean_pred_all[output]).all(axis=2).all(axis=1)
            outs = np.asarray(mean_pred_all[output])[finite_rows]
            # Compute mean
            mean_pred[output] = np.mean(outs, axis=0)
            # Compute standard deviation
            if self.n_bootstrap_itrs > 1:
                std_pred[output] = np.std(outs, axis=0)
            else:
                # NOTE(review): reuses mean_pred/std_pred and the loop
                # variable b_i left over from the last bootstrap
                # iteration — relies on n_bootstrap_itrs >= 1.
                std_pred[output] = std_pred_b[b_i][output]

        return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    def create_model_error(self, X, y, Model, name='Calib'):
+        """
+        Fits a GPE-based model error.
+
+        Parameters
+        ----------
+        X : array of shape (n_outputs, n_inputs)
+            Input array. It can contain any forcing inputs or coordinates of
+             extracted data.
+        y : array of shape (n_outputs,)
+            The model response for the MAP parameter set.
+        name : str, optional
+            Calibration or validation. The default is `'Calib'`.
+
+        Returns
+        -------
+        self: object
+            Self object.
+
+        """
+        outputNames = self.out_names
+        self.errorRegMethod = 'GPE'
+        self.errorclf_poly = self.auto_vivification()
+        self.errorScale = self.auto_vivification()
+
+        # Read data
+        # TODO: do this call outside the metamodel
+        MeasuredData = Model.read_observation(case=name)
+
+        # Fitting GPR based bias model
+        for out in outputNames:
+            nan_idx = ~np.isnan(MeasuredData[out])
+            # Select data
+            try:
+                data = MeasuredData[out].values[nan_idx]
+            except AttributeError:
+                data = MeasuredData[out][nan_idx]
+
+            # Prepare the input matrix
+            scaler = MinMaxScaler()
+            delta = data  # - y[out][0]
+            BiasInputs = np.hstack((X[out], y[out].reshape(-1, 1)))
+            X_S = scaler.fit_transform(BiasInputs)
+            gp = self.gaussian_process_emulator(X_S, delta)
+
+            self.errorScale[out]["y_1"] = scaler
+            self.errorclf_poly[out]["y_1"] = gp
+
+        return self
+
+    # -------------------------------------------------------------------------
+    def eval_model_error(self, X, y_pred):
+        """
+        Evaluates the error model.
+
+        Parameters
+        ----------
+        X : array
+            Inputs.
+        y_pred : dict
+            Predictions.
+
+        Returns
+        -------
+        mean_pred : dict
+            Mean predition of the GPE-based error model.
+        std_pred : dict
+            standard deviation of the GPE-based error model.
+
+        """
+        mean_pred = {}
+        std_pred = {}
+
+        for Outkey, ValuesDict in self.errorclf_poly.items():
+
+            pred_mean = np.zeros_like(y_pred[Outkey])
+            pred_std = np.zeros_like(y_pred[Outkey])
+
+            for Inkey, InIdxValues in ValuesDict.items():
+
+                gp = self.errorclf_poly[Outkey][Inkey]
+                scaler = self.errorScale[Outkey][Inkey]
+
+                # Transform Samples using scaler
+                for j, pred in enumerate(y_pred[Outkey]):
+                    BiasInputs = np.hstack((X[Outkey], pred.reshape(-1, 1)))
+                    Samples_S = scaler.transform(BiasInputs)
+                    y_hat, y_std = gp.predict(Samples_S, return_std=True)
+                    pred_mean[j] = y_hat
+                    pred_std[j] = y_std
+                    # pred_mean[j] += pred
+
+            mean_pred[Outkey] = pred_mean
+            std_pred[Outkey] = pred_std
+
+        return mean_pred, std_pred
+
+    # -------------------------------------------------------------------------
+    class auto_vivification(dict):
+        """
+        Implementation of perl's AutoVivification feature.
+
+        Source: https://stackoverflow.com/a/651879/18082457
+        """
+
+        def __getitem__(self, item):
+            try:
+                return dict.__getitem__(self, item)
+            except KeyError:
+                value = self[item] = type(self)()
+                return value
+
+    # -------------------------------------------------------------------------
+    def copy_meta_model_opts(self):
+        """
+        This method is a convinient function to copy the metamodel options.
+
+        Returns
+        -------
+        new_MetaModelOpts : object
+            The copied object.
+
+        """
+        # TODO: what properties should be moved to the new object?
+        new_MetaModelOpts = copy.deepcopy(self)
+        new_MetaModelOpts.input_obj = self.input_obj#InputObj
+        new_MetaModelOpts.InputSpace = self.InputSpace
+        #new_MetaModelOpts.InputSpace.meta_Model = 'aPCE'
+        #new_MetaModelOpts.InputSpace.InputObj = self.input_obj
+        #new_MetaModelOpts.InputSpace.ndim = len(self.input_obj.Marginals)
+        new_MetaModelOpts.n_params = len(self.input_obj.Marginals)
+        #new_MetaModelOpts.InputSpace.hdf5_file = None
+
+        return new_MetaModelOpts
+
+    # -------------------------------------------------------------------------
+    def __select_degree(self, ndim, n_samples):
+        """
+        Selects degree based on the number of samples and parameters in the
+        sequential design.
+
+        Parameters
+        ----------
+        ndim : int
+            Dimension of the parameter space.
+        n_samples : int
+            Number of samples.
+
+        Returns
+        -------
+        deg_array: array
+            Array containing the arrays.
+
+        """
+        # Define the deg_array
+        max_deg = np.max(self.pce_deg)
+        min_Deg = np.min(self.pce_deg)
+        
+        # TODO: remove the options for sequential?
+        #nitr = n_samples - self.InputSpace.n_init_samples
+
+        # Check q-norm
+        if not np.isscalar(self.pce_q_norm):
+            self.pce_q_norm = np.array(self.pce_q_norm)
+        else:
+            self.pce_q_norm = np.array([self.pce_q_norm])
+
+        def M_uptoMax(maxDeg):
+            n_combo = np.zeros(maxDeg)
+            for i, d in enumerate(range(1, maxDeg+1)):
+                n_combo[i] = math.factorial(ndim+d)
+                n_combo[i] /= math.factorial(ndim) * math.factorial(d)
+            return n_combo
+
+        deg_new = max_deg
+        #d = nitr if nitr != 0 and self.n_params > 5 else 1
+        # d = 1
+        # min_index = np.argmin(abs(M_uptoMax(max_deg)-ndim*n_samples*d))
+        # deg_new = range(1, max_deg+1)[min_index]
+
+        if deg_new > min_Deg and self.pce_reg_method.lower() != 'fastard':
+            deg_array = np.arange(min_Deg, deg_new+1)
+        else:
+            deg_array = np.array([deg_new])
+
+        return deg_array
+
+    def generate_polynomials(self, max_deg=None):
+        # Check for InputSpace
+        if not hasattr(self, 'InputSpace'):
+            raise AttributeError('Generate or add InputSpace before generating polynomials')
+            
+        ndim = self.InputSpace.ndim
+        # Create orthogonal polynomial coefficients if necessary
+        if (self.meta_model_type.lower()!='gpe') and max_deg is not None:# and self.input_obj.poly_coeffs_flag:
+            self.polycoeffs = {}
+            for parIdx in tqdm(range(ndim), ascii=True,
+                               desc="Computing orth. polynomial coeffs"):
+                poly_coeffs = apoly_construction(
+                    self.InputSpace.raw_data[parIdx],
+                    max_deg
+                    )
+                self.polycoeffs[f'p_{parIdx+1}'] = poly_coeffs
+        else:
+            raise AttributeError('MetaModel cannot generate polynomials in the given scenario!')
+
+    # -------------------------------------------------------------------------
    def _compute_pce_moments(self):
        """
        Computes the first two moments using the PCE-based meta-model.

        The mean per output point is the constant coefficient c_0 (or the
        regressor intercept when c_0 was sparsified away), the variance is
        the sum of the squared higher-order coefficients; both are
        averaged over the bootstrap iterations.

        Returns
        -------
        pce_means: dict
            The first moment (mean) of the surrogate.
        pce_stds: dict
            The second moment (standard deviation) of the surrogate.

        Raises
        ------
        AttributeError
            If the surrogate is a GPE instead of a PCE.

        """

        # Check if its truly a pce-surrogate
        if self.meta_model_type.lower() == 'gpe':
            raise AttributeError('Moments can only be computed for pce-type surrogates')

        outputs = self.out_names
        pce_means_b = {}
        pce_stds_b = {}

        # Loop over bootstrap iterations
        for b_i in range(self.n_bootstrap_itrs):
            # Loop over the metamodels
            coeffs_dicts = self.coeffs_dict[f'b_{b_i+1}'].items()
            means = {}
            stds = {}
            for output, coef_dict in coeffs_dicts:

                pce_mean = np.zeros((len(coef_dict)))
                pce_var = np.zeros((len(coef_dict)))

                for index, values in coef_dict.items():
                    # Keys look like 'y_<idx>'; recover the 0-based index
                    idx = int(index.split('_')[1]) - 1
                    coeffs = self.coeffs_dict[f'b_{b_i+1}'][output][index]

                    # Mean = c_0
                    if coeffs[0] != 0:
                        pce_mean[idx] = coeffs[0]
                    else:
                        # Fall back to the fitted intercept when the
                        # constant coefficient was sparsified to zero
                        clf_poly = self.clf_poly[f'b_{b_i+1}'][output]
                        pce_mean[idx] = clf_poly[index].intercept_
                    # Var = sum(coeffs[1:]**2)
                    pce_var[idx] = np.sum(np.square(coeffs[1:]))

                # Save predictions for each output
                if self.dim_red_method.lower() == 'pca':
                    # Back-transform the moments from the PCA space
                    PCA = self.pca[f'b_{b_i+1}'][output]
                    means[output] = PCA.inverse_transform(pce_mean)
                    stds[output] = PCA.inverse_transform(np.sqrt(pce_var))
                else:
                    means[output] = pce_mean
                    stds[output] = np.sqrt(pce_var)

            # Save predictions for each bootstrap iteration
            pce_means_b[b_i] = means
            pce_stds_b[b_i] = stds

        # Change the order of nesting: output -> list over bootstrap runs
        mean_all = {}
        for i in sorted(pce_means_b):
            for k, v in pce_means_b[i].items():
                if k not in mean_all:
                    mean_all[k] = [None] * len(pce_means_b)
                mean_all[k][i] = v
        std_all = {}
        for i in sorted(pce_stds_b):
            for k, v in pce_stds_b[i].items():
                if k not in std_all:
                    std_all[k] = [None] * len(pce_stds_b)
                std_all[k][i] = v

        # Average the moments over the bootstrap iterations
        pce_means, pce_stds = {}, {}
        for output in outputs:
            pce_means[output] = np.mean(mean_all[output], axis=0)
            pce_stds[output] = np.mean(std_all[output], axis=0)

        return pce_means, pce_stds
diff --git a/dist/bayesvalidrox-1.0.0-py3-none-any.whl b/dist/bayesvalidrox-1.0.0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..ddc42c25eaf552f0cdc4cfd7567c07777f0f5a81
GIT binary patch
literal 130371
zcma&NQ;;r9&@?!<ZR?C}+cRfu+j_>famKc7+qP}n{@>k;{r8LAh~10si@vJN%IJ!y
z%2Jd81w#V@0)hfU)z{JJpgRA>0tN!Y1qA}a{_oYu(8JW()zHS$#L3>BL0{j}&eBC+
zpWeYE<e!Xn2qRL+Yv3>OkZ!pXM~Ko{2}9c)ted{H<RMl5i4cnR*XwigxoSP2@HCSP
zl`Ap&NSzD0B&nl^)VAzlGoT3CSbwjJHG|1X;VhpMje}Kgp~y)*VZb$JDfcl-(liG4
zXsi3>Vz|xzW+L%Ve#CVTU}lLINE!467fB+E=x#mzT0?c0*@tDWd8zK6KhFL2s`xV3
ztlSp&?lm@O04^GFikr0WlfS2VM8(rv9A}!a3v{-qrY``aZe{+9anHEOFSArO_-_i&
zd5qo^R*=Wg#!iuv=YF3PU5AC=huGi`_Mse$USumP@wW&=4NP~=`U|yz<rLELu!~9C
zm?U$@23<?d5wrEsBwDIFI$XBZt>pu#;{Qb_vk{-hN)k6--hVVI{6{I-|AWr|K`y<m
zgN?I`hmEQ2oGwBjBOK748_G=|unRI9|Jw<ES6>nWE)0b(X5Zpw|CiKW`DJR$<w+d^
zoVX$sZ%-a4W|(fyI`9|wziwbUB%ZVSe-c9`Y{tds$)HD?v>Hj%Yg%^xi$hA32!XF=
zj<anQU~DeQj<fPVtR*As7?vvceP<~0=FPB$#c|&_;1EpxK_j;p_xOt^dg75G+Si$W
zaw1kcgdAcX6OA1I6X8C*c)its$kP8ifBxSQ{wEqUQzuh9W7Ge;fRxqdmKRhdXB5V#
zWLlM|=%f~#fg<jl?wyL!k58%3$jQ}8Fh<Q!ii<IdPsmHDO-?Y5Fq5e`1A_Ss3mAvn
z$J@sl(R*fkW{8Xr4pE(*4i3?6&<i};(fkFWRcrqts9+TdK@Y?pAA<n>?|}agg)hBu
ze}J$60hL_?0Wto62K;}f=zjzFPx#k9*3O&aEqkvj#{vdNTjUnXbwxVn&Z;5hP9~o<
zw;J^mFH;ng76$QPe?*D;fbwWZj=!(HX3pITaf83w64!B-WMzT3OZ88;OBHrM7YA*X
z1+tEh2dlcE**jk<sC&OZD!uymzXuKU=pLK^I5wNd`>$^&m$f}V)cW=n9q<#^o{g{z
z6jU4!y31p^k5-L|el`nEz~uXWnZv94beC56O}%t0z`xPG@0VsdpejE-mWygaMGAit
zQZ4I+6{kxik4N<@H4|o4)@id<0aC-#-O7OU<%iiy2`$3a&4OtT0NZQ^1O(k~?MZ?4
zs#wa3Y17OGcP>Xk;w3Fz;z_HmmU$Fe<z+<<*t67vpmw^Z^3vK;`VIoRX6bcFnHH8v
zlho$BD#gD3UT#Hd1>TgXxN}E|zOG=Si*I&vl=zn<dsK07hlXBJWtAiF&Q+v~c$8{W
z+a4H)*6gbD-_NhBFSy<<SUr_<5{udfgYG{WuIhd9`D&s9g|>+yszIO~k-pUrN<Y9d
zsjY_pXdg*r{@p*FIXyh92mE4-%@59#VNEaSu51;F^^{d^O0sg{#MQIt`>`=BltBCT
z3C~WNs+k}2@|gM~A|R~AjRMmwtMyY<rmA+Ep+7Xi5rA4<XLLniaZ>LL7|piO&7l4G
zP!YbodaRV3E|bs%F6L!?SBM7?2D14Cysme}thI{mOc{PJI`uSGb_G~22{d{FKP8y8
zjjLZi6{UlDB#AwlC6rdeeBhaoKPkB&=UPdD+b!UXu+2IEF27O)i{Tc}l{3M~r+@^h
z9xkiKYAdYzKQ&7(;kehD{AZR+rt9$=E3fSm4MqOsHcQ1H&spG483vdEA_0MS*VsSL
zu6TlrO-@gGLNL2O@Vh<u5;kA!%po`69;jXTf0yUAcM9P?vRi;oSkqKh_b*a3`u6aj
zL{*`VskyW~l<isO!TM|sht5k%V+c*KvkaNu=(o%*=Oz09FWO4Bo6{|}KrRU!)%I%l
zxHHsCYi4qpbRcSo$gU~if=aLv$mIj-pkYmVUmD4C5qs~<efo(+bTFaZ>nLfVKs%E0
z!Sy%<OlV+|YmD}#<1p^fTl5T=Ty;!X$X}Ou^Tr>6443%YXagn)L`XmeU7FmzC{MyZ
z1=y<bTR}zw>vjIf9Gl$QpiE%6d+dzVd*e1E4=A{5b5-fqN_*t2@l7?0_%EgH(!kv!
z2G>`cWK@ZATBVm}SK8dbV%mRJ>=0AN@~ITxBIQuS!U{E(&O4b6{-N>*e5Dn$3?<{G
zIYqn)Wlw6M*{@tUn&oLE_s1VW89p&u*7Se3>CXDsII8g*g~!F-c~lagJA*^PB&pJ@
z)^|@hc2&|<A9UiSpl<;CX4fKVoBwIi(2UY_?P#5_Z}`VCHgA*wbSf2AaM4A*$;u$a
zzqnVSnYhdmT}X_z%S~<Zu7sNi?w?4I%jqNarj)J}jLs;<h<QL4K;Bz{;Vb{mxkn!U
zY7gXS=Nu1VWReNvE}K2b5zi~TWJu_-8*H*PLXH#q(hw75zKV?iW)N*~c3b0=XQg;3
z2-}Z9#o2(R&2lCWrpu>rd3iy>*%{a#7V$@jKf4(4_dY~yUWr7QH7^W}2V9OhX&vFr
z|B3nx#_ErfXWqvE^qc$_Ong1DS=K7fX)<+C1|n@WQ1|f2?#I1oau<;;m}c#Y2-@hA
zlP0>@GeWXMHv+%k&tvX0b?I#<&Jq-38~PC+_b~}k8|L08UJ?KAQJE&i?ZN?fW&Ote
zJo2)V%SbQyKAyVORaM+uzcnEMs7kqS;5s9IZ2QYbJ5}+sNEuxK55)7WqNWnpx$g8$
zxJIWX$2_okrRJgp8^~WW9<?YoVy@gLak;T#T$HYCaOdc{M*Ab^5B~#uP_lguwZW4?
z_m;`fn>KuapdU!KKbXE2Ny+Na9dJFX8PryibDC7EQ))2?<!lHQ(KGj-3M=2mQXuFp
zzl36rbz<2^V#%w$sv4k#UkX*;T5%I7o)X{soF4c0hbw#F>01!a76XqpvAN)Q>-zJw
z2!yQ6iHXHbZ=oWrABP_o+Tl!<i+^3!kWyU55Ca)YIrDfd@S?daKKM(0PXC+?h%kzV
z_MtW^#Ikv4)cXwE+*?eeXSRnx0)`jtF4IxWbOFP7bXIggYvm1J=o^UxlEN=zt>>l^
z67mz&_H*JI7^)ZhB~_9M+x}q27?>>scPJA6B9~17G<-gk>hBKQ4bd|UKFN1|MSA^{
zb6Utrr_)t&C`c|FW(gF-na|}1OdWD_jO4yse{~Z%Nq#y#)O+?#_>IZ+fxVs4zltiA
zVDV_U)oKP2&Fa?kfn*FNaOQaBs?Uj3Z<A4!iq8z21%it|cVa2CiroCb&82$j#l10#
z;^Y>@YMI-XK%q=p-mD-$3-)}n5?zI)?O$hPl8V-n>{LkKG)}<aY|kJvg%;o~p+tC|
z1$N<oX{`I|iDM)?#k5#BbP57rhWNu*DwS9OX>beCAj{jlP~fITb8J)@MG+q`Csit~
z<OrR%=!0-g`)|uF&h$jj{9<{Fd99XD7<XO)etm0qh<!~@6w|PEP|U`9Y~9rf$sYP?
z478SyEo1+VJFb~t>LvT;#Ak9O9EU=TBT=2nsi|=LmRyp-?KKAxwk^=IO-PIJ5+*}=
z<Qh!l$%>}Qh^f3Lq4#d6C4g&LkJ6&Nr$bO0IR=oCNF&>M<<73c+dB;lICZAPdd<37
z!V!>K{!V3{;9&g9Y-lm9M&kLyd4;StE`Mt%H)b;?>5UOv7)eJ@q@dr|!zTT4IygTb
zN&#YexQrMt_rOJPhggg$B{iZ)jjDFXoY;dkB4Sq^hMU)Ywk?W-J{^JZ690<C@&uNz
zCtOj-Ki2?$o}m5^bv_&Bw%dS*F;}IXU#1o!Xisxk{wR1#)}V`36@L{R<`&mJ=AOxJ
z3)*mnw{scQbq;*y`AdFeUm0y#G)jT2Van5Dzm<=O+hL|T%<ZUgO#4jT62F@Rkb8^*
zfLuoJE(jhXoWHLmR%+DCIjMOlOGU~lHBDqlE7&X)@)4F)#nDj%l2e#@*e#d!khPIU
z@Tps@T!vJ)h|PqCURfEwDfy+pfYt-Q6C`CxnOON%yOVg}-n#4-J4~g+s|+>1YncVB
zlrkL(g}kS1;k;^rayf*xm%5M^n#}>S-yRI^+08=A)$cETQ+eXqjp<eLG=+YcSF^NA
z)J};&1q~vAXmIkeg)%C1uko$kCna@4D@}R(xAW^0sn8Z1GMThOpPnNE<t0^rUO`<E
z4&)xtOBA#m_U@S^_-KV5RxaEn;Xj^M^1ahvS=k`Acywd3cinX~NwO4N(Bf<sZp)rU
z?W)Z2&T5yo-8_Ik=FY+2=(NK=+MMl~M!8D%ww!~NV}tk>jtjjzh*HPc4ytQt@N=UC
z7CC13Y1f*M(zx%89Qi=y0IJk&wVw{nxtiqb2SDG-aV!(A35`M=y&UgfQ9_-(@Dp5(
zxnSFLH+Eobe|61{vlKBo-L!6xs4{XlS3E59iGG5vMsNyJ{Jp<XqL;#)3PJ*j8-p6R
zH=3$}&1+gej*JV+EBB_hR5)`YE_BCodIbWhEP{=Mc_VZMly`^NEBJ`#t$O{yjy5&(
zi`wx4k7|$k7WBh2FA8p+jwmE`i;diPkdAoK7L`s%7Z&MFR6VSDM*qeO7i2O;fXK+)
zr}z0l!6&gGLr}qpbG<f}u@b>UjTYF;d8`4+?0*zFIMT#%3@}O=8`PmL)-Nl|Yf&F!
zHUW42!hvH=FpKj5&2AcrRE)!Oc@4z)9s;9!Nt}b2-XLzH@_P~M+ggPnCselH<!X3a
zPoMFVj}1c!_uMWxTF!Yui}k>Lm{8*0$wjxwoy+ci(kb5WwB4cU%iA8rgyISnD4avV
zNix3uo>|iDq*%-No`C=U7{Pr1@`+&Nusj#4OLBrFpW!{CiRkpEBCdY3<@%_JW)Tf5
z<O@lCA@eI?8AH>{I{i3iE9zH(btZ5xu?N9L#bjF}*Px{)$gM&B@~H~L)&3{kjf~SQ
zSeybV3H=Po*kJ?RdZlJg;6?iKrJavYQ{iZ#9dKYgEnFdhNcZrN(WA3jN*l0(K?BDr
zmvddWh#w@_xP82q^IkP)o|^V`hzyat_B}@wp?l_&_%rxt^VEDhSlXs%utCZBCXYuV
z%v<#5FRFpa{T+sP^z(0j{m+fw)0FNkb=wm-{+M&Lb{{6{6K}^`OV&G!*9mw{nA1T3
z@98kcAv(4kC&dWXce?RYHvI(~?@+XY<@kFIiOAy6vqOphOX2UWrY@VI1rS6sU@P4+
zn?s3ojrZF$^((C(aG_D9@!M$Zikw}@;4e`V;1n%{-WC(<NUPZt8Lh$8gfBTd&*>*Y
zz$}*odgX-4*ftdFW}yY#!6g<r2eL)^(srkuum4Nx{`jzUZgxhUJ;6iS7JtKqatoI$
zr_Mmr!0hr?CK<)&q=aCE1YY_e*ER}U0MGojj@yG>o=H1t%f)f{80~S(Id*0{puZ>+
z7C=*}JrYPSyxly)uI|Xpo|`(&VXkXPr=Hf%?gD(Gqt!!|5-4XwnGat&pC-!v2wk??
z&yc+{X%uSvK$&Yh@bQ_!+ps|ZSIxpW@HmRO{cy(|5TXW40cTi+UefA;N*L4cO4foF
zI;vHc85FU@vTqpf8XN=*m_tQaZfHP{E**mz4)qPk1%w&1i~V7_%_3bVzw-q7WxWRR
z!?`Jj6QppOWI*GgYiP{|nDcok8`u5C`8pk@*<j=~*<MpZN@&E~sJOUcUPis4Arq8U
zxGjW9GJqbwV?$*cAaoRwE(l5G^mg|y9fc-y|Et<pXUE*LNjV70I}^{aka&t!qH@%0
zPCL_Ygd*1LNnDtMD`K-j&qf)B^*WkmJqsd|Z49K4fH1nNsd7K~zIaqsVXcl?MKPL9
ztccZ@O=AS~-(~3tG)TR9>8Mr>EW<LL8%Ynfq1qaS^K^Yjtn8O^vXUCKeEp9Gj*d8x
zQ22@)(dmw!q6US#p+4Ol8eaHHT~WJ-d&~ikZLc}Al8E1AOSu@5Fg|~TGt`+uF*B1#
z1~YC(;ZXWyj+iZh9%JOxg<<huq7ftg@6ty;r<%nY^}NLg^)8@Y&)%?)W=NgAAHKCR
z9nBr^^YJI-)I`pD*o<pMMxLtUVO4I$v2nacxKN=TQ=%F~tvyk$G{}%3T1YEVtWY((
z_Rb>N2c$1cJ(ggkseu8;!@FTdsNhIFLIM-&1|-h|P6?Cz7W;{>1zbWz*<2eTVSOed
zs@RM%9{bm9V1T&~C7m>!>qLgT@dUfv38lSBA#RCShT65kNFhdSW$F;rGSNm<k|TzR
z?qp2GF&Ywe4(`-LUI;x0`i8S^4Q~U4TM<OvzCd|Kity8&k3Z`Zh+gW@j!5C07>Pp>
zHenl8khlhzU<}B3SfHX4$K~K&Ovm4bz7l2FS(5WmwJ@D01ycFIuSys9J}IvyUo45E
z+BlUwLw&OWesoKKiH{%9p1h_O1qSMqWxqP@icjK@TAVi`g6=!ZK@3OcPDCU(gs5(M
zr5<R6+Wkaeo?<2-|2U~!!Sen5^zkp@iVVsR9DyY?_KH+xM>S6HV=v9K4=;^*5cZ1h
zIBW(s+~W?j@6U@b;3g9=vEh7x)uSxiHyFnB4sg1@4XM?AW~=wUZIE14!{rG4P5yij
zjvA+*zpu_oXk1;$ZtESRiI7tUdClYM?L~tFjgzv&m?TJ+JY+@<=|&wCTQPR51F65+
z5pF#rN%?mp@2P^w(}nq@a876{3=!uOe<WP)ULrLN%zR0p3S&r*hdL$KTF%bUhrC~H
zE*@W8O75YvXYSew{q5}SHrP})v0Sh*6KTWSGg`Hjc&lsEY(9}@Cu#;e@#LniIsUv`
zn(Z%o4~KR+Ae^(IkrYpnOS{OWGV*@aPe}*^NI94pdypm&_u4kYJ*ARZ9X1M<O%z?{
z-VV-lVRG9ZLe}7*fUUl~9yBjn-**E!FFsXAV<ZEAqP5;Nt#g+!At&gxww1v`-=~G;
zDDazmHTRh2s|e$F<R$2;h|1<vj-!-TY<5z=YP01><pOJ|Jn8g%jTht|#Rp>67p`6o
zQ*_MUxu`WD|HWIhX(#?eSaq(G9ZBI?ax1*VX9B5hh-1KgeA3aN&3of$^Lc(<!je5?
z`jvQ+YuUZnvmuXTbcp=LhU>~I=J)e(5HW*?N6O<e*RD){ldE#ls%2uD*xGKr3moyy
zqWoWFR>#T^OPH{ANAvj=AOwHCJ`A4sZj@`IxDnERAXs0fuc&|$Lp7e*+&_op_cmkT
zoPe<rvhATht#c)(4*o_96Kz;mh9Gp4Jo4iUSdSKT!xCIIg5);D79pz_v!`%U>+c$#
z4mI8_ia0&w-(``P<vYGn_CV6@S}W>%4=n8!>u=rd1h(WXHujW=<77=eo2xe`%cW0u
z?@CDXqtGzFtZ;q9QFqq(1c{l8533wdJWSmA29Hlt%^n}LLrXMkQ>2=4%?;8oy}bkc
zc)8+WL1V%Rgf|NY>u`FFQQ)#C@}cJcaYj92cPSIEBC!8-XRebvX*})CqRVf2jBdhN
zIhmt?pi=Q<aCwIIBzvOmlp!qad($%0M!eg#L(NUFjZ7ri2n~5j^>VcQ>IZfH5U}(N
zf~<GzcpZY#8G8(f)dBYHK*DcQXu|qi=^i+|gC))%62NXt@1{F5GyCQN)JWwksW;;*
z*2?l_LWzx+M&EARpz+Y4L08clD5<LrR+H_^B0E%L$cPh(=LC6)vf?sI2a;}j#{AxA
zLD39GO~iOlc1<FgTAf}<wIT3im(*aQ)W_9&*${Dj4r<Mc&!WS@A$k+_&1bOkkM}t>
zJR={S*rz|Vd}-k?D(mJm_cFG?<5;xspJx_`ujBo*pL<fGf`p|)(Qo9Q;#tpXu_1(L
z_IZ-gXd52x%|Y{GRORzOdg`}$rpfj)xzSMHI{^MkI6A54pZWPUxeSpY9)1ynFrKnO
zT!)`|QHJ_q<QSr|<qs2(yBqjzx`CmW0sJqAJy?3zyyp6jnnG$mzHU%iZY?J&k-PPC
zi8$na!k$ANS4^NBPgG=PaRq+n1M#p@u`L~oyk9|Nj)vA>eTglLXt8?fxq#sWI7<8d
zdnc#uC1ixmgiY#!yi!8JyVkCCkl*+1Zp&7}K720+@hFtKNAG?TiCGdXB2?~SV1|mS
zKN56UXKDJRbaN@2mIGK+tO3$>Gn2?c^JGOb<hNYNgTg+NJCkCcdA5Y=f+Xi`vUc0S
zcyqzcNh+&Ns2iPWO)1p;T|C@Dhu`DF_eH`;rmY6540upn&takxr<QV`_ic^CvA&Bx
zF`!HVDHlyTruorsTcD1-u5E77jfR*c5kZn^)ckah%ONLsM!E0nI$>pYmB9-`p+Uyg
zkuDHfdX~x}m)|X7%b4x6PgLB#tkAI}AG5D<5S@rjk0?B{TkemDZ3M}({uF)u$=;<2
z_6uU(XwSA$ShFp_IjJq`Qm~va$k{!<Vdv8auaJ`!Hn@z6i1I;+w*qkB!jxTq56w=m
zzoZDilE)ESJGG}x_0XWr&MI4;w^J%KgU)@>N$s}EN0@3RNTZC)pvIptV&E3uH}){5
zb^vzS{9A?i$UxU!A@XL(zA6*Kt-GSH6JKdyC%XtC1D+w#rm30UqLsIE9ShoLbT8H&
zlR|+f2)_Sz#2InN>14&vUO;CmY}+L_=sYHYLkkPn8nvs<JGV}a*t}dWFXknlE>_Oa
zDcEx`RTmSM?OWJBQd0XXW-xiN8~MS`@Zj<r%kas<>wG1f44Udjtpk#V5`$<Z5R;X{
zebtoJV1P36T+Wt*_G6ypaRvva#C<`uFisi}1{3{q!A5t_$1GA5?3vw>=_Td|Uv>2S
z+!?qZfV+ohyy-t!GF4l<y4{yfDl1)&lJC=sp_(;I4a}WnXZ7~EgZ|y2n61F%;*4gX
z*^%=euPwH?AaT#2H(==?Uxt(_zNKhytZOszs@pcFj0}EJ3q)LG=9qx3%pXhnM`2$A
zH7h@{N5^g;((o9fNw@d6vP=MBZ19&hMATqn`@&w=7(a7%le@{NjzU&)Pfi3umvn2J
z2xmV6U%uY2=b0PO;Kbv*OEhp3F!6w~U;#pDvEftTm|`|K#y<}W5qA~s2Iki^Jx68q
z6<zgH#$#p1>|n)O2C{Ap$_`-i-_l%pq{j~jO}S^+&cs<ANfkpdX+F^j(F)Mu$C+*r
zIJ0O`ar_Q5h;cVe%~96U2591o%R)3ou4Max>}$p+kuqyS9mKqO3BrJpBwQt+ufys%
z5m7JuKvb702y0!OG}srUpVQbQK_^TE@EJ;EyH6+XPmfbkawadT|NKEf|1D_uo&vR=
z8eir6cOrK)==`eq4|Sqfszm7&2#1@^ad3UAs=QBeL$Xdmn|55G)+`1)dRvOOy2ALy
zndj|^jv$6S2fe6&{6Ntq?%3OO5f-0G89k^t-1Q@nTh@veAHRtKv+f@T?50MyGZ(OX
zhLZ1Z*i(My+5LimB&iV6<ecTova|Dp==;=Jk9I>b`AkcK%CgN#L1XK>Ts_%O-e5BH
zqz4WUs;A{)CfflOIe6{GVfWRWu#7TAA}T6&ufGu?tt+f^^}y3_;CKRusaEL0^4kLO
z`t392D8NK(K7Jt^uh>~@aa+^c4b<7Rk<t2!hIUX_%ZzExsbPmxoUpszhZM)G&f-r2
zMs%uVf1rFoW;U&GiqR{5|L4kJcB`3Yw2`o~x}s6p2Q&25K$cqtiC0Vhm1BLQ0m9`1
zAN8ygJR`elCl;*l>#>13Qx1b&va0(P&)#YL&WpbYrCfWonU$tJ#<Youp8M?kTmlUS
z5KQm!8P$W8<0Z5Ay-@H-#trBW#a-dq*%;~>r4p1;|0lPz6Xq(<h%O3j>a7y*i-G5z
z?l4Y+jQId*9W*7z>i`*KK`w#Dj4e2PEtap2Q;ro;if~21(xd4UA|oWDYlbC(f#+Te
z5)KiEUUm?^*1<&p44g`U<c+n@(^eNDd7r%yLC^d9K{=PMLQW6A*YnrG^kb@k)j-C}
zSle4sSysOhfj9!X0tWrO*UC8c&~Ue#&F3&5A}&h%BwccWR{ARO`w_1KlmFm~<Qw?H
zMu}wqL``3K`%VZWH=L4h>T*jl_aqocfio+uFW!$(EMX7UrHw^`VaojGUVMpWw?fo3
zdO6~{T$A8rI!89ED)0y3%}5xAIGmCy9~8|Q2{KFqIh2%HGOS8DjIk>$&FZTM%38os
zfTJBEC=nj=+0;^;$6)MiM7CNM5_*<UKe~&Ij9oBuPgAv5<`P_C^o7af{u~nmyE<QZ
zeL6fAbTJ%ES<%DU<?s=c%AYKthqxmbQaPjU6E$9s(Iza{GsrcHgywCTQ4J`188%~y
zX93e%T-QBhC1-uCJ*ReC!8kW9c4-OWw{M;IdIgfax#*kUV`ZD=Gm+@;IWZ3w(nqHd
zSuzm+@ZBwF`?wVsCSyYrTnp~^_V4)mw#Bq71-nm-<fWYwAytZGZsd&=@RsxK?LiP2
za%Bf%;9q7Z1I;I|9w`m|48^39`E53GBy+csFJH|Px9os^J+fz6Dp&!zz{+ik%4ShN
ze&5%waz+j2T*xn1X}A!`A%^n{LWuReuHWR^yN{m0T^7){WRyn(FM~TV6G$UH5btC<
zCK#-Vsmdn1r);C^ia%9-rmotL+RbZpqzsBw6%dZ2dTa%xE-@UdRchd`&0T{?98{2M
zTz%GVb<8wY;&?tx8~f*iG4&5yUM}<My6FGqqDQ1=0jHoFcPCarT=WafyH>}2SmXf}
zCY*fjpMh7}lQkV<pU1*}F2lmJO#;MYB^qjh8CxfG>^7D0{1d*!Mzt51GQ1C%4@jce
z;}q1fYx9KGL|173(%mh@SkQKAzebL>hk5g6Wz+Cv7o!;7jyTyrB-9=NbDkB5Rqs^*
zGI^dOxsK*mE7XQP&nV1aNeBD^>2KDTNvcDqHPsGV#x;(8NLCVY^9S}-bphVRJ8lL`
z(DL+BcX0}K`_Pz~#|PeSA2~vR{jCDDiq4{MBs8(si7iN(K6@K6+K-8W>K_)T!mYaW
z@R{ChNp$~a*DCBoJYZv7ZMxq1Q6s`AborE6h}i0(=?$aNh8-V+^enGXqaKQWYF*`)
z&cm>(&)3_`QoKAA?+`Ya^}O}2k*c~C9`nRJLegA}YRh3JOp@Gh0Z2-G1<y6|+Io5Y
zXstV(@^2H@TV-h3%rDZ>qui_xJm$nb^{1%3DC30Gs4I3AmZ2%nRD03nbdq41=$6xx
z__O{rqv1Ko2byxZ{sa*M8cux`tSHo3StOZf={ANbDz~TjderSNf;3m5-mV3$ob1cA
zx3SiTX31vaBr6xFctwuv_gMI#&Zrvx;qKcESqYB&75kvij5%3nDK|0{BLG7Pn|Ou(
z`T6a!2!O(1w>c*xwxag<CxWOt7{Btg15Vw&DQoN{H1c>`y6}?9RPymPaqJ{`VBu-q
zH%^9a_MzddHy6yw^_;V{RDyzPhtJi6UZL0jgW!j6(&6Tg{<dXp94xox*lM70O5;i7
zgJEwg%!sY5U}~N$S?j<JCr7hu$}k17kL8t4>y2vgE0&-X^T^@hf+Xc7AK&znDeiT5
zs2`UCvtA&*=fCkrmn1x8DWlVm0|*D5wT)h{!I-U4raIEI?#q5%{PYBMLd?q!Zf+?%
zWOX0y#945vI?lSHOMEO8p7b4?neuFheF_zN3GtZ*YfPqBs!`MogcaF?kQM0D5RIUp
ztRHJs2g}f-E&O?OX~|oC*i8$JPm7yc=nmw8RYfiQt|abiF_EcipogASc|4j)5IM`;
z%92X;(VCf+k(#%aP;XYw0Swp2n|r;1PLnQFsHfu#+PtT)DjRk-81i4zlp9teiV*v&
zCrbQQ$R~eS2MfHs=8l5_GHrp=j(9zwwx4!IjgTNs&|A*S$+*6;O78dM3zN&l3nnmz
z7@47t@|qHxk7pctiD>A^@Z9VE{g3rbKu!LHn=*nK$sWKtRr$s$*-~exi>cqi`ejkH
zh8|>WB4_wm_C0`iy3lr;%;u8fz)RF_YEBd8y6e8bjx(kFLm6SKemuN@jiD96u1JGY
zYQ$5U-$XV>V^OEXgE3YoQ*aLz3W1c)`aH#DEIe~)O)Ht4&*NN=Mhx|1g)ShiIyi)1
zv&Sa8duWj2(he+G!S3)cVzmfmO~X6e<G+A;@+;0$HUzvjfb3tMm7%ylAIgxNH(^Zj
zTrXFg%Hm+ro`}AETb~biE7WfaH(!0qWoJV`-4?2aEQ864I=ou7>q~mTa#OXhMX6BN
zK{`zxpMi9&U1=y2x)W_5gNITMyf<du%Y^4b`{ysXDJUpE&cB<BNIxD9ygk3R3O8u@
z5gLkFCwpx2#rN5yafs7?7$}7Gg4jD;*C~x7Mees*$cWAdS#1zIENcO)(=NF6tg4@X
z6W@qZnl0>^HytD9I5_BqPgFd2k1lf#qukJ<-`+~!Za`27YvWmkZjh5sOnkHIOq5Ab
zL|960lOW$5()*+o76faHF5|C|3{N4J;qz0B(q5|$wT#Gnt_b3if$7R4!Oh1NbnM8n
z?Rb`wi;i6mBZL(Xgk|K8fEi6zpG~uC&(H;S&NrFAJ>r`yHf=Bbi9J-+Hj{!U@?jet
znC~<qc{hl@Z&#ya6);WKLu>I?mQl^tc$p4KausEZW|>Sfu?ehAZQ856p5Wth@|ztm
z5V||@euJjHLZQZPZtb@JYi(k^)=CKtWpOL^4-A(Fvh#IxvEFzs8S)0zw`|dr0DvgP
zgjJ4r_ei>uq>Gaot*u`7Q^@nli`9U8Mae~OhvNXvMIzI+({rGsV&fagzG8Xkm;4l5
zmCmA|a33TS%INyH5ms+fg;p2mm8(Z}7=CbEkG~tpVg4qelqB^*obvOC@kuXrY_ECv
z$8&Vt4)FN0Q7cHa?>VMN#;#Snm8;&*HWnKy0AMRZ__U2r1SS6^kJSBjM3qq5xa4pz
zW?i+~<nE%xWQz2N3D}nWKF$~Tv(wtw%$eA~@{ckt&LykKvEm|Smnxvk6fY=qJh=%K
z9knxQX}F}aJ?LU=<|}zV_msC-YUbI8{WijOqD}cCiD71}PR*0|ZO#?&3<gSyKWLC)
zW2ckFn@y+IuS6*@p=z7OMs+ayYLjPVTF09jJ=-*A7&$7RVHa4R1NE#Urs7#X8O$V+
zSeQ_+JNRGeCfEL#bpl#=@dA7HX+oFHzE%f?*gZ6N(fMAdiUBR7OgCtHkAOAh57e#q
zPrHk*MuFYFW<QVTAU}-SA4Sae&_1tiZ$9RCRG+j?lk1^o%bQ6}^4q$<m<U|`2k_iw
zY_OI)u+Op;U?ws3ZSwPqCo|J_<;Z6#{b{5Yp3&w!v4o+NuVPg1y}Y#eh*6k<F=;1Z
zonF>i-1(orq%qB)6=Pj#-#xgo*2=kqBf67#%Yy1N@d;90&j@j4*UBT1_7I5nePcJ^
zznPVxY7*<yUQDYCOXej~7nXf7R~I(^h^#~<gPD~@I#MTnBl>DgfTO0O;3EF%$H3xF
zi@=UVaa67$*G9=0nu{l6VI6^mF5EM_Rc4=UrV5dC@`GsSqxN8l$&zt^v|abpi$8=f
zNA|j-F@03==9$#RGo0av+qBf7kv|62=K)=PSn?#O7W1On&YsocaW7VdaaH6>c=0`l
zZleB8`Muto+X@<Ob2*x{t~?}jOZW}^NPg>sJHL!sB&^v{Wy@@1g1;zy!VG=Jey-f9
zIMu3YpR1N)bkS2zG=I~K7e<W?`V`r^cPVG5nEX{6_fbyQ!Nbw#&m%P`E)UL1{N|Bx
zvXHgJCDKhir8wf#4=>ys9fFyuPk>($ovRDM#ABlM+_pjfLII;1HbARMCVDwoIG+i$
z$=sAje~QaVF1DKdmQdABA!6Sa*Bw6m^#bv>n$WG|Tx5%QzB#A#1?hbnM@`#XBA~zy
zY;(!IW=!^^`N1?IZXc&?Zqc!z>#9NeYfl=QRGF_N6P}{(953l0aCwZ>b~bT>-1e@7
zAq#pqIkHLpN9o{K9C}Sem_Z!{3*9?ILeoT_Hl67f9DmcWTz|YJUR4_DL_O-5RQA>U
z_;D@#lSGpT8J!27p7+|Cp6aTkBCV@ZLl^-%-yAKl+qwYbBa<G3E7{1wn-1Qt52(VK
z7vh_KLW)xQnY1Y)K-=|{icvZFAXz?Ue>wt1hTz2<`r~%``2z;A1wh|G!D{BA$H+%l
z2X{VkIpIXw@3Whr)R@N`0nI1bL=fy8FGBrYm;H_Cp^Cs72>o-Ke%R9+(XVAAb#x`k
zQQGYjl*ex>K<>(%q5U&58ES8uED-5Rz14)PjqPUfl%sQiAm$j$9Z2BnXH+oB*na40
zKsB1W6JfVwAev#<Lo}N=W%B_z?wklaJpiy%&~M-4)PLmanMdLfD8Gh0D&`tvPF^h(
zQf{rJYUVOu$bc>+tL-`t-~U8eWUH%xji<io(@yJZGhLixdh*nCw_pq;F31CAy6kzr
z0&l@GxI#h{KaHC2+G6K=0Q02&{J2%+3?e_7rYL~mcw@I3XAfm>>G-Ytk-v`WyZ&O+
zx0hb-34H5)-ZTR_DRqWi4@LQ2%zK$TvGE$_eM+1m)wl4Cu|J7j#J*!tv@L<FcP|43
zeAI`?MYmsX%XSWgBY6M*XX}f?&}``^%%?3hzX^G<SDLD4?f0A3jk_r+u8x^YiAc89
zYGMX=Pn{9f!t0&>x1$@?Yr(O_%{hzLs8;sAP#V?`xK*g%;c|stLO_<!gYfp09vpmk
zRvB!PsIjHUb@Oufe{<*-3Gkayczl+C`vo1G+3oUvcvvFz_Pw9bU&l1*p`7SKHHn2{
zB-U{Oi~Y>gFg%3MaW#7iGCaIm^&h+U8%VoHUCk449((f-cLYda&8%A2xGeY>dbt|k
z^_OENoM`2>liA$P$MKE!xibkNDCrr_<N*?|i^<9N>fV4*O8;16y8okVu|hTSLpzZ4
z54}>}T33antB1cHXjojZ;2W=gzjIu)S<PG|ihHmKxbvZ!#uSIRS5~-6La30VS9+zK
zI9#ndAU6JFWeO^C!P6q)ivoWtSJ+GpRFjS04+G$VJ~C33*eM!u^+K0i;vV515HKI7
zz|f}7=S7WmX|;CUhh^znP`hwA+@o?ojV+Qc8VI;}Jc(~-KzGtmiKos|*w&JKbV7qK
z-I!)nq=1G2!@!Q+1Ml&5dxBBVv%Jww0>_kWCtbb1RhPBmn?v9J=gw60#$PPKf?Jz!
zRPB~xg@k4QbJM1^yzGif5MGOj$lES0ASB}aEX+FlcvG-uwE137)!$Sogb~G00IV2$
z))#o0eexYKY6v9IS$cb(n9IZRifsmu`!t)>a5<#IC8>fSO}bb?>#DOlzkP!7d{Zb8
z^TN5$hsAgG7#=s(;g;9KKXF;y!K0X!yAi6iL8-WY+MleDM{h2D8dqFlh{l$c!SCq$
zqL^Swi$Fn=Qk+}0HLJk+TafBEKS`LaM|-_kFZ3Pqnvso3@eJv;FDArW)wAwDmB>Tq
z>EIJzqqK!*tT`(RykCK!WIgM8-8QHydKW5tT?@@0sALW`>Q{EH{`Y9nRerhIMYlBn
z>uj%X%p~{N;B${$3vI23t{HhhQyMSaS%;~RgpCJtb^=}vR$;0$-Y@|#l?t&fNTZi|
zwYm3ntLo089@Oyt27g899P{hKU>g&{tut_*XQIGOm*NU*&|f>`ILF_&vrbE_j#8#e
zprnEg#aP{u@JmV0V9hn2jm|8f`eIT4ZiIfFisGZS<+!z&#MaedL@>syhSUd2;h*OK
z%6aDPO;f^g$DoF<vAbn!*x!%r)b3`DIPTl2^|B5L#d?UIlrS^yPQIWHdw)j#w%5r_
zl3ZhdgkgjVkPK=A;q=&{(ggGk@|sJ{(r31jvcKHeU7WIoZ7g572J#ZCq%I7Herj*I
zo<+pFTtfavg_{ZM5Ua-vdq(wLlImDiJR)%+q4}b<2aO)2m<BpdjxVh3mM9rM+0G02
zx}guvFl*Vach=hG`Q!`E8#1^g({f~6`&R&SbOYTDaK}zXbauzO+CW4|5Bx@q*m%1I
z)JbkbT{O3o`N(-G`IZ_cZ9IMUlmlIF6#Q|XgPwP6(dEM^L_a^jpZ&}lWij@hkP^4B
zxTsu8Hm_maD-gy`+_~lK>zc8H`ThN|c_Bf#Gb2p-)2$0}omlqT3DX`yb)3l^Ph(0R
zlQm^DXv9@K-l;uC&e)&B&d-s52M(P;(N>5M>W!rd^x@(6o~Ea8E?EK0a%fSOgLkJk
z$X(K+C*ov3>Zi5Uonm_VBE9@l8C(`?`Bx>)y8}#fJ0VO$3Hn*bN#E6WQq8C_kJ$#$
zIfIwnkB&ZX-O*+v?)M4$_wIKT`sV#j?M-xqv?8r^pt*)0b8>~{(zn1DkgMMbCR|l#
z1#2@NM<gO6h4iwt6&}19z&9WZ3;RJ8F1nEscPEw}L8E9FO<qAn;JUR1sw;uziu4t3
z(7L)ip@&qX--#5;>&1=WW#q`;?{6)Mi?9Mhx{%EF1=I6SZx44=+@7rcy|L^1lr=Qz
zsiZ@QXDePh6N_h?$6@*t_IlsnN4Vav7r{Gi^r6Yy(a-WQGmIJN=U*GDh_(9B^YM!e
z?X*IPtqko?ORGexty#%S?uB<?_AmSd?e{V|rxwcbaQSGTzp_&rKo~NBRx-V&j#~m9
zh)-w(8QtrtJCHrC?VahU;&m>Y`3}nmSe)pWm;}&ku~f?)i_ij=&s+26+n%~>_BROm
zCDA%^M{aK9{cAUv4J?-shW53)3*93v=zTx;xs%7slfL8mcDXfl0_y!J>_Dq*VMS7j
z$HYK)O_v}JpluDtdpNj1m+L+NiHPmJeNXiytV<R^hHW2L=YLM|GNo6IE%cPo+u7N;
zmz(B%&+oiR%Av*HUBAq!$_fmLfH(A)#o`Bu9!!hZ{XVx%-P-_dKhg>PQ~vA7S%nGt
zznGjb4teZ&{fE_6FE(k(zdwWiPRau|TU*y7a-TF2QXNZSYmFt0gJ+Vg6mI{OtoG;>
zS|)i;&yx<%bv9;FqOG?xi71z4q#z3Cn2Tuopikha7)Ii&YY-$dMw8STq8x;^@Z2<D
zz}+7F)3HH8#}a8aenmo-$}0ZE$6#9AhQZxJgH*;OXbey%4<w}5*=)qXr=_XUf-$7_
zQgW2uQ|ip2@6~D~ytr@o%xRww>?tJtn5gIVreYRmc;4L(SVJu$XuV$TeApAH<K}aE
zL6PV|MV^m3_yetT;fz#=F|G+Jn?O1T+nI|I+&U_H5=KZnBW;}8n86LH<WflJCD2=5
zj#Hw#!IRe(>;bVi1Sk{6*a(MO6M{lur7gze@tL+X>*~RO9_%^jNGyh8OLnoHk*k%P
zF51QnUF?Iff#~?4v(F&nL}5#KFIEJn!$J%Xp8ib?f7>JSW57R5QpFE=H-I*kvS_V)
zxihA^DYtI6P5^;KY?mJ0oT`2&Z>KgLNh6JSEr%11_iO(mrYHo?DZfrQz@I|N;Sk9u
zd8-#Y&hE2H^xQr$P|-5SF5~N!mj5S~G);O&tn5J7w$5=pn@L8u{<`5(iRx_K8|C{<
zQ9<8rPFOaF)Tid2h}*fCN`p_3*)}relAQqj?$GTFjp%#d5s}@vjh82uO!xr%Py3{P
zz=eF6qzAfFa-MpSs`T4yu+PiA$crl8IX*G|`!I~Wc>@OjIa{^O6UdL(Ly@^uNh<1x
zKyC_&zB!7=!F*7{{=ct;EhJw9yi=&4tIhuEbmX7~&u;q%uM+L|e9pHuE&sIdAgSY`
z+=^MZz}W_BZJfz6$xGE0fDHinNwmVZ!0<U@V^g5{xoIb3-NbWgJmQnU>we5@?jGf-
zKc2sF>2_Vd?i7Z~v9hcl=g+GlgR*%h#AzE#xS4R#Fp-B`{dXo%%?v)X?-u=C9OOkD
z@wUp$mx4NpKQLU*LQPRm6dI1Mx|Kczjf`u#hxzXNpTD^Oc+0K`F|WJ%eExl1@g06O
zUUY@UF`jOQl9ML^c~Jp?0y9`9`e>dDXIu+^rg6TpDO?<}&K4gF8FnGCBW&aORZc(u
z7+w4XIPETuHi0*DAkg3KD*jEBV&&sQ65qZL?!Q=NRN0^R<Av#YKTh@gq~P@02^;#w
zis(}KIY5rejJ8)I>Nw=*u^T8txHQ}Y#x4T$V0jWbDusSmOpL11OS5wV9<LYJEX0Ph
zH#ov!u`3_n#^j6L>jwHF{`)Tm1nNb$$?Nmv7x;hf(<H4Az-nRw0cq<10df5Q+^4a%
zH!-!*H@3HRFm$qXwzvClt!A6|+kUg{UoYm_Z{H~`QM)P{&!(N5b1C@8``qvg=w^@h
zo+)TZi-mR!t)&=Oee2lwHHU~2iqv2Eh1DLOEME)!2)r4%(@awQuWO6J%0kTy!uqd&
zNu8dmN4vYdZh{Dec>g>SirycBZ7zoQ<J?bwx<6kR0GcXn;~bYx5>%8P>pL`W-(FD_
z8mRmV`}rM{)>@CQaON&*DqJ&19&TQ8m&`O5wAmr#)u%Kgj=>-2IBX0J1t>u)52dI}
z^o1<IRF&vf`_VU8af{yl)G4!?LB}jz-yQ8y!36R8uo>~5!($y&?%k7n>_9NXvkrsl
z`VK>`=)TkoUEe{enp&TZU6L#qqlb&*nhw7N*gaj0ybmA;&_nc=DHN*p@R{V@_gD$-
ze)c(kXUH?`BZEz6qX1HsNto{uU;;QXM{%X1vDL6j4OV|uthwllxjZr!ZAO&|r1Om2
zb=A*plu2{%ZwFug`T1?!aqAYjq=4x1I0>7HC4d)ET{Zz(;M?}rVlh@OtcsmzK3WC=
zze6FSw&Z12QT(~x%kryYzv4~e08;8MH=~U})l$~TRzu%TFcNx;Oo@K;fO}i&s<FT>
z1q_4!8y$|0)5ZD;0`;sj$!IJJl0|BWLE!S!KBB{zCTXGu4q>h3Nwep;#1;e(*5Z}a
zYK%NM@F`myH(3xI1{`)taz(MYjxz&wkHJ&``$-QOlMu>`kSqk&2NGv3e~VG<n{3bq
zUd27SPYc3B{2eXLjI7qwb5{J!Dut#J7B)Z~H=W=hO7?>ebYQI7?yqxgHNuOJKUjt-
zE=Zx1go{jFalCR)p#!mY3jQR};33#&Z$p#Z{0Ehe^R$Ldj0=(tJb(XIBm~JYMAAeR
zPFqzqpTaAS(XnEoYe#}_YOSMyQb53lP?c#L<AQ_xfxs&Eqh4zVdWv4i7PV>-aNuDb
zxa2#rDaBnI^5S^!?7fh`vq9Cx4@4@srI>VCd4TlRV1fd6P82rcj&=7DEV)=Z;?%Ie
zS6mc93d>#FkNsr+T&i2*GRJ-{{E(yOGA&rhhHBafQ5)+%$MQ9cwo!UOfKvpcW6VFx
zex_6GWyqE+Y3bhj)rq~q+-=hC?_$?c?`#Sx7kKB{Uc6_x7U4r!o9p+ZS+j%P6z=W%
zS@)O6%tbi(2O)aWm|pFO5n&m?%z7s_Av7kPG3&;t@_C7$$I2Q}&4C>f(+dn84SUc4
zr2D78qrCs7ewEwY*EsLM!|%6wdt>SN1VbCVrMbiaba@7|QNad*ThJNZ0_wci=-dh1
zZT<si1fy1-O(&3nXj{C3Z7_N@0Jx-+p^%)6D5n{w=FZwK$OS5qj7fPU;FDrQe$fB{
z`x5F;=nL3vqC0!E*UJP?eSFhMFOvrK2v0d<scz=f`|$L<on(gt*MD>2vd~k?3zHK{
zPZ~1>=B0C`Pv_B=4H>&!t*P(hayN)KEQ`+5bXm2>tfzAtb>V8Xc>AkXS=Sp!bv}%q
z0&Mdj`71ur4Owm`Q)?z!8Rt7QkL<wOq*>X{DiD_?d+}46$uKB<`cDV3AEzA>Px^x%
zUVjTzXWC<F9u3Kdl9HHjd%NBmXe1RX1q2AtxiO**)JiAMatt}84zOOxeOB01Lq-FF
zxjYDqB@Bb8J`$lD)ne^5<=J0>k2$lGYGsnH9yP7t)pn$j$2+^3a^=0ucZcK^8c5h8
zF{tb8h2+owoJkI@pIN9_t6hR-lFb<``l~uTBSV!1X>8qFGwEw6eoys8%M=_1GH7f5
zSCR1J*hSoJd}T>V(H{`Xz2Z1xY1~rzxG{2Z_fD`v$c$n&X+clcJY=+8MMAv5#vUJe
zh38XX&pK4hVB=zPEVNx^MRJionQaLT{#Wwa$EH2OqL>Eee`?uMLgI63+66`yhR&a+
za*$w*2?9MN857Kwi|dGg+EO>mjt0{x{@J7Y5yu?e(vB8s6j!JG_tUBWNh737z3o(^
zc?{&mR2H#5oDQCx>F=noGi^<?qQ{Pm1ZAAwATX2!*Nl(O6h8==LI$rZHWI87zk!aI
z-9*A_uX))-Y2Ov>D^{hfb*^hRsM5SNCi5<p^T)dE@bX73nk#7!RdMfkC1Sytoq!RY
zA;ga*z*88Rk+3~3uWSoA^X)<S9g`r&5vdS$-h!1LQi=Wm7Fi^+9Pp<0&3UFEj7OZR
zxnN7#!ixX{bp&oNS`Q^&uHLj@Jub3G7Nj;riM5-DGsZp|veVOKDIA*u1ph9}X7!EO
zZa=|-MXUghTQ$INnDCu-l<~P8tj7?1jmb>B*z(IY{Mf8)K{vN*t68a8S%s4LaAwcT
zw%|f`BN<|JpfxHQ<|E5*Q%=Q~bRjkzEVlj4?e#_&iVF4*xcAi7cLH&t$U<+;auut%
zZ6J&4fW0<A1Op$KO*R+oi9$5%qYcTRl)7m&!QvW8^eKkTQtIyms5s=~{F<Nwq4&rt
zBcUZ$VcOAq5x_ugAw(KA{PsHMf0e9X34~{fV}l~E<%VQet-TKW>xro-!^;Gq!(VG2
zqjM(T_K5vSi|qq#j{X3KWX>v#_oe*o+o8QEX3;|b34(7;P$hR}<ltN0a1*&?K3H(O
zB3p_&@1YBBD?}Rw&;6c=XWYgm9zCmTec-?b@rN8<5_}dt4jaYf@B0$U#UO-nS!X|1
ztwrjHJ=*c*_g8AKX-u@fU19rmzUWtbCDhq%r|XqrYpGXs;~lwi9{?9&L2`z`k8A|8
z6Y1}|7TU@+kS%OkCV#uR&Zr{4L#PJSP|Zb+eV78P(;4hK<~UL_X|P%t8Kzrh&LDr=
zDZjHc*nvO*Vol}$!U2ga^e-v|ZM6S}S#SRSxUAO;Jh0k8#dME)cAJjBKh;QaO*30=
zI~*?6mqY|M5ofDBDIcB0(Sz*|X9xwW0T(iFCt_C`cC5>M{?`P?$m{^*h(El_t2_uF
zi^1Dh&(JjoD{0~U*{`kYSjcwbe{Iid!yUcoX}fgD!~c#1!|%^4Tnd+lfb;i#I^$h#
zBuR7gXxW?vt@6BrH%WpAIP_JB8_dNbl&eZiBiK9Li~)zh_U+r_jpy-@Y!B1Gz@Qsn
zx29t?$V7u?nttoxgK8#Z3!xFYz)Tu*(g<cWVTFL9o}$fHS@f9nAoPTl(34PX$}cw7
z8Id?Wlmc@#fs)geht*ZpDCLy}W0RLvFaSeZ1+f1pF|C@|^mF(5f(>w}s?sJ9)FVqm
z%<!wsKwn#EqC1a-Ev$52BkNfy%7oVOflfGX+>HF88pOVnsDkSiE7>|5U2ikZl<y{e
zWaV;)=3(u<gT$~OLHMI%56L|N5;KA8R?z;z^OPU6JkOb-0H2Ao&|u<mz(J+m@v}b*
zZ|ujkKS8T(pLyac5)Elglzh@f+lC}7DHO|-lIe~=@JLMb5B0x=@926`dKL=bMaC)S
z1!ky3FRHju8&tR+&bjJqgRJ5%Mp4)bTS=AH>sVqXbagzu?q#H<03rQeNgBOFN%UP-
za~4&YnIN+c<Hpiny?AFsE0eNJ^x>}uJfHOcMcF&W=)#2Uns3{-ZQHhO+wRr2vD&@b
zwr$(CZQGb{?@Th2e<qpCK^@dtrA{ij>w50%%02&xlgoB)l9}1W8U}zgq_LZ>EDjn`
zCrS2etVR@T%@{yL3-QO$0DZswrUE%nSza;7U|&>4eg_Vv{1&zE+-9=Yv9w&fa!@Z_
zb(hXoo3J%TpV1}#wcF5((>93DCRu*XFFUMJajBhE)fBio>bhXkmSo`(JZg!~*C)oa
zNIma%B<F{N(sIHj<8!+DK3(0wO_2r`<fhdZ^D5KYsFwSt+>3y-svPBEK2ZP7sp>Fv
zrlm%oVsqwD9uBPeJ610E*0j_vV3_-Z7z-r1$qu{o3#S&P9TgGKtAj1C0332s5pLWU
z*+|TcMHwlbm*mKt{sJ*Z-z^F2C77kB!%c!Mn%u0A2=r?~!hZ!3$Rkw`??PK<p_v!7
z%?O6_;<2@N60k@^W4S=&{vlf)Nrazm`GhM8v6tx;r=PC$5tsi^AieL9Ue=5DCHwDi
zq@nCL*<pp)gqMi<!poqTu-GoJ);Q$59mr-YCFAW)2G&`Jt>?0y+L+326;G~GYxs}0
zM&Yc90sjZkAU6zNKN2Jjs>T!~WcWE!{+JjJKBW$9d0GEN=0umC_EZa5X5p#ov14Eq
z|LXFhNkNGp+>q|Zn))^{R-{#=BqTY*s~D`8Em(xZmY7s$13aws!X&Fvk2am<)t348
z_C>)~jyg7=x7?WfqSecHy6gb{eAmBx#{|u8J%-$EcS~?{Yqd6Hd+n96ijw1!D24N-
zS%j1de+m`th1$E}XyEYXPXLKRw4+B`2?ngWV{bNk0JkAhPaER3Q3i)ktb_8HIq18x
z*xUViqk#ecx5U@icI~a-XPy|qZ8vBJX%I)#4&G*2SJB*c#)n~qgP@1!YX&D1V%Cjp
zJ&NxRA0B0vw%VXbW(i<BJ(9}C&VnjxPjDJJMtp{D`fg4)0sUb$eExJag=R=-7(!jG
zit}T6N{di>c;CbAlxtPmMtrjRSrVxiImEs%sTh0_q?VxJ;f--P?;1qGz||?8<b{Se
zNZ>lsFh&jbT%)fVki2S@&E(I7tmUI3qA+IbF3eAYRI}ZUzdl+Hu%w8;lei!pV>7J`
z>!K6B<O5s2F;*zpL^S^K&?$X{>GAi+oiwwJl=4xZDxCj&zYzxCfOc??_jT(CfVp+4
zG+nj1j-WyJX@`G5M&@MBMBBt{7(+Wu&^QFtB1{Hu@eUsKy1&?_i=x^pf$gkf4@u{k
zGg1(%6#i-UEZlQ=10XDo4MBfAhFI;t4VhK|<2(U2q)j=S@*ft<MWjTDpbL$L4y&kw
z{`_N@gx&62CJVdbLePj;ueCbw%dg)f=r20*RH}4WJz?a2$T3m`#-R?8gQSjoTCC-7
z%l0PSz-kusaT+e1C4uL6a21Xv(Bp}$JItzfd<S)FqL+>yag3f7*2QujTwo8Gc3Hv>
z#9|*SEjXME-q&|%Q)DPlSw4%)u)AD8aM?xIjZ03TvO<<4;OW1zW$f4-n(jt|rX(JF
z<Il#m;$9y?a0saxaxfUm4JGBv(hoOlz~pMVqOrPcg{hxPvX+QI8*`$xYK3c>YBYUU
z#yG(!0DE80L^wFcdIuKKL1MO%`t@@B(+OK-(oT$2a^JyB5vh-~pV1>uy7E;Y&_yFI
z21fHB*_Z@j>I=k7@i!@gugC1n{0(^BmGF;n?ISFh=*N+Ul{$;(OP)ThRiclbKV>g~
zQSZZnJ<EqRN(QoqCzeM}esw{gf-uLju5%{7#vg2Nb2%i17L{X8Oti<Fdr)8Q?)@Ur
zffi>BBf>;`xsTPX<GrFy+mvdo;Npq{BeQm{-Y67#kPOQaOj3^AKRUTErYIwboVGR<
zTC7!q=<BG8_jyR=wkkEdV}*INPnI>g=kI4%FhyLhtog3D7wv1AyLo<}8)v7Hv9~3N
zJNIWKpXWil=YQx;rP>4oY_s^n@^;vlxndm1MZX;CM)||E<+PPmKI`bAcO*RjRX#9B
zFF&@;oPLH*+E^wWv$Z6`f1~k&R&H6eenqW9P^;(+aU{gbE19xb)5v3?{7?zV!Pr?x
zupq@Ck^KCKoK~I?R(B=}vHr+!^HsX@mdN~`wwn}?kp2F)2x4`_gLw{Vlo#azy0Wv^
zuv<%tXmK9Qw&7^=LxrJn+ByyiPe?9A+c0eB39mhShLLrwlzJ<-HRep~#a+R4dOYz`
zt3CdlP!w?>%%T3HvE>8blujsAaa6Vk@#uSWbo^`g9(3xqWzT=C!~3p#V6%h(_%$mu
zrK8x8=ji$#MPRPEI;{)`t(&A8;PAgIg434Qjg?DuL<y&Gy|!Voox7MR0q%ngoDn71
zPMW^J<9ioJUv^#O{$EO^Wy*J-y01qMV%3#s9x`@qoT)=#RjaZY@HL&It_JzB+k*w0
z{3K^M7snR^0-bh~k3G(IGqF&=tAzun4?I+ciV`e)WJM~4X2Y8F{&RRg0+msfK24?8
z7U+k^n{XLcW4mJ<7e<7W5O!6z>nC9;Cc4z>jKnG~ug<*Ju}j!M<Ndpq{tsx?Yb`O?
zdkCvCwI(icqUQ)>5sUo`x$pTA<{K1T*NZz=Lmi@LF|IiS6}h--kvMYgu>*E;UM|}M
zO$dRz76%SMJpw=owskGZ+`T-6TK8@sMwiwa=zS154@qJB3$)XkF<KKj9mREj8&c&{
zQX7o9vSD%S4wmuCn;cXj=Q%H@8r$xz&iJsqvW}#K>iS$07!z_V$MVl4Tyf6W^ys5H
z2#28>WgdOg5P4UF1@8-G=F6(0(z%}<|FM(O9(E|w1WS#Jo-;lW{-p|^m|U?9PFDC%
zZRoIK!>rd$L0Q7c=au)UFTXoRUei4Y<^j=(+f7^G$(Ex$d#f0E>|X=&%ZE^}99ol@
z<skEW98cvsxS}SUq?S{(HsWA^6Lv{?!Nz=oIl>HweJb2Y#l%MXMO-}bA(Fq^`{i3G
zLj1c*m14Q-N;A8@ui!5W4`lL?CxVLb;s7MANfbgT1T@9uX&L+<Qd}&Ls8qa=m5P~S
z69N9Aj3)tM2H*%1+G;?zLuoqMPqgRkp^orwk}!aC%GEP+IIoZtls^@_u9#+9#kbO3
z#8tRrss~Qp^gS>8ZP4Z@#hOyBm6}ZQ&3HI1DN~uFZvPkf!f_)JprrBS!Mq+!t&Sc7
z#~V1UTk0a`tL$@*^4aIntwp;9FzhXiea_$0!bytyK^zOd6m6vfK{lGZU8vEyH4GpK
zff{=aR4n0Znq<Hh>@o}s4MJ6N)3<egAJI;D706oXKPR^oE#eB(yP^7JO&d{VY2KaQ
z34W@mk@0hOCEdV|l2NB?IDi(ob$U7l-w!<I=KeVkCn$-_<>j8p^h+8fay;ntZqU0!
zoVqs8ejH>KNH(~^VfUvq^zWRlqe6RaH8g7xmZUgj;tJX|)BE^5YS05zarz|y%aV+p
zqlspiJ`lvlQuVKxsAi)#`ol|Ni;`T_H5<7DREX^PaJjlp9nEBe!`b3B_gf_tpV26s
z=Xbp1nh)RZG17A|(7K6d^@W~VrxwMyOFij&Uq+y^XQQQiBvs-K){KGb=_nR+{V)wF
z+_qK0U@b7Gk1mdHu|So(V{xMKT)iWUZuW=Rtx3vn`{l0%S&0uN-<5PAr5D&tF+0Cg
z-d^5<rPeTRw$VP%kHDW`oEqNA)_M&elS@;ta1Q-ZfR<C=1wEhm5LJ2;C4w!%nONOY
zj`0%(!Z2Q9c`*qGELV?!{`(1tQX{2KT-!%mY84)Qed|@<T2cK+Hn}tXou3&NeU_@}
zF{}Tci{qvn7tT03nQ*obhEvl4`?8j8jPJQ8PP90_1`Lxx6A^S@jTKGiJ$IW*kKp@{
zO&}|lU~5^sUY67$L3T%X5q``z$s8&lbP_pFG~LGq80E_oD8;t#hcFZ<A@mV)`YON^
zL_{6K#xRr@&pr&EXt6r;<UvFNEtm>rKOK~^zJc<uLNL`1Ixazw#(u?3IOaCT!K;qQ
zXU_FA**J1^eL*)-)$(Dbns%eFFW=daY6JNWT)#UR(R&g@6z}!{4hrVds2IIM$Bpem
z9W218Yd0$i?r**nGR%YTr(Dg+z&7N4#BOd#90BA50kj|R{Xwkh@L&h!XaysS;H&WR
zigv!A2QQMtL^<-3f1*T#Lops>gj;Z&AQ2|JEk{~}8W0e$Iz3B-aZ3AC`YEi*eYc#J
zqY$HJ@M!PnkbJA!f)_>M2|9YE`YUj6Vs!neF8+px2k!o#nBi&3cTHS%!&n|A3AFB{
zs1B-2?e4y^5^*sN)MV*g=y$QYgq9F63<LL;xP+@$K(XPPP%Y)Ck+;292}hI1o@sBi
zfC9zp?)$8MOt^f2SG+P}0m~uexvz#-`!v_WB4}r^qzZ5|X2H#f>feW2(Sk@O3e1?3
z*CzKmr%Dvmgy>;rtj~&4s^9-xA=&?GzX~RWJ1GJO0FaRY0NVf0wBFRp#l+dn(a7G!
z^S_9`Si{a?lO6E~pYV5o1+{LQ%evJU3l>vkvq_Ad6|VyrXAUDhWri6ieMxd6HuI$S
zHa1ywTq+6<CB6@N+}ks!GyG`WJ<i4kpP|_6uQkl_PIdFg`P&(&JVCZ_iA#AD){_07
z4;Ftees2E6Olcel_M~P0dU;NmfuLU}pC)E65Se<3PPF2wm>y|cyT9}ygRX@xw01!n
zY8i$9LL25Vy`9vuL;oA$f1{)cZFVHLGp|FY-kgn%3&?>$zz-qlCW1`T*yv+%qYVWv
zSGQC&gTyKN`#@UtfkHI;PZiQW#;`71R0dAD%N{{~5E<yVMr^#--Dck~JU&5*fFJT-
zFyskk<kT2qh)81~0>Q{r_`iR}>Q6R_b(69*G%l<==hCUgJ5s~<_92;0Ew-2Yg9rq;
z`t8RTUj9k?;i;G86`43`Nn9DdGoqf~ALg=$Hm=6NyAt7x;Rrf5oor@qG`J5RFN@D&
zUez`d<W&69oy?X>NRhaBzx&pqSlyRub<ILTs0U%>K*;|_L=gMprA@W$L|YX0$YL}P
z8k4a?dBM(kttZhRdD9BD^J|G`i<5LhGzwaB8gw^O=CWOF-hx!6$5h63gjCRg+R?&J
z%-2N8vl2AqGW^GkNjOV^ikLj1t0DHmMAl&ae3|W5QH+p=T#oMvKhj_Z3T3>aN2D91
zv67hDSqJ7XlTj9BqRlUN^B0uc4QYljp;7{4G3WU2h@i3+qYZ-K9;mtFGa8ao9`slI
zwoHzl3c@NAPLpaoZ9?%FGhX*F|0BI+iIWv$yzl+5ZD$aKu4f~KKv{l2&W&?Kxp$sB
zN&$LKA0g<=?Q76lQD%{N&vAP(1mUGDZ%IfUs$Qj#Yq}>}>qvq6NNkZT(o0$W@v2<v
zlD}nd-Z6$rIypIPIo8j+K61g?(wP0)W=9xP?NSFt=&<UcC8iGKE;W|eUr#1gm5-UM
zn3rBzF6(fG4MY{}B_m2EG>-UlCCX0)ElFnMdSB_4?OPCbaO&-R*38CP@w6Rk*Bhc`
zB3-1QHoWyVnN(MQ;Gn4#l-I=L!BoYS1?)noB-JPdsSKTF$T5WWQQGF<b-YO@Ap%`)
zEyaMK6N%K4C3TW1!P2W_D5O#-t#?Q{Q*@(*lTr<1AlZ<$?lZ!E$&K)y74$4cn)bh&
zytTjc_B;!Nu*L^Y*MR*31QKQz;Oj>SS>2CBbjYTm1nHOr9kj`gOUa50Nd^!O#dI>F
zrJQ@Lod{#f@E{a1^)z~d3%bQCjM~G8ZPYuDVsf)68>}%@1QV)p@NB0T6H-8Qqv^=!
zm*~W8N`@gk`)J(RxzJGwD7rCY(4{E;XkEOZA%~E9&A;*dtlSkhHBFO~aP5ZBHbrr1
zyP6qYM$KX;XD!zulVh>sgRqnMr<<si`ch)BbD_w09qpgJwRUcv#q)0bAj@`eKa#aL
z0LPBlYiK7WmQKWR&BVz=lO;bO%e1d>mvZO%jvBX$JuW7GyUwGP3D7l5{6j0E0!c10
zi9WW*o^;{ImisS8iP&y}nE4W>$(kw<U{4%E&Xw?na3KzVJUcd&j|=gfwZi$9s%`x_
zMcJpED^{QL1j7+jeYF0xa-)#<)6W+kOlC#l;cL(Q?pb&eCEqxNvIEQ7p$9L&p$q%L
z$~(-=-SysU-i2>GEtN^B=;0}DnicETO4L?nYDpTwqY_PPObVQBJ+m&f=ebIln)^!g
z5=u)rz}(O5uL1iv+!h_mbr*YOiaD3h%KiwU1e^AthUSv>`oI_H?l7}61t}$wIFxCJ
z<i>Cpexkx)`Lu3ehUve+#ozmjTs>;w{HGG>gb{l>Y#*l|diffI;SH4!pARF-B+vUV
z1-koOzu<3sFHK?VQTw{qXV&KYQH`Vgo@d?2DDNff@uTc2wZBH=Hi$=u3`Rc+gW;hO
zf6^y98Na6T6RU3h*w|RSPB+$eXOZOxg|8DkKJgMwU`(5ICG2^W=9E9?OvV$7ou+Lp
z5_lt<d1^3^x==>BV_ld6TaK&t0RxTqj>e!}d^Bf+yahzZ72WfTV&zA(tQ0}fn?^6G
z!k+LQnS|;Psa+O}hg2&ukeK&*yJ+E@0qge;-q{RMsqd*iqCc57F3$;$`$C2^!(*OF
zce?nMGCLdA$P>i%qS4_I-}Wm}sVRh-UIBMm$kCt1giR<Pj&KSHHhk__T4l>y0r*i(
z^37%D*8%Zj<-NB|eR2qVaDC)~<eB5w8o%Ag*ZEq^ytz8x=()o}|NqhTh?=fk3q=e7
zum%GFWdEP9ot=rD$^R;N<hxGXWKaB6`R6WZo*02Gvm_bo5=p&5?N7e*>`}_Q)7D~s
zqL~aS_4l19DcPb)|JLPN<%&hm9fS}FF$-(>w4<`qj0`^#fE^=V*gF>h--~TGY0Mhe
zh%~#?I9;QCy8g}3%V|1TNThS5lvQ0~t3wwqu$Rfu`}PpcxqT!y{Lt`|X16#U8%j3G
z{>+m@&sp1RH$WTY{N{kN%&=IuLF)N$$i=^LP(d|E)xfWw_Cb{)Cp@~9H(L6cXiZdx
zJU>f(a?x=Z$9BO^c{r2U;I0Am>hkrNc<1V?0h&g*NcB77RVpiVTpCEP>M<nM!AkM1
z9o!iW9SdORMYFm|9rn=m6d2%?U~vlJ&(8&?|8q?0@jY2tijbRyr`v$VgGoSm-}7ah
z5MnkikDtq%<M$30e7d(M=;JXay3Wk7#=CXT49|jYA)|k1+rR$FX-dQc72W#&xqtWv
z)8dMBmYUxO7hP(G7jzvXerCWm=`Ki(3x-25>Jl?zT&wxSHYpX2!T5|gYO*5YG{7&X
z-bbo9u1@DG8H4xVoA3AYNM~DsyV~1pGz_3gKV&?7dzAn0#euO_NHl_GKGj=i)wac-
z!5;~rOZhmkn&0`~51S&~ly3deVp0|Co?~p!ngeUPHuyHXdvwj9>^O#Xbk6`FA_DqU
zr;+u?D_>235e?9-mHLBgj-$GCq9;PG0N-Wf<9KoHhbZH0JfRUguMqea+!~61pWnC!
z&DRq+e=j7<2P~i1)hTaW-b27Qk~I0~;V1t-o1JAq)5yE=C8M0@e&gabZzKlMj95AO
zA<>jV&M{S&!kSi3glBV-8C=n#BND^ma6k+NLKguA!Ussp{Vjq2ZQ~y`yNPp4QQuE6
z1QJWk<LHGr7tJdYO(@1YCBBLA;#f|&BRB*YeT2g`hlEO0Q-sJCIOTfMhoH|Zr<{__
zDtsJAx;j(jM@Wn}><I1Q=tM7_qjPf|rEW&6fc}pmDEvBz9ZZSV`(A&>ul+TQ$w+j0
zGEA&cfFHq1+?%}a_bFV}C)MO>FIyg1=O9f_nU5V?R2-5)17SpC8#Hk3(YW)R;Dv%3
zoiGygcS)+2ibA-WZKL3VT+rkx8N<$E1Vo)?B_q^?$G<QXl&%2wUpPP}ADf!IVM6v1
zlETHzX>XvQc7M~`pLF%^$_lM(DP4MK_yO;Fo9AnrP`PMYP1_bpprE7ZTDg00G6dEt
zTwj6i9bTaH)`>3xV>mc-)KW+#IFLO10b*Gh8({Qz^dTc4wYbbg9~~eSFr~k@soesL
ztIT;b!3fCuX%BJz5H>zfEu~D)iDq0EeEuMRFCT3L8=kP=zRRf&^N6D;DVeOB-^5LT
z1(iO{mlxkbs4oouq=jEGU_lF}&@X_I2;W0%Tu}SXvZz9_?bnB8CXYP@sX6^uy7zHV
z0==%5)+JWP!p_g%0&}_vJj!g98t||_CO%rd!0NxdY|s(-Iy+F*1heyKoRiu`wM=#Y
z)B*gg%7iNqE367Xp0<cj=cu(H@_tkh$JPo@+^(IS*~cJ9IOhqw0d>R}!7&RCl}P`P
z8>03GYcXeD15bLT@$<>J{;(j2#<+P>rz=<(p|#$J!BkBiC}?f;uHRHn!97DgD~ApC
z(%{eH9JSa5w!v$*D=wdM8?Hat!$DLg*r`P7##W!N;cZmlu@e&%;C_C!&F%unJ2mjl
zxZzr868MLhT%#y5-lrXdczVIjq|i`5<H~o$LGx~XgrRR`Or`gT_aRo!-nyESkzEN>
zYfWu<rXF|tpQa7BLDwT}&*f*W9N^2VpW`3M1yQGV@EW`Vfq>XkpCZtt)tLHz3+XQ)
z&n$1^kI7Cbwq<T3cB^<2J9jH&Z4hKpObjM}kg*|@*nJRyRTcCUa>vvDl7BzF)E#a`
z2kSRx7FHP+6B9|QZI4B!>r&yD-T87mEAtKesy{#4N(Q!dsDzPAP>##u2ySL<d=O&{
zzew%djn3H{7x}#<Fe^IjbuA5HR6@|Q{v=TkA||uEPar)!4J5l5B&^01mT&9ASLpEH
zbYV(|bG&@~AuG6zNQ@zVN?)GgL<nq^-<bf}j;V{jo)e)?6ue;A$S)hDI|e5{{mLA9
zcZ@RQL+?E?1K;cc1#w`o$%!F>xKOiNa*wSBk(byikh}mPb(!!TF3Bjxc<tE1P-IkN
z3bH^hJZhH=g!r2+M<%wg$G11(+2KQf`+?SxXULkMpgR&st|aCGVd0UgQ01^}3vK`9
z5sS4zkp`Y1w<A~^<PO75s=oI;j(VL}pYygTW+|em7=8ZDCYv7cREFMnWjO(UE!Q1Q
zjaO6lugd`Q94Hy~hzphirH5%5*!1H4qa4{^`<Lav&-l-_!(uNUbB^UZZo~iR&bDP^
zGLW-_3Y$?^48`epymE|$GS;3tKgYE%<+$}a6lquY!*b&oHWgh35+EJa0pgmV8zGve
zPP0Z7HRfd9(ybC!jHBIJFeKe9b6bYU_`0V40TO+zAWo<cd1r7@(IA<8sSxrC$SGQy
z0S_<h3QxFCo92^lB7p>0$F|a`Ol8V|Ao{ER@R9T{${-%<ioIF_%QZ>LIdRc|3&INq
zg&aQQTt2Y1ItKy@URqaCMO1phnnfPlfdSA%DIW5yJ@s6_ei?nP;|8uXimV`KVp|G{
z{M27yQVWf))Bvj0ISCll_XfJI43Hw_jNyQTz^hloJP^|3Ao*eLFoe9`z-}$Sf06H`
z2!j^2`Eet^f|V)33Ch5xf8EI<;So*6uVrzW_Fvw@p5n!ym&{K!bWEuq=8{C?I>y|X
zlMmR1?#oT+QnYRcF9U*z^lgSwZa1D49(nIUoA`(K_B$tclZljv6{#^P3#akGqk^Hp
z56lv4gSc_|5O0>MoT9+(JtDSXf3Ya-Gn-eUe_YElv|&PE3(-WfBH11|=Wx(~<9eJi
z3Ekb0GGg(e&hUbj8pPE^D0pW`LZlynTMgj3SsCWwn_G21G4(`W%S^B#eMLBy5;B1D
z5^4kbC)Y2HH;n?_6P5oypwYj*K(FLtD<#5l#6tZ%V|eO|!n?E&!rEcua4%ZbqI)*!
zDR#j{tklfQMrl2+NCo~xraS_zRK9Inglk&|Ch^Eg{i2VSpfi!(p<*z-utHoMg`yT>
zwaw8iS*0K~z6$@|>K?6B;McwGJ)MAU#M^6gPGxj!y!z36hN*pbYCP#zzN@G^hYNO@
zf$es9Iq0|JRlcmCc|TfrKT0<T_Rup5_srB<5{1;kI`%SdslO9LJPIJqzfprHMW_B-
z;{!V+)LADlwSM63V1LDX5;n?zbXB?LwM)<uts1b)5iOv{%(Pd;Roq_gUxpaH&W93K
zmQ0;?Kd4;;9nK%H4zDi&RYalw$xuxB!xf28SQ6fFvEm{8IE3aF;qA|&*LMw+zhQ2C
ze!DoP33=j{;RUaiL&lS)k&KqXXew$-F@A2)sz(^9ijeAi7&MpPp69-|Oy^z*wFSKx
zzmTK1#3@g5r|L4Q3}!315~`-Q6nG7EsbuDy;q9vo7SpPlS_ebnrHRwpqifQIpnkhO
zdLElq5&jlj2lezVTmKo#3MopYG?29Biiw;Jr1ot4-((Cwpq!ZX2pt?OR|Ov4uuU+c
zO=FPpJfCEtgVlIdFDxoi$3n1@>htctZ}d(^(~}O_eb^q5X;^+B9-zP7AjG0*>9(#N
zGW@VtWa1i-c4l_5Yv5-att3RZ{}*a^j!JMci%tY06Q8;s^yP~w0h2{xK>EYm$QkWM
z8c`F?++j&M?U3C33Kn++ZS1=WLPf)N@`9S7;<-i-VW_Iaf{C=S4jxf2Zq7;Zs!kon
zu(d3PIh<B;Zia2yk?MpBh9!equzx<VM*3!Ytxd!<;nBdQM9xX9RN+u|7zMe_!CH6^
z#)gqCxJ{7UNehf-yaH@QHw0Pto)oSzZ*Ar<%zGT<7Gdoh$Ma>+!kWh?)Nb8I@XB%m
z9=xKGUcMR3w=<DC*%}kq1$Q?=2JLGJJsds@Ct!C4@qV`Jsi=p5SmI?p0`rl=?TR4o
z7V?yx8n~%mzzrEBEkA6nAL=nE+#!2XLX+yQ|L<{@b-LL;b+E&|unvLUeDAF^DzV-N
zDVqa-GC@v(wd2d3;3$eOFcSnJ4e94bzpn&KQ_Z2QJ%&Oo>2LUIK{xxvAHB;p|GbOg
z27--;TgngUDGBBzr(+sNnJMX|mRC0Id0cNx_z*BonULK8Iw5#Sok+#>eF1E*jul0i
zH~T1)U$wuTK)}`K#k%w0o$)Djtyb9R8ENP+UG53bAJwUYJM(Wqougjz>X$}PYF5ol
zN9%Jd3hwdd&|@})<f;-G+>^zl5Nbnh+5d8cc8TZ4DD&W(pS94H+$<*P`Fxkum&{ed
z+pbblo9&!Q6X~deLyGg1w2*Hue7j8ioqu^6U<1>AC2}R!@Uz5#@=y^^4B?9~Z-yX2
z|AHSS71GTd1e-vpPx43{Zxg+#SN-H|(KAAKY>}Tq9dUwb|NS9uEPSFK&!4F@hSVBT
z?*op4slR7%gMS`st%DSuR>#+~X)LI*8L#GR$zSTJ-Rd8T@#ubb*`gc8_>t1AiulwP
zp6k=LquBvI;_Z1lb_-&9Vx$8ZID#*{-D%?)Ms<oB$<*QyZ-A5!$q9T)l4NTH4}U*w
z<o=^8Guyd}qlA#=ssEu^Z9#Qt)-}K=q5OVlrQ^th8k8K4REXq$3EohDj-v09F4swY
zP_}g^*4b~DeHdJAR-C(khy{4)!C`!J(6eWp23Efb&|A*MBUE;67KXJH<nY536Kc;U
z5#t5*<Hw7Iu=Uz)#_NVzF;-~r+;{ZhGF&CB+a=$}ecHbIawh1-A2R88XNfFWY^zV(
z-Lk%}bKBrnKdZwO=Z;6Y7{iz=J6`0x%)=e4!x=c1EcOAr1ZE+#e5s@c{bVI(cWgy&
zdcG%Eo^D^LW$YaV3DUl1_gN9yF&H7VDBQt|$i!fJLoPzL=w=Jkc6rNq0w+XD>PmYC
zEM|LiTHldrl6I2H^Z7RC%5?l`#B8oSpx)Q7)putNESt8ucaacW9a6%N(YN1guOFdY
z_qK0)GZx|!HEepSXHNFM6G&ry{Nj#aNvm>RsXSQjxVQiT)mA&=Da66eEkx0*iZ)%}
zsTthCfEr_)T@Vp1h5Sk&-7SLTG}!6KNT!;)!IQ$=eiJ`0D2Ro3fUI6uq~*{&$e{3^
zbu;hlTQJH}_`|V|&1*=E{%;K#={rvM8)2>A@onWr&<?QHS~$V>a9tmwAjRo62WE3^
zN;mk#@cA11eV<^_jf*|PccyMpJ5&Xsj$UH}%cIoHna8=big8)@;RqBU;Agiw!ezU*
zEQ?mR7VJa2<Osnb6|vd&9Z&X7EteNAR-EijJsn6WwF7=sO*PuLRT+?yiFH$l$@3&A
z>RM)jAZUYQ1=Va3u=yA#m>a+)#G{NU>P78=w~lYS0@9sbG}RvtzW9jmd!r}+4KW$l
z!$&%uiWlX>?gwYBDk+&&eu9X0^Ah?<TH^*@p$Lx-C+vyTdo0y8^H}BzdO4R+h{p^#
zT<*zax_S&Vl?bEG)W3;oaZLA6A5p8SOb0iq$lup;*f$EKXpoas{H*=_bjxKea&)am
zl4guqlXISTvdptFx#yHc-vl)Y^#`uRcSj0m@ZgomJ(Svl|Hf?)==LyvT@yDS!@XKG
zpAA;eRoXsH_650eyOY~Chz^)#K>`;K;+q!qv~`mcF7~R2Vluir=gHmv6>@;DiLX+Q
z8wqR><7KyHKs!7qjxafczdDXz=U{gpnX~DE8lzpE<@4xpkbN0rpPtaN!@_ig*5B_(
z!ONoNUxE+yJs<%b1lE`vMI7o9pR21>KF`bhSoaixq^xPukD2S$MZ+lYuzETXF?(1Y
z!ax3{D~<Y6kZ!tAky(fTdGQVIc_zv`$>ziN5xt~>fvJ0yBr&25^1N8XzPXyb@|L;H
zx;PVTUOVNV5Z}X3;{#nj()b5%Uu!wD5n+`;p@<?`N~J^Y37H~qM51|ULHs-*WR%Ni
zFyT})sB8-e0yh@(19O8Jgo_jnqZLd@y94#!HwFa^tI6(^^bKPsx`iO-uBfXYesRm6
zY=4hs_W<V(6aVDpo+CjFv&XjYGNc%uN^fXY>(zL}gf+l)04YiWwUS;&1cAWHSqc8w
zP!LwfLJ`@(&lxEev9a@QZOLoRX(QCPYnF&7Ll?)&R`$?M9S+<l1(88VaC;N}j3n+4
z=Yyvm)I+jFr-GHT9eEyQT!vY%ybTppjQ#TU*fPi!g^y_qN&r?=&dXQA0$O+<JLGAc
z%4c?ka@5lyiFuJ>!rS*Ra{|nR6piu&*nL6yWS9(2?~nWP8hKb3ix9~UJmjEcvMWX4
zKxq312U-1;hIZ`+hn)Tcjks8)E?qUr#{Te+j$yeW=l#Y~D0np#>f_{Ord=P_&&22O
z+s#ptSHF#3TXUGMr&k;G><`09g->S)T8PN?2grq6v*`uVt#ALdd`)}8I^(_(c9X>!
z@O?rFRI70a^iL?6q~D-TSx>IiP$h8^5+GU33301bWY9L9juW(EqET4c3qs}cI3{`7
z?@?nkdAl1jJg-&so3h*a*&DJwKUMd6lR#}!3=~ak6;?+}j5=0d<ZkoY(dKrpml3BI
zx~>;ph{yJ_WBB9+QZrNnazm>82U`=(@F6c8*D#=DDeK<C7M@j=GyH&aW(a=}uxsfz
zn|rDcUpH!5;W+lqjY3i?%^P<NbuUuox4!Koel=Q7Pubr6!pT4QC?=AT!ZI&17#oc%
zJGR5w*zmj$2`~AlGBNn4W0$O+Q2D4~#}P+kuw!9q=6lNjI#D1V@h;fw+y^&A1x*LZ
zK*bG5iEss^U<F#+KjqA}h$Yn4*;FjjU>ze)Y&B|7SSVP9x-hOQJ+aD7R!>ne!OLP$
zktc9KrTiHWc&@8j#SldbSwgZRri7o*W0-;+qNHYUiDGS7YYl_lcnvNq^iN>|R?S*I
zWR7M{v3Er!dVLq6q}FfDjbe6aWJXhWF+p2ION}bSq@`yNw+cb4{izOMbUnzsmaC9_
z#28JfOVb)CxF@}}wV(p&ifkVzxoO;Zb=6Es{1;x6{4w)*l{lCIsn@9-6PhT@uHD-I
zM+(Hdhm^;L&vHdqXGP1zUa`wUs7ncLi~EZQc024%cWVUcH^$xg0QWW97`h<3m|1v<
z4DIX}HlGQMl<r@S1yC|5Q}#MwPs2vvu@-Ah<Ik#Zw3Homx1*iao(NC%9}fPUfKpfi
zh{5zh{?!aUx2f*UU?gn=hnt$!f*m}}6BcWLod2%a7mZB85+95wyD%ooMFCAB>u)Kj
zryU5uxvkv9P5wME2vL)wWJ{0^SBzX$<FJuh_zQR1!HMss_n-am<rys5{F0OtNAbVM
zN~}!LYc~gQDhN0|R~`jNGN0wyjARy_HXg9Dd4K((N}ks^)z=S@_f`GU5b1aar(m+O
z+O@)@Ak_l-M4ls%{g_ga_fHMQUo-wihbfnks+|YCwXNYQ48qPAaN6yNPfzd*KZ^Th
zqNrKMW^q#d<blmm(-vrSvY1KaWXjTA6}MTyuQ3RGEUEdDi^5;Ut@YFBZ~kyVwUsA4
zK+&8%q=4fZJ?f}J$mQQ(C>c!Z8gCYTC*B@vdeO0@F@7c}A8JR1gDX0)BcG0?p+er<
z9p}vX2WK&y;n0RXOI5Jbw88vcrHCXV2I^!NcZVy^beA!FainXTKm_X84`s7SoH+)$
zCWS0Z{#e9GzDvWDN7VGaA*;+)1+>cCi<(R*C70tNAqEJN)ongti_Dg2L#Cyqm73cP
z0=KH}*@}H!iPeaL3xG~)MWKm5S+*asfGVVgXFlDh);7S6n%)_ZQUns$veZ+#q7vRy
zO{09zRCBtfVzEB@j=M*2acE<M>wO_0w0rjRMm1sp{jGfTV(0YB-}unGHzj=-J_Eak
zx@Uds)oc$k#Pi5XctC6MU&5SYoubwTrP@?kL#>=QaM>0@#RjZf)6L$k5}+4MLqmYI
zx!9--j$C%Er&$t=#Gcwx9r#QS=z_47m}h$Pz<?>iFGr(5vG^-gmDOXsR7-F7SAP@2
zHB%k~4C2Zmeqa8bZZweEe#IL$_Mq!DaZ`OXSIs<gG(%Ds<q@8g87K{60r&!SmsFEF
zCo+@ro}10G%AQIy%%dFaQGQmR@6sfd$vkCBtfj7luM>#ey+^57hIa>c$O@?2&zD=G
z4dW`o{6}CSzoCMXz3RAF%C%QztcJ71eW2AjvtT=Zmc|(B`mWl~D0iO0oWO=l%8eO0
zvB0Nwm_M4gRy#Fq@m}hlo&NG>0=$RSgcNP)yu1U^Aw*)3gE`@{s9QYQB25;QL;?9a
zq!>VOxc&3a9yx^e;o-2fX3-lpC`)tT;uKLH$o?`4Ys=SF%eNjW8KODqd_SS3RT%i!
zbB#xz>Cc0tn|)btqmPc^2I0;TKVFRAWWF@&V?_jpjXzk^+RQn<yDGiNwQgl^jk3sQ
zdF7rNPTK3dgNBjv@%s0@yUBMWxLeNAqY`$(5_TwAn36ap_T>CxW22JOznbsqn^#Qi
zpstDkJZ?H}^OY%)8+bQt4GDE(ZO~xXRxw@Bj`XJFS~zXo6sfb3y^a-~*9od+EW#W$
zLuLa9z)VOxS@N{|JESEOX56e;?d6}f{xi*_y6t;X7H8*AiZZp}24-a208i9Cr)L0K
z*Qru85&IPZp4<n2m1}M1Fl0uVkZ`3_uMMZMmUiKMEh@t<6mR|%E?irOQV*;6v$%o)
z{jQbB0?#YrXb*ilu4xEIfA0^}-A)~eDJzo<Co+2l#kD@4;pCAuXty_lfnYBg`d*5U
zJDLYOYv~sY4HP^Da>{MgL=r1zJ%{fpW!aB~T5-)GpyL<h5`s?130aT_RWTQ|55$MV
zdOda!;Heb8MGCFG);&|{CM8w3?yA&2&CaVsX2kzNpW*ZS9f$OVLHEx25l7(HCh%kQ
z@a-OLS<mZtH~6jK(xw0V{8Wy%IGFiy*Fel4&w;qo-ZZSPVR|s}O{IMSlHzc+JJ)w=
z*3DO2GbC(9W6u-f360d&^&k!M_mN2H5}0<X598gMQeI@p12?D~bJPxROsfa$6{RdW
zNNb@8lXVrknOo3roudI7Ht6g8Z;Z4XuzmSEWEr!M(_l^7hcfQrtrw}uhr95(5(2nu
z^CQ#u`%EI2sNHL~iRx0D%Egjo-4ty&#EM|?v79k1eDSziEV#BAZugsvZ>fcn`c?lu
z2WqU&$iImxrT)v<&tZh+c8j~Q{+|fQ5ZSq86wng+cFjlCT<0xZBELUztY{qe+7e6-
zs7*KIXGF>pXH?h|oJrc0`{|rBz-ZAv?>3U!x`HUyx(?6IxDlkt^I<XKwgsuiz{8xJ
z6SO`C@=xjwm`igqBzxo*G;j2P4;&_`&~<?JnV<P0am^Ci!ZkTcH<SUr7-b4<kee@$
z1ISuWMD20h*BFn15vo>XyZ#yV8WIZD%(Al&+R+M{Zg|wX_LlidXN^S&uU{UcSd`AE
zGdK{FPjnKzU9B%HV*rC;t_5!ug2q~xH4!5dBLK7n8pQn<QeMKaz{K!SQ9;Vr1?L9s
zL<T0OEq=2{LMl@MZ4%w8;+!79vfY`phB86&7psHkSro$yo>Nuz1(W@$37&4uLkVSA
zy$8&pq{W03BtI=)W0uW1($BY=@6_rtD=SMS9O|pG3<fVlt(lr(jCK~~b~Ug~Qx~s(
zF~MIsM59d}Zs~>)U?jVtw@XzI0of!|M-5a>SzC2C3Ioff5{97nH&$Bkn>>hV)A_$%
ztgU}4$3sfGqKXpTJ=Np4noYMOC|G5}F2N@?K~N*EGoWa2<|JBg-QbV5eSXV~J{;|M
z5#sc4K`x4$vaDbmq8pqq*vp!@ftyY79CI}`SZi7y^Kum_$#ed*F@CIKwA~%rgcVw}
zcg}x>Jp<161>UTJa>?(YronWbCoJPzkWNybwY&28gV?_zLc>x$;>ceCsUJ<bcx37V
z24{y5qc0QvFt*{FVv%hR4m-M^XjHF-BNcmwA;i4-gxal@fG)-lJDr86u}jYXoQ136
zXfjL}A>T!wL?qI0ATF3%PC%xc$O>9(5V`lvTwUAD3RJwx5}wocbO>jhX_h(2^VI-<
z1}g8=)4}oMglBWx)7V?4%eOWQ-ZV=W{`phgDI=WN{*2~hYWFxDag&TG*VurWvGM}<
zC!}4V<rPm>9z>fNekAyaS=0v>wt#eOP@gnc=F$!4BuzhciAFw%f!VIdsLuN{aXyA<
zV)FZ>KOaT2bV5Sz6-n{v^(!dy0#aD@99%wkBLY8fz65+f+!;n~KUUYSO~}sLd=!*Z
zig$2Mc^lrQuBd-R%%@g}yt6^XF<Tg@0&&2@df6j;;iT-doLGCeX3k$o%xa}ffaBca
zLMK9cca1GD(HCU@fu`mgNsxA+Ga%Vyg(gUVkMcEXHycmL=Cr{e+2-SMAx)y`=U|)&
zHM3gD)1diIISCgv)fHu^g9>u}eT}Dpg^0n@_|mVwFGc*XNQ_lwYd(D6o4iPX5LE{r
z;{d+f%~~%b0{O!-W#DSa81l>O5GWzs81g#-0+iPowGT4DO=7BtgzW%J7+VK!w3S_S
z)J{Z6MC6dr{QHMb1Icaq76eH*-J#Wr#3!IT*8(_=J7sfa$XWv&>(T6O!3jJrXLWdH
zMXSg)c_F;#iV!PI&LCEz&pAkuQxN+`(k~xKJ0=2b`JWa$+-+Z!sC9aRQ9|zt9XE+)
z8?vOWc*?4r7nm%=gPy->A*k|j7;V!q{@id~7#qT?#M3yxG+Lv;QuHvMIvE}L6MA3R
zx+*pezU0KgXI;@Xh(Z^W?$(Tm2Da%(EBMxLm$lS~u`rh2ZpxZ`V)G&uDm8JTX*VW{
z`(EhCO>b1?gVd~Hr|R})(eis$rqE}&Ky7w<%1ciCa6Kvz->06App$v{t8UYd!{%uR
z9?d|ygh`71`vbKO(2`gagG)@T3ES`_*V&=!W>wio>a@9BFHBUuhKa44>dUdn^h>Cv
z&DJu6OB4_OL={_0e{Ib4Z3;$=5RIH6+NEv;L_!DM9g{E&jZvjl>k*INfZCe!8G^bK
zv2e^Ls+L)T1(|`Z)j<Xv>L{CLJ<@~f_J%cXb~d|o+d@~=chN~l@#yNi=O3w+fw?q$
zc4K^NMX@>cJqi6`wR(rOq32D0_zOZ1#qh!VrVfZ5NPP_3!D{VvALkyXa0#ce1Y?9g
z<m;?)qyez9)S$_aTrFbk@UrHr+rTc4$FS|3wkmh@q0kvK-xW|;(<nC^sXgGWQj5UM
z|1?o8T~093<i=vHbC{Y0*}Fq;d^SG%dw56}XSPV_x&cmm-kqX4f)bCsty9CNZ4wJ&
zi9jh*BYmN?iEWnee`I>V?(*IniNi5r{o*7_R0%kh`7mG=@)3p<TK<{aJ9wAex7Wlm
zjHr8{Ib_Thw57kTcy)w56MAtA^bFY;*Yw{cGmNCt>x4D?sdVHB31y1eVD)GI0{>q<
z2meduI*Ba=ukjy4;`-nEe~L~W9b8-u9Gx9Z%v@Zo>@EI(h)&UtFj9vwP>(4}(J?@l
zDgRH;DJUQmWUqkm`2IeUi|hU#x&?Z^OBY%&FQh^oiWqxx66}AFE*wP+=Gt@sfWzYd
zo7MDxp6-9_|F_lD&V7@8;p{bM@i$<@6JIHENOMNcz*(J-LT<tShCI_YtFyEEz>Es1
z0ZWEdM7d?j_UAJjs|%4(NV*}rd;64L0Y?Uv5fc_XIKAJ{@oo8AEnzozWZXALx4~8=
zr*u5{e={$gBJ47SoLt^R>c>8BbUp76(G0y2sahGSv|4nHa4;|_rXSnPw6o8tP5;3#
zo;mbdx({8li(>sp42`Z(aA9Uy<ii2e?2TZn%?*r^0Lrgau@5t*=*Ad>WFx8GIS8n(
zHqARmg)gzJzFos~ai9DRPodkLyT&XGFjn=-h9filSypu!d~%_CBeWA<XpfxI_kiP3
zAto6Y`xi!?qGRHmk9BYXq>|_bwY<L;7`&sv2T8U&lnFDZU<{?_+WU8$>DeMK^Ey5B
znZu2BS=lZzcQ#*9BG1HdB<eXXEmP=X?LyYT()_+p8ep#rz?V6nw~L(f22r+1srrah
zhhPXnKSyV-cgGl2-{<c?-#Y=%CHr9*sr$sS53bjh<lf)>%%iCWQ|LxYb*olNJu{0~
z$oQO+0DewRM1q~1Q7{y+M&9nfBxg&^vg}t`49tY<KL4g7cr?=+U<Kh=fTF#$PKaP_
zNd@}}!$h$E5a^6*#K~Z;o>v@&zVR=`z5O6pbdmCV4lSI2RxB<f+-1y4+wvhpnszYI
z%K0dfL8amfqMGRkdUTF+%AAH+q=jUPAz|=8r8Y*)4lWm%&OI7<vk>k!y;jcV%xaBA
zq_N`dtwN(4SOkE^A;wTy2&Q>vFzkO`@ygBwz)Om~U9aK;0UW{8hb1hN!GyAg%n&`m
z45lFdQdkIf0~raVL_86wRU1<opc<QKb-dV!QK;p4snLAcX7e43`J||dJy&w8R{dCi
z>;NtRlA;7CFBM7QBapjy<z-<m@I~W^6lO$Jt@<G<J(#aMBLM?NoT1}_#nPCM_qmH9
znWjr!v9mUm<Q^KrrcS_zIbtsVT}jFto`38CZW!-0huXG*LG^^K8hRjKvETBQJX{DD
zBIr?7KmW7-K1l~*np@ujtS7zh+TstWIYM2#VFx8R6A%i&FjEV@@*fBRn?y?dAqkYy
z&TfsT7LTD4qnU-Qd{PJYz^CTVudvigUMsgXLBkP{<O={#ltuCOmCT#av}(O|QPA8^
zEI?jbx8ebEuIumcl^u}AVblf96K9^7>D0~%&CEf&P}4o8nca?wkCNky3FNOjo6=R}
zusnLrv}~_>AELAKfws$x=mjDr%;oqbE7kj*R2qYF$|KjTCvzoC&@t_t0FdOoGkkNf
ztM=VjV%fp7XubZf{c%XDXf@kJFB%89GhPITP{ybQ^m513of;L+MUpF&>gkGUXVj#N
z*Zs#hi%QR`SD}QrqA$ZlnIh&C=vlyo)VSj1qC$g0su6zpV(2=RVK|^zJs*mUoL#C(
zn?m1gD;@z9>3tv|<&H*o2q=Z)ww+A$P6=j8;Wgo{g5FUpvA~b~3D6S@a~2t5Il(C0
zd6&8m2GOsIR-YiYFHR}%snp=9y8tVW9ku#)O&tr+a&TIP@MX{w%r(486Eu`lh@ush
z-(Y0WH9a*lkSh&s1||}se*-5gA-94%_4Hvk+{ca(v7WP*9(qjV?WP_f(fj~}8vEEj
zi{#zg!bMDl&__@PZAD!%7m*DZ<5Abl>9PP-$zKmBb}FHBNu%4@@P*KWQ00G4w-Z4i
zo;F!bC-Fx~B`dQT?*$jtLGw6@Q8mgelW&XxguRkzlZdl=Snt%4XXlmMz|oe%f$?8<
zbi&dlDn~jHWDCdX-z~N>U$~dm4%+S${J-jMZLOC!lChgMt)aM^_bhlLdA0eX?~BMY
z6?B-kYV{`}p;6<{b~cEQ4y?Zf-5-FP(#_I>)0uCoJ9(x;@Kpy(no%l_qiR}r9D}Db
zDp6$NQN7B<IW3o_)6vj2HA{B3io#uQ?CQ(*5=6%WA+vo{6I|R;1NK_u`kUL@2mF3J
z=ydh+3*RgMAd(JVPnfY)NLK&C<$iY0^!y<t4m(i4FO%8FC|Xb2ZU-j=!C<h`8T3*f
zIS$%n1HCs)fDcjHixl`4FQ1n$CasF01e=thA*_@%p<9PD`<TC^!s{8msjne7yH8?n
zkl{E2kBX?4Nj}(j1-Ah?<E5{uYu~1|N)-bu{yRG}38<o-v*3l0i@H-jYiv}5{|ez=
zS*Na@2j6+uMEvoRb3Nhv{gEi|@fEHB6O0Bd?eCE&9wZ@Tltg2dF1q3kL2+k~@Ci4r
z(wZ7&B`K{`EBLE-X6q6GO`svuEVwEJ5$l@g#T9;(@*ik3B93SWRFG0D9GHDJ=|T{d
zP5#GpET8y?ajR%nlxund0Blx72M8@%#HgOdPa1tQ2Qqb#iObFtE~ZB7VB|3%_l-WD
zbfW>}h-(*4$}Q^`{`Q81TcEmb?ADk!B!gKs8IX^>TZn0oh*cU29C0H5{zLnEmj&ug
zCOH$u4_`PtVLw_oTxT_;`qhuFMINZTig{yT%Yq0x@Nk8hli1@zTiX?0?j6VYTk7)8
zOaFs+z1u(ztgFbBY=V?D(=+@>&={5{&Vf})I6?YpcRa%Ye`sH0z)=fP9s@*Mu9eKh
z)P4f3>xMGBb)o0{Gyia_O4UDX7LPVxY<6w{2+u;Z@Fnw0$YCHI5X<ujArb0?B<P%w
z;HCEizo9{yxQUy3B?W7TN-X;mug!YO^P}C`CNqI-a_6U?|5}iF=ZN}z!?voqff*y%
z2Jw@L!Dfn@PD|TkX0L!Ntl%^zM6Wb0K>P#o!%(kUg-T&eFtLk?TMiG@rW?xrvCw-p
zjLFdh-G_Azacr@o^){|<>>Xb&+5Gbjx<7zT4rGH-dVtEl&A5-LvcRmMaxRFJX$3Ci
z^klWn;>J59qkBIPx@q;%zFKETGkQ&{(}8h@sZxm%v+>d_a}H*_Z2hN5g^D_;_XKDE
zO_yC2hO|i$Ye?sTd_N}y?soMg>#G?~#bh4(Fo;8ZVlG+|ch0_QeQkF}seeC!QLzkr
z9CS`CH!yXI%p>;?v%!F>8|}M;br&|))!5*AJ83)<s&FrAu`c58#epr((y4AWO{<rP
zR+W_4`wMn7m%$ph(5mrTx2*;uP9nPmdqvtXk4N{OQ9f3HlspsaK5g>nV8JN_d)J4H
zq2ftpa2SrjJ{RS`05N2vp@0io#MnY_K34ZLd2Upk@DI`#7gn`tx815CcES%WleAsu
zn~U#gmPm~wTotzAZR?DW<$U?9wq>ZjqK}*yVMN*#KF1_(2zCL(et7HAx1oGL#0|~_
zpuSOs=GT*^CjXJU2v(DGGK?1FC#5t~XP)qGPtOP0DK(P}oK9FdD$T;Hd}I=-HO&Xw
zAt?o+&^jcm1E~ka$UrH^d-KZNvU|z9|AVq~h!rIWv+%WT+qP}n@7lJlcWv9YZQHhO
z`_9WuW-*!BOedAz^x{vtQ`Pm?Io~nbVDA$1S+D?sJJiL~mCP==x#X~h;1S=A?pGwa
zvo6@`_S!OgUGF5zkpx@5d<rjcj!EIMP#2eDN1_0rG3G3lNJn}-u#7>c*~jgY?ime@
zH`L0Ld3y{02FwN4vz|^Ytk_jbPtqFs83#e1Cl4Y3dG6C}XTRF^QFv{f$lW33MXX@H
z^@+NT=0jYIMUVXg*Y@?5&-EHAx%c-#2Y%@pW-rIvGloluKPh|tleH}NkJR7*XWeV3
z1hqo+q#Z69PEGok*ZH<zEkSU`IoiZZ(CcFL-jb`7&u>klpC1HKGf6)u*i2F@>0U-O
z{>U5e>+V7Gt`uF~4HoMikhtbRA%f@y23=rl@eV*P*LXcwEpnJ8O9w{p4nqTX9QNU6
zYNz2fZ@gzm0gObQhtf#H1f*tlp*?zs;=4_j*QfB+FfXtVi)vf0NW2waM4TB@t#@8^
zIPTg~ok*yRO0{I4vramVb+Rd#w5IT4H`_PLGRUv=+lPH}n|-Ufwc|3@`UmK>Y}9%I
z*l_p}1MLUBRyTY-#vt(71#q|D{}3PJx1~B3Z$i;o!HDzATS@HgKGwV5pAXp#-2_5=
ziJO&l0Q_$>$t~n>-W*Vs%2FmgC?q}7lr0Jsfd6&h8<a+-JxW2gf-&b=FyuMqUIn*t
zbjKQ&5Zk@ISJ<=CS15%&`fs~gO>YxB$%L)~W0`k~lU!5g0l%bsyJD|DlfvqW^AT9D
z3_3M)VEF+d+a#keJKV8+OL@L1L>)8AKpMu|gUUFvDvAkQgA7i!=u}oW7Nu>4lYumj
zxJ^Fsih2ywK~T7LCEC6}GpWDrflMi`it3_x_S|JBTQC~Wy>mBTM|=#<r%)qGSzML7
zGU7X-P9&UOzRzcfS)@A|Q0_V>U5qjYU7Rl|sOLxY$)NFJzLY=3nEy8LoJC*|AoeqP
z;UeE>PIOoo3Ob}pb)3tdCa1V)91^E6<wTps7?Opc2M{XiIS5mCiH5oWb*z-F^IZNU
z=$%*O8;j*iIR)0cWGHg@9LANAwykp&L!yJqbcSDWv#DUei$%j4155u;Y}r>~1-AOk
zHyH>erx8Mj(33mt7h*a9BPN0`-Ns>IBx*>RqX4Ik+U*)xYhDYmOFSxPs40lO9B4@~
zZa*x;`5z2H1ux7@Y}KYjB;AmEAXWQ*t1*z9jXp39-zb5u@QfT}g&@Rs`7shei=l4i
zQ1tv~LR!quG^4F1^0hj@jkH@+4}l|(rWPu|f3Ts**quLV&U?md$*kv#fS9@NHQ-Pa
zc9h!rhoPDwf95h$%1Y!#PUU=9M7ld1jQBjq$a&vRJ<`Fuy+qT!fCTVqIT_|8t{+8$
zX#1=snfhz=O<;F<PX#@DY(?V#F;iWG)hLIx$I6HzC<9#BxDElUqk%O9(V(9((Sq5C
z%qU|r0s2Cwf(ix|rR7V~wPQ=pH_UMbhDcsU?j#ABQ#h{8|IEL~5KKuU5E{Wi;MEy6
z;=HPu!>x7vu-nvgdiRSqQCArIW3OG*J?LeRf?A@ydGv%GlW!#N5`KV42U5i;6l<oT
zcB8G3k(Ubir?`eogJ&*8%uG&Ynj&j{sSF3J5_9ycb~)uBP1ni#6L<P-EYqNkhID&y
z92T@kNN!{Xou6tn39znYe5+66l0Bo4&Whcb%=*vizG7U&yl5e*M8bClaQW7l%SQ!k
z5nCoQG~d@M(BsM|%*RzE3kBrb4DEu3#5AptW@_TdU~m_R-N$!aWZ^(MH4sZThcP!t
za8pPm;3vzGa#I&KYTaxU2|?%_4A6@h#?Uu99*!Ry^~GdB6|Pq$JqXP^OYj#vyE0VT
zZq?l80)7%j0n!Zme1z~-j28l~_v2Q|cZ;9q%}&wr)OzyAMYp`dF8xH!DxBv21x3e}
z3K6`mA%SO`O9x{CN@syM1I-A+RpB7H%QK{o1rwTl$Juqgun{{f$t>VTYJ$C_m8by6
z`k<}}63{E0&R#;X!a7>p^LhK7;pa0P+AZad8LMQ|jvrVZb!non>E@OXiuLonw@C1~
z-3f@$4f)*yar*@RZuZ-7a(C5{kKO*)io0^fCELVInCDuk>HODsq#B-pv1xdKnr6ZQ
zG{!(J0Tp@3zJGfz<(n?R;9SZw`TRV@Y#%tS93IH35rV$agU|6q!Ec)Nc$Q$~`5P`z
z_`~t)cjy5%kv!{@rl!R;>&{N1{WJK?DRj?v81KiTZ>8`t`oykX=;X(iGC7Uc4V+xb
zj`KpX(BjckxB|P8JS+-=9+yIx%$2X=R7|Sh@d4(7zRMfMFm&81Dqp#@Vu_*X%<m~u
z`B1JLzbzKfk#l*$ncy8w9uF<wOz^{nP~8%ry6=VA2sjud&aK3-u(%~yV}D&s*#k>#
zTdqX_?RD_g`0K7(LVrY^x@;iaccP7;djQWJz&Uw1hV035*5CN(5IZOI0?<=vG}%XY
zbGPk^u;vXrREN1nUG^3T<e%9a*0<NUw5}tyEZdZ-g*<^!Bz$;1aqB_?`L8C2UN{EC
zV>vjQ(KZ73$-E1vhYSC3lUJvQvw8dA#4_F!Ec|9|U(!@p+#+c^E;NN9edQ^kD000b
zGrAMPb+9NZDZ<Allmx=(Zns;w@y>|T4aNv5sRXt)CjTC-pg$AgWo|Sp1KHf5DbVKS
z92VrL&iLIfeL}ji`JVV~4A*cHrAVd<T-W;EOR&u$MTd>b8eDvG<6tf7>803}hqjf#
z;T(UaF5%1oLtfa%9|JZsOd^=piZP}FOetk}YdbrSJ4n2?C8d6$vhUnCN=>EOPR4NT
z<)C#(JBGEEn8Y~as4*AMrAENx8Hg;4Lm?}h%i`D@3d%(_h-xh)+%zV^j6>6-;p_f1
z^~9jf{S)7Uq;xZ<h3Oz#mIKQJBb#*3SAv0EZ*2KR)&F7`ZR#I9@(*Gx0#z^#QMEVs
zh7?5CL5)R6*ZtnY)Kzekd#c-Erv4FAI^(-v$c%bJxWm3z?6hmMJdm6bPDdz65y`KP
z52QDmyJnMWsId3-uvfNSxUD{b!qvRmh@4<+e+eL6GIsid(VbxU=dJ7-f{yP`ilAU%
z2>;8=KSKH>)Cbn%JsC?6?T2mgbu&s7Zr9dD{Sy@ehh&(Q7eGWhp$ix1Zq#*NUmMjx
zAzEyYP3TAtWXN1$w=h&6&Na=4c92&$qh-wVDfZSchp_D+CXfPxorjE=y_{V}%4s*4
z5D_#Q!(|)#ZNo=c7brX!Mzs@Iu9*0ZuC<Nc%ZcYT{^d*Ww&CsXI#g_%D)X+Y0GG@O
z#yq4?0$lhn2Z%~-?J<HqU_~fsVJyHmCoH{Sw=pt?gip~jTB%@yfxF&h#vOf_E8T)c
z2f#qk_N*PDbdD6LN1%Q>NM!h_(3w%-J!)#(E;O|~pmw<U*?}=#tV>u{T7L2Cg#HLe
zKeKImjQNXTj?>Fk&}>YvU?O%DjTUOEMNIsd2?l-MRO~bJTiZ5eOy}Ntpk~%SnmN=o
z*%*TX5*-_8T4T~PutO$Jt`_0=+LU~xGt~0bPFY0knh~4c9qWkQBGTaLqdVFnjmT8(
z!dC&1n0qkfp;g0+sJ^OVN#kQ&0oyFQVouII3c8)^L8E#k!v@MJx&xCw|3_`K#301s
zq@Xy!_N5&k#)0}isL`oOI%@3AZtTLE{A1&1A+et`%g4u$7k?C8?En)v=-!(-%fo(e
zVPX98^r7<0>0J~(*p=TW{~PcN*$8vYJCh8Ox2O-drf6Rda7ZscJ}&+{sw`|Ed$=@Z
zyFST1fkwv~$$0`>!R%mDpL4o_l7cyD%n4?EN8m|8yT%0NTPBjh35p*Cas0Wy2+p`F
zA8hnt1mcH9erQ!#Y9(m$!#Xy@*g!diguB=crJM!IZ)Uc2L;}4a+LT8-An##kXb8%r
ze~q5s<6h=$%CsUmex8cgpbip(`IhVCE(MD$#R}R4{bF|sfhIJ@To_#M1&Tp)p8B3B
zj{Fo7%oXpj6BKkjs7r@IuiT*bZXuwM*z}v>zRZDn1QsyrxzwOub~(}Asn{`EyQ%p0
z1#oZ;NdR}2D8ME^ol0LL)vdbS^gA$7k>--SPO|D~>U7|Rj;bYQO%O}0JKYu1+r{~U
zyD9B~nGgDLkOi&r5`o=etEMOB7m{&5_&h`hJ5*Fh{Ap!1D;)U;3xgpy>`^HtMuEU9
zP1s~s%lH)|%N(`_1R@{sz<5nS^*HjPgJEuebuPSf)mhr#e24}XG;CiCO<R~@<DE<}
z{Cs+pZ7#&=rgh@kM2y0l!`<MP69e*(2WPI4f=M5ek7&;oI}_(jETEw@j-64~zkx|0
z7UDscU$+6%qhxgaL4wQQ0nOI(5;==O+Lc>|&g6h6n4|iJJhy?ltjMq^Q)<K=JJI1)
z_F6_#Al!AME7!x!m@<eZ<J;T;mQnl#;Y_G2zuH}h;bO&0Zey6waX6pUKXgoO85qlV
z=7x&dmBYb6O4Nl23gbu46KvX3_VGu7MG?T;#M#S2HL@sKiBbfH5$Z_6T5c-oBEuef
z)r!q;rM`I`M#4iKW~5!){&;X>3zuv|8ShF{;8$S)E*R1*lWyE>n_yUbyoBRGb*K_Z
zpMsI65pwHpdzOG(WR0#xG4U0tXp7S0e{UAh3gr;mvciWg&-d}UZmuju$P2|wuWz1?
z*t^4BU1p{|+W<Y5?6!%JY6y(oj#xM4_Wglmg|h^y^4-9=;IEDPv|%>&$*8m6hX~W}
z70KffZda5gPz99Q_Il4RuB;b<kaDpl)SsK6q@pa84xN`~Kx!GH2T@Nv(^S(lQ_HUU
z$LJhTJAu{)*FB(|w+!Iy?gz+vL6tctEn4KmivGY^Z^enQ>MF}rGQ8*DjwfCbV{KSk
zTmWfRl~fQzEfjT<zkM0vt>w)bKhnX;kLzSli0wD{m_eOC<rmD+e|XQZT_9+cU4ApY
z*A1GIKW(0Me1uKCOvWrf+Z%ged4P3UqV$NzY{OZG*T7=|_=hir-pGkSmvZj`C2mlz
z==Z$oJZ=gVxP169l;rjbySc7mG+<q$^(JtX>V!sS5ZuP<ptOrhDdgWoBQpnlZK~Vh
z_pViNRf3lSq}{OMip>;n8wY2M_ssw;oV)5e-x*vMRAzP(Ua~f=hYPsD=WNFB+NQ7C
zj+kwBZjb9PfJZL*eIM1oSo19KfS1%R4FCBWcvEmGXB4|<>Z4(j*3Bu6@0F-^znN!R
zm^3wTBboiNWNvUr9}2FWoUmEdMLIOH&-m(Ra|_YQ*Lx%QvZ6R2&E9a#E=t}NKu9JM
zIyF#K%0uzQZaLGtXpax+#WfR+^J(^ZzTyDHmc7dV#dkaVy#NgN$_aENr&)JyA1d6k
zU?QQ-0Bi63VWOd903U#tvR}ZYz#*Qd^}%Oxwc`lv9UrmcT`%{-p-96D1Z{;qS@Ff9
zj$~XksCIt|i0(Pp(7bCOzX*)gFyOh;%KdH?q&Lc67xX7%`k6ykAyPm~e*s>#A<m3-
zg_cu+ew|V!l$uP@5nWZL#UldiH++D6uBQ$w`MFYXu8g+fOPiRverNP3t7{!MWu9|8
znwG_=8U43ou<das>92Y)=x99qsUiu8Pgn+pS``px5h}C|IH-x9j0(J4TaA>PCwfLI
z!U?CZXd(3hkdZTHgUqUJ1l8e0FO$T+I*D2!9$KBiR42RuvlsSq5-&CT<D%TWesG`V
z@&>m2=rJX`_?X7oGNDb<onqk$#DhRxJY%C}t+PY1ttM+e3zKWAuHzkt><1?$C@~e>
zTFAP@%dDtCtkwz9?ygH%A8P>>)BrLv%s)CnfU(s<>M61LR%rLqXRhvrk?VPYkVM!C
zli_)HKyFwd={<2nRSu;00_dO)T`fr?etn}8YHjBwQfp8T(|TXw3D<w;^KpUb1<k5c
z8P*xYIM=D>Fz?p3oU67xD$e3%XH5ZtAGhX`aSgvn!jp5l1|0?F&;`A$<B0x^R?I>@
zjDfd<tnU^OW+0nHp-#d_ir%@^xs*XZoo#0qfs3)I#CNrKu{&9An6_EnX|kD;e_1gt
z_0pRDl^ys?Ce%xKXD6AYCmy9I9ehjC)6B4=`$T4_A<bWk6GG38XV5?DG`3&uiq99y
z9jPRH@z0jzwttS0IZ<OdG2;{F{Hi77<mO-@BXRB1nUmH<i?|*$?>cW@mSQPn^*3>7
zT5a5Xg;|&V(xn*sqOU;7t2Q#C!e%v<-2B-A#If_(ByHa3-rz9=fH}(?h|LgIw6hLX
zzep;R{#qt2HUOBRK%PNRx+rkDc(?$qrKq%!BmYtpgN5mjVPGC$XOWE|^zz$M=nwri
z<i0?NURh89tF@L`i<+flfO8sz`ijnma?8E7kcChh0wX<k`}*~9hg>EuaA_4{tsv_f
zbm8nBfD|sb=0G~u7Gj=kePp)5%$t8pUhR!b?MR1+>iI`1#;@(oOE&TW#uMEPpnLQh
zHvxNir=@-6;o<e*R@5=$7TNUJ#7cT~*@b?*DX<jicO6t>gNMqm@SQ?k@<(@eGhdV-
zxTkqt8tiGO-3##ZHak2w)A}#;Er@H!W{nqZ-6NILX>tSrWNb*VBFf%$@(xJy6yP9m
z4qY}{XOQQiIO6wLST%0BL_I=<BEICj9|_5a(>W!o1@QiNpXYA<7_gqvh&S(Kwfnp;
zv0M^EUl+XQ5kKtYo%oZY2mb92=@z(*H^<M>#kO`%6i(y~?3$@Hix;4>p*KJgk3b4$
zD8utZ6Ar#hmSO?3<0J$2Yfi}fume270}nnlA1=^l=d2VayUPpC{0y+`=@Gu&ElaGY
zOL2PLxs5b*Vjga}`V(#?7=a6GMF}#E!X}&zrd!=YyKpaUbxE9WnNwKM*%}7|Y-XHO
zHs3-BrOe6v;L4VCr{bb&qwjNtP6Zb*+ii9m9Oqs!nxU;<MXUbwLhXiGxkFTyR28#+
zw)jL*;3>+%gS&D&-8Hh@5U7yc%UbseV^Y22$K1M)$63_25wD+`wrkcp^V^gkw|*Vh
z{<xFzUqSI|Cp@u&sJ1lWMyzeK-l8et!<<X4oz0%E=OG?P)G~+eZfI@Ll}_AnC%jN*
zJOAb@M8zX!UEkElMgSv*fcMPR9z0y!_19gS&U?5{x_nmpZefQI&$U46<<K4Uq;x_<
z3qt^Pv+{{2s;NStMNo7MImW{_ey3laZY;5L`^1`%rD(z?mLypCMb4i%_<So)OJ577
zqL2^=rYW{#PVd}Mz1SX)w{7pA=T`XmG1bA`>z%GDhve;wy$?gg3(-eB4Ge$3{%p@p
zL<?PwS4=z2W|T+2Mk|%$;f|NT##G-@fI@`4gSn|M_eHQ&f3G~<Qfz^EL_liYx=5#7
zB+m)GlJ7y5Oya^akS#eWU%LJrJLwcSj`>Cn^D$0<#4hwgat3v?oEa7&uaHzUjBG|<
z07?7{bXw?Hg}xBF0V1$JfH+dfp8&Bo_qU_NS}5@?ywpMC1M6G-n>4QJ0$1!nUQ{L@
zsJ^AIhf--mRx=JT?r#Q~Pj}9OXj5uLHE0@xt-&V_8?;PC(4-JY1*hD+UFJqv6tdzX
z;8w<8;esO@EC1Ztt1@kfmO{{?#pbjc(Kz24yK!t<1ulFnp-$Oec@)_l$@Z;yYGeNu
zBk3nHflpSWQ<rh0yeD{_-H9kgs^XqmKS2`wy9N5FMusxrcN6m!e*;jAcB`@|$mq8>
zM!RpRQ!ys+cw-I4%PV0M2c;3W3HNrJg%8N7XyyEZ=^HIz52*yM5k=Q@Hes8GAIkRH
zj)Zl#4nqV5ze^;p4z&L`y?Z$G-s!S1P1<slYOdX7Ce!uYmSDV^qo?Ck$ygf<k&G<m
zYZ2JFLxaRzob1JA7RTtt5b6CmTFqSQvvOtogVQ>N*6+)q>iqbpbmGOrkr-GUAa3z6
zHV>f4EblK-F8P^xzjFF~N;}ZE_uTlI3h$Wy3W|jgB_L|sC&obg%TweJLVD%L#Q`Nu
zR|5gu8jD=S{r$Y_k6Pg`i4G|&mzJarPPSWe?euOn2#-C2UJEGDFgM^a>&)W*?!mBV
zZnE~9glLEWWPeTdEi!lJUzdg;ioUeo0vPFhG+^VP7TI0MPvD!-&ViLd5|7CFJ0sV-
z6%jBLqaG1yP;3757Gh)mzm<V>?nzi>dYxTu!{usL#PcfUNAwbzu;o>!=R_4ju=N*4
zX%v(ah`~paVZbs+>CuV~k5Cvf_d$g{*VpSADB)Wol{CPc*8QijGmz4Hdc#P4-lX?O
zxJP{^^90a^_woFB^kq@VP~txhI(ea1`>Z9<PF;2BCvR*di$Q4P!BN-*#6NtXi+Uk`
zh@29~)@zz<&CsNbwEoOzcs-W7d0OtyR105G`5n>{VO@`O?+YK1C^TameYpxSS5n;`
zhgzG(Wn2iU;%=$cl7xv`HbP}^1vbaMjEjf><g?$$^lb+1_^Uo*C7(1sJPpr^ti;qD
z?g;TNrq~^@>R_I!G^?vGJX9~Yj+qO`DA=nRjk50P?+^Wp`|^G8C-g)i4<_#Ym}&0^
zp3a3n9uTO`jYPuaFL(O@9R_bSvZE5!G`6w4O_6GUfBE$JD{&wGManwEQrYen$lXPv
zDw@68D_qu>s~{A=)YyD79hiS3DO4J!TB2UWP<!I)+Zn55$#{ReF-d?j#PRcI>V5kB
z+W7obC%iF@6$MG<a_6-kuwZ^Exr;ePV@9p@Y9$k}fJG_2pvg0K#yC_rr|XV}9oeVp
z9>lou{HACu4duZggG*7Lsa8K{qZ3P*i(+o$q@{TP)U4sB45EQ$@XCi7&)%;HC<0oj
zsa}V9^o!$g4n!(I#!YR7bMrZaU)0gsbn+07%^m@2igy%+W6Uf<3fgj8Ex07q!u7!(
zJdZ!_)v_H<OE`mQB6^_}00$}l8H9#kx5`O0gH7U@03%20mK#h#&cHrkK>$hkw?GM=
z=RcB!0JjGP4R1J!9vC^+WqFn!Q4gVSs{*vB0{67KsYqDTea%PLF+7`>6dNSt89O9?
zQN>3>+LF_tZ3M|!dpC9_4i)NbIJZ9qmD06F{WUUp+7Z*PS624i7f#qjal(B{N$619
z5X--Ls->T-tAXqe8z@^Y*k}(q1kR8KDE=d>Z4;!}2X3BptJ<>+z7hgJ@z%tvQ{~uI
zKQdTsLl6dF4TTa*ESEm<Jo?g~r3f=*0t0$D;Ce-3F<>=aph@IBlylKi7~J4<TNN~q
zJ9?XW@oKbs+grYSiYI^}Q0_}xBt)Iz^QoiU&tpurO=0Qa(*5Apc%09wA3&J+FY%{q
zu>g?YT?)(I#r%$JqE^BDz6NFeti@i?iS3FJHL2pAU!UpAYbqEya~!8|ep;9N{vRyu
zS?T6nwf7hL;c+5KY9XtD0@X&AlI3a*Trodx2)M@ZSarf!r-drX8?XgCLIZp&1yvBz
z>!t!($sMttxxd6#uld?woe%2_#X&KxyulVwC7K|lf3d#1b|MTTuE17SORyHs8oQkS
z9$n#Y1-?v44P5sH4615b7hq33a^r@$8|g5MqzRiLXxEZ=SxB_92KICHIAKzt-5?=V
z-HFYE1n}r3Rg|{?Mc7h_gW>+7r8OrkEo{khMF-3yZSxixeD;T$TPOv|rKv$T5npn>
zFo7$Zt0|T6DBNH^e5@ieF6W0XRKav$3b5q8b_+KrSMipI4!i!CV4B3Ry1KLax?{{#
z-FOt7TZ#HQFe|9|+-T_h9w(W-`8b_DL<>y=Bw1nFIY{t<yT%`{K8=cYF~`F76p|3f
zaOHkfs#BNKZafAR)cl&%FTtZfWqDWW4FM&c4q?Df)I;ux8vVd=l9GTv8lILkS`wiR
z{d^Eg-ec0qKq5#lT4Fl1u^=%yz@cz0Rj=~T#<ekHcujv7xk9(<36`{5mlF!%gy&aH
zDeJ$^9#!wEPi}ceSw+GAiW1==xTOj*mPHU=;l@Z{YPjIWF~#q`yC(_d8<lCniKJ_A
zdK;;V`oK+K%;S2|mUUGxI?}k_G+Osw$K}2?7mp$pnl4vl9B!==ei9C)!LxdZPI#y9
z($G0aFsIhA&G{Is4%j53EawxI>+xC|SdO+c@XAaB8@M1$dq^Ot^mfW6K_cxfgSrcl
zpSpvBQZtTJufU|-!R?Ul9s-vX&IvOz{33q>^MUse<dSDtSszjZ%u=Spe3BzmGy8B+
z65}s@Po|aau2mFNbQ|2>1btwFzb|%*h&9i8p+t%Z@aytC;M>2y^ZMMBzj4B+J$<(v
zo|t;$;^vgcuJkZ<@TvJon^bnZ_r~CurbJ%zRi2xS9Iph;a}Un?6+{y?_3JB%x>ts7
zg;EVd=Y)j;j#iUJxrV%SV;j^`SbKG$<iy7i+eXKgY|3>O=61Ghvb4PQsQpX(<w<-i
z^VF3FW^E~KJ^Mk*m}?_1v&DKi2@l!8+dDPDWwy-F7&XokoZj5BIwnEw*V)rybAJ)|
zs8UgR()5OeNj|IdjqGs`g8JFfPqEroapd0rA-5zevSjyHzhA&mT0pw2y^tPo(KCA>
z@9EE-pAWEDt7F_2D+J^O@si^<@|!-cbEkUZ?ji!Ig*XIdSz+wdVr!^AZ#_QimlKC7
zGq!<RDLtbbg{!a;@!IVun2^h4ACUxlUnPB^&dYV1NH9x*W0@n`x;7EUH_Fcz<G`gq
zG$gG}gnGm<`q4kCJQS;-xPDANB^)VN**nXC?@DUX%U*1Y5w7`6lNOiwTMnNyUWtTV
zRnG<=!s5F;-&PUa-foV@9I=LN_FEDvplx<lY5R2p7q%u^VPs(#Beai>a``RxI~Qf$
zTq)2hJsUBsmOn4cn#pc*j79HTD>|A7CJ(p@ML|RH&_aY8%*$1g38bLS(nk!{R-Iq&
zI$kcg#Wn1u`HqSOI-O}XCGlvw>wNX1*%5Yhoc;hMDNd2zMS&gxBZI>l2fbRpNdB>c
z3N;$LQww#qnkn#>N(43J_xNz6*-eCoPKiO)?Z2Q$<~~^FV1~-9Sr0di40di^AF&kD
zPa_+_1i}re{J7LXSu+ho(GQqb!pfI4MXOV8Q_q&T!-DZF7cR>^Wl~7VKgGl~2Xi@J
z+~K=PlSyG&>OmOv7JU&tunZ+$Fym1Lm&@)xLdXvnp$WCPJ^o>OKRWMK!DuLavd?c9
z^Qqse9)au`{CU+wEuN@4b^*6iE<0t?U;IbHK~jK_R1PkMbc8>o?tuH9U&bGD?k)2*
zZhmfLl&miy%g-}azZFD&$Kx^fko%3=CCT?@%jJ*VNX}TV!;#-7Ldk<Cv-Bbl)d=sF
zz%Yf0Qf|iLKop`+F`ASiiMo3!6#p<s$coT2tmN}g$bIheaRtV8)3L4U<6T>KCMA);
zU0Qq=L+4IuM68!n<4*pr@{zd?By>t1$LNKYQhJp!59cub(G;gf<M_kV-fuvTSeoQ-
zN_C^oXor#yu$2%=`1s%SWlwTr+C-Vbs`RW6GG~$%zE&BgQ`*FYjaIK0&&=yb?)e8e
z*6HKz(+A7He_cE{d`7oRr(tC1;Zs<$?#$lcb)~llCw&dNTj@Fw?kc2`=8HltMQ`zb
z5)b&@yuNvp<a}JZ^e-lX(6>JR<x87P{E(>5PVADao!%tLXJhp$<EmE1h3Pe2=<^sv
zf?c+t;S_I|pRLk9ds!|>W)Id-Hgq%JUXvZp+9M9Ht%lW}H%ikpK2o*^#uzcDpN%Zg
zYwzVq$PIM8;MFG&>U=8I!zCj{9c|gLAm2h52^deKV^K7-Dqy;R%+-Ku@4)+<p6(s`
zq2~VFKgiwoc45#*I@!5wL{mE*i$Q3N621yN6Hf)nqjk_3U#<sQ7lji4cGli%P0?^I
z284K8OT5|wj1y^#-xPF^&a+^;Dqxc?vLuTk%SEcm@B+bxrEOyyD%JU#fwS8**Ay06
zZacdTKmAdX5no-B-X^zK{UO&1!|i2fVs<gF_??$vmBa!+gIlL}AY06oF`zv)ryefJ
zl%lcj9o8g}LTNK2GpekECMq8H=iEU`{-OV$Dst#k;b*OX3Ua4^gW%sVH1IHSay78F
zFm|+ar?>a8wy?GO|A@y`PK-}XsQ+6uuJQrkVf!w694<=P^G;dLNs#=%^yB4nkVMG;
zTR(n<^T7>{2k_5p3jpx%_J7y)U*-KLBGJkYTRdSG_=sPI1Scela^k@h9-MG0XBEE6
zp`LbVqUO~&04rYBVP!CVoQH%l@b^<ywYS2{Q$ngGh<nczJ!b5@rlLmu-^Kpg%WZvm
zamO?bFY1e%d}*qA<oEHC`F*zc8Dy?DwfJ?xVVODH@3+Uk>)Y7c`g+Yin?O@pSQ3^|
z9Hus`ZLF9uOYD$sY_m`#s#_WI@0qd)RTlXS;}lXC+6xMuWYOid+Pe6UnzHHK=LVaF
zDR1qccI`}fk4~yp#eeP}iyo_mWCna+`AN}?^;uE`@ag^E<1O!Y%SJNVN<*>>EU<D4
zou>CH%802sO)}L)vXq3A)^{k$ozfFLgza-FmPO5Y_JbBhdyheV>i6Du4H0&WMwv&8
z<Rf)Mg{El=t>X4CJ4fhWTs*%|SrB+ej9>Q_l*U*97sLP!2@(o$V;4NZPYP*KXUYt0
zLPyYuBGP<9H4w)Hm2oMSf{No);K#AqwM!1E3Mx%;QBa0Uv7WwDP6*^>gOGo0Wz~ce
z7vy;eW8%Lr7!rotzI3)Cs5)r03d%HLjys`ekDp&!DZD<$nR+6NYVF(*?||k~r$Bo;
zf!?%zixsFDZgjf|QhT{!1$4tSIz@&zM9f6y70>mc;o0PQ*+z!w6ZRB(5u520*eeZE
z3tA%bnI%&6n_^vM6$MyBrT+I+-Cz!sffpw8rcMZkNh0F2#VBI)#RZJ?85)HC4gf6<
zeDdn^Zi#1M`5#zkcZ^}lQGQB+mr)P55Lx+C5+7yZ)yWpP)6hc*v=Nq|X8{E&j6T5$
z8o(KqCP7Wh`V@KgPO?$N$C6J><CX?t*c1z4Bl{S^yckHfp7(`pwC{oOwte{-W3|)^
z49{X2w44oR#@|jEe%_iY?2@d0(Yg)_+CS1$9cZ5@p`q1Dk07ty=*cS;izZs)%mnx(
zgcVkTIn;t?GzFPdf~L_X#Lti}t!?F!%EQ%Z&1ZrATqGQIQzlx|@N<dPg&i6!{+c)|
zqOPzy#jWvDeAH3_V$Z9_v=_a^r+$>SqZu@XY`l0xLH+WXF5zTq?5K?l{<6E&UM5Be
zi0)k=-bNsW?O~IwK;iUDcNJ`<f|?5X@p;@*j;wb_OvsATJsg0%X!=WyhRs5fhZ4m(
zPWLGUzF9OE_1`bq0Ie`L#Uy-4CM@)QrEf$Xnk9{-VMVs;d+-@13lx+y{!nw$d0I;P
zfMoGE@ImsKU^3Nc3=u_$>sUTAKoddC^kaEalVbA;LE<mz?_j;kQ6x<jHtjhF3|}Wu
zG~Gn@W1uSUkq+%W040F5tP7w!f9ByTB<J4qXa$J7*hPtt&gvaz#;7pmjCoj<ZdoLt
z?Uz>;vKCb{Ov2S{1{pl{0zOe!g#y``Q61<~f5JU>K_U16T{<z27P&N&3APlI<p2G&
zb`fM0Ar@**D-R^9Sd|mT^lZx|OAqn&wIp~G<W+{tE?8CE#)E1_#(h{Q=`0h#i|7;b
zWzQ-PfD<aafKnx~HydysYOiwL@aa&T6|E{)ujUNxa!Sz;Q`rDFEl;RrW+0Zi<}#p^
zNN%^|jW?JUA|zpKPP>S-fqD`PkmY^&VC0CE41Xh1UgzJ<sxv<2BX9-=YOguyO4RFp
zz43jY69F|bBhp5~eAVxxv9|1`0R?(5;GD(di3?TJ6J@}QA=yKv-IHocc6NZYZ7_59
z74=o;N8lLQ@uHW$(^&QY>P2NTbppf*WU_=BuQWkMnpH_sCpPln%!dupt`AXk#GwTs
zj4nxBsxR!=sset2F$!$5NY8tYMT)f}JnEppsvyxIy&aa{LNoi#|4Hju6@|{KN~Lgq
zl6Re+j}7*6uiFOFQq>RJ6U!*ja<jRVjgO3scf4*as1VUX!-$p;ZQL`Z?aDlsbyilv
zSL8!03r}n|l^rMU17NMq3izq&dFn33$#|e_V^fH_hHJS@!bNf&nPFU+nAymHWCXNp
zu36mp3%>!pKfdx#sdO!o55~JVt00FoF}><N5Q^yLe(`xn{s`R8G>?Z<r^;c2!oKjK
z?Y~hOOT^fNCEiMKUjSqU9IIT|a%iYDt<69RHC|2?00F%nTZzD6)2vAZOh?JwP!IAn
z7GLy&k{64vEr9Mwb_@bUYz~PUun-aBYn`RmT<J9Wuj#DNQ0Q`|LM!rJJjb6B2s1n(
zC#HXclW=m>)Ieh%5FpXd&IVK5TsdCa*ihZgx<l!fjK~IIQA{Y5U<s_Z+&2I%kKE}6
zATVgyen~P-F(op<@<&t&{IFJew<F)+4`kNMcpE$&GFD>7{qhp&a9BH_LXZNB5;JKo
zs%GbeNpJ%6H)Gjf7HjBQ<{yVAwow12$NeT83BozgWO@PO$MidK-sH)q(Ae!gA^~GL
z_`>Cv)E<FFTR;~u#!NNC`oy~^cD6HJqmPq4O(hcsc1TbTtUd`zf+<;+w4gxHU2W}B
zkzIxAmeKjjk$5-f{A2HZhzwaF62r7U#8?ZJNmXd&(8!MT%+)4mB4fvF!~SNfg21$T
znWRy`($pFxmRD6mBNfa8fwBY~iDae9fAL$Hz-Y5;#v0pUN|dZ$y#f}Rhk|eg9aO_A
z%#$X(!}jgitd9&j_mbeM*s@ahDUY_qiG?VJ@<!56gyxcH`Wy<qxrFBq*xVaFPcr+h
z3<Hs&{Hcg?9nzmEIiQgZ^<I&#ksg?p2et?KCnjK1C(&^}m_5N&mARk`xmUJOR6a3&
zH{y1EID!dOdNI4-<deCHTFPJ-w1R!sRjD3ClSLddA^*T!r3H8S0wSOhi4fhDK4w95
zwE@<FkeBBuBr7tdL0ZO<@zcPvPfgrJFvwTz3*k`j<FfKn!Jg+l?Vl`;sU3+J0_(|t
z=po$F%kM5kqd&@4@gbN23XmP;W@g0YjHi3!s;&W#gSC-ie8s4)6pPX<n%o3-L@Gbm
z)1`e1zabjvSSUuP5&Y6k2Fz;cD5mPC&t<E$yV+QT<s^o7u-WAq9sZs-FTYoYt0CNW
zPnXG5MY!8&JQxl<k*rM=VUXo?xK~eWxf`;KSsAD`AQ!6WLW=|Er9ZBpl39Q~B>vp(
zNCAthZO9wn)HEgYac!Ja1}_=Z%;^G`eBc`C%ZB29JPRSQh#dioQCLV_yVd1C4qbTY
zVY3R#c}L!DHm2DQTn|}+gZ*2N+KnJ#n|-X!O(5)TD8uw*)sbzDS4#1{%SX8mq(~kq
z3)MJ}FoOXT-NBr7N45rD-*E1rpQ_;}rP2`M0J)CxL1%b4%<9vi1l5@=TX!2snp#`K
zV&!{-mE8>Oz2Crq3L}t&1lTa{I6TP`Z;6UgtFWp|7Z}H>Mo4{WOpvTRB+%-rg~bZL
zfn={ujybrzbCBfGl4oj>KQWZgp3@2)HG1*0dwf2>G_G?N8h*fj`*9ICy9P_79<W-6
z7zpI;H>Gc1k{Bi@5uE;B3Ce_uy=q`t3vS^qmTc-{$dYA<@1PN~$XIRl$mad#75)-O
ztQ@UilEHV-hTsW!3yrAFjD}<zZ4r}yLi9QHhr>`j;eazhwZb^S!4Qzs=i@jl#@^T7
z_eQYb_k~i=PuZRX?T-O-Zh62MJ2ifI>dyg0`j4Rwg(DzF9Uy#Xa!zqJQnPdKzH3|}
zSRIXAL2o3B`PI86KsE`cWz;xqE;<1(JXtV1)<RGDy=Z8|3wj(WWSvcmu!v1nTmva=
zsQUN_D3QkpLc81eEJ}a*qDz2%=aA=opJ7%7Zi=<Nd&+ff3d(z?3Q}gdl=MC_mRar`
zusycfs^8UWmCk{qw`^GK5dBH{m=!Xm%ij5((w?#0TH}AyhW*of)t0!DQSPiRpQms4
z3=Vad`U)io)Dm+Y=><WX<@E_EtU8<d`-+)K-~*dmFH591>K9xTJ<BFvU|3Q^eGam%
z?`7_0Fgveki$`atZpBBn;{;Cai1?QXbEjanr0dQ%@z-(h*^@5SNpH+bGtURFk~`+}
zvm!4RLeOndDiib_yeeU=NJ1<#RfMXO{E4k(=V-Ch-V>S0W^CL++6-0F(=VnW9%c1+
zr37{8nq9izwaE&2X1({qmPzRxdTcz2ZrcpCFacszB$*oNS;}tRvcfBsj*HnJ5;Gb6
z08e^gzgrXB9GWUp*M%W_B4O>^c~2N8hD#y@hhWn@SH$e}3RDK^XmfEZKb(~16AAC%
zE1x<A9v#Tio(&U#PiJjI&2ugLcgFk^FrN000t_am7A96&w^6J#uA>hINO`!>iK;iB
zvw>9N&X}ypqN!AX1mE74zQM93vC<)9HUE^cHJQ|+%_t9N4VGKja_{Ty_msF}@0d?T
zoaDl-AH0@zFXIxI35b|%s4NPs3XS2Z4ZXptvybR_r8>ShI7y>Xoj;94@>n$V%uSCs
zI2vafS>vL)*_YJN-UhRtcQeha1H^UeOYNsjBZrhlz@lhWaCF7U%)Vux!3NqTTe0p~
z(E>Gtb{~6tyC5!}>l-mnq?}YhV?$mx=ro!Jtw}Ca1A^6o^I~m~i_^3M#oieTf8Pcm
zjAtHKNKtb@_;C)vF#xZp+{G2&Y09ArgX30+_-`6(CoJ!aHdvS_XPzH)crUQ_-+H~Z
z;DoTR9+mNwD4I%8`VEM+Lfn;f9*rI450iJ$C;2T?LTyceQ(pf}R(777oXD)M!ZE6^
z#4zUy)pOIbbLwu+d(3%QEbB~P<Ug*AFjA8(Xr(u<K0VIRZROx?@ML*USUo%l^142;
z2CI&=zEcpNJi?ka5$L6khpqx0xqcUI8W|w5{;oYhIuwMqs}Z)mb0WB)N@s0G4j%I?
z-qmW?DKpC2=kScwoX6H^tB;}p%+J7ESA<VG`XvZmQ;v^-FR{ObO)ihJ@<DZ`6VNpq
zF=D!wxIO%~b~j<87t%#fQD}22&2Zm&)gc$T{7}w$C>G{!a&5pZ{~(tSkhgYlZ5%S#
zZ^<_^(I{rCVJ4jD&*d>J3Y<p*<Ybqx?B7|_YnfoC2aXR%?I8YRQr5d}&i2s`g8%MN
zD%#JdrM|TOy*L}bjt5+E$!dOQSr;}MdU(H|eepD|=UZlQR<kQxFbX*Imb90hDfmU`
z2Me1>jfjgI>c3tR`6m2mX&|qhPv#wVi&7zvZ<wS8WYzq&=cb6-px8wjjgY8{3_hVE
z*;NIlf!F7)FlB0k>N9qDw)<os(n8G~<H_?!$|FIbU25Lh{Mw+<w~`~f_AidPr_IXw
z`?60P7p8EJ3g^S-rf%qU4r*Gz`&{<&b#eJ5>oU7aM<)AGBQjMImGXXdlVd<u>++P!
z4>Sf1%eif_USLh7!xNbHeVCUr8cKS(pv7I#*2Oh#TVV<9H^6E6m(p^G(X`CCa37)@
zEfw|OB|j7^)atE8Sv1dW4Ety$PCVTBpsuIv!^JI;EtQ^H)A@laHsmb%?gM<a>oZQi
z(jN8I^Ns!kst#M`xd$u}GMTWk3Xe{J@SYz8cQ#rB#I-&Uf36I4V&g-Vl!dLG4%at!
z0?I<>zba?K&&t%yg8K_%B~t0g!7XXe_m8T!CIew5g1eBRR#-DYdbzh@@^mXLdrDrH
zW;^O~9JjP_ThH&lPfm(SiVoFKF?t4r%A!<ynTrjCrHX0IUEFoHLQH&7_0<<>X~-;n
z;XhW|shUvCLk2uN+#FnP|IBpr=YX~3Ifec+v9pEZVw`w383<DVuO)6umHOFD;N64%
zu`|4WzfoP;_p+3)JOcGn$QxL=VrVq57&C2!7+jBpwt~<X0f8r#z)f1D_@D^-neHS9
z9@y<*@Ej|68^<6xagTS*Nrrk<82O=bYn%mpl$^M{_=k%5<XR(zV==Qr-WORu4rymA
zaL?o}xW;nikxao8*{_maBVo>3M+op@FLCAvVK;3gR;bh--PyWLU^<CbL_H<;i7MZL
zV&yyc1y#`@kb`lF_XHC4rO~}gB@y^UNoEk$rgdqM;FwZ2?~=5<%uj~}!v%c525Op?
z8%Q%ArLylLB2$*<T%fLGuzT&scM|!AeL8q<RRHE~;Uq8RsVRk@iwB4f&O4H+Kewh!
zzE5&2S}P|i*dIYV9DO(9=fO_)WTwfX*1w}pa9=}Nw?>$y82x19G^r3YwF4Wmo(UKE
zszdm3!D3VvVah%<6666gdunP2SZ$0x$})JFch#k)t9+G8$yY*Ij+#q>d^R)q<4!;{
zc*RX{aw-hB`!>kHsziJ$)I#*$CQtgbcp&D<ZZ{VO>zKpg`T)DU<kybJn(-#XGPGKx
zX@bh-=D5nqJZ?4^wDh+a<%v(>?DIJSolWNFS-o0$QRaHklaR*4-*&WBYYQa6n5*Zg
zCK3WwDi3U~0q@DLu^#)~F<umm%i=T2B0{6L2E!$45-gYv#Axn#boO?FQ}F?L$Ne3A
zBDLGnMr`<TgW}~ore%JmViw38+ZFrUX7%8nBMAn!s#OOmZuUQFdiYFvkBROdp-`*}
ze+r!G+&1o0P&4rl0rd99k+d@_O!RGHUM@9g-7E0XGqco>3V3AQ5XaNg;iPO7Em$^#
zYYR^Q=1I&<x}7J+yKoDq({0fFiQVdiLde^C)9H^e>)~X7qmn12knww!5_jZg(EKwk
z6dSijHor~&IU-1a8G-uUP=`kG`TX3kSLs}HqX`&9a|J2QGg>sOZ|NrDW#KN=jQyIC
z2qgCmfx*##yA(UN8TWKK=0$BbWTLbvSMPjLKLb0|4-~&g%T6zdUTju^sAukt?)c9D
z8M$^(V76Akm`(!pn4Qa?<Op3&@j|*2j(PZIS6WK`5R&G;&T@RePgr5tLUVEQ7D~>~
z6K)Vig?LgJe~k1EaLx;KKu|XF_P%M95y+6AY|MUv1Z{nXjy^1%Ew4-{@Tp}vq7ofl
zxd7b`Kkw8&Y8uV(8tJ@ZNnQol4ZU2*s36)Gn{*b4@g1-HD|!%%K(RY6jqU;T!|f?1
z-s}qgtf|s1Yz0cLMr7J}X>GNXll`UTHe3vsUq(g8-?c)n(lt3da>ptXYO{04)zoL|
z57=k#5~Ow-Lc%U&>T8dLyH65CSCDw>kRjL6YOpOVh&T$Yo#|UsQ9xVDMa{`L!Cg0h
z{<_;angx`J%=<}22twxQ)^PqEP(ui^DnR4U?BF=73(41+9PE+#i<MhW0>SC%T^9lG
zvDI2%xbX!jv?O24F#XrgEGqMKFx$BEuUF>XuKLQyJ5KtgnJ_X6M~D-FW9R{GA+V-l
z?Jb#g!tMenKKL-E0LQ4a)9dCV1BId2Q?KYCRlRKOLT#Qp(&ed@@<(!W`Gv~O`z-n*
zVfK0G?U~c*#vmS&o8p@n(mH9(Hp9Bld69uealI1~DzTy8LQ?C|dZr|}gAN(H2M><X
z9aCH*E9%c1?`wG_K8p1v>d7&_k=;2JWx#V=sCvx(puqME*IWyFU46E4@CZSSI0dCf
z8d<Bwb*VzJw`a6+w);p^XE<sLlD+Blt>VRO7aIxIP23HS@`~3F?`11Bdr*%t3Z*(~
zQiq&Nu%dn-81$|ohIY*Y^A*-Kx9C-A4i}8y$q?-fpzay>h##0IKpg;G21(``B0mjn
z)RNp??*1vLlSm2K%&vYR6A{XN=25f{;iB3y3+}$3!W)pGFBKDX*4T}0GVK%^SIvMi
zq_EPWT8r$bbAe4u_EnT1ZZibTYqg|m9(|?o4$RrBO-ZhR&uTj1!n#J1_pXGw`D$nC
z9CH_RFhr8r%2)>__|eQCD5nl4;5_;UB8#7IaJwmj9Sg4BWRND0k3ZDL`v1NdNsKS<
z>PHfM3aL=xC+>Y*>`c)>U44!ML~GIlhJftA&J3OxfI5&woc1G$pwPaI^F$8<l{yI&
zn@#(@!}E8@8}%}#VY!VPQ%Jf&N`Q3L@IRv9LZeaX1Kcpl5$Q;b{D%d)-uQmML!~M`
z8=rAT6D|Qs+s+sE?!~22zqo#5uYsNhCorOA^n5|H^e2zz&Po7b+eO9}E4{pNfT4rA
zpPKZmLA<zqSNU9RCzCrKfRYKd;4o$t@W$!y6M#<&U$L_~NLxn`x&`CUAvFg&Gw6PB
z*I@GHS>_WPUK6%9VTz+kI<oRSJg$JjY_m8DTizzp@ATP&_6^Qd$`x!*p0j_>Xt1`q
z7z(Yv6VKE^!*#Ch=m`1}<(hYhK$whi!4FYL&)>U2nA~>NR2@JUXVVg+XKOaqE9`$b
zJek-7ZYAvWuQPE2zV}|1%M&BjKT#wyyKZN7-6`y{ZxGsjdW$-Rg<93_ys+O`GjU;d
zq7isETRwM|AC#6bbY@Hd|0pXo<)tx1+#JYgN7C^5euaw@;$dVe?|@9UDCvQw3hS$-
z&5$qKrvphGqw3#`fb5h&?X1D-E(uj2SK>H{#6y8Dv0CXrfHBwbGCAZ)xw)KIEUMNr
zs-!n|9Yd(Wc2zpr`s)4W+pedUR~A0z#>5aO>ZcwT{4INI3pXc+s1wi@P?ws?==~xE
zkcZC@JqN%)vC|sI^<%U4o^v8lCLh}n-}?)8ZZGUKm0f|SLeUmP@m0DgSX+X+Cwqnp
znLVXfM<}eu29};>3H}GcBAZ89Bzv;arO!)YQqj}aJyyGq_)yU?d?cs<BmO{<GIMVf
z83u{JvI&3a%82*&9N}%_ld7&{4HTNsJUEtYUQsVF$Y23S6OEnYrCnleCNPGkjf!t-
z4(6DRSBR6>_sn`1`@SUR=xVG5FO~!yFE{h*@yvYfSs4!iE9i&p?0qT92R#>Wcob`t
zB0@g*ELQyO-tOcY`NMqY2a*rVUhRd3AEbzB)?(7C=Vx0fU~iPZZXM^zS{rQLzl>ew
zeJUY!(J!aVUef*NV7pOIH#@7x^D|@_zRd-`^JSs3aocMxAxX!J!-RE{Yn$8|6931!
z0j6Og4aleq5KaD((+XS}2Hy1AY_l%4;#YTw-gfd-WrDez`R{n{$Xf8RHIVNVpb)?8
zgB(ky{@y5MGJRtuk_u-d*hr)<sr6TOIV0XRn@yiU;@TfYP1Vh&HYm#?Ix~hPmsgs3
zemKt*_Q5c?!l^IYH*MA9rL1%XaE;@DG)mfVFtC~YUQZIZv7~wGruEO$C?OH^t1{X;
zxShIi0ZDdiwsTDOVyutP+cwZ~jslAY4mRXEQQ)Rgi14pRj80htl*M1nl()#!U!E;5
zlEn~i`!L`Rp_{uO+>SKi0R~xox(gQ&tRVUzQJL4#jVl`glmHt2S&WXqiyxE?oKc~*
zUS%5SSx8Y2a17pDMTc+a&H6*ZNH1@5@CsK8y``NzgRn)~tFla=92gQ0V%ac%7`$j|
z!{6xZ;kJ?X_wN92$dcEhNPUfw&`HowX<B`IiukU&^BpZ~koTQ6-N*E_9hRoawC5xY
z1lC@actE`9uP+2Y{8R_g9A^DLo~kb0G&LW>qn?ED-Wn5j1l~XSPCtC@)?DU_^kU$B
zW=}W4F`yFW(%Z)PUU(L-)(bWXKer=C*9u}Iot8IkZ`Rb;TGDj7;oE0y;pg^h#@w2z
zM@s@BfFrP*3gBREO;=h=uUm@hofkSYMV$QLyCadVj=*+zHv##ytpVqK0L9(q5f7Z`
zQ`hLFyaNxM#hO)%>Zm5|H=$E$c5MGXslKh`fPGL&2J9|6Qq68$S_6D>Cch5qA5`1X
z{_a|6X5v>Sg9~*J?#_AHE2}*22gP)lQwg!h^RSD*JAF1vyy@34&*2S42kTcz?^Ff>
zexrBmTHw1uZZa{ktE{xiN!%ZBxcvuZ=MbG~)UN5+ww*7wQ?V+xZQHhOJE_>VZQH2W
zcGur)_3G2B2d8`R?(sX?cy3(c?}~q%)RvUifB%DYp9w)3H@5fa57ml^FW=yx=mzU_
zp0X3t4VuWPpiPGp9^g}199$7Uy#mJh5Ln6l41&I{nqlSw0-$#Y3KNkU$as>z{l&Yq
zD+x=fPA^rmA{<(Ksap-*-?z2`eLru=?8gfUBJuA#X7#8n&XQY0&erR^Aoy|)9Vp&&
z4Cu*}yTyKX@|IE5Fd7Dx^>Y4DLD~EKF1Il6*DC9Z>L~L=<mXp&3`X<}CL;+eq&h<m
zZAPYxb;sjhAX<JiEGpS{dUlUON5?%&x`_A+aoCoWsliaO_Z9UkXO@8(z|tl@&T5+U
zM2p*3p}kpk^ioRH34+QDFG=?O^IP{c1@_4C<XVPagib)hJr0dc*R@oQV!%EJS;AdV
z?dc$4HG5YK3<QfgX$a#4-@NKV?0yOR-Evu32#&iIX|M5*ts(NR#kZf(^PvN&Mji2q
z`HT6gTZ70DNTSE9UGE#=|0!+1cTeX2N7}af_fh{}60(zvqobXffwPI8jh(THwbTEP
z$eo6Peq2hCT0C;{x3u^#+W(TfUrSQS-UCGhBo7Z!og5Di(XG)7J=)Ry1)%Ax!H~m`
zk3s%RN!ouZ+<1uNX-L6=fY9K9fN1{vKQu5luy>ZXwsZccc;~2I*lx0;ecScj^s!Nq
zu{C&G;5tKqRf#L<uO&j*Fwx#>2c*VYPotG5B$G0Xb>7HhYBhAXWa-Z(&wM>6nDN6o
zk%vVEBL>)sw=C22>pgp!8Q&TT2qBYubCI{l{=1E&`QEHIntko0fK?Ea1SZW^W_L}e
z8GoYqEvlt0j?*w>%ZPpH_P(G_E97J+rcaPnm?W{HDxwy=cdJJxm$89Tx2FZBN|cGq
z$OYUBe5EKsD*!3NoS|?*D^rp!B}PD$hv@&xu*Yo(O;^leD$r=S#bDJ2jZ<!fZlJUk
zxh{N-r5C*Vv>Z3lY9v)oh_*nL>%|n5HWy*>2}Aq1K7SVz>C<*3{1lt)?X0|}m*;kr
zK(?lw6ntxJ-@HkfF|yX~eb`kh=_AiOh1j8x`e~f%&%j9vJyDH(MU`QJh<q((y3oN~
z|0O@3=*6>li&Pf*CsD$^(5{!**d3v`sVilS4}dwuNBq!Ps>&L)ZY~Rfk2+(&cHiKj
zU3dDv{blj+0<VO^VIcQ1-tW#_p+;$$0xid`?^sjC8zrwRxr#)txX#uol`o!*Fw`-j
zZ#2MTMn=1tI;XQ7q5cV7u=9{UbL(dOtEFq-sQTSP9RkT(c@7L!za?;#*+h;YQuCLW
z*^!!aP&n3yM2#S|kVo+0P;eEh&h@<NdSeT3BSxxJ)gO&wjTTc4EvM1)R~lX~;7Ufg
z1P%HrlBxRvM^}39u_rp2$qkmrVS244twGiGm>H7%bh@O2SqT{9lE12z0JyeoN{8?`
z-g#;{;aij%dzh;PEvrfut9Ba)j<2{9Ne)xhhv7^$ZTaAMi+fn>+UcsiRGXHVB!`Bf
zTFz{OYe#7I@A4zN+pBS!v}lGLRbJ@_q;8PJ%!z~;W;%`TrHMMc^qjn+0k)sDEim`o
zz@gN2IiYr6_0KZ{kOpn9t|{5c?d`p^desl?^Q=65L2L8Ij2F_CoC+ge^`gk@J4y*e
z8fRJpca({<=23d==$sX@ddjlDO*m^t!Vv>PRujT?S=4Q0l0sK_>9m?I-14B|_Imra
zvlUcghSU!7Vbrn0S!z$T4wOwg2&s!EZ5E-WM`dE=LGXW>e2n|n_(aT|QSlur6P~1u
zY<c5+|GYshK^q``4-J;<I|EO(&fElzo}8ae9!@W|KW&Yy(MxXG$CCdP8FgM~6fC^`
zJTdR*Kc)uQ!9%Rnude;<9=;T;It-}S0eu05lke_->0f@izZJ3&swj^(==4YVp<Wgr
zs>7PBN4}HVeZK!X_cxNqUs*IVU92k%23xFq;>p>wVpdu~J*+jx;i&!t2KxD{6#=1+
zW?~>agJp_neKbpM)vLl|5P`pkW4*I!=>_ex7P~_0ccnccMOk6H=@qzJACtfl)~{>g
zT;;r9GnquKCb;Y2AhK!l%E2=c^K;IHcixr8&uEtMAp6@z;0F`tJ0<RLsqWQ{8Bjh}
z0Y1^#(c~bFd((9KYJepq#h!wRC^DbAoLB;zIfEme9@p_PuZMS$-XYbTzrU#*SuJ+7
z?-xAYgYx`7nYru~0LN|@!6<Z8YHN(;cGj6`tsw!69grAswdJKp7_bVj5KdR=q4G{n
z{rhMS-vQ>(yNcV)B#}5!6P#PoV&7};?`>HPba$b4Zj!F|nr68yg|}Yo0|xZu_A(U@
zA3y?yYb?>?ERdaABXMdg_92)SQWTRZEwQGL(R){lQuSA59E+~evcyqY5R5HC0JL<(
zpZ~Jg{s%PfYBIpA`VTbTg!B);`v0)k+S^%s=o#7BIypPK7&%+m+5X#fQ`8KUHaU^L
zX5fAd0x|{>B8(P`x=rahRzWzfq(R{xJqbBO6|BjT*Ai*Qq0ef2Q;bJl#bHinSR<M}
zJ3pSUB=IhvV97s>Mm~~fiS^Di_Uv5U5b{^g1Zh!5N<k?fi(j%l8aoFYw8i1O#_q|H
zz<GAJnSM~XffJ*gJqTfInrIED!Cq#<0XpKR%8KWRu5LhiUtc~i$4v&%h9DztY2xgq
zc=UfFM8=|v?L+Fcjp2HI$s(y*KZFA?9D&T3PksI_0+||5Edbe;WiOCxmyl^xtdkzj
zd`ABjIgfU<(cU&ko(*514aH4Gr>bW{%I$;Cknaw*s-R4`g98W+l$wx)Xj02GF1(pV
zXP*fvb@HJ=%P193XeN?~st38uoJhmiW*c5|$oThil~AEC23tm#%y~xcyX;#E0Z=C}
z$wWzzZo$sHFmt<n^l-WSA9TgkxhWUn&z*maVbRgDpOl-4#mhEX7P$!@Kca?71d4Pk
zcC48`xr>l66>(aWX*FsBsIX8)Q3Z1A)}k9LtVLooQbp*AYhgn!Bbim+lTmA;>wnkC
ze$msK!bOPoX_HK9lTse$n}trXqQfS+iMj9dq*auoUJl6UpM6pI!`2hU2}*H+$CWPy
zJ5(LK1lmsnXkDM^(1d<*9@=>Axl9356Pt)>z}?NfB|AvknD|n`p5JC$ci87rbDJr<
z?rQ-lykgMc0Eyh}ycE6|V8#xfa#DhOym|Oz2%?GEeINoW7Jzi(<$@WQS7mRbCy*84
z3?KNusLDkIwGRR%B^aOWq@QHc67{DnWLyZa3Ts*xA$thjnN2_D>`%P0Er%T0%$le_
z<(kb@dxPZ=-N9W{MoYr&Q?_s=MC)&ci}|fyFHhWAiZgOxg5=i0J%`TcyOKgHq4Am1
ztz&83X-g7!3r~h+Mv%zpM~jA@P7S-As_j9p^-}63HV~>}k^hE<qbZCfa%5;H(a2Q{
zk)m>Wr~-aSi-#pa?b{i(0Hy@g002}p95;j7j4XoFZ6fnS=`>0e`4etH|2nLXhah-{
zByfyhJ00t(ujYIotu{Bl%Eug8g2*s_p=yY-9loxp?qVO@`!w?eYdIH3P7TIs)0WYi
zIbVN#sWh0D9hXos6;M6J{*rKXzT92T{k^7lD;mC)$=+0_@2Q%gev}Wspb7hTDg5AD
z=oXf2{8+kCn}HY6i}e1>Y@q?bPbsT6JBUTXBX#TNrvh@={n!$@qRDE^Jh8#JvL%$V
zOWphT4a1JLhPUH$b_%SR^A*36G!VyINUk|1BAP9*!XGC<#T1btD<x-->DA|oRmV?k
zo$g{_5Wi%;?1S!?XPZ@tJTK=K?PXnJkKOOempS)`hEj9<cOZJz#NH{k$q8Af(LY8P
zV#V%YEic&8pr?Ev?r=OF##KBYUc$)cK5#mXXVQZdsNIGUIgEzXob^Gt4CE2b%AmA&
zKQ#-jYa!?vB&=gX@Ae=eS9OfF18q2cXB%)L`!DocQ;jdUxVpmESXu}tyP!9#p(M@P
zHNTU7#T+aVpRvk^{x*h+%WkWW;~RE%@l-`TeE6s!oVeoZ8QlIhC5)KM-89|{s;x#L
z!X`%?6L0%#Db<_AQ_6FfTTSBK1MYpp<v!`uB$!{~H(dvNvtd=Dzd5~%+sl-!D{fW7
z8bR1C<AFWY;_Mg5vKt%pS%r!gVldhimT~g8k#?=DDU=|+l@j*z8#1_mT=Sf3$GFMu
zm0{HA3(+aDP*LhQGikVmmU=0-igObDUvxtMX6^@jDh2H@*%TspcKQm9!C34|Z;SYB
z<J-PEje$H1hnM|t=tZ)nJ&{0V&ZVG{$6%}-37Z|p^DMaaw+eIRdJ(uTbV*bDosc=V
zxpUEI2O^*Ch*jML_)hR0qt%w>Q~_TJ$}^$oyf2Oa_ya8ZWn=2{GOrssV(U!dIiqf#
zD4_3o#PZ>=ayH>gXY<~X)ti2CbM)7#e>dx%^rbVw*9nkT=H*G7%BW4B9%-M0TA(xc
zsJpDPF`JDVUKZ@ETRHDbVaSW*hHi0{stilq^y8BbUTvmS?PztH+Uv5()xR)op0<+0
z%3nwNa)runo)fZ?7+iBRsOilUh@GkBHiTunh1D_rIjEke3Pjzj{KY$h3qgu2LZTSD
z&R;*k|NGVQpL41R#a|K#83^b{8VHE~zdxt`r(gG9hqkSWf#ZLSu(%d>n_|yi{-0qI
za8*=cC0DwH(JBF~4lqRy99f(JK}1B9j>Zm?G(6UoqCX!!94~NT4S1MQ+aVfFe~L3W
z>~?&(7_vI9B-oOtGs*DFGe_f6ZFCZ^TLS?$=yZv(iP^QDQVd!fE-qfFyGJRFbz?NI
ze%dS8bcJhM&zWD2q5Pa)p?nP7?X)`ckFBpHx?u-w7MoyvWIA26=fx*^K1hX?#bb@d
zvlkN(se>zBHx>Z5KOT)@(n$x3HJ3GeYInZyL=Wc;P6!s78%-`SYGH)OJr%Z6oX>8N
z##$rihljsE^P0pvVn36sEYc{@O9P0A_OREQ;NWqlV+r|5ZZEH|@6+6bg|!fDHCm5~
zt?$|8k^(^~BvKL(C9)bnD(!$L15=yM*H`h&(}IPQu%KIzVo;H38xUqs6NemhkA3hU
z@##{D%MZFrZN%R@prGe4eM4rcY~C5Gvs}E96KOW+&toSl_kz4gdsKYcF!QrTF~rr4
zeOy=WLD>Yp*EPuM6rzUA%>DryPR(Lh7iqRQ#4zZMEp&kUq%{91BbDLZ?zYi0Cjmtk
zzjpOi<1;;zgbC1Iu-~YWW2cw>sVwdgF;y)t?qgP8Si+Ylsf8~z@mOtrGt|FFl&Z1t
zZ9ngfNYdIsv;mJKa1q8Pi=w9t^9i_$OGIK}&Py8HtdUEgVxrU?VDLq*$$?G6w3hj8
zpAa$<lHX9h-4BjR{JOJuEq$|*V7cmGbEEnsZ)_|mp{j;<W_)&zdf3%=!IInt2u#tj
zM|P<!F35}J3nRN&g%8=KCwVr_PGjIDvCvKsHtjOVN8K}Zl-S1^m)Yre3uE!R+h}y>
zZBAb__oI)sCqfqNXIsbAe*)2Tb_UJtU2JI)XOsbkJsX6gYKX0%Wl#``T@%DP*dRam
zhLay79FwH9hZzPYHd`<)2mLnn*chdJ6gn?Sp`}_V5cixTs^K@MuANhc@j>mor?I3#
z&eX^(rUA4-;K;xW`QrkUxg%`+?yq%`YYiTB11)9i%S$Q{Hi8>*7S)2e5DZ_>ESTtp
zYf>NlBX}uUw@mc+;*QO9=wa;=*Y5*hk%Y33BW9?_``(#lG_d5@JLYHfkBYYot`hwc
z*9qn$9fn{afx1V1h1c)#8@u_aHkx3y5)UeIh0c3^iAX~K<kbq2rb!4kv+!m`V3&Cx
z+o-yi6JoPZ!&zst4x)B0K~Db&$vnKt$S%*PFf>kGB31?}=_T)<rEDytY2EiOeZ$m`
zA)%Xsp0EY!WBx6b{R#<iA!a=SHXqRh#BUH?+n*31X!^`DW1eXgjm!{6Jv<1qSS96o
z$g*n*NsTt7&E@G*Cp-_l*_3}<4UBP!U2jRxOYq51=~Xvp4cRt9lj(YAZFhs)@O#Nk
zVt#l(!6Dxm^!Gf^Plm_L*bG&<Q}B%4vGJPw{UW7)b|l&F{X&@A=rIHj!Son@UUKrb
zEJsRcYL|rCb8gZAROK0IvgT%_-xf;s46C;S*H}FEgoz!<1?IQ+N6QZ{wX*?+y978p
zSJL`GDBb?{NFpe158}&6`rk17V@w82frR(S+dry7e0u#kYDiOzMn*$wmf(JZ;^gK?
zdvG{=VO^cpuJFYcy%k*Q^%<i?#06ZMc8-pVbIGHdpP`Q6m-6zu#~ef=cIh;)9+Z(^
zjbvZo%4cypn%AN32YCtVWS=30a22V6jndp0!9nMVXzB16sAUq^i2YV@DytMxAaG#m
zs&iz|<mN_^9@ukPA>Z+;w7^Dib!G{|guf1mqi}+J2oq*D`|WQ^rJlj@QZR%eAbb*p
zJ%b6N!RxvE<sqTT=Dde8m)!@Pf(CalNOzhZVG1S1E&QA7UNzHMZRy2}ic`S!{kfe>
z@PnhlJFMrd1g@_--J(rt!uz75AdkkAD7MA~lLtv}lUVw2DRYdc5{UJlvGw|lTCITF
zj_sRINrax8g#aA-d5mAc()WoW<aPF0?#2KgeMQ#FA)<uB_<msVW&iDi3Zn%g5N)<o
z$Ba@gQWO7r@L;uZc>xURJM^jORL%UyOxg{cu{{z~CZ%E{ZC!P7x6U!~jDTCjOJ#+8
zO3-XPUYD3iml##m?}H7Qgh>xF<t<ZL-2kSSa7y|3VN=O|e?_*&bp1|^NW{d&rZTX;
z&jJpagNNKjf%`4^-1s&oFjA2vMs;<#GO!)_vygEZI2~0qdHInn3wUTT?EK-#3@NNX
zUG?#p-Frc3@YUIZJxwYE46{(9LW>hXCuG#2`Xk29OA$y<Q%J&`YiNAGgVPZyP*B>@
zlMh|rvYhvExsY%Uq+}g$QM6;Y0wo#tr`UUmfoY)MBJ7N}Nhijj4l$I6I>|BdDC$UD
zyzGjzi3}>*NMTr~^crE}?CT0#(+8Ni{}2|e6i|FC|EcFva~fs6NA-njhubfe-#`Xd
zqHbR>X{-t!uGhz7sK2$vDBrZ9)I@HzG&%lx-%;5vLj5S_c`~CIwnc4HmhP0@Xt532
z_A8qn_#Gv`Peqq!sDA6b&O<I1-4817L&oPss!lv3Da-se6++Na2^aXF5*=x4A-l}N
zMOoa{SR7Op6G&FM$S277b#Yc(%O^)j$$9C`f-(@MxFJksQ;{rI$HPvDISPO=x+fx^
z8#T&uMy}Ap`s3w;(F*N!4OFUVIXg%mD46|A0f8^KVM`V4DS7_(4iok3Nb^Te4d!nC
z9E=1o1<AOyO!~F2vg4MuK}-T#$6}O6Vy7kBd7HY(r&8fr1R6NIvOUw*8ixN^LX}7z
z5UxMX0y%a^<c&Eo$%{7G<!pU2avCkCl>R#V(`SgJoi-n;imguK<7rISEmd8k`w0s~
z$l}5la5|gxhtei1otM=dk6A%&T4d?0IhhHG-;~(z6@gshXtJhIwPyg3<PK<fRzOWG
zuTU>BFdH~^pLh+m8O+~kHu-cbP$=u(*|l492)@dr%)LR98<PGWsdVcR$R4XRv<<~}
zrL&f43-Eu@w`oTg^AyKP)IDj5msYmtJm&+fr>gDE%}Jn)A{kKk6%E?4<IuL#lRyF_
z0g65{9ifIEC<|l9a}AWV)sv?2@CyjEl<Yx*Lw6q?lNnkfB+bsJ>N*5o&|*!D!5Z$v
z!qjWwpE?Ml$c<|}Cd`M$xkugS8{y1HNI7AEw}$H|_4unR>{fcn_Pe=dIO)GmkE?WI
zoB{J@Ib6;S{8#=t!IMmPLRj2UeSKkIUF+Ur#>mIUguZroL9<2p0IFlg`$5T+vA9rr
z4<4c&L;>xmd9HO(YCJH3a5L#kg!oPt$QS6}kqc__x537HCN*w^x+vCF@$zU>Ykwla
z4n#l;B)lO6{!$u_#hVQYm^p@3(SvGD8i>#sfrGW&*bpDT7L_p92Wta6LDdQEg$jgV
zxb%XU8o)P@3K0aH6{fOUQq)b0JVUC?dj%`MkOygpoW65zLN#z~lKsLmx{HB?Ncy>s
z`R4V;F(J3@ASUFBN8Xnb2;4pqg*LQQrLQ+Fl>+4sQfo0=9i%;3H@W9m@E!~e)t3Mm
z26j-dsEcmpc#s*)FC4XQhMSdo=zlVz^5kv>&YCEulB^yY25$*jY=PeK$lEE(`;=WV
z_G4#8&%+!cSL23RCCl`y-jfd??vVsv2Zb#$Bt>VoEK!R@F|(4^nc`Fm$L(XEU25;X
z`hy*`9Qy}CbBO%TRh#6FC-~a5Gex%HJ~)K*gal`0JaW;?W*j69=CMP}S|_?7BMg{H
zUxp(Wg=gLz60Jz<vJe<ww{rZxn}+7%#ScRJL+6YC_a5t_Uom5<H>5>O-^rg0I|-AY
z5I=m-v~@uR6@5RWNko7$A3(W!dMuUFW2TN#z(MtDi8c=(L&E%%aJ#~Zv=UktP|j(s
zthbkIWsUU5bz<QD^`HuX7hhUhVgEV0pC=6rvuFLxs%a>+4%wb&y`s}!UAX_y&g?R`
zcv6vEzyXkQHg&H7gge~6%3}CDylvr)Zt&j|?WHpbUxu5v+ot|faM5?M-|D+ex)nY>
zd1>H<PVy46GTKrol12(A(i&Q$U2t&=@2KaQP@{}mua#xAYWChYyF1tDt7vn=D1IHZ
z?prTQ+O2*xh_|x!<-GmVap_h`!mt2#btcMQNGy5+G0bzfi1pXN*s@}5WP58WeLNwB
zMgttU>7>ZBTBqaCwt16NpvcrFI{3r(fn9xe;yzIo{{r5E)A2J@!@vlnbkNZts~8K>
z4K;`(K@Ngl4dha>kB}P_k}tsM^fYe`6)7}%+C_SHC7V>`HnZTL!&U{dhntrl;FwyH
zzf6vRq`mqtkfUorLD0zVo`F|DY`uLn7wIw{QGl+eylQkBtJ5Gk4i0@EF4j>|X*`JN
zQfR^6<KMR5T~jx_G#zZocwgVjRsUNxoxLc{m33tlx{gN^8$V>XLUv*Xk-o1^{TL=X
zJ^DSU+o5tD&-;7g4R5^u*YxQg=qZb$6If5vUJe)*WCx?a(D!gl3}AVyo{HH0(;QOK
zUj|WpL9TW;QA-sYQPY-nnzqk`2R6~3DHB~0LfsM(F?{rX{O2N*W3_=?_U8rv!nq<H
z6Y<sK5EvG+Pm9KMr%6w!ZYnb*8&nhFL<U%y6-C=PD+r9-PmpZJ)9_-D7tkVmT=k7{
zY;L^t&0GpT<S(<h!a?$Zd41{u$qn|2CV|gzqWpb_Rf;&<g{y>g{K6sQd&gj#^TEC0
zdL6}eqo<1+ezJU&JalMFs?QKDtIsgnzf}8qoO>u^jRWp<cIS>7cpac*X*?R%o+$%7
z)w{Sy@f5<Ms7%#@ThumKucZ0+m&;(n*_Um{TSdz;M*T_mwfoD%_hO^i`Cga<=}A+$
z1F6x$yYHZwu7D8Wwc;O}ycUm%#B$_Rd>X5uMivh3?K@r2IYfU_sQb@3-Quo>kqCi6
zD*{H$_PZJ!#+y@+%&a>~gc_5(d>4`T3S`aAiq<R04iXvF+<^2x+i&GCZyvKk{<`ni
zQwCVOuSG#06uuv??z^}j((XItUV@H;BUv2>YXr^do%lGuW%Dngou}!M8m%C#fwZ-s
z?T!zc%uc8Kh4ZVPAMw{!R9*wrlN$PR)WJAZEcP|og+j5<dH~!eOzB#_Ch%6BK&UsC
z4<dF;ElsATm6B0)Jao4+vgyczC?ytie}MK>MVt}5n~=^8N{+o%i7nj6Mb^Qg7<@&G
z#I_%V(eG%)nCjH<1kcg}E$RIF<v520=Td1B&r3r(ap?DyzC}F*mlWNKOcD#)cm&SO
zf<7YeA11JNILLToq?4%iuX3Yd=|p9O;*d8%Nm$IP?s4B_YvQkE&U5}?9F9>7njdW)
zl`@dlB0k*KNxKf*2j31vKo-Hw`_w1VX@Z0&6l*TA^45uNy4+SeXx($<)rLVqx_0vm
z_f;J{KFxe}L#Ls-<O}P_x753#F)dF;kvYi_b=%cnTe4Va|Gcp?^l33N=T5s4!3};#
z85BO;QYhEgCPmTtWeTKgtOi@UCT=5K>BMEyP5oGBU~W9NnUc$x5n|R^=>8iYQDbX9
z^VU9S)H>}W@*%7jI}+KJH?eI7Su@hnJ3SsWUE*YnJe|z}9^t9hS5(){+_MA19Lbqz
z{reV)XZ$H*4nQeH!Tn@n+$KKKzVy&mMB9PLDM`-fRl1+(Qu|X~hTWWWk#FA(%jVs5
zz~?P~7ATWWh8@B*2e&Kct**V2lSxrVL);;-<!k2MR2mdtlP$U-Vzt&APm}Gqk7g_P
zeW%jJ(&AnBor|{-$8mGjo^j_(0nNqwo!$RAOM$^Aq(7Ab0_xEM2BQ4$XDKGOX8-0W
z|CyrrTsdxzH0=E-ALAocvV++qVx$~;IjI;ZMv_r<Bv|85QjGRQVg!iG3xR>a0ZAU*
zy|45P_U!YGyS{k)0g;(}kZHNdm#2f6dU~3Ac6fDodA0<8ANn%V11x--acR#jeyqG7
z-pt|%c;7!82S2H&lk({UJE9kP%4(DHHIq#o&{xM#^6nJRtaX#wfHD~Pf2=hzSl+R3
zSChyx4tIHV{d;?SOTO;395Vi--OqY61-(0<A343%l-2PD>c%L6_f#BqoP(=)ri^MZ
zj48gJz<lS9nxvefL)=!#9!eji2rYZlPAv7xnI52ir@}W*40p+zo$;d#!T7@VacH2V
zz3HbenQ+&Ld}*MZC_H_T&?iYG_t~eQY*;5OpLNlWNT~G3SH3v#&PL*h88GS{Fj$-C
zE;sb?%uw9I?8>6qPD*~s9!wTa)r+L4;R&+Wfz&RxMK{t07-W)ZbV&m^i@T1<KXtZ8
zG`OjvK7hZ_|GcqMp<<DK*a6>qdHay<)PO^W`e*~@vg)%3K%XLmu*hB*A;_oH5~38t
z&QsVapV*LRPV#K(BQ5y!mK;f9ZyDa;{l${NsQxVY6V!1uBp2V9tM)OD7g9*z=k4Jt
zEL@&+m7R;rRDHE(gSh)t$o!n-$s}6X=NJpLsYzf%SHQ`6_3L$Z=J{dt30K1TeNs+H
zoWs*6z0*tRTkN3M_1$w%V-6P<-k5l{_w83etRbxU!C$yx?3OkTOABH_P|YpBa?98z
zN!Yu=_sIrCu5k(?(jQK;N?n-T#ubB!$ur8!w`qgSFOT70*DT(-+^T8NAmo2o<m>>r
z{dIv@@wQ6l0LnE7z(Gy;!DnQ^Q=<+me@F(2$Y=x9)BI18%}cK%b&0SCBSDcaZ+g66
zU-UV`Cz*8{`0qSYpeCjlvdE-rqYMYKek!%z>jH-n&={Gd8bN9kJr(|%E?&l2s17v;
z9%NJ*N<JLr;c-ot_Cs;*`p3h_b>hifZR@@Ldj45AghH2v9CH!+KBn!?92SNst)KuC
zP2LGu^>dNBDO>PM`C|}o1~PSHf`83p8c+B@Ef*Bu>)wURv+uv>w(8#}IQS0x5tL)r
zZ<MYl&=EK0T>saSksrnvCch3Ke8~jL0F%PXhN7WSI{AuM(^S=#DOa-Z6PM09QLJT_
za)rsFk1HX1AgcgG`wFuG8H>ik?u(m7B`*<=NgiLpR2k%A@kCH|^R&4rEXocUX7*7V
zE2~G??cGm>#)K|{L(ErkJ2^>BWrv-YJ=CnOI7d@f$5IdN)k~C}fELg*`It+I;(0dQ
zov~nmhKy`JSon5$FwtsyH20U$M<X$8ja=4y^fj{hS>Id7Si+?d4%iRrcvwd<RyQpw
zI1Nk{(@9f%O6V1GuFnr(;f+q<*4ed2)*hnxBCb0J<ArJVU{wXX3z1Zapc15@V%#c$
zMeqV*31f0t4MhIV1@pcMTC4?8IxBxB?3N4h3)Fo!VcmfL#A2010zq8fpt&s@jb*NE
z;3i6x5BQ5b?*53r;$R(5tgm1!=n+Gm>|R9`-ab3f5Q3~74l9^wIsg$=Tena%3&i6~
zCq1|V;Hgf8u#tWL!b&)lhxx$u*-;#seE|0fjFn$R)TKT}UjWjF1a4^@xJDu7Q>C<6
zm^}A4ycrbgSdBj5jv45}F~JnICg(n(8fnW?l27T_%|vb;>UHD*>4?m$JSFRPok~DO
zEC5Q6zFC4P3?jaUmq+h8y&937O6N}IVtFj^g$(Q7CHb$}1C}a#HB$1j3KmQqFKD=7
zC8L-ij5&k}T%sffCRo`ZuN(uGx%w~k9w@=Gl&A0l7`@q627#NJsP<<|_j#rGOv3cV
ze-?NauA;q9p#txsqN<9gFz$m!a)qXfcGEDGZ9%x;TseQPF{V>+4!OF#zghbsdsu)C
z5Flj9F@IN^o&3ndFb4=p<vw=Z0s*NX{`)v6wDYFe!aB(Y3%RnAThOOPf>p~WK7;mv
zC4B?wSG&&O*NI2%!z|^L{a+t8=;;sJ;NE@&W+{L_Xa^$93Im>D%^iY2n|t6@c<>tS
zl($@z{%595EhlXf`oDmZr<ttLQ1=4bzLWB`k8TmA8H}jz+I0P;OKfP=n!rVZu=k7@
z!Mu?OyF&wzo|@H{6cDV@p_O8`_Kpi7rVRx2G9h-WY4p}eO=Iz&$PFhT^4z_~{yFvZ
zFjNY4mG7!Qt&a73=`HHV>5!;@we`!6COTnywBVUH&FGDCUf;oA;;-4_lmIPt7fYMv
zw--4Mi9`N@tUvQ$mMx{4%5Bk&c-3bJRMLpAHz;XtN(+FSG}=<`tMT{0Z5eftMT;Lo
z)m{%4h{i=Uo=n>FWcI<3UZll7Eqb3n?a!w7NlBj+OPOCSkig{wSpxwpFoLnC49|=)
z)z&3<Synl;b=K~eD1{jT;uqxvaPD|0Y%2HT_<r5*9oJ|ZbJFEhlF3PK;Smxz&n?+Q
zW@QuQ4lD_50>Q4)_<zg<7jF!6MItF#^01EU85PvsxX!Ql%8qcXA*SDuECMjZ;jyfm
zqjvS>c0d&c`~@|$i0VL62*DR)MW{VXtM#k?wT6}&t$@#ovIztY`Zr_1n5SN@75)fo
zga*^Dz;gRLFL{t$v?nHzj!xqQDZO%ND|pw92@>rF>*NTu1}Yt-NZg9mpnP38r2`~H
zLlqd*fga=^o&Xh-x!)WMuoLoCLm0HBaO!orDvWcd<jQtVfL=~Zt8%D!(6SSBeWs%A
z{*h&~J0Dg;ig~hjoB%#hztG5E4~QK+94jQw&5{Nmqr*s01snnOWA<!1qbk~~f6ket
zzc*zu)xvOsTRBs6UH*j~Y~X(N9^Kx>Lf~|GVy2<;?X?lmkU#ajlK*2IJGditUj?Kh
z1QB#Ik)tW=yH<;jA=qf{^wqe_K92IO>$5HWUK-ng#c)1if`=u7XEsFOmSx7~q=6f)
z(VansFKyMq161`~%&^2WCR}i496AT*EYb$Zbm#{NCJx-Dba_xBrAcK6ao*C}?=BXA
zLknLWX7{;cQf@;$-)^!xjVxirmlPA!)eG$S?DobqhS0wlz;VAIdu<9Dl)w+!E?S4f
zC^8itoWRV5XrM-zn+VPZI}Ug&fHjC3GpP|J1oTCG%`Q}4;J7j}NklhQ)Xf<ykmNDR
z<l*0du%s(%ZX!V5DbTH2Xl8*`xk#V=R%EbLUp6Zv+b$4`coY&sc3Zq%NyqWtN8(z3
z+6$k>pVO1?!yw9bHBLA$XJ+awH1lP-1Zr*T1al4Ofgr&~JY$*s)^A9kMt?cKyYJQ?
z6;3j(#eV)tnPJn7jrLAV9dDzbP%f2X0)o*(<3_Go23`jruw}#n&v`@Hb&K!tb3kHz
zDgbuy%MUhBU0NL$8F&NP^GBopm8)w7^+}quxWYhJIfpY$5bkDXT=Z;G@vYe5n)2Br
zF!=cX{_Kg<Lhh*4L|M@yz;{f<%69#V_pa_l3VG%Y21a)~j*^OBRUJ6U>g=MO)Mvez
zNmd8K(L)b&FEM4`O7;i}rVXgewN#^$-+2A4l+|kP8_g@TY1!<Seo(3+G=uB**fh}O
zXJgS)kLZm?PTohAFkqx<kRAJ)9%moyDQNq-PMK2m#C-0L!AfPCL%=}><zhKRSswv{
znBYQ+3xib2r}Y~B08B(Q0E176w?gEeLDAF?k}gWr#04(1XB!`C<G55dNQ7iUPxgxq
z8>;ru10}eDh1UZ6;(6{aQiz8o>{NOk4Rwd3(F(KycffckstQALweJeFk)56Gkj*d>
z_>Ad_{58ez13e2Fm&tM$qhDrBy%CO6r7ZBn!4v*d%@8a_wWy%kIuK>4xWF(GUEfgy
zO#_vB48d{OgS{`vVFum<wtem76mf&xXF}vJ7%yO}IUpqcS?z34TQ0LQZ~4?3GQ=x!
ztEB7{lO{hSo9;D0Nwlv;!@3qFm=n;J)&s`W4>_X9uqN=)3Jz2Crn}T|1#fvVEEP;|
zHlF3CkAdF-UIl+*H81x!I<Q$G{#vHQutr{|u$-SFAk65D$qOn=SkwSZzJ$H+;JE;z
z=b#`cH&b!5g1OFv@ulGu-+!NDd>hBdw(^N})#+rR=oiQ855*+uZ=ssXb=L7D7S=On
zJqw<QlcUGgWNh14;VfWFOoFJvDY5{mXzDQm@IQ$I;!F(wY--aL3xF=+^<CGXgZtK@
zV*-~cbi-$lIa@ee6)EQpz6qrv?k~tVL#F&}@?KCyV>DjhHx&JDFS`_f-=FAxKp2+7
z5e*Pg4?man>;2-L;9y!%Q-w-hVDG*fg<05;N)~_Ka#Cn5g@x~4hapZi;O}|JG!poF
zc*uIa4;#iN3yhWwJBAqmdF`8ZMgKV2Zj-*ucl+Fl%vnDN4ndwRQ$c^5O-ZGV9KN`n
z3Q3whl$@(%WCWH&TJ2$IhhcggGaRGgS?5c=#v!IUmQ=mEi@PweP;b|?9)z}}L`vTV
zNj?JoUIvpxtbnaeo*2#`Z245Z;W#W9I;j13t%#8>0UJy|&j1KQSg1m@ev0UmONR2Q
zxwNY#w<v1}6G)6ZvjoLh506YmZ3V(`kctNnq<XW~_-j?GGJq&_RJzPFbAhi?Gm3k<
z@Qj48lkJE?B{KtV46HZxw<XOh@_QPxMm|UQ=Z;?`)zAf(fN7wn(rb1|?!a|YSi6L`
zCz_vrtxY$@iHuG=V|mCHIgUz#(t)vFsDJy0SVr`?qQo8FF#2V_gm3_T@lit+YSRPu
zHq^XSZ9>BiLG_r8wTd32gf<reCdT#3*h^ENIR7e(Czqv;X6hX%pC}e1nVELb(gb}|
zBzqp(;9E!8VKij8ft+YvWV#Pu3)E-lP0z!i|8&r%Fc476ypWkqP!og<w6RjnLU=G^
zi_YPWk7ZH05bB$v5l*uP&fHDG$B&3}6WbdGC|UiiJ5UJfq4-vX8<umSU7a@Z0_{!J
z8F{ZXmoRv|Z^Hp}nkg8KA_zTe$KsPe^Ti<w;?)bNsT4xz)i-}c20&#QN<u9NS#PT^
z$o>k3`)|}!|HWJ}uauz`I%LOfgM}~u%*d&tA&%e#6{<6r)fHXwQTUz_Sw+7{+Xu^h
ze{QUy9DI>o3lJp(@CM0GNMgi~$W`e<FOwl-$4b+Pp+Willn$fMqm*wkcN1<GD%}-=
zYsn3v%U6mBdY`aze_mlEum7o*_yn3p4N^TZp;%|0k`kI%^c15K!{aNQjO6GaI!u|N
zUpC=Hyt;s<tZ|sN5m^Y(+M940MtGh8?vRB7c1#fdeCfFzlc*!5W|dMOs<jbOgy=JB
zWQVaPV91bfskQ!$$siiPHZlP&Yt2w(*nAZ~wdSS5wSib9U$8lweIq&M{ebCxjKGRW
zt~B`hsTYFwAat<B-<}421H$CmG;?Zc6g;9SenEe>-u#T19fj|bVFFaUtWy18Kfbgu
z&o}9UVwj<4gdz~&HkVR|b*lmi#!Np(KLU}n;O}2LHZoSoT_=`mK2&RCkWDQ;Ri>7Z
zl71CdEIKT6qi(dU<_o92&WwOl9wLuv#kV+6{d!MrO2{or$X^ybZ7Ab5uc$HGX5a;N
zr?1d!J4R<GnTb2fiWAErs)M-@GneGK(H0w=39%F(7e=(RcMC~!#@Oc~os1IOX>+)A
zD%=On({=!7z_<Qf9v!wuV8)VRCJknbckQrW-}7MoD+Q;{7q*jH0I~{u@X>^0^YMPI
z0J8$zvt;mo3LQZKj-NO7M^jsIjZ!`;QxAhZQgn}v!60TzC$Q#5OdZf%1MwR+#!e6!
zkoyJnjIT$&|5hirG?<yMhqC@OVB-?}`*XLZK1xW8{48*6NwKQ6E*PfZJ_V>6KoQqq
z1#eus$Jj(7$`yq#h!Q^8wD3|tWzNZ;xpjfuxskf6I?|?p_L=Ahi)fFdfdU2-^l1G0
zjqA=$KrUoU>87y+zdKoYj8jyoKT8d=Vs-6SU}R}wW77+M4o6T}L&QOafMV8@fG{MB
zgJjmc3khO}3v!teNz_BPAi>x(B3KbPOm5A;NW*iCG*tQ#ru&P6uVpAuf^5CD;Yf>K
zRJ^2`)<%+m%Ff$|>u@Aj^^G={L%WcDPrK-^mAHkL|2kW6_)5oL3zv>D=2?hzAoJa>
z?Ml&b$I4gKI5oJ)u!PtWeXD#|!Y4SuLSG7gWh0NVsmgQV-+7PAFY--2Hs`&m2J@^K
zD=!fSkoOuyI+UHO+>oo$jb{s9eSllUd;8k1HY*}m<FJ;>o{1FtrnsF7d#e$q4C+e7
zM${<lix#{@Oc%7;Anslz98L!zzeTPAANgf-JZ=~_%bvqrY=#(5n-tCoWnAd>Z-A!(
zt{P<q^QA+~%XpndE*-s(2ooD6-m62g8(fLuf-fVF_EggcO<n_Me_eW!31l4It3!Rr
zzaZ3C;d1`b@F@CWOr3E(gt~c-5$J~vqcxV8kl)lAsAa4S?-?R4nw<9<h7-2VsQUnS
z`xQ8{MFCvZ)s>A}xX2V-{iBn)tN+TkyR67O;ro*O-EFzmnE)z$*N`!YIbuKR5mgoQ
zN3g%bmg2%OE6?l!&8ge(ay5A5YP<WRno)D=jO;{*z{+nl=u{C8M~1+9;cfM%7Wy+E
zXZ|tBKtmjPR3~F^)6~BOoaVe(hqdudfdn^x6dKWjxj-l9tkV_8wP+vVjiCn<ec(2b
zNBhOXSPYF3*<|v&Gp@X@^r#U2_SKpP>xsl#+H~Mwfninvbn7euw&Q)RIXT%mUvWDN
zqQVs9{pqCPP?Yj-bY(AvXj=Hnc2nUW8~Cg7@C~dv9G+cU4H0eSe1|Dad894Xeku6?
zr-cBQO<&R%W;9@4=ez?P65Ev<#?lzv9clt8VrJ^rI?+_0KuM)M_W`K4!ghdz5Qcmp
z@l@r2-imQJBkUEqmU@whr;1VP#4yy-<t)rfK*`czP{OgdZC`6ns(+>~d9DaYc0SG%
zJjCH`4}__pm3?(JET181Kq}1Mzh^3=_aGS#La$Y=eDzn171WNBtKC;Kdt+$CNp%#d
zO>}ZZ_;l8Lr;IuK=4w8)WkvgJsXH^qXqp@9A_{h-@*-e$u@2vq%5Y}2Lb}s9v=UQk
zR<(WGu`{&ebkW~INWt|1$!-*in4n$CcBFlr6~8JSf!OtLvFZ@|?)r?4S$xIFXQ#u=
z0BB=jo52B%=!c{3m^XtFlGOu~EVEdde$^GvTaAh5(w^c2GSZ;FuJNR~Hrb*Dx=cpL
z(g39Z7?z~s!~9qnyDh}w8saLrGoC$W?a%a);pF=U%=3hlyx?6iDoH#h%NBo3?O~xP
zr&j5~HpXlirp&LRltX)zG~@7rU?9+q(!@7ucZW^=9bqc`jE8OA_?An!mC>T;8xgEQ
zAC`y@St~}>!PK1`ZrdfHjGZxHf<L{X(#%c0iO`hMbGzz1DR*tDfI^^YUTGb2_N6wm
z%qy^0>+^*9ys&XnZ}z99S0>#-t0I2F@t}XpWY)jsbbUS#(%s|HOeStN%mzMdTl9VL
zfLjyu&?L)Bk_Y!!^2(WaQIfbH!I7V))#<9`4lBvUb;UFLMvlgnEcHhMWT}rVO5@z)
z{BF8B6B1@_s?J*M%OeBL{+(D>I>Rib7~bbf<gul3R~ESK8Di8f^Fe&hSatJM&?^>%
zr(p_AB!jb*pDJx<s5!mUW%{7;zU-<ndFtrlzwWWCVw_Ai84wHXzPLS#dHdEca;pOR
zIkgn8jV^ZL+B2e$JI+Hc#JQUYdPHe4ZUP2@a&-vWpEGK7V^F&ocH5HC5)=Owgrdc~
z)cB52Aa1$K>~DD}e9_tQzAD{5&=YIISXAaxe#8S)+JPFXwwh`HY}2CA<%&?14fj~I
zD6k+2Mor@A>Cjr!W#zf?4ho~4x2=x1=@D;xPg~Hb-&#zSZUAB={9|-3g{=Mbsen<m
zcS0b}_PBUI<f*z6OWQ3zVbe?X(HP`2bo<W6n}C$`L7JFI;dj-=0GaWMhQMh>G3PMB
zN4qal(Pnaq@+%0Ai4tup(=7zc`RwETuYko-^<>Y>exbXlJt^%EKcs*DhLeGgv7INR
zR#g=(ry?trh^K)cRpqC)MK+?5RY*H|I7*pUYoYmABi;NepHr$t+t$HMT~An)E;ZP~
zb%g0;x&fY<G~#om-8Qqz6&(JhpL#y?pAOw+W}Gw=l-(x#47e~0rXS8=$-l<{hiMGG
z%COqS8y?@5ZOvk0K%v6VAA`~5pf~+R2br@NZ73C;?_^r4<BYXkeJGvcgMA*c8&$du
zC+(-~=0Gv;hak+yu!_NJ3-v~yr|-pJ_-ZW7bX3$lF?i))Syn5zj>em!X9Nk$v862?
zR9_UDYR;4TW`K?n3WftK2!f2Tm}ar2*2)Wz7stv(*@AQ12+}v#f9*s^N!i_{$1hIW
zzHt#lh>wRac8sbZ|G0AFK5`*ZvD{%EIv7qY-&33cwq!J`hYA^Ne-bq&*?{iSWzQD3
z(aY>}hYs@&OxoPM2f|0bd-SwVfW3ECUso-7<OC|`4V&7OA{}*eXeqzW6>6A;h=NYW
zEXfMDG*yst@vaD6-GT-)T9%`OR+m8@&yzoHnGV@%bcY_oL^Dn5OYFmbZ4Uaeyq<QH
zo125INQ)Su?D6Rj$bwWVb0*$8@LiFuyw>&HdfO{YR)!GwQD{D6N?sn!b~u*Fy#|IM
zMD>|a%tscNz@q}xC3fzJVRf8zgIyG}&&Q#=e_PKf#st#cXM9W>%Dg#0J;j#eif$ku
z4;9#irwe5fh+XQ@0HjkqadWO4Q=<A|F5MVmaD*9*?ZGjc9^%GS?9{&wtxdJQxa?qF
zt@(I?ub-Q2WFQF|qnRZKK+yKX?p=X;um^+gR6P;Af!-FGGk_=t(|@vz+bf@b@V<Nc
z{xPf+ILryA8V5~}y6NHzMB(ICol=v!Qn(Db?2>^6TNI<Jfr=nyN+>j??uVn9cg^*A
zP{P=JW~d*w{-(n!KgD(oOn(bk>X=rhreHCik<4{MA2eGyCOukiaCY`#kCxrzPs{Tb
zke)7W5;h#5xL>eVe?7e8y-G07L{gVqsvFyhWCw>$+#xeLwRu;)a%mSRvY*{K0l-F%
z-Ro{r2*T93xU17H*VchL@Jx9@hQc*6z(Y=Ns%3p-ieDnkUk@KGXgEK4q4MrTW=79Q
z#LtOlASf4mT3G4{zHWKm|4~jE2b}{Ke2V`Dz<cQ#tv~+7dtnnH(22V_O&!_Mw6$fL
z*Kqp-BfVD51Z3OYObz6zMzad@;oc5c`CH175`_@d<4{!p)3Tzgf@*UpS*MbW<-^;O
zTu}t!26>f3SpuE+cWdGF0e&<i$<1e()9vCNPDSw_t_n^QdSmy`<u>VYPN)uIpc|Zx
zKIBF-7%18(2Z*jXxa{Iz6$1>7?hREnc8dBeX6A_6&w(d(miC=q__L;WWLMz7*X<l`
zl{CmWB*twM-@OuKY7PBg3frRW9-Xa0_G|G}RKq_j-4ztlUeR>~nT<?;^cAoJUdl@b
zgoX>X5eT5iaW60ZHPzt)YZV^0)*-f^E`HX0<*A3{iu@|xUjDXkE<TVP9vf(i*-xo8
zP-ZL<>q5M91$^ZF(y0d59;{4Wx_ODkPjqlmBAb>gNjvw<9|Ht`c*sO)(-22O=@r>G
zpLeJmr_V6#wStbM(P}-C!)RScX3mw+;+MLVV4G!E^X!hCq&E(8S&P_Bm)Us!xxk7o
zS}rq258HMRNyCl}+-K+c`&#FnnarxC10xDcu~OTyP&t=-Lx&Gb!u{95pTz*eFUnNp
z4&2ug?2(;&-R%5foG*vy3cFOi@gS0<H#TNSOY-zC*L=YyDr+$uv7>fA$VYbf5mY)e
zn*x)g(mIB6r$W?bF+|C~K^}Dq-GZo3MZC*&3gd+b$uUrkP5zn3I1bs7kSIIxH>_vF
zb~0LN#HudBqt%#*KQ_Yqn59=RrlkhdULQ+6FGzgQeG7~O*>6LU>4hE-cOmq*7)z9F
zIxh=TQQ?^(2M^I*cpXuST7hOsCJES{efXE-0t4fWChvqT<v?Fhm@!tYTIx2LL!cCj
zLd|k?CyYM@b^tFUK4LuMc^$zPUb@xDgEkpVC^D>~=(W7%@vT=OX!W;%21(t^F$5}0
ziggSRZWG~g#g+yBHa2t?GyE(h8^=*A1yd<yWrZr><7pWNYn~D9_hb>4rT3rYO4*G{
zTaH&(k+C%nIiNniq)+HhaRpI$ELbg1-&bB+kL<<jU{BI+{lNJ`QtA{vnMm;4wQCHA
z@BNw8=W5IW9jc8|-eeI0&??J<*@Yu|&3Y#$0!&lzwQMfD2>X-ndjAcuZV)*PX<iA(
zE@77P?2=(@`uQ?52kN2fk{?s-2>n&2HNl=2>I?*dm^05i5rE+~-Wa`)fgwr?gr<*F
z_p@9SZD;f-$JZfQhz89a&aeF|_>A1u@^t1h3dxLynEvo`u+D4SC?;8!HP9j7@zrJ4
zk;kW@1^mGejGm3KhFJ~bo+_$)&>b<qQqD%CHeqKm4yYu61g<{roMxOW|Dhm@Uwk^T
z6dO|0<R=z`V++I3gCKUfS#F<S`rlLjq6n!p?hy0dUM%3ns|Pj<VgQlHnC=oImGqFt
z;cpu{4G3hWQTgo_fAh8v^}ux;wc$6Rh2|%kIa6Gm5W@j9%jRx?mLn|@2tflPM4?^H
zN{<=P*Y=7(???>ArtgNAoD<p+f(#*0xits;*L=^mkE?cn4B_8VEc;F(qi<T5B$(Vt
zDL4srG7tAn@+rFneJ9}Dow~B6Dym>4AUqnJI*~ZPy?;qdfNa$ceD0Wq$X9Dr9vka4
z@m6O<K}ZaQ{f!)-|J;L7Sh5Y3tqpYC^RPG!kX#_X+1?|y){I-%p4!jHcwLm*=K|T|
z6Qa+iB8%MpsstA(TFaLck2fAkq@0LE6fU0rfNMv-caQ~-eJL`LD3R(pXGgS)gcPXL
zmUsjMN!K3Zz^~*rpL!nG>{<UB_0tLR_hfv~oE4bgCL;%Y(xOYvX=e~bd4}gJb((zg
ztb390@M-iE@bgw$icwsk?j^ikXQl<KO|Tu*`aLyf{SE5on2LMb3hp>j*Kvsrh<vn3
z@36lva>V7cSmGX{UPX}%H;AM~{l@8;IoJ?R%{2SZp`gt=%A$-AK?4YZ6Tf4-E#tK|
zL9Kqr6j1hcCutNwn&(4KR3;oiZt11lt?3qxE01q{vt&{Gd|z`|V?HHRC)@Uvvr8TK
z(mDs=bnqu+*gj;dUU25DwtJ($0zc&RHBH+$Z>~#k<P45XtRnb|TVai)<D@#=h+?@T
z6RQGu4CKj6Xb`6(lND&enSqPS1@|=Kb<{XsLUEZ6_WeB6t+IWtP<*H&>Y1WGSU33W
zbk8*yRNcN<k?(ep?`lT&`qH9|3l?hCgAU*JR_$dh#_%joSk+!8{tw2^sXY^>ZPv-e
zwr$%^CZ5=~ZQITr+qP}nwrv~hdAGjZ^&9%=uI|37>cru^2E*JUZRlcv^`A;P%!G11
zuGc~KGPiDfB}Z9QqseN`aVlX*;4#u~rj#|B;%a5HLG(Os4M=a7bMw4VV)jo}G1n4>
zlgP-fz1NQn*w(TwV*P5(VU%1fq=#@SMETya<a8X7uw2Ynu~(|b{??rz(`xPNzc^wt
zVxbSgQuCLKvkAdf>gNl5VSlXyxx)rJbhKr-uHl^6ibUO6GKY1Z#YHabUEV10Db73v
zZJxj{tC1S<qP*XWK*W1V&Wuqupe6P@#Vufz@PaF-o7l|SghWp-zM&5pT|e){qtfuD
zHj>toTp{k<!6!ElFhsQyym!`%H0Q<-9ldfi?H-GCdYF04*g_!~_U;D8ycE^bCvy!5
z3kTiT=kTv*h91Y@i|1$DqwJnzPr~)um-MVSym3#776h-FCgc`kmxT>Ni8WqS&5F@E
z*l6@VO=$21B6H&+*E!*o>}o0wfvQ^sc4(<m<o;Em^RH_ISF(J!s@QX3?wNt1$cxCP
z2jx3kNBq9p{GA6&Hs^;7)wB8C{r&a}_48o2w;2j`%|jK=p^U1ja=E=82wNhm!DSpi
z6B@ETmxQI#!8v+7$kfq8zl@21QxQhjFMJiAQ!(<`(NHLD=OwDdi>L5B68ylfD^Yr6
zr&`?2RA_L?7SH+biz?$$?gUL&zJzrD{S6Du-z^Hl*od=gjcS3rom#=R$ie;(PaP;w
zk_Hif+Q@67&yAWm^jtz(#BG?F*;Grh?zYZhF{43vZqG5$7?6-%4^-%O%_xLp+F(Ln
zr5={-XK-Sdko*p-`FeS2u+Sh@D9037@B4pA!BajDKv8d!nS%G+zhz<wSQqcXkawi7
zN!Ym(dQ<O>t*INzj94=YWs~Ck*?q|e#*=;L493`562Y0cq``d-LFQfi4pu7`OMGxS
z2Q;-PtcQS$S5a;9mRn2#Y*jM?q9YA$T__SsL6``~Nbp4B@(0<Na)i>n!a6);7KI`B
zfLcw-m$bVosWIUNZ*6j>^7SE!pZGql-Eyg(DFUOf=+EWGF@f^k+}9_7%jYL;Kx||C
z1NvgW^-<1`GW-dfFy@`ctlk;GaBcvTcXFSo9#&*FbA%=Q9|7uOpS|uP-}YXXts4L|
z!*9b@4J174B#aVZnr1qBU*D@a@<%AT@<+-o1SgwpUj(qQYvP-YPSBz6Vq^P)zGcCf
zMNUw=-+Allb8iZQV82&;-t~KQIjV{;n(DolNXG+|@6!nWqyB=1>f%L|Jt2HN@>wcM
z<Xu4YHK-A-dtPpTqty|B)NTy&2FZJoU$2G@jR5E!IPLEz_Wk1{(KaTe7c<Z$Jz}wZ
zfG5D$JpL$C^k;BQwbWvv^{J1E=QZk;U#;89M|2*Gxu~+AwEUg{2-{M^`TDJ+8}6g^
z)!=)uDw1G@EwHXb?H9E5J$FlPn$Q<nj*VO>Rs5+9knxV>Y5A!-4zghhU>i6xiNX3i
zmnj1l9)4z4sP~Dc%P-TeVU4zSht>ecB!}>hDDX1Ht6_b8_VhSZb!2;YIqUH5sLSHj
zSKGa(hMehTbPvu!O^&JhcX@tm-jbEzNNP9eI(I{EQYBO^ug2w0^BwF;X*HIjXI$e)
zKbQAoK}J5q2+zVuxVF1n%`xj4UAp1+3cPkV2cJEFmy>tX&&B!U<Lo$!+#Gwj=IJZS
zqK`ZiM^P^}#)<5$PCCve{!g0rfJ_SXAq1cC3%Xh-vUb`OMNbuM4jUkVBxV!yhE;x9
zb5^Is{VV|tDR1}ob(~gNRR^qa6Xw7&tAvCXvOyP}`0UR8N#gd4BzHQ9Q9y3pU}8jw
zF;TJZN9*qr%R_40)1kxQTCaRRV__eQcBPp5aw~*tBk2ftVjbw6V)iOQrF};-x{r<O
zm1$!^+Ta00)bO&4QR8p4#i)}j#B^fMQL8W`2Udw+H71ljh6bQpc*{Hv{K;(1=QhxA
zpV$iMU~uctgvX-2@8nOkNBIi5^{s=(?^p|Nu1@Y3j>c>$$*&)HK!6}e;XAtly(+H0
z66l`hnLB<WpHni+QI}MC&bC~I*nz<l<;)}Lhy<z>;HyXdC~+dcLbtDG!Q!U9O)Y1b
zhxHs#)NwC0zGVBK(T?gQD9;|5x40wiTyqh(1^-vTGTxdt-($iqrn5HWl92mk*W4q4
z&4(#>uvV%HHgh5Pzr`$lk%OBwPP|c4V3LD#0zdm8ECi<X`){0?_~9B^74Nt)BGjBV
z4a6rNLqfeF57<Q1P`sRWbUwz~79m=b&y+P2bI6E~cPC@B>~c5Ay{um9^=uX~T;QhK
zp#4x$6y+VBfDH54(^%qnu@IgS6;Y4jR9dUD)z(!hjLXFmA89r4!Tj%98_%yQ(;$;B
zDKOxlPP_-eX4I<iJL(r+On7Zzh$l?}plR@-H*W9Y+W|mq{>2>b^-%F$J<YQYJ?K>a
z(Qn{+<Cvn(lsm^K_$KM`wA*2zI3mH|3)p!nMT>XAl1nh;j&c9zM^AcuY3U8u)fg7<
zFu&iAbAM>_=mF#a-<04+m`9&velW*R)km+mPtF{3uCz#4ZeJRO3>`Uy^_`|>p?dT!
zNsK#0S`(;egn#)|**}fxud~K<X>JaiAiN_g=d}>d-m*7-X4dC`w0YcGs(9898)R*r
zwlyt-gb{1Otrc!PCAP@)L1(ddb3hh^X9U$T7?j<Fxyh@yM#k!W?0yaI<w5v^+%=L|
zftY*fJP+d=4UCqz^b7jKB%_%2l4KI#k-7Ce3oUqW;>!k!f37boQ3pkzGdoF)uh)nw
zfr{=~95_lfXG3KTI0$9?H&lFM!3La=M8KP{4p+G+1zVX`Y}D1BGBARyoDjJG(i^{d
zzuVc_f43X<y}!zXpx9G2ypR{tJ)-DCIRcfrxC(wWyqd?lRBezucnH)(bN)B-u=q#-
ztggS}lJ#+=(xMx6yy3_sHBWXKlVU*Mx?tG+(%*@|e8GuRbhSvMJpe2ifEl&`D-vB5
zywEqeP|i}i^&dR}Ob(+;0wrOaopFLKjCM3-+t)qF>{7@%4Pa&AG-+?y1Y<kU1j~Qi
z)W)^>U{9Tq%v-IOHC}m0_g_5u?4OmLxh8(Tr#ze5GC$8ve}{MfO%gtsYswMFf;{3A
z+|z%hY{(;vr8z5)M>XZo+`3nJUev7iSC=-==d28`dT@z7*;zAEN^8uN7#nk~O?QGM
zeXfAH+j=_LSMAt=Va+4jp-_ebp{*Qpp&zEChtqr%*Y#GxJ_5OoDeY**AD+OLJLhoZ
z<7QOTd(5GwVAsu%6!Lf&RRY3^)Y${%cN<tz=*G5+5O?DIqm^M|Qeg4F=$<u@uWCHo
z0^XIo&0Q0Hv>6ZngXR||eUDawxTY8@5n@e~CwHDAo1x(Vt@J4EN?QW%rpt1HO5}K(
zL`CoWdSeV@`MK8y3|gFrRh+`!Ulzn8F(tt{5?`M3b*NALH#V_9o(;c`6+L%S$KQAl
z$ZRLPd=L}9v|<<KmSq^Q*SNg++j%Ped4!uir53X(GjvOj0bfO4ytB;@)~TZ{J6mKk
zH(_Sv=ktv&vA&5>SygF-?b%8@2#Up6Vt(L&T7KNAg_U6V(~de(r5t;QhOL$tzE4tq
zlk{N8Px<}vPGWzkD}^CR+wF@NYT3%6bhu{@U1jGp-SF2}WHwHU=6BN!ft`}>arzV5
z@wF6$%XFU9xwQ_?(iOm3u@SSSsP{g&mH1eK3Qa@xYUtUs1z4ytdh^<X+!C;QWybsV
zKIirGkz1x=VP%8*RJXciZYD>qEMKKRe~+PX#{nwWHTm}AAqclqwG122?rmR!8h&NM
zRorP*=nybBu=N;y_P_q0-5K3pUapVY!JxgPyJr9c!VLBdHD4JSMbA!#w*w0FhTz>>
zQ@uFbB(|*_x1~+FgF7@7W<dOzGQ1PRnLX26R^dSr-*Bl&4bs&zEfSRxqJg+Fly9^C
z*FU-jOsdZ#-YfVT477(S+#$dAvGSlgmh6Vlq-0)$V6###4BlGJK%ll#VX7r;-HKT7
zLQn5$x~RORr~2%?k=!(z7oN)#M`MX@{R~sW`~=Cvnf{CHF^s)N8i5b8R=J8Xgf;BW
zMN~gFyoSevqJx_?L|oc~s$L4~&wd?+9p#6K7wnM$S=1;4pa?zxXn&&OY7Ud>?2_Vi
z-j~s##%&I_dy@h@o&D0<170fh<YeVl(*VKrqzY|M66}U+qn>Y$(oqJzYXgx5+q;LS
zXQV2jk4e93GMGgKL5zAihXpE6Z-mE)U)Vdpn`>DucTKPq>`77WDM~QG0JZA|fvW2q
zPk=G;4`hhaIwDj~i!7rO?X1hSI`ptf<AHIv8D0UzuoEj9!R3QHN@8fHFJcxaFLs7?
zajDBQZ#UvUlGo(Y9;xXQ(SV-P^!3qxsxJ8J)WdgT?7_U{{9nz+>b+Zu3iVWE;;91<
z)WHOT&zo1|(QmQfK$V=(rKgbRvcv6ul?C_hRes$q!WnCmpC0%#R|p)UP%dsiGn>Ti
zx?NSBcXg*JqDYGu*wL_AUj7%#ylwydlLu`d1dkb{;1a==R=0Vss5){hG_xL~605<J
zet3K726x^5pG6Z)LSa;f_3&dk0z6B3w;(q%#|U}5iq=IRgeYI$JK?^|3)id53(X?%
z&VhBh@b#7TdjsycENR%o56qX}njh%GJT%88aG%^I7it9i$gs`LwI`#JJsUnFKjw3v
zFsSDAJ8}mRb`aj@%(%0UnO&F`qPfTL=@B1ZU|rR_5(KO3R$reOs<i4>d|0zXiT?gB
zeAn!m$aq#1P_9j&d8Lc9Ko*pJXc%5k{9dl;>w;!rdzOnex4R6mFLM@Sb>vzoHDGyh
zM2O+xX~3sIZNicA82TPz-OF;1$Md;Mwl_Ym^$8w<PlXP?)!w>M54#UtG^vljeYi7C
zY+n~q{u_iXr*rq|p&ciC+897T;^Fdg3EPc0Hma*a>QsOD5%jxjA@<6?r&=t_qBnQn
zz`kcy&_hh<k{EiThA5S((9h2{NFc7_9?5EvX4V{ZbZcXHR3-N4-uGa`{w!I5Wf!R1
za}Mbv==#R^2qO6i==zH2O)>zD8{Fn@*~%^jm~T6H1q|KSAi8ro;_#dArP<Kcr`n_8
zd~tWG%+BAzJOT!#0pBpK-;kVsiZYRllI)7{flj;@38rLHT0Mj9+DGZHzKTc!n!t{o
zbIfiYDeYOTt7v;KXn!Mc7fupLqX7Pt5AN)U1bISTZxuJxYF!Ku&-SLkm4E;@KfmWa
zNDdd*N4^11X|<Gi6SK#1K^vkXK|z-}5H@NWIQ9o&h=`pFN-=0IfOrPiH+$-&lT`qC
z$Jj3W@NU~RjGR2D$Z50@PRI<jNn7|o5pY{*WHQ6Bso6_+ea%YrMwg|~EVEER_t-D8
zT_hl`wal7H4YI^f1dG(da5q^4t|vfpCEu`USNKff)#}2(bsFr6$dF|s%LC)<a(S!%
z8G02i17U->_fg~uLmyhKn`PC?F?$B`rTb^bu{n2OjNDI-gaj8RoO_#`N}znGp6NO=
z?HTZ475+InZ{LX#EuP)b7&@0Vd@Q3Fi?33*K6x-nisQn_9YH|BOE%UrJr@=)<o}21
zw0Ovl&2TPYQdaIjjq;1r))J0w-^|8$ow1z7n(k43x6rGB<(3z?nNT{HbvgnQtf^<k
z9UqMP<qs2IITIh%yEG0^e*MG=J5}S5=bsWyZVT9VO&^u-;hqMW<`@_(`W!Z*U1nr*
zusYy%^+YFsScWfCdT;ytZL{-eNvdrWmzyK1`Agbwmrxm(mNHcroh>#x$FCeRwXy*d
zus9Z;(s;7P4gP{3|89TP2nz`DT7i38@yFu!`9~pzjMN4Wm3VO@Z(zv+;S2Pv!OsDB
zEo(&=5&XZ~XERo1m=;e1VAP&VmFyyJ%x#1nC>`A>`!=Jl0^WYaE#L&u`!+%ASE*Yl
zUtFFPUY(tMZ<ZfDRIIxwEB%)TcXb_8#;X-wj{jxGr3avTj-a>zqWrSASC%|CJX2Y$
zxNm<OMRqtSWB#GHP6y;bT*5Er#7!3e<Gtss%<}wbhpZJxAm~+_<y=2}yXRFHzWGUd
zjd@9O2mi0BkK`A~@mG=D;4v)pkVJ_SKG$Z`lY`&NRI=%j$dhx|JrS?Rk1)o^Z?3;6
z*u%Md2(&V4vdF@FChxM@Lab=9V=GaPi)%w0@=X~sNORTt0;$`kPi1r|91fA{@Aq;B
z0Eol3xQQ8BW)`$+|8+pTT};-apAq0oLtlvmK4tKS8&SJ3md!ZZc93(AMUq*4Ym3T|
zl)$1g1aQb4k4HL9(%ePPyKjQRI(nWjBh`9|KV!RivB@Vvqh&SU>77H$Vl*i%R&1i~
z9d&|cN+fHGvcFZ*qiAb1@Si7s%X0(i#vy(Xo^V{@II*V`WB5tFBliwKIK7%`+vDJj
zBFppz7dk5^LX;}^Se`q1n9O+>uNT4MZ%Jgb*mN^tK!9y3cgcTtf@ZR8x{O_in))~)
z%rPZ^((oDTo<W8*bTFPjl{Y7e!F#|W%M3tRwoBHnS89FyZ$8b8ipdpVur*&p{Ay4(
z_1|ek_ZRC{H1kT&0uO~^{rLe7W)aB9l%v&+ey6CaCg(r1n~|)b%tvwAljcPnae4WT
z_1nVI2#)@B%xF}4$LYM)e@S^q#?88NhSoXe56;Knfb;D>-8bE~DBf?M_}~=|xM--s
z`R;c?MfN4`5}Z@P!Tz!rQTPux1-({?!8M?e_=i}YUkaEm+l^+uRZcrI>Pz-Q&`KOj
z9BZ~gRxiBZkHYG~q(Gc@CGK{$tloN++KPWrut_TDKmSHuZ|?%gL#ynLj^OAhCdP7R
zodVWGSFMHv8F;w6?!$l{U8Hl(7*Y%HeFFY8kN;Q59sSFSeXt}#g?nG1&@AJArLN5A
zGg=3%Yb2+r2d}c6n61LMZos$hp)O|*^Pz6!z7~deE6wR@HoTy@65X3T0!<^fHe5h{
z$${N-&0rN)-f+OU1;_5-+8<dLz6pf4*+xaGRTOkG-!T*?-mH$(#!Yw5ncDwl{5E(;
zfG>)F$R`0NaG83c)~-Wuqwbj@m`_CO3>V-|S*N+h0#G;(`0GxV=Ys+(Lns~Czm=6|
zpm3pmAyi@iN*wnJrnysF0BQqHwQ@3ui3Cm{9K6oAyM4Wh5=r6-y5)2P=GbE;LhZrp
znw)`~jKQ302Kf~>$-W$?Y>&3e2wUZORD5r-^<@h~kEh&1ymL<8t+>@ko+XWnsJ2|X
z<qPYbKYrG3qJs;!6!eaPnI9gNI&l<<g2L60%wsOaJ16R391p^r2KMT^-+kqc!C>2O
zF>p=fR4fPFt1VXGG6M;l-G%L|6E%Ph*t_OY;0Y9tLmfK$3$rCd+U$T`{4fED&!=Mq
z?$>k^9!IFS0-**Xsuy!)GP&h~J`{pAAU!I;N50yU#3%#K-5r~|xGyWp3F2`1&LB+h
z#otYox?lBay~=iI$<{7*sP5pyzJ(A?FecX&WPH$ezUg$m2URAD0mr=iCY9EMdR@Ji
zz$<Q`T_3&L8u-rjAV_gsf=Tb~-WQ8=eh3FQ0O1auBvER?3|uzDpS%ctwRFc|Bgj6r
zRS2+8q|_8k61KzQN~Jr$W``t851cKZ>hzVhXL#d_v_WTZl9(fyVmh>VKA0qrs&k(r
zA8wAc#UnA65d*#VlV&ZF6mGOpp98&4N5fj?o<^Zr8G{bJ>T+^_8hn&RxTWDce|v$>
zGEyYgU!w`q4z_gV(<_6l?Bdq7dj_f&&6EMJ0T_6>H8(P-Zq{zVzLf0raw)M-zh|zV
zss+E(=%RrM#0)j*=gvEk2(z8KZ^aU}z%qI>dKxnsk0iO;Opa76pHy%1$H~(+vmH2a
z+u#D9bhVRPbZwV-bz{$?gS(V%GroY&zis|F8~0pHA(5qurFrJ!JMT$6q7%-+#6P*i
z{ag8hmXYdtzCz<i5a>N(e^HQQz>-Y?_`07TdT8Jq&$VKHwliMAtWLpwm~_yV-*4z*
zHIKkmTy1F4duysE(KtIhxw257f<GX?qBrp>uKV#vYrj0i@proXy;jJUf^nBn9pv->
zSd6ecWQ}5IG~R>ZJT=RHLNb7;dE&_N^wUI9it2U<(Hkp(K9oDhZ3q>u-8G+Cx^#a<
z&ft?ibIeh63_AY(&*tTm$KMy&k#la#C!E#Q{B($Ogl|Z-slI<0leX`Vd;1(MKm+R(
zWDz=F<?+Z~(l1hKMJscm(HM|n2FC$WC?svi7Hso7$-HKZrENnK)%fn>oxW6lx?tX0
zY<>IoG}?WOphs~1DWj34t5UJYXa09%#rn0H1EUvWmPOibXOu=*E%z(6EW=E!YYjWg
zsM+XLPd7pCi0mCR2cDr+d$XpBa?YTD29=y&AxpEpWvK|tW7cajH7)2{lnHXDd)J3m
zp92AUF!-rT0Lyi|f-Y=z+2Ndwhc^4bA*%v$v?o5%Qapq&tbct}E<tZ<kVxIPzJ8ok
zEIWuF)k2nX#CBem+5pyZVdbHT>|==Pq;#*WP-)?6X02-sSqy`Y(HxzxBB@PPw({Ke
z&WKmCmT~iUJP*Sg3l|maYF_Z`y59OfAQMEtLm<_YI&vp8kjZ41zQ;@L;Q+4|!Kph8
z{+;33lnu0UZK3=!<#z2tQDBJ(v5{|VIkT&Pms1(A53l50Kw|9ri?o!}7EI3vH2aXA
zPxs>Q<V!D3_XbTZI%i)`k@ZEgt7PQq2(aD@3%maq3<Hxj7rlWZh3BB>1q;enPL^!H
zX*|gU)HdU+dJzS-(rX4B4oeJ?(wOJ~bXIs3W>nN2r%_mW9EOW~xq8N$UW#)W^lG4X
z-%-aR{oIWIHpMtCP>SlG#-)|IZ09MUPVUdOi%J{q8^>rh3ba<vY-8@_J}Rw>MEglF
z@PfM*35u;^b+D+(yu6{Ev=4||0|t7gyG<5jAnycHCuBXbFgn|Cif&%mzi7arG+|@8
zc~jRvlenfJX(o)diW3;3OOuMTa8CzjQI#}ZVCbp02qDU1^%Y0*tLMvXmS|^x>v8G8
z>H7S7>8QJijAN-PJyE{^QWP|inS`MT?k3k573`gXbeLe&=OvC8aTkaNo<ol_CahQ5
z=J=H`yOKwA<X)KauRbJiaFH#nozR~MWgQiVB~h2&Yic+!FEHr5IA}I{pscg58UM6a
z_taUboe;?T&t3kF7a7`0CVo{ZaY5sy*z_1p6ca0gguCAP&Wp5SdK0KwxC~ui#Dd%*
z_p-ySzZaT3h0h>1@COYP#0*j;5L~f8gLhF3n&;;XI6AUHc%yrG;!GEAt|dnJ_b}n^
zd!b>|Xl6<T?<9=?p*I8=$yv<<7))svq>qRM0VCVsPpyI775_~02*H(SR_o2XI;?gQ
zQ9$p7_&|c6d${NkVhVW0*JB?XTJ}8V<WW<Gv)7g-$l=@4mmxLICp%0e5$*3huTUPt
z-KPS@H@3I#<kEL|{Ph~KMvkCG@gh^eYrPs&q{4Ie<`H#oEeLUgu<v;t2KzLE`FdB}
zwT}|xgwMct!3T>3wxFhr0^jt*GA;iT&-{B!4Y)J*Ku886g1e{BpSlf92J9Ei#m7yQ
zgO??V|0Orw17#E1suF}6q(w5Rt=;4?XiHNPoeHQLAjx6N+S*-~C%L+y<T8Q~KE>{$
z<-#8E?~ECBxrdEd`rohw7+zBQ8~YKduu&faqF}CBvY^1L7bOz7&Oto+o$%`P9Sm|@
z*4g02TXA@OVgDU1D$P<}wbMmiR>!m)vTdZKDkxRjY6(=YuS|Q|oEu-UpHko%NUdZ%
zd9;|{`nUt!T#c~dH@v(g2fOJy=Z)qyKJl+1Hf@{<-JxQ9nb=#``w%M>E3e7qxBl8H
zkw9_8MGN$C7id86m+KG?)Lp$BPgDc*vV3tGtadK|4v<;n+G4$?+|ig`s|8H+vs5HM
zv$qp&r33%7!|ZRHZm67-Lat-29HVqGt6)Q{N%b8=HQ!#SQ_ae@j!{TPWcnV6-{i<7
z?pTrYrUnf-<s4?+<|!Lws!no?jeoB1NF(Yu`%e#}b@s_E5aH}J3+5Qu9U#-`w^C7D
zC&PtXENWu1x7;DGXm~`ivzO%qGnwKNP9w!f{*!UI^Q4Vn2o1hFk!}k^qc?kSJvl5I
zqP;u2N6oAcRgI5SJxcX+adaJ9c0{Lv>~*NNnAC-v5N}J-iZ?HYD?3Fdv7RU}<5s1p
zhhlt0>ts@oTW(96KCgio*_ZaWj(a;Vz3Z&;B1<5U;vThqU^3y!n{T~_V8%q46~V%%
zT}<0slo&^zGkF1S5oP*^$$@H=I}4oQV@RzA&o`uxnpT{CbHwhWNzUmhB2sjdJPykp
zk>Wntrc3Vw1*qGQSYWV<R9JcUlTzxsUxxyxD7V0y(Q7PkVbAcKSS@7p1Pf2ZWSBwn
z8vkX-gEGYW48?~YQ{ji+Iygmtf<e^X5Nt2Yp>iypQ&sfP4_Uy>lMXYHp!LyT33hDy
z*j!vZJLAVvfcyJ+f0*5zWKr1zX|&Iq4u{y!TzN5h$z<9TacL<yFP5ie;4L7z$cd9D
z=%U3o-j2bTAL&)X+5^p&<*m@+0N$Y$d@oJNC)E+QVP&~DBv&xk#9FuI4<0theWbw8
zt0xcxANZF$W8|=dvhY?8zMX!d$PHA(0jzkXeLLZbQyN6Dzb}K39|EW%dBniXtQO8x
z>F1q;Q$pmTraP4xPb}-ULuI$3qRO~^&P1_h*S7zb{$49`cv-7tU}8Zwa^;+!h^%y^
zE|%qzmY8P3y~jx5Wj;q5wOlMt5AR}J<Zso7+0}J=4r>S$K+UYkKT^PX(R0y8zDMLn
zkV^0l>9o`HE1YuM1$y>vs+RH_ctM$96=F<LO%~hvnLz{Vi_4^#fr)k;t}E$&$Vd$b
zqxGs;tS`2#F8_g$G-qUV=|K6xBiH9VS_e5G8W|HH=RMuk<eK9t2Y<mWxX4zAOFJP7
zM5pS-v*g1>JpoWkp9rl@!IiSXWCJJ30L{Oh<UT5gt+=Y1b-1n3o3^7uf|fXowcQo0
zrzyO`RT1ZvaTxj9!a01$V)E&U=rY0KZcQ2CXF`W&C38PqwYl3bBtpeW^cEm)aO}Wg
zDluSHNEz!#fa8z~`{;CO%%T_kAgS>vn?1m`%+QN-$lv)vsdii~)V;qH9NuzLp8)!`
zaIW@Hh{vlPQhL;0Ot{@M(`N1$5pTeDp`Box3|#S0*@7PxVv`#=XT@9esA&u2$n!lW
zBtCT(^K;sa3hbSPB?XyHCvm_++<lwm&IqLV1hb!`l5*vVw5byrWcrox2TcH0S?)7<
z{pv@uZdVMzUdN_7sK7{t(yfhhz`(&KEID5;XGgbIhqxj@0m;Cbm1ot2%c#|#OziZ4
zm3Bo9HVJ#KikwN<B_sbdZkMPe_M^^Rcx*wd6Nt3)e@|5z3oxLhj&@MqYRkmr<0`2n
z;L!ORDG+JvRFZ899c~&^&{vG?=r!kBoXaP*YFt7{VO$2s<6@BOom@OOq)wv&<$(^H
z_3Xb(zvQiau(W59q<p{mmVl3!Fz95}gWFt4HIycg!6asm6spIaJ$Q$_$C?H2C3BJ5
zc1W2K|G49VT6tweLn!WRUICeOk0e-{%OSbpEL-y6>Uilloba+-#${i2hJ|!*1tLnG
zS=b0u;)dBoshb$8w4G7uY$kC~VJ|=>{Fj4%>Ff>S)T7#~HNnjX#%Jm+Gdk@suA6*5
z!~*fgZQROQ8(we}QRh?DuTM;p7!jO-svpd#IFOg#lvOHwNgGkp=(&V-MT#dcS{v0F
zzM~rL%K`4{6xNeI-zjd$OK=T7nEhG;p>w$b6vVkF%6Sc&({t#3`n~AZm$X}G10L7W
zMSCM8B)<4urc`*eypB#s{W^s<l^hPQfQwhOMVM=a426&^$YggYUspJ;m8+TOCgS~*
z1Px5d8qb)HT5RNH@EHxlJY4&7(^XQ1aUi&uR_-tbFI=uMzd6Xh2DMWh#FL9?!_{wt
z4MD-!xzVu+n5@cU0a5$9n-oS32ONMa1l@_h2^N*+Yfj=2^TJCT+<<I5KuCNM!N<j&
z_jK=-9)j>H7mSNemq2z1jJ*(`aP!BVQQhM}j;!#pq6%_fQaO1Eg{_aUHWnq)tx^#+
zL7ocJz&r=c&n|uROiC?e0Zg^IqqUTe?{U`*uP-=9%#3KKA{obJc`m@cX3I}xl)1oI
zI;snp(>Kc9Ig~GF+>2_YFNvAIEZNo4|GIBV5P7?YjtEQwhSiczE)TVdmApY&rqiy3
zH(PW{u^91#zv=AK7n~O|?E>>>DJh(i$6(3i1Dc00Rmi3(Oqbab2|E8O6bZA7&(fVL
zP<^>e<`fq6U1LIf@BA5<`mROPv+y~Vbg~hNWrJz#SZ07ktKWx(wkp!m64uGG8hIZ+
zGj8}^`>rc{S~CG*-s;B_>)y2aV_yxWpKf#>KIEJs5AWC`1ve8nBTV{e=fBmne`FQ`
zTkw(3ogeu-7Z4(Uxuo=?;B_n`;hAQZCjd1;cAfG7v*H=YW6SW=rY03G^#1Z}hyX1|
zvlKvF=yuopDj&}V_hl6?!)Tv5M-9;<vYSP~D;B!v`|KE^#;o+Nkv$c|aJ9jRUavGh
zRk3LdOi+;!h1(&Sm32??0U<|x$IlaszEZ^5t!FBAJ+sZFv?sFf9_@My+Mdf7hFM@e
zWpH;LZm-bH<^cm_U!M)*kN%IK2JIhb|9>{bLGA7TT!_PS!d<=`0aU`x*HR?3PmHyb
zhCR-<WxwukS%$lxw78T^Z@mWbMw-wFDMl;Jyffo{rm*$1KFxMs(9gs`(G<@KT|@U;
zAL-z3Y8iAB=cqabN7D^*x}sogKyielEgM4Jhu_Qm)X?>vodpgB+WO^5+tdz7oX5tL
z87-)Aq2Xk=KaW63AP-SiMZE4d;>4Qa6@<&^ic2*9jvM^s8@CK%n;6?Y8Th~<{dcgg
zdK-Q+-^)7S%$71;X`}8ull!+pdzH0p)%3ZMe+F7$V=xQ1UI`7G?J7g_c3D`EG_#!v
zZf(E}oi#IzT2f5Uvp`a?`OiP=fst}!(2Tp82_^NGsa-9zI%@@4ZFo}Q56?YNltGyE
zZ8{t3lYt>u58Oz(@t~Ex8j4qj9H{W5g;<WcHdG{t3JEM?x!JHwgi6t=T`(w03yHAU
zO2L7FKq1Mn1qUvIn*QYH1Ru=qMzX4HaHrgGA0DSgd8^885?r{TU8c;AXgtXt3|El4
z)J><8ZlQA1Hx%IVbtq!E8#J|-e7q;YAlOzL&o5?LkH#E2mEX-P>nmysc{Mv2@;l@r
zi*g}U@5`QXSD%xJW(;YG-@<xso2Z@Iw=jE-{uH;WB8ztooc_0y=XvzK4?3;<hD-;q
z`(GM$>-gT>-UNE|U7L`wss6?VL0$Ts-f%};y4-|0<rq7X`D>f@-4K9+%2!;Nq(pN&
z@2cD3%ZUxjlBg`_9jlBDir;_x|EzV&@y0$sRrJ@C)y}!q4YkMAV?DYalJLWUO909T
zOcgg>=SrJ81DYhm-&weMvzVd?!po!63ti#wxJlw_8qx0&l@6qgq|D#s*8*@*{~I>_
zh~G{9Rnwje{uAI!097SnWG|+ZLO}Xw7b&8(#L$_DBtKdRLO#v<g#R}%`dkgfNN+-h
zq;Kkc+&YVuotmi3>p!OWRqmC5qn3%xf%vg*m3e;Sn;!P&x;y(x(8f}cDyXK{DY6y?
zKyW%kEqhx(jrUQW1hp-y5yu=q()+lRtv;w;=q;IBAg>o(01r2(4=~r7!=0C?^Jn=l
z8vm7R#4B7_LLi8`I#**gh$Mi?lY)Suy?$*Sp)TVf$9c;NMMeNH=^V&9*}zb05&|tR
zyI-_AjA!d{eADQluQ9PGX)H>pfzD{43ck*Xh1*wq%9*}_4z#vrn4(5P<LW8eh)IkQ
zVO{rAVxnn&0cE>O#S~$x)g$d3<&pt{H-xfu1=e@FrP1dBIgo0s3*R%km}D9O!bau&
zZfQ3VJKa}x5ah`vPFD2@BJenG!B9g7#|A4q7KP1M-4lW3>w<28mr6Wun4-&kxu?_~
z%4Z{D?uc!1eUFmU5L=Cs{^<f`M93#FZ+sDveL|lXwy}YgCwE9rtP)dfLL|1GMj+Bl
ztGxjwid*LTPk2qisH0qDlDIg6M-#%it*1hT(T<H_Z66;zLv-BsV3N`dP0whm-*5(x
zOQ{G$bPyAJ0hd+U%?A82Fg;-4wu1(U9kvK-ga+{3z&|<1VIZE<RyubjVYmu;J%Wuh
zQVo{LQuHmO6~YY~uk+cH1GOHN#ZaLpLjc+H`h&&PyVl>4CX<1LkHA_;Rgkd(t3)l4
zlDc7h;JQH^fKVJgf;mnKzAyEn84DQQu}0umxzRWr)$K?=p96Nf3rNA)6^%}POOj;|
z+;SD&(b6Ldy^;XMJlkziPhuz0kn=BQzcK{+>m;#tCs8`1<Vwj$KN#weE**JpzNX{>
zupyP3-@_g16PDB~99&3Iq-zlHiITf(eGisMOTz{ObbAqa2LYs^LWt~JOzHh`73jD3
z{2W4sdYb}>%K%-;Jl`{iwK?ws-wAGOVm=!lsr*<y6m_?@`g$pMyS?+bv5-%2cAfz~
z!rkAP3Vpv4rz=m^9it5twN@T@ZK#PcPfysns{a&FknsDXXD%c=iyCGk?wtNbpAwb!
zN&VWBHPXrb7owRw*m>fmwx@>Fz)){==#?IN`BP!E6vR6&EmFgT8=2Er#NI>(LPviR
zQJB|tlqyoLxk9~E#g5`9QOhbu*vd<R8IjelhxS>kC-YaufmU^;DvPC3C8`VsxVJ!t
z>*PW<*6Lb4!Ze|TPc1qD-0v@%9C~ti<9ql&j3n8~*sFOaN|?1?Wb7=44Qqr|e1P~1
z6htQ5c^y_d6h_j#8YY*EJ-D8sB3Whc$yYU%<(mDRd|!#e!l4I*l^eaMCF-n3?Orkw
zl6IU9!oRgJgkea1$Y6VVj{Z>tLB}jl<lb3<Prv_bi~ApqYhc5|<>Ws?Of(u05X1jR
zTb!$bwVtDik)ETA_5Vuaj@7npH%E|rF!J2e$T3Q+<6BTEu95Wj#%v?onqfF(mG;U)
zJCIWs29D?(JDZBp!9L)9vUi?kK1j1QexxD~o6bZX%-6_Y&0J41-%U+(eI3)Vd^k}a
z;wE_{R$a@F8f{Oh-ah%KR49)OLDOTtX3dK>JsyrETZIKmr|uAwKs%2J6If56Qd2Yn
zg#Qk-?rPUzNrQ6$)A>%5)U;<gv0S)<18YOrkFx@d<{`+1IfwIUm$qIWV@NQCi!=sq
zlY2rWEXM2`TcjmAL*1|vRYeSMWG`Ls+N4a4*e?f@RUd&uDIc^qfM$}S|0q7#pi9-P
zOAigyeBNoMX<<sC*Tyi)21${x52VjsyPulb=>ZtR#DflX3FwFLR?8KcF33Yf3w!^{
z|4n`hP~F=-odtaFKVr;gl@GSK=t0=uf7*q|U)gCurSD@Sle3@?67+sSt5NGpB}0B?
zF{CqZ!|N4GV>d12E%2&r3@my#{<<RKeg=^!?^%xrgQIaZe%MbF$5O_t?5>o#@a#6}
zUk*-k)b9DEqd$_}v}3%8*BEVyn8HEKq&S4`=ZW8eE&d@=lu99`6{(~Ty5y>HKi6YM
z0uUCw6H=Z2(%i_zN$%rhYIN#vfD_%i@}E-mAP*H?R=Sj_49v??orEALOXQ7<ve~VS
zJPi4}-CM3*y3Q2W|1ojIKz?@Aoczy*lbx3OAZt#h!#QHF6Uq5|({ZXDe#06F)UQ+{
z1@~$1=SOg*_{FFTkI<Q287_*H`brDWmi=xxckS)@^fl~fi3Pfzelm43^!69#EkP0!
z7o}h@kVgXTp<pTj)yG({WLq{^;)p~@GeqzZEmDTnar7X|p9rwkr~LTtT;q<4B`1s0
za&bE7n(gRqB0=Kx(45c}wguvyh5h7Ml7QOJ8aqHqwisKu7aY+p2oxjiiO*uhXlwWe
z57GWIdXiELtfE;Fl2C*+UWvv|?`?h+Idh6mG~4@qG2WX!!Z?s*@CfWlzy6O@V*wG8
zNY8vG=!4LVz?L;-OX1eWK3p}42N$-D7aiQETCqT|+)YbP<Uog<^&?LDm&i0KnuJlH
zP01<!4jfqci|K?`{|}KfO_ZKfro}ot$1i4X!Jqsz4q7sySs;LnBLrsj4Eqo2{1HJ)
z3Fe+PdSPXn_8s;CG1+2eIQCjrR-f(^hPL6vI!?0GS^k{CLZXRC62P^>NCG?!Ch4Bn
zgYj%IZ*_CaddGq%`#Mg4XP+_a3bnr3)^RDU5J#KOTn_mrFfyEFY4DrU__0VclCo$o
zy(pCf6299=@9Oq*aM}n4WV@$Ch9!A?E-yMlZeu6OiFfC(b41So_1!OM*_%76LuH%-
z!NV`YzQ)6)gMWj#;Jvxkx=395pbr#;XlPp8t=rnf4;uU<4WOUj{UEPle3$!Yke_$s
z0so{H<;?;xe4Zd<w$;#$M3+tzK?j!lcIk1%`tMxbk|unM172U1gCa<`w|rdJm+;?&
zo-aKD4JvN#%5tpHwbQb!8PjXsO#T)XdZ<|cW`zTTgWSjj@hT;y>o~2cW;AaiHS;=k
z>yZ?@8cRSyeNENa^4|x>)Z+=(YOGxTjvfUYBh5xOLikXHTx;x_HpRe%FFsSWo_wMb
zU);lx9|MIk=)_$$reCr2FZpL;(YErb{*!T9tcjZI3f#c@j!V+TzD&(Bo$%I5O*EIx
zm>Kw44po30#WZF0pEkyP$(b}MKp>jV#Z)@(Hm<Vq_^O<kGIDs4y+i>Z11&JkQ;VgU
zXv7Yw1>MDqkv739@H$B1u=%vQk7B%a$Vj=Q-Q9k1aQwElT#rviGA5hj`!DG~cXfdL
zRz#fFb-8z|l+N)_U;>=77{{%_Y0`OmUB>w{yPOjSe@^+>B5z;hnAW$~SZZd+@-y45
z$F#0p4#yGE)WoDq!5OyEkC5j=2xP6Dxk*%c@;Td=X=Ww@Vg-NY>JfCmT79u@V<Hu&
zi;_J7M7Ii!FFETb%#QU^l=rKc@<H;AZDbmBW2Yq{YZZ~_t4(?AY*naq;$)11yN!N*
zf-#OK;&zNk9?i%h!{5p<9g0#9COh|=n2{*H<M|Cf0OSI~PWgmZ!s4I`oRU;KCHaEu
z`&exRZR{iaM7qY9$)<>ZHbx!bWJs?oq+P$Q5YrEJkHA=l#UhFFXzV1l6+522+%>7A
z)Sf;Bt^SQ^R-C#KRYrF5p`)vqkl8pHBU|QEQ|4?irI~Z%jJLAR%H?#Le5Z{Z8|R(+
z@Yd$3ys7E#SCR&od|sFPTM`%6cW3jgg9e8Zu8sV=dznm6q46W&@B5yT)w(?k0FJ;X
z$#N6AIsd}usPdn7>m$YgN#_4xHN;^gnX>=EYWDvE0nz<`NT$2Jp0SCOg_*6>e}Xx!
zb@?Bw2Kg7C@OM!6K}eZ;(&5&V6VoJM$rV~pSM0Eb${D<ySPdN|^2mWo%X0qr+w`U1
z$DWGj24s}DS>$GBX6BKjacQv^ug1fPb9Pc#btl1dlfLrd7C^v91~CgSp=sjb;Uv9s
zeXytJetKU>A(zAJ>)cYVkyxv~z7S{}eab=BKDkMjpQUCYtCfNztL6gSI35W+9bH)t
zAULk<u+&tnQF}%wk88NOsW$Jt>N%pb+Nhv*ccai*h+N&g;gNgv<T<9Z;X*#RJ`|5C
z(TsoTU5_kddTY<D`<lqmaX1o_PFv`pZjsbX?y_02$W$xN+4U<pJu>OGMbTYMI`2Pq
zzvSByS|A)0xbXa<G=f<ZkP;w+qqD;VY>-)<2anku9*zPawf_#i?K`GBHK?aLG8ht5
zya$GB6pdkd)7;~<kK1XKk4eYdWcdo=Kzka&rS96u-gS~XNt-XuRd<n<u&L-c$op2G
zqRecR;}WL&OOhR$6N<({FDmH`awtN9OK#7Fj-TV}?KZk@C($cCNI?gm>eGcDy#J9l
zH?KtTUV*ecYS3UAfSwtXg8sclzYI=KP=`&}%}Hp<ZzaSoIX{q%JsK2Uj`RMLRW&JH
zHQ6_#0fLXzo-tqr;v7P1_MY7SiMmxY<ygr12#MN8q(^hnc&Y}fx*2Ep6jMkO)n=-F
zHQrtQl1<(1ynEN%1Vli-w^uq)?O|-qZEfs007o-S-~QzFAK_BLJn_D1{50<Y&Bpct
z857xYAgq$w=$~2kw0*+~Cg<IIv-(el@*{xOhG)6UTr00F^m*Wg$}~W<k9Cr8NJbLM
zWNV>o=@K*O+8q=ny@85q|7QqsTyWLULRqe}8Y|!AWT%<^PiIjpE5lirzP`H~X(64;
z50J3=1Ndt$B=;y{A9mv$*^UzYKAZ}ubFhqY@H12w7&$921E$-7*|#7+STi%GJnAyu
z6yuXO_FiFdtre^{Eze3u|0ULkx@z?m517p(v&NrSnT{NmPo&zDmR)`lDa|HKvuhrj
zJ7V3sY9EX)qn=|aSDfxH+)V`A4Bp4vYA&S#4b!OF1X0~55nCx(2qdO5orM|gx4b3N
zWM-;$odTUb%s>8U2Rk)tPK=D|gntIDoHV1Ek|ao~XfP*vm}xrNb75g8)Gn8SknF3E
zo;%$nJ3%3|Rl%^w3d+hvFBCoXd&ZdXS;8$LRG@zQWEM*PwDuH>!jscjcau1^8pA~n
zD-xUi>I88*f<U-yEoY$YF&Hfy#@o-WRoI=sy#eK*Ja<(iN<hl@KV*^_yR{X9>M6$@
zL2S!<&uURGI*v+fvXP&?s$Uvs{fU5W9Ss$YHad!v9gCX>6{ezFa3R9qXLeMurxn$E
zm;ldOe6jUX%kg_jCwPzzX>j#C2B;_{tNiGTgIRIE&-cVv2QX=-M5E)e{G<51(Xz1X
zjltp027TFhYjvJ{>+DG8s(w8Z#dd!nu?b``S712LYWA?)MT2?(ItW1mB@H4Lf+RQb
zZ|@7?EtAAeYSjW+?yEbgi_a)tplKGY(P`I}8<%<?nqx^HDTUTZXL@L{RH!?MJp>(>
zQL4Vg3m9<^z~v${W7U~p=<GUzKQcJ$?^&ZmqX#(#9%Nl_FpP?g%#8{}!_$Hb(oX!h
zh-o~D4NBWh6M!B`Q>`k<aGLyp?mtsUjf$FuTMh<P{KuKW;=e|mXN)%}Labm;Ww7wB
z<ToI4!6|WP4ph~yX4r14q1aV!i`FW9Z7iy!?H7AWKS&I8&2u|E78sd0<SJdrrR_J)
zpO}BNOI2xZdtHb9;Cv`UCAkw9wh$1jjWl#%8S@=7Kq9mG+Gj+saeqMNL1P}VlcvY$
zQa!L53H?dll|P}6^o3#7S=$}d6yucUBnQ$Kpt=@@tq~TZ1iVZ~o6TmgfQqH_Fp6^q
zj=`vqg)->CbVb%(da4<xfw_F1Gel}vM43v@OM_Nv!@}UWLXb;=8FMs280aOOJ?-<K
zUH#+vPW!Yuj=<nOm>7o)kqi%7Q7?D~kNY`>_8UQ6T)HeD%uHZH_ESVI2KO^LZfD`C
zAa7ELuFU0y@L0@Fce{i0{p%HBQBNy7%5AX<=!gz)BQ4=AwT^5!gpbIOyIp~%B7hhL
zT!BWmBM63tq#^2PP*K0e)ld&h;{o0;#7|er-Y*ru($73e7;&UROR1ztX2XWWmGKb>
z*M|z(4e=^=&Z%K9hw;+eg%SOY_gZV-<~1NvhlLR^jK@v<y3v5O{&@l6Ctkr)$hB~2
z3<sn*15Uv?8Qc3Y$<n%E`qUSEFgGi*<~ITqj%-3R**Ab_2rg<nUZq1$ME3oa<$)NJ
zs93a+Y+{$qL1BH6i2S2c{+rt)Kt3}Huw&Fq8|{N~Kk})ff29@4v7;<dUyo}Stu-=K
z^Zhik(fsc!Bc-=hR}%SkdM5L5|7?w=a?U)6-)Od>YaYB{cx0%H$@d=lb7eO%Ohs)d
zRtZ$x@(!)jbrZ*mq+pHB!mbL&)^K&h=h9(M;wq!7mM;j`a1yOc!PXL>Ljfhf)Kg%7
zQEOVXjpapQAJ9u2ES(l3G?uEFQtK&a?RknA@8j4oNlm>g^5pKp`eOE*U#&wL4)ynS
zuIaH%MB(B#aUT~EQupJJ6&)ApOfsyz>u<f)(f+eeZ4d?bM}yhLv+q^fNo(Z&gWG@Y
z6HBvwlOs?5Q|@m5ea;9%WfV(y8F3Y26nP<158acFwWw_?1D!3ZXTimc=ZN=Ge6k;h
zHnRD!;1y{4$z0i|;gyd|agA2m2!u4(Zq`tAI&wBhJUVt#JvqjhYH%J3ytGuLfHCGA
zW~3NNe%I9%&)KaRy1Jy)wFa~??pDoGc&k{b=IIqH!OW(-8QT&*lG{vr^{{;}UZOlw
zN@}D1Ecx)QuTRt>9-?V%#995C#nfi47D<h5ct^@JsZ9AOj`+z5cTbC39k!X~Y~2a)
z4w<i7NOZH(Z_fSqFG2S{dzgHd@wFI1d#~V7!PCUVG5U{DX6v*=bdx7P$uUTdzdUTe
zdcm=e+d>7s>KKTBJQD-4H2<aEV^ElZ0+=Q($!K;mL+gM3nuEM>j|AR#2tbar^5_e{
z4=A3l&hjyUb_1*X8`@0IWRzGI%)^u&*p-zbR>y{CAaFv#OmmS~3^KaPjDs!3<052M
zZ1SS$L~6^fM8%E}4MujGqXa^YaaXkzWwqE8cG)OTIMx*2jG{$sTrvG=ap_wNCEDnp
z(twN3F)EfJw88v@4^*!YhP-}Y<Zie!x>6?i{zoJ)xaO_1H;<2>g?r|lmtL5amCa3j
zp#T_%ERE&u4=#Ssrw}wk-!{l)jv?@b&99}-1Ok!3+8V`xsfg7vK_8JdPsy)Sdoa%D
z841=<t`*~t9uZmzS>mU($4X>)HT`m2$0~~_)vL*e*Ho-qW{?c6NBgAT(%YO=A_!bD
z1xKrjj+WWvM(dZ+%y6=Vx>12RV5X68U6{G^fv|oeC2sKA^!+C*Uw3*sdXe{x-mUR+
z+}H)YCvUCtFU@ZF!5mLXzyw}B0lE!X;Z}C9#jMlmc;t_iPF5F|f!ve8eic^6!w26~
zdu}T?93wlC@DFwS5v?F*Fx{VAuCmo^nT!^9N)kP(_|vm(D;EA_e%(tJ{_xEya931<
zP6nJcN%>cOMIuJZy$k_H$Uo1==Tdj9Hfdcp>j_aHA!IHA267w$Y>cr1vktJ21ui$7
zr2oAX`B)#sC0ykTbU)tncx0yz*Ys5wmudSe{yt?WkJSqPd$R>22+rOeY`Vc*#q6Mz
zD}yR}Q%vP8A0;t>4^Ylo2-*Q!zk?u}jcvb)1_c;l#|5`a!k*`@sb-7mS1En{0kg)7
zGv`6)HTe)4<!JayH^C>a+JBIOu&f-U@FEbZJEou@(GRMTcj$W!UZU?{W)P=+2yG={
z7|uJG!hAFipc9{4c`9ZH8KlwoKZzpDZ(Kz9+jvIxwt{YKJy=OA>6VhWAu;JmU2$m6
z<n@OYVYh(?YBopvIa1^y(k4Xvh0GLk{p(jgiarmx?xGveGZ?&I@E5xvC6}-Fqk919
z$QQh$_va*mds7f8p~bi`;idcG%j3m3ajagh#$P|B)aHl?owusJc-o^SeNXSzq%H6D
z(&`Y}eZJb|jRx^!dVZBriv=^P%Z6X7{b$BnG!ufq#FFM+EZWg)@`!*m*xqpG!;p*T
zTjQT`U&`(71E29AX>wMQJDqlYT8Zdzx(%dD(DSn4Q5RDc63i;F*oZTdysyjPfL$Se
z{d}2Pv5UBupIGUza8kD^%m4@Nut^o(l<94<+9pH6#4GSJ^zciqMZ(i(g;<>X`S;bb
z=3AC-pt}yL9#sEQgV++te1%5pP_6#(BxrjXQpA*q`@sKW?3}tY0oHB(#kOrb>Daby
z+qP{d9d>Nnwr$(&Sf}?H=VtAjb$&tJRMn_=&YJVN`gMP=m2Ruo7tq;Tqv>VYE3K(=
zv5k_mVVbUDV`!cRztn301I!Zk0ojkNp9bTllc&&^+N58(Awkca)_l2pFiWG$TRA+p
zG9JWGSrcP>TM;O<VimHqsIQ@XKZfq*ZpLymE$Q(M!@`=o6)qJcu4nWKbm{E_W09M#
zvf7e)ogbId9Y=w-Wg(l)vzb>gY`v9e$f}WJ+LrBN{TpO`9%58!!IZoV+Hq|D070z%
z6eWv<W)IY9SJ`J%i7Zc!cjbKAH->Ve@i21ZG?n2zql^FCqi4g?bN{osvje(m<9eJr
zzTK_;miU7Xs2B_XE8lRgfU%k$<YL(SS+es6gZrS{A2s1YX{DoFj{$p60}S+Z6b}#W
z81;F82zyXyRlV9TGr#4>lY;r#6}5uiw7IPF7oldLn((6g%C4c9@QPEDb!;aYab%Bz
z?gWA*+f_Aemm3Xf*D3@xSnX~(X4j3X-0$;QwSOoU>Lm}<@Y%I*m27kO(=dS@d~p-1
z9^$f{#e`9Xl4J&87u5^`F4u78up|)`1Z(Y2AdNXe|4v4IcOjnL2|4;VsC+y+kK;pb
zgftHWv2WT~@I8E=ptgxLgRVbN_kbqyya*BMJtB$QqS6sViI>tq?nwkxD1IjIQ;9q^
zO~VMGm{u+IJjj=bzLr)n&*ZAh7T_<j+}P-1Q35#hT+f{tP6n0)$U}tAw*yL#1QH7w
zOWj81=sWY=MKD>Qrhs!C+9y#q^6>B%(T@Ie<A)fJ*@k<Mt6MS96nISOnJPhf(^N~5
zg5`Mv5RoB!L*U?Xc66g)jx)+f@a2^3sLXp73lF?J70uS*67B_kTu+aU<yTP*PkPFN
z*hibi)y@Q!ABeg{1USd%)X2nVEntjmpsZzLoBF;lj;-UvjONP9_51t&MDs{3)CdFT
z<z}GlHmM>NL|x$}Fi-<EEN?no2an6qUPLPDH!g-S!8;;uL=DGwq|vf%hSbu8ab`j!
zsK*68!?qtMo_+#4#*Gm%WVLc-R@<aw_VJD>A|VQm0hKMgSpx<xts^j6Of(nPS;}=>
zlz^PE@wMH>m!{8<Va-8k$0R+Q@hHVh*M8}i3Np{vIib{ud8S<6R!_c$P5!c(c<(Zl
zj1p)3^{6)wR{#tVh@v?+L9NfvFWPd0Kz4GIk$c}w*_p%Bl@K-f5u%%M&=fA4<ZK-Y
zDazF{G#4+@kSXQO<RVI9b;!e9)msv9G>TvBO3;4nvdjT%@-UNl@jpSzw7K@4(u?wn
zCj1#^jy;-PK;s3P0f~P!M468wP~J$OE^8>t0u0~N7NG`PacE#87SHqQ<Gs^J)6wD~
zoaXp4`!0DC-uFRS%E*e>N5=zpr=1KN{i^OW7Tl=I!?_lv9K2vGHgsG~X}O3RAtN^D
zn;v}*YL2YGa?WI=9p(nv;DeyL(U|EfxFa}?9k+Dvn%q%uiRtM~!uxaRf_YD7a|t>L
zPpB2B+FbDY-wFOxm<id9gYJO^02tZ+e}x$vdnd#Hhq+(YwsqcYL;AkK`hn2-6ikvY
zC@vb;VO8CX$=NBYX2_7<*onDRBt%X+O8^B)@N1X*@pczX02P=^@@co6M2M3Gjvns0
z1vxs(YE+79RL#z*ze+no7)~>1h>MpSJ8}^c!XI8YlbbN&eS_b{CE)dbzIr{I_RVz=
zjQ(wIG7Xk^B|Y9iTmnS}8q4xq<TbOXDvv5&gZQcIu#jWAol-)r<P9W0$>i;RUh6Jp
zjB$nlI73-f6d~G3bB@-8<MEYJrYvPh!`shCWKXf^KHa(BqMx{-0ND@NemE@=p%~Ez
z<aV5_A*jmth)46@oB8vub<&;0iM10xfiEdwcl<?XNU6a_hCYai^VIaU-G_4Ii0;$1
zUundof9QADD1U2;sf`RDc!(JiW8WegOmED%evqRm;-AyS^<RXwkv1BV)@lyuu@-tm
ztuYdrCl|K)SR(6TJk!z|b!6(c3`Wd5CO~g245Hw~!vAUyaLi*!W1x#c`bj^r?SjWS
zWz~t}*!nKXB<7Fo*l?105OU@9XUOjkO#wb7pYg9G5sg8ig6%-Z;51_7CfcZPpI1|l
z^=W&T1&H_19BIqD$tUIC;9t6wE92RB%d(T=A3%DIb&wVE>&&1;!$?uKat70GBM4*?
z+IusbL{ecg7vq*~u%fUF!qPb?nwUnPeaISM{C$XOu~1{;j2RKtmKms|X-A8Ma1?Ed
zM1fPJ7@uZJHV}UVa<CvCna}QZkRIiOu$IR<zhxoUKs|(J_B%raCw%tdI8quw!7r9S
z2KVopix=8*Oq%zaM)zy$h#GVT0*u;mrP8WKtD8nkLf{%L@Rh@0dO5@%-SCqTv~=k=
zQnQ&#j4;SiW;mq7VWRQOkMMkQWlK+L*sC-e*o<hKJt>KP>##HZ%N1%X9CS=kUjjbN
z7_-J#CP+Off&3FDLi(_nlIppylh2mI%vxyC83i5~)#k?yAPVp?CNwth`5S<7wOSZx
zZ~Mc^#}^Dt<;oEM0?7*5S<Z~jsZc_)l?IhKnN&WrN)onJNT!YM8AHi9maQgyi{dWW
z`{4YA?$C;$BT}^S1l?$irKs!8QknqM*uPunZ!{-)IrhrSe${_I@|UNt6cuPL*_a!Z
ziG|fcz*Kn*7Bna7_WIz`b9ZYZ4DNOAXTe7}VLL{7=w_?Z0x3$NegzTtf-h&clWSV$
zf3~PZC&m7u!w@K|_clgJNLwf3(arS-bL;blQ`3?X()_|7x{#z0GkOb+$D_jy<fT$r
z8nA{=)UKXQAd6Iy)DI0!L%xDzOMj$Q65QIL=Ibm39{Q7c{xg~|_kBL}AqUUcVq=QP
zUfy&#R|R7_c$+j<#v=EZFBUgfWRe|Y@=r(p8HMpWrJSJvFPlPV_`v+u5EN-NDJ_ag
ztl%=m7*sdd<ysy4YkGard&d$uvs-ZIN$^R6)$Jkb&a!ZLOGqUshh1AkZ=B7}&gi~5
zSv=amr_p_pRD8z+qwPxeRtm9|6PXYVQsvsl8gbDxkAbBg4{Z2iJ@U?2T^t%~xSpoa
zQ0f7#(aRXJiZ~9Nj0Rg1DVxx#OCjcKeLc<8)Dv>WYMDDYFt}T)>IP~DG$abWVdrE8
zN2ijG?gs|F2hg#{O0{e;FPC;EeJA6HWpt(&dV**+z;UPAqaV++92jPZG)^}16Ze#q
zzNg+&AjwHhFQGgc;;!6j3!r?SNdeWX99Uk#*P6hg9%E`1iOWrozM#b1gwx-_+2jZA
z{`$srb4UzXrDENvtlg>>V4iI#f5u819cEQ!ZZVc`IxV%5`hZ=6$%C$4kr~K}Rwu~l
z<pt5Nnt?oxPsk>;W>aj8B7OeF9NTERJrPt;`V8qpLX*I`>*%KJM8lOT|1|}gKA2xT
zHDAV!5&d_9%zd`myzpncsmDD2rVWp;MRZW8weIrnu}*QT4TZ2{5}OO~jRx5VmS!Cb
zOEVu#QidMmGT`x5$1HOYF<-UQ@o;=x5tA&1Egext+*7;db;ho3EN?t0S!-7_I+sIt
zjG;)&7quYk+5F1Q6QHv9HRrd>vcAGi8wsZnUZx*#stvBlr>BlYR3oL!ILbk%Oj_`L
zjZ_}5SDRu8H7n-dd$!g?F;N!lraROszPl86yY*^%-}4LvqU0p*`dNgJ9rC)#x8i7;
z0kJkpxYe*6yw#wsFLUW-UTkPf^jsR3mA6){1mzV=f3uhPh8NAgSz!HpagU6XOitz1
zjg5;JZ)Qeiha83Z@k$uw3Q_^Z<=hpjEJc&kWRdx`o#&}-nD1|o?q3Jbi}@fL%d{&p
zYY`!ygA&P}Vl8Y947>cKv9UVDUq(LN&k-a-?g3b{ft^;*899yJe30c9XOS<xm1D79
z*|XHHeRsr=vtXA@Zr;Fa-alRjpNLjRMjP_ujm|zkBXtPIx`S)|2K?m|4mal|1e<g8
zG&e*P7O<)-2*0W~BJeHC3a%e~oOaeOK^XFfzm<qxstFTfgwdR(4?}8@(f^{_<Rmx?
z>4_)SI#RZIX}`pa4R;r=01YeECnoHM<1xk7oIH+Sy<6G+a$U`-8C|qJD=6$N&*$Ee
zI%9M0IaolJShm*!i<jQPx(XH3T6E(QEKI8Q9f!zhvx)gwd0$RM0og2PH0w;){RHTa
z_bWt!4|7LMyg=y-X1PMK@{pXWkZuVeqRN==(dv1aZXvVC2i)~56JFAWf2J}gF2Is^
zm&bW+B>!Q`y`2JnFzb#R{B0UXF_|4^(-78D&38NQ!3LFIT+m*H_;GS4b2!@2*KKcf
zrQWU_ElIgqk>;vYuTMY*1X8@mX(II~ai%*%O%lNGzf>y7CTQUQoV6dF$OY>4v>iQO
zx!VJMMTBv?GqXG%pZzCCln1_TEql78--gTE)eWT%haQ+L_<Yjf{h*y{ytS+awIw{;
zs#&0JE?i3sMJ_KZR)V5+gCy5mOyHRn!;g-QtY$NG%1!v(*f&iW#d{rL12PuQU}>=i
zw&DVxAb;8)dTdihb)SBMuwHsq<hHI}zkcLzez~83lA{_878jES%7kfsWMHdsJlyvz
zkdSx&*DUOX=f_}_hV;4{P^O3|ms<zJ=h^u2vx+w>!~3ItAY&>}qVwQVWA32T=+bj)
zp5u;#r(Wl(@hHr;?DEY*QLf#J8!HRBFPjp7n4{WV<z&d$KpHlFqYz`fzgxg?4u{+&
z4vF_lA!HouWWQYO1GphB_=WNS_QvN2;ZiM3pzg?eC|H1J`UjZBEQ6SrR=`9Z3d)Y>
zl9ZJ!FVxjNfcM@{V0IPM(+iM0+J`4xnX6Y^`0V@K2vc_6HlRM=2()-rYzEO3`2u2_
z3%d3<kTvbnT@c$T$|E>kMLYzaY<H;^YR*U0;pDt;$}lg6AmP5qtNLUx|F>M&dZ<Sm
z#j$+2(eu!u8<Pv;rxT<yuftgWSX6jSbQx}3&J{4&mR3LZMJOoO<_7P%3>9TCe6tL%
z#KaG+-5&Q^O<!<l3G2l$te*La53>v!CL%E=BXO$*P##~IDuX3Z2lm*rL{{nClz9I>
zT;Za?dP_(#=NQ5iI8^hOkE>=?%5rQWVyB^^`iw@YWlQ!AVr>+z7mEzz#$Xm14IRcy
ziatCwwAiI7&TW?^pmN1d+`G(5u8YcmeqnXhlsv9vbFu}+N=!92nz!#-`}s^`%C(uC
z5K7SFR)51n?FPQRa$U&+ePelWiAT^>iSF0hdg9V%!_l0Mpkk6i-qy(oICim-lB`QP
z4R^x_>?~B{sTt_b{@vC~*#^=hWjpyx#KrrhdT57*HPAF0U2SMuj>-p(!|V(>8vA_r
zxcqQWR_3%d4_{g~vtVzke3^6>rOqy6<V`JtjewP`1=#8yms&BLxo-krYmnge=&PLa
zlbc(0z0P{HK08Kw7eOx>OKX*R>tr0+o81ma5zi+NpW!N0U^t32MW-B?+?c2j%oAgQ
zJ0)M{1GDnL(k8H?{jU@d>9Q%bz&#WRJ`QNxm7<g0P*V>ovv1SYk7Y;+(R@4Ev#+_|
zE>SN<ouScOfeC3h@6-@aj#n^Zy;j|Gmx`+M2Hx`;!ZXo=i$$>boKM!d8<*cA`FdB?
ze3@rjtlP5F>mxKZ28<+<8m)6n)-?US;&W&}6hXD%=L|x$0nLd52w~C3bT6w^VrDh;
z!0#|nFFkoovc?TF{!M+Qfk1z4FP<yW*<`dFbN>q7{;V;BeFwZ)(AWgieM1y6kYSp@
zU`t>`wwC)UWq!FBxGvY6p-qW4wn(lpW3dQwv`TAr8_t0r4APKoUn<%T%l(dQ+hIk@
z#_+E*0H;%KU3LV0@z6hl<>OU7<@y2o&q3%{ZKu8(Bml682>{UicZb^C#?-^o&cxK?
zpFf?WVQYWLhW!1{pAM7_5P~*Yce+6j?S!AB*)WQ>^<LZdbLwWJn`g5pQzR=Nx|#Iv
z#Y#$|T$kIRM~~ARGslMypPf2;Zgi|Z@)(?Cq9KrUUGi4k(bwNjn%|Bn`qvt4CuY@+
z=Nl+rQ)_2uS5xa(<~$9>z>>=>+9H%Blv1$wi{eTlGAUJScvW3rI?a)nWH&nLf|qW*
zH6|U0+<jHuyPAd-C*ED>_EI)G%IK4y#fqE$vH<zbv1@$TOh=(O!{*O&=BZWJ<T_AH
zqp%JhUg0GTJV_Bb$HQ80^izG1>0aJtWsI#PH@`xr6dqEsDC-ZBhdAjW7!4XmaV7<l
zWU+!(v8ir}%HW_UviPcc(ThEW<w(O@>o0Xi{RVMzw8qx`DR?&2`vYz@W=U4(WJqL5
zuq_qu+@Kd|m#Z=Ns-4Ei8PT@dBe`iVJ~LL**LKhn7`=gktM|zmDarUrhn3TBpgQOK
zAyOpf!QE=6)2Y#;>9pUGuLFI>1>lDv)ZEqaErID7`%@Mj6o310GSjlk=#`@(y?$u>
zfdisC2(9F4j?zw8J!TR>N-d@vGQj+K8{l6T-hgf>eB0Q~utI45(S62%z*}-H8}X^#
zWh@Iq!TE9Avuk?ePs~<S!D_ty?sXLs0Rb~~C|mXQ+Iw&(#p{S`ZdSP-34>##{cISh
zpYQoKPAv}XC$o{-{%SE-@dDl?PPtl?2ZH4;v(Vr@0TweCDOkvK{e0-zGXsd%5(T?|
zlL_O0)e=ROo8<Pk>QFa=AquWogLI{diR3=>w2nAK<Iwr)ZvrTt*Rqo|l@opa>8Uxv
zt1Dn2vu~QYgZsrIe9j7P|Cz<azzax;LG#G;pO1#<VviJFDrUH_ySQ0ESB#Kp9TDT~
z_w?kKu8gse-w3PL4dIXlujuaIsv?vLz6YLh97%#aObv#<WvIl#hmNdF(a=B>9Y|jB
zb1z6h>r#l=`FoN^xm$xM_UCfNGqqnizMd6wGdQ^{Wp3n%1YJJ4R9p>to;wMyJcd%B
zJ-~sbnuWXt6(I+ioHe(<YfR6rb!JlW@4m((Fna1BmMhZn9;XA*9q@fZ!owjB9wfI+
zgZxA5(Ib#Tw&#Y88e+zX`~OgVZ!x4Z0LglDns@=R5u>kvg?=2FS`=D8mQSj`_z%xD
zv7~SaFQ&uoJN)+S?V8#l+0m8bqVZpx%TTdK0cB|Y$rfSWTjaAXZ_dcDw?zjk?o;_S
zRk3nv631~8=xfh1y+y{7Oq-cW99$7Rt$2M{omktjf1B{`Eb{c3k|r#oc6}6!`b#=r
zL<<WO?eEAsQ&rhtrZZwP*m_vzQRm-6Clv8FwBQ^fV9KMz-#bYJLnOF1DO3%q1%#Du
z+SmWm_|C%QVYE9{!;|*T3BMPT;Ac61O?xg^Mb<#;%}|^K;mb6y017rYI9XqBVYcL~
ze^@iVKhO*nVmxn?)!j;pj^nDky8Ax*WNh39Ke>cs?`F4$hO6j|*HB76O%JtbK|S31
zx-6JFMdOi|HtD<U#vv~4B*-_^5b`3>%y#8rztO7@A0(_q-AlAC%?7ahM+9KG<8G|x
zH<i)bMiZZA&twu|yfElLP<QL26w5vp3Kq4b$^>P<oC{jrI0i)^#OWeSq?x@O(i6X8
z57+cOFXrmq(6~vz<18CVBr!TyX1wV#!i+>U8EAlsETzrX66ojj4%jR?$xfg(wQ~8y
zAs>`ly$PfblK>!VNN}UZ!oRw3G13P7tuH4AQY5vmFwH(*8@QDcS4G={k`w%(frUw}
zz+L&;$@F1l+%{HZD8O`ST)v^QlTF-a98h?t55@+!@MjWy<Tk=<ZBavKWId)nb!@$&
z11mf}tnOyS_Ds$#yQ|-GgOQetIsEJ_({{w8ckSlqUexlmjdR7M&AgHTw%+d>9qpW0
zQ9mc%$LR0vJx&D<Mz+hbZ$okTi?6~zv85VZ=9E8+w0#(c^0+#Q?1&NIeV@8hW`!o_
zmmQ};XinI#{d(LzBPY~X+Rds@o~l-eUz?&Jy(|fXH<=ud3d9g`PD@z`l6diJKFDFx
zyNa{dKK4D*U?mGSV!!mEbC=~fj)c%<pJ+Fg3YH(`@C1A64g5MQrf}Vt4lz5lyI*|3
zK!SuJW&8u5YB=5w>kba5qkit|i3$AWN0$E_rDuOhA#)#(56w*lR2i;q{3Q*`Zdh&~
zr?Be-f+*`QbG}k>hWvT_gM>ZP@BM5&J&|HK=(ix!<5);2Z>v>*{-n+Z1W~f-j}HBz
zzbXI{rARB2Cy(3Iz3Nyz$CDydC|LV;FH_*WDc9EQ*#je1d1<x%Yu)<p%ge+qybD6I
zPr_9nC&jvUr-r<y+N(3D!^O8=@(sa7b?#q<O~3eMb7vqQ6ne`jrQtRNh%$To7bqKE
zO6fs)$cTiK)HCs{o`u6q<?n8Kw<PmuP2gtKPg-uGU@6=RlpjP-1&qsxa$X1rfcNRF
zmd&M$y4xQgUTYdB^_h>PFZU`62JIgj&?Nr>!+ou|^avg19c&x25bEK(N3yWTU`sAL
zsg__y50mPM-^l{^n2lE@`c9p^>e|-ZCucRKca`sKl7^sbG3(@v5XxMjW6%Fq<Q#7*
z^fTVe&QpgMHB6?TRu3}|$cH{Bir$#y_S_*X`0I|6cCuQ|KV-`Q14j!x%ZL5?`45Pm
z@n&P!A?4h7#>=d2d-O9z5OB?a<_;aHr-6Uc+%oi#F040;li}`+zFp9PMUTST*he+w
z<7EfU951w~pr04Sn+bxCV8F{9jBVeR#wX<A9=^hz*V#!JIXdD9-$-zGo#uUc4Qx<%
zs9X15^TYq2&l9NL#pMq;0KkU(|4XN}v~zHE(RX$*H2$9-=gT;j_FHVv-hdy7k#ELK
zm#U$2`Z`8Nss#>P<zEoU|IoTs`3|Hruqe8fgZD*e{<|*{QN&bM@EE*m1W8zMZ?8Ae
zvlK4`Ev`=v{g2_ay+p?)niy}Z;xBSz=fTRz7*Mn7r$x4Dr9XY_;xBRW@$om^W0NgJ
zu9C)L=ydWp4OLMwOI1Xs!_iTsU3Szx7C3rG%^NN*@#dzAtfcmhPQn(l4@$k!t4(E-
zvEO>5U0Wfp!dckrD{wdb9T=reHIW?Oc)5OlF7bUNdxznZthJFkO-7uR7K2s`RapDr
zBhmthyQWq7nu&EPMhLZ}h%lanWo`RV(h}Mr3_~Lbz<~%w8a^hDD8xo>x0WQW^;&-X
z%kiJ|?QU_KN73KKA)uc#I1`UI*-2`s2_|Uc7R?8ovX@x<L@<)@x8}TvD8>smQeqal
zYxb;FnSi&yVGk+vN@CRS8Kh15`5|FEk$S|+#6YLG8v0KVx(A&`<?H6O%QgxF=pOZC
zk;H^$kp+2X=T7A3Iqg}e$;@WS=brw^HSog+tI_c$^e>$Jonte)bzkq_i%_0$YwAOR
zqCh}^yNa=lN9o)<o4aVJTd=v)py*ZFZ`AF%6pF;TZm!?&|M>9_R{|1?ixdYI!K_-F
z_nu*!km<yl_c&=N)2wx1K=Hr8ENWFgO|_vOj59}Ur9$Iy>h9lw;A+M&J=ZOBzB>J6
zSN?{TlZ0$@!~7{Juh$?JFXmmzi&0VrdDSQ}C}D4&TeB?XdF?Wl)jkH-n9RFNG{Q2B
z+Y^BsXlxp1*iAVO)FH6S?k46w_xwItcQ%xL^}m%3xCUTeWsv2!Uq3tzUpfQd<P!lp
z5f2c(ex}*Km;){ruNmq18mWILy=BWNs8htv$&mF_X$h-pYBkbD_k!rq!skk;ipVgF
zd!T1}&SS#*F!MXZms3LIcx$az3DJ4$sfwb><j&)=8!buT%F`HNn=eRN{xz`@#y{Ao
zvJ}RQUG?fz`}Sjc=nS7p&Br`&g9P{}Q2k9)_-z`pqcpak*r<1XzDnXb!7{B&vNK(-
z4t61Fl?RL6_wGSTH*+leLQ5^i4|g^`nV^QY{Tc~<$x65WA-+oN1Ij0KBP~>dFZHS7
zqy5WzA>);hdR$|9oRidENeoD$iyr#5c5D&ub5MvN5g%B0d(Mrzke}TL(I~KFQ<;Fv
zF8_xtsq^IMuT7bZ_t`v@1hqXdgGzW^roUiT)6h|B!b`~U5#AL)MK9rRech6LbHO`G
z(2g?kJ0Pgeu&!9L_!neYTs%m3pi_A;U_TG}F1Dwv*_${%z%vQMEj+-X<zHVX1^*U=
z%Js1r1MMspP3eZ|A@!IP0#TvMK)SR8fhUQL-5%Q!`=UorxCwqBZ4{flm8~eSwqXNQ
zE#XLDsL=C(vx7u5oEg^gn;jP(@_ZTxwvy;08jOp=y(d;gaH4=@rsPPt;s<Xpo2?mS
zAsrp!-)9HJ9yqZmUW!}R0v6w$X4ppEI$kE~klok6mPHaRVC0EqZYmpaB4SY5)4HeE
zMbcgSh%CJUl{eN1K;*wzLfA9s1dYs&%`OxuiTk*xB@_!<7M7;T?LAk_0zNB@6gG5%
zW`KyAWJ5>412FyT;{3tIYNBZmufD-hS(W5xBku||8X#w(>?D{Wpv!rb49AgfwL6Ii
z#3dN4w2+2oB&mZSinb0ylOHGaW2NLw>;}%8O`-mz2=;&L84k>&K7%?LWE>jM*BKbl
zC^R*52KD%l78t8*RWTdchJzau(VoIk;$&K6EU2odFYjxA1rvR9DBWM(#_!cTPr09^
zc##bTwYZH1>XY2#Pmf1<oYC*KVvO6~viRH#fCInTjX#)JTp%yGMpTxtB`GaJl3`hp
z^yZ_D)+PkfoduP4t!u1LF|Nk1M&Pz7Djb0;Z_3Nyh}KKIMX`xCzl8}rL`BL6|BaZa
zq}QBE=K-J9Yuunm-~s?pa(XCgKamOtV^TyrkTkQ<o5wmxd>Hf~x;akcOt!l~-D&EQ
zs>!eVJ;^a%Vt|aMC(Un*2416>#=J&+8`f&3^<JMCU@f3S%%)`Sl%>bQDVO*%H+f~7
zvBsb2RJgBMlKFnve8gPct*eY$nP{>=>d(n)5E5^g70hfM=ar3(z=^mW#^ymcgDapl
z0^}%$t|Z&P1{6Von3kzvJHPc#hq=4V4zD)f0bzS}r@Y&73?6=)D!=R`z@IbNpZ0Gk
ztyO#aE$}_oZ!TO4nNeTscEJ~OyqiSw<8FRgn?wF=lqgD1NqjannS@avM(Ik&gHGhH
zo@{c0)(oVpYnuKontofb_1iNh58G^P6nqPbPei#{^F6tGKI@5&mr}9Ca#Ht!$EId|
zvT10|;JFRb*&FAx?WH&jq@%wlV$_%OU)5QL6K7VvCYiS=s9+Y*JnDPy8Y8q%O@#yB
z0OLBtenRK|!`T}t5bYUTt{5BJ;R=M^eYH=NonDn+uzS+-bidAic<mVe%TW<ku4dC*
z5|O|{FId*308E<kBrM#OFFLB$B#t2R9~@6;Yg#;YFh#MW)3UFIOmn`ahI6`59_q6E
z#toZDucV7@?~e*H&z`?~cZ0lJCk@Iqzu@zxDBxdSWi`j&K{sx1ZZwGIkNb7;^tWNB
zb}RMbc2DJFXE#W2wrvqYnZ+i_0K$t~*LjscDFWj^7*bZ?Yy=Jq`!%^ko4%U%CR=)#
zEu6;&P6|Zpj5FZxc%~|r{o0UxU?O{$P?mIXAdV>&Zfktel(J>>p27IC*{9A+kkB78
z5-jR;zUIq^TMky!giq4cc#Y-jh4vy7|6O(;X7>;2?Z(bok(D)dYUl=&KX8{lx0f?q
zGCvdv&^`#DzRr=T1}w>BjxPr`|9~5%PHUH^DP@&Ehxrys=#`Os!@02El@2$pl&&k8
zUDa>Yh1{N<1)5^FC1Ebh4Ht1%uTOvS!*)rk0|f>QmU6*_@fK5_uC}~4Ga0h%HV>Jo
zXbC^ov>`j&KU(>9RQVU$H14)D-{}ys=bChsl~ofzgEg%=lSgY<-n?R#_b^Ak#&Zg%
z^oM<2RLK>vEWr6a?p}#j5`U_(FUj7bEJ_Af%OP?dh$~=+uc7~DG^X;9k{b|^iX-_^
zt8G|Q=^wAVG}vzL&%|w~iTY8DN9QQ;3g>=cr1cC|o$@XO9`}Hb%8dS+sGo(MlUU@w
zuiE_ZM<Mv8L&6t{=zRb7?doN}oYBx-L}O;sz$bAuy1T%j3()U;5J7hDLi=-k&%nI;
zeQ~u~V_gN&H(-pQO`7~8x|k*jPU8c97O!Ul?B+Fg6o9*7OVncx?YOAH?&Q&D!<0o6
zD<zL1*bsV}OYQC+{V(bq>ZWQb6tiU2X#HGbM5Y%(KfEP5&&FXhhgyV8*0l^ZY6PGg
z=QEh&^c4+^1dXeP#;%<mckg1FaD9kW<&?gM^qi~&7YZBR548(h)WX(Mr{)6v=v=l7
z2b%ax{woW-e;9GeqSZ4`C0UvilDtDc6IxW2pIxmsnoYK@)Y*+lOh`vK4FlO@l`avC
z2XCsCts-2-?PhOuFEC%jyM8J-dXnX|tESp7%-zIpizgtXLp0P_o-X-N+2Z1W&p}Lz
zQ!ALI!wX&DVZlNlD%O?rpmY6A_dT$KqJ%3@oh4Nz!um4fV#~I7u;<F)j8m9XXN<~F
zM}|+S5iZ=f4maB<pW*2uY*fsA&T4VHoWkR{J5()Vy5qw_{O%TYry--fzK$GPl|(t_
z?7_zr#G~FtJI0C=Gyp2jQs#X(aoz&+$Qb55K1bQf)qtg;yr7ah!E+sDCDLkBwGH=d
zV2<KcM?`wjsjln}@kg*JRjD9S<Cmi|TB^7U$)*<<lk9lIv~fR^dm1p0k1EV6{$#Wv
z*hUzS2<BafP!GQxtxoOH(b3m@1_4BS<qTR>VD5=CP*`?-*9doGcLHMTJs2?vHBzl$
zHxEh=FjGc&j_xjd3E+7^vCRb?!)Q*Ph$8-KyDj%8_0VEL;gL-{z!6>ATLi`bVq<_j
z7QN@Wdv&uuEng$7M|N9X+<5iCh}4aPw-i=FM<^f2bBj4M(A5(<V_w#P0GFPhqF>d6
zGbb$Fn=F{a&=15eEdPWEGtqp>G=|#KtKw%tP!rM9uO6=+A0>7ReW**lbHk1f;_|*I
z4y9rL?@ge~H;{wM#%Wt49tPN}MF%ZZOmV<A@y`-4ALfUTJLqfKUv6gmU)&h%3!<$~
zlKSjC%(q++<!nb8XTKkb`B?{mI_wl=bA+Q4U48&ud}7cW%B#m0;pF$!+HFouvB}5P
ztU>vt7v$_x6y2Bb2Q9NE9hY7o<P{&AXoFFG5hKT7XO?_aNS@J}-F`llqoCEyD!h98
zVB80Au$Y6=xIQr(6|2|t$^sfn;Ouz}0ZSO@MZdI3Q?_!WorN@zw%|=05VCS$MZWrf
zV^iJ*3WqRszexC4BAV?4B0im(tpos`U&K7jWjH@^bt5K}436~aQfc3@bo<WNW7^Vx
zN<@aJ4KyklxDPyAe**Ye>b0lqUasDk0$7;u5PEnvsHV<N>uAM?skr#i(>wi61IFLb
z;j?(Wn+PVI2ztY8UP&ZZTX8@(j$_9m?|sT^d0M}xA^5&gFD#QBI4P~QQl{QSSOds(
z+{fu1>PJeueCgF`%^Q2XJHtJOsQfE@2&3tkA?0IuI$!BK;A;LAQ*L?s^Cc$&o3G6|
z>pt1L0l;aIp2-C`@b55dXz!w``uJAW6%JPly?;RdbBeT#ifL~H1^^}?007GWE^`0h
zDe`|3Si>r^)?18--3(X$zsUo)=`>68L8FKQ@wFhEIDc`X4jha<dlVAK>qzdH{P>cs
zYxx&j+C7bD&Ua;aW_pg^&RA8?D9M>iaF;!wC@ze?U16@=t|p9UFS3PgpkB()F#o~B
z!<#l=Raq7TDa1vEHe(D=DyO3$Cp_Jt%v}kSUby~xqeYRizH_|y0e^mke|f~hE=^1@
z?uY8<Azn(!XO2N}zyL*4t`Z5B5{I9K$RtO)IO*c^nSS%uP71>`d9P%8#+!-Cm`B<o
z9tf5%D%|sp$c!LRA}MZ>Hg;5^5`}Ql<+$y)MJ3UdP-R}_aj^Y&2RWOb*Q@QS#i1$1
znPj$bm0`SpJWNeU#dP4brZEMfYM(e6Y?L4i@wL0d2&k*8!#!~sdFF<cE=(>D`2%`>
z>LSO~K-249GqM@<de8BBnLZ};Ya_XlzmXPLZH#YLF-jfkp}@5D=rp16LP@VN(~6H-
z%)>ZgHktZ0sqdDQH>SefTTE=;;L8Qy<$Yc=k@XkJT*Bv%)RnH1WsDThiV$tAZ+FVE
zg)^RIi6l83lS!px3p9qeyV{)77ZuyEIK7>{q4ckdf$r+=ncnk7dd)rUBq0V$?@!ut
zu5<J1@kUl};WM=oZ(xI|+;(G0aYJC05ISQtj$6#JU?-O+M7f$f1g}-bXe1S~9Bhd>
zP$y<dIV0;E_c!Oz%)0h6^!4Eu<o5;FoAixL){QCTuDQb${ThG0Nk;Wkk=c5ia5!hR
zfP`{0;DS|#IzIl?ad^tj$fFD9bsT%+)z?lbE4&~A)wgCbLE@|c`8AeGMyiDsDV5Pk
zwo@p}AqUaSnP1m^r}eb*GBFmf^{~fqkA!M}@9fBDweNyfDW$2|>faW2qdc7)RpVfi
z=6fb4Hr7Vf^d@@e1u(oq5HoAu&S^k?{jc4x?S-K$i3k9kFaiL)|9$t{JGofco7>y{
zOYhqnx)@tn+L`M+xH>tzTDtr@64hGQ%13SUKXPb4!A?FJS&x`u+nq-e<Y?$=OXM)p
zK<=zm__>iID~uEGbV<_((z@TzF0Cfoeunz=j<$68+81`s@5ifjw_hiD3l|1-jUb7<
z4F&YAp3hG1=JC_d=Cl*W8N9fS6<bq&;qq?oJ#Qyj+d0Z)9#lqY=2tqB&*{{LkGE@|
z%lG@YalIdbczF1D*Ed%;pONYUa(In!>dY<9rdmf8CYq_}tcRJV!#6Wx$5e|1$o;D5
zHSOqAaUJLgQWiOtFmI%d{e2>4ckZbt^%4WA+}C*B?R^-*oT^kP+py6s#P4-lHCCu@
z?iA=a8udy$%6(Qp|CFo#ViC>qnEU4E+yM@T?u{3O2I8e3mEu$0>sx}i{2Ha%7q~>q
zeXYh9X!P#{bDs7+(<U%7Jyj|7hQGM;LH!TdX$*Q@jO@?NpsfBW$1C|0*bP$)rtXg)
zh;N5oG&LU7Qu$49yw(hl-XxQ$Q+vLoQAv1>uDQW(4FSiZRFPfo&nKTpqwcLX{dTHk
zJ}d~e@!E=Zp+=@N#JM7>z+ruRP?wN)P9EK<nHf2T_Utmz4J1Iy>^$?t2AVfAAC$=<
z)nJvsG}5ND9taa}$i0buAT_6?X99vfs6|53Ab%NB%l9!JW{LurN3GebA|H!(Yxx~(
z2|D;7*O}7b&Lt9m^P-UQpYa~E^34*2zL%M3MpYd{#jkK;9-IHp{nORydFb#pE+>4Q
z-@&p~i~NGUoQ++`HYh)#H14VzWzNPmF>D6**{;+D|9j^_2q4a&ZQB<}!$==~O9i_#
zCJ}V=Ym$9P))ieMCL?`->&`aOTA@CqI<A;asgJTZF-E-D2NnLdttyhe4c_XIWgu^3
zJg$K!>x))JPTH~vW{q<LVR9asUy2U9X4ME}Ggox>93&ewyD$alLpx&Ug(V%?2xV(v
zwwJoR6^j9Nl?Q%Jb*$?Rfxqjo@anD7rF+f>^kKkgUL~ZR;j)dgDUwz2eSyMTvdnly
zEW%)+h}a1b=C$}YCGsk#5z;YYPSX$)QKyn8DX5s?MsndUa3l&{GS4xz8*Ss|u!Ii@
zfq)R6OOu_|TA>O{?l2=89W^Wzkhd#`_Y<{?QEdD0U29Mqpx^M=GSMc;FHZwgf6OWM
zLFAhj{d6QU);gmV@AkPt&p?cw2%f<N=~);F#QTgFE5^=qGdx-@vtQ!!_PV*w2tJIJ
zegm#i-b;!qh0=kbNzZVvenOK=6PRJlt`yZ>Vi$WLJ&yy8i(c%u8A`wILwn6v3}2LO
zCK<OYn_tj?yQpq`wU^avc|LhWIvo?5a}i7(lbxgO5Cu#f1%k<?FMwm#ptVn4yEqZe
zZHYKRIGt@SF(-<{q*Z$~PnK<INHv|2U_?A&5R5v!5*vMa9wa<qPPc-Z+BOUTQ`8KB
zxM~7AlPB=1+Osa;2e55%pF}H12ogcF%^9jc4pY)g6{xVck&YpgX0wKRzse(8ifuwr
zAdooQMxp}Ioy@_0c5AK&t8^Ugi5n%RJx@)q)F>N{?(MnTUT=m9Kr~>a^pe%kp%s3=
zly|&X?@P!}T}=YpJVqD*85F_M!hILMH;Z<Y`cWOMaxFcqr(mxj(KC?V&<;Z#BC3eP
zV9u_dT$>`@3-Jf~MF+&!Jb>>JIo4IopY?(ANcX~B^q2+4-34Lgu=|z!djKBY7^V-V
z@#V)Q)YYD3t`LX<KCG`3+olTNwlCz@SM2cK-EqU`yJ`U1k|FwJ3LUx)Up}FEMw(E>
zkEgJ-w^;AfP{oD(*hv_&&_1S9u)HWNyG`&R4@$XqK<sKeXxk&J#0RP87|^-x;#{gj
zMEiXv+&zTtEYk-@`wwj*0prj=i~~lD2sR3CAMx}eF;YSu#Xh!h$`FxmumyPuHnIee
zf#U#+-U(tNLH_f>B`8rMN(3!E36@r_dos^QuIuD7$kiT+d<EaQbuzYC<*m66Amr_x
zAYlDt#gHP4@b8q^KD`mXeH7cQMlch0U8!u+<HLu#1)746&_*;m2oHZdPlIWLpgU`9
zaWUwo^#?{(%m8L2XAqc)8Aw3AS3x<=k&ye~OW3cGuzxKEX(WzrU|YuNbeK^OMN%xl
z5$f1|)ibgcsJKp5o=^(HSt<~@*A}3;RcRvx`PnDqAjJXWs0}*nZ<8P_)eJM*fVhP*
z5Uo^4u%1UqoE+uqwsL^lZ)D&d^Z*#p{vxPPx7Z2H2#ptKh~@n<TXCMy#TXM;{RZVG
z%nEYI_`52bi_EPjH7}@gwlpM7u)DwuX{g)XF-1GJI5nZ%<=pO<+pVME;pr#|BrduS
zlaHY76rEJNSXIlvCok$405IDGM`*Y{<)kpd4(EsfcDGvM-yx=0Xc*hZv}-Eg0HO&=
zZXGh#->Q0R6Ro9D9-_C`mGkI_kkpD2wPut_PIuB_Zr-P5VgrQ~ULzgW2oTUwS_bze
zzgwTT3RO?wCKP!<5)Ow%NS)%IO(_k71oKlQY&~%2KN6f!NmJKZ4ACX^`o2q4qjD-(
z1H2FsRk^RkyS#sP#$2DCAF{7oIWaU}&61r3+Bsw>!Hih_){!sgffI3^zA9UR7!!&L
z1!4yJP2559wcHk~3Tp$>Zad^s)Um8y=H?N<VNY#H2hf1$t3b2Y7a=!=r~=N-EZ}Rp
zd(jc<y6oWDWO3Dtavter(6yhv<e2+o*5Vac`96%z5BNe+W@(QRV6;dGCB5GLg(?48
z=%MVu$uzH5KkL1n=@HoP$uDd1qA|R+GiwjBIDyu$P;ut8o}e``|H@7%`Vy@VnVvyI
z%<;iS96#$xPtT0mdX&M075`w{cND3WNR-J-|1OD~@TJz?i<p+hQB^fK{6q|9pOMCY
z1x!4(xQq3C>`SoBb#)bbz8J5Ynop48sTm*M;pS}soS`6_0gHWJ4qKVl<>DL7-UxF~
zb2i=>Q4~wvt1VX{U@RsbW*Hnf_T+BkH8mK^w<Q(v49bI>utgRVhj8S=53n%DHtn5>
z(9exeSpoYsLLw7MUJ|tpBTy3!iT#S2i_0LTo++Ni{jX&ArE1@G-iz2*MjHkUh*AQv
zwshv6?znRWKx6JN`f8XOgHGdH;jMr2g440(-$`%9%z)J{N#pqpE*(U%=LR|<*HAu%
zTdW3F<%|F{2gX52HOwbKH~J?fXbUFvUev#EFxE729|{mPuXqoq*hLC4;44{0hiFns
zjKn(o#KGdl$OuT?LCs=sMwWJKrQ>*?YN97rSZP9pvPkCcZ%X)2pIa6ggbWRs_BznX
zZbt&X?d%~)BJjc@`+g&%UvpT~{;`#J^K9S;;O{o0T7V?;=+6rvkio2h0lAU#<!^>@
z*%&~`8Q;OL@%yMXVdJNewV2sY#c=Qr76u<K2BsyPKVgpQL0oxZ+RQiA&No}kG*U(U
z3i=X>QBSEg13lG3EDPTRIwYDR{2K`Hix^SLtmFKs;}-!_y9G}qRAJ@^HfIsDdexaa
z1Cl0%%tvW#Ci*h%FOVOEc3Z8w{%zNjv-`o@v3O0td6i?JM5!`;GQ3HCCbVov^@D6c
zghG71_=mQP+l*o4*Hp9TeXX3~=ziks@o>n~Uw0Ry&9zpQwx<aQ6eg}i)(n+kEZ|%)
z0H9BPmJn`i%_KF@7-<{b!APyOgC}?THaxV7Q%XxCoezFv{Utc?<jCBKRPfJTT`x4O
zqpiR5lk3-PuE|V%aETs1+zm%FS?l3$%hWApsGUew-6+%aphJOu=-}20Dyabg-q!nK
zeJ3SWyCWkY$~As-N|ENK*;4Cgc0;k3<Es>n`Bel3dfnd@<l=F=zzfXax$R93K$d+L
zi?u!y`5jAtzXZqJFzgoLQ|4R@g`}CvBp*g>@+{*la*S2|5xk8(cGZWxn&R_(J*PuX
z8%o($vT)34Obf>50cZ@6w^5r`l*hxgUkrYv98Jn>^s-H4TLKT!=ona9nzCgGlJllT
zKR;We`{s_gcUQLKiGUR*&Fx1qJVnIfAJ2;yx!Xc>UfQ1bot3(OvreEEt^IJutScNG
zBoOM;>LRS(+_6Zca7`;!ps%x-bmn?K#gdZD>+ZEzMF%PXt5aUV#D@7c2qM6nh=fH@
zBOvPnd6cmN9p2H5)2%S<3By~J7$dw{6e;@^&f^I`A*3Yk=T9cyor>6+MXk1$<tGc1
zHOt>yC-`lxwob*2giD)lju7e1M$piEWWXa3Dy0~MwdG?zN8!;N3MP(S<mq|ldmGGz
zwm(0qzB?VK=l8Q&3rQd+FEx1*@uw?!<}ZaeH3)$#_Ej6lnv#nOGl}-Uf_^#nHv1>I
zUB2yB@M-qcnYI$Av*kbNzJ&FbKAjJk=~-!mVmEE;WU||%E35;x(V@180JEi&8<x&7
z<Mzan3>=AtLq?7v(7K8pOy5GIE*#hjaw`xUnfh*!J4}VEV4O>O5I3-TxE7x+`dwq~
zmi?uxr!u2rx7u}za#@0=NC75O2nCUhr9f%mOU;dDCBaCLN8pR%-z*QPN|7W}sc9Ix
zc{z|#yfMwqb)8a3uBEC<6U61bq(Vr`t+Ctf&e;Lx_l|mk2M-@KtXYeqD$ikVC%U=f
z@hx9ywoIiKjNU5bZJZ(`E4vH4Yi`Qq95}lx<X@@lYjNS>zKVVWOVIZ<-r(cU_GWBb
z^H?SbiW#fMHgch15-`@L-D(EA0Pk9I#Q6v%XZ(#SxZdB~0-RxlYsd34|KRZlQca6M
zo1TyBD=+sR{!vr8U(`ZS?_GOI`ayVKg$t%1RQh3tKp*mU+AD($D`6=uDh-1^pJ%j;
z1jdfw5&#<zo8}-Z{b5XUpzR=C;SL~Y$@v4<T%ITopB`wKB(C7p=E^6Eiugckfvx^X
zv8<lmrgEH3O%`!XS%M#zMycioeBqnWies|l<Ygo_GOwA#m#VOm??61GN>uOASDdU`
z=ld^#S19rpQ*8i5EsK>+W8UE#$f*~9J2b4E{kJCE#xdbKnS>*2!oHg+$$XG5zDIf^
z&7bSwX}~9O`^Tr>5Z$Lz7|O{FBaXdFYwSm~+Y#uQ^=Nya{Z>9R;11LexC9`9T-RuR
zkMGAG9xjQPkIF*7G;|0<DsO$9iEZTWPI2}0b-TVz#ogI!E*B<D4{ADdc1ZZH6KT$B
zP^f~)3^jK`U}`h>X9zdF#&D3T7LzSb`97bt7uq=wIFm;LopYwAAs!fZHKS%F?LM+|
zWOmMr+io%w;f-)G!!HSJ{Kj4HPrfG%q7p`X;$1V#{Z%hwiLH<u-B4`^7Dg|=GOux%
z?9gC53n4)cT<~_T^k>!24eX0&{|SWoASe5L3$ZdtUKmu%&-Y1F##xSQGp|Q<cO!gA
zPUIvH%FFj2sQbWVufP=jg5ufBOvInE{h7XEL+fklbS6@nXs+#1)vMm;(l!?tyHmU*
z#>p1`j%A3jM4IhuMfS@j&}_jap1=jlP!;p*{<4v^AZuCh@UtXw0v>w+N|b}OKH-<W
z{pm-mI8fI1j4!{3+u0P$z`|PMmd8^MB;)*(k^AZY`sb0Ch3c*LuUGI?6ab+A?;3@Z
zsky$Hp|gv=p_9q~h!Q;3&YK)byUM9Q0c)yE#|kM|Q*zgy^2(*tq^;%BO2(7irBw`-
zK_U_dVF(#{HZ;oXyFJ@*0iXg>j%M)$xRC(tSU>(eTz(uIQ_kNMZvsc|HjR}lqTdha
z$;K=E*Nvh`6mJ(nww0zo@vkT6$%Tc5rSf{ILF&Y2qBAxBNDilD+&_UiU!%ha<{12+
zfjJ1X;qrfYd>{0EVD=_pT8>PSFPkM>s3xM0H*Fe?bsBsg|Bg4%GLvJBn_(17&McxW
zHku^vL~5#%^XS_u>s^#Lt!2H_kB^>TxN4%CrF~{Y>z&MJOB<T$MygU3(N4UK3nzJ_
z01<w2Pey4J%}?n)$BFG63it+jYXC1L;wRXq7A;h1R}CLL+Hh^+aU7D5->yj1l*${`
zn{3L$Hs#n#)k~Nt9Wl4TQtaDO4myrus5(&$R7ec^kbAaBhQeZ;JF&^PUFq`p4(xGq
zaw-y#6Qe~-0jGm%0}r+o8>sf51l@C>-vitz8>Z1QIRl)bOekR&&$00F-zo_skC6%M
zLE&?XsI(us1G{<;sT_JW8k#ifW727OFr<@biDg$rk;L(ZMrp^BsrM~`Q@TMSM8{V|
zqnN={fYD(Hb$SpogMh|VKk;w^w?y<sGy$H(?I$>nET2ol*cY)ziR$A{=J52I%_n^b
zxvQt<MAnpj9{4Q6V0}c&RbmFqS}NJ+=0j`O=jWW4EahTAsKzT6c_lwXVZ3e=z{5;z
z*!Ao@sRgCz%O8H$^RDfz)~HTTVn47Brx<XQmv`W8TX#z2%W=Hst=f59K0)j}ARNUZ
znq+gP%10vpdp@!ir0fY5u2i7fsl#q0aIz>xD_w0URtsc%NOD%pF>A+gK5wWEP+yf4
zK$-iXKQa3VvK2_B<<G74PqGZ*gW)KjEMogZ$5FUVE#Dk$1e--;!@P&*X4CSdFjm_Q
zWAnz+31aG69#mEw+JtI)#hqFl5=Ihu0(sbiN-!gBf9Nn4xKPGs1Hhzes#g1C63$JN
z7@`6EMPRjhxi7@^iM-O!<}R>ctQ^tcO{#zPDFT*>z@!@S%Qc1?u`dJKDvxqBwv0iZ
z0v*6Sfc$WNYw|**xHch40^|ad3vmGfjgUPW;urno*k@R<hgHhnYk}2R)nxEJSZiY$
ze8NnQdqCeD%nBrNRF1DEJ8&Ad1~^f`_6HkDJGIHf*sv4maAa-djC!fE*703l>#zb5
z5T-o_vLgh*&BeoQT?!L%BYy%FfE`iZ3&;UlMrtq>z+|l%NM-(7CKSQIW+oPrqQ>Sp
ztPUc7O82&nrRPwfh;z|jOyEedZNO+5%tE|nd4eJTOa@rClltBQk?%GA78Kh&EA~!<
z>7yL4%<JwlPb-5Gr-u@|YORH1e!Zp)LNV=v&W_B_SnMPDfz3?Xg)%VcLBxnqwObde
zi>AC4AhZooyfz&XytOvNvA~O$HntVDVIgq>KcN6gt!k=gS{st#hI`s5MJF<nE7I-l
z#oZQXQG(X^-I(R5F<VbdmebjgRw)qTiYe{#_Cl;#O%oaltW#KiG>7DYEDAGT&mV)n
z;9HE^0iy;2&ZgYc04A3_vw{4FH<^wL%6*;*gdO0BSLWubfA+-sfgcVDtUjf5kxw2Z
z6|>eJzRMI1b09>;>`(_g$c2ocG9o%r4EoZ!X94>e3N%Hj>JZ%(qyV?%HqJDngak)v
zU#}ZD3=D*tCsvsYBlO6B2a9$_Lv$;i5l>V}*8RL_#K&vG=jZvcAB!~Zd+`1j>i<x7
z4#Am54Hk}V+qTV)%`diX+qP}nwr$&HC!M6j>Az+*vzob8@8)jr=B-obJde^)$Zv-&
z&Vl^L)%ig5(r~?!eVDlH)q|00c3k^v5IXUufJD_D#|H2lb3;|sa_|oL4|F5&PY5&p
zC;uR&Gz0%LUSeVe!}}0vI5yvPKx-mq@F0xa<{W9mY_Q{4ze8tcO}plA&jQr&Z#-bQ
zGfQSdlZIp}L(yK^`zCBg<J63aK@@TG2ZpU1xLUu;W2`^D`hr`k^C(zg$7$-sv>YA;
zP{2X?`u*5He|FgyzHPW#P7e9~C7b9^HJwZEI&!QjOLi4Ur>{Uhb9O*~gP!1H%(T-y
zn<q7)DDf&|V@Dhi69GmV%n{>?ewH9QL?u*8>|&Np>lU7Iyj$lp6Y<Kci@-QcW2NjG
zv?<UhXyNEa{;g_l^w0*{dvF1?cH`#+H5)icz~)T{mmn;f1K4{F5JX2BwZOcozDJiO
z6*x#(`Nce;7r9xc6hNg^V6h<(`ysGzU?f^%a5LQFPtoDAM*;E>t^Mj@!Xv%!6VXP^
z*>e3)H33|G(3>sd2nRw-50or*#93uA_YeFT9lr^n^dfh*l8w?cL!L!1YDFG;W%}f*
z6U~*bdSz>(*>+J+i^wtYezj?(DAgGXgCZ!Tlj~bFh(kH#U@H4W!`P4AQ`FP5d{(6s
z^Ctdq!zhYnu4!dFD^tP(02Izb{_(#VU>b7<zkBHZGMJ>?8ad>xHwLuc(`LX4kN}%W
z`wDF{RzxsN9N=+vQ7<fzVeJMGpkgpp$B9utjMYlfUds_J$+6G?v2T^NPep@am!v1_
zHz>we;};^N={^2A(425A(mN5`8`sIFGDN_nc^!gTsqwa6PceVW5KJ?`H?O=jvscgV
z1q*r^9uOAm=32gKp}ONrYZ(|Fh-UF*!nGm=OtY#PEa`$8F)~}-C|aAu>Jg3-2GY`j
zo<f*0&izt1oORQ>9uTK$t%BtXf2JW~T^g8P@Fde8NVoxX+R1gJ$TSEFrSf|@IKM<D
zBHj)Cx}{lVkfglqG(F`*FFD}Zv{A&&2OY4TnhPIJ=f^0hhEVYR$C(W@4g-s!NSxjS
z`jX}b@?Vm8r9HQ7iA>Lm;ylkM(m%YEry%3~UaO3Q>U!aC=5mArc5nqi!;lxdrs5x8
zMI+k#XRcgRH!yqNCe4~@SY^+GIJ8eIe*7#>A2O-Nd57$Ai>CAbYRBoivlYFum#`!9
zNl+^t&@qh^xg`;gLswHaF|+un?Aux-HNv1C$$}Th0odal&fHi&k7b8aQ#hLq;u0pU
zC72<IGYQXUpLu!bob@~^7)~tZ4D~#kS*2Uq*6+2HZR#n?HSc*e$3|NPTye&U2Ka8L
zfLo;J7?Kr`8{;(P&ZhqRZBa*qsu0CIf))-GG3ms@NvFvJIxTl%@<eNr;iDHC62>RT
zRmMAxN^9G~2nS3-J%ZF6h<m0OmROX}X*m3nb%OP)qG(J<FpR{ctBvCOmqY!!KyI|&
zFzB4cG$Bwm6W-*UQUV7_pd*_GM+r~o<x0tA7kLNnYS&feF##1=*c8zPJsK#wtkGZ~
zHUPC+%P(UkA+*?WYiuLorlU_n-cbXZT5Y24N}|4A2WmVj17>Ia16u+K;^i(sBD8GG
z7>iejqq%_AUh-HJJCqU@R1`C4mGe0-HLusZd`c@NK*8ka%NBdFueUBA2ls|+<-@L#
zpxcK63`H@uV6pMEcpZO5UW>K{7?0CgO^AB|(M%dteMOuTXhl4#PXp9k>$IDqUxfI1
zd3ouP@<J&mLN4q5)T}q^-TMVQ<Oy5Jg~rp^^|{j-1}kZUJRskKjQ6__<qkuwTo{}y
z1UBMNY}AbgAlT@PRoTrNJO2GD2cq5(^UN{P=;691N^q5hCms?EWVH0Y=)UG!y#unV
z1;NYOc}N&-nB}0FBaqQeXL;~U7U~e}d+!9SXGCn%x7iYDHNm3W*f{M@j4jlY9xT5e
zGa1c;f7C)Vi>zDuml8qk8ZNX@cLy7V844&+@hEe&Mn(`T$Z?Y$f@_$-WSpz9=W#8<
zI6KZ~&++Tdc8z)YmL6$Co}@a#*DiMxU>;o?eIrU1>0T~Gry_3+Q=O1>IWJ=&q_9yH
zGTY`8`U_P~v}1Slj!wul@Wx9t$+B4Fk4b8YTVBimz$%NI#Vpu_f|Hr)1u%OTl(O!G
z3j95WVcQCuJA~{dy|p3rsG&LG@y^f7@iQU*W(Ckg(*$d_QbuHZW?{h6<(rU*3~Fp8
zf(2Y({2SLak1k(Ily*BR1p+$V5czVwf>q2Fw|}?}s||Q=J;NK{x8<#5Et!qtQ&lPL
zS)+vq%(ohcB$~Q8Ojauj77FL^2R}quDuU|F=kvJb)q`=l0q#C$L0)7?&-vY;G)XHO
z9kIa(A{GM%i1WWZ8HIE^EX`uF|JhD!ZGFmPnQkA(cD6d*YPmI(E@Jpqe+#WtRnSyw
zQ=*I3B`~R#ax_qO={X*)cc2F(g3sa$uCZQ3L7LM7C0(mK$8i@X3ArOv?FuTA3si|i
zN~ZDFMPGrPsYH{D4w)gTT{8v)0cR)np@4<c;qUONm5NA-r<7{~OL3fZyIhiPmm%R}
zR(9b9zk~JkV|omcTwjH@P#N)*4X5RE0Hav|^tCZc#{BQ>L%e-U!qCv9ThnsCB&WAv
z*<8cnO$kpw_~m6Bw2_K8WYzT6pMw)If05(+O^XLsPawMzz0p4oj8B*@%Zvf=4bR(6
zrH~&Re~;+7tUz?-gR0};V7=3<+GPGl$K4DvTKa?B-N?tq6AotDHqzv806_6s<u~&O
zHiaZuRYRqhbl(z^*k;t}vwn);-BH;1qSt(ykr@>HVByMjG8&ETdl_XA*wqQ~5@b}#
z@GI#gwC9l+=i;&(^TA1c^0Qkfe_adPExjlHhs*ippa2d=mhUd7YKSrK;K&p<fS@ZL
z5e|d=PiJ9g;NoBqbl28t;E<{~_GiLlS4&o^_Hj>LN<Hi>;t2M^!PZK+2PREfp@!aE
zE{I$~d6OrX_GmXUvQr7`Fr0|X%T#C#jRc)VX;p4RXk<PMDK9JW<|3QNQ&b&ZZWBZ{
zO;(NDHRaMQSvJ~RNqN#t-G&ep@x@fVU+>b6HU1u@a!b04(0rEzG|&r7sK(3(!<1pk
zT%uPm`<X(St#J2=VsG1}gyBsn!U4BQYJ$P~kj>f*2abyfZ3YWGIfT`Os)1-Dp2PW%
zu?s+|8#yYj7_URGaO#uF<b%CFrAaBmp!|+UX~t?Z0tu~U9n|!4gK(FT>v?NlqzJmv
zeLWQIX&?%yz4cBUUeyDbU>nV#pL&h5-V=Jr%zf>cURU<dBKM`@yhOJ0c`A&Wbq#vs
zO*a<f3~WP6`ION!j{%&xq<emnSww{M1IqrfX#iRougZWq9#6_e7i$a&Vi4kyAcA!z
zo6fu5V4IV?bA0P|IFIQSg66_E-%)n#gz;(TvJpEgeJ*y{??6WsX~e`nTVq9UTfo)b
zhKw!-to&Zq=VQ<p$0v?fJI>WVV;&>F{KZww-3CZNb3G-$b2l7bKCMKq21iU$g(;g8
z58;=*!U0|Jug}9`j&Xj=o?q~Xn&}j+zv7I?(j%0j=sD;!QxFbZm(QiAVh7wdwvNX~
z;5dvCM>q=6Cqvv0j1CNTm?!-ec97p+(OWi*qI+3~Z*r`dg2?Z`titb*;(>(iN1lfz
z-gEXEEOkUG&KOmF-;i@lLlul6ujX&b-V%&>VwrR04NUd>e@i24yw}M5ZGMptm;OR%
ztYrjM!LgFFbE^_@m|nk(QUj&#YU91T4QPOOwg6nY1Y%HbvTw8nE}?I>W&Y-|jJjyL
z%fEhcU$-Z(5Hnu$1Dcn6ACyy?kBO;$U(VpI5iV66dhJF#{xHV(N!w=2RXQ@bN`cQ&
z@916%!wfSGb9QL5X34)AXF?7u8|+vo4Z)+Tg5>!P>~jP15CtUUHmLWR?@i!~+LK8b
z4O0$kMyLw>&*HT@F1G||zeg+<9O$?k;5Y)d<CMox%ha2d2()wW%{6kMC&U3u>$DlJ
zv<o#P9<)9Gax&o2?I>8gDFYH7AHb8*b4-fVS!ykXnDc+CEy0DkFnS<})wuc~L>f4@
z1x>(;xj5%dB{^#05p(e;37JpT5ciNWE|MW&EKXni`(y5ZSiP#4L9C15f}UWtzpM1-
z_E&$`qlAz@(g#p`srN8z*EXgdjRij(t`;_H@j3K)0YAUGNTNh}<*@qJQ)iD@Sq7eo
z0^l3UH^L@16h?`Rxct!l011`9v>au4yFmTYuK2OSMDKcaGnwcFJCT-rLO8vdkJQ1;
zPUwx=_=o^MA3lulg)*rD0iLdO?4JTm#RO#s8j%%1pK-*MDxesbqo4HU<#Fgo2C5kk
zyl`8H@bw?17UBjs)boYv1;YGo^)ggC{J2uxlC{)iD~xf{cy_Xf^KCGU`*JIXbuory
z{oq+sJ3e3Vq1RuL#?ok%f33lE9pfeE(0Ii7c9_$_=6d2q2JAG%^St5|aL$t#=(#fG
zG^lr^+v>Pi>kM~1c2=zzaL?Ow!8luMaD;U{VH~!DI51Z4te4u*(979XaS9VQzq%u#
zIt=m1iuq}*XHn6HEgKyK$x{tf8sbkKaIO?Lj-t?pJ<2?f*MOkfQG`%UNR`sq+W-Qr
zhs-n${j{vr2D8$>7wx-ns*qh7H&!nJF1}UXSsq!-)IL8KjAi+hkm}|7Ihy<&?~wz3
z*k2}PYzuwQ&lcTTKBZ);kW^<bQ5<(O{e~u|UmQFgFzhIVBD?x}MuN&0+n%bsPuY`i
zJ@dSUh8};~K@+-Qfj6Xs@Jf`EH9#BE2yJrdcxbZ;*pv9f!B8S|p~hzK{rKrtXNSi}
zn!PPSKvt>D)s?vIa|QuHC0)`yJNa#heqHYsU9GKa42}QT0)Y&aT7o{W1sX6bk1BXP
zAeNHpA-(nF`uWP2*U!f7vo`q)EMc`?)l&H{s=BY}N`!{^*G^1Y6LEz6>&Q)Qx@~WI
z_28oYfTn<v8xf^VzRM2<)@RH>L5S7<fj9k$lQV55Is41@rB(UXDoN4{^}D{=fAx!~
z^B0Xf^*6|!lYp3O*tY^2Rs%DIhj{j<`!81GkdNp2edhX1BU9G$zup&I{u`@dH^pD?
zg?{|5GO*rW9OH1#&?^U`Rm(D8KVUA~Ax3(RP_srX2m(~|Y8}Ifh^6SMovrpOGWuGC
zZf$#Ru0<3cYFv#)cy9U%atpxO?CM{iM8`>Mx1X*Z$ThHz*TbdI;TDi!;Q3wb9qoi%
zjgv#XGs}U_@|5K*XS{J-xo<6eX8x&fPqAnjaX*m}{3tzrU<WrCGP>pI!3Ht?^LLGc
z%u@Cg4`qUm29qG8vwdtq(TpNm%$8<*6l&%7OLe=OpbR(dP#XG(iwmx#U`8+#Ol_7i
z?va1~BqYBJfO@#w1XK>Q0Wk(Tp_<H_sto0{$Yiuf0z^yNhRMhE8{)aYHui*f^SK|*
z+N9ArQA|EKlzbUCp7sA{hmsF8FToBK2q;|?2#DqXk<i*0I@tX89B-D-+J0LyX&>}N
z&}D5PD}`FR>GjOc_ZYk>@5C-)h9t5%5VStrLWw>Sw@+5Nja~SzT_51exw%VXYU&=y
zi(n?(SY2Ipy<5-i#k{$)YLa(?nz-NWv|SVZdGoiKl!G8vQ-?0e$90Zl*6EY*?euRm
z8Etfx;a>{c_Ly}##(Y6-HL<ZP#>xW#fn63^Z6?UEYl|%Go@h_i+gjjERH$UJRtbq+
z63M#nUJt|4=#%-ILeB3lGs2N76jyknde7aSQ`0Hq!@uYA`OF^w=6<KsfXQmDhN@6A
zM5$&o|1)hRN<Q;+F*W7sUITTKdP5VgfY6F#n#^c$8K)N(-B25fM^DLEY9|5Ow1Ekh
zp3Gf{8^hcY>o#i4o<P6pqA896>f*)SoV~CK-lB?Rz?j0krXJY<wqiW0(M)lezJf1w
zn*9cgCz!0<U|HAw8N=z5RMre-c6SBt%E^4X`kSfVxM7*${$rGR5i)?@KYm*RIReFk
zFC3Wzm{vN4Lm~5Zxq6-P=lzAW+o1W<r7C0a9P>NFBpa@k4-*{$|HJu4-8xGyF^^UG
z4oI5$+a2_DUS{mvHPH@d@Q8h09prR3vSdX>$gv4Bv1ScELuvMmgr0zhZx#ov5dzkb
zPZu5Cg3EY;R>o;TY**SZlMOjWO|_8<*KO`h3|Kt<wfVhq3o3XuywzfD0Lp^&%o45&
zJns<p8i^!+7N>(07s1h`)dG&;Us`TS52Dz+cuOp$f`zobvb4kIMXkQOiAt<Nyn!+P
z7(I^iDS`v(a%xHbW;AUSs4+TZAC8B4%F*>b*9da_MsBc9?;Jph9Q|Vw^rnE5_DJu?
zcqjG;2*h4=Zz)9K2}Je-x~N{iuh+{|@vEe(@L6$(`>I+vT$DLkmOV6G4s;J%jn!bw
zkvI)`p&SUc!E06q7_FC23uez|Km)ET9jHh42+Tyi7(mKSs)uBFX3B(MLm!PJniEik
zDMjB2M5XUMw(K;U$i^)!Bf27&q@;5Zqn%j9wBQzA;LZrQhB=$2F+YtI`gcU*U&ImS
znHcYn3J^|O2!`FTW)oB^AQr?@Rmq#h{8SA56H}U+Ej1VA{*f3g@oZUEjE4Ez6Xby(
zOze>GUQAJ8^a}_{vr~A1M4=r&O4!iij>so?9OwvGC1?R*(`e3^?YKJO3k~8Xog{Pu
zr2+Bb+xsPqlU=`-hy)myQRIcfWbe}DT|&@iS$15(<|JF|G#ma){S*y}<{vY9@p!db
zy+AQ<J%@#O0;`!=KN3M(6P=l4S{*q1YccyT@JD;|BcgfX(bsdDQs0zEtMtOx(n+*r
zva1r^0dB}sNg0=jQI%<xjS4AtA9kPsg=u8YH$j4(GK4YTlSr}eF5Br}liv>yNM=Ii
zi3h}Rp2|T})m|WtK0RPzSDrX&(WUle)_=Q@WH5wd!i4ZW6~X90Q_C2bun*%oWuTrO
zXa{th1OJjW9KVBOdoDb#3Os?)Aj-mGvUE*uRQOdCkb7UV1kGS}K#`+fgg-YeOY11H
z_Hy42P`3>F=l(Mmbv2A14z$gH>U`V{R2cXdil4PyHTV>cTS4oV#X+IQpnSRxw3QJF
z$=xIsSWSu8#)cv)w_fwyYe{3_z@2RgMb0C4n590dY7>_{z(*i2O%+YJQZH=2!3OK7
za>8O`05`qViA2$;hAU0<bRb;TBg-Y3#;?rV@^;2i&7m407=hRiI|fv*V4Bf~%(p?h
z@dA0zj$y)d27}?Y$%9gC^2Zp`@!CvaWG^F$HNZcKfrx{(Vl7@G*&zyPKu0WcXNnhi
zz@UW+(TuXuB(xaI;FmEbI$>A+gGQ~GX0iPVy;MwV6asmp8%MOKx)?3e(3|nWTe%y@
zWYXjA)8t~u3$(h&c_<-yZTp65jO*ESVFSTKS|x@WVl(3VAT~JGH0W%%ZzQdP8EUCS
zLR6vw`L<KvP5fnBz$J`}si<g-Pro%6ch@+c9^ffAii*Ul${%a*o-za{6+$q%H7Xpu
zzILy>kHnz(+~%7+^0jN{RKhv%l&;OKH{-`d`Rn|T*AX4gF1(ZH^jH#A$0JE3Zb@{$
zcf`SQym&b(8cFN|+DrE7DW#FfQaxuTWvB8T#}j2)2W16d_>nXNO2<z*PpkGIech<#
z0x5|xVyK97NBD0rvF2~M(g|F#ziaM2BrH2csXeCz{?Hq|M0GhkFqulT%`C0a6sRi^
zNyBzk#ZBn1lTLV+qVX&AM%|AxZS9Yvu|{3pC1&7|+({nvM~sT2k72F<WEHjrI>xGX
zy&jK4QF~j{zA<_#;{&Eq$AA{VQ)chsN>EwKe(OB=Od}Cr7-}U}wk3_1Y3*cPyhOfc
zfPdYjRAWmZO<^{MNae_KON3)xMr2Jz8<crraTTp)8<1CfpemI8D^u(6hLi7Ooji}h
z+oO`$fEaKenqSLm=<P#2sH3frj(&Acr{vp|%7eW4*cL6YfFG(;DOs2y8=w7iN=1bM
zhk7B8q+>=oW~fjtI}>dvwUU7mDVjz(DW0pwQ<gx<LBzpXKz~(8&aJe?vg4Cji*z)%
zLUCDyVq7J*!~P{7xIR%~er}hDW`Hf_wCy}rOj8<8@@Vl0+|!)2$Bzh$7DMa~VGs%b
zpSyiodpYH<rVA^!JVvb%!`LC8M(ZXnzJqG33ZvwKkWFwW6NhED9{!QF_zbdNuu3_G
z<f$+m=j?A;Z0yt?wa8Q*-*p)KQ9EL*M2afg`HkTmHQotb;mkNS*kn@Tu$xG-^MnMw
zQPZD`BC7lWkdg)0V3y2DsFp6rcimY!BKu@z36=$y1qMr`1_YKt*tJg`ancA&I2XM~
znq%~&f8>%=WJG^?V~_NrncXm@m-^{NBa^jNp1v=IEA(HCiqbl?l=O7#D=;s<+YI%*
zud758;gA?<4jq(&(kxx39d6V)@BjnbgGEIp8UGFU+7+Jmb~7Yl1s3ErZUY=)1uS0N
zUqY5P&_s*#WVy})<EFGn8Xut$iW-SU)0W665td3)9~cnaoP)83Pp65|ZWkOx7L*uH
zK?)GN8m}_ZnoCRh(qa4PxEk2SAPNSBS|sKAAhj1JymI8uuh4GCu;q-dR5E{gohR3p
zB7Hc?$5E=7w09Wb_~P%lX55v@B{3|h=t^OwhJZT<h$rxG6S2c6))hgGQh3GC-)zAc
zs{<Rdkhz$*_U~Kz%g|Z^x$yjsVPZ35Eh&{&XF-t35sYj(T-=5tDL}PXbc4qDK(CAQ
z9y1EjJ|t}XfoKn%*jrP^3dm9HVx!5=!v@i$!01t&@Z8R0@zsO=9zEF^qDe>hdPqY*
zF~7&sSD`PgUw(Rke7v>)>lkqu%bACk$3L4y+TEhIGhH+|f{<3islCNS<I%ImJSbN(
zt;WlRnU_au1Xb6)qx@4*Od{4Km0cp+zBdpv-VGX@Wca2Km9XCq1+HK&)P*<$ynqK;
zCiZfk2N#bRr5bB5y5W@)?iYZjFu+HCQ?Umg%_~cJfMo2p9&Ov70uh4NOt^`bq)i!A
z$TYR#f$&VN28iWMPkR3cDH;OB_z1Xl83{Kv=PXH-&o<7Q#Gs{9d!|U?OeWKY0fXCm
z^zA2{k5qYS`Df4xL|aP0-zfTPw`5#fmiP&Oreo59x@_tLMbJEH883+L+3mX673)R;
zh<fX2e{O?U<HysOvLEf_U84lwAuf8nGxA94D-F{_ct=|%zED~04_h}Zy&+62-mNQ@
z$YzVz!$hp@+5n|5xuRbs_a309-_mD#=G`YzVBnaB<7rwTF0c`8OA;CxpWPJC$fke0
zP&L~UpTBr+-(9SD&%S)uriVHHM0q7%PcU>EQ|Fs>KjN*!=RWP;=X@LrSB5D2V4fvN
zZ(=C2DnpcuFLD?6I(8ZA5=nBDusr!lNH3kh#Nu_zJ4UmBte3%q#nq*qu(SHzvhA$i
z(=td`j)6|!e&C0l?W$rNV!^3nNs+)dmFMI<uUj<S>cxGfXS<=opwGaII%fxmIX41V
zHgIFldo(5=hrZh)Y3C|0C@N#dSo?M82LxFfGGzzK;`YG<^Nw))2`2`lO}4a?`Z1J3
zrI02y^8~>*`edu7O0rOzLFy8IODIKGQ{e_K9iJ}L%0hm4^d5#?NiTc6`FEkJ_UUt?
zVT7IMgR~d&?4!fUg^98SsQ7%l??m{r)7|sqk_C+r5cQoiPSl^pk-3+5ZIt5abbEh&
z!^elZ{|T&j%1R)EkA+@C{}td$D%g-BV051#cpz;gyMvx_U)36k&s5R2VHVtTr?!h|
zy(hW^=D3QK=+>+?ef`OgKQ2D+o40)o`_n?kbf4MtxseUUlSv=nPeIvipO7OEQPY7b
zF3b16<phUh2<3FMO)aIH#1FeV%=V}0_iM86&=xNVBW5(^Vz(DoaN$pC6v(rzPALRF
z2Q=CIUCKzp1oPf&gWJfPk+8%qkDbmCio6=B>?(}iN1gE9B(g7<g%p`Iiq_xEqbs1#
z%$ov!L-nzF0(3UOz6(zNIMw20aiFx*yid3X^cFb+FExa&awq&_11_UuHj6<szhURR
z8o_1FASIoQ-l5x?UfZl#)feBN2r}AYY&p1Rr!0&%#QOk$AzZ8Yb~}$xQwtnuAZzFY
zWTzawt+!PSs08Q)wYSHB8`mO$P|b%^R@D^id&mbhed4g$^_xkye1?TJ>C3lS{>d{<
zOj00ZKQA!}3;BT1_lBb;`IibxzuInRCH=rUJ=&T(F0mhOR-}8HwL#PEL7J#f2Op6n
z*#}AS3I6oJB>1(ar($%x6&MQ7=Z3L-OCLW6xIMk^3w=jVIpDr=B~x^dQqS1-eq_to
zVw|hNa&fE@L1bPlj$F?+$o93*=vS(gZ^Pa1#X#<>h|Acyyc1eri*$?yS8Q<i*ArQI
z3QMcHHum{}w%?j4JATaY8xo_B2hX%oCIgx7K9kO(Yqm2WFpqDp@9@@aHozD!X->N;
zM`0;E+LEjVn6`r}lPwa!;ulA9FsY`fxeWi_8W-73s96p%N7hU#B|4l^d0SDzb8Jk`
z;s-MGMUn>G)LM>RO0@gH1m*%*)$5Km>9Hn$2h#ye-mrc8LOQ|N%r9{~Cain9N$86J
zf=}u3tIU+|Vr+rbK6pUjnZ`woIhQB}ZO(RPxQ*&3PFsugEb~pA;$;u45=FhObCVN|
z#VoU!N18_tn=Ik6AFcD^baU@id9!=pRIbjIoNf46>s#^B<@(;H8kJ-UkuuB&!hP(S
zba_~6U$Rl6|L{E2v2$=Ugo1K|`Tid2VPJ+W2Or~X+_%*u-dy2_kgGv{E8s})e%dcR
z{7Xh>{OCyPXEPh)gk{qqwR8&rZ3U-{&$Ic}R_*Eo=iY+1PL<%uHaxc(-C)eL{p{%8
zpn%I3zU#W7jvtD5*HBg9EXH)wdUkdzsCFPX>s<ccxql3QCsi(p-4-Qv?LhEi)MfD6
zoncYAt{1v{u08kbl^EU18?6*YQNfTZ1gTZ}%u?a&8lwBrf!lk;DsPgPv4blRa}bQ)
zffqNs;G9*u?YK{_jW+zS&lt!YL3Up$pqb!%Whrv!wGH=Ya2ce7Ev%dImoA{nkgX=;
zxUd#MGPiTwTM@K8kq2u1XA6{%jj@GB<*LN)S*N2hALkD`g}dfH@bY$pe6N&kt<E0L
zujKNZ6P#LShhVL)Va@l|G;R8<yCdC|Ry-&60hEeI<D&SsJ0SOzt>wn0&kKh#p4e@}
zCh3Mk)@qD7OACZ~u4o$q&NcJ;ZPalT1kb(R8>8LJj+Ev~tzw~VHpbq`V15r1q!TM0
zFt2vV95=*E3}elO-*)uz2^83RVMDY*_<27^*;#fZ+=<0wPE{b+D1ab$igRu8Cb5Q>
zhv}Sk7wj65a)zrWTGx(8IzCthdgv}rvq$W1%%O))0dxJ!q-iWhRq(5;70#ZeG6Zu8
zs`Nl>+Xo-f%0euVV}WcBI+eU9h<NB_E+Q^1?QlX#=k6I-!Z}SC6bX~c33ZQ`mqG*1
zZHrh;r^gk<o*N8-6~DTA`{NEIP<(Ay3ATo*r>fq{@)v+341IjmQ7gMG*^JCDWER(S
zCyU9hbuTYkIQAm{s6Pr@Mp36ow%U*{zQWt$UG^^aOqJ_m*bUPOUnGp+-v{9arto>m
z#ivo|1d+5Qmu3ez3Fz3n!Q;`+;x5CkpQ#l?zzXExIFS9b|JY#E1iUU>=IPx5OfDsG
zPjK`vEq9^RZswQ%_nR!}c`5W)F=&OLp3jaF!_wVXbbSb4Urdy@qY2h1fr;e}Gu<50
zX3e}Yov8TuY|2phT-lzh*7hwN-UkD58eUPDW-g{w^B~8aPcIJpHc^Fdk}MSS|7;_)
ziN~9_XN`|>!_ZqzU71L}xqh(6?|s@uT6}p2Y*<&#WX0CW<G^;8k~tnu>2@D*MgQ0Z
zs88pV4}N>mt7b?dqk7*5ryrZ!;eVb4a{5#W1RT4-@V<VIp?!T-cT4PfC$xoYbwKH9
zj{8!BM!P3l`_R_sD}bL;s7}ZB?@bUrLuKxqehmNIx_8Apr_$P4XEjd7?Hd9pe43nL
zWu-i1Mb^;6*V6X&ZjuZbl3}ialAF*y-~Ly6r-c6Oqs#>al=%FAO7EOqot*5=4P8w2
zZS74=ZT_=`zV-cb-WE@~xUZc00g_P{TPib9(lbW|WYGFkarZXc@K0y|PA!5A2BRW|
z0LB3_b=39q|G19-I~Gu4^3iq&Q0!&~pLzbzad~}yzS;BcsHqN5uJOpgSRXW6)kz&K
zzVF|Lf4RE{UuKz%njSXToO$sZ;oSFcZ*G42avy&Z%bQy+ip;K|3v$AkS*FNKm&$^4
z#4piwPE{McY&Jy77l56jdAog<RQ>wyw&|m)rg+0r2M4NOjW*)euW9gVRJ=cPbT0kw
z@aoaX)W`7LJlQ1Esd>O$(>a-a-Jh*;B3QY9Lc@W=1awtQS!%5>s1aS|nT$&LICI24
zwI~B!TaL}H!$O1ZhnyBoo^1-MXnN+MKff}0XkvJ(39-&k71>pB(PkMw40)0ouoEVA
zh6MEEvn9J|4UO}VdPO;v&*vct=s<igbwpY1_>@59cRMg1lRxt@kO+f8zjHo*r|9>~
zGt$15*J#mq01drb0!ln%Ya{ck<Taz$e&{Hf7^-j`(a$}z5J3l6^hzVEnCKma#7dcH
zF{UZ1FdtPH-?8*_4{<dwSQPA=NTlpdkpKKjldM1Ht6H*DrFRC8&|`?c;mI0trFev^
zerCxUbwwQokVM014RC$J=;!tL?a}0nNACzN)jb3WPDv{sRQ1roS`cu#d4cO-(We1=
zXK*xsQfZt>q;s5rp^ZUAt4Rs0H7{Sw|ItSu4INgzD2Yd+S@gwJjHXUkP8wg*@B5Qk
zS#wN1lhuCDf6KdRrU1HFg&rS@Vv>Q%Jb{V|?9{lQvCQ-(1&2*@U#59<25#Vg=$e0h
zDKY-8xs*T_k}tM!=<{d({O&IJxC}iU-ebBzhBkm}E}^miwP|9{B=qw4yW5fg)IL7|
zj%ToPfe?>HHHqsDR1o~jOA~Nvx>FJyPzI+qT5n(1#AATPW*OKG3P_6%8)QbZ=8?mM
z_W}P+Yz}dHGVihl=4+<=W<3BDm<K-~oES`2g?Y)AZK^^}|7e;7JlQ;nvcEbA7jn{+
zQHH(-i@{_nR8$6-eK;ZkPVvZ!<`4{u0jFDOSOpH~qY;FV!&aRR$`Z<KGQq|_FCuK3
zSHQ93N0+;)4F6Hng+XYP;+Gybn}>$Ap`k5+bY2v;%Uv}6b%_uP2fYrPrmfVhRk8r}
zEXwI*rXo1N;tB-{D6QmW`(%!w{cIAdlTQ|bhr#ZlI>%Nt@Ip^-;Mt`+0x_3Z@OOW|
zx!46#tF%fQoKUAt=#+TiDSMT4gO`Y!5sF%pmSe2|hTZ8F#^O^je`E=-)FmZbTpwkN
zRrC<bub&#ZrD?MWKnXQK^L$ojPP&A{q6#KYpGW{1KJomE%q1&gC2UwUvhJ8Rp3KLe
zVjMqD1*g}`#S3-`D#Q2kDF(Ad%k|MP!lR>~E&vr=UawCGq2NeBf~;pgX^NAPB#z;W
z2Emxu12F-|#31wwx8gF$9~LyB4np>A8url<3rfjHQjlZ)jaU@e6YG;eJ=4S=a49wg
zB8U0o9nti}#FfRk1XFgoH8h6Q4+_(V8q7yM4UzsSHqIoNy01j#fQ%IguAocyw_7ie
zmFm8?`0~3h+*<A)Ki+L9*2-a{iw3yJG7m%nw0a~!1G(f8Ed*4SGcVy1ev00xL{kkI
zcp78{b3soHr(n=@z$65juE<e9_l#*7g@0LQl8HOk-j*e%Kn&T^YM`eo5TgWovQ3ZS
z9CZOv7>Nc?6NJPEz+PdD^en^>4Yu;M$ZKpxeo4+55M`EU_-f|nU9JU2HbHOMAJ*x`
z2bw8;3VdSfmOF$3LrOr&95cl}ll|FLQ?Ty*i9nmhjBnsJCv<7xqY^M+z@BJeb!2^z
zckOZy0RDj_=Bl0Q+A9Xo+0psk_Iv!XLMkCzt14i;a7fkFBaZQcvA*+Y2^XUSHdZ+l
z3Aq6c$v8E&dHxa<;!;aynJ)wTJ4}FqJHP{34etzz;)lT0%zuFr<0tpd*Vj>Ynfz{I
zd0T*TCu5RigRsPKnE^4+`mc?bm8jVaf*}(kKoTXxK$Q+(hg$;w`;C^lEHIKM0La31
zpqL!yyauQhKa`(l1^khPtqr7>@gf+3-@pRL6CT05Aj5%*<M3y89OWhj5DThT{?8#F
zdZfE5<Qs8gs7PJwfG|o#G^&LtBad*+V?>CgWLO%GNYq|8#RvVK-17IIhpa(C#lNLr
zkjW!BRP@0l;4{|*-3ZuBj=#1kRF#U2fj_da=GDbu($~lD?<r<2op2+M3xqA+bAW2E
zTx$XCq7jZn7v=FmdHzto-#ayw8))P!1r_+<0zVK@F{tiOrD7lkY-vT2Zs;#?Q87Cz
z3RbLQh;Wz;+|W#hcjjs5j0!L^P;sd=^h+~zT1!KNDo!S*3Iq^|#f`~dTx0BYnT0gY
z@b9u}(z-I7<%Q^$($?H9u^q&O12lUlv?)um%Dky+Zn4g-^4h+<wNMVdIrB=-Fl{Ut
zK5v*Co0+NoLws`k)CY_*-0Adk_i~X<=yZ^4fY60invewND1-%Ml#~T*4f;vg0H;=n
zYoV$*R}nlt{ZU!>u>6o;%VAS*C}F%AHE8Z_+@$hJlGy=YflKR#T3%X^N}j26+Eqk#
zZr%fV1?Bz6<ZG(8DIN98h%y0T(V{=O-=I@0E|s0%FwG0=9;tH%Yl9m7%T(4|u=|kE
z6D}B{$O}!8MFNtkn~JpCBI?Wp!=ijwe`CnBtMOg~SO=AzRuh<Mp#^~MX!r@>PrYdT
ze;2VpT;72iz{WC5&lj5K#ep6T8y)}?zxm3N+X#oIK)0f^tcf~Jb(D>4loqP&DUFXK
zkmoMZ_uB0gHL4Vse#K~)ycmRN&1fv`Kk3%o8yoB7DAQxx^*cy^j8*=bt0XO5)`-LP
zS?A-|G~;F;ixV2$0(a(#Ohv=>gkE+;y7~Gg2*)e8dG*F8roldm&xDU#i*F@ptWQO1
z$qzT|Fwn&~=abS|ktU43;+_pb-ZQCLzig(7E`Cnu7=c)1=6wBB+-zPSudt8T?`1jI
z)Uej?7QDX%0@!BjC?V`TyiW;j(p*;*GUu<sDN@t?138GbcFt1o9nKL~@xk_$r=|~s
zl<b;38yW^*9mbKH+6>&nRI2rdPlfG*`beSPU<THClX^1%Imj1*#yZ#yxYkosmvOlZ
zl#I^bLS0BO+eIn7H!wd+A!)<`ghaLV7tvpqg&vuMDv*#5Kn#y}W7EYn;Z=tv&0st~
zd>y9)lq4Hx7A~&stF0%ShBm8>WAB%duPKcb0qRTGOSBkQy#0Gw4Hg$v2Mg4hK+D!I
zSiKLZ+wv7w7v_dU2Q3o)HYkW2m?NRiq_~BE%UAhD7THAwr(E)1#gbCtZRQObMP!^4
zy!#qH2fv51VPtilS)lWkqtg^mR-e?vYt#pSfC8Z%{zSdENiXi}6u$T{az*Z!3&)!S
z;@O?18;H7EyY7iCNFA>}s^LI8xq@Ca{y+p$^J-l~#By^amxm79A4=)y27xE8<FkrQ
z;tq>jN8^~As-m0{NTy7ST}h)=Ia3o!K%6k7u`%;NaD+8=xCR~T1yKE3<t6n-l+4=$
z>3`(;#0bg6%k|hyS#)))#jEq(*XfP4+WPL)t;L+*5Z^CThQWzN)GbgA5$({O?m5~o
zB>C%d#j9Epo5Z_9>OZQBG;8jTHNG7aVZx%#*yI66r^FaP_|Ynt;q#LdOYK4q0Q5L<
zf^Gm#s?-DwL7iiWTqe?lW&EHbp`_D^zqSATVw#pM)kq4Z2_zcuZ%=x<kRa~QM0u2w
zYD#nKs%ZYy{E!tAqozF^OiLYL(>{aFJRcl%7G_%kC^|j61Ipbxe;0ViDn3OULV<`P
zciWL1ehh!Z<0o;8RD@7()^j0)9O62+Uhw%x<;RDf+u(AD+QPK@DhAqw;1~5PsN)pu
z%17+)`~^CWFmMC{Vq-$Q`zj1>jV}!s{`ErE3AyXKbID%0yJfy54Ad-0*mPos<wLa3
z8p9E|7!3-`s9*fnP%w!Gz8YuOZzuFhct32werg1l>&>LquY2K%a_LYrzDDD1Ep*Zw
z+p_QSdALSYg~l&Fw1p3m4#+8)e3NUHh4fSGYZQc}EsUSO1^;?y>JUm^DJj$+UkJn5
z5dAgk%?qa{vXwIo?-dZ-0S9V(PTj5ZtMMjRzd6j7%~lPHJpSAlWx1|8<9q?j+x6Oj
z9ZJp^_9}7q7ln>-L|A*61CjuhTDwqUf@w&W<wkpQs!LZ}4%_BT*f4>^;r_S+z$(+d
zd`_HB;7*=ez&=<cMYrcek$O_j2(Gl2d^~_W_t9_Ije!&V-9r9*`Y#xo0LLzxsfP1+
zgfNFjOq<!!Siu<#Q{gCP76w)#ztr1`U&w%{>?Fcd0eIRTNC>6ZM2GDnL#A(gdsTPr
zdHY$^>sn$u(V${mO~`>W*(41}&dgP{GnM;dTD+<vg9{{q=LqcXJr_`|m^^d+U<%~?
z?rKD~UUW?7QqPfZ97&p=%-}aQ#KyrtGfl(w+!W}BsE)O4@J@Qmr9}iE4VaSl!jP-3
zal}B2t%55Jh5eUY!ykb14JI!HLZI(N53J(v!4@#_Z4=k3(*^syZoRU!(yz2X*-weQ
z-xO66M!yw*m>DA6yJU}{D~-npu{HJwQ;KNN0$Pw}s`eWm`ox>9ip$L1Kq%B7is&#q
zl2sWP>oxWmJW>{H5rg)S=hu8f@}Q*dUCCzMzN!3#%RjTH6!iuF^89QzT)A&G!<g~V
zpLT#gu=4w(@y1Ps=6Y^zy|*Io!RIxC+gqe~bz^k}BRkt}>Xj0vVN-n?dLwaWfQXJF
zl;3e-{e4?L{=&b#-}d!&OO@XzMYzaT@0R>JB^hKkijI9v((mQMHNeye{^K_IM_fCX
z=)6CeVzj}s>9Ob@ZICJuN)*oL>v{8AZ9y%U?O~>#Ai8}OgBp#4UB22yI4S-Y8DL?E
zPyU*xPuBOd%S^}55+0VO#0VBR`vTNo2j+C8mVcvQiJklc#n^roo?ZASBhDQynDN9#
z`N_;2G@v8TEVG){i^}fVO;_TUn21_guiOqS3}L<@{a7kR3P8B+q!{2h1kw-ok%#VP
zWbxi03KA#9UGA@V))7Xx6>ywZY4)dG4n%>Kc^Tx(%HCEGUBI5ysYe1;M_>-H3fsP;
z26urBr`j$x*(yQT0dqP3Z9s)}tiD&qq;t)?{YNDTfTWw_ZaK`-4xw@1Du^Z>%WAE8
zj3a#u7M4}e4g%c~IG2HdVn~!6PFBMOwWPWN-(`21@i?Ejdq8N^OaLbaL}Y^YCs>pY
zUWuGCk1zshJgjAi`OeJbuVnR7tRd408Fj$!ME>E{MUD1%)4~K(z*2e#I3o|Y8bQ?r
z#Nz!}=J@Ktb73#)wytw-Aq-W{yjSuC>VPlP`%ZU46_d}!oi}xgp1#5CI*#x`fvd;`
zLMl$LTWXwT1Y$hwimlSRpsk4xU!`RqkW{h0yJH+K{UmxCh^wPGXt?U8+_;AHc!#I#
zkSoXJxvfhWo}}roi-@zqzN9Mp%n~h@e|ny^%28r?eb<6$7u_0coi!t}#zDQe=A4z|
z-bJR2k>yv*#29b1o3i-GX9(*u1c4(w(0a8y=t!zu;|UW@+aQt&O3LTI#1WGxXGTJ@
zi2Vi?YoG`0vDVhb<;DeTObmlZZ^UqU5tz6<ZLRy)I*kLPzL2xGh8ruJSzKoOJuY1v
zZGMo#kA{0BPufTg$?NQU_-{5^Mk5^Q=2<!OZfc8^KnV424og5<=aO`U4AyT3xv|pr
zbctud1~ZaLy}m8?_?dJo5@?gx@1M2hwJ{%2EhqREiLTb`bsrl#AEO6A6F3|r=A{0v
zQ{abTf3-l*6Uhj)e*8&9+QbjljDzOsMWZXlBH#QfYB;vkbDjU#Z6!G&Ot9m`KZZ(W
zF+4~R3b+_m(Tlt&Xx_S4z3xcVLKq#&g&0qYA*4dJ7i?5E2@73Z)~DEXH(!^2<eGfg
zJ=92BRwwSh$U(C<n(f!a$v)b2O^(A*U+Cf1q#hpijB6M6vRfD6x;F6NAZnPCT97gr
zdN1f$lG@;RHSk+#$)5afYXrwXY4C+*)a&(n`M_4&k;OxXb%dRYz_}K>&q79RAIPSC
zrgJ^YevA96J1)>Wf)Ug{`x|95%y;`hc8}xjtpCO&v_o$Ti|l-l)@~R=n*yTU-6s7b
ztmvV294Y9#0`p3cDCB`>eGE*y4KD$*??N2y42-%w>v8@i>khdb!-$-Hapza)C_1p2
zqyq~Vak1@ngS*{(G{RsjktUYA2I0dF(etzkQ4+@xp(5sGnSyp~s?y0TtN^vV0AXTZ
z!o+daqMWMakY_*tH!Zt_H3Zf62F2O5smywpx3(Ma2P^vN6`$UF&Go%u&2=VQfg>H~
z`e;fE<tu5*Ri9v2h|qDmobEeQ?X=4xrRdE(#0$UEN6a&{y*1!q@+r+8vv(K-%=XxG
zcU`_A3cR;9>}^M2ZLQdIW^UKV#ul`#kp4>cibrLMgv4XH-2tylJV!kom>#TQ%Gh;B
zBbTGCcx<%Mv%TwJY+|9erE0&>1-OhzSc<YA3j$%THkckCgEpd&$6D>WCk_AWE4+0x
z4c&@tJM=cv@Nu#808@t%o+KkHDolOx#C^bbxpkUbC=_(L^B4Y8Wo=9vwf+qp;oqMs
zX2N!;j{T)2lG7tm!u;Qsc>5aq4>p&GpvJA)<DAE>T|p^Gt=O50lW6p=Nay6@)0lMk
zj)FbtG0nF;jKdUnO<Vbo?#7uVeUF+qg%LoJ_?C_+`zBAPJ8+C9+9kddxR%d%X*Xq&
z{ebup2R<xrUGJoG{1$zNg<r*5hT{*H<kON-g@P(W=<=G20ty9!T2*aAI-wU`g)%(v
zVre6woeS6B78BvnBYT$v_;gw{+KHcNSt9#}L=JcpwiER?r}}QGB<-3Pn;GqMZ3jMM
zLaCCX06;AZbHSlY!qN(qCOv1*3a;+T@lK2N??HxYP$zDVG3^s5D^_$N2Nvi*w~_p8
zVeS@f{10?sW&%a_R0(ji0j7z~YdBTH>7qr;y3MA8+g*;j<<5LlbB8xATn}N&4lpp#
z=iSPN&eHlav0oH?aK4|v|2{Ssv;OnH<Nhme<KkrONo{O)Wh)2)#dx+4@M@-$4Nk=#
zJRo#0$3pa%(RoR%;t59@#?V_`^t%QW7Ynh1E8<60PoL(X39(lEq#a@XS%weFz<EaQ
z+C6l|h=r+a8hT`%Ith_B+S>Y^(nUIzEH)83RC7kyhH#rhjEs&ie5?iW&+M$?#ALCK
zO&#YsR2gtC)Z8T%O%0`blj=Gocc1k1e(#8kb3PFRO$yK0{l&^>T$rr-Hvhz`*gMuz
zaL20{d%>6W$u>qocgB(~?q?thiVsxFGXp7r-ii~2-h@dg7U}(6CXTu1%pm?&KaRCg
z*i5U@ug0%By!Ufrf6i)(_b@6x=LUzo_(P@?1|y~UJkrwbKCiNDSgL~bWW&^CAg)AL
z!lHG^)K(IMW=2Q59bW)E(4_I|i|TB^@i>C^iPL9$H{aTMUmki5A8$N%NGiU+?Yh8|
zqkrHhylr4l;|_5ovKl`LX&@4140c+{)X0^kg-64LDV+$SeAv#_cM#{%X6J%jsJ8fR
z(#LWIauSKVF*=#awyXp(TXhv9A8t<%0MU9Y;|VdG&EA{0tNtP{%}{>f9;L727B|YH
zHg~T`-WMffPYCDli&b%kh+1m03dN#nKV#3(GG&L0P=Qa(Sd|A${xl#@7?x;XtRw5U
zx#f0UJb5GK;Wq;Lt6srx*VWD7ydYbBfD=H!K4|6}Y#w~c@4NT=!l1%5Yhnb7D`%pV
z`t>)~s_J0-rFJ6$ZsjWvdxDMGi>ASS9fR2tvVtEZ>Kj?KH4dg`3n|C%yogw~>!^`H
zWLxFPH#^hlNgy?r3%<n-qTb7dtQD9ksa&;nA*R%J>9cw;+&Mj4%nYQe{@?HN`Xf(L
zK|y|Kd@JPZTK|L`U9Z8hWTofHQMvLPZtpi0#-bdhb4&<?=T8fA#arV8Z8a85_3K5*
zlUA}j-P>8JtM6?lWx~=G5#^c%sgD-^{{B!?z5-!=KVKM&qy(b&O5W+qq=qx>_+UGI
zWus2U*Y(;$kx!!lyx4~24}aV!LBx^#?cm8sM<4{}1;sutBu0quM=0TRu(f_!R96b4
zZsxe^5qx8+PqoWtL3{Tl*(cn8EFmWu*;Fyy;AB-R2Wra6Z08Y3AHjs_SZ}mhGd7_1
zyy727W@+oJoA8Gxj__c3HE$Egt>XG=i)}|FyCbiT39JD%wJS!v!Gk%{x70AjxI<Er
zrP!DDvJI21q{EjS+`n%39130K^K)%kF2!H;X1ScKZ|Mj*OK@XX>-ZCP^-S8y{Nd=Y
z*$en85cXnk<J(f*n2x&Xj6mDw{r~h^+pZmbvK({==KWHgFkkOQjk)8Td|dJO99&GO
z>jhhxX5Y~l&is?>Et_-7KVq?^*0|Q?{rA(|xK)mfx$fui%m5kF8)WS)sZFLzNV_vx
zV>r<wn*v4zoB0CH>}|tp^hFf>^s<yv`ENxw#@(fP*QmLmyIiPP8iF@5fzvO9*Qh7~
zM&-d-V*4*PgSkKZ2=)-ey^9WLU3caJd!hpe{kmu)=52Hi;#3zIq%`Bbchg9i*?pT@
z4y+r)7nLxswZVf2-(SILipU@R9%Ug*JaOJw^Zy8lZ>lHhDV)yFz{$jJaZ|6cpoc8@
zm}qzGt4($<8DYJJV*ZxS!r_pEW<5a!|J+kg#APlrGo66~sPWqoaI$dii(VEy(QVgc
ztGy!iYq7;6668!m`60M|cBC?czZ9#BXS8|E1OC99-&GxFC!uZ(_+ie?<&0vXHE**i
zZD+#ndho;pU*Ymid(L4Oj;53xl(bkdHDc!*E&B!Az99At%5Y@bQdU43!-jD^Sp_S?
zwl1f0l9=a7|B1l<+?WsLPB6KHjS&?c3Cp21ZDKJr5N=+tYFYjIj*3Sz(92e%K&%GU
zR4NB7>&wI$0@0S%ax`fZX>}78AGv9*MyM}pZ!;9SCEBv_HGDP~w@K~czwYv+T)(%D
zsgCm{pQkW^2Em7{r?}@DsLo4F$uY@a&(wUl;m46kP7x*OuGJ3h_ZQ}W)b~0)ezvMJ
z{Cr#2f7bW@KKS?bzfApX(f<6qpsm2l8*ccu&gDhWTl(|8j#PffT()APn?S^CJo!us
zV%9f=Pvko#d9rzCtm0UDS1h7WKRZE|JsXfbGo2*rr<E1#qrH%e26Z!&I}{b)BVhs8
z@Z)+e=P0lSIuwtBjkR7z)okv2%tv)+tM-;KRnsOb<;!`k+1}-H8@i0o=;8y=d0N^+
zrljatPJ6p&-W_V4wd-)qPkjsSG{Cw5jkz5N`_p}r*Xn3d*E>OHr)A_mxUMe{ko6jK
z6KYuCr_9h)m>Lf^4Sh?&Lkb9;+OFrhl8TPB!jc!)-#mh^9=bA@;C4(%g1PEhZ7SJx
z#s<*Wp&Q$kLrbLxEbOnv0I=gzm(M=?7(H)7WkKO;%w|#$kL-~B$){MO^F&sW-Vq@%
z>-<#bR6L2n<SL3grqs>lEb-RL<Owk?%vLMAaK?f_PSHOC^+04P8N^jM`N^o)$I19h
zo+D@GQuLOVa&P9350t7)5g8*}S=qi8UgxMml`=EvpET3XbL6596%Xgs-mbZr_%i>i
zv9o}RYuWOC;|{@H0|a-6;O_1OcWA7!5FmJPcL?qdL4!kZ2*KThOK^v;^WM7i<}!D_
z%;{C@boW~P>fcqhtBQT9YQvywT3i$f$(NeKNaSl$2&xZ>KW1fWk~HD>>1GSW5^q+W
zrT}z|HP&&DCDKzKTw)P5)K@4!U1S<DX%UFhCr$$ImEma!6_WI3*TGv*F}C}{F!{lg
zj;(aJxJs|?11uQHa+AEW@NX2%36o{DShNGuX1dq?pMJU#9mcBJ;w)*|<fo|=8jcZ6
z)g2G{^D``KQsTZ@7NNwSMQGNrJ(^6;lxY<jm|C{SmK!c{Md+!IHiZZ$Esqrxj_S3>
zJ+;tTO~6}>zK|T%7c5$tOtgqM+;l7$ft+`3z(AAoLe==ZaVj%|tq{?Iz7{UP>qM>_
zAd4<eX=Ux8wWaXb*=z<;r4o8poAFNSf+Tuwrm+9?cn^pdclFwbmU7Rym7j>x30Ebk
zULlE70yXqkQ?B?TR8c~U=u!pab?nmcK%a=x)cRJPuLrt20*NyKsS-2XF}Qzh+*YyV
ziIf<*8>Hf1@BxF|U$p7w&RRO>9BreRUgI1~V*&5Gu&fN{t7gi5*FFA)(lk=hMY*Gp
z0t~C0Q|)*LF?tZt-~bZ#$oCW?hL?*7e#13{L8FYDk+q#7qwSskF*Y}aNMPQ_0mfpF
znO`{{NGY48Q39e9I40wj;0Rp4gaLh^17Pr($}F3h5yOMGQ@hA+zKPVZE3MZ!R>oF0
zhuM};JK8*v^{;Fsmh&+(8lpITM;q^?pc~p~>NHS@&-|)<Jdgvq(Htrvs`CZ8X4p#j
zdJ)Cd+^yRh?Td-@ju&Iau|Fj7hg1JF**Vi(IC`>-#d=~kBSFmU+>kubIFn4XYiJfU
z^nG5cM;MiiW(q3o$h)6BK7J>;XeQC@Or1Rabw$)iM`y^p8JuZImN$6Xwuy~Fue7i+
z$I+FM0Uh;Z9)T)IaYZ8ppOU0)x@y}i{Y*X;;vjD>62KSQ+4!*trLinI&B|r*eXP{q
z+`u`Y05Il-F{{mnG}8l*hZFFNi6@Or>94M+vT*d}q|#Cmpv#ukZ0CZg9JOt)SzM0>
zf)pJ;mlh&duOF6dPNnBVjBMWRcv~+N+Pg>#GREWUS$Kv&PmxKY3Imb-vmkbUco*$_
z#+u<0&N7@6n)Mu^6+}}CB;;xt_&_hPOVdP)C;A!B%D6gm+?ss-d>YpBPSk8~=-{l4
z4f(YN6n_qrT~=|~j6F)TH0w1M=Q?m`@?p(ZkI}kMOvr;}LPBHfqtsK(HxpskhQ65r
z{G!73f-^*{qNerHcw&}nZlZSN@7PRC9Oe`^`JdFZZ%x054N8gR1%s4hWrw*+u%3^+
zDD|m-L{00}hPzl4QuT{N_wUWufT$H&9&cM)xAl()F+l283-gmb{iH`dC6Kx{1tp#~
zjpkay;Rae;rU#+w$IfyrH@#zraCzTDfJXW4QZr40IMx=FOs3}=St*|gCn46_E?lW=
zQDL(#s^yO72n{}L56-DvVupP{BZkdNZ4r5vKUPt@f$r;e6)u&<OY*^O4Jqnb`>}TZ
zv6A94{!B~u(p8muftz+Wfdn>7(hC-eG&p+VJMqP1-tODoqswEVhMr_DvzdcP36$?e
z+3vnyNCn{E3re1Jnk<P5`z{$v9Q=wlvBdiV9iVc|i^c#vrQ?0y#11-cWS3e*+cC$F
zBk+42cIgyzxC`m~*6XmQki_h(AX*jakXo-nHPrQdq@7iTnIItvXzhGua<<B=Cp*Ng
z^{Z3VcZRP-un9iqvE0>BO5k-3Q{Em5-0v==;ChU{oJ<l{e~9PwTv(0O>h_0mY)-a3
znH2a+H{9FCD>~Cn4T8>8R%Q|BU@mj2`c$R$kyR4OcO-7x=EFFIMPh$E2X5<+>6u6x
zP&tUlP(}#i$b}?c^QP@X+6C<EbqoIItA5h0XFOlkHdEgLW1ckcCvH3$AwcQi>5!y`
zQ!t#xac~QBgsgwsPUrkmanV9c8C1h@7#hpwc|U3Tt9>JCtFIlTPmiZ8b7<>AH`gpF
ziDav>*iu5Mm(y6AH^Ef`KF*UEFc|N=HGDu0NaMBOrD~RK?XFmq_U3Ld!W;dJrlifq
z@`1xc@U$cCCKN@Cx-vIqdGiYE5&9#87`v5QPqG$g+82~1Li}OF85KTN@z@5TobC3I
z6tjjS>!@lEXyy<ZVWUeLmLCJo3}?GnWoJM7t>h#dfg01eCvo`*6noIAhxo0oi2G#*
z7b`F!=Ei~4Tb_`@J9$X8h+}9><!$yKp0TRa`oFQ<FcQJS-QNoZ(GiGAPBs&o;=RQ=
z&&>Z+|5hW(h(*K0kfq!ryi~t8nQy70&(;mVRtd^x*r8vt*b`!ArS~ILh%w`oR0;<b
zes|VZHI_YjMC=t+hJ*V022N9{xW3L@)fn-xXsYIil}o=JvN|&-`MfbcZgO+#5;^jd
z7QzR-owKuKs=Q^ym?nQzB|jxplA~*1Jis|acYu_ySF8K4-H?t|T4@+<79Olnl%s~F
zCivHBrs2Y)mEst{zVE7;gSBzKSJ*pg*ZGLARXG%p+Nay?rcnHNF<Pumjs?6cC^7F2
zwv`iRe?V|ZP-mbd<l{@anj^6D6K*{IAo1-&5Uj41o6@QM(i6x{{-ir&JavlKO(I{7
zHH)6{Fje(_oS3_8<C&-3A3E8ypw?aIYlU@Fz0Pi7*`<dH2SXV(8OiiSFxC$@!A&P_
z51r6)0y5@pO_pB**yJd+-d`7D!^Gg~<i$tdQ&>dmGUF#lRcm^?(@OUW1%3UoT>rMB
zj=RV?0z=u=RjP+uEN`*0z9729k2n1ShUkJ~BX%pG$EdOGv}RG~V8`g9`rUNvEzY_o
z5wOGDQ|HSPoX7rR+u{IeiD$RpY;IS}NR`_B1`dBZpBcYF$$P#v;eI`@JVt&~`nR)P
z;T0u?3;{ZKDg|9CAz@RsC<~n{7in6@F&Kb7qMoL?!M2K~U?(O`z8uAh6PZdCJ$=<F
z35!hFE1HESTYn&0$ikZuMt^YN!6r>8YnL@_WSq)ck*wMv=5zg04E%n|!5bWXb|4O~
z^gOj8ioz?p4*p>Id}TDHo7y}lROcMFXjn+dH-eHRuy1fow37I>zm1!?hHw6u8QDLT
zmSs3}(b1>=iDAVyNA-@F_+i(z0KW!`$oU?Ulb1j(EIhSGxT`jRJ$=#_uJXG^*gG+w
zFZ23?pouv#^3<9Ws8>_LSI0kP^BSnr)EQs^m<6kPmDbvEuhE6yxJ=Fo;y)du*LjFZ
z@)?RxK<pE#Lu;Pg<!2V|SC`7i9*;@n)I?ulHRWg?Vp|KmfiyUbQJQGll?z+$@MZCk
z&b0M<V0?%}W_KugFQ)&wN}t0Og_8oU*@#<(QgUDbG%I2#jE|gMD?BPWvH<Uk-x1h+
zu~Q4AOx%D}JDF&&G$y-HQEZC&I1`R_9Xt7pEq~-43`u}BT?&cE5TWKCorpF2_ppSs
z=&p#=$;4KgAj~4e)(kUOh({Ig3xns4SDm44%DBX0@lr!AT@3eLux!;M9D7R{>aj5d
zlqPp?u5^g*9$c!G8oQMC1)oOCme5fBnaHHX_wlK5X1Y3KiK<#tatRzSjQ7OPAuX68
z!qwnHxMO3QHrIakJPo%^ia3(;(0T~Z@g6-wg;+1bzOC!EDu0V-GaWV}GTCubwx7g@
zJj7)2b>V#adTvA6XE$wswU^*pA8Y8UsrAh2>uWd%O8AC>peLe>pjnGo`j)Imt@3p+
z(teg_ir?2lS@cg#szM|Co~gZHXui$l%yN{k285|%d;Dw~ieL`^WigCk3*XqKY3M<5
zzaYCa+hQ~0LbP5mCQieI`UaN>EvT~PI3`=vJlrp#-iXfdn=A7B9HSBvSqGCQdI3W9
zZJF=b!4(*6L)DkYN61%x_(vxp_tfhGIDA;_%m>am-bLI@dwQ9t`cQ{$Bpv~kLt3mH
zsS&fiNG5uR7FuaZ(hxcEQIQjm-B!sb#JiO;si-L=0XW_|Jc`LKIlwcVa1COORXi9+
z%Il}s&d*YdeTLNF)l~sY_(CL0A?t)q3Adb>aM^8*Fad&$EXoYfSRQ%W-H~rsj^I^(
z5gG@%bkqq1h2-CwFKr8R%&q%Yn?%|fZD0|?uuSZ?H?wL_^SzZP)TypJ8@DNRl~g@W
z>&F`~)QsfhoH<gtYkT&3t4+t>z+}lkFn3iOD4R3}DUsfjwY1`W+{88#<8<6Ni8IHO
zAbg&Hx<VgY+%1{lae12876($2ZB*;YZdat30FRB#l{lvtZPu$3nFBKU$LYZfW7ESn
zh>cUoEh0;hEOs>`+!{iB4%iJCNVvmtV@`dZfvlGpQpZaaML2Lg<sCLZwFDUP;W`lV
zKkgxvtVxpvQAm_=Q^<*#>eE_~b-Nor(pT!KF%B~uQaMx;LG6J(v5hy*o}rgD#oQrP
zJg(N;(FXXVyEV&>f~u<=nwv6p;MIYh`?Oq=>gu?Ccp#FDFFK}=3Y-qRwIF1w_2x&;
zGfYFKhE<>=&kQ@0=_BTm;%$1B&1p01lUj@&QAXLsH5GZ!tdj4ykD<fcVGd@b(cO=F
zndvO_QkYYy`%b<I$HW38{;|C81@LaVZXMAHg`d6f-xQ0;+mBrJ1TZ(Lrn=ANxG!!(
zl?puEv7JFLV-cVSk+`o-K4QsrZk%>HB@4xd?)SE`_`5U<&4^a+WYr}ZGVOv&AJ6s%
z(QBhIvn6LAP+&m!4=HQBS!i3%PM_Cd>l4~VZ}#oNkR3*zm&`lE;mQ0Cf&0=$kgW>A
zS|~Dwa*>*!^MlTe4IpNa<)aue=7$NZ-W3Cgn_pXcQZeM-ut)7_g619v!(W5KHmgBI
zLw-u+yv=cfJ!8Xi<FmXVnh9gEN0Bnt4M4GY`a7&tgibr)`5d#l?PLmKLnrdPM*PSs
zmhAS}FF#I(uCF50mRkZXc33Lkm@s$7-x>7HLpqio4Z{>;O;+BtkCBb(1v);tEns)^
zUi73~Ce+^FJrjKzZ(qZ7{bZ8Qmj;C6LPWW6K2^AsYsr#Zrm<UXZ~PYNrR@?uY4It3
zSmO@jg5vDv)7NQ#EeY9X8$T~uY+)0I@!GY<j@T+WPjvV5SP2GZp1PKqg3%^#?W)&p
zcFP_=to+A+e9T2&33WIb$d1SLFuLce2AaVG1|}*h6FdTBc&Q|nt=S1Ahq;PLf`<~$
z-|ezF<(Jecno0y!k8267Kt}Ty+;tixz;lnaHo|7**Qi4E9;Y>Nsm3<Tw3KCW3Wqnz
z0tOF|zsWR1=oSf~-;_W<s{gwF{8<Jw6_(j8I%@BOF5Q!;qP4-mYFbxH<oK==V?L#}
z$6|IB`n9O2z4jtU>}%VO#OoQRgD=h|;}tvz%^7pJXatKt<&l4Us$aPQa@JQuH|ysE
z=gMRGZ0(5jxPf%`Rk=^78$GircgIn7$@Q8L-|~O-sxVXC*9)d3%a@gnw)OYKIL8k(
zIqG^CYE;nJmbPS$3R|)n-4;9iXc)Gqc$`yuzt5eBGtZAO@Y5e{cjWhzOruAmi}|-P
zOXzG%!QTj%Gc+gtcCOSXYq5=NBcr8mwIFjJL$BT-tSC!rcad3%i{VvzE?>Wi>*VqT
z7M)Af?O%`5%iUrDdWdc)HXzErYHb5b9<Siozz4y2w-CVg!_eq-l)Xbx8f+n9Em2A|
zGK(gsF?`B3?9@8*By5WvaqNM$Fn8(ZMf)B^KDxj*<t?3jvaN}5o<Te91SH7P2n;}$
zb?EJZ39Cjv1&oB)7}#U9)0c-A+xqW5ax@-8+u&*q3eQkM#zwb7CBb+5*S+0rC>l&#
zT&$3<Gx*}jzhoeLmYftI8sGAUiF6M})VY!uvND~r`)vD~vNF&Y?RCvU%~g9pPw_xz
zvK01-wA6<OJ+9+T=NvvJa!)6Iu{e`(qNwG{?X;5M=sA2*>;_xYfmJF)LmaGc4bAOw
zSy+DJBz}To25R0t0=<@61*Tf%wSLUy_nG|3Ulv(|bEDrd*{O}zC!A{aMdPW^V-z@A
z`>K2dCI)u!Yr<*gkwiG*{y0GDh8GosnT_?7fAc+!ThFUtNn4+GmbJA>rKqwh{84OD
zfpCJ<jP)bbxTw=+MvZTM@NGV-NvCp`i*^P3<}N|hC3%C}(o#1Es4{o@Eei=%gBU_M
z^^UM!C|cWDnORHrhs~SHN@db*7Y&VWE-$}Iau*~y?(o}DJF4gl@+qJFTAPV{hwR-u
zx0+q>)(;sdj`wi}czp448b(9cTW{;=AV|{tkcTrm`afsoEv`{CDe+~6W*KgB*w($i
zTR1t%LpJ!SPkqQ1l5k<w58GhfCyy}@13h8}cE)S21TC*=bTtlH9T$7%VEaXsyYeLz
zDyeArU^UdpTGo{e`@7%bz#^gwviN-UoR&ak<7l_#>UbO<LkVPBLX~xHuuO7z8Z1_}
z(+y;9>=8+jnsHU+L6HvK(e=lB+#N>QNM!xxM$AQg_KlfwgtsinVpyvbp50F2z`^qo
zLXT`)=j}?OP}g-2;MRS~3uk9OztPF^@xm%P+PB7h6#d!IAzFW~dKaUB^5i&I@f}em
zE^I#vZ+fA*AKW3NfHpu46!w)vAHROh8Ud1xet>XERx_nG9nccMFF@$NMCI1*Vk90#
zSAk2$ruy@o*VDOFiDzvZtxySPrSjT)B1OG>-Y{q2)tge?r*G~i4=c+P{htokR=WiQ
znE5bj1L#x8m{kC>RHER`P_Z#QKV9E@<{ooFzT3Y=Y`%g%wvwEkcjX!`BwFM9rNJ-u
z+S8uch1DaoFVtnHqP3fzb`Ku$Gel0h^FD)&hOX3`vQ31nB<atau<I$7>hN_;p*Oh(
zKthLA{MRZAvC>T@J>~BiQ8hM*KLWR<YYx}aOve{dG@u8&$D*IhC}tG4l<Asv2-NS0
z(ot`2j;h2jCV~jhOx&1a9NXU6!OZW310j|H2ODU8XB0SkMIUGbzi(S!BlwSn#oq+u
z6*%WD(1Z|@5K1v96T#5~+Y;Mgf_GECAyvHAs@7Fly}oxSxlvYE8$7?3>@k`;(`btv
zqmjWj*?{)4zNyHR|Jk~S+ydDob}T!b?_`1K4z$FMbYsGzGvG{+anPFZ^1b;afol86
zZHMrFmP1#yaSmrHbQFFpnbGJo_7LLA{z~AF_RoubwGK7h;&kQs47szwuLm|wvq!k~
z8np^W{IEI0<VQSt_NUi2^D#R~nGGpd@2uBweBJn*aP9EqzWe<Ikhw<bS?$#L+4wa2
zsg>D-p9Kh7)*W~0hSYsNxO%`u`HblZ-quOGzchcWdC^RA!IZw_upNV&Wf#--;36$v
z8xTme4@&v4?60nFHcm)c@&S6NP=8spaL5wdz*s!LKeJ-(E@MGZIc$3D*-UCY(MxcJ
zE))ok1MLyvW}l7L=XZeKDV1*E@6)9{d;S%FDK2Vr)6l3v-em-LQghXjfe+@M6%{>G
zBH7<MR@1YpaJ+dHg~bm(v9Whjr@6dv&)9LABmC~yx|oUB1@y?-nJEwB4%%jjkO0Z!
z&2<(@l+V)h>6%0I^YL2FbMGZ>7}F%|8+WYG0)%HrOFzRN46Gu?22`GRKlg<uHIj8D
zT2f=+ci<tU2pjX@cRQy8kQs`%xfQAEv?cnO@KZ!M^Xjko1XELy7*&xyh7gf`CK@j|
zsYDtQ-8_@OF={PPJmUF{V#M1#+lat@`EWHs$oHyafQ5YswoHbAfRxo3g*r&43|38G
zR_iC55)04-^)Pt=53W=A<HNP9Rb<I`<G4bkhb}XHHFC-z{KEvHpUyeptvlr(%s(gC
zYRy7mAH71#t2&@NgQfKr33wH+deKlpr(#Fm!T6D4aGT;N2!$*mbZ)<Xi~acPJVxL9
z50!EAn^sA&DEDzp8CD1znvV{Aym_BCBPI5A$OB^R#Jq1RU2<!!b%wXH=`3Ts7v!k?
zX)xF^p?5O5Djv{=v6RN~8+n%BnHHg66IFeP@|3p`E8rC1;^2V&G=P|2xBjJj2S{t9
zZXr=+d>Xi<(mhXA^_1&ui{?Xs8%ci?%tf-go6%@%;=8Sb{#NT}ZEl&q-hfIRQH`3h
zK6Sp-#p)<R94ClLMa8oUyEQ0Eq&isxFL9oth+|t||L*O#J<dZ#sZo|njw@0MPtq_M
zo1HrBqV1_E?6;_!oQ`9e4h<-NlP8qAOlfD(eb9X{R;z*;h^2%1U${n^j23aamLwa(
z#!l6<Ra`RRvbEib^)eD#Gufik%VL!Jda5s-xZXNqQu8E|ytiHd;k#3hB!wsdYaYMN
z){X7N;5lmqsGW=navV5a_1ZKt{7jQ(9r`HuD<yh5LjE|1y>A=I?0u@YX732pj%g)~
zt(_I(p>h8)eP3JF+^m#Mxk9(8LiR(m%>dg-HfHStvkheF7>IH{0iDdK^ecaRh1WND
zOb8E}#agEI%U@rf-Ply-w@pe$Y-;jwaq%^X@Ixnh5nsbk^6kCZ;U=85*AN?V*@<an
zn&(?N`tEg+-WhRbLmB!5%gTa1;vTo@T;*+08zIk0IsXxh`Um^VD3=J&_<?zmr#ZFM
zfu((4UxT^%HddI-Q`d6UU+I=aZk+Dc60lC6#DDcML^HQhpbp7j)_dFE*1dwGGtzmE
z843DDuEK)6@jlJmOxZ#38Y27|)Y`KcDD9K4F-M`+wi$0ODFqnTe$bgxsHZH;d96J_
zTu=9emlJ)HaX;m{Qb3h4(vN#-HF^4BMo&BohpmX4>WPD|+Aa8QUUx6D$r2nB!7@9s
zR9XLqTr{!B8eL1rQ)y@Hi7SlyO(?Vu%ePV4g;YZan6O9_x2>^SS$)da?w9lSR6vEq
z`^5dG-ipcK{4J3ZZ{QE^Qg0ytnl^5UbSZ;X4oVd;PSMdd41X7^wPAA_sVzX+eorRZ
z6|i1v-VS+=Up=qp$e`n#3_pW9gW2PsUZ22a9~ah|&B8=U0ziPS@-mZAdz7>5tem%1
z&znZ%PX=j`$}fC)`~-?;p#CuYX1&)JkJUMSb36aJ=?MBl#`y3yn8(2`G;h4FTlCvO
zPEsJ(rBc|mrcrC9Zh2u`G-$?VcJszwId;SYq+zW<h9TI>8n!bBFRuN2AJcX23gBc5
z_O<C&mHR!sq*e%fmcdFgKHTI0X11con0S1O<<`pGxPi>G3%X-5-Cda-{I6C6<P*l~
zIL8!@Y^~0G(P;B2iM-WNUY0kJyFYe+LB=oK4kNk6jpDoSIqvhK^nBNRp4?zOh^$F+
zhHlwp<nAH6(@-!}p5>0_4m3=&myT!<9k9PE>oX<@55N!6X_Pw^HSRTP5I_O{Ck{ky
z<EYlfeQ!<S`*nO6MJ4&^qa0?Ln|WStSUra%X!sp_BDbq<#J3r5oFjLG*m+P;3IU$y
z#kqyO-QFbLiIU41@u*d;DNrU>bKnPLXsg(Cf428%6R{KB7gBDi_`t2u2K63FLDybB
zS!97%W*v(FUAOikxmDlmXTX2X7YjAk*Cx{*V~GXFXw`uJhZwDltV}H6mzlK-h|wBo
z;lL~}BPO9JCc$KHW}rF<Tm;8xMH&}E{vZqBRPd&IpGU<<UHUD1H(zi9!@ed)LM-2Q
z>xBO`zl@sjN0voBi()>gAmy0FKID~7V%TdZEU6Oywy3Ldm?qm}<ENXW`{dir52$i)
z7jO_~_cm1;ul(N6S_tT#R1kK)-yN}Xt8&D<d?laW>B9lLM3SZ$4Hqx$m>r_Ullo)x
z<06bb96yn;SU`qTU`37?(aF_%u2jG>qt%IlDJP`pLE8BbWDz)urNtGGCZt_c%~fiP
zYfL#?5UG?KfUbRd%|vWx3_^0V$BGbArV~}c7bx>huenjmD=+)h8e1^x)z#q-Xc>jI
zUEQxm9jO_aV_#S9nEYliu8xUV_Goz-xyj!cF2k=}&>)jD$Yve-?53wlKZYg|0Bz3_
znnA}m^WhQ__vbuhC8@!>A-3r}zUkD;!FSh{C(A)goIoqa)H9)7N5t^=J5MQt-!GQ}
zkZ7(;{l^^dc#bQ!_?NG%*&8d!*^jlKZLKL`1lORdTSDRq-wZuhC^|@0@Jvmswy_L~
zOjRdM*9~~UvTK?O$gN2fYt*ZJB#uk8L7K+UZVt-I9l==Zn2DR^RA-A4uNi~s&!?IE
z5FdJmfWxD$dD9LfXeX|L{c00M?pNA4BGV*xupPN`PJ;{)V-OvJtB#COwS{UZD4reR
zAW~n8QwjoqBPmy3Hh&$4c^YcM1oeg;R=G<1Go}_%4GKXFCHjO-TjGW_IAq07{QfQf
zJg$a;+e9Sq0$xlBNPPM^%&bDXKoPGgcWiiOdPQCnvzr20bt+z&GF2I5Ob)u~`@`^J
zPe}Q&G`HFW4L=g=A|6!*8XoO4dgLQQ!AuxEnw6;sVsy4KXXr9(59iyNNUcYtzc*`g
z3|>tGa3CrRWB`Er<*|AHqgg9RsEde;sEd5lTm&vkzVgP+_Qm62jx>sE_(@%11SgZo
z!)fy-*dCh3KUX*IwT-EB^1OlU%<A=FT+2KpBo@pag%$60Dtof68wIYSxZ#|3$@C}1
zjZL4E_+k(767AZ|aMf)|p(`)jq|F-|!Tsywv{DQU-dy)E&?sGP3oAZpoMMO5HibPU
zeLwFq!?HYmsl*i-VqpoF8KEhNoJgu$*ZS?O!%A9ASi47fcNf+kt$9l+A@1kgeAylS
zg{IYe11+LyW$xTmmwI9<8FB*#TrOZjM7eDSeV~)mSg5*7)0l{6DFuR)Oiu3Yf@;Tg
zrC+>-IBxeqM&+eES&Kf&1Y=Q#x&PC6oilZyL5-w7f=-mxKFo#<)i1X!q!2S*##6B(
zb7s8`ZYFQR!npN86gIPLET+jsWr@vrZi0%5P~RQ{Kl;>~TECt1WvaF^+q|6thLzdm
zYx=Xwk)1RYHUOr(Ttbdb(4Okgh`4PUqYn4sRYL1I$%>!oZM>v?9T#3F^A?gbg1$er
z^QML*u7?SzoE~;DK5mlFB5gI)5g80%^p*{uhA3^KIVC!j_@KRt0H7x#$?fd)pIU#o
zi+E5dDv*b|o$I+kXvU~ys<+iy%Yoa&#2KcTmh|(%B0ZTjX4hd32%0^0B)t@ADK<j~
z5%u@HrCYjgg*@f(ssAp_g}DHWSrp>~^e>Cxz467$5=ag{w-Xi&Rl1_ddL-gN+cdoX
z7R6L+4^@APc8yn@HX*Y{ZMx{(2D@2iX{V3~c?lCjnQ^0f>BYQ&gL>q304Leuo;1w6
zZcuz4FoFi<@~d$5HDA1j$c)(z+_S_vwTekEqzBYTg}8$e>H;QEEqe#WGi{}Y6Ch;i
zwhJ{@;FQeI!4g5=XVd#xsBqy?UN<dw&ogHf>h>v-R}Ujhe3Ju!n)sQ6@p2<{P+j{K
zBY^@RZ!!>pR)uN3-}nF~P7=RWTA|tt{=MQkX^RBXo(1JY87&M0-=tY3D^W;pN46W0
z36tc-rF}}U@8d;JEo^&oX0RNLU6MI>=!vB~tzO74iD2cF7_1^%_6VN!L4AVRP9e>k
zB8{N;f!bze!9-Z)L;el}WL+o@2R?Z)4<cWEJ^Z|774jhMOXao}(PSu#Z4_W2e+8d1
z45@T*r^+k)rAoOI$FSxX=kiX~dtEp@ocA)UdN=;cRWCLBsiZt}@>oJRC1>n9RR9<^
zW8}8Btqr7!>s8}|EztU?Add@>D2R+Ex+!j%DD^(o$qw2pJI@w#cxyK_V}S<IZ_iM}
zCDBbM5~`e%ZzVt%&$Mb3PH^CBt0Og_I5i*EU{A#?1|zulEyrCzyuqMalhgm3<2O_&
zyvE^A0ALb#a+!0m|LxSBfPhSU`Rw~FA{O#8J>gRrd(NPqKEl)QZc(om#Les0EQb<o
zgD4nj>FaHN#&D=hPF8jsSOT8Ox}MLZb#6(-><9(YQ{14UzLJS)831;z)Ht?39)s7N
zYr^WnFYh{+fT$oMY7FHs*3tRrf(41j`&Emw57TL}=oH=grVBc7s@DT@yyFWwUEj8o
zxU0rjZ&8=+9}n20<dfk%6u=Cdz;@on;%cf-`7|CroK8qAF_wJfu2SdJXhSBXV;B~3
zH!OFb$^J_75R#W<>LPZ-33kwJ8M$)WMFn?_QLS}kfy#Yu9o(Lc>W^m^ea4w?E`3Nr
z^xkbh;EY4P-d`{2E~Zxycem~I(+iP!s{+@)=q0;i!Z4ZlvHT(~nmY-Bg(DsMNQ$Z2
z5~Q`b_0i=h*MFHn^;Um|<m5rSTXSpig0<VzviEKgt1&AQ`F?%Zp)aGB4l$UI=jp#i
z$NoL<a7e7>&<A(SztihKcF<bV5)$%A-=!5sf1t=Q(v6LLuTy22VqbGs8kJ*|Wf)|x
zRh5<<WMB$qhAUASXBlT_onRl|LLB<e{Ns{g28D)Ec63m_R+Wa9PT?n_jC`XiO)<yJ
z=<s*BX@!xptqsT*lOo6--Dhy*ZU8Dc(f?5#$idOj&fLx1jtS%el9Mf$XJnEa8I-3T
zs#<#qW{%h=y8;hO>qr0q<sZSS5@JfK;*m)*@udR<!Pk#^*=e5I7owCfDp)b9Mq*^=
z0+eLG4w$`_vQURh30I6nD8*Zqe0uK9SK4?3^R)6sw3K{XfVu^32R<C^keJcgsU7lz
zFe=`4OP?d~6f|6UoWFcgs0w@YDeKXoeO^Kiqx9ED_~A+4r4D!>9q$AMi2G)EUt6)f
z<S1>snPfuZ-7fLYS9SeYbXSL|!RL;NWwDqzeQD}}l)h8%xQT)cv~Smy(odFESJ7Eb
zo09XYKcto8R4a$)s<L4Yx$&xS>;qZhjFVsUD6~VgmS^#hi56v^h0@022#;It)hCHP
z6E*b8EPcA`la-;0AqxrS>^33_dNj<hMeXjsJg7Zw!J3)I92eS_>xTW(Wp`%2psGW!
zHIwIP5e%87+MfAHxMEWhL<=C74t|fT8zb6*L|ApUv$GJut{jExt$OM9s{eqkVcx(7
zm1t|hh59@Ca)yok_kM8+nmBJ<jlJZj_&c#$6DMh0#E7Outu(YD|LSqxIwJB;G5tB?
z!*-+4lF8>v!Tz=ZZqCEy!m`I#-Cx3dONICvgPLPYrF364b6szCJ6xRZs`V>=J`!8M
z#kGs^t~CWg_os4yOuodlBygFMy;3uGfw|oSuzwmMo}^CjnrWK)hDV={a=U(rjGaT9
zVPq)d603QugpPH33ZMER7ma$MAk@)TZng3v?FzJr?K!38Kr&Wy>9#%t|Bmr2`nYN~
z-exGw4H4@yP_C3JKE27W>!$dmv)P?+)2Sfm1CQ!*(xv7mI!AxUZg<YX{@g*2;af!C
zh32Y_sdq9Ny;YWUT0#bjpCdu~pvs=FYDuO2bZE^{OGoO^{qNY%BG+KvP1;H<3z0)s
zVhD)2uXhnBi8r|>h{Jd#vCZcX`q#TVCM>JETcfQlh)mf+d0TS0vQ`C`8LguDWck$S
z-&Jp>uj^eE)d<9GuTMd4V*PsG)Dk1eyHqGo>KU}6{Y#j}-`l>>q1y0R8wvCMGN$An
z+t)3KXnf>#G`DtU3n^M<6;9$JyPdu%f7<QH_c^A}J32(Z{Rm|q<^qig`gp;e8Qwb^
zL`q*xai~kt!@hc(%;SP^2Qxhilq#sxIlE8sL|UoJJC>L(mVq>%MO3#oweHX3_@-o7
z$&H5XvBv3eYp4An!xt|!s=?k$beq7!j`<UQOPeLKKx_T3H$w;^a^AlnbsQ9s1Cuvi
zmH0_sBYMf#*;8J}7$Gw_ivtz#!ZA-Uxi}@3J(+)A2PU|Oh+f>yDOd|iSvo3Dto5~2
zQpJoiRf`g;eK#PmdzG;@*QgU7?kHPEmhqXah0{I_nrNoy>X2cNt)JDKcU#}tM1y2I
ziYv4wkoH>dNf<BAr~aN>QkPTgq`U9lS+{v9ht6v*Oe^!S)OcTL)3GJ&+y`&Hi}0ba
z7_TSBF6f<D4#I;y&GR>b*?hmEh}9mdhT?4&|KM=~t};EYbZEbgy+Ns>4d@bqXZ^PC
z=TrQ`%a=05v(pQf1+({qeBR3zyK6Cm(NOIMj)$LVE=5ohHkpk2*^Hy5N<g=}Kvh~d
zE#=}HT6Pn7#qpa(&+(udLeGQo>z1|9Jp243#V%USS7*C6P7&S=_6B%<^1Rghptjf_
zUBFq5UY{eL<HdqqUl+tG+V+`Jza2GFhRNnm);UfFo9#l64i5382!bFt*0qN-fwNQ7
zQA)W<hc}l8A#aju<}YaqQ?S$_AAz3pPh9kVM`m;ZNlPm$$VH(EH&*DZf#i6HK|vd&
zb9d!hp(F-qk9XfUeUKFOeyPBAAUL_C^9Wm}_7E``Z_73BF_Fy#f2v!g<Im!@VxhO!
zfA=Ifx&o<g1niA$zGYNGf`Wf&dDV#nH-809r@kM$+*#v=1Lq_%-q<2?&!`L`y1{TM
z`9uxn_)uX`bjRsrT`{4798-gnkJPu?P;?eMl)Eouyb~-zALCO&A#f_$Tt(Oj-(P*R
zTl8e;uihA)uB0xPC4~V!tIiKtWGfX76sBTMmnO%x_lvAuI9pypseVgH;s0<-sw@Wq
zi3Pv{j}h<yOhbL`7Mi_hJO}`Q5DEam2Y>t5)!_I0or$rhxr>{zowb>>g9o#rp*7GN
zWM~LJKIp$=*i7DP&m`~>WrP1f35@zZ0R43}`2GIo@0h=RVzPI%a{+nUnZG2iqe#}+
zfHA4yMEe&Gd~fh$1-ljg#$f)}C$KmRb7ym)srmmV^!M+by5hToWCs9>j{pFczhS|n
z^&hY=vUyPqzv&6{-AFpP0DvBN`oaA-IvdyjjsC9#nhZ@H>>Z7rtz8^||By)lCCYX_
zEC7Ik4gfIxEt55cKV<@TxtKbeI~oH`J^z7>m8x2WKmp!_9smGP{f#`P{wK1%slDk7
z<nM+tOdx`)1xA6u>F?D@;QkWn=l~7@>geoXYVP7<4Yd6KRU>ykvxP1r0N^nBvKpKI
zh<%aGKZr)!5xdj{3w8k~GI09qYVb2E_#dJ@?W}>e|39sVM0fmfBmjVdF8~0({nypt
zck{o|9RCIV2W59{!=HaEvjr!*|04ai3jW@g%3hxO^}lKST}=YyG4!v&6G$wSf6q++
zLO1v=|CwuSX6y)3wsQcz5d7OB+|0Y#G{KGa3wYkk@fQ_*Z}5{D`lDcDM+ZAkurUB#
zK+dkFAnSiLhdhjrvH-9tJj?$(*6d%IH29s4|1pPun>4rv&5fP^K|LZqDbp_4sM_@)
z05pGr|E2!z#vj?{Kua+91@(7fcQ07GZiCAt0vsWi`EOKP7Kj&p(Eq4Jb8vTT=xlCk
z=<I6u68^iuDMDVSD}sw-T@(NS8_0iM4St`w{|xtVG&D1Jv9<*MJ|phGcgerefvZu7
z?O?|j3%D=)JDOYO|AMx2a5ny5Dv_h=tDz=505FO7vi%8b{t<0yXYOGQG&A@32h9i_
zlhgO$Ms3IcQuFG&KZ31+j;<g>7e`~$e=2F~u!!a+@SrgS_iq>XUq#*EM`iziL;q7D
z>%GS{kO3RZHh6S;`xg>?Z}5BV_(!UPGsw!p(g6r=BKF20Q!8*UYUt?d?BZ(uPqz6o
zO7ma2HTd;^{9{%xVm7~QD=*`V{D*Cw^nYjnC)&tM{>u{(|HFS%`bYjtqk7S9{^|L}
zfB1tNFZln%?fajn8UJ1NZ|Y0;@qeh<KmSDi%bEO=`O@R@AEwB`pP2vhf4l^~^iuo>
wxbjEf-+UG?!7sfOe}TXFaUcFk>A!sz%5pH^eiHy71;5%6!NdO<cy<Z+f4Q?)>;M1&

literal 0
HcmV?d00001

diff --git a/dist/bayesvalidrox-1.0.0.tar.gz b/dist/bayesvalidrox-1.0.0.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..24fdedf3e2cb338386fc58ec4ef6882fd986fc1f
GIT binary patch
literal 128377
zcmV)QK(xOfiwFolBj05L|6*ZzWpj36Y-wb2Z+I;+E-)@IE_7jX0PMYMciTp?D4MVF
zTK*3hIs1^5Nr<E@IpH1VZdsNS%~-aUq)d{nXkd^8DMTc|0H7Y@?fctrRrLeijRr`{
zb|!NouEZjNepXjkS5?=;-Ez17``EwzKJZ6D^2KNQwArWZZ@azyEdP$rJMC_#`-OA+
z#qaQ0q?w;U_h0-uKi#jLSr*QMe&>1nn@+d8y}RpfKkvei?MGiceEzTh4E(zwz4oW!
zD2Z=d9k=baw?4=IfByV=`TmFRoo73{&v(0>XU}(G|97^ZwZCw79`^rd|G0ky`rq9x
z(*NxT`v1Go|E{3_yWi}7y}R>lx9heaDF2_D{=YulKX|i$;Lb*$ZvXG@?w0L;;PrNp
z{&za<&TbdWb%6hP_Jz}a*#DpXb9NaxM~7$5>u?xEY2ei1N8|CM$B*{o`CSrDE;FY-
zY&hL^=j&Ft4b<Y5pM)dlD8BS(Gk+AiSoSzbW?`C!apZ)la~UMT;Le#Oev}2HrZY~0
zz=_As@X}8vLDR`%$B*uud61+~As%FY7=_Wq@tq;``S=kO%`TyxG#+O+eiA_Gk>jUn
zJPdtk&>6+U#Vm+2Kf?~k;WS8{dIs~@IOR1q8q~`u@TZR-g^`0V#XILF%r4_a<|ILy
zCE*YoX*yvvoGwN<3h{ax&O+V=Rwq!Tj~_v^ixeh-Lv1><codHDZ$LAfF9y>vy=*$8
z5ZfLsGI*HcLjrCSXR;M1P8v*MV9-PeL#OHH<D)`2`Z)qM<6uhh$<1Xv)8`Vxh{lT~
zf=&afG>QRi)aS3kFvBNUejHDy@eR&y7)PTJCzbYa;c)f+L3|z1eAseFaR$R>qrhdD
z=Zni<rI-G6>I?!7HRu^&?5k5sa0IZiqAc{M&OA=2OLMA(J7?b?IHzx4o&C6Xa^M`E
zI>#q(|8e;8;H9&%cM88Zn$C}hXWzg5;mm;&Cwp(ses<oza`xW*?EL-k&C8~9@YC_h
z!Re{<_T=%S!=vNZhX?TR@Xh|~A6_25`ObL(72muC{5}N4hE~tsI@lv`>+s+dTRS>9
z+5aAX?!7pCeR%eB^YNoshi7lF)mLv%oIU4w@8s-o|A*ImC(iK?C&zD34`2W<q1`u!
zZ(f~1HwQ-tZ_Zrk7M?i=|9~IP>GylDUt?d7AMO1B6F9*kI{R;re?B?<?)x+6`?s%O
z9>BvF2Qaq17q1UkUofxz*L#OYP3Ps_(cX6lRO>A?1JlEDY=F*>-yh&J>~RnN+dn&e
z`vzyS|Mtz<3H)rrL{H9S-5(E651P*2$>Ax&<kiXBBbXz?6so+XR-odW1J(?}+tHQ<
zis0`bP7h>@&dY<n*U;E0R;F2s(y*!z$La(B|DgYQ(EmJq{v-MS<G+8`f`jetr`-P)
z{Qque+tmMbpKb3v@c+NZ=P1bhk)Qdke*m8eOj^(Bx}C?5-uSbi=V-|B_>n9^XaXnx
z0z`o1uIKCl8$S=CRvIsoA#kuUai6U?2?G=XoZ~y-sUv6Z53l?QvZKf!CP_R2e!c~q
zJ#foV>IL=&vVijs?2)nFC^p2i`GRrcz)J$h9ZXZ;F}{yyL2C}p^_<Hro2R|4tx1@<
z;Z5o;qOg@_i!7V?N#>4%tuVTVp$AkKxZo|CSLjErYuOWUX$u%+(lZ5|rgIVug5l72
z{vKaWFMU51MO(ql52rn6j1}COsPNymAv8hUpjO-~-ioyth+R{J$2bgNB*y@u1R6!g
z{k0!W76`mvPk<A?ZR-_;5C?DGo*p`T^LYZi{Rn?6GM--NE%5gM0qElN4whlYDm#bK
zD1ap$!2&*hbQ1iw2ti(Iv7UR*xBYGR>&K5?2I(*f=Lo;nehf<;Wv#QjIV?#Q+-6&V
z99N_G24Ks3ZoL9w4TdT83rf>30SS75BGJfC`~8ml%zajP`)drM)BxXh5J`=<Ge4V8
z<BUqc@DM|d5@`U!P7?L|ZMS1<AsvSEJ8b^ByKQ@U6=tnz;A0CNciYlH_S<OI?>}>Q
zZLNd+8>iU*GpJi^KbQ@J07u?-yT!UN>>%m)yKdLg^5qUTyX}5sn=@>kl<gXt_qL_=
z*<zZ7fGfiwr9#i4+-{-NVm1Iop9DDhF0RhwM^D(Mr44(6B;xU-zmCFd5C+3Z)ZZB5
z_D?pxef-FA{u<6EPMQq+8=qpkZnd@&{{}D&P&U~b#lv)K8c*Ua{W!x%?mU`oz<{#;
zhW66Imm6C!w!dz{kiW&T|2yc|8OqpW*yh1aoLo7$W5W^1&_0ONi!7dz$^&1GL4-HD
z9QY}`Okrz*On)6_cTmfp-oZGUY!m=AX9%MhBtFw~pj3aobXcSWaeVx>nEKFTmMn(Z
zf=ldAo;ZvE0wruY=l@Og@1nkRS*cuuw{RE%Vos-=_M#qQ5TqGQ-ygzMP_u!`i(wML
zJYgi)VUjIid_e@?;t170G?$PyLuv)FEbt5<=)f>YzmT_rY9wqOe_#b$qsbyFs7!#Y
zQ&;pNLv;v%s`vQOyLazk1uhZ0=Y$#CPHW~|Lv!rp^xzx59FFwg485E|7;TC1P-BlD
z{Twe~y&^a)=2IVvyp#3bu{F7assN;593%-WeFSLZI<EjJW-+RRBEXLsU)snvzZ9?~
zaL!4w1AFT#Up59kn6S&{;>O8jD~PsWiC|&2CJV&MEe>xe)>@?a?>wBh06Hz+S4-_{
zt3fO9CcbInz-eacg0ZGyvOrVQKxl`=GQtFo?~V_}a?*k!Xb}zu@WZ)m*I-!)stTgu
zg0hx~m2tRskI@kawn;R+q;U=cXB0=UNO#U~8b_=i01Z|qcJv1I!2tSGjo_Yai5R;J
z)l}rk=cG^A<&&gc^$HLjwkd4PP3KgQ2}g9mQ0O?G-bL{Y6?6M|A~^@QaO$SC4>z6f
zrttvY*l3DFEAE`egLsPk0i5a7k&V)@u8#JP_Ia&$biBP2tE<&h2ohDJ@j5pFsa%QV
z(E-(B&(k17c7@NFpHYNw*l4tvu_L@o2j!;1QP7#SsW1_3Rzl|+p5ghpS!5M94F&*C
zX<k%6^fwFV{HAfVZ<?q1raZv;PwSaY(>)7rUn6-$pd7=XGr6q!EPyu-fEeJ8ivm0!
z7Qo`6);f_|Y>BVwVg%?s&sviplPER14(EvP!R-|KYT-7$Z2?1<`108pxV^NM1V|i_
zOB^}FS&KU2==dO6Kj{A+?*AX|{~td8dHTPTgT0qWpYHm9nf~utd%J7i|L;8CebE2?
z9-jvZ{y~C&kl-IA_y-C8L4tpf;2$LT2MPW`{(q4F|BUkgb6AG$XPup|ALRc(x%@xB
z0~rS8CKsfa>1UV!QR+wefBQMP0-*f=th@an|NkzZ^T8sVj#??%eq20$l<+%$r|+C2
zqv~t`x3-vPaSTG`w|(bX+ue2B8%_4^<}wJT8;>7BMb^#$m_y(upte%arFY&e$o!25
zzT)Bhf3W}k1D*d?``>Tc&pO?Qo%$y||A}(D!|~+z(f@b5yE}&c?{;VR;r#!7&i@%V
z0EQzRb&+s#8_Fu7XzL5JU!PRR@OuSISyt8~OIfJ#Nlj%(x%DYD{qNBHPvdCf>BAV4
zg{!<lE0*zw%uv?L?&b(>!%}vUAe=spMe~cr#5qflBTGI}n_I1|FijVzH5^JCV&oMc
zKl*YdYvnIjHBtU@6}#lekIrKPYI=cwp5%<^@fG$EsbOSi$pV^_55t6wwvRVi@_9-t
z7ma%Ggof)Wd@wh5=DIcY4YXqzEPrX3D1T`&O8&B-nfy{WJpR&ZYW$_HrTo%VQhurH
zD8DpRlwWE_!e3eylwXz@0)J^S{S}aR1qfXPAgsd;`u{o{zQuALCpMc<WYe9^r)hRK
z4X9)=nRs{s0&g>a;*r!qP{v;))Pwh(HkN!iwjcQahx7ll@&EAeH@nXs`2RmG|DO!E
zKK1@zFa8s3|K|_!pZ+}bf4fNkI}h~#ccK5!E%bl8{dIfyf&BlO>A&`T>*M$Tdh~w>
zXu<>i|I^U_hx`A3AN}uY^uM$H&BOiw1N}GYzvqQfn0cN%zq{xDuh9Q@o&o>I=zr(=
z4%z=d2TtTc|Nnb@o;a<|mNSeo_Fd0eWaHM?_~h{;&*O@J&%@h%zzcV01C?*%B%V2d
z2xxrF|2W|cW4t-Xcdy~st2nvwlhF|>7e#TYYWKME+<YLKz<4CQ=97}Dcw@o7dqFe_
zqo7dtfF76jt=C#oIE?0tOb+i5e@Pg`Y36ZlDVbgCza>=Q@v(d=Dl@zAFd7F*5Df$K
zx2Q}P5{Kexb!7$<ysPk&5MwK<Z4hAg<-4+$MqxTkg1H|J??m;N`6FC^&qFP^Ck;fL
z4Fy%WVTiL)L<RYZU?_hnt;=U}A;ps?8^+qoqQ*}h*7C6qMg6e>m<{&s;!mPwevtnk
z<o|;FzYFqz=Ya`);Qs{wXPgpS>+k=y<o_;4fP0YtALRds_5VHP|J|K!_<#37{(qqV
zOX&Z4`haEj|K05l(*K>E=bi1HXYjlWpZ0_N|8JH5O9aGfxpX$14W%@`f3$Cx7fYlu
ztneq3z8=p1hx7lBbN)Z*{~yl()y{uYYgo+&RG$AGc(c>i_5a=N-DeN_|KI2Q|I5}Q
zO|~$S35Mq2QBW|lZz*5E&v%N`;P|N%&E>N=m0wvdAO3?`Nn_#pi1P|)%`8|2{LUYf
zCA4@>M#{1gdgV?rbjt*rO6OBwzGQaSd0A#+O~oR0P-qh^f1*jW{Gr-Dlb)VGpA5y&
zuE-!8?;nzID6a%&fggG4Zwo&OMqZF4anf{>u9rezfnHP8m?y#9ybs9j#ND}{4KD-P
z{xSZ{N12xcNCX7a8GS1@#GNry@x_~u#<JDX_!zCWZ4J-wrv88yYntVZppCbC;=GRi
z5evW~ExDnVOJChZ8q{O7{LU`jU*j;6KVa#?tnPW}E#Y~M^Y#U20!uzM*WIc$othqB
zMRZwfP)s83lVGr>J<eTjaMp6qVWn7xC5*O{hcVgXaZ+5?92L)aBBzx7Zpjb(E$=0q
z9=e?5v%nWT4hnNcaoIcdg2kwUF_hHTtRA_lAcWI!GV{A>(+S<cr6;4%pO8mN5Wy(L
z5b?t!@m%`}%xj3@aiG90HRUr*r&n3vL>O;q>P*9{U>aV=@raL;oPjWWIG{5#;m2fJ
z>QKpn1v<(jRs|t+nrKmE>hJdP*SiKLg;JeUL`(H|<ZAP-;j;M$V;_cx(WBngsA}yU
zA5j3~_A_Xv=M3U_Y8#eVKo(F%R;bHWvh;!uBBBduH-j1b^NK!R$TA3FG5KP<ul#8$
z8%~1Xym*n(Rbmf-on~cF#dio*bTbLaCyCWVgdGGJ1ZE^L!q4$l!y#o9Z{i3B;T(<~
z{MK|t<R&NsL_;*5$|h&*?p5LuLqQ516bRHvgDE@&Y8SE)LNFuvYn{;^y}^gc9a|K_
zWZr-y8i4r*{!qjXVq+qR<}KML#lEs>0U=HoERc{h2WkVe!W?M?F&6qOZ_R*Q>H{sI
ziTFw4-{E-DOCQj?4y2vEYdVoP_Y;4XHp-Sxw4$sO4>?wjC2}fxQ}~F1iMNNd7$yEq
z<gk+yLu@7Sz{g~BSqMZKFH{{GT#(`MW~?43205wH2$Gy3%pQPdo7|hlYykTjp>mHY
z?hv2%PC?ScydfwM#hF}3iqPdJ6H4$02*dKsp{Lmdv6Y3(i;cyM3d5U(*FnTP<Y+Ns
z4q+>zDmv-$w1hahc#sCkHKS<QOxZf-t%YM{PDTTK>v6!TOTw2*KSfvpeG5v~8>1>*
z#-nmdz31=}MElX*?lzn;$2vl!7~`ge@J{9|q%@9fY~o~8<|P$DP;YLb*-3yCfi`L|
z^_$BOb~Ppf91)4h+mXfCnxXjw4E4xQuHtKqY6b&1LPtqw-%qA7U)V$hc1wW|ncn5C
zNrE!CgMGlPFW%LVOOdAsvM+oCuSKr_8B>S*{N<EavJ5#quyo$gJM8>jIiBPtkBJ*W
z(yNRRk7GTGVNb1tpKrhYR;|)|U*w=_q^0GXqSoU_fb+z`=NLYYzp~Vfyc>Ud6(m@#
z({59(e4p`mPJM_g)v(q_{NT0Qg$8CmVBum$NM!GRut*}P-7Qqb7cD@KvZ}NATA%C@
zK9DO%UXlWzY}Iu!poR&7rYUQwAR-iMUkAw`P6JU3>aZtac2_L@K|emiEP$0yFJmC|
zEGZ`v{!Bq2;Gcjepo5PxsQL*Jd`bf<i6Kx^PGOqG%0-e2lspti0Tb(|aXh@PVt@qh
zzYhHCpao!VVZzk?BnCvz{iaJfBJHqa>=zs9>$h+B|KSjPM+?es%1#&<B5*=TwHP>r
zW3qFkGb;xecNMo{{l(EiLmqFEsq?}tS?QF^*+5v6q+~W7#(}6XQ`aG%SV*TF2#8ae
zkTr`l<hg@r0u+WH&*mAUQa7<cAhBeNWRM?bMRrA>Epjc;u$5)`0pkgZ!$`JVa=x8k
z6nJDWibM3|cE;1#FQKNa$TYZy6O2h**bBo;oB>X)&{x}S?}*-hU8I>O<5!j=<=>!O
zI1c?mI8`HF78)lU<ZY8U;t6rSoGpLNS#l;($+!T#BI_cLHY?i0&6_UfC@%*ije2)n
z3|;I2l47Uv1SGNSawd9^+lw*70Y~b*de?RS{`w_U{rh4%MbW<XItT}NI=%$9Cz<G_
zw_QA=Jd_aQ`CZRBR3gmshvm}4d5sUWCYK+T1Rl3^M8Eyg6~W+^3VVU2Llzkrn@Fn#
zG3sRoIJQbBQf`sb%r!qzfqP;vMDLh|Sc*FA3{r%Ge7K=g7dyj;p+Qm4_wu(O&PsM$
z;;>y-(ZoQropQGAh#!#Zm|Wm#P}@{{NAYXn2h0tBV2<o-Kb(R@5NL2!__kD5=3`;H
zFfR+UY2b}#S%t8VMI44U5_%<!BQ6|coR}&xZ$(7jc*i=n@J2Pa8`N^<h*UOQX%dVb
zapznIW^dedm=SMN;gI|M$X8S-efC5s0{VzzHcb;MJNjJR(99`ZpCA2-^v+teG+_$G
zylc*3@n2Hb>T|Bh(vT*v@Omb@-&fddYa2OV$Xig3DKRVatmt=aP3cEY|NNq9o&bHM
z*G-*L^FA!?X+*bwUL)etG{{QnLTPkCOvKz(eTL|^`X<u%3FjIbc}DyaHsXDbXU1C<
z=~w|*F<jS*D)!A#c%<4=>m3SZRbQg3QG$o(!9T{EoL}kZi(`~gHBfyOqm6O}<xanV
z+GY(dboYzm1RJ-Pmyq9$GP;o7*EfMtwjlfV3;V-tU6HK%>gF)Y8SGWxKz<YI3g_2X
zH&|i*1_#<NY`wDe<SbU-+;T>Fok{BJThS=1Gd+EMKbr8+&-%WxOKqj)ncdg-s<|TO
zLD(<sVxvakxat=+v)IH>6fONne)8jor69=fJn0x}^zu>48wUKGW`2?}CM3TlRf)_=
z0EiVS0M|N{VwSwdFz*Tkw??HDEre9<83W6IK5<Ti%t_-JzaAK9yHTZVI;m62sP6^&
z(~Cs;c9RXbQDN6H667ICqoP#oEmQx}LzR6<WI46tgWLJb0A*QMme6N=$deKfuM%SV
zgbdY0_zWYegM{W^1-T5&m3JCtgIais_H^)@#Lqe?;Kosc3<TY)7Y`+!NH(2X0^D6Z
zt0`)g0M0q1_bj>VS=!Qv!+*L=aiA9uZim4<%P+VN@al5qCQWVB6Xy)UI7D8Q?t~nF
zGzxjZv{QC-RSiD&?Y!4?dIC`{ioN_Wr$A5zm9M33_1w{*L9+6B`$ET)eA7F}ctj^M
z`kN`47cK!=11Et$Dwah~U5LG5hNvZPb}SgDpMw>6XC9br>i~@!dCCy#Lm&7v$vrgG
z;qJ#6?=3U*;~=E_Y~Ym9MjIijmco?cpa}w~j|14ak^2(9yu!MTRmQDWu(a6F*XRIK
z7z{H-hoYzy3)eqI@KUxFkkf;t-f;STr$$$mwVoAwGCaPNMg@>bqf)syS82ebgIAy4
zD`2p+d5}4QQ|GX!Ta5-uV0cWLB6VS{F8$Pp-OUB2n$j?{Q>d9M1oh1IRoK@i>Bbut
z5`l%pCQ~Y$*RG=YCaPT+gr^9wibkqNh{aqfh^OTPR1qtmKlJwgs^>&=H~lTi%+}7Y
zE*Pw@DoqoNPZ&8yJML9*m)08}Rsw>Zs#i{-WcU`PJg4}Wp}maQvy(PWS0#<ozyJX?
zj7@t3JE2r!II=`M_$`F3IK9i22`bA-Tis*!4<tklv$`_Bc%HGB-gxRyP#jo_#Folj
ze~`bSwGZyRaCF<mFSxb=&>K3>>niKg=oJqt+RL)>0u~!%_SM2Abf2~RmPH!f>p8T!
zIEf&Q%OYZr2BDu)maAfYb+)y=TCo7N;Egn)b;W&MBJvh%6Sr!uEmjzCnl#(>!f><=
z8r)qpOvndQ6}m*^_iAW-Vva*;N!&s&1Nw^|yXR-agh~wF^(f9^ZrOL10hMj?djX)Z
z&+W9Ou>LP<tB#@qb``5&;}_IM$PpYLG*R=|sDk{y33-9*bqRQf1%O2rJnf0b671iv
zEk|Ehtx>X>nYg2$^F>C6z{L4*#(-9%|G$DHPV058VPIJs1xTy3kvhTd>6cmZK9eid
zF-WocvmkcSK1W>uw_F3}7>z3M8XfIe0(atSwqhtd5Rao!{=#|4IC<V+k-C7uC2qXy
zx)h*_Q*;m-4Obm|{P4?7zy6PN_V&mr*BGcwXRe(SR7lkviD#P5rpT8~gQ#xNQ4ts0
zw(cCnATmznmvDO{+8ZSJvnTvAQWaUoi)g6Z=T$4n=&8pa)5H35_C*K_>Je(M)mVvX
zD$}x+!K`>#%N1s-4fO!CE-haagTW@U!=iB18g!&Hok0HB`K2&=Sxl2n{!%8J2n`Nu
zWR*J@T4x}HM|;P`^6=1uh0YgD2*Oya?%#%Kzuf@Yk;qcL-cd~58n)iG_RvJz#mUvx
zrklJ~L&{bpIV6SRLcfj|b*%;ik<Y|C>rC!Ec-(|PX*R;IntCaKCOvuOR~I)ea<mQm
zNX`;8Hu9bCj!)>qkS8f*Ne``~P*zn{m*pejx<VeK1`Ag03OvC)7G|a?X7~m#q$FCD
zZjLfMzK4#}XvBFP$3U>I1Af~l%>zo5A)jR=hiIlW4gVD|I|qYEOykLGX|<ec$EK-L
zvr>pgb5+4^gr?4YXr3$`I=jZMcyV;#?Z-)C8-megqc8<B16aEv8UU<aNwEW1t+Kfu
z`;-PDQ1q#&f}mS<v#7SeEjncNfE4##J==!AMAH)5@T+Wgl?F9RcCP1<%f@^dcr*!d
zUqOA9)j@(|sR57eR0Z)N;F>Dc_<&_qC5kaBQ*zw*bSdHaX<ptC29yKRM#4Buml7>I
z;VK7hc`J<ys#v*7G@Gi*>n@j(YPXhhxtg5<Cr76TWS)b`j`f>B%X&$GqAGuu2DL_+
zv<ICYa4k;03Rq<=nNDqnX)spenv6XfC*dTN*-gr~4xxtzR`w=Gp0cyfvPDU+(ytw3
zbH~tHZ6!8vl}SHgS8yy#wLB$SNNkm4F1ay~ZvUpYg=K&wGDQ$m%tz$2Rx;CENo3<b
zOHLh<r21A;Tv7v*%Jes+OfnZH-84skI$W^a?CC}mIKyEW_E{<vD}a0UZBdYb@&i}x
z!l^#USETi1xWcFp;F1*XR-`Ev8^qqIfyj0Mkcq2<4jJ(xE1%W60^Kr;3pksjIKtz(
zPB>G_Ml*FRH>SjYj9Z)BDtJH=QJ7|yTNZa1FQTm8;QER4PS2)RVplD)85rYmF-sS-
zIyP>T6!Q5{g@1+fI!wfQDq3oaFSg1!E)S1Q)Y6wIvlRL9JiO>po%3INR~Kx{Hl6yf
zKw%m%kYyW{54Ge#`d|S)U=Dyx(*l>4DsP<^5O63jfFLe?YrQuslr>>unZEUyK-~zN
zM)TL^Rrx5&`pw^ZhVES!NwauvNAMrGo=>Qlmg-jbN!e@gPF!D9dJ^lR1l~b&dVe7Z
z2D(YkN8<{XNTD*#9DAFZimXE7U<LZBdJEN}qrN=p*5aco$l(*G8l-;|fUG?Cl};`^
z759}>afLJM300~CEGz)#qT9Iw7odpnv;-nZ(*1}uF7P$Do!84@yuM(HS>{IZVlWLF
zjVDY>?S9>8tf)B1Cp%<f3!dpWmqEn&--Cid;+n_0kdV(!Eb@qx_s0GZDkD5A@Tm-i
z3QMszvYbd~w9_n@TW49jnvQBnH0dat`SUt3T})lqv>4VkNJ9I?I+B_qhp*LS>3%*T
zgCTi94}$C_fW>Q*l-RM?T0}4_!dYf+M1N5CXK2k!SPvC&+#Wnmrgy@p9z9BswX#<%
zsBJ#cb||Q7cAu4O0e^@$03^QDm0q`?;>^EQAHhm~+b3w1+?Gontgc)=7hP0<?^x7r
zF~?Q_1AtRruIF+~THg1;?Ysa^qB7+`D~Gf?=(Y9`S9);z8m#7;Vkop!;$WFs%d*d{
zEO9kZnciv?L}{3XwtENM?4NjtMqYM_rdedEoYmXz*UxvHP1Bvw9nKdX=fq3Umb6D^
zwc|LR)<qW$irdjEUp@}mV~(-{TacTNqamzagbZQzj>AnduP@(~c9Y1zy##2wz&ot4
zuf+|t$0CNv&E}ppB~(GY#mk0H8PJw!o0hzOqIpamaP_HlL>CX{K2SdA;5XOV{@UJ<
zf>SD{7x;*lzC<nOm!*sOS|t=#Z6wB@ac03RPAs~{$q?3lib80eo2$Z9``ya>1vn|=
zB#_Q-WRs?;P{_`9nSx*4-}Te*UqR2&0xoEJkcQ<V7@>z`GG&mg0EEKKHIdqoQ;k?X
z-gl?DVwX&3&%arFLT=|3aW$;ZRl8V$p;AYovQID~!DpF6x(@?W)-$_$fPaf!d_uA*
zJUx^P3+0!6acTa}Qyu&t1zzWAzZBd{Yz2;`f1o8+z;dGj)1yVRT15{Q^I{n36dKxO
zp}ck3KicR10D4h;;k_OFifU*0$~+k?m@cWUHyZP@Sj|lqVv{EIYFkTj)32L}RB{w#
zIb=81C9EmVTNpA+(`C|u5hRrWDwt<F9HBqwIMZVkxyz?sv{IHt3Ml7d7NvdZePGO{
zAcm{rhKPN!d<0Bdl3stFs985Gq>tG&bGEdWI_Y9I^HsvnG%~ecBGt!$KWx=DJ3Fw6
zYLbr<^=tT)UaPf6u1N4a3Na$v9r^Zfs1$4}ONuA)jiqd^+fsqyvSjLmK;x3~(eJby
z_N}?9amObx?64bklp37hd(&50BZ%E<yjt;YXgv!PQwz88VxzZ2TvE_lTV0#0$`Xav
zifxi>NlTjGGw;U|A)<Wx7QgIb1L*Z8EM&Bi+|%MlZBMbdIaZqf16kb2(f(g;aib8H
zoKyl2EL<|Iw1wNt$#u(JIO>gRhGI<v8e0R-Kf-hY^ygppOS|O_tT}>xS`>%*^Esms
zHH#pCG|7tt9>hT!)iMy!F}{XBMOp67)tm!8s08aguCX>!k9aBXJ>SHzP#?Vaa?w60
z*T*qz5&sH+o1}H#P?PoK#aDC_TDh3Cmbrbn^^nLZ-d}7KPcj5<a0^oP$byjP<tEYY
zk)P0gJl<?E*KfMNu^=YwrNoaWL0#HAE@`pRxUe!|st$z*aS#Dh<yCk%IaU;2>^1kz
zwH(!1>`|E5`pULeQ-1)IKIf)Zt@qmkSzn|+=WVyM({$P{{qyzC%IZUmyu_S|)6)0@
z)|-17Ou*`>yw=R;{;1z}yQ|h&q(Jlh?9$5s$L=xcs~Ze&)_=b-j-xCE0d=G2bie+v
zY9r*~jT|@gw<{0&P+pnzaChs~9EVd(a~EatB3W;#(iw=}mwm*IwQ6*5Rdx)Dky}Xm
zw<u%~&zCv3Aok8e!~1XvcSH>+{ub&sbMa-)EELC0`#u*xnvVXvQMuQJeal^_(-Eb<
zh3oQfVYE1|^ScmE(D1@m#06-0w*ZW~T@N^j7tzSe7NjkQHY@|hk=-gIy9>)yton#<
z!;7+FYmMqw8x;mN9!}%5a8{o!64Wh^ok<docra#l3NSdBLSa!0NZ?}qI_(9mhb3AI
z+(|q*$hfwZGli&U)z3=NK&?_jUAeuQI!JAL2xO!uz-3`9>w3NWtm$-iRb<r5`_}<H
zsE=#z)_Z<le|Rqj@&UEQwPjm&Fo<t?fBjk(PA;=r=^2kvijbQvSHNh|D{MX}9xtIs
zt*BRaDbz~n<>;*7G14ou!4<Y%=!L?s-2B4bil({B;+Yg4hL>U2O|C0~5g4w^O$QgV
zuQsT7(J#Frf|YHu9ZIpm7Xyj_RyTD{QR-fe0(@6DhmO_z{w=9n;-tSZNrE8SXkysA
zQ6Sj4vc{@)d}h-FkQKF-EqF&~b-%{s5R70{n6e}^Sh9{`)V}iGciJDmqS%DBhPCt^
zFMUkWeOV+senJ*|m<AJ!=?;P_u27oi*HJ$dJib2mhXI>ijR02zR(LcqAMr^cO3{>>
zD>R3|(p%$CY!EVAyC?vG&8dC~XDM=F#uOVCwe~V?u1?Dkn=V(OiWM4%6<VyB10}Ry
zg~XK*u|iwGs@5&ft*4VkvXN41x{LzKU_S*drVPDji}6=VFJ*f+sW+jZI8^+z#`4h}
z)|is7=#o|%+=s{Rp&%y|f0IO|f&&^VHSxk(kF2Q&TO5OYnqf)qy&9ytF?J#Fv=&7B
zooRgf4qao3W;wpo9ks;pSj<-jUKofTsu(0il+#BvU}9s*b<j*MU<54|vT-4Tk%mh=
zOexR*A$90Fdb};4q!u8EMd;M)s<AnFpnN8DuNQ1-jL_DCs_NYw7w$HTA(nDm+YrPu
z7W1@k4d|stXitSJB^j@(^SvnOLL>c5CM6~ke$k5QSX>&1L{vEc|3Ksj)5wqN;!)$m
za<V_E4vuSyw4xxTJvd6iThl&R<8}jMWivBt94jotIz2NRXbG${%k14VQ#Ybu2D6r;
z-GcTKK1k3$P_yo+&O+?2W4vFGOGtUEmqjZk6m`W;tmk8CI%Z)`JM$t-uA$6%F<&r?
zbZMVe$#ShAY|4Jo7JU8XV7?fKquX36?36k9_3UQl<sSB8Pc=$p(NOIc#boWgDE0Fp
zmn&uf3g6}I$nt7*dr@L}f(VT@?GE00<88OyE-~~IAq27Drb?Vjyh@aTah1eqRAK95
zPNo9ib6Z1}bf(@0nXqY4FAQ=Z#@SrMepSIMh94K4rfP_lBh`w#=kxF%AP%7IG~+x#
z^uTK&6hN*^c@E~Z#My(~FfcbnvZwV?WAonY0KbLv@a*T~107s14^H<_4v)_c-@Z`}
zYq}%j$0u3ktbEMF%%Ng;WIKBI3sHsmabG&O<;OiY8xpb04VEIF=;@Hk&UqeU0H>{N
z;n%p==2IV}>==ZYtGhbP4vmy=g2a?m2Z5kQ<VVBo$KelB{QB#LN`^)OMj5?zS_Eb#
zX!Ue{N$f*#?T%1#=3RSfJO%!`{<=vFAskOoLTymMpzpNWE}A2<mRf93EtuXGh8o`#
zX6;?}bsjbf3_EY?^X?hN!I146J+v}85+3ax*GjjLxByL^S8;OVCnFw??LOP$;}ZsH
zy&*$cjEIgCQ#J{Ppel{gO*XVp0Jj9{hA2M|hBRUM6L7zv*$(%5gMO#eV9Ha&xW%xd
z$|wk^Ywj=BO`h-yr9Hz2NsZTDzQ;Z?9I4J7{(+-;E(@ewY+1xNk-qLF&~G|k9v-rQ
z0rm1>(3bK5pZf6ksTxC4yW^cA(O=BfXa5tj+-T`yATE71OY34#A1LNYor<bS0o=3S
z?}`o4&+9&QYF^EPW$wSixyQzm)<tQW-MS%xIJwi{`+14pRR99})Jq0}S@@~xT;9!t
zL<#g)M(+;O7>y;vp~JINqA5HeenhE%Sj*bZ`#zjt`o1a8GXwbL6Y*eoVDLsn2Vz<R
zd=!k)3K+@^V?RmNn_?x^6jtD8c9!t0S}e=RYdjS54)Ml5??grD1T)u6CR46!xj_>*
zC<jtGw3pq%%1o#m;rgQW!^44UbLFx3DA89EPkFuwenXyzHAH#0e%rXu5%Ckyy=YW=
zJa|9h0Wk(y5@Zj3{~8Mn?Yu0|&)5EJF!G(@rH=togM?**V;N<PJ-y~ki*?%oY8WH%
z6BsO-M@EPilSz<K<TW&<hQD*z9xPu5BY7o0w^Td3^fOH6g(2cF8<7Y?O!!uafa<Pu
z_P~rh;J7@_EG}CF<+~YUg?5N@Rfm=OsZoALX~|(H{vt(VUynOzxNM)vY_2CSUMYAT
zMs$Ic4t?~I>Fb$c-J>u%@^4S+rK;`y$%BeBx=+ztaK~Zh0qRF7UI^8H(y%I$5xMc)
zFo}ptzpY5+fV9?8gmI35eG^*^z0rNKkJ$=)V@n4eBXF~_*nL@V6G$efo^=7QrZe$p
z6z#6F#fHDRse3s<C3p*KRA-ZBaP70g4NYDtb_x73(rM9KLyyPHq5rMZEojo~#qHHl
z_ZQnOm_};(Hhc218%zOy+XDP<&!&)RI=`Y`1XUcPFd7B7^&CP>theb#@-pYx!UbB}
zb5Tf)ph31`r~~<x*e0iKIgj&pKI>sDJf7J>EZHlezR<1QQA5T&x_KU$Y((EmPiyHr
zR}k@$ZiO=uYC|y9s1Tq-U03E?E)3Q`x~drWsx+CFVPe*Ajr!`X>Ee>ANF>B6CWW=A
z&X<jHEB6Ulme$iAa(D$m5Ld$7>NzjN86G7jvU#CmXt#X#%Jmu2{Ilztlu$^TWwPI1
zl*3sji~eWVk<cY~?vfE1_nvHcEO{;uzkvK6{vHGfJIv*sP48rYPg8Q$+cU&i)I*jh
z;9Q1c39zC}(IlB#qIU|O8MMqImal+k_AFXN%%xzyrpD<tDV$1qk%oD(C0*Gn#N*|e
zIC#gTA}$Z6pJ$ua6R9C^rdrG&zQK$4E^2}yyNIo)AgW6CVUdva%6T9g<$0-nUVb@f
z7-msKCrn<cMMF`Wv{>L!fUgDvJsZApZIfdfHr*Fp<K^CkxS}iErmKN`aJK<^VhYeV
z{+m_ue(-Azsf?$txZ6YVT7_PlH!G{=1F>~g)koP(Q(IDyD2_lpkCaeIdCM9}X3ZBu
zA$xgb;9~J}qxFmP9<GpOjC-RIqoUbJ5Y*%S14QfLWt;}&2_mdU+_h~|_{C0j&~bA@
zPw57S<^f!*%`Qxm^&`#;8NAk!U|5BMS!v$l>&snWWU5Y{Qk`UdVm(&DOD&05G1S#L
z1Ah?>drPB9u{$_iKl8HAHdXoToHx@H?Oj-=j`_)Dq;Z*fqlxC$?5mDkMFfzQOBR<C
z<)_k{N4dK3*wu~8?}(~GUvaeV<&$A{Ea&-8Iq`;#|BkXST3D!2PCDsiOcv1=BWgsN
zhBC~9=5k#KZ3+j67M3-#pL9rwQ%}2El_g8!(w*jvCXCIJMguTAKP*&Wl|}2U+&uT$
zKm}A-ve#h{m%;FgX~{t75^|h1jf34yQE_YHZi?lh11$MrG%v@B8)u4$fL8C$3yi%l
zowp{=cU<{_UZ|+V0KRCFpfV$l%9~S0%|Lqzdt!LCB)ml3bmXGdU1n^d;Kezi+=aBO
zZ*`hZr_m^lZo*x9m<EN1+fP|YNcd-SyH|%lPdkmx?TXkbDiSfHI7Pv$d8P52qY@oj
zxi6%;Kmq2Qwx6@hDR)r~*tJc(VzMiyn<;i;aH{1%eA<__=x#E&<0nEDY-}5u@6t`3
zt~V-uLsvZ*t3kZ}0a<xSu60m;io;ShN)I^Ex1I;enZUQ6+yj*-U17_va@ehCH<BO2
zd}Owp?bo0CV%vy-=Ec(yC&@2uY9(oIyQ(TliRs1zh2V(`J*DnP+`7I^!U;X%?ww^<
zra!NFQL6-SVp^5S!ly%}Wy3542D+t4*9NJ7zZwN<xFAC|Jh4buFPUKTQn!nINj8yX
ziXl_XD3MkghRV8v-zecTls<ZP)4PktzgfIzh=AmNed%7SyNYmsN(>^#)PV{HE}rMD
zowqM8w0w_Rx;>a{#<gO~01zm$Ma5hj7vZgBDvf|y=jIr0SrA%cYV*b~kjYR-@g|AK
z<kWnsn;uvaB`Yrc`38;I@G;0)5+WL)(HHGJB^m7!tA?MHT}>lBIVFX)__2!5|Ea|e
z0k;`?MB)H(o?;wuQkv7QNEux$y5bsIh!(9ONb(iS$meWXzmrqGdS+`1sEQ3LFSRz6
z=LMvIQ|Bf?-w$Ml42_krd(u@_r@6dTax&8{pRx-DMzODfKR;bAI$LIW9$v7UGs|IW
z9O~+>foe^23FujwkeUxa_c75gM%ei_oyJ2Z4VsBR`M|I`8yq)g6X%9Y7diwy6r+)<
zT{Id6uQp7;l(+JizSBZFAC2vzJzu-0eaT=bwQVd0tMGZbj6$GQevMUjdj-QnPqlw5
zkCCoUxO9uNk7Gu1cZ-5WR!Q#QPjuF<kPiFa#{uaB{(%v+PF1godlP({;Tc1B&thXQ
zN;e9HXM7!&l>P{H{<2J+YwPnhTzk;!|Ho+cS5=WK2)tCyS8*nPqz?1Wn^|$~JiJ(z
z!zNESWNj8R=pffDPV>rE_a#d}(ipSr6deWj*z5)yfod7FIfAYm=-biRs&_YqDfxqj
zrJ!LcZNWrL{l)s)8Y@p|&u^rVc0d&w7qeP-!*T7rOkrcMrO#@u5cbtGlXzP?J=$#_
zGe3B;^7K}UG4>;84Q)qktIxT#{59hwQ$4VA9$lSH@Tt;B!{&g^nPlEr<MT46mY9|l
zkhF~RJZ9l8QYR^d(L@Tdn2&rdMV;y<#Q7sdyMgL<CDPZLRvo!mB7B_Pk|aDzG)>M^
za_OaV1T^vWcxFq+TxLr*SA*265yI5>OAO+Trp&}!;tng64q7tOW|-n?Q}SOl)zMa^
zSIY6HMUh|Lsqs;i->l_r<(EqJJSC*kNW+ZhgLdedbkAqOOmV@qc-5vq8y%3yA(HQ6
zqo_jDGD6vs0h?h#(UP*%GrDE?droah74#t4{H8d#MM`W*9r8q`xa@vi?XR+8XYJKG
zvWr~EGAR8tR1f=P=4m5P$Ft?|EDm~Q&6}OB*im^X{U>L@yTL;F)2X(~$kU=+qOB$r
zs%sFSJF7+|jBLO$x#Z=u3Zsfyt(pd;H1$Vm){nI^eTW<qMd|t6oFdPxVf4js6v*mE
zkWGc=ipXT8Qoby4AF6>2J-HKwQIS;Q3tLDS8S2rW&oO{a!BL7ItXdpt6Ha+7X^l30
z=13^EP^6XTF9rTj^8>a;=w%B6lN+CgZs3}UB?G3tAh#VB`juo0StWwQ{Bktj!Ek>@
zJde4^7s>i@saVfBF6rOZ5A3|6cN4RK9I}OqSqSp39JMfBe>4iI6l<rfr<FU<8Z}~y
ztjU}dtW@v8O{{ojcZ3I~pNDuQuQAM=D6wR#I!e&*8|(E19oq_ttO<=-oEHpF3nW9T
z@``7<7T~_hrBp8{JuzVfo-%^^5YC~#-N_QYM`ARq4k^I@FsR!??5m`uvC2S-G40I^
zf-1;<^2}f~xBN*d8NQWO{wUn3P}9y$X57Njm&6jcFp5Lu5mdAL?tRW3y8<Iue5r#t
zw;oIvrLoh^YzA61{Uk0+<-=p~T00X-cUDT+TdGXSr>EvUEHekN5soG1wq=p^DWbj<
zAZ=mwEh&zdC!)2cuJ}t?*nhD!>-M76Qxkey(q69?(oQtiEVNsC4^W-td8Ls41$BpI
zc%OIy3rT-}kJW4bUdy+BGH%N{iJaV=e_5WA`~J!9KS7dvN(cY(dF_j70VGhDMzpHf
zJ?iLms)im0s(yr=h?VO>cA+mqcMKN*;7C2Vl7-6qskb7wL)4ysCm$^?l!y~GjOTam
zC<x~Gr7p`f^sBaOA7d;v@vnaCQM|NlSR^bn+O+899NN3E2GZjAt(PplwfMV|;nqTV
zh8VRXhnET-)~@}2ijD5VZf`DIQyV|;(ex007goi-^_r<nyS=)B`09LzVKj_TuCy5b
zH=3QFy?yz%S8jZMe6qfg`4TJhf{{5QK$CFMVmQ8{*?5ZH&TRjn**N>Qg5~&spv`#K
zHtA1nGQNVvc%CB+;dRQHPRbXy<TgftURatdhURi(m%4tekQiCnP4c|Lm_f5NSh|<;
z0}hL?u*JFeEqb!`joO6R_d*=mpmz4<)Y-ls2b7c*XbtfkC_Txm`TAa(E@tzd6S|>0
z!uHEhnlFqr;-`_AAstMU(G>#a@ieRGWE_ueE#*88Es+|<0>)Iu?a6~y07%orTHL@&
z2oz&CD6m{-^;|_<Lzb(hocm-SEakdo#vf94p{P*2z{OXn&|{IFL>TNXn?|oztm&~_
zL48#D%>C@rrQc~?v|3E2nb)%5Ftox9a>^RuBfZ=Kmgp6Hm*rrO`Zb%=BxOat^=u*!
z-iiTXtBbb{x)g&6TShiX=ANQp!7fFPt4R_s=JgHnc59;~R_RA@Xtq&^?s?o@Jhs?%
zeqWSe+|f!Bi#i|Cpq$Z$(QYZ2mj@n)3H)pa-@(hD3y}K+P{R((A(hK@0#=qSDkr6E
z(>R*mab8DNo0nzWD%pR^-s2nZX>F@YBVN<%uCe>7c3$PKTVuzS@3vKU+DGiNPlGET
zvAe7xo$tG=R@+gl@1~E0&XQfE3yUjnA{M2eq})%xaUS-|!+!ar?-zsnv|Lbs=DTKV
z!lY_rFH97~&Fa%RoO$x5MK!K>{E3g3V=U^KC?HhTj&I!A&MY}xg#~Y&vHc0Pk4@!1
zgpEJc_3Nj(InC436_tkaWl1`jS0Ttgm_&;V+ZT~CBYYAIhd9TY8<bmW781|O3swV9
z^jyfWNR}kZE94+PzEA$)5-#@m<MIxxO{ocfkC!G4wq$AOG!=>7mQ2_(hZV94%R2!z
zyRd5Yk80ciBU_rOxHo=@j?rw}3DOLsz{W}M7tGitJH<f%#<lz+%C|Oin9)$<iOr=C
z+ml<}V}bPbXKP(#*>V``*XV`*&`Q7Xu(B2#QQ?^mjkxH7iZ$@DbebvL_N+8~tqJ{s
z{k}Y%hC?*aA=^Q#1%Telc}={k8Kzw#UC+!<uFyt}!pUIbFlEQ><8Mr7R=cX9dKv$j
z;6F)CLB1nu0gBabYlaOf<K=wkcr-rtCunLrK#hf(g&y~-c&%sT0*v#H_Yb`5)FbGt
zl#_CF=_w3(PCNLvNP>E!;H-%;s#iG8%}XtSUJ<I$)`eLRuM6|SIu2G<D8oTsr<hud
zGU<tBZL%SXoO!fj<jKj_wa*r(EE>!7rr&kjGDqB3Kf_VAcl_a>?~guwRaC0T&Wo+7
z!{PJd{-pY-c!#z;?dotHO5K^=O@n@|)v8$r2#5hgTzVt;gp$N@$H-_Lj!>1xqPJd~
z!44VrOs5_<*Iap;PRr@I-|TK`ON!IA`TQu=o~~TY1S2Jg=wo{26>>g4$z4RbO&gj&
z2P5ai6OkKn^Pe1@8o5PIP@$i6y>tj1c3lqBxNqSYXx_pYF#~6GqlqzPe@ilA?L_DN
zCRjGl+iO_SkLP97BgTuDJjLV2SvU)v^YmiFQiifE1D#>~&~>V?O7Ef754QY~Bsokd
zQEb5<c^XV8-K|KNIE{z>+5m7Dkd~Ye;>ea16Wbg5(KWg$Bz`f2AQ@dx+)0pus1S|f
z8;??#XZ3~~;{8b)<k6*r5wImL{oK1P#Pq^fP}hRYb9df8?_9LnZhNPRzdGz!*Orob
zTtobTVZFzt?77?H55GiTo&2-=6+S!Z(i2-^SUJqTI!HkoYJ;giyt389^oxBI<kTdW
z5peQmlFO3%N=xxACnX7H6mDW>?@rB*Um~5Oxr3w$rMUlR$BfayXq@Mk#6+vh^5C+z
zUjnuy@ttm7094DCRC25j4tq2*O@2m!(aadRqP%Dm%~M5C45~vVe**dLGs|u(XW&Fm
z1bY(R(Vj4GU(0A?r4+uTV#CzCNVPn||N2H24zJP;n?~o>V<|lHfG@fUs^Nv8$)L=L
zmsvNI8lY<nwx&s0#qF!zO6(e)>uaUv6;6HuXC0RCa`#6oTx`f?(R6?1^l0f_8?x6=
zYo*ces_*-6y<r?DBRz%v`8d}Gz87fsVWTYDU8Tcqc_+EP?xJ+?Kjm^tp1@Hw2W&$d
zKBblOI~ykDzyGj8xpTs7yxjfCtFnI7D+wT7s8g(1Q-Clk5;fqS_h>p7?wAF}sn=fB
z8mrArGYHpSmZOC~Nr^y{xJGqq7tW@`Y$Nm*mJR_nBoO4RV#bALUG6ah?BK>3jAf&l
z52@mGwy;JLsJ0x+MdFiRFqH0PeQS<4lnq$CIoMdT|Ewsye6KY1{aC=3Jmq33`b*_;
zHTEmTRz`lc&ZoeA1Ls$okdE}qyH#;sAVQA!dLzrmup-v0HL8$)Mcgj6k`jTdoa`oc
zq%U)t`$^tbt|>1KkkR&OQ=4*$RggcO>I=i-Xw_Mp%QhtPLaXxtbczF?avjDX^p~g?
zG&ThU#J)f_ezBqqxanLjf<9l4s-;A6t^dlOrsP4<$D}mrd>UqbmbE*Kq99=@L#hnY
zPzc#@zus5{C=}MPg5w}HK5~(_P0Y3cWFw3yu9HlRK;bv+)y+ehEPBO5mAY7w1-+}i
zOW@;3kyptD+UhIGvy0VS61(c})lPcO?uSM>GSEDi;X+?+P)7<b9HOq0>o@zg+bSth
z>YJb-R2us8H2hc4@9c5}%Y$<fs+6ua$hf4=82HIbhk_c-$5UO0D#m&TJ@iR)q{12<
z(U~eGZ>yq_KmpNsmi226eEAm)3Xs0lslaA5muK!s!X%z;C42cxVBN`ylvg2D32HT1
zC6B-N*y}QS$leN<-Wu+s@?!<T>CJ>rdPXbjzJ`ChFDsVh{Gv(!bFfUqSs;<8Uwc~9
zRFZE<4N=}0$2r2FcB2w9Vp7VemjS;5+_@f#H{UlFpwIqbP&uimT9y`l97h?=r8f1G
zNl;X5Sz`8-TdIDfVD*vQ+kdq5G+XG=WGdLnrah0nVk*qFvDs+#mOk;RUvZsKB$Ts7
zz8m9Ou0E_J>g>|s2yX^aOmGz&q*76A`6YmDa4klUvl0Gyn*Ug3n7K0l<CzE`04syE
zcv7!(3e~RS{9XntOLD1bXK!k&Oi4N_m(Ppt-VR=RKbHrT%m-gWN0{<$&Vmxk1}k>z
z3J7YP=G1)7ek8$Nt@^U2^5VvN=;Aa`r&o@?{2HVtu0g6$o!_Sj(ES_F{gN^I8L+*5
z=NSLi^WCIxnyT}a3w50)Ypa^W$P3rL*>S%5<(L2Z%Gp1x{ozjJW#=!tSX>AEk^<2k
z@7eJZ#?S8UAcH$WW`^EEdg_r5)jJN7k*+2eE!EcI3|8-;W}7o!sma?H)sEo3X!wJd
zpKN46umsTd>zgk=)5qO%xBmOszx|%%na|Tln|;duw%gs^{5wAHw7Z?{FPz&ieuvK@
z&HMzq|KiX2>2{r21|+-RdEWk}17QE=Yq$OAi-*sj<U`Tt=;kVkZ-Jj|yX~zs8E$FM
z*)LCCnDO7tXo!Vm4bwPs=Xa(1e|LAcWdHB%cDA>_=sermeZJdyzJukuJKgQ}7f$<O
z|9|$+lfP^&(qwB8Mq5F2?ac48%Q)J8{0JE3%@(kfqcEEEoJBTneT`4>2;`Z}qs44~
zhrt}9xqKD_gUHgM4?}p#pOS5<ERzQQAWkTSVjAU-fl0s<a~cj@VwnQU9n8PlN-?uR
zEHo1qgqEg|)ue_njVle!HIP}1k}lp4#Z#OpR~nE>>U;ogK$5>oTA0U===vMKx{EZZ
z3(pSBh%0}PLbvXDX!!4WsK2Z}@@uZUMV5~0o^wT)+-BtQqv6y~Q#MTEO7`W(<E)Le
zgxGUfSzwuYls;AsQ?u>y_>*p)4MheA=j^>{i-Cq);W+dMA<XN}={bWqp4yW4{&*Px
zuoAUj1fEe}M8GW18vG7f%y&GP8V|`O0$15G=)!oiG)w%s7iI}%_>Bj6JyIOu55Q^5
zK%}x|X8vIe$9abeH!%v)iJ#122%0|p4aHle6!u~qPMD!#1$16)74K(7tE7J;j|>n|
zu7fEwO<^(f#`n0#5_`=6OA*RsaO^b5ik$!%?}$z)>?limTt0L~pNqN0qcj|$OwBw~
z!gPVrGI?h2u^vL#SluK@VGwz|KE*1A<)H|pF!MYf{XwtHFG2JbFTD*>mZXnBZ5F`U
zM=&&10Q9@MwXKZHs?4BojLIl&;A7tyZXuIRjyHXE$VRyWCiDxlv5Z<QYhRt?XRygq
zjIy|!k$Lv>^<<A_a_^MBR)muD6K<}R-NY?`%S1T#C|reB9ts@x97g)2rvM8dNhFS^
zA4!X~iq)<|zmztqNOCNLGZ)OkG}Tc)`Q-`YLBN|XMk4B^G{IKKE#bGkHJ+3fT6(7+
zWA8W()F*_WgIjVHuqG$?_S<h+?>Ry1JMZmh*oG{#(WA@flCy3@2A73$WTR46Q1L;6
zqROwgFr>0dLik@Y2;HKP7q*xEu%0;}SMn`gDkGmgb4_555V>2M2;+{_S0lFU`d4fX
z8{$(-t6;TZuJ}Z2CV$SjC7ggakQ<zjPdpatQJ$TF5r@=JU|LCoUPRL>#Uihufp|4G
zP64<VuWCJGkVg3<4Wv}wIt#tp2OT(nvqRJ#<U22DD4mpX{_0zS`K!MYFiQbyJyKUN
z*Jsc^H-P)My&4+iXv4PG(}B$ES?6a7iyZT;JD>I3(!p7AToph_<{ITD$G~axc*vaA
zBAdoywPvBrkfqaQ&Y%ku3*!u72Cc<*Nq%`J2l|fqzNm@OYEyZXs;MN=$I=({Upzj{
z9GFndPjU>#KaQtpf<fW-{A=DR#?(P4!lo0r6SrAH9+03m$;)MdX`WH`O2S*HlMVlG
zJVQ&E-g1%XQ;J0-8WA$SThmrO-@wb^KsMY(5HO11nk%Jl?zIm;?XasloQffk)tzhd
zm_%88l$s9cS{&mmZ=A*CqpWh5h_FrPE?ziM0HO{q4{l1|*{IggZzwZD(TO45MoIov
z)}s-Rp521d97HVp^UZutg><ppSsG&z^7#I_o4XX;3fT`2=(<vvR^`pRAfC?BIwd+h
z?_8J_+>#j8je=~$g%7P73E#G$9Nz^Ic^fdXwcrs>c4&$$Up!h3$;%#eTNk)Y8LZMD
z+EP{TvLyQ-cR1yd7jz==Qn62)Z~({{4do1yP8*t<tgC#tI8ZaH?i`}CA^I@oW_(8c
zeT63IcD-XB{a}h$cnZ>71$Tfm2F$p#cWYy_YnpxVS5otoYuo^J-+yp_i8fqzJIGM8
z=o>{<k?V&M>I6wa=5zXbVT%$Km|8tS$&0Z`$ikBaqr-c8acqd4(z9M*a_3$-OxI&f
zj%SCJQ4?bnsIm^7zY2+?(b%b*?u?RA{6IO1rZSlvI998nC87PQTDHI&IU(~@W0Cb!
zL)TWa%thUb*g|C$^prY9%}p8m&EX>OirV6u4T+}pMkT7-2()&f<pnUS`%AXb$hWXz
z*=%pfgp@}3cJM2rRX*AhEELV2%LXp;WIUoJ$~gliOA_%|v+z8@rq!IT0?7^jwS*av
z5{wMH%wE*(!@n=JBP*T~h{G1E6vXtS*F23&%L-GCcGIaT>7T5T0PzyQBGL;c8)52O
z_$555S>%Bq)m(1uPN7T)<c%8=B|ok@7hB44Eduv9Y?IWuxjy+O_cUb$?SJtgjFy><
z;OAfAysm(-S)5_xLjip3@I_^fw!`biVOEDYk%#jjzGY7N`RP_bnq0+8fr`|w>IR%^
z+Hc$X^M0>JLEi5`Rn(OA%eFl$RL}QF4O$|H*z&NGI--@}*XLbmb0--XsjoDb{&PBK
z(};Xl590zyR#XevbOBY@dD^W+snS63!rWI`I~?)c6`DUy>c*VWL!L912#Frns!;b2
ztlR$m#bq0lPq$%Z!>GL0OD*?$>k8%QK19?F7mi<R>O){OB*%)Ph+Jer(s&lIh{OXe
zv5nRniklZ7At%O4mU2z_D^FH_%<nA~fs#=ghT*Raz0z7TpZ#8rC*<JI>1l~gtu_U9
zfVsbXs&OSH1VFudwBG6&9L!q`>sko|)Ou7hGMt85AW)JkUT#N)4lknAA_LM9Fq45w
zbYalWk@9>O9_oI};%$j^>IP@0O$r8d&V5L!NqsJwBEg8bl;@RDn=cGgMG+lr=i!A6
zD${8?Et64FgnD4hvLu|=bC29R?yhyGstY<7(wm_WC+LS%O{Yx@fz!a+Eq*v59i1of
zFi6Y9*7ep%q2_WLRqnRuay3MvaADL=lCszz7kPL(b@mp?Q-|PnWg-Sqwrs-{Rv@2Q
zQ<>$`m5}!$g%vuLygCv`Bg&0RQ-x2J7_u2K^tVM2{ENpz(n-BZn&pCMFB&Y4%^W>=
z0Wm=Y>6S$V#9&BejW9@bFDh={I8D(ie%?SYI+12f7+XVjHDr=tIE$MqwJqhd_$dtB
z0yi6CL4Xa5At#A%QjGPf8wBOeoXK3kP?GaE;%+7h;qdUMws11?#%1ihGjsjE!?TRp
zyZBCb)Dpv+PMtx(mjDKW<5gE?&Nu=bFo}|wo0hI>kb!Z62)09TKMs#uSFtqKj&zFG
z@#Jm!B-NFIc8a@2#Qr0wvCCa1RIkT2jS<?KEtrvMyTDABvrUQzvG7Eab10_Mi`4YX
zWM#(E$%t0i-crjODVvqDN`@ivsBxj_mo2j;!hq{-UIy3oZDID>C~Wm&<Itx%ku%Hi
z+%q+W5goP}Ml8-x+>W+E&2z_`L){9_h9z!vsCI(U5Y%O|CX14uhA;tiHk44u1lXfn
zn?4j7&WvIJwR=_Wi*2{vwxrw;*|V_Fren3*QUM_b!m0geRIhe%7v;;(L7qZ{XAObV
zq0=-F6CLrz*qSSt1Sr_SR{DZ(6Ae9b)F>Pvc>|yI^eCtq+4)hXUKG(6Ruk0oOyMMr
zA01$Rn#GJH#vYIH#ezCX1z!^IEJ{+<qzA89l^{#4@w(TlI7%KSeLd2uYjLgA^{D#F
zR#i#)JXu*j9Ogy{Cme%{&H-UDl_MlsRKS4K;(DWDrpB$5j&iX$5DAC4I-G{0*0tI6
z=tJY*Dxxrp5@NeG87HO)B1$~xuE8K4*_D!}W}UJfU5BHf&U<SLI=7hRk8&=zuq(9&
zKkLS{=x<g1nx@4q+kgMyw#S^?G7U{kySWmxxlgYNTWa@EzEM6tv|yfRyYI-rfw?=t
zR!MGbS*@5ITWXv{?Bz}FRN<cOy?A}F_k*|p_VwG7Q-uRC?G{vsXs3Z;<}l;N1@oyt
z4C*xxJs-YmeN}7d7tf|9+2B1y3`2T%uULgVK(p*HkA|r9dfw?>ShG<ICvBoqlKoNc
zS)O(pjVe8H5oqV+Ms5$qYsCI31YE}G1XZ1-{Z5+&6S)aT*=4^goahZz&~o>1C7kUB
zN%2C5K5_<kJ?A}zMYOE$2U}cR?g+lYFM>8I!t1_?UBgd-o4afjL(c-w@7+?^$?Em7
zbf*N=-9WeK4NC}J;qPmUSNmRWvJW%mipzV?{D0GH41Hs9@m-!}mLjnhLeo*uE}bMI
zVv`oQ$}}jLr{Ofl*!3OFbX9m1`~_d&g__egn0|}_pR?m_9jLt7W}!iI6KMxpnfmG6
zvPno0h1@WX<3cFGa?O`PhL%7T!zg3ur%Mp4XTB{Fx}e}^KvJS87J<5kP`Y1xK{4)|
z4j~)&tXFTpETOpVC6UhX4!o<IwEunsW8ZA_obIy^D+SxWr6V5$v}N(!n4^1(?_BH|
zWW%{h2*rEcH8e6bkPpe?s|Lkuh2pfm*tSeIe!)1;gYo+!E%>m(i4284-mx}wXEviA
zo4B}3TVc@#s`liDvL_|~z><D?Z(e%u36q|7$T@$bBqsDH*sY(B#nNx8Ei6`(m7`c2
zD>7R8T{3g(LivJBkY5~~lJkraMOoJ+eLQoungpU+BD2uOJ&?i^Ln(w;@N_hN3QkOo
zQRU%A1uF8atrF$g6~=W&;zYd1=VaY1mWXDU@N$MoH^7``0lRzPjwM3KS2K{LSI|r@
z{_6%OXL@F`EJ*U}xbxBZjL~ZhSA+yRw+zYcSD<}Un;@r!=mvXHk5WFEvoJ6GMP2Kb
zhhOphBm<yv(v8v9$I}qQ^D)bQy|dvZ9&D(gM0SZWTuq$Klzk_WLvm6=p$XlqszS2%
zi0Pah%+G7|wsujFPHlY|CTlUVGa?Ahljl3n+k@?z%F8`#lr)=<+1{oS;{Hiz@J;ai
zdCd-sih18$hUt~It#Ru7shGYfRDb|~v22FjwTFeF6!jqu@%o+(2VWM30uMj^FlMlO
z{m2*zdng9OixkGgo@+xIbVuW_{Z)p9eUtMz4us*lkd%=u0bZz;a8Oi8jEU9IhV+CD
zA79XzEK$*(1kbzQ(D+1{q~|*(eFBCM&X3y`F)NG>`iz32DhlwJPf9^;Y`iGvM{101
z$Do5E456g;#+1%b7E(_kAs*H87}4y=`q5@A#;la@U`c0|!GyDVooCAEz`qSkRXTn+
zx{=7EBO6`P@+ep;987*ZqaMX<>*{RFn!CGSbJ_$%f(y%uRl?FykfofVdoPcD9$aZT
zyQb%|_&pgl&9u+dQp<_-Yqb^fTWE%1n7y1QFXx7TJrD*jd+*+OGv}9IFu+gh!1s6R
zNzNR#ZZ6@pREpERBz9o9nFtH6e~TWwFp3hUugsP!Xa7vIty=99>*h?J@ZZiA?z1>4
zKu|XZ080?3HYQ0BMC^-yhkpl2d=qU{Xzg@mJo9c)NJDhQJnH)tth|iIGL!AJz?V`F
z8fR+w<=}01pZC#pA~<i9pXsHa1h<m6gC91h*M`1EA~%~Cma6`WY1!z?fK~govRW}5
zAMvOl=0mT)URWdNRg?1jkn}Z3`+Z3KTBKgRutD8Y*eefi_+|lY>*d`%1~{Z)+7ke0
zWN8p&D5X^BEr~KRTRNs*n#Pk(yCR%sex)KkDhM?$a`H?Mel7E(SrSJR6--y7fW#AJ
za54bAqSH-DsS8*<3i=h1{8^j8JZGhiV$h}j#yp7!{MMr^F8vez=N0c6NXnE{hy5C`
zG4tALwY91IdQkzC42W37)CoKyR4sMVc#)uU3Fh9d!*K?*9%D?Eg}Hn}2VWviJ}6rP
zFFb*HFCN;_r;9npe5x}Uad6XgCP_F#tcBH}SgO--UgdNJc#K6#>`@C!do0>_>MazT
zqcPps!QTpO;sf+%AxkfM%kJ6XXT9OHV8qU5KG5<A`&v^(g|G$I6tvlJo|;W{s+!_A
zh`6P~D4io)t_-xPc9lc46bEo2C^@W5KRfXkDY^>c8ddC94c!MfJbe@3=`hZ0y15m$
z=**As#vTPLICAqyM@46~ypAXwa9a}zld7Ha+ak_F*i~#Po62uok0x?m{hfAI+-uY<
z5Do0XV$Q;$vVADSB-(+c*<w7lHvsG1&q41J%p^}r_8}}jw*us{=(XznZM$pb6{H+Y
zO=r7Fo2g>%ETRGM3p!iKQw(Fcszt;+*5arbI3;f<ub@mKjF9cJSNR_eXaP5-uIIHY
z27Q@j^R&0MMP?20b&!mw@ePLe*!pdO?E&T5>VC7k+u3>kY%3D>CZ+j0DDECJ7i{rN
zFs(3ZvB-Vp<WDwSzpWx{e2dp_fy&$xlC~QSXA8d1vw6J=FJ%Wn#W5lUkVYQO+-d(m
zof5EOuFrgA|I?_BFP?+D2~~k_ZEaXi&vMO}(%y>E*3}-{tkXh&Lfg^ZxfkhqnfN!(
zi&qZD6A#DX(2p_}?Jafu;gHg?OwbqRZ^3vBylv`qzS-@RHFlaMF%%E3dCpm;hs-Vu
z7B>bs{M&1sYGvXR$w_Tv_EkWQzILVjOe%s#;f#WiSAt{p@dHVyV)xb^eW$%-_%YpV
z-e>eLgJGCr_JPkk{$&$L?so=O1r&erid_vN8AR6Ds+N0Nk}i=xwXGTyAU4`ox#Wmp
zaoag3N<wT!;CvP<)MZiIbLF<ORr4eP`NLIQ8)+<LFEOsYcvXKd!119`t6-NdmIY_8
zpgS0r9gb0ubf&==6_Fy4y8)7l;TKhxtvZ*QpLQCX>r$&=O2S*832$~38Z5%;$nhOi
zOTZ?YGS3MV@L95gvQ-`f`^gmNeI&Cb<P8W}7S8|_>~>fbEgMYUFHdNNmYMBln@%Qb
zih3r+c&FLH#{}`R_7d#1B#1mJFgW_1h`CoRZ=Fq<jiSNlE+!`T@y%tJRc8FbLyQY+
zr3sILsrTZD-E^%@(ohWmY<ykZla<l0(a59e*XND$)nFN++wRULP5&vu6DChj7$}}*
z@w~wmGbQsLbKROm>Zjc{dCGAW)L)%v%x*;%C>^#K4-KJNPS}=^h|3rl1k_wkfs2_$
zOlBSi$crzF^$*No1}Fad74yB#h1Ru*njKjTUENEJl8d~@kX_o;@H(QF8x~99mR{3k
z1<JT>epOMpsmO#{*RTb~ZR+X@`7{7t(<l7JX8TGPM!V?lY+fUoet--yOs1FAacG_!
zUZ@5wH8^Cy^4@paAHJ%;c-1Jo0V=c<0uo(lS4hXX>SdLCq`eVZOF2RXY5;^k^?WK-
zzac>rc}^f2;(MoXv9Zz(3QIW6gt1S0oAMA`|3BdKkpJT$|HniAkB9sp>*fC#<zdUp
zl76h5|D)4>*6kYkKenO5L;jCHcK#1a@!^u<o^pAxBp-C4qbIVE@eO$FT?|j=jVOO!
zDw1!=jC!e>>q3c+XQsGNUMaD3EwKjK<p5_ee++o?2Vz8%K!ycO;D=AXL0OFc*UW(r
zzTaH>nR9~%Wswq_*B6EzFo=*10+@C*wW!1Y!nXdGUm7lUDbgu0OP6>$jc<TEWuXt{
zJ?K|o{UU6j?>>0%*^l>pR3AP7YyHbFXruws!uz8Sb$EXnWIicfM2F3<z7nl5s}6HG
z(Cf)R-@l=RWY8px4?W70ASLArT|1*TeLSY%&oRG2P~8Y9)h~fS*kAd>;Ue*Y4mhvS
z?*P_w8V)InTbL2l1_I_>wd6Bz{Lv_+M4jL9J0HBt6AMaC`sgK$-g49%3(h-?DX<Nt
zXUT0?_%u($$;`(&X&}uf4?|Nx_y$49U;Z!1EWAD}SPsO)Ncf!LTF6kZFzM@ng2y^6
zCQ~?IL4nYKUkd}EB+h1I%ojLh%aIT0j;%+SlKMnTx8X}5akNt|JsGSBPAqyQl!w0T
zxPl!ZG)v&fs;Z!Lb5J_-x-$O%m2LJ-psHo^L9D7}6ABX1ej{IwGiCQoWK3*S>iE}w
zIK_BJa_KjnxAflG>p4Fngs!6a1|-5@6pnCb=MYekh)*1$G!{0q=p2AyJrMcT94T(0
zrx(1ZMYLXDTvnemYpP3j7X_OnFC>70RDq-HhejVU8eaaho8#5t2ah!-_c+ewMjutm
zMkU6?JK}AzHD}O9g9{{TA|?gqILfAXjJC2?aeFMFkp#`Q*ae!H2<#9d39L0ND7TFs
zbD0xlALDDHU8kS0cpB(n@a_<se8(uf>%57xK(q&B*%-J({x2{<-$Cp3X#hu*KlPzW
z^j(;ffvX6OCL(6A)_B+qX*^uLVD74UlW6dGP&qw}pRL3LAwaQ^+AhXBV}!;sAL+Kt
zjwJ$p5-3V~u_$sma_~F#cLFuvHQd9ofP;*_F-hXZoOY06iN<py4ep$G#gJa_no1~P
z8N>q?l49Op`qzP?$tkQTb1I++>-xHyq10+MnIH+E=V9DZ)2ZVOpYio#u@`9MA<Mb+
zt|lmE?VUsmek~1?OJis3C$6^f5Maze5ve+hg5p}Dp#0swjm*V^<IfFTJu=H(A$L*{
z(}bZTE)~7Y0)9d2Ov9^S8eYaRURA@nlLqpLrcif+;=Nlj_mV`<ey!HbA>Gem*HnN*
zX_!<c8rfavDBjQ8GgBMMHSFiTm`RoLMd&M?e&*nh8Ba;W0U|ThEDhV}@Pd+_*jHbr
zGzyW?!!`;GXcdtmj7pI~<pJ(xl`AJ}<^&@nS84kz3f1MJpw2aItZ6k%py_%<5NsLT
zcsIBP)KO~aSWVK%%a^-mai>%<{Jt{ZC(ha1mv4I%TW^u#VLM@`mqSc*iwGLplAtKV
zgB2nF{{QB!7vmHv8=>#iYSwH7^1L`eF1lXZCx1^7u{{cI90>;a7x(u(iSZ_xpRAg(
zn|kqR@s*-5!xR<vMZAbcXN&o?kbfZ0fZkUGu)02<Mp-471`8CO+h3vSG7CaeE3G2q
ztscBIZG3YSnEAKfd>DX;IO%svV;9L)<ZMI|3@-y7aU|yw@p@dVgIo<QI#eNH<qpqP
zCJnNnHDc9=bU87IuT5H|;<AV#m{0L1U9JDdM<K_b!%D)z*>hfjROCCt(9O9Dl8EK?
zqqCbWw;3bd=;3cKF_4zkX3i*?_%|%FREgIGv+l-yh*Q$TXQLsvjtTA&KgiPtn((%5
zK{$CP?emMG<+P|>h~`w<i#4*7=tYFvE3193^mpMF(#)%IGt~wCvt3&}=>H$|{}1~A
z2mSwg`v2K*He5mRzl#39)9yUmeP-zYJI@~SfB(_-|8ZKG3Otw%gFyb`QAIHk%*dXn
zo024GiMo~71All0hkYu(c=OR%Tpb*b#>f6d%@Iy&4i+&PMqhqmq2?+_>wSMZ5seNn
zW8g)vXj1rHHge-9XcVKQGrvSXmB)|b)CB;BNgSPTygfSh-u!Unoqd0Du=nzG0|O^+
zbT-thjidde{j#*(GF60rlYt5`AK9p@_`0=!i89L(s-|1}elm@nIyT7}<Wu6MDTF?;
zp0{M}&Uj4em8_|ex!!?!mBKfW9A-VC`<)97<o~MrYNq=gC;ssLyso~;mBX@PV;u#s
z(Olz$t15wkfoEVsH%J9MqyZzW18f^!Vb0(4&IMOGVdC>K=@}0rCS`Mp_aI7x*<dOp
z`oyX49Uh!Eob&F56fUuuR|pV4Y8@d0-Zh<<ZqxZbo=ouDkMR99u(SA`1h4Pncm#!;
z&fnlox82!xX<R*B@W=IXF8$doNL=T5F_?x!9u}W3#$Jln(D4i{#GKQ37zSBUg>>u>
z(Q3Tubh<nBZ@N1T5FXv__O2`a4ybR+<^~l<@i2A67&bflBf!MB<Se@v>cD9tKL&mh
z$Z53Q0=QBJcAsFY$PbZFWw;oDakhx$+|VF&j>^~(ZtzP0Y8DPtek~#MRjLlWus?Bu
zo6b(XgR`5vyF0C~+iN7S&+~{GidZ$<sk@5KOtxf6BVNc>hTqk;l7*0Rh4;5|S=40N
z{X8a&=*_<nS)uwyPkE6e83VpAivvMoc@4xnrFdCUy_kB-YWD3aFIBbDaK1nYh-85H
zQ!x-j%ERSXvu{JfRoWbpX`!5kE0jdW0u18?+D@Wja3L%;I$rmkv2?CxR&VclduZ_H
zy*K7ZPiuvA3_$4TfVeJsT&MNCeN$XjJ++lm1-wyj4Ey$YN>on84=AYq)aCmhA|!wg
zGHI%In<qGpbHHN>J17z9bKM!LeofWdjX%9&>e^1bZN40!Y~2T%QFw#Cq`XqQ@OBno
zm-RP86KlhWnO<B6$snfC%lxe^2Pa(&pkExDlD4Kb+Ti?6Ezjqt!^fuV(1B#Db*?vV
zXhooNCEcIdBv86hegRRc3(*H*K9VuaZBWT0Jg`+kZv(chI)H#`;rq2<BNrNT=Gj3b
zn!RXL4^CiOeTY1dsu^a3<L}oSUXH$10~Af$he+tEkA@ztH4=gI_93wLF08$qY_2vA
z{pETinwJm9Xx2I~z9aPk@rP>-B;VTh(TFxLicxV&H+&u`Jq?0Uz1=9P^3@$>_wUY<
zJ6l3fZ70f;&Q<Q1B;GaLGrfU;G8Ol1p&@SYe%L?WKfvL!+u&9~gRC<1wxPl4b1KJ{
zpQJ@yu{khyeSr`IPG}Sk10J)JLlK0du+-%lwYzH9z`c<TyatM<!(A?_d{i3Cz^!(3
zu8&tk4@-_f%euRa<$z<#x*~LAf3)`+(SuG^w^)kA9t%ZYHj#5y@u|}ZTAfV*G{3bk
zOV5)p1^$WZIC6k97r=xs#8T;tspEAVHIA#2Y_U(A;}EzFF@u4Rw<|HmAOfzRvbP~_
zN?%}%Xh8O3{;hTusyJt(g~3RThh>LUo=L*MCjx;5V+ME==4A^3=?i-RlyG$G;qvDm
zB-mbw&)YP<33!+hbe1+(o9Au7_R0b)FfTvWf`5CT7Swr(Ymqt)tjSt0%XTyp7fCK(
zDvEo|YohT?(5Lz>dQ{R#$ynKCf959FX}!4VtsaKJxz0zZr(i%uEGcowvk@?3b=99n
z5IVXi4Ee3sKfAdBi`Bvg3M!J08_zSwbmJXZ?S*;M%kPqMELH0o76;|fkdP=P9EGiL
z$@6Gw9Hc&)MtMI`VZU*qOC^dTiB_={RZb+p8>wH=>O)x&11$D6EFG&}s1R1dDp6|4
z@w8tUk><q2@;v_%eG4C(NinHBNPC0iV#7qw@rG5B#QR#bK60JE(r3dG%x^JgF^-s)
zQ;Ve<@T*m5Jr^cFFy)TWD!7+B8)|}gY6a>r4Ukl#e*f~-3--M(8rJyR{b}GQ>OiEu
z5ewA^3+PW#pLEB<j-`b@F-Hc^rfSk<i<4o%)pmRq>ih4}BI72gO{e7Q$Kj(Wid<e7
z9u1(SG&geF;Qgv`kNDw&Ld9hWU*P->(bD-QV(Uo~2D3*~!ruW`8}JawDOp3R4Fb)O
z!o;S-#2&ZERL<sw;IAhOGCDzSb~-hq1E8{jsZz>A3`t#!!Ir!5*dt{~y|Ck%xv)|G
zIF6@v)>?yf@bKFvZ*mo>Kq#RM<l5#E2E~E|JUl$6Vv*|TCKIRnxlmN%#EOIK&nbT_
z$F_c9zbtiTS55)nL2X3~SK<<J9N^6ifc|1n61AF$kazTgwO%qAL2E=Y6v9Ka@JkuJ
z!0VTdMj0eX<mY=IwTmboRk)q&XM@z#G|}*60tlX30_Uw5JmLg5d~pX`(1+88$~nQm
zZdt^D5;ioiteu%wel>|l3l=5_fM!YVAs0n2xpYyWe}d(Jg<lI+UmdEcC{9*dlExCj
zMv|B`Aj?DMwgJ%LMvKE3EwU$p^aHs6M*ozX+}PDk=bAi~k%zRy!j^q~-QB(3&aT+i
z_ukQK?dFf!$t47F$qrt+d#iVD$*x_xW2<-T$~)C;^ejOQ`DJ{=^0C4;!23dfk;TIp
z*ngH|-9f_`87qv();80F@hX@IQM-3gUos9on9Z}hy0Xa)AZGp|p7L07z--!^3Sg*r
zQn|X*E>4XBl2p2A#ZP~8%X@BtiLP3FLD}D;y&i7|$%j_5j<jlx+uS^7dS4}QYqXFp
zA|8`+jY`@^X!PtjPksT+Zd4(L4guK;a{AZK7jPiS={o;qYoX-iLLaj*S_JlC0=A0$
zHjd(YyG7o3*3kg1^Rq0GitVOTqrxOK*O(VrE-lG7B>|kUG+uq#=C}=9@9OikYfiOO
zghu13_nGYW3t`}~mn@MuA594(QL0eQ!$qZ~`~grEc0w*-Qq1!!!f0&JBH8Do(L;%*
zb*$;GdrR29Q-D>^dH-R<W$vK$0*K3WH!vD+Ec8a0R8Z8+hv|u7HmXK3-Z(0m9Sn^1
z(sQ@RA2v$P4IYz`5mFlgW!^Tns+=qVKi?)0r_8U5$$S3v#=lF^O9QPpBoz+u5;L?a
z4k_qhRD-2U7HM`9C)wqlMKS9ylyeqUGGf#o4!2IjY1rBq#{|Z36t_~AV9&g@bLD8b
zklAQysbC0=@``wrJnh>fTRjm@R{}FlhzmG<9A*lf;ElWbNr+kA^vOsBvBywMGjGCC
zH9?&VI|L`ebmm81vKWLp_L2nNXgb@KIJ@y3)oq<%UFQr2w%KzqQ55T{3{y=)Uo0eE
z@?#hTbDZC%?I`$bksLOv8vH-9Mf@{J>-@ZLAx2_|O+?JUVt(PyitQ|eBDB^qq5>(%
zAX_ZKgh3McSJv&rxD@ze1b0^~%lt_MzrvxiKZ>M(-JQrtI+sZ#Ztn3f<WytrVaG@r
z%h!M<FhbxrmRCdWnqn~{`gJ!s{`<GAzG~PdOJQ6yUM}SQ(3OA5D|D$~k;^(phHmA0
zUx5{^aUIzKKLKHh#@BSlKnFxfI2cMF|51jgo*H5UuXb>(zl-rUfDNm6!FRNVE_BMK
zi`mRi?o91#?UzV(aN-YJwaw0s^VF$XQ4QluvLc0=3O#F?=hF(*Of+%^z=YxmQ|o&!
z@j($bZ)>4f&^J>wGWRzqt*$cDvRiAGozt7egL)~sET@-}H`!#ymMpBm41<$R{kuG-
zuiM>zMw}ravuo*d6YGPpDB`_Q3j<&WW~lB&4Ft>v=#!-`uHsJHGOF>)T6wz)zM!k$
zNEbuYf{qtclmud20!2svOJr*4g^@$<4#}LT^6W~?FDx-+MJO+mX&6H*Tvo`k2HjfY
z8p?=)hZpf`?Nz<4<VeJ|;*v(rLyM~wGuQBFBrD9m{Nj$XxR5qJEh^ZBu?RUnDi%n8
zWu|7v<j{V+FHOf4^Sr0EE$V^7?NJQ8Vggv6t%T()vZWP<86T=%Ze<t9+NBSLD<veR
z$ocbNXt|zV(m8&xHZSxYd_9GR>d%_aGbK`Bx9Q{&1T9U>P}HJy0hosgqkHlIgmrBE
zyoX^Ed)v0_=sWL{-6@w>Y;`X5gsJBj>&W#y!wVmxmANsHU%zVYRVk;{GnSoUA<VSN
z7qldvFCPjEQ7)BoK*OqH!UL4z3G53VsNMl--3R`3L}*~I#b8TWKSVd@;(UemZ8xb^
z=E9G-kI`X+QEdg_#tEh%pd+zg%fp%GsogM7kbOF0@-knUy4%k=+#1#Oc)m2b?`Q$>
zG?(OO#fC%-Pttj@3%pv1ERYHxSRqK68ORh$A>(LLz9LIR%#uEUpk+yXr*)$zuhipa
zj*HiI*t<3BGr?(uVdCRCMP|Y{K}%&pZo+K8k%ZZGDyfUmI$ETmP0Hdd(rhb|?9)zB
z%c-_;-c!ZoViH}pso0eW>wnAEKjs2xUD@)7s|{e^))eD_w{6}&OT=N#RnAIjtbf{g
z+IBmeb>m@6WA0OHrg4$j+_1u}NRysykRIx*UoQRZ{V&k)hp#N@$vZi^>ZSq)dXS7&
zmh(850+(R>6K5JvW>ZSHEe>}P@Wq=>uRN00gkYfGRY1)-g%#0m=mCf*P-7O9wq}+f
zB?G*ke*xzSuoI^|27z<Ha2?Sec-9RwaV1%0<Rd+=(f8U#&)pe+aLp+xvTTdlv#~fl
zG9p**M8y&;VT?9ysPG;*8f@jYjM|b~lyxY`(G3|iQ<t)vHtq8qWSiPi*1K4H*i2*!
zB24eC9y9rfc~Ap6PI-lgFY3(Ngb1~J)>yauJb;a@YXwjYY1nO&4UR2sxaKJK{poNq
zMP1?Ffvd+`#MbK|9H7kcGAycoo;ZJh{nG5QIGC1WL7yUeZWQ_)$v%6|e*wB}{Fm&n
z0KmhDseut-2N4_|^Sj)jQaqUZXfSy2o9k?UtyTw7h!>Hwu?*P&p3^FB74wG93J}ZG
zEu-gCcK*v8oHlidHy*L&s^3Fhm5qrX!Gt$`$r;#!I;hQk)(ArR#$fJT%;>f(c#y<P
z7HSbH^9`v4cEb33xzi+%M8_`f(9YNpszf-RdfE^H2FZ9>4N|sFOdwYKKJj^uwdg=R
z{1Ff}z#dpVY$~^}O4p1XgvSJn8#JEAK85Ep1}Mj;59+lA!me{T=JUMtd0;S1(2~SA
z%{*`%(|LqOdknpBGeQH&O-Rxv8%tG2(85(#F8o?N^Hz7c({r~o{e$Y$!yLutmbG6`
zmtfjTrYo9n7TOSeL3snDQot)osZp>rLrdzB{94+OQl=1RNI1};Qlv1qMx{lKQoDuK
z*)}!Y8$DrGy-dwx8{<bQOHAJb_BZ4R&zu@yi91Zq%pN8I?V);|L~|{vYc3PBqRjrc
zeNo(+2oGkpfPw(*r?f6=7Za~4dJ#*!HN$>~GMRHr1g0M0LD6!&uCdna_B<Y4`Y?E=
zY`rKGkP2tJY9==!nlkqq7HgDS$~W;1IuFzQ*((Yf>{>5d@X8@KS|?`}FIMp!HD}lf
z50nS{katv`%gF#{(LJ_N?zF&`4LU`m6}P>Xm7QLj<)x}HI?VS33L9bxnoh4JcGCs$
zl4}|rD$6qneIBT^+K&Hz#BU{hf}JDg(pyWJRiv#;l8z!C5oVAholaGC6k5~s$4*9>
zC(V-VrYm663Rq=p?db~WRdzJ!$fm4MV|iN*L-C$d!lFkQ=Q-uDRq|+=u2KB6v!^-d
zvl@>o5^P#{QW25k=RCj%GN?RQ1;`}C{cExne^&=W?Y9~tdNu_eM{_-4l%{&XlMmv*
zk$JW8t}<H3efd{fhN`hOikz#$vsN&dYS4+*V?+F@c+bzv^4;Skx{pVhV8ZZHT_I+M
zZdK*`u%vRvR!&m(p+{c$I6hLlHsu*}Sy+&*93%aV)z1r~-U$dfW?apY&r7or1qEdk
z9$yO##bmtPB2cW2=2j)LPcZw*TFP@6Fx#_}qtgT0;Q_GhM%2v(w<*yarjr>Ok>>`M
z@=epJ;k#Oc0fhll9Z0T0v5*FJV`79~JiPQJQov$H4QUqRg9z8Zrm4Xn_2s-uL#yUz
z6b<h4t5Af#X1yayk7lJGSb4-MM?JV2T5Nxg;yf@)POO6L;Q&I_i3PS*N0A~gb_`<C
zH;6?pNv_Oi2?jjZxrqc6QuefPmK>E^Hk0~Ne|C;xxrVM-NRC2KA#+&E+O<&wbG<k^
zD?KROT#?pUwl5pvQ=`QtPD4wkzrxhb77tl84ouuxHWKT+SJEl4F2;fh5kV?Y{%JvF
zQb_*)?0su{+s4soKKFV09}qb$L74<EvSde3n75AYv_AFiwVc$cqQVdXO0Y?Q20)3d
zU!UJ~e#QAsXJ+<--NgbVWy>dRr8p7^>~(f_c4l@i#*v;T7|YU+$!7x^r))WHIkB&0
z*KGqyO$a@)U1`XhR@se5NlPt3ldID{R;r!UtbL7bJ)Qb$^$q=hHb9tbJOPyRdlT|6
zIEh@9EJSLiLZRo3()?Qya>)bmOci?PWFE)>fo0+`&kdj){`HlQim*&@q*Y%ddTbw+
zZu@X_+96Svnc;0p#pYLGHVN9*-b{8%z(wWr7*@hp)z>5+nZ(^nK(+qUEI!+~e}uZ*
z?(o0nKBb}fn2n(0_%g&Ec?!}S;Yse+M596P!06&7lz*0v)QEW&l@%sK&Wmi+@)2EI
z9cMOY@p(ETzSBq~g*K;8;pLg(LC&OoG-jh@UpTM4YA<&|);%5kixnaeBq>dRTg^aY
zQ$|Tx9zz32GfP0x6N@t=2f3(FhLX|SVvLF{ZpO<9-B(4nfzb4r1UzM9pT@~MQpkA}
z)Wjt_@783I(Dt%v8(BbE5HyM`iqM8bjZuh;!2=cYwYxp|XmfD9ABJ?V#WM@=!j>Rg
zB50!NaiYjLj4Zbd$1JNC8N<ZGDz3i}sQy9~-Sid}QI0u!SrA!v_X7yqbX|lQBgCSu
z@eSq&on`M`>j`IZT_cDJ2e!JSj@<qzTOJcx8FA(TZOg=YFIXT4KuL0VylO%i;q+~U
zNF^$#hUeJw%rf2iEY06n=U1Q7<ujZ&y8l5m$6K3HjI2hByYX514fw2-a_C5Fn-pBt
zTdztqK#ZBEG{Ss2bLZ*(7Yb(;%zdHll0}&bRIrS5SW->FWxH!rJB7Aob9vz*V!NtU
zN{2UUNEZg{psCF{Z>x=Z<-@o;&Wf~S@Np9kM$XjGlQAD-AS*9kI@GU9ZM0si4!UKh
zESFIO4Y$&7J`X(NMy&^!P_t#D+eXLNoE%JbYa((@$lKwA@qs<y98fL!Cc;ug35!ID
z;nXlGB#%-s2{5RI<Gwh0`D0s6yQk&^D;wiC-*}|+ar~`QCjFC%!-RJZ6EB<}6!!mz
z6bhGFACdI&ph*<pDR3?$H1EXpc#9eG4e2RmbyS^i>vET|ETyGQl4*>Dpvd1Zu+J<_
zi%Jl4a-U>RG3g=iE-%~XYz&72^PZ9~F(N!NzM($2TG%T<!JM@?^XXLauw>207~4Nx
zOhcKy$r*Ug&&-+WjPRT}F_o<iynzF}9>!~@JeeUzJ&%}EGjU;TRWMH?*<r7u!V#WF
zymRgNJ4zQBnhjzk_rqTAZ3;!gI4ZJueD0kAEEcB}%N-AwuHx6~ofk9hv=gD=c^z2y
z-rLJyTBOO|;Lbq(tTcN%O;3BX0I60l$We{bL(-V{9vvO^vdh`IH(y<a(O}>e;<YQI
z>{0?gW~f~wULzVYUL;Cv8Q|7)BQJei3vNkPZCv=trKd5^r`wQXZpMx?8g9%}7qS3i
zOuczHG>Cg?n98?lS4ZTs?En2gr-~6S-|%#1sOI!gN-YLoc_Ebn`N2nbbsrCS*QFX=
ztk~Y&9kxB#>27_E9#j#Ngj$1k-E`QtG-jiQ6wlwYal$)&HX!PT=|r2=-&FJC{Jk<I
zwR0E8*j6c%1&r{-|G)nS+WVWci-^Wn31{N;<f7e{6E;|`lF-}l@LB6?bOCnCw<*Ch
zTeH$Xy%5820%86>D_jnPp7Xc1brw|qT8~njJAf+x!2Ur_ka+?qP65&r03G-mPuUu0
zfHadTH<5z2)YnS8Y8#aVo=qi{i)0=pXVW0<-3f;W2j7P9FPub!@b0~PdxLw!gFEAJ
zJn8S>+xs-Z>(wtW9vM14jMRt0ilPpWKqzf#vxRz)v5@<e%Cp0BV+Yf^q@E%q7E;?v
z_FaU%r<?fkq183a*uW?0S(kZ3#OMy;2pz||4)`iJkNqhWngLr<ni!N}g(@8&zHF;m
z1q4C-wCYAVIsvvwJOZe6pETTx$xYVBydKj5{T`Be)P9FTJNN0&9fRf&wC!=n=v66B
z39k`<k{ALH?14hDn?xNPQj^OR8MH!kz4%GS^vv?Y+3){>)Nzxz5SBSYd0i7<{Fd;0
z28kT_kC2++2AX_<E-(%Ec{GdkpqLC_?K7RRh?|BhW+S1sS3wDpIzmZNJB$;43QePg
zxwAmQES>xu&?(MZW5>8!>)F6(MkK6`)9X1y^(`7u4E){181J*8T8qDiW9XTtaO11x
z-|+$dnod%TtOGSvIkT)07b1jowaQSf5=|yd6-x|3wY+J!!4na)BtJanE6gIv(bfw^
z1yf_f)wzm|uW0$ua%snqt%7ZOxuskYlK26_q$#f4%V_{a^(`(s)LnY0U;tUb^k=*!
zohwXSH3l%$MgRji6eF<rkIktA7^)*M#~_zPMsCBq!ZP(TZ?D6%*&CX<ay^AEo0+N3
zRz9%l?s8}@XcVH((B7%bx=?Mvp`hT$u%fGUXta!hnn<_uM@w3BBok+q<k^^gt-9v3
zik}=`Qwt0HGb;-n>P3|zPlzo~r|!=k7Fb-k*lBTyv)~f<sW!9?2q4T!iS<1r^Z<f(
z0$e2wD@+SR{qY1<&Sc?3fzk005IGUF)#euu6%!q+uE;t@7KlJ<z5Xb{Z`ri(2Scu?
zQZkMEEYmn1G-!=vjPDh&XC3EfWrXjC>l;zPrL<t3)FNS30XCsBkF91kU@ma;%HXwO
z%Tn6tjB!yURf9^X`P?XJ3fp?Jn9qtCDDZ56M7e{*!(aGmh@Xb=3E2hQa)N2#yb;=#
zwQ9|^;57Z;q#B-J9L>muu}E(}Q%fw;7kc=?a=o(i1nc2-(UgV!J1xV0g+lz0qsk&+
zLa{w|K<rrB3&OXHoR)`&<78sBCz&c|hFT}=XNK9z32!jL9BNi<4LXY)y)1Srw0x0c
zbjx%`>Jh2s)w?w$puRs;Bqf4d2J+rCZ=D=>hbOznlYwbFzQw9@gtV-<KUCIysIEC%
z!!>tWl5^#^#}%e=HXW%L0iBUhMbGFXK4U4|M>}lKnI7RV@R1n&NK9ZPCIKIY>ud?d
zD6dt}tgZo*GdJFGjjkJVUB+2V<R__>Ngo@8f?}!@k!HHqrxB&344uEF@WCB?`|Uv+
zC4Q;R&_w^46gnyfIhCHLNX(G%b#bD~b3}9`bzMhWty|8k6h?$<`<x88VPcqV5_3b6
z7^k`B=ZjN{ekAxc&P<?d_kbOl@b{0{8LEX8R%MFYP~#(}>M5S%sBsl~E-@Upk!w|6
zlV$SI{t}zb6qggWT`|2&raP+5rlq%1)<zpA9F(G#6w8VYkPYK)#0gD6(?k4To?|Km
zH+H@o3G8UY^j*EB@39q=4reTeUS`>?><xW1%+?mOm`4&`&Rqa<mrbGFa{++6mKQJs
zfZeWR#z+mL1X~&y6$fi8qTn_st)X_};K<tT+qPHORl5y9uvxyXJB>JcKTcR<!Y#LY
z5{p|IM9gQwGp}YJzsgqTnMH?=IP{?}Q%Qzu(`SPopd14fPLQVC<6mfkMkPW)5i-k!
z;RdM4aE)1Hm{A)AZjp~*W-}l$5t)aZbTe39#JhIS91>6!vt@Q#Y|%QKR#)P1yGNf)
zSE|_gUTzk<T9r;Qc5Xr2u$tn;iaQ*#_&mtz`oLvR+U-r8u9TMPw`{n5y~h%l#n@rA
zTxZx$l(kkUWJg3o#(FUt?!W^D-G64G^68wxdNUE%$@@Z=LEEc8msU$l^G?XXSkizV
zUFixaPE7Tl4c6OT`z))vB3yZ7W+}>-TbKucd`0&%OB5W7Lk!MA9}ktwilTi|Ue>hh
zTG8%mw9Ag7-LAX7V0E=veXyo@<005u?HYWkU4B=jXjED$1S>mF8F^r)+iX0VYT=CW
zYD$!ROA1%UOQAei#8akfT+9$v#%YpA;{`2Td4XaHgB5EkPtmP;jH9qq9TM&|w{X4_
zYh5|V&(q`Lgt&#Sw%kt7aq$Ed0Ieu~cqp#I`GYgG<@T{a2N8fe><$i#+aQf#xL~v0
zqGHL%*Bfnk1cvMi^ng(!pEKOWcA?c1hGp$*rihvowPN+%;c+t?ZhPWf-Yl?y&bjn8
z0S1}0-HH)7Ywpvnzzs8R!OLq=SUlC90lS6Dfchx>PPKg2qzJ+o5yEvg3$lncV;S#y
ziZ+IhPSez27a!vtNgYT!_>Ln6Ze(V}Mcc2Q@{Iup^|T_lP#JuIE>ofpoaw*__|Rb~
z+j8Bux4P0o*(;(nm=ZqTFvR7;SN78q_5<tbxPO9SVwS*n@&|(x8!uDMDQ#?;ab+99
zldt}be!QO7`_qeH`IjgNqwIS6=(C@?zy1F3p!|%_2mRq-?<;5dB>=EM)*HJ2>Ob>m
zICN%cxH=l#?H}y#!T$$d|94;A{QQUfoT5D@h){8urOWQX>wEoPo{f9jbN!nKOmbeV
zrqMS0|KQ-DcK`1W?tV46v%kN8cd)mA2c8cHckbNz%IV+i|7-tYkzPFt!Ammcp|b}(
zB}O5(KV(6h&4+OkUB+Q?jt}`WibH+K83rs+DaIbof(#`+*5UwKq}b4vepc3})94J-
zjZV^}P@dvfZxYPn=?b1;_PYsxNT1=^{Vc{SNA|3&SVYUhTL3Q?E#=U09yD^od3X$*
z7l#`B;gyGXI?mwWUywEUO`QLGd)4!QaC82Dk@Np<dH#Pp7!LOi4!q$(|6YG^!vx%%
z|Kj{7Ek>CzxVQcOzkBy?{r-o~aQ+|MJ%EG%F7OG%{%{EA|NhPXzmflL*8fKSyZQNz
z(SP5^3t8Xy=Bv*j{|yfA81mn6Z@7OW|9y#{TTXYk>x|P7GbKBVV$!{bPf!O;1|6QU
z^mwh9W(CDqe<7b@ao-Q7Qy&eakD1bRM=!$_$tS<Rp)NP)|IPXT8$AC({u}Ild++A_
zzd8Tcod3r6HEclR`9Ihnz`@Vuzro#oEH}K<zcai!|G&uj|8;MXXT8%n=|#zfGhY?w
zX|nhG-!<?BoC8P}v-t`wTa&qbhLJYqmo(=*1ezKYX*$gX^T6T`Fz>yNGhU6Sk*qZj
zk`Nk2i`h^<%*XM3<-tl7IXCJqMC19pX%u7$X=no;Uq|%wgr1dgaf~X=R9udV!U8d0
z-H^qu=s4NX&&O#NsWZfkV$=8(ke-sT;Yg>2tpIAQqYMkMPrQ4jL^_$|QE{3s#Tf1<
z<8$aPd=)K=YVmncU?MKbgW*?PswZi{yzs=$%<q4PX~XCvs<YiR$GYd<+cZw(mmD1h
zUEiO?)5!NR7D;pH`j+SQS{<ilzYJ<In~~rDZafY0+~5!S-Mh3VATthD5U9vS9AbXh
zAXz!*(G-&#2@4O3p^U_)%ZUxmlHw_eXb)wq5^1<_zhGW4!WqQ0?x+n$RK9@67&QiZ
z57__|4p<t~VZ0K&UnmsQ%~u$EpWAf=NwAvcan(WfO>0a+PTo*9n!q7oR*jWd`m8AA
zfuwnabrzrk(}R%jIJ|q)q6VNj1<Z=I4Hwq9p$!YIC!(#efv8p(EHW1r0$!edFy5-M
zUqqz7YM4DLDg$(vMdc&2Rjkh9JL%#7v{D505sm&@ZIp1&0X0DbN@1eJGWLyHuYg~2
zj`I`P3{;9zZlc>)t=ST0u4TB(TVxSeFbge9nI+6;!Z;%fPQ~Ep4GVvl`}&r8b0woD
zHU);Xb-va|uzS$JpO7w$flR=L50Q*5yHwqR&k`hW6e}nRoaX+X)6@LWISn$0q7B2+
zSGqv>zS+_+3Im2CRnKd~W%8ZMP7NNj(A%(qqXLhrEW|XNu}pGjYpyngCd}BULxdSt
zp%GIFG&IA80xiWhOAQ5D1U$?VD{Smc&nQrhwW;kn!`TglK`56LQTIWRO;aa}c$RHr
zGtS1od{qXdgxbCmp4@G*VrG^_%zcc|TJlIM3YPlpiI2fEWw1aq_mL=NL?uzIsKsWJ
zagts<c%;&IV>WWNJLUJh2xV%8F>9LQ&<o07%<0d3PQr~%nG~Rd%m7}EYpFN$d>$$E
zff)%R2~n+QFYypZ>)}buP9b=lND3U;Yqcv`U<Dr#Eit)~%Ud)J@>0B^e-{B^DvQ|C
za-U;Vn;i4#7PNyAh6OhX6u18F^s>!!3ZrGMVMmzc7>^HgPq@K!ejdowB2(=ILV&!B
zFuYO;(rPAK^cCjGY<UXm99_{bDdtQ9L&!%%i!-)J@6Ox@F_m}`S^~FXrT~Ez5^IiO
zR40{q`<1eYQM_nf?*W-n;`FO~gD)WNX;<4PZ6uROr7VVy2nwyRWvn&f&?{A|!$t&P
z)SGA-C*hV<$Cx97Q#?yg!4g<d4+^+Adv_MhT{<<E?RKXGyUa;_+ML;%Fb?yZ@3*a8
z4Qjg*i}tyCf=Mwm6;~uYH)!)o?FhH4SF?g_2$TnTsR43&Iogwxe1|c9$rNYQTCPbN
z&iEi>G_{iuJ(qC(>>b-v1!$)eYz`!PmT)8MF>@_;R=PWGA)H(5lYwy{WUWp!9<9gP
z>JR|npML~b!se@D^qQPuI%d}|0!O$_vCM!9=lWkp6R*`cw}yeRrTDDlFsbkp;_ON8
zTw;7jIJ~%Q4d)4&^651g@Zga4c_wo9r(G#E*r*h5i>E4w<E6GCtw`l8*=)%4(rpPI
zzUAC@n!4o0d64_EwACeN?GYt#I32^;06b4?E&WzYR@1vvT6G_!EZ}Es5n?c(0C7N$
zzdMT`xyS&YkjFyv!wSzM6Ter7GiiCf51f7bh*0wZ7Cx}U{s(}>M@*JyLq<#X_sv3V
zH}cchDz}p=M2&k(qPq<2f$X37$fL}OVDXeE+ssE_VTXK*K~oC68zicrtypRR(5;Ba
z^yR^`O0k4T&F4(N5@&{7ca#KcJm!p=9NMvk#N1RdP=>MDFov8c&eQ}EpKMqS#^zN5
zR&AJCqgFq$Vb5lqIc=S70{tr~x@U_d=b?|)Kx&+xhMp>_B0e-LI2dInZXOG-Q=e?3
z;ox(uLBUGq>4wxBwd#PlTx)x{HYQ~%VUTLlaVsIgDPjr+1ZU6OX7}w%2LjP;X{1Ui
zs?-mddWEf23h~51od#Bto&0*~kx_m9R4WZIc&UkU#AcCcI+zByZP938J4l5)`XeC3
z8-xqj9egX%@KFx8X*1yLtAxORoP^QR7Vy5d5q)73)F}yI=pZ6#mSe-9l2nY<t7a5q
zP$hj^ses=E70o(6MZA*I_(*){h|_}oxn7@I=29Dsfkz0&tIDy*&N}@YKW&;-*AJMF
zt!P-PI0dF)$a1C_6V#I8+>!!?U8Wh4<LT5>ZammSJn=BmwoPez-DlXkljHLoTUu@?
zaeC3Jtv|a2U%*bST17gQa(L>dAX|wLCnjrwfMzTMdrI1*ypk;XO?6McsT-+sXa#W|
zRaQ#ji-;U(QS>P6^Eom&5eyh0RIX%_%CsC5vR*lYw>c*<HK*;!e`(-qsGwtnA>SE*
zJT;~?@|UcXt;L^hn5{Q;ou_rFR<AIN236!wuW*=CxEO^eXgUm=kL{+IEr>=XvbZ}h
zrWB(}$g~_&6h?R+@cUsl^POhddPjlG*E<z8J`Yuy0kJ}h0WnzsT!iZNK)s<mYG7<E
z>=V1skx(_G42q;-6!MGhU3q`e5bY59kvX@lY?^p$VJD94>;zy^t8E`3NGw~7jq@Dy
zGIQCwEG0)4_Jf8^`FBUohmXc{bzADM(L5rZI~d}f#tCy|A{oEVVL5=Qrf0ons8Yd7
zly^=>lAv<rX#*!d1sTGMrHRYILnS&Yz}S=ZIqNHF#8~q@Y1{VoiKk&>7`3wClJhjJ
zO58=0;92IbERFzys{;iC+&DewS4$uimNh_!4@(+gO${K_NWcI%0cM&<OPK=i;ta(L
zmT-4|O}@t;hZs`|AM@d(^Bw-|c<gGP6jkk-%EZ(<&i^<Ji|CUA5NdVDTDjG2?e6W{
zU;Uc2n5oXB^&!4J_(;uucz5XSMf~$y`rH!nHQaX{=WV-rk+t9(|BUCx3T%ra)`e}i
z^*l(F3Xp9|O8tj}Xug9rGHs8_%~M-Y-}>gbs>-SBHyg>L_kKtcC5R`%S(4^OJQh|Y
zSv2u~c>2iy>yw9@8jVoWB#gAk0QU$FK)lm%p&gcm@2`vL^fVa1>pqF%Q%oih#v&O^
z*`Nzuw&jH~406Hq<FK(_iRV^kBlzPw_VlH^i-8Jk)KKa3Sb8+;L8LX4BK0d%6$n|+
zb|uV&s$^v<nghLrT9c<`udn*rs>diV_&2)Wx1(IIbxpVah~^Z+XyhEX09f%PhJ*jf
z^XF{0qnV(2F5oj*aQp-7Uw8G<LOO~HYAlDJkBSgK{w11XY7fHfyui@ntrL|ZyceV4
z;5~}oi($d2gw>k|1!-JFnC(L-8-USvl&>&qwH-0g##VSan1tHWLl~=^(1{Vv_Z<r<
z=i>E{(@OCdL0}hu-B!<Ex*K9;-%&BBe3nHK!R{1)CBel?ya8sQRcE;oU8K{6YQa?E
zkO=q~RFZa~s3A!RpfePraD!9lp`a%!H-#t@W0qGk63|W~flckr>^Ryci}Aixlo1{r
z^S%rV2iz$>CRfGys(6DcYI?{Mx^3Hq&@@V1S&x#<+X9)6Q-DIUh^map59yLk_!8yY
zm(hHRIl;)ZmC}HnVz#yr2rU|VdX?^jWaT1DfJ5L0yF0+s_&XhE&_2cxaNj!v0x#J`
z0#wVvT65bA<tQ^!B4sF98CL~a0xd94z@j3YfTkOab=hdCQOT4ET6j4h-q(;%rKT;Z
zeQ8=HOk;44YMvQ~+fh|({sqpODH+$L$#g|I^nPNMe-w4&Qn_z64nWEo1JPxq$E}PX
z2jk;NIyAm^&4slrmMf&n(#pwJ*Wg;K^^MxVikL$?0x+j+rluc;4Y28J5P@fyh}DZK
zx=9Hxf_O?%-3iwa3GB5o(quxJ6oC0X7Tfa}*kByoM??~zIO5icMT=1TLtL_wl7!H(
zebTl;M3jetJPwyyB^X-W!g9?JyIVMJ7?UDb8r0`-4f{Tk3kBs5*Uy@#uj5gOB`lFm
zKW39VVL3M14T6JQ^5Yi#dR%0F>vmpbE^DC8b7!+qysX1^6YdB~a*R`<WF?|~`<@5t
z`J!S)CRptdrBbrF;8sOsQv&B~coz9|Hg@0ynNG!LI1uW9JWp}mSOZSiaiM5;P&1R$
zIX{n1)1}foG(oWm^AUU!gBkHLZ75rje}|6=3v!VVH($+0En%7ivaWZx&Ne4v(S8X5
zTQD||)#2p7JdbnaDDd-S8uL$dE5CvD36{ig?Nn(T%=6Ys{f^-Sprf6Sr2$RpM30KO
zdpbYn#ZQDySh<47c#1dfQ-T@x3GMJ{pEPxg#j#^|5{x6(U5O<1-NE*=99qB{OV0l5
z>J3wA1{%!=_G3?Jp}y|Wse}Swb0ml!_nRBFye?r+h=g1ta+9#N@h@X9El5zpkg64o
zfgi8f-zl1o)Yc`LnK>qmELNimuR0+`!i40=LLB0bIK^}O`9@5qpq(YtSsr(FQ~~|j
zBc>N(VfSbAV&(GVqvQ13jflyqJYzI1)K4&-4ah1x*Xv^{Z5n<Dj^Q?aLmib&K2+bJ
zIiqvnXU6Ybw|w5=oz~9{Gt5O}<oU-z9<d80MWDA=;*JgE(||&t!vz9*fZ|{S6mC)T
zN_tzCoO6Ot8~HY+b5%{zYzbO7>NSH@^dXcY7?V4ceQw02Xxkve4twzbH~;v67JnbV
zX>C4Mgz(kJI@orst#2OsAO8B}<MG_(NuU2`<?psT@@xB~_3LIwV9V(_tT8@qvrT8?
zmXzKRcFnbJIS)BmQIcrm3LwG=Gc5p;XSK&)x6Q&BF;v}U+w0?*Djc&#tJ{_53A<W*
zCLYx9d4mjN>QplE$Gwjk1cABoA`iBeq|W20xg|XQgXKB4;;u#muitT&!1RykZ?Ht=
z{&>WXn=NJsELW@BZIPwzuz`fpH0V|q4-q^-YL=Aqgq+Rgl7On}>zFFO+h}2k$8Qav
zR<f<-GE&U{LpqgDtc>hB2$+1XZRQAaimjQuq6hZ3Z98wpHS#QY%WXDbdC;{Z9b;zQ
z+-CCXEy=zOEpDlr&kMWE|Jl30DAFhVQM<ja1?<k&!c%;)Q8O62(`9@#@WJErQHzF!
z=po{H0=l%eYy!JCn&`ILb^+!I830$`FvDP9<=Ia@GxuV2Ap}8k8n0)5uR=)>Ppypm
zRT^Vf?xetK+i{p1RiBeq2ShA=p2vrW-2rZ*dYfzYxcr(Q?b1k>o-`bwID!@C$&6e~
zlGCagB&Ka(rFfnU>uzM0@U35ey+ztp^lL{eDo#o2+U5rI<r!R4j#<59u^Q~$$go@<
z_uTUuKYOUH0~B&w4zl;YZKTZx=@p}Ny21D$JE}EF-`Fgjo2FY%dO(USU1<}yOC1Qr
z@pR(a2_yoKG#@;}GR9G`i68*86t<q*Jl$ehki2dMrFV%+c=Yx$M2WVw^mv-)k-M39
zn4~d6fn3s<Mpd%Vyek&9T*HLbPHM$~t4q;BaJ`}+-5@P1Ef4G^0NujnRk(n5Eg;OB
z!Gzt4P)~<V4hV82LVBY|l@Zi)b2q}+jWG6~BaBG`RSIGy-P)c*kwRG6$R`V7|8W9X
zRm_;iv$)9D4|xRz-Qz*jh4Brd!N~?)Jjf!RQ`HQjP_LJs#lYLG1Z5TG{M>PqU(#>Q
z;xZnY-P*ItI=HrJi?PY-wJ>TW*|l*0q~S(q%`sS`kNx~GX3nw<zRG_56c~Rl_^yTR
zl^Bc-wA@y{3J6XT;Mwj2W2R7@M$A`jJIFAuwD2(O(bwJ3=sQ6ndUSV42AG45NuO<T
zueL_jwZ6W^y-|jI4rK_mg5&0HG7NKCr)hECstkxl!2)C*wc<@q9~QS@uY=Y)XYgsz
zZ(pr{KAWrcKa~C7TO;p3)6642tC-)QFqxo5C(NmmxbsFKa0P{cts&Da7<B}w&#7n$
z9IO4X!Db??)+h~<Ihyy?oC#mR+Br{8(<uNR<o@V0M9CAOAdj$Fhc}CX=K`2xg7y^`
zc;5>rPYzg#E_87+;cgu)CT5`Vz1)~FPSD3q=IYEnpy!eE=)LC*@1n>JLKdBhlnXor
zAtjzdnIBCbe7x>t?k?zEFplSV1)4&)2_j681k+Ux;w(k|z_A?<Pc)Nc?B>tY^c1S8
zvk?^!o%_yY8ZFU&n~s=hGKt4A?Lr*+Wtu4|FKHY>;zUc9vzP@oEz<FMKvtXQt1wH?
zq7)Br=g4!OPlIrkpTCQfj`O4U!c!9!J3jz|qS4CVJP^b$gLh?)=rE1FG&}1J`re>F
zIOx6gg7D%pC^BFGhW)|7gMa<LmTnfN?K}(4qG`wZU)~R1$9Wl}k7G6qk^~>o+>RC@
zpl8Ry_rw0M@0BoB?OQ9rrI1{(lIRGQPFG1fql@VCAk8scW{~7$T8XQ9Mxa)0gsU)T
zM>!dPQy|QY><?ij=F%7uS8|b^28z34h<aL>p_9k61xqsQIWIvV3r?9q!Xr{oMHEKy
zkIvC5FQOUHf=N`YfY11LU!h`ifa}5Cy}Q>I%~1v)piq0=O#%>TM1FnzM#gDEu{u+J
z9_|%WGUu?MAvwRw5MhSVE8rroCq@Yd^gcyDg(-`Idb)DnoCevpt^tfjjic$*8pKg0
zHOf%9Dh=OSLq*{0O63~5rd!=g2SQtrzzuv6K=zMwVel-<d*xGg{{RW87F8arehQ2)
zQVWV_s(Jd-PqQ$}m|Oy1U0E7^*23tlZG*gjYM6{F;H|tHrx*U;{ki|1b%N@R9IbA+
zf3<eT;YL68728f_@TVyVq5$`~9~Ufhw_!J44zHsPh{n^QF&Xyg&>Q!djuYJn*Yppx
z89(E><<HZYUHGA8xv~_;lh&#K0p>#1t;ejJI(g)A5~I~U<{m_pa&Z!E?wyXd==>gu
z-qFn3wTP}mgEcWCjQ8+NAtNOQ+CisW|Jxw*Imb3Vg96p<8>_jV;5a82*{FP2XH1N7
z#?hD<{ax_0M52ypvsQ>Ur*X_HoES)n&35#)wQKQI1QUR%V)sT(Rn{Mei8K|?|BO9h
zfkrvix6m1lj{S`9Sp-*gM8-HQm)p{>76S@!u8d4TIg*o?Z7?0<AsI1{3JkQb^xQDH
zR)=s{Z_S~SS0aRxcNCFCnZK{(sA}|6KTzF~LSpT^5}vMr4~N0h<GO)W<E3aCjOoNj
z9!4Y0{Mm-x)-?_7Js`>gZHp#}?E5vEEAp~G+_1rQG;2fXD4}}kFumYdXfLfH>M=IX
zcVfPnx%h|Non1N5<H6xcy*fSlr@TbFJdf0RcmdgO*Ip4ZRS=>(;+ig}L@)%}hPZ=7
zK<_#5Jamr%doieMGg-*UCtmH99eE<*xNE(T$_9zyf`6xrv$K`g?2T|m>bjOtqD_-n
zr+z&WJjXB&ml*3|Tt;r5A+rZ7JHu1#658&L&r^Wo277po7B=~(f68|`?e=2=>B+T%
z$iOv$v|=RRJALlSKA@#7kETYrFHXfoL`%obGr^y~s}37vnylvHa0T6!p=`uh*56Da
zoZgoZ`n&YcX>V}(`FG^?{(k&^gUe2sw=#9{5s~fh8&;RgG0)P_RoW|8;d{i`^-axC
z^+5{&Z6fqxy2q@ClMTiKo7zg&V83|%!_D;B_1o{gvi-abrnAAtkk)B4`Q^YlX}7m&
zvNy6p<TFBo3N>Gxr~%|(J%4ro$y(M~-2x!G8$1l(OPkRMv6BIvSxYDDanXW@C+l|W
zr=nhkn>}X3-2&$}#JA6kYoCT^>!;m_U|a`_(0}pZzSL|SN}L1HSMcIBdSSkJl4A_x
znU7IqW>ImThTb&2j53#wk=A@1G+Xj)&o(29H?^2V%c4}<H5vgCtN)!Y?sNxh5CYr3
z0+T*$<d#j4mU;sQRI&)21bIP*QmAcM!+(7i4uAca1wgjt_P5-Cl`gj6|A!6yzd~`(
zQ(*3xB%97==*pcYUE-o?k(=w!g8UuEK=?rrPV#}mHImrU_JCt|y94-}tlV19PDNA0
z+mtP6?bfxqcAm<~qU>}yD;~Xf26w&n4Axaom-2*hacXVnj|#k$gJuH)nsEC=mqCe|
z(!jJ`bImfce&f&8g2$qV+v=$hriHt?{Y-gyFdGC7$&YD{C+*Ff+o0%L8C$z&4D#Hr
z`B8Bw*RRROW8imq<|Z`PbB0#4_fI@J5`SFkFzr!ve-V^G3CU_-g!?LKgih@Q#a<ha
z8PURhPaZmX4b@8!azwtU+M4hIusIRgc;nG>KBbg%zSy+>X38kJLeWwv<H~{7yN-T>
zyt$#$vzdwBNaHzf&<$R%xT$I$=a#Fo&nB840S?51sk4g~C!5H{g+a8(icWJKDL_p8
z#V;5jF%sT}(oplxL_&?D+Jwl(lX4K-h^DY8GeBJYGh_lm=d(+qx9A#waCeIlZ7L7T
z&8Vy#(vbGy=H~(?ej+3~j!&FjK~><(HcjkGqIDeeRomSiZr%c$H5`_~I$Tv0h|eEB
zKXhI}O=q}i9c7_6tzv_3chKH=@x}CpIK7fVc#ps~VYYpnD&WzdW2<Il6T?lQZlk-X
zw<x$+$=2}JZPx52EuvWy1<GZCPf5kj?=B_hNqgtsr;ZMUX-*$DQ1x%6R8hq3+S{^q
z$Iw;TW<mSwME*v?|0zVjLhwHmDZdICzY+=GkbFyGyuRqCkYP#vtaKQ@{JJ?MbuDzb
zz1e2ml$1(PUkxgo6Hg1-6mnISc5VZt-768z&B>**A*J^1YGm?rk;wWHttX8NYbiFg
zWH^c3tfh=<J>~Is;er}QQL?>P9g3)J#zrI$TIr5JYPYNi=9Di!sAdRI&54ddMl^<~
z7uF1nL}~S*9trb4U;tY5F(jz5RM4h0of>9Kayv2)T~`}HC9)+32<kW&bWN-cH9qlR
z{xf?(IH2M0grlf2n#1+01Er4NAsmjGDr=YSl=X40F<hY*jtQ6-mDue1ac#n8L&K?x
z5Tov{61~v+3>{F+(eV=2(BKvWZL(c#G;?O^m`tYbwV%ea#OFw2{F}+`JE4K2(L3`4
za4WWV0B$08<>Hp^<Mqp2wdGONCRNMI+MO++YC(}|2Z9BCzX+0|>Z<?V-)Cqg-q)kn
z`w{R!`wEKb?R<N8yQaRqyWbGW{tM;=wyN8%nikla%G{b#5kp+ZXI>D7K4z|TkCDmj
zIOyPM<7sg^$Txmq*3XB|@}s>FvQZz1(LZDq{V~^+vao^hyrVsDIOGAZF=D>l<(j={
z`=jk}4>9h;JoFw0Merl)sN4^`yEOKW!#)f@+GaQ5o{=Lqy^gaQDc!z<Eiu{wQ0^eg
zi~`ttSz3(x4L(ZbBR31?qt?S9dpB%Bi&2)17@l?k?d`r&N~X@1#oD>!d~@`VSKm}!
zy_&*I=<wF5g$mGy&?_>>s?v~1jVj>;2h=*JEhPsl`5k(7VKo``L!N^<qGRU(H0*-!
zb^1sbrm&d@`?ax`HiM?ErkQ%xmO>PLz$UFDo09`3wP1VuquGPDOTVt)rHUI_8Ca*|
zG`f^=2G2^wxqDNwEOZYi!U9GoTh8s;HRQH((&?<noz9%wPRkqT7Y#mp3Z8kdIu4vp
z=y{H)&}pc39t9yQ!xD2yRHJ<MGm1PkYPG&V^425O<;oZ9`uP&L3c+X-mpH3CoSFev
zlSi&S`CS&B`DlX)$X`wS+VA)E{CDdz-r>}FYt^K8NY0QCn?{25q31b8W(b`$Q5qM%
zR7H9jWMPisf^#&d7)N+N(>qTuyJ6a0rHd|2qYFKCdB@#!f-kZzOf5ssFHO4CQ8!My
z^Hp&U-zUMr;O_pnlgYuoeFX9s$eTB>q_SJ&kxVQ?v#92Qg$4KmIrY{Lq?)kq8ndka
zE2mNAiFNgaWFq`Z4n!e6!`Cz2J!yqZ;T3b1u6%SNg2x+X39}_Bz2!WgM+vaYKoKP-
z7%?ff%&l@=NmOn*KgR5y7-@n_;olgY!CAJg$;c##%Bb;rr?LP-P=2AX?`A<+$X);l
zP8<Rq%0(I9=_<`v4-GC;zm)YZat{sQDIsNYR{EhhHm~PfjPSAy_IbXT%~_fZpbY3M
z2z8043F$f=Lm5_M+`#cUs0geUE)=MWMO3X9>f47+p7)a-%AtN}OdLgzk2^IST|N#9
zAX_z;xl4iBsM|;Gay62RA!4M9klbbP#n!tz{!zwlu)N2hKP==APrIOX-6q2W<C(n^
z3MG`gtC5H#guiw{yyXFf@Yn9Z8^{SzXz@li9T@XHOKgb-78zjP98IAm7pbnL91djn
zQ{y8bCQ6P@_u-cV@GpSs0E$sUi>BxXP2Ajd1vHSm8Aud5-y%kr1mXJj@33+IuHAGK
zK3dXC>{x1xjkg??l<dm;t7&gl_7x+cd_(as)#k3Yi`Y)7#h%6;r0gDyB42&EcIVXZ
zn$5+ZVa;w~=!aWHcE5J3tluV|B;aV<<1fGfq-Vf|BAzFxXhE;caLrzUdG0kLI#u`Q
zuxs)(p^$*0xgMHuE?xPc-9_XozuIsL4X?BoJI-mG<fCDKJ${STe~>Pcq5*eW&n%N~
zt>3?E0k~rUKJN<%ihBgiYn-Lszs1vcZKsHsj+8|R<Spp6Wj?X9abSWSNCDdX<wa01
zwisGgU|ZFlIN#xYxXo-mNRn4*4L@>rUa5lt#yFaxqyn-Z58Dwqe-0KoC3xf<sJF9`
zQqAZikx(h=FHoBmv8?CmG&GwHu<Cu-C@D}8Y{}&<b}*3Ve1wdfo5OO9_WEsI+p{((
z5~+z6^Et?7(`Zt`oS^VzDi$C1E0HI6Cj%@CJM$nM^}P;#L1vtW(D#NWK@XNpYfK4q
z+@P?k#y*uY?bT2wL|h*c@wZ8s{=56%ECwmx4I9&@dZNU(1z-l{;VbtAvW5K<7UpLJ
z)onzA#htg&m)bX+upnv!enK&jE9K?QPCcpGVacPL%NkTjg;pIA+VtfAWTc}8tT^}I
z|BV-x+hN@`BHgv<@<`T4){LZJe=j8IOc!)3stnXFX{xXFaa8p8{F>6H6dM`c=^{Lj
ze7X0q`)9xh^$oM3d>Xuq91!zadUU0K?t}`wSw2T4c6Yx6(@=Dls$#?_i`QfOdeO4*
z>yGn_xFOHa2}_s2;<h#D_<z?d>hu|Qj2Wv;1wm@-+J>1#xO@sjD{YsI3Wqk0myfgI
zR&hM=^=lGHSz|zdeo?=++ukd4|1FaE!k=>mmGnEHr_7rS-sG;*UE3A`=J}!kqS0|E
zWN-cuqw6SXt<~^4iY13d^kO;Q(f=*cpovBq>KG`jpj-CVwkAg4t>s1d>5Z|S2hHW%
zw`ITjdz=u+9%I%(RsmsFR&B6(DSekP5h^vrBX{DB59L7Z)`e~7K7!F1caXJ&OFCxD
zd2&oG*ZG<>dGeVJ)Hu5wJ@$Xr5pPVS!=LA|s|~cHAZk+%n<yjA8Zdbj4UPqRJq>C+
zG*^UaKnOUFNLN9wp>z2NJNyO*i`B=9G%=WOy%W+E;)CWY(6&|szSmEbQR;~4qbp`%
z6{MwQ<r-v`Nq&*=!vwDrrc<dwCN%!E+)3<5s<bJ&^xnf(d(9YgW%l~O1v*u!m!#Y(
znQMSIr_@GieF&z~zhWS@_mJ^LWJWCx)wKpwz`6KPx>m%MG8h?r07v;&#9Q6dE(CzV
zr|ao_kzvLf7mv0M0|KBaKs*j3ic~H?HrPRs4a7wZ1>;;->?COt8Qr2-!g$1%n6{;a
zkbS>d`h#yQ?^osfq3!)Y#rug1Ep(t%+c2q+jAmPZR%I}6IU?LBa&abK`J1`&kQ^`@
zp~FL>R&b|=1KxW`9B}D0_$kaUV`wo~3h%VN#ctSQD+pHe-(WNRYwUz09my=K(TrXj
z;nsH8^_1V~ebM;5JfspAr^rf0N>g^*2Ua2Hzyqvuip$jDb)7{n=nJ4-m)SbtwJbAA
z*z#7L28D@;tpPS=|Dtaf%eGNKCyR?*4VCp7nlb>g+4a6|kCdCzq-OML8Eqx>CB6Yx
zsC=O!b?NjfBUF^8@_9o{%<>-H78FzGwaQL3gBL@Gs>nFGc2Qp&96~VodOHLKOE!6g
zf#q>&M&6+zu+XM)qK&6k*s=;*vYA?`>LSS2)T*&|M6Yj=J46i96bqY$9cp4@VP`BN
zbZoF3(s_)!Xo%7GlmTt*_iJO(uuhKqCloucDFX&ygk#S5%8FNt<fh1E%7h!kt86he
z(GG);2dfcf7{I}=1JuuiCK!xjECUcS_prMD0I@`OmEcC>H9-vd5hsi*M9k~PydJPr
zC7`Yr{DH+=C|lVwWCLw?#fbxBE5w*dERTGkNl6ZKn7Lvt+I6|h!(4X+$=_#%2m!(Y
zdnKXHh$Dfk*WSvF%AB8=$yO8LGpIACwJy2kKrmFd<&h(tq3CP16gz&Te-(T)^quE;
zrf{RMkl6#`?J_V1N}OnDdjl6KWmHWLQtIG0c8?N6XjrNmhbVPa7Ts7`QTjnCE4tyV
z8I;#eny<yibLRaY*5X^kjQNNx0i9(vi{IBR=Z64oKE&o_A@g~BvpAO_&TPC|N6t|R
zxNY|s<NVcO4N_bD!sIel9l~rFZ)riyyUC-BE8?;ka})qWk0~ge0&U!kZm@*A^K0@w
z{($Jn_)s*=|LA;&KRX@+pUb+dT;q!d9#bqC=YJe_GSjD{BJ_aWzlaI?JJzbLZfkdM
z-~Q^?gj=;wT4akvjedA{=<P-P^IQ7d683X0AnDt6?&hj}9XZdKtF<kSHrJ}Hv25iu
z@APf<=oYlQR?$nf%QcPB8_m!EOczBcgTbZ>F{=l^hbn@1QNk<+ELlw)x*6IwH^Np{
z*!p^jafZ^4xYk0NvYj?tKQ~>sX}5kxvC>-ejE_gub89;2ST2`P{lz$;QHw!c735Wq
z2ICI!dQeBTnkLjy5vOBln_IP*BrLdd%w>bV2V7?*tfJ523qYtun)Pe&#SAL=zJe@Q
z%2e`s%>9qVPDsUVKvddGqe)4Pt-qnBZ%$|O*Jy>WQPF{6*swF%N2LS41Y2%9<$lek
zuz1M$J4_LRDZ-*0I&0WnH5b|2xE^feQefMH<`&(=?V!9y)bFiJsBCn3qj(bVzc#X8
zHhR5VmNc{E&_IW9vz85YjJZ@A;dZ=;^94^Q6=X5m{K3kX?na_0-SZXa0oti3aS1TZ
z2PUO$mwp;oQOGsBxipwmsQlm=PnB3fjiu@sQh6(_aYYIBa<Rs{M87!u7j*)$`Pl!4
z9YjnsRpFM#_9zs7Lbl#7<%_Cit6l}BUAUSpwlIyZve{9URxUB|yI~!J96T9Zi$>u<
z;n|9$5%!>OiULrabr6MQaBB=x+>%p=cy)?-S))r73Qp{HhuqS<IyY&%eU?r`o;;JM
zkhGYR@kF^$q@#{eHuBEqb^1VNT_JP)@a-bUdA;<RekmkF{FhIU9+?%fI19m0@&y$B
zGQkX^O3o@rMMbj}mkFff=^_*t2IDlzivZ*#49#Oqlcmk{DZl}UrY9Du7_CQ0m0D+O
zbKE-h2dxuA(*|)_>B<Y{b8={g^XNjP-_}`14(&9{xe>ooy<N}15~;Jg)e#{^ABSw{
z=i?N%yA}1-;`hV1RAb<D9T$JS9JWDjFBvD)2)CV{Ek=`8NShAUF*fEbkGNV$V2MvA
zB~}dPB}P96aN=9>KS%%xQ23j)5YbS^$1XsUoP7K>!D%s^{O099hTq^@nkc<)ymecy
zMlOrL(f97{!&Y9d#%Y>``KW~#sA#23>{H7+Dq$n~TpW~yNIMPYr)bAeTgbeS{7m+r
zWpY)tc3P8LRdpWMZ!?(%2)Y^Z4YO%J)%wfL-?HVX$%OecV4P!-mXrr886BVC7gFZz
zES=-o$<F@=K8;4k^;<g6cBMR!B3t+&4-%6xF{?Hwq!CG`BGw$AcxvkitZlMPI@T<c
z9NM4P_j;Y|^P91I5isYu&v|Nvsg^vPzT@rFY7C;63r8Am_%r;4X{lS7>=({^Ezt#Q
zudGB7O<KrSv~J_C6o2iu+FLN4GIcyd>Wdr?T=Sskr!qx<;;d(>?_;NPbI7Mgj^E0T
zic^T$FKTtrp0I|4wMz?@()LjF%bNNvvFSL^VL0qiw0FmSwVFqd@DN4Y*DzYr=k}U-
zN;ae8SPdq+()bHoa}PKsVO)Ec(8{8&eJW6gN-;#Ljb0-H?=WG2Ma^d%DD~A55p`Ij
zO$Rv75B<$A{)a#0AOG?6o7NxP7?Z`dH|Si5E1xs~%^h-!p?-jU@KGDCZLnsvAbrr?
zf%omfu7=EPG8<^a^_FgbLQ5Lp8e$ijl0;Qx*nn;1NK<7RH{gnCM!*3Pvb$Xj3f69%
z;Hs|HRhU%Cn{LjS-Xq3za7Dh<?tv@fKhZ&Kd0&al)WE6UaHRDPbhn(p$8#Ru<P^^G
z2xX;MRa3Ep?Vs`7XC-pHGvl^ZI0Y8cq~lng5|qCBt^IgCulJ`H!SXK@;Oct%=(C@?
zzy1E+o$@n2AM}TV;aAS`O8@|b&>(~EzxvPo8QybdMLdf}gS-822gBjs!GSkCxZA(C
zfA4o+-TeI9|C|P^C@1=qrAy#V`d+`6XXBpsyf<F~&-AYM3H$%<-MjVsA3hK6>>u1c
z7!K~-J%Igxus0Zd<?P?=|Njj7KRl??|GgXf|3&Eku%!P7`+M;Jf!DvG{5SO9M*n@_
zzT?98z4>a}{eN(9P`m$u+#~(p-@iN9+rI<Phy6Q9|NA%l|Jr|UIo;i^GfwfI?$B8j
zlkPoyf>IG#nt5#Lop?sJjW1SD;2Sq5lDGdp8cO<jRea1urJZUyuCzV*{S9%sIsb3Y
z|KH;I5Axq&@7sGf=l{+5Z$JOppDj=Sb?5)xyZeKEF8>Yg?qj(j?En3n^Z$#S|6lhO
zdDc6PlU|fuI4q|4-tT``%NNk~>jOCNb<0?~&n!c9<h&r~BG7zoJPq<3?S>d9As=BO
z{w&Sq$NYRz#8dh8e1E=@Uy{Xajxp|>WG<f}4U=E~8P8cv0segsgM;GmZyw5rbOk?`
z*T8(FlSMKv(sU}vImhU7K{=A*eK^A*-=gA1Bcc)-(2mojh?WI!S1o8Q#r{Jx1~eXs
z6Q5o6rReKAixc)YSi)a%-zP7^dBFO3;-K>pE=lQh3gw+?969_xiZ?~8#aa{kZ`0Pm
zv@^h3(t0~1JA+a!j!8}^rXfXu4bFl%$qRN_$qMmqDZYGJ?Fo7vI;R-sPX=vN>d^Si
zIzLfJL0cVGMzbi;W&mTtWN(M)EvH;$V~868I~{I876~i2d>)PCNsJ-k)XwA*=y%6h
zSxJ11YsOwyl7MGHoJY?6qR8UYMZvC$-PS8<oX-c8pQnpy2#e?h&fuXi?l|RROj3E{
z6O&83mxS>Qy>ZRJl%Kmh{E;2=HU6G2TxFIN?Mdm9d`<}y7v-AfPMDJ21-e5)_jCj?
zpm%U3R?yAad4XfgVWR-T$ru^hDvunxWR!z*`Os*LZ4VaPZByU$;ybam*b8OovN=Q(
z_Ly6ivo~zp5+Sn)M{atG!8|HE_C8LJJTe&ZJViI-OkmZk^Eh|LFnsi%%Pu0KTmcf}
z$hpkWYBBRzA^r_<H@##*8=_@2UXT}_Fg=2uc^+qB7b)4w$<s*zJx9GfouZpRqc#l5
z$LT!6P4xpF{^Z&4GF?jJ8;LeG6askzmPm$7g_IqJHl-`pH1?y+K{F6~(_#!p>kXe?
zwn%7FstHwfCS>lE@QI^sDi2c;03JtEGS-^mN|RBR;nTvLsPM2*bI=||K$b&p>cmDy
z=K|i|zHu(k<MBBQu^NTU%8RYy7)Akj38Q8sP=cH|qCQL>BO0SO5&AO}(UeSQ0ze)-
zd0`E*crHR)(2c7B%xCya<Rjrm{&b<a@^Knwao#MwqUI0^i1KfMje7$?D==eFis=N!
z*oD2kI_a`Oxp>b~vIz*7sZgQ}Yp(Rk@<jwP45?^RF<=4?jJ9(;i>K2-ptb}Uz0ZLB
zM*O0>O!-cbOB^FPrSvU&V|6ZiLt|o_7`IhedfRZby~tu&hjqBU9(Bje0U6cg&&ke-
zuLh3Z53;kWgU|gijF|-;G<3FLj-b*-p@dv@cYOHxhPO_5?F+m%R{TgqdbX@=ZveKI
zYb&;zTu?ziVJZqHRnPiY()W-`u@2BvDhdThxTf_Yf;B;llA0dwJF>*~9dYc@LP`k~
zLmE0Kr>ZyQJm0WofAihz`1>~w*)>qGWT1@#T2xp|2PqH`)+bJ2JdEP7R`WLwod+G(
za11>QBY0_3Z)wcqfojA?jVSi9IE$GvJ<QlE=F>OOB#ez4_m7K(YK~cMX};x37<O31
zfF<pTj`N1?2yD@#LIS^n#d0kQh-`#VHlD_~dx(lUNfd>fAR3^gPnMYm`aY897rlj}
zdzCH#lKD9<I?|_7=BV#PrSPzKQz(@=!lGkU-CJm7`liE4`#8<AD4(aujW9%0Of=D=
zWSkVvWtzRi^(h;be6qkSo-_1N!b+Q{FbFlNr)o*;plrYaTb*o`C~HI5xOd%lneDSh
zt}LR+B&W9z*aPgz+Vjcb<p`nNn&YR<4hc@?bKPe8O$DYV4IhSGCYcBd&I3MW=_T8J
zdV_+hcbv#O^PHtahFWn#zOp!H5GG}&ql|}?&jR4Em`A9J5O8dvc^vIGwq`OahqjV}
zQUE@_Q5Wb9J4w|60w73)asX+g3Vt*mtAf<&^MylDxWSSf>T^NV6ovkF9??cA*N$;u
z`Wb@3DYz@<Mo3xNY{*IrQj_@O`64%DyjLozbg+O8E9D^AP)o*_7bx4a9}}BX0O&^H
z<t~DFN^B+$On@~Cax4G!%G}*haFIoL?9=AvLDa;F&yOy)jHnv`G_Y|aYJj$JI$g{X
z#-SvU_-e&Mns(V<W&WP?JOPH(py4DAV0MDX$pkPm8As-<nbXB5%w6_XdT(59tYv*h
z3&hyVhANX7`&l&EXz?U9mfbx?^-cFdkWJxiig?2;z8o%y8X5+5P$2NhI2~>2eDS!@
z*4ud_*7=RL(3+nBEHND5*pDofGpR*Ehc|f<%GV@1QD;m}DVtkxj*5A;Fp7f_eP$Hh
zL67}cM*9=0k$IXIYE-{c8I~8(oFg@6A;r}yHxC7PNksZYak88;XTB=etg`7GPJ*dw
z+)^C2D(o78i~*Epaxc*r>rN^gv@`14Z6q;oxsNuVy@K#-0AcXMp9%;w`eBp6!uQzL
zOS@)!$z7$AiK}ieIbr48u<b7i7u)Wt2Z2=VY`7JaD3rIHXA!wda(y{I`;($SXVLJG
zGhrS#sEsu=Ph8RfK8&&SBS-b3B1VKWiX-y4QeA}v(`6J;{O-6}2=}y;dZ3Xwc01Gq
zk6ywR73`bE*jtLuBa8{9iMpc}G*wD&wN2<^Chu1WHT<iPKd?<p6BU+|{03FVO?|*X
zTho-ra<Sw&obBO5k;iE5gp7k$)GJRe{zT9b1f6YC8kr&ZDmxRttLQ>bPo++8RIaO;
zhlBYA;2$$qkEW*>>5FNU75b}@KoH}7sZkk~C}NaRloulfPpqY+ieY4+i&0w2!Xt@6
zMhPj&jwDhUB{U2`XBqW-C94VXyCaSw##^?*g-e>U-3{*shL0L{8VJE~SVicL>BJ#X
zzp@#WqV3A~%V4+^>7^Vf_XIS7BU$HBiRX1?_yIXGcZ6A9;ps-|J~7I{hKRBYc4G+z
z#r|k0B?qAIC2RluxEiUO>{=b5vnU9CWwe|ikAV#swZ<^c)B0dhFQIPUn`e<^0hNY&
zDr<o!51v^HPlZjhu#=v7F|>^zGY-_`!OaT<WmKdPf?>l3_M8Vy`5wh2!SdM}D9tkt
zqln*PYtq`z85y_QkC{g3I12+WC_P%Kflz=#uj`$dF;&p>hll+3l*IZ={Dn6lY<fKz
zU=bN|%$pX8+nzbaS7g^^03PhACvi{nW=800fnFJm4l7yh;yk8U9b=xDN5%gRack`A
z43!7f5;k{K+h&`Go&=|dcjP`xH&<!TOobOuOu_+x!Ye~dWGhL?IDr=4+!XE*zMMtH
z_%d|c9iz$i#&e)OH_k6y*$sdQ206n>tfN+^WgCApgjL9F3eC#1jS>j5i`hqXD~<GG
z=fyO_JqSnB1zvj^WPojbx{Tae?_|28@b$oyJKBf?ahZsl5w&WYG|2W<e$k~>rzHgv
z@U)~&KjQOET9V!GRC1y55cs;jULCwOycxW=t-^8T_&f0cRg(o?GgHjxw#VfZ`eMdP
zDgzJJudNz=&9==M(c5mDvr|po?3L&;_TA4aZiKb+DBYA2@V3{|1K#lGrwY~qtex9u
zrx0Rswx$zuR5fpIXvUYO8S7T$dgzC3wKt(8oM)<%k_yw!<4oEqM+Lc6c6mKotFuyT
zZm^5=j%=KaTB3GO8SH%%{nFa2rCd7XJ`>d{8*OhqkL_udl}6ZF*VRhR3;6}4B-&EV
zVLC}9;@Z7PTw{t$K5+{tE+ONBh@8696^kTsKS$a~;_KfpVvNIgc^)Y@+PQ2_yT7Vl
zGO~ms`GquH+D3}uN`ylimA)kWmAJAMqplTQgk}I0GF%U&QY_YvT3e|AWdUW8s=B_<
zOk#cCHCt?VDxDaijO0bT9y$=Uszlr4L>rT$(5zE<h{%B24%2i!9co&Q08@<~lnPyq
zGJaQT%sNVcZPj{%0td;sL7TK`ZHR9!aa(bX`)M%`G3++eqA`;a5%D~_^h-5dhuOOL
zc~sPHxgTS?az#D)oWf`FYrOAX555-{PI{|+6EeFU*o0VIff=vBV@Jy1>C9R{j^}DY
zt&M>k8dfr<nT29&SZWD^FqotKt{RU?vmhLrHDtCmAp46l6ns>C)+lSw(#eYe4+t?R
zCAOsmbhIq|B89g>HZEg%PFml+rlbPjx5`8q%(R_aZJ(IQfe}DNN}!+);YzZqmrOe%
z%_8*o1mhGovMZXTIBIRA)0XB02_SxT#BR;-sIUdidCBpO&W0evn8z$+cr|dB4i~-$
zh<syG1Pjz^wn@zpNwXxv@tA#0S|3<<hkfr(^wAbA=WkhDM5SE`nZ2KBMhCT^yj%-P
z4~<<TSB&#|qd#JxaDKHQ^q%Znr+S%Ff8=wEW5t?8TwgF-&r=pyXo1J0VvojNs`j{K
zYEUqp9{FaFTvRd-xpR(@mrl_sL!>Ui>-mYZLc=61{Wd*Ceo(pG6f8B+Iaq28D3xnW
z2(wpuzhKveg2!ol&U4g?UV`AJXhf+IvgyfrgkCmLTK_8%#2SlKp~B4@EK|KIi(p#N
z=?$uOW-3o62<cX7FeG~=FE4CV7epw$x#L9)>U&gS(VCJ9-eOKWBs-NA$@(XW0Zxi=
zc6pA|!fQigm!XxKmrtW;?(Vg#tW3^C1cY1|XYP(?imlA&#f_%X49QVxWhkCBZX2lv
z0vh>lBcb3H?k=Zt`zMi1UXQ`3^A%w5jDm-GUhf01^wE}A8Iuk1stx()WU8B|P%t=)
zoWWsTl4e$P()zV{JSn}Y5EFiFec)Yv{I!Lv6@aXkbTKg?E7WRIBeZB&69nclU-5aC
zf_MPuK{_5UGUwN%<@`ZRr)?9=0XE}vww|=0DoW9DSnh{L{N{YRDht`tCsb=*I}PWa
zF>3mg<+7Gw>%Iakqk67}Z(B+08xaf+X*P?#I*%hm(n=zPd}+;0=D*NG*%~9aj4nad
znddwMQruCgscwA9js}jwS0by+C6J?HF`rhho#>$wXnn4-fE|^zaIF-M{+&#4vL%^J
zh1_T%jSM+%4~RgXn#A#bA~|j{-{+@jDy7W;&r`$rN3nJ^2%l!(E9`X3$>t(w3QRN2
zBFQ2u*1XjQX?*o8@YtV&5QwQs5J!_#CLh7LtxJ6${so3KC7(hv^dbAHnLMCSv|*~!
zuo7(8)K}@22^~5H+qV2F6`O`*%j1&``(wh|n!fnm`myoo#0b9Z>!nZZ32ku!9%I2*
ze5k5y&XTFpETk;2PpqntZ1q}LmRGZ-&EpM@#3zwvwbyDNcLzo|UwtA%Jz&&CKMJVa
zKXGnL<hZ>_j<d;*|AAo@$Vod;+urct#Gv;Nh}t>UK$T_O>(WL?85W2JN}tyk_4)z&
z+b|j2LRf3Y&JJM9I=4RoDQ+<Yqwawv)?5?}Bq!H%4_$?ZVvRdaIlr>S`<m~Io^VOC
z9jaIAYMp77$}VkP>E}KzuiQjJEz(2B$eJNaKB77=23ihE)!t4Btya>cZ;^JxmgM!U
zo07j%jj|dyCQO4J#;$e51~&dgnkv<akogdnm|@uQ*i>7lQcIi+^?i}BDEvR9l*Ny&
zNb`4U(kEiaiY;Bmz)q?G43Z2K=}PHvD|8QIjW-QR`BpoY!S-V@1~E9Ps7rs_fV$-z
z@tjz&*3zpD=|L?YP~Jxgg|qb}RxA&d`yYciEUbBY7>!jR)voY)LPSvXDoXiIjZ)nv
zMSKAzv@J~>QvylGZJtFJ01f|HI_fJ)6kE+}KAUrsUEotr(>$VY&H5weC!p}7q}IWM
z3eG)@HsNv;@ivzT4fa(T58C2kfEvT^>T22<4v8ye`+Sg2r|Fmrt*jZAkBl5WqN1(p
zt|g#Z@Q%8qAupg@oG!@Ep9?4*y4ON;0%mdlegP6&986)2G0A65N@1a3QZ4WMZ?;YR
zcpp0<_j{1oL<&es`ck@Bpg)`?Zb3`!3>KO>*41_NR1X@{>>!6f9QB%8{h12I#j3u&
zZG=6H=vOMks)^U`R~+X>HW@$+^q)lUA2QTHY;od{-Lzwrl~O;ZHuwlVOHelDVqh*^
z`vtFN_3B4qQ-u|gC<D{=j@Zdej#6t-8n<nrO3`5&5hrIR`{16D`C^P~iYb0p9X(b7
zHpni1XCU!}&AP$INeoP3{7=JlBq=AxY^xR4!?tXC8gi`z22hIZNQSjAvY(!If5_sH
z$60|5nU)@w=*J{{Fa<fHyEODrt@6FnQ$VvGfelXWSL{bvG$irbsc?>XBAy31-4rvT
zUM62@*IFo<QEq+&Tx67)X8cC<_lAl8(zZx(57MR`6;hK#&3xTzS0#+NuqMP4$q_}L
z-d|q5dch*Zw9boSe%R|FDo)SSyg0lwyfehK3S1G^vZY{cXpr7xLuwObWBl;>Ass=e
z&<duPJctDr_%FSSAB8@RvjB^y3^V*$HQ(nWb`Mb#+@l#IWPFiBORh(0hhA;f7^B6C
zKy@h*1~KBRI{MGTf7qHbi992bFMxhK56D$(^QJy&R7bd5F%c^EEN`7l8?^^7e#)!M
z#hp;<Qgw3=6(jhW2aka}TqK2Rhi179kPRq(Rhd>TLXwe7yrhGe7ZG+Pl8B0)M=Lt2
zM#I_yL9vf3kT&simZgiiFSL>)iM(#fo$l`HZcdiViJE&ATL+SJ*R<*NjIJCA%0LPO
zC;8-decjDW!i>v*(O+X0D`BXNFs{4U`ErZ6b}?nZw_^pGC+&^ac7rwkTx<O~*1Trz
z%?0O);j|I2nH{Vp*7|y+9P)?j2cmUB3E5EHTzz_CZ;HU~{8lh5i`L;)d<}S&MV|+o
z;%mmHEc#+NBsBf-jJ~&R?DQ28e#=2t2d6}stiIILFp8zI=81|Nv3Y2+D0yXMQn!`Q
zgUh7V_K<mT*=bFNKW5EaRMlh(*447Z^2_YEcoxh7^mvCi_wb9$qrJLf?^*WP#wtR&
zAu7qjx7eKA;~CZNl2=$efa7xe-Hy$Zw`7Uh8>l;v`R+a0RqAaDO)D+wIGj{jS6!$&
z>|LOOhElo0<Pxr)QNn0`;Di0x+KfxWD^3>!`g-`w6%**N4CteB##d7d<7h|ttNm%r
z91T+pu5B|T!X<tdJk#%Rps*if*7QtI&RX@zu3nW*<P|zfVa$Xf$muG(RQoh}lxcI|
zt`?T*K9{Nrq-D8{n!)%_+M5ng#5$9M)Giinej&y{ZMWXF13kuAAT&U>$qHsSr8T+n
zl|o1%Goba!G!2S_J8je6?U|2BU}*r0B!0h$7-MjDiAos!9Op@pxO}ATHqssYjZLXV
zf?BX~q9Rfx5#CdWvN(_WMgxk1iOrlrq(CRI!XJhfR-x>Kk8NEaoEOk?661XZH&d*r
z?Jm!OWM~E;<1Emv=wQ)NW5)Y!+f2oziC86lCVR2l+8y?-uXcV-D0U-`?6P8qqgr}$
z_21#I$&M8-<e9+-dQ#WlPV*Qw6#!-U6e?Ld^_N|dY52;!SH*)J!NBYwI_TS1d1o*r
zL9%j{K4amKCC@9Zp{w@aKhcMvD@QPhriwQdiuXL6?AvaGeKIHVmGTC%mwQMVor3KA
zcv9J8y#d?hnbwrLebuPf<cFbY>A0qpU*^>MTL>*0MI9JZSpUHJiMOPL_Ll(&(9<aw
zSA;MOX2+pGij?$fTmqg@HOta7IDR~J*xOuRal`pTXBbMxw<9jFZpj;gTh{BVSDG%K
z-&5e&SiHY7;311TT^Z#1`;zJn2}P|?<2I;aHY+_C5g}@>ZEqQ&<=IjrcNsDRwjw2N
zfdirAd}vLhi)h+9?0;;xYa`T`I_0Le%2NDM+kU%ngz9`k4IL;Vce-&gfzecf8dp-w
zn~qzi&(<x;3h*#%u;pdXD0@t!E;rp3rmI?2A!BeeXBOoMqz1LB<~M&!A(j|rpQ)|}
zC*Kba9jRbv=AvnoC#}Mvn;_@~_yP(kIWGR|zbx%p^kdLIoZlqL9e#4#;^0T1QUd4O
zK!x16-B28Lr@RIk3U06zRwq!kf{><p!|gToZJNJZb=(v0Y+lID*iq}Q=}6$@F(1?k
z^Q{Be16p?C6A62S0=kSl!6|MYT#61M9+|R5s(Zq%I<3+zYtLq{Cp8r;kBvtbnPQV(
z#3sBwat3t@4y7jnk@NHt=N?9Za<d>f3l$Zww$bFsOZ5op^i-#_TgHr=DiX1yz18n9
zDD+RH_PH}?SLu||0L?)>u~I&MVt;788ng~YA04OlYAC)A@atdX+aZ0GZ};G<Y_42;
zMLz^X{b)0<x&GKv7Ust^uxVy`8pWWJA*y9nKGEdm)h{m|=|gz<=;*=A$1h$ze*Vm>
zvwPUImXOXiuTh$$Y+6Gzt65j+)15ypmd3;wV+2_A=F?;cX$%kB+SImTEpGokX+2ub
zACf7L|3MlifAp|OtMSfIYlKph97RP_8~D{H^JOJe0yk$CjkR?*awv#-raW}*mStdr
z1}#IPQm%O$gexmX3UdZIy6T-J(nq&OylFe+j501PDvGmg?n|Eu?!d|%Vtf*bT-i=<
zr)^hXgFycn_w4pMBikTE?}#lR2RB?IkkSo{jcVpis)kR#f>a<P##HvvJb*PAASi}y
z$v+LS>kx(m{TKU+KYI8}Z|62s8*Tt0wV^YFj#|GmE`=Z=n<ku#Y6*ep!CZq$i{i(Y
z+FQ#~ShRpObEq0l%X57i!hBS@hm3WBD8Vd0Ko1{#<c=R-M9N@J+I70<sk2f2n(EAl
z8-{X`7s2?Q>$2f>DjEQCK#jjPtM-_MiTG=@vPFq7`Y;>ZY*kuvF0I>AaY5^%`Ep+e
zKI}Xrb7~bQb~aR_UJK&z$R_GS;|MK_y%pkNvs+<TO{f2=7&d9uOsEdes&rQ|OMbyP
zy}CdhS5|t%$Y0kH*VjAkDuE{a(rD;w^}M~pM!_Le4lddsJI2<mxuV-Z`(4|Q+>2&k
zDpZZ@=F0xE?=59tz3y$an*f8WOTOv8()QHr4QaJ?7gfffYi7;;w0<v%UMlL{rlr}Q
zLT`cu8<D}+TNA*mFPmlQJ_8}#atXI4iS<iayPVpr)-EYXWmCNNLXpL|tzfhY-@4Zu
zbLF4$+-JV_LK^@deHI@E6>oL2{NLkwE|b+Vhfu@$8*00kX%@Dx<mw_uVT(laP`dIb
zWm+UIk=x3{a^pZy)y3lKAVdFI!z<mwTf)WMkr66$loMhM9H&o71XVgQ!k@7^>_sIY
z1cF(HFX*VD5n`U#kov{8bG^G}*D@4qvNSC^suORAuO>zh$-T>9`py+CnSu@0FT*N8
z1*I8l^boFuU?$;HMh|F+rz@VECMDmi#WeD)wk7;dL3UcRfhUyw?}a7EqL@lK<y9TM
z;;SW8-esG{9@c%};}G4QD9^CENQioc$oE%biM|%Bf-gd6i4j<!zTv8g*4j1JylS8>
zs$T=oPhB~I=C&1KQxPL+*H>nHL@Y<kYuy%2a{pq9rd2<CjABK`M#6JFC^L*3hiloD
z^?XHo$u&*91rJCe>6-+}y4~{NRg1;fvY6(HCahaTjFhtUuz+4kgix-;-c%P+EFc?r
z%_@q1m6%=?4nrqKw89e#)r&5#i5iUv7ND(cNi1<vX-%oxI6Uq{pUsLairh*Rf(`5@
zCan(w{vQ$i=x)ReHrN@hyLNreOhimQ{O^DF)xX}4*YkRRdJ!!D0!KxZT~8l<_EYz_
z-`~4ae#Ym6{%|n-%2|F104&g-4Z8p8Kl5jJ&zYh6dNjD(|8_7O?j0O>!+Qt!4*J92
zeRcEmZ~t?ODe1_;IZKz_f!Fu?y*wNDwC6o(y~Ava^4`|_|L)zp_4^+_5AN(A+&vf$
z?%X~2sz2=S9Spv5_HXw8e+K;@9#rZ7-VOc#BJ_V)(*J|~J^25?>)%lR8~Wc&|3%72
zZ@$`W{~sJ2)b9Vm{vOi*{r$Uxz5P4zd^o&w(ErNm-|YWu|GDLKce~Cw#qg1b&Z3xf
z@8J`aZYaQmXD+=6*YGqdf~O)JfbZiCkdIOy&t58Db3NC|?{A38&G~<G{@<McH$VRd
z=RaLjy_lxOCJexu^Z($^@Q!)@AKaY(U+Db*y0^%)-f5ilqU6GvuZr_D+57$PYWV`X
zxq860qG{s1I$t>V7iZ335B?qAIXE2NafTqlBU|vNU;)i#ht4F(Fy9!}<@`gMa}Fba
zH;sZU@h}=^JeK?gre62+_lqElLO;U$){c`6{d}Ba#3`zYIjM>?EqRjhYCIJ$XF)Ls
z{G7(8-f1v?7bRgXKKS!+BC5OyCoh6CwC5K6mi6W<{6tRc(}E?X!b?wy`|gVekDfA@
z=3o4u1N!sgf&R(Qi?Aa@S!2#{Q-6(pJbF0#F_`92hpbwDjL&40L00-%mQIeChZFEv
zqFYZI(|U=I_s!f1iq$~|{n?sI#Kh~hitHJBa1Wf@j%%s&TAIu|U9F3DucwSs3|v}1
zS(^Gq6pANe+K09P#?s6IVV<|N7`A5kVa)3u=Y^lXV-VCYa+osT3FN9f>FL`@bx}gh
z7|GX^f8Yae`eTnK;9umvn2Zn5@;?yBK6+5KrQEiQ%s;ardlzLnk2pU~#nm-^hQ0`A
zUH7<kig!L|Ey_^?zgGC`T}#?#v3AM|ttA!+@z-tqmEy16R$C6?`J#Bn(_)bu7on*4
zE?VWTXmjbKwG)E)6<RPK=O-;?P4GTTG(zzd)c8r_g%>9|>_WHS;jQtebt@9ZESZL(
zdRDMf^|lpC-O~Ajy`IEp7;IpQ@kh@V8Qw~r<?%nGkvqK8aRvu%Wjzpi6hFU<98pqr
zuj5{4kGao+Zl){ImwrD#eOqg@D>pvPm3=1X8%q(O4Qes`UMVV@s<s1b#NK;J7|$F_
zge$$e?`09~UU$%Ox|+w!H1yd%WgE8Ik>aQ;R>AHUyj0f7XvSg}wzRCO^oGNgFxG+1
z!|d|`-%AO`E1`{L-10pYn}<rUtyH}u1Uy1wY_U_vfpplG9Z_c;z8^=dWs7m@^l~)x
zU{_3o(`Y(see;X|;Sc%8e|&iQ@tf8kT$tN!q8sZx-`5XJV$U)9i(hq=aT4XlY8s7N
z-EONoYQX<Z#%+#z5>Kc8X;fTBKub0lJusl(9_(uSgUza1SFd=xuA&BxYAeBXejbea
z-e7-&bwJBaI;?99BBWEa*iUu2XvBIhMQSQB%+^^JA>o(9GA+J>>3A^}wiGX)9w|OR
z_zi>dRrn@Xf`#)b@Rt{0T!*r7FNe^CV6)1Kl5li^atzuXlb*20+F)^#D?%f~1ruK^
z*$eH&5buh(m<qQ|#Vu<|Hn?YAu#9re2QXNA)94JXI;QD()H($`1_(~jP%KRZ7f@O#
zoO$CQxd?JE%z{fbP$9HK@s9f5!G6c-d-wJ&_%dm6Fn`F>1$2quI?i7E<FCm#FaI(8
zMtL)&=eFq5(kXm|ed3q<OC+Zp2TrHJrrPSAo~FwYrIwup<A|3|(j;mQVI75M(YmIU
zPSPbO1T>G%JP1dFR`YhpWjg~7UpEl5vUS|vjl8CmoILFOHV8;K@vtDg!-Ks;^9Po7
zd=89stBB9e3tL7z;4X_SU1`nZZmUwK@pK{(0Q-^#lQ;Um8~xvZqW<sO0f7D6dpG*O
z8~xu}{og#Dt}sL((Aovd5nZncY}Wt5W<R*Y?LP*0_p#jY&fWgtM*sJl)c<ic8AxRF
z6{^OPxuO}XV1@fIC<06&4~(reB$meS50sLqE5X9uFr#2@&odkeFiFAGc>&EP=?wEg
z{1D_KBkFywTzr^$-N%?l;XF8vr=D{ZWoID2Kc5ESDnEZGa@jt_h~mWz*#YS4s7S}>
zL0-gTHFGB_X^w(PRIHqrQEBb({AC5(faRp*oj-<=#|6fV$M>Wan4;w^mJAkCXcpUg
zlw8DFnqa3e_9L}{pAwj6md8a96)>7bQJlf>I?jtpFquTjWiSmX0mVLm?{0tp;M=kg
zYv56L7R1v-K3%Vz?w{hz93#Yab5X$yIcvpZ&0k(#dQhl4&G%{wzEYScmJ<UV&rgG#
zDSauKQ7o1%$WB2f#5@r5a-jgaX)bg-bKr5Yup7L10MY{sx<rZa0Ln>Grsyj}sLg_8
z!FuLcQ%<ns9C-xZzj@BH^fGwoX_&J}#!-g8B(RDg=8!=>j~7SA=3GHVIsfVgUXIxV
zOqj-qSuwQtC^-X3Iii*MBVSO~9z&as^9Tg0ix@MR{Omb{{sFA0=QxAA-*yMzf}lO@
z4~E>v{o(V+hfu;B^alsMw<t^H9=-1Gd%zP(e-YE^#dLVVxmPn?ybLbMamW+iI=BN_
zK9;i3Q;@%Ai<vroR#0}X`L<}?Dx0#>IiXJ)O#tw5guY%pg9jiRdA7i|NtOLwc3&pV
zl|&+Dbb;x(z8<M6E_<Q}f=7OyE~X(SZU~&gLpDBDJxeJz0zJn@i0Ps@pl*v$I7u#5
z=%^5VsOPc%k;flJ{Yj9Ok{v@g_x?p1Q({h1U;!3lfU$fL37PjP2<y)$52b$^3h$x|
zxwIqt(<{rC-9Jb%uVe&(xO8UeOn{nYnlFQNz-wQ;9y!O*QNEaAq^~&z%Vjir-fs8g
zWXblvj7Bxw4a3{RGFSU8z#Mo<&MR0{!3A<s;6Pl=DL_zIBFqCs3jbM}JOd!HIY+a3
zv2sy?JP>+mr8z2s33hYWg)t1^EP>yps)0OTSZ{K)R6;?Mr!&17Do#!DPpY%u&tq%h
zwxNl6Na@^w@H41B4FCt1(37FUR0bs)vN+_tU0;mv1UGiRGfq?U=jrS??l^BTVv(}$
z$MNmAC$^Zo6pGQMKwG?BBCYOQ<;nV<230xynMC>LbC59jPGiR<gVs?xg(J|=)Iyus
zAp6^cV_@k9r@2etEsGG9$IwgE2vG7>Cyzs-FzFfQNkYO6Oi4&3^FvtTtyk&iF&oxE
zMkV7&N50;DBdlaPrHp=u&I5x60^)a{F`7sG4Gm90cNp8;dGy|M)#$HPhuoj%3`c?L
z1k*DwM>qn6;K&)T%Z(Q&7rxjJy!srTLG8A^k1#D<RtcN;UYRN$4hKtyhMN7(U9c)|
zq4{}D;}%aUt!G<e4@$~ygbQm3{=kcy8($$xYo>M1U5hRA!WP>VO;t@h7xnD`d`tih
z`u$zp9!SseFJG{yytLDG@F}1LeE#0)d;4`UZ{b+~9#2yAKV+DTCriK*jX89(0xeIS
zbJ%3g*EJDxc{49qQgdu1xj^m|7&65EC+v`9+un?+^gN5t5FZt~!FGxu9GI*|-A;7S
z5ZttM^0ze3g2V;NxXvddX-|;?OK=vc6`!R=bg1k`H}1S`v*3#PJjL+Csmj%eEIK=H
zy93Re7e-oPsGOZxEA_%p*pl~aX^}XL+m?N$SCrS68=oWfyH@Mv_Na=c$L~&@U5A(k
zpozP?@3sqWUf-_RahCQC05I6`Ms|0Ha84w-Zg`-<7#W`TS>dt=6))xuz_u1-CDy3(
zu!FD1#<3Ay+PvNJwQOH7Yd-)*X<$=%4Brd`Ia2h3d>)J=g#+CkIV$&Qfg7^M<xtxy
z_>>tODudS^^&GeCV0YJjR}wlN<FKr9V@Q0X|G&}y-RS>se*Qn||LL{Qj+3<}z|H#q
zI|qAr?;865y*q=08~y*6__3+;HM3mQ`3hs%m&jt|d73?qm#9|P3NmD12;%u_HOBXI
zXCbW)#%(CAcQKWX|Jp}OEBeSo{0F&DDN%f`WURK}DH)oz#$u=%gHRKSTH|=W;&QJj
znO}tRRs)zMj6h_XCeO1-$>b?O#-)=8!8Y;bS9Trp``?Wz&F2reQQ{zBFyH{hixC}`
z9R)^$d1|$|t?SWZ9%ZaKWV;^C<}nkHsBu!){7hO^CakfiF|xyS@xU@$fKb9(6C)Dh
zSUD!=X=x29JEz#s+}n@GcuIV<yv8g^Q;fY*z+3o^%7)H3ov-}YGFiIH2l8Z}SXDe4
z@urMIT9ah<qkK2YvJT4z*>|H5AB`-;zZ<1sItt@G!s*$fd@EpKNH#8|Tn?bw0{{2Z
zX{Z7~f|Rbluqs8pQYB<S%QOSIcE-cvjL(s~rocR?8fKDZ4v>h`OpVo)GYvQwlP@FG
zgyl6obB3BLfl)m$TXgwozZZZYd*A-L>EqQ+M)Awo#9wz-#)O>*R3<06LL0_w4rP6J
zyRQj_M6`9bnp*sRzRz;D+e<Znh>>nEl#IGpzgGG~qd+DLLdo2HEz>u2Sn4Hs_u8XR
z?P@|?4FwG`?X^#9!<1XFngPf|uRGkb+DsU<vJ_s`6=pE%j(8VFO9D?<PM0^^Xlknt
zlj*n=5Y$6J8+6}8h^$I`W2XO<Ev`(Uj;alE`~zI+@G*6G2OYLZ%~%KPizaYBB0a~t
z`b{RxC3A=EA0(A@0mb(lZ!Eru^i70hmSQN8III}DQ7(JD%!={?qqcc2DvdU5ht-Ca
zE*t!Fu*i`ZQ_l4)nqZKxn98seU(*##8qjVAv^vB<PL*l2ZAuoq>Sg;ldP{z@*PX`i
z2w-Sk@3@JN^Y>-Woy4C+0jz3d#vMJ12wLK;l5t0B)#!SReaxXmxuH2s@$B9v)ZU>;
z0ask6f~~<R<jye=Ic$`oaMP!gx@;G`q;sgYP&@zHw53l4&9<XQT{Zo=80;sG<>w91
zzS6(8o0x)B<;|E=i_U3LK0YGGsGt>jWBf20$H-&&B6p{&l63)ZM$k5?fg1{VF1-K6
zYZ<Rcl};mgsx(VZI;_;lDX%?IMgF=&HQ=bch{n0!Mm6HDBcBD++}DzVh-F|yRQtcJ
z_EVvA66*}4vr{?2Ha_~H!Z6lxk`;DQGI$DS$MUlvjeq<Y@6_nlA2;<hK2OuoaSf)j
ztq!uJ7Kf$<LRXYwj2xL4bewd~SyA`lqaW}8^yC#lY_G4JZo=-M%tMd08K<fsFt^l5
zr-u&l2=<l-QFpIw`qeoar2{iV#75z-WC`w>#x5ms%oJqR)^ckyS8h6<M#Xu`noZRk
z&5jerNWGm$l=MtoGc3yzP(!xzW4r_;qL8!BpBK|<_h~TxD{Q3?t!UQjV2>X?UWlI;
z5;C~8WIK}}Cng-Wt}}+k28t<-4B*sQptsKH6bJ;6R@h1O5O(7j<+2o$I|1ttV#NeT
zj0|?$GvK3hJkDu~%)Sjsx`qAY2|a+W#?wUz1pPDy@d!^aVq&#yX593e4FbuBUoJKF
za9>&B{j!S}S#-fJRAFhM@Fgc{j9ipy-g3vKgjw(}2bOV)M>fj`_VVb6V1{`am5IB~
z*I$3lh#YV^j01o4)IrB545W$&8Lms0)}@2@vb<rDQBJmm1Jb9}A-n<lgyIj3B%Tpl
z0tGkWN2E5K977ZkNeC$`z${z%RI8xD0kEy1T&B6mTL60%ET$j@N6f%WN$UrYUOA^(
z8ibXBX^Y9lF@`V1(yd1sfiuAV0iBvo5tX5x7*FaHH`XOmS@FiCn<8Ow6+`)KMvmd+
zB2W|{NW7WcU;wF<C??Qu67k7GEhhW4bWx}zQQ92U*kmDaeMcBZi8O^HJgzOQ(gTaB
zR8|si=CSBjBtUH`D!KUN=Jh$gC^jwvHOkHD#uI(Z<K($dG7KkCnll6y1u=c6($yo5
z>Y;*$(6Neq1`5#`?ueG0wM`kvE;l*kM|k53bW|UfM=_id+|!f>DEH-z-59&160!qd
zk-FQqp&*B`W+?o#h8Jv16s7l*2W2n`b;=#CprP}cG(7x(?UV;5KNt<w58u`{-y*3R
z8?kL}kVe+S0rz^tZyiBLc$;FA%e}0sl~6h6R{14j@7kW6U4zofF|}5j{9n$SCqg`y
zC-sRTVl#V0+Fjfh@wu$b&uGpn>_oF2WD9mC^BqUEreKNkWG&8ZmJf`Q3}mIwio?(-
zr2w3jA}E@wIjPTNO(EosBIFeam@IokQ^IGDpZ$1J-yvUN<4FYxZ&YbFs<b*)nmtMQ
z>pI1l=tv^8#jN6WN6tb~-Iskan!tl{Y?#2k#0YB9bUke_@>mPd(Hgy+rcAVhYxQrf
zDb%+Upcp3~u%N42!to<!vTnEz;Szu@8QV7Urs}lUR(0BOBDFUqL!etNa-%h<5iRG_
zAPFdEf=Chp>^tAQ?1Uo8tK{uW9-Y>yZY1{pe<mIT%Y_kIZ=&^^>|GDpzm2YrMPE6j
z?W#6Xey>$Um9)vxlTqS|<2{hLOvF9C5;z9vM-=kl4q_6Qm(7iu*c6+D@r1H^S2dU^
zwOA@)CuB5p$}g!JGSwz6WAFKn!rg0qzohOQs$Zi#zvWmsX6KJQB#ng;cLZB8QrQe$
z7q)V^y=suTdWK~JJxeGr*;NoG66l<)Y(dD@P6gH)CBiZY5sCtgI~+C=EK$xwL%?!K
z>Yy-%#vHRXV>*<{A|ozLu?x+aPkMd<0&axQ(>#_gVOg}uNiUCg2@1Ec=!BgM#!^8=
zTwlx&;1h2?CY&+jEBqvHZMYh8;l1jp;0yEjyg<0SJ5)8KqGI3N8HU-Ya?fg7F`6d&
zAmfLO8zO%XDMcXT<GAdy`Kcq6D37|B;<R1Sl%uW<F=e)>X08-l$@yN{OE$|;m>)E8
zQFyPMU4TN*1wh;;wCmyOww;X*Qp7&Ycb_>6rsg{O`JsasD^7<i%xF^60Qf{@K(*sp
zz+eU*4aT|cD8s3(A+*b|tg}T0R!RwAY=2r=!WZC9<z77$y#uHa97C{$PSfHXlkc9V
zVeW8z#Mn4YDz027P-S^PD3#uoLFMm5kX4*gKTZ_^_u5UkgFF_i*i#nyJ1h5Z%h+K`
zC8oF`Q=V{x3=coJj@!CFodKGvnoHE#m8yYKf|s<XwVr=#5&qXG-|#|%FG(oU)g;pu
zY7twuG-0P%6uespJYCEK;)jypVdE9vSumRgQuN}hh1j?Y=-HE>1ftr8c8d^aF%Jn=
zfR9MsSi6ea6lf(Y!p{LLgst1!L+H<fFw$)7RYf#cLlc0@MI$|QO*kGoMKR-^<MP?g
z7KoTILp;diWRkk#MBf{>J<PvsTMs-PWTUQvuU+0l8@u56ym{M1gVtQT-g2H2)$9m}
zDqz-ui%_4jOM}RH`h*gEJC7e*Z0nh7i%o})JX(67RO;fdfvv*W^bjOf@Z$GDk9XEy
z3)d2;*q#bYYzPrlZOSBi@kEO6A*w~tbtwuFMH?=PoYD6VyBY_;>MfRyNj|KiUtlL!
zl^}}UDHO}qi8F5(XXmk09DnG4qz!;cMZ+DBnby0p6>QpE;aEk&R~IOXWi7fdRT*W)
zV1kDk_FDo(>9{&44ZiVcq?%)OZ4Oih8B@@XV_vTr@3_{1^x#;5=o3GO@SNWbcSW1*
zB-HuZJW|vIE@!eik4>DeS%d41MOQXOna|WDIy0+X`NC`d(Nci4xFcdx>})oSS<#bk
zr}|L9HB!IyY@#l`*EI56Oejx!E{&8ek?URq|5R<W?^TmrRb(^8CP!$lgcbybJ*wzy
zb=4g^R`W&!8fj~z2;AG*+n_!xjAs!!^as<b`3+HF#R4Zsbzv5aksr=$3~&A)d+)a0
zwv{Z3_H)L_KOpwlP0}VIQI6ByI$^RDNlDa^VzuR@?WNYCArh1jlK=yNx-1{>vHxNJ
z&Urh3vcKe1)x6(8QkIkSSoYqlLjjn#nl)?Iz2fyX0j4V8t&+gI7;;lq={vLua6(Fc
z9bw;^7YLOxMQ+cUwF7q+&$lDtOEXRL3v?j3E~3-h;U!w)QEG!9K^K~cz6(077>f)z
zt@yaQ8d-O=lI0RFqIMRivC`^F+z6Fs^cv0F1@INV*AiCnx(OT&0-q@>a+ei4&3H>&
zC)T$UKzY<*+`faJl*Vtr1j1sdE0AN-^bcmkm3(vV+dt<{J~~AA>M?;@g$asotll{R
z<oM5<HIh`lMUy~)>2?N<{{WM|IeZ>1`AYjIQt}XkOMBK%y629skR-t3^e~3l#n07J
zktCM=_k=8+lIO(%zH1!d-0I-D!813t7d?a%mRMa2=e$sd5=ML%sc}0&%s-FBoR!Dc
z&+p-}bs&88S5QaeM)f9HIt3nZJ}-+KF8%ZCXs3cR1pCQ_I;PIa{q8ZA9iDDCIv}3Z
zGp^^?ihf`HRsFu`^8Vp40)Ae4PKkM}=K`jOo!7!0?0(&w$QuP^Y1h38yRIqCka;Hj
zz9}$sE!RWvys(YrT8h=&DApJ@bjp0AHVip>zm+yG)uh~<_k6E4u0P&y?Q8VW-Dq8R
zBhBOb{hW=w9dVra=wkF<E=G~`G3rYnBRs1Chaz7v1Gfr6lXLlNB!7+7GsBI@)6r7L
zqMz@>v1knk0&`xrjw4}A@j3VPf#uu!7WM%%2u34-g)YK-Eaa}AiDe&mA`6#>4*t8_
z#$WxL*v3Cv#oyB^j>bX5*xO~UPX$uHF0=ZM-K$%-|L&pvcV+7@H|`FiJ;S#9)2n8i
zu%TisW~s%)y8;Y3wnvoDoZ=N`&}?cc8>A_K5f+e;H{+)uVf(D2BjekV_N`Gnh%VIb
zSo`$6%Qqyj0~z;KcX<ulL*222I|-2P%P8&|YVc_5(EUf-y@=@=9vUVQP4pgh$IiEg
zJ;oB#{Wf-mC3vs2J@)0U($b5gCP6ynlWwzVcKHmtKfUS#UUnCN$$-u;JMRx2gJ%+;
zvU)m?j96=atr2293fiND3F*G)r~;u}<9J$@I^=O0ZQG*Wan0p7)s4PBWd{>R59|A}
z2OeVq4?c~@*ekZP7tB0vbO+eGn8Ui<bG6YN)?l&X)+*B~#<Q>4hOs}0UCe_#(xRwO
zh%~rmvo#Xk1mK>?(yMg5)cU3qq@FiPTcZ~Nub`2qc-2$>VukC*hlXXWfg~9=vIcUb
z6wqDAa2BFBY7Sj}oG#pZci@bTLj!aRF5w6$SF{H_#`_x)gJwR*`X%&#QpWKwyvuKe
zuAum|Y-8qQocRhN>?S=^%D*%uoN3JDGEer0C_^?1ry8KLiy3pb+lcsO!u?M85(BJ@
zxa615{0>~_%Fbp!M>93`4Yr=nL!TeW7oU?Kh<=s%{Lp-^W9xM9?Jne3=J=GoaviAQ
zG3{AHI)#s>+mEK(kEYvuGTq*2jNBTgQ;)^>#%J*z+9L02=H8E1Sx<|P`9DAA|NNN$
z^JD(ckNH2>%m0~D9h{|^KUc{Ax%Ctie_HuJx3->aea!#)o7Ddb)%l|OJFMIhB?vDQ
zUZP3j0!qL1l}Nc+%tdxi3OweI$7wppzq|Yc{`m5#_Qx;9`7}HC7Z>Bt3QFy+vUtk$
zopn-EjLZdy2UN&7nO+E{+(`Qgk4V`p=9RH}*8H3L*YQle!ryXW)>y=9;H(PFLTMxj
z1-YTjMlzGBTZ32jHd{>VOhr|z=Shu4N&yORsPGpKG@sV;gE<H=@Ccu(e5KWGrGBT$
zc>#P4d{N~hgDC^7m=}sT(`h=z$~RLu;A7^%L|+;QK4$rSK|BSy3I!(XZ_yzc1P{d1
zm3jiOkIEEQGm=}lrylVxVw8r8(#Yt#uO4YYsV@m}KK<k&Ye=P!<VGCeU-BJvIvl5n
zDV5gg9(y9cvT2?8^awa*ZsmOFStn{ThH7K&zvULd<6~|6nU~%rs~(3YF^-k(<8Jg1
zr(oXq;ya1G5wEUI0^X>|>nvXsi^>pFR8cLg%O#YUvWGqj%|de-U8DEKF<=pc6RhA|
z6hEWr&AQ+|2b2;BWjLS?ET`W;WGfWl(WG{j>K$~M5T3!9So%(sCfful5Ur*dFvN-v
z^`kb;k^+dv*@SSX?yOo6?buTdEftunbp3qzf4E;O>DBmpnS-I_i{jZVo|e$RIdYee
zc|<>DaX9Ak&Ofwg|1i?C%@%#EslbYd@i!-^@9A^tsUA}=;%hAN{SWu+E%o(3<kzST
z?`uNX)+5&QBi>{qYV!vm2tMW=p*6(lfp-n(uicBc&~n(uN%wFv9ag_%SvTJcdP+r@
zdVFG!Soc<~2r*gNrlU$!PK%5Ac!HJJnO%BRTzSe;a@7`IcUqD>S^h9#WL%W8<F#h=
zU0u0&-wj|NhI%Qq+1o$aeSUEK;^5hj979Px!jW`{^HCo^JFn96iqT1}+8?Y68f=Yi
z#NKthd${|TckW?K2Poo1T>q=X;=*@kBxrM9Mh}<7k?#;6u}V~<%VL^VZ*JQ#f4IuA
zIGyf>z{9+iMgpcmiis?_XdiZjac7)f!B;nkUQ$gWQVYR5(qpIrhpc*5`>@MgJ@Ib5
z5?zUP4x%2jc(Z<bY$`oi&PKEqqa~b8W{drtXLk7#-jTq<YH>a;X24OkzriBwq}|Tb
z>!DTgM}loHyUH+`UJ|ipJU<(0H(3wpFjru|{EKR34CL*V#;{(PCjCR9NyPmi`n;IW
z;nz7ejD;VtHh;U!<d4!`CO*KjfV3uteQZ&aqLjUnvBT*I?=NM1<KIAE=ZkDQ=G-ZV
zEcp{J$K(_B{kB{wgMbef`>3I&huU{zzB{O^u2{y~b8S;h&|nc-56zb=X6J)#N=LlY
zRyDBtlZ7e|v-||>_83*nM7Ld|6SI7xUUG_mu50~8zpMZrD;q}zg4!rK)dMzFcze2e
zh6=c1u086Xtz_bZJslVmbT(%Yot}9s59!TJOX}>FFj}=-alxvss8!ciFz7~a9^*RA
zC;c<svaF^pIv_-8vre7TM(T4d|0@YzZ85DS4walW?J_UCF6U~U<ol9?#rlfuxBZ29
zYkKq1JIN};d-mH-`o*OS;rQe;HxbGVnf8Dt--&u(VOd07Nh=Qpk-mabcHr4Y@3usk
z+bO?Dju}1h{HIa5!2D`t1ESKtqj~{%GKsC4GyZDU6P%CU!k7>X-CxwhJeiR#Qb8_$
zwy1$~i0MrT_-<SIpgBnxt%WD6;u~*++{u*JWsU;E6>S-?UzBv%qzHt}4hq=WNK$T0
zeW2OTPWWT=eyEc-AUiN=8nyt~y|m1D?Q14?31eMEIcX;u?4Fga8D$aN%7T;)Gi{`u
zDcoUsf<gsI1=I;2o2S+*F-dF2CxCwa1(M})bUB_pjV9UDH2d8fKA793pAz%f=)UFZ
z9GbY{H=IAmg{h9)0;w4^nN_W$;BGA3unoyRVth43n6S(|wT%^Q;1|#Ko^8uRhTGD*
zNOSafNr(G;EmSpJgOJ0WZYWPoL1_}dHhWUz;vNlgrZjllME~aa<cBQLAFczfg;Q-~
z0o8!8R@;<v41EPa)PoMA%!<0h2K4<PnzZDkhHteKjP2eLcv_AVPf^9FaERGxZPA-`
zcRXYs7gfwAAwTOzEtvGM^;ouZZ$>^juGWZ;;lJ%ApT~<FF61|DE8o#E(aJP>DNl+1
z_S>SAq|sFBQBafGua`s2jjjES_(GO_)FcoNbIdUOO;iI7U)t(v&h9Va+p(?}kgQ)g
z2>JCl{+lH?!}%y3j?)YDeZ~^QiaPTboq!X|b69GX&Cn%>d41IYV$%%;PSa&lC3Rh@
z6~pGgYIX2*C?LE-SbEN#ET)RZ?FrBmUS*sC9M5B=ZoSX`mD}Z5_xFacFyjaSouVH!
z{DuZhuQe2Rhqf&*K`NdT{;^k`&@q06JOB)`@_~)ee2!p&Nv9ZuSb-=VyZNF3cIc9>
z&FNJK!Gz5{PI4^a>OK`Z#)!+-@ifH*=bQwJY^!W|bRa2jaSxKnbHM$1f$%^AsC|?Z
z2=4-Kx46}5@<e_ZoiMf~#lS8q%#N@?+I`cr@`fV}S~o-Xw1t<M28iv@ZiohwF{95<
zRYz6{WJIE+A9^;FBj%4qBu7LNa{)Ip6)=H)no$kzDPC9rHCF3B8ZSzuGp6dZ)d_Pu
zoJeK0ZE$dIP)w;|BCxhF#UJ@n`x-lhOwu)Sl;lZ-7>Ckya%Fm$;R6TBsjX*@l)ehf
zE)*aFv?ne3Nu`<7+^c8)rr!c;aeWTt0&NY1G9J`tn*DgZn9UXS>hkZq(N<?!4H!22
z|IFqZEk{B=&Ps}C$?Uvn!V{Ry7B8J4p4FK20&Plj+nV3v^A=+Fznlq}{7_8PA24Ca
zlA6hWK7)SX7Ajkz#9?d#|CP;2^vg=_Vyd9nh3nmc?6CQm`grXs{dIwg>DBq|)i`YH
z%FR%tsK$c(DThll)&sFh5M-H>gLppb1U2yKE!J4M;Ow%+tBX)d@=?|>b97l0ES4Ol
zCBYxCM29?h*_xr^#_x@7e#r78>oQH#X|*Lx@zbv1CiaaRdfKZ|08uEpcVGV$CZ#|r
z5yKNYVH}S*%m)aeuB=_n{tVtq)_EDUfD0NIvzBt&<lQ)qn+D+bk#r|4=R=+J*uiI%
z4>|%QtkCem`--Dyc}fBn^jSoLo6{V0#fL`W?G<DGl)6xP=uzNm+wlA4DjF4VZ55eP
zI>nT)&0JbcdP7m&f-_yQyN1Y0V_**BRbmQ&bQ$*&z2@&;vv_VMh|&1=6&#w3wabxJ
zp2qCv1?FNFGlE9Tr=g8(b|tVWw%vy{rLGzFZnj5qkSofw#ylA1T$8qtiJjNzLVn7e
zjELUmS|iT=>Cu?zC>U&hQlqiG#8j|X6#QB&>CL2U(RUmxqC%^+PBHH!OP|9v;RMcQ
z3a|WPf$G-sl;o{!eS;Fc_!n)LvgBb2%)&$W154n1;nb9;+AXY>JZGr1-0}!ZkJ>}%
z94d0dfN#uF^%bw6<H3Wj+EQDq65BMGTUqo4Xr8m|t>0<XGhEl*%dpd_r~PkyA2~PV
zs+>*eJ~=^^F0+*;<77@oe~p>pia*pDAa~)>IG9DD(4+-Z0v43M6iNyT2&lrW(mJ$5
z>EE__n6*tk>Vz}I>~-9%3~ppgblm2pnPhSsqnDHPSC|wjMGv1ngTCIrv&(O!zy6Hw
zCRAI*_W^YA&C!0~%@2orfj4_uRV!x&|G>|WPWF#jYX8vd6AJM5y*<kEJ=YWy)S8&5
z7la{lvJnTf)$t`RRAkTi*cvkUIa7&%_!DfwzEigCD!7^}eUAhWtfRr6sOZTgFF}_`
z^Wi3qRAdal<TEt{FBF(A=BX_&&B+#M=2iAd^`q3a*vv*lIIX-nP6e-iUCd%1FrS(Z
ztL<QQ0gc#4zU|`5DAWcQ%7vlbVl7(M-SS<^E`zy;uS8{DJ`7%h`4B&FC^SHE2&Zf)
z(?hu}olo?rG~kHG<BY<LJwvLudg)lK(wQf7=&N=c8L#z`$`r2Q5xZ19Lus$IyaaVN
zrLviB3;9Y54kVQUso+<#NIi_chxJVfEhrH-Ug<y8<l^-$uhm)U44-q*vkGhKk(-;G
zPnY};?6a+}<TSLDGZS0LT%utW86}Q(r3;wLZIr2~S&*Wq21wAvp<vp3GFjG?oXuE9
z8?E#jD(mCXiQL3d^#hY)c^=E+!ra}0bWd}P^iw@!m4n8kWhp=46{X*hJFfNANG`O7
zHk}mUd{NZNlskywN`<qE*nJLL?ELGmzYZ9k3eO6m189sII!g@F1~hLV8*{b7rJ_!v
zWg7R&sX<kRcI8WsY0#u;K)Z)-{}bNP*5+FTkgk1316W>vDw`pnQQl!qz;Z8<@O@_|
z1x33|IYS1JezMO~ew(rQGj+kWjpcMaU779ih_~(MG1DctJI(e#IWz56^_UtA|Kb1*
z!m}&zm><?VpF<P(tQ%Wv80(jpV<-_(-b8{#Rutgnth2X}H^&fO4n;HA2v<H63+(Ds
z>?$z&b_+V?UjVnrf8ou<e<Jr&@!#k%IK0%jMm=ktW-gdK_*Z_Q7PK9`6(eoOTa$MW
zI{rqY3Cu$@!Jndu4Zvd=M=r0}HV31d&%(HCeTgiR@<-i2L%XGu(!Lih<4Ds)5FgJ|
zl4hm_*2<1!ukDC!uu3R!3}3gky%6&Q%mnUTjz~6Dn|0ci7n@Zdj|t$w(Ek}ExUq+A
zqay*#Vdxn0#7^pOJ;4(l>>>6)gx=drHu*>dXrl;0O~szefoSKu4}8ke$pigk419I5
zgmVS@>GHUzXffXSmAn5hO+s!ko{}NZKJS>{zXEyx%I)>lLxYU68=Sck?HRhMhZcFU
zKJ@f#j(@l-2(dXJf3CBgSzY3yOzyg8t=fPvW78==l>jlBBdR(ZRgF8LiM<9My2y9J
z3Q)p*OHu4{p|6M=={g(<)#?_!EwBz6tLr)Uw;lkVisonT_i8r6&yVgmzXIRdIx3|(
zL{;Kx>wMU6ovDG;edy1UWA!x7W<!iKvaD!ijT<GcPzs$<;$Bzq(3y?DIVDZYDS_w!
z`--KW&L{?Eg@LR65&+}Xx>R?vdZDM6P|>F)o!{zx`tS*nCj%az(4|_OliMg@i`rl2
zHj1CXWB!E&8J=Gy)R&2^%e$k4#zj!<Q>*ZJ+V2teup#J|ob1`^&M<t!1CgN1kfM-K
zNxUF5wHWJ%_=R$z@_hkZO->eNR~yKp;Lx=q!+Vw`dz-<%mQ}hO%an%mgYk{-v8#!^
z9j}MNr1h`-EBFVLMt%tw3hU`{|J|4HujOGPq(HgNyl%KGOZAX2^Hp96hxMA<T_{W~
zt3BnnaO$i-oVw~c#5l+~b3r;1k~B=5wSE_gdD}gJ%v40lRgsNlmD|NRh@aU7Mma>u
z@$SA?&*{;Ip|=p-eM`T5#cvV1)0Td1-aPWY@vY^AD|;I6XL(Qa8#OTJ8aZF%mbNc2
zyM?%-wWWDR#YB%D0hl__-g%c-3m4_a>#y+|wDO=S23zZD`$5+rA_1!GXq{z{nbp`k
zH4o2Y%a=^P+76CxXtd42H5swv2d0wxP$S_L_dbx=r>%gg+~w3yg?w`s207EkTTS3$
zLlav`Z#6c>v9r*IHL~{V?CR=%Yaq#yhvsTn1AL8vLq7}br_jxiO$k>m?u~t&7;rym
zM@^%vT1PphfcN;!($x!MT%qoiewTB+kNcg*qb8}eN^;1<k>0XC6_NIL!`;P9y}z<4
ziwFkFU`b&M{<+wf^C=tstWlxlosWBs2m6wr1^Pnm90TlEQGYwkhs;NeJRs*7T1l^e
z7Vbb~LAp#Z!aBX`MnmF4S+};ex0xXlJ~KqpPhr>?wf)JQkfPV`bi&Q~)-^03gagko
znSh>8doOUk`Gh{X4c|_a3s`qcu~z|))|YvDEea5hk4ZTpQu(4MSUSwp8{v%I)|Ex?
zmz`0!gS=-~i~|Z0M@DC6S>6_b*6|2`wTUa}^7XgU#R%>bpj@p^r$I(j_DUf~oOBv#
z0^Gkac0?9V3c`3c9m`Du_U&Zbqvvw2@COYvL%3{lwiNga?yi|uAU0L0aNmM;a)kEx
z^ojs6T<Q@X-Yf=6#QNvZ?8jnYaaiG&f%V%Q+T#I@!K=e>HXI^;RiJXr=THz`RnaqG
zfpg-4b=`iDdKrv>?z&HY4MVQhZ!x3IzruccCPQR2ZXk;+`viSWYvKtb<b!ZaGxyn2
z_m>q3BJ4u*mkp#*PI}p37R$vD)LhRJ(t)bf{k?%@d=AXF8J7T?o~NEfHyojQP3QxM
z^rlX+fD)X+l|&a^kU&iZP_EINcKao}(GdKdh8h3FIRtN%-79~q^|WEOK%>o(1S6#)
zV*954Zn`wX`ZXAWh6=Cw7$aj5zI>UOmdIeFLPG9x=krMToUhcFq+~YN@QrPX%FUYF
z71%`|2aFqvcek%Lfmccn&pD);j+ShPxOP|-rKIAL?R-&(nlbxsk~8<fX;Hjk*H?!6
z<-CyjUPf@O%}2P&=%IqXw)6Dsm2|5#{1A{Ko)^>GA(auRh7-mvH4qB80t_Z892$Ao
z!Tm+fEnYyM14{Aw-|Sv&IA0H=o$?%5P)Z686Ud7h%1@)qq-eODt*Cf+#MAy+XL}u5
zuEF6bDM=B92?1h~Q%QX$thh~m-oRJ-WKOrcQFeA8^C**R;l^+iM)4QH%YYrd>&O${
z>rBS$IWQ2#L6+W02KQI83kp5-*S?-GE3#A(MX2s!>?)C=;E@RH3GQN%dH?(9E|!`*
zTWvmo1?NLpbMDcy^PX0oJ6L?)Pc#ghi&iup-N}s9j9hCNlvY`M&`9*&_NSFVztZrg
z5?MD_HILD3%xX4L1za%Pcxa!{{u8@ajEM_N%QlGPUHwgG#X&>FmuD0s3Vvnzas>(R
zdX8I0umSmR8Ggu)=hjN<LHuVZ=!sOSUVN@wYaF|PH<!;~vT(QSt~Zv);=u)*W>jsP
zElmVz0N_2?NgC`WD@|QYUp4M(`x=_O#)Ir8qbbjMfObzbd0;Zq(P#lo1y)$V{HM!U
zkTt9|L5t0H6YIO88{LX7bpZ!DkMErRk6h@;drmrLbtCRLXWMX!MSaiIjCt6Foa>re
zGDkbz$L>8S9v2W%jYSSY!WT}Ko0Yz84RTeT&Kn%&jn|Eb&-A<}(-$`o{SSMP<tOk?
zzJaUx39jfJxYScn!#n9Icn2P9VmcST<v@Gah>jevkID4bOa~f#68~&?B;Lh@x$*%*
zQziK+cG=I>ofsRL3ZFxx9E$leMh}*4%H+XYF{k@Dl)CT1LeY!p&v^EKCz`FB&xLl8
zJ?rA!8d+`4jIK2$(hL8yq^ZZ8u61I(y|aK~^wR0PKSn!-c`;?}MOUG8vmVy8(px>j
zb9ErXdT7RM(ae+lwvGJMmr+CDR1>lTDa7_!ZtztN(BKvFf|xr+FfHcygQ%ol4qeym
z@HJZaTD{hzr<<K6?ZXa39{!iWj;9Cn!DgeH(i?d^vFh@N^p;Yd3Hb(H=!VK4uC=Dy
zktbaBl#PN)eDlQzaT{`@Va@T?^AF=Tv;;1&C+2*D9H<tXYqs!GWZOjPhRAB+sf}hz
ze?;q&Wf|nmoUZZeeH@R>U}J3%?dkHiK)u|i#8c?*sai=#Jp<zhjJ>Dh_j6KygNelV
z^zi$a=qrH^d*k$`tK3Bgt^K?%i@E84W<;PH$;x$qRHjX-WY%)6gM^hbquLe7a2_S2
zAClYl&fyV58vFfjahXT(dbgwJc$ufb)o4Ci+$cpkuZ|X!e)S+%86l9s^U0P*ra*4i
zs-pJM!R~%%JKCRVqGl4ks20FiXWWL58L#p5;yk}wWYcM)G2HE~&HfjiyQHF_4OlS|
zjfsG}FcTIDtWx19)fw!koy3}}%h-j2`QYd)trfZeA8FZND6xw_>&m{q;S26MEF71e
zL@N@T)=ZdSC3%;M&Tw9YRZ^Vl$qBt7HmIq_i*0z@7}?U4PuEV}DAJB7BMylnVhe42
z7_k3ZK4I#sSHt67c5=~Kf&Es=m4lf_2Gpyae?*>oL+hVRXDufa3^xq1-P||=>bB8z
zrX!D9MkyuX01EfRE3M`s0tN%)t@IpZ7f5m};|Y`ueg5C1x7*tKPvP$~tm8n5x`Fdx
zmL0Yu@9Ya1Mnv~?J!d)Hw=yRzr;XAf$YyGB6d?4#$#lhNX}*|I<{PaW&yB-|K4p!0
zZgb29s#Zr!;c9=osv*-=%SNm-X(G^<3D;|AbJOf%65L&&j3Z?E4ra)nPzXA$_Pc3u
z4zzWW)&70`7U)Sdnidrn+1%fIDf}cnSWXuc_nE`vWBJeF36*z~?muV8svwo)&b}AA
zA715&vhbiCiw)2#v@StF^q7@dPze8C>DU)Sy$^)L_p_#l(f+Sd`zcT4e3&hww2qQ#
zjCa~!?M$o(-+ep$z0M5~!)%PV`#|jj1A9*++4STT<r+ed&YAZBLLb&~b3YS{bx*38
z39A|d-K}<3MTg$g%DKWG7MEe|=t%ZO;$uK>ECj|&Q{`1n*(N5etdh*^^P!nY;W@lR
zRNr7FmG@kimFnI?4<oY94_bRa9In7hTAZWuQ|6Zp_rDsI87T*WH-yeR1~+iP^qHau
zmc@7qw2u^=#5-zLY^KThwz_<i0hu{Lec)|3`W8+Nio2Isihi7Rql4)$N%TWmq;Jxa
zRg(H=F`Zl_(BF^6b@E0}$0jEH#9?K{m46}1NAnV0Q95I4|7cMej`x|y!a~EMyp11-
zn>X6p0=BS-qrW2OxwQ$r>tFx28UJzkPqqKvJa;>KUSupDrey@jd?kyL-8w|efIp}1
z{_m|$tS37vmoll}>>N5M<MO76njefW^<@)6d0yipS4xN?+}Frmt-350Q%vU<@#=tB
zvzzC+8wpd?n1r%yl-1K)gWWSlc`nB1n6t$RHlnAd*ET(B%$H#_vOWiCoJwI2qCVE(
zG~vVYX;RW`t3G?8m%7rMkj(dr)wiZ(p_RZPrUrk7YDDn{e#SZreYDHi=c8SUAnh7a
zq|@SpJiiW&#9Qjp<15i_w})JvMs(19j23KG2we|g((ZXOakIu2M6s$CE6t4sr(nv0
zP3Tw!0JI5k781*h`4inFV=UD`+4GP*4w3o*;*=uymYeOSBtK0odl~Cx$b^F#0)1eO
zJ-~um)&vmn?AjhcmvMPjwGom70a5d7-#$F1hP<Mh?5^V$@>T;m8^`Y}dT;P5I(kKF
zRPsq$aDlhID}#oSWYY0PbW;iXjb7kR19(-@RhC4^JQ3DPu9%Z3ZTe=iH)az;AdBS+
zwC!rSRushz6%a_#M;T{-Qm1p{4JBaZ0cDje?8t%?wMe!_o=q4iS;|)W4~nK@=d7>^
zF(x;>7UlF78CAU20Q2Ui6*d`RE`3U(+1A#-!C5EmQKu7qHK57Zl_#LX9=rKdR!C8K
z4HySoT7GyPw>-(IDxp3|HyWe2=zzY;FUG|JXl&;pQU3ncU+t<F!dxlFCmBjkK<n5{
z%w|#-?Xyz>k2A9Y2xksOqPg?cSe<s(n<sOo9;Z3h&`c|5Xc5CD2K5|HHF0a`pDZO2
zNFDJO(!J;DBnCF7<DDlG5vy__EBeB&Nn{%6^xf*Me^^YX=gH_z?=a0^*Gep7C#=0~
z#^<#v#te3htC0kQ0b>caU1^IEPSe=~uKkjqt4V_C>xra0iuZ^3XcxZymVLyJHF<u(
z)8RkK&#Vp9VR%9ngo%?R0od+nrGHv|#=S6okTXM)YE01n=KExkGr17sgQmouc^v;?
zn}>fNj$OReF+}_Q*#L-ckDp0vVxlJx+KUz@h6z7we5su!OCcim7@FG}5N+;0l3<R;
zS#f@lbZ-3uxY@v<@R?ABZaeqfDrCvgfQbH(e6TKh=4tJD0O((~*4ZNzJel}u%bq3_
zq=cpeKr-7Q&>!r!92N(|6<q&e9>##;g7o>b*iHKISQIg&%OcI&R~?%96(Vddx~C`1
zD|H%jA%qSN6%JTej~{ToEmesVn|GqeavPEV?ci%|nkEQ$-Xt)>S^(Vng98v&e(0dG
zeT!Qn;lZwer0C#rwB-QKBsTVYEby!U{kY%362G(wJ^e+`*3`_?{6$*MU<#Z5J?=kh
z;}jo@FFST>-79uUt+S1L?lwYCL%3GzWO$zFH!T6?Fo=mi4za-D5Njy0da>MhobQ0n
zSm?|p+LRpyoi+-e%8dDan2e|=jg_VV@-e2~RPKh~ioyZ++R*0G;iw^2ZQs;l&!9h3
zA0DbK^n#?IivsrRau(ed3#}jp){eqxEUF(R?>=~Oo7SJ%S?@%G+MHROe@uRi+L$&7
zQ_-KZLgpsN0qe{vI2~=8GZpIMvj;%nOXx{|#vTRkt<BhysL>S#sCPOmbO#NzKzTV*
ze1Y<X`Gm<DAyp+C`;V-u^x3;#GAZhwwUlnYsU}V3M-5%1K=%hy)2*M=Y89jND#cfU
z9x&SpY}*V^yPeIX**I*Rd@b-pYQxaS)X<E33t?a(LTmzfg+HN@(n2sLNJ>=<X9amr
zw0*H0I<Io*Tyji@f&a_hURSVNAMIKzd^R1MvB`G4d!c2+SYq$jzI^31`5TPC9u|q{
zAg5cgfMWQcX~8q68+oRc)n4f(*D%^k!T~}e%lpDQklAv4JHbjG+h0-zWP=GEk4;;x
zFy4CNOn*$T4Zvz%oCeG}wKa7RdgE7C=04r%sc%UP539DV6*YR&^nAg<6k~R;xRTPa
z-*%Q%n^C}AE27`q3?XMgQrix#ftza8Soe}aUb){}wR3owvFVq>lq*0dNVh_Usptl~
z_6~Qq0C=qfqd2!0cb9}RH%<yGH5!2KnjnGp4;x3cOS*X?s8#krRW2FpP-5VTxH{~T
zn?bBF4v~Lx4pJDXy#i8L-`kYia2A<mu9U+4>~v{I-Yh?q38t+iZIRt>2NSfwXXI-@
z{&as9-gMjzkLl&ic0<e0$-B=hQcqzUU_E&qFjn)vz(%Zqhwud9-LBEcQZFQ*_e6F_
zhokIF6DaxYg(Yc2pSxpOG8`GOHS5uFvT=ME9Csuex030|MXnvob}ljNJ798eN5sn<
zrl}XVG&cbj9gs|KtBm5mss$hx=U_@Le@zv=NJp1>;W#w0=tAIwp7znszWKKfutf7X
z>Oa~1KmY50_dkCcyE<ZnJ5Y=vPn1O^Ya?&YDq@E?bHEs*J0ZKpC%N1p-xuHR`m$KB
zjcmvoG6x%d9Rja{-}A+aHsSASvRa5d;Un*)r*#&JnzvDz{$3B0t#TfDie+9j<Dbkc
zyr4`;!t(JmUI1PZ575RiSg|P5I=v|I-pG>Z2qdr+Iw+@wcws8vk9N6T9Wg*lVkUi=
zPEMF^OFg>2#G)|mHop3bcR&gVdh%t|?{qviko_Ev1OQDyvcHvnWpmo4ImrrwveKYa
z`~W)*#4x;MCiwNs2&$ELWgSWXSDkx4sha12!H%zaFz>;>EMf}QGchkSGkfw28-{1c
zRb{E7&tIdRSf<G&WfY%#fSe#IxBFiL%6!T3(sDy2Z0<)Jc#5mV4Cz4i>mn&r+jfjE
z4Xv{e5%OtLUI-;gL<fm-ibmrit@2ilsShW>qc91HsVEW$!IzyRfAG}<q#;>8D$|76
zY$7E8mBLeB)FnUWkI;X{<FM!+@(*hF=PUhv&Hg@3pkr*VqOij74(90(Wnf;>g<*Aj
zq2G%^b7N>KX|ske!=3yY?!~8JE%S81INfX>i8T9kgS8te&3D6mIKwnMj*b2KZ6YR!
ze$}6MmYC655>B)z{Zr=GX|SOSQrBc=UuLOyhJ3Vw?Y`r+#hU}%!GOP_@&1FKpH9Hy
z>zh64vLm>(->fiy{PsC~>aIc>mHWz!_nUbkd>|11?j%NUv>}Z97H?p9E`0zRbywf3
zd-1<|FK3baa~Ij`DJaa#bd*&M*74)xmSg`=j$usH32pjA-Eqv${o<|XYMecV(NiQy
z7x6f)@h|AKtxck1``7KPvA&)6TOVdse7~hp56f5e8<@@i;CAW{XsN~|=rTl)+o+ix
zp{vBVN(N{v8Eb&a%Yqp~(3(vj5E9YPpk4bp7N4fvEH(>l*b13I%+o{O#g08QBa0j2
ziP@Li(d(a+^4Yv*`heHn=rvuNJ0tM~1r@U!_JwMrnWGgn?_^8g0r5}C)AE#I3DlJW
zua#ZEj=FX%@gZaRoo(`HOSfV#m0=xmV@<35Ps`%Htv*S=vmL#7b-W+ya&P}+_xZu`
zi-TuBhI(7a@-MalhYSy--W(cnCDKu#YVs7o<PvVORDd6>km*}o?n+Q$QlI#;GI-8B
zI*tUz7;ZFB%^Kq_GYq<9P6TN8`*l2PE3K~TqtlI&YLsOI3<Oy%3lS`MG<cvd{((>D
zK{G$iSTJP}2xd|Wwtnjp^+5g{kA5-=!2U<&3^Ry4;93S@)xOgSY7!HjNLUx}t%}zl
zDR;W18hve7>|v)xF^9un7K@9^;IO~Nwqu;#UU<`Qd36Y8Cd2fv6+;1TBI~keI5z@P
zOn<gJ?|eCh#;bM2!Q9Z5^CS?zWxvL}R4XI4H(b^a*@p1h)zLpldOjTb{}u=a7rN=q
zw+?Z!`~X89fOk!{W|C*la<)0`Qmkj^gR+qrj6O{AJWbFO#o{BsDHi#-a<Hl!u~ItW
zm5aLa%-<ozKN9Q4i;^j6pe~{b=7S)25{yz^%yh144x-1uBNxeF3Byx<Ym~kgu7T)~
zzoX+x+VsVI4!_Q^F$kq8nWL4Y_M5t8vQ77~sF<gQ83S~Fkl_G)e_j;R5c_tFq>*yk
zCQ-GR&0u;C8%J2QCLg!vWQnaezoU|8REa{6p(dWr^x`x<Be5hka=ciu&$U+SZ@pY1
z5osM2Z4&)Ft%f|s$^gL}dk(daU0RcJ5fWAdP88N_o0J#Tpv_~(k?^hA9)JWe8tAYJ
z-EB%udJh0IuYlhRVD_zk&%i^~lDF8GND|9oMOu%=Y&*Ka>T$qLDLBSAq4cnm2a`5_
z-?19NOt#}MFsp1np*lYLOMLY1fiKHRn_oSVKK6&bBI`04r^RG4<SG66B`F_Qqf4Nj
zUG$6_?(e-6KVF670Yn^VC2JrcFO+K6p&co}3aM}vnMU+!Xt=3R(#0DJK1rtZV_s#`
zBJYh;VIZdKDteu8U#HgwHDeuYgT$;)LnDy9=WVpKr@?9C2%e07MnCSvwt4kbay^)#
zla1XcH*=H6g`=9DE<`qFW9S6!mIS!#<d*fF;1^WGgGE}5Y??3h654zvytbpAJQ9Bi
zTH{Go$nNn)H~Kz-sQ{+_9q7j-&0yOzmIdZjdfhuIV2aQ3i#U3qcPm_i&F+%<yeyK@
zB};)sZlM>-d7K>GZiVhqR9K@#T&hm1*&*HX@dKINu!{bSZWXg^gq3T_Z7fA5De7;2
zL1xFlbhe}ZU;4fNU$^eWd|8Jjz=gfQuX^wqM{JH?YoHfDgHlnqf_@Ie0emIt+T_Pe
z$$ttUgH^{F=9QPzdM1)($zqqlPSxlq!unW;i_y*jr#>*GCpbmtq}g||)d`0f?<L%c
zfUN8#K*O?$v!`6)W*r635-d*>y1VVpDAyd4oYqSH#ZOo3(IGyrrp^6^W261yIz-E$
zQ_hI%lPwRWV$?9Je>{<;<1J1zA5uwcuWMR9ME0NxH`QJ{=E1uBk=2wQM0S?smr_v>
z^^ECtLVav(ct+3D>(J<Rve-iNRNI(iPamsC(Y)M#)lYkNayl<g;mE*TXrONx;f_jx
z%`Q#9%S4>csf5DWO6V-E4WCDYsGcUf{CzOxkAo+y*teK~-=8#0T#T>$HG&>ubcnjj
zYk8R^Be<r<ByAU3nnZrF`Z}U~`OGhoS7V^7aGF%LxSr@za@*o3bOR2=UFazh?r(Oy
zyZ=hBwY*HhEQad%U9)dReoeTCS;0Cnh*oS4%Oh@jGAO!@`IT$6?ZJY!J?&>zDza@-
ziZ2dJRv?3%zHhtHZ6JTW0sY;d0U_7g0_uIptCO0Gu<O9D00~&h?$68C!i&oL7vx21
z%z1hWnEgmVYC{dwjrw*D9uIHO$Y79@ZO=2bN2GH<>iW9;pfaAPtUQzQnCQ(-;;cSA
zQ%Mh=t`ho9CUVkZ!p-eOdEbeZW;6ODSbTMGFil%%I-Kf)h&FN}%WZAn(+^@(mPwej
z7Zpa{6%lM;mpo2#tE$`cB&$+qH?)XU*sp!JAl^Sq<lvmt=Ie3(SWSB!Ddr@dGLaA4
znUYJ}5M7+8^#fLPOnR+Y3Tx+*C9T%b)?OP|Z?x=YHc3;e0${CX=6i?YG`ExQZHygW
zl*MB1ou&3VEWA4HtPgbf3W0P<6gf$gZv1#mG1uBwv-tifz&!P}`-25}{<uPx`8CO<
z(<s{g>Ey+;ql5q4-;0j+Uwr>;?<88<X!VgC|9WflMI{u}fnfNwA)}h#8fH%lwcd7k
zL?i@8qEiXxZq(w};J17#EpM`Eq#Dsq%@UC^0?}@XCedrFkJmIJ^O3S6=RrESTvSLi
zbGZJ?MNSI@$#lZ%DXaU0Azmwxv9r2`3*d%=QfM?Rs*VbydUVM(i@?<MaGa>dpO`E5
zYpZfO5~OZ4$)<2arQ@LFV69H%3-IZcAN(t~BUR)J9F5g9#b2QyJMWy7-yQcHT_n@q
zj?nE#*G2iJk|o<~fCEJo3oyWqU%(v!ASKb8vgS^*(o7!ved-bSO)0)iwa^tFeU;>C
zv8b%1x;6;}%%oK1(i#!SwvYMtUmhPmd%lAnN!!u0;}-{T2I((<Sxe@o9|137M{!aV
zJwAv1#Q((jKW&dE@?M5VjpZUPW|RV;z*4*H4@=xr&k;?;A8lb&Hm(HESVx#Cmgl;5
zG=-wrtca}loI|vrctTU9=Yc97Z_Xr8=?A+L!J}=HtaP=do@Qr)Y2>!@%vaQGUKD71
znwe{ikKu9KemI&hh9dx4-R=YgmVY9uVp~dxFiYmc1dja3SKmv7`weIc*7pQb(-zjH
zg?HkMG+6Fy7YjFtn60UQtua-1Xgl-?XO70-S<_|*<ZZQ-``TmPqu6E-INpEfT@NCr
zH(*n9X|66qp7II8P;l2bFY)Y-q*cUxCydty;lXxrOsC1b0{PP=7;uLuTCx_&pFG&t
zdfkrRmPjSq`cU1i&O12tquAdAmaE&c%hfg98mxq~`ypmqXW_#XkerSU<@yNZf0us+
zC}<x!Y=bT*b)3fWHA`xaLqB2+-ovNtPj-7>V?u3qdDC@7Is=UQP3h|T+P?}W5a@Zt
zW}_ZPfR`0K>A_k1rB`2K3TEr6XNI%UQ1yd(J?u8EC6F`|1UX`WSSc-Z!HJ15g+(4d
zk5>+3QnUsDH=~YSgV+4PKW#P{mc(n!Speh_M4-5+{J8t`vYVVfvM+GBuy&Q)ygYhc
zN1r=8n|a~WX^hn{=GkSoHvKFNB!0l<Fx0g;V&;K_;_oSN=zP!p3mj`~<UMGj{QNqN
z#*K)hBCqPw39R@wMLVz5BRiDuVQ|hQ?@uupoXZ+`pOMqL@Pe{i$Yad#-=Evycf@M^
zC(fq#J|hT@xVDSKRYLM`H+6M5^vIu?FGi~#EQJ1G(w+4Dpf7occJ>f_rMeY71_}7;
zac5b&32h0?{lb8|wTQ#r9rm_jdVOtp(&^LROsQ{l<KPm%Oh|i(ZU&2)D40>Eb6`qX
zF!B|jUT_w?76nVQraccO1XiC&wBq6Y#?p8NC80rgxGT#2B@mtg@H#GIb3LA!`zW40
zBbi9TezPnRE+EF89PjMz_r5(iJUsaEJB&Fw_%0yMY3I_pTG@Toz_`3O^Rk?Q3E<Fs
zF(h|oO787CbLe$Cn{U(pL|Dn*isSHAyF(QdPtv*N5kt@z0VCJ2kT&x>>Dn`Ag*KPa
zOnZ4wB%d{>BhA&cD2u#sQnwkx6{1%)%I3FmHBU$b#kC8(F-Sn{#}F4Y+-j((eMOcn
z-j1)ip$zyW!<2wl@9isx-Wpqf)5mJk*+rb(q}8d<%+?@hnj_Drbqv22W!heC#PN6t
zLLyGI-S77UZQ%R38YSAcGiuh2)L(s+F9-e2ZZsOS%GL^(<}4}Sq~+j21;XAmIZvm9
z7J4V~aKdBixwXQK3<MMta~{h1EuR=H;Jnq9^DET+_}tVP3eC?Fei@il3|qrK);DOL
zY|UGbfh}lfkNeI07^C*J#&{HH*p0;YWXASnWHapqkC#JjGNy_3I7~Gx(g68NYgOyT
zwKAdFo(m~1o_0FTy?7vDVgLik+MJxzN&n2R<|0ua4&Ti*o3UF;w?v(8o^`@)ZvAcg
zp*9bxRdrchw;Ot$rWeS<O$$J07N;>yC$$}-{(jO9;|>m9d|+uuv!%J-^HEWnMcG45
zWN3`fAxT<`WEGtKo5FTm46!;Q4`J#!I%+=BXoZWE_o$a`7+y=~g5kB`?Hvzq+qe7f
z@`(@)-nWNW0kzq`a*y-p;H_Nk_GbKK@-F0>?{VwkZ4o@IEmPSwGOrvxw<8mAqCfM5
zoQEPGr{6B2B<^d>oYapuz1~$U?;CuLOcvM80iOrw4%42H$)!p?{y58wdU{+$E2tXw
zTJ?;`9><R7@hWbGbm$~gVhj{ZB}euFr2eH$!sd4kr`RKr!m<B!b93{~SrYAhh#tx}
zT1Ue71JaQDqZTG5%!ik^m|%5ro`&Fa0!yz=H6lgpa!Wqt<Y+P>5KD}G(CC^@eMnCO
zbJb^R_y&j+p}~}qfe_%JD5Zx9%V~DJxV+5nWtv8g`G@U%_)BqK4bWi@6VSoMHE3BN
zYjxUTGBr-8aJacq#7I8U)XKdry4pSv=aE@f6Q1yLMV83emQE+~xW9$D`eqB#($O0D
zE}tdI%-C`ls5F4ZcW9nt7UJ|O6~=8^E9Ew(S>fYZAQ+mRkWrC?1d!&)@=4p%kHutP
znxF8GD9cyB-F$F73lT+~;PZ$mr1Dul@l$y$i32ZrNt_s(wJDpXRV#<R2<)D=eBjM1
z%Bszp1!gIV^U9zbW}j)ZdP6+Tl5k@2<WOe4P^1fn_>9u&)Vn&5R(gq~Zs+NvzojcM
zQUXJ{qm?4}IyXx-8ujCTLK*MHnC!n&J0>F8S2&nPH)eKoTXOLxWp(+?7e$?L1Pbw&
zqR|V<LyDxPy0PKUCO01FYiBfC%v7C6ueN-G$-!*JrheSomeR-BCv;5jI0xU`?;_8B
zU8HU6oz_j0(e$ANBxOUb6RJ1|91eQ5FLL0&i~6$J8OH8VC@HVG@qfI)a?`H<y2gDw
zQCYVcCRoc*Vz4y~<Lvb=)E`X?rWD*rowD(bqWr;8y{sxFFi=`q_$wRPtD|E*COr|X
zjf}i)%ZM_t_w!Ut?5yL}V!@j&y}!}>bmm)_zRzANMJxIYFVv8(ku4ej6I8^hjZ$;O
zLZeDdcoex>4CwJ}6@Lpi=qj|5tz|3OVk_CY+e%0TeWVdQT*H=kCmg-AB54h0#y?81
z!4e3&v#3e?jf!vk&B|`7>~jEjKqvl?Yg?E`-0slPjhmNb!|kA1w9e7xQ61I*Ri`7J
z<kFd+6o2Uc!@bB8tb<hWSiej!<#vASl!wsL1PTsep>qgCLxI$y%sfJt2R_1*fOaJq
z+CbO@gSa_P!{Vz`$p@*AJ7<xs7id|(iW0aP#8{gSqemFLY-69D)9vk^y`c!v1E8J*
zZzreQhNqIpVpplnQF$SA8AIR?E?bvFQqEPJ!!TymB`b}O<L0$3`0b#w74b!f_)1ZM
zO&FQTA+D;Ax&9dFLP+WP&?+1a*n{~O7@~;6OJ#b&k%^38X;lZr@AO)PEb@*FV!^Fn
zSeKnk@uCV&Xl{4eD}4)iaTz(SQ-)w@<kjZaIa@IVZ`bkK5fp-MK};^RKxrbajcAn0
zdq;5Z=^8WmqjpEQ1QD|yU&IQ(g6L)6xvLzAGt-5c!aqGK*QS&(AqAb@#!u-`QW3^L
z6g@vGa(^pDE}OG%vt3n<tF`1Djibf&>2TlEe;_~70?9<U>8n#M@C4YRv%sBi{3TRh
z3ip<XB;Vp?T(OMTSAzIC_koVeAs;UM#~pTDPam&q5I0)E7uh6B$Iz==g^S?|NNV_(
z29Px~_vjD~T}8J;%At2Vbc!wM<8C*ox3Ia>m@oa5`aR=y6q__&CP>g8%xA{a>LusZ
z5<Q_gq$sQCDORGptVxd`{vUH1Ql+8kHUer_vmwJ;XLusT8d?F!PFwp2-+kNaV!hI%
z)|vNGV)POd0fVdz_aZd2+xc8YR`u6g4xp8Av-1IU?=E-t*>e3=lfvHQ``0JRraFK9
z_Pj)xzu`qu@Mt8)1^bkqYbmNZ#p8MeAu<5?0D7b`WXhCgZp)rkqt08`eITM(Z0nHw
z=`>LU*z<5p;&?lux?aI)3xztg?yP_`{;@wCnlFnvqU6%pr@p2;1srMC0_^lmBt2dM
zlQp|S=*S-}Tj<^6mp*Xxl3`@I_u!@bv$EVtp>GlPQ=!-wrkOWrm+7#i#w%N8)EPIh
zveP+Ri$uAXj;_uQPvz8@yqtT(v^=-6KR%31_A*GHRh>{Ws#COpyJWSNgRPZZCD}AN
zpQf?T;he`h=@4N7SNaoAyFaXosX^bk#4coEh4H5}4q0GXNu8;;@yZgKK1}d(p0A{U
z<$rCfKz&`khX!>u)!k}}I9avy{uOd}(Uh&t@bN)RcR)Vx)s#O=6m#)pir%bwSd0m%
z=yZ-H>@FBmQ1h(eCNR1|ZJO6@`=e&CSu?m?SC&lYm&tirXQNd$Wjlw*-|u|0|KecR
zugc>1s!|2-zz|ZmW}9!lx%fTvJi+wlJuDfzJn~;o)r=YO&eotD%MCjptt21FN^&2T
z4lku#S+CqcM3*p3e4{wFa&9Ta33=na<wMND{$mkGZ{KY{h*{-9p_q`C^~e2=+q5Bg
zU4={EvS8^9!*QxYgnc6={n|-BrU!$6!2Dt!3c>P=jZ7jjXzlgjlW;;YRz*9lgfhrU
zTg%gy=o_bqpH|dLhE7SfJ0tTVOBfYeV7*!Rs8ZruIjtt!(f7$XyTEM!M{zg$bF^0!
zAfv(cx5Kg>{<RZFhbgAeV@d2P=xBPI7c9^IWm1^2^+}FZ@PHP>Vy-g4dVLC2jko(n
zmR2y7NMy|8m?}g1*_lHNSmRNO2Apif#4|0;aW{JM1O(zIf8FYRzWEnpRXgRy0s-tx
z%BE!kQ$<od6$gCT&lmua7M`Qqa#F&kj`e}{)Ms`<F|nP3%pygU8@gtmv7w%5Ow~2(
z2Q!T!QOHHY3fzckF%pIFIAMpakcB$DSS(fS&q@la!m}U`Fq!VO0@_m`_h~wdfTWc`
zEWZvX9W%OJo_%b2X&aZi9AfFD;tIEuRHv0M<Cc?f%k2!V!mA=1bCY5fEzYBY5_@Cn
zAFRV(oTKBn2V~IsI8EnteHjb3s3Asrc?g7&c1(iEpP<ZL{<9o*1BhibuQJLPMcB(y
z&&5XiiN(bQR)NLi`&}WKQEVzH)Ma*YiChJmP%J}>feY~ee_2#mA!LpztGeX;k4I-(
z6mOygUSP@E&s1wxf{lv2ilm#$s6(v75Yr=`w$6wB)|qCpI`c&$KV)|g>(qT7?0$_;
z8-`sr`}b(NY~iMf>$Klff(82p2cAJilZyc7nWU+>b*-OHAopMz9baaYOe*1;G)d1|
zmBY~Cx8WL7E*23RK)8eTMN?g4@J&)>)dAP5I|jgWd>aDU>4V$h1BxMJYX*izgTpbR
zA4r?}0N%(jI;h8Rocs}5i#)p;&WBftCldj^V{?S}Roghx+D2mUp)>|?kc~NZwGsXE
z#cV~Iio6ZI4@aFS6w0<!Rp|^E#E8E0Eo?YizBF&-1oe2QmBEDdwK*ma4Wm4vYzO&h
zy5M=x@G2~eYa!K0A`Wx2Djnfur7i1<B|$*ob=fOs5$X6wQ?<F_KV^2~bo_$tFeX<h
z>b4jue$kbqT9^0{hKBHV8<8*!YRL20bpvM}_Png|sL01{-a{w)D(a`b{#NuaN1De4
zhgy9Z^<nVpwu-45!4Qj($2|SM->o*mLc<<qdVY08)Sd>Zis@C_K0+f&QdVi4-lk$q
z<+dBw1<R?`>8$8k1RDle93EY*at=@0M`7nf(|N|n!$si?G0&XO6Eng136B^1JboJY
z5u<@Ik3FgUP|k2R7`RR&N#?i*)AOn8fCP(zc~-G&1V^V6J$eL8<i@cF3Onw|qP(>+
zcCy0QvKTdu>W=lpietT5Z7dy)k5;rp2wk!^WG7^-*uTM=-I#`hWN$BOxYe?Q7K_%(
zb+qrSx6wPnA~bqoY_78b5Na8^mg-C3b8B+zDoMD4Wb_`|5luVrLG=PdbjzNPCtMrA
zGsBNkR_>L46z9N!Bz4kd%BvwW-}-$P%2|~**X)*bXB=H5iwbL-X{FNXB4jbFw~w)8
zs#FfJhcK7NG``DYWjs6GA|IzE3jIWnT$yeCgy#|{Jm)#?`4-+POlY;VW0bs~$z}op
zs|5%b7p#Dv#6t$&mAX8^)g&cqz^*cm+Diuz+op}s503B*&RKbdIyPvbbz<x|^fbqb
zRP@yZf!Ae8e9<L@4`-R*Dm>0)?aW7MiCN&$;wgl&z^I5tOY+M<KRVgxY&r84gx`49
zv}3P;at~#>6^d3GKs`l_a3~9(DXdP;Cc?~DtYWZ0t5GqVQ>_O^;mi&hka+Y{`|D0q
z5lDTO6vuATF{L{o=-c`O>5PXlQHBM}R3`X1Qqlx;n0|nXC|-M}T=OaPW=Blr%Akic
z7)0jRjCXQ}J}y&)9w8C2du2L<MWQC&Qm&V`zJy2EY#{NM4aG3)^nXfKyz$y`2B%xo
z3eI0~sHQPOE9K|Wf?fgFNrHuQge>_#O+$h>#y-%)2lA8;BKPxCtM@awyfi?$Q$11W
zPOh!Q^1ult7~q`_$B;;R?681z`kT|!=+k{XpcXXEM~}8vVy3P3!i|N~WCkGW%WSG2
zuSb~CCaJ5Kyp4djLjQicO_OPi7iyrIxE!Dxu}c+YrYyN=Yjg5>=_XU)-G=DT1h;2N
zb*R0n29Er66`Gm`mK)HZ5ZZz~%!lXghv7l+k{(WyQC*b7Z;tjkFB0NAqG@pfU#(Kz
zlJq!?1Hg-c4MB7cI;fT^haJh@O7!Z>Lh%xTM5ylSdnwc5kkegHwFT9beOWGYqr!zY
zD6d`O*A$D=TPDNG(oTxesg{tsr$%s&r&cT?`;^iAB_$jf?#V=v3WB_JsIR^ulIovv
zwI_Pg6n1$$KOqKG{GqGXL)D;siQEBG0O5z6?&|&{;_Hs{@r>{MlCR(6Hr3&Jhr86h
zCk?#%cvt>G?@C`{Sh_c`DC3bwuQn>K*kcWWovXs`l{+EvG(IL5r;^Wm{f}9Z2qct1
zczoB_%!_NM*;$}N$peJS!18YKUWBg2X>aazS3pJCaVnG9@U@b#)WE0_7%?PV`o63~
zp2on+&U$?Aoz<HKT%R}t(srTSGp5*)UUm+vBmRZ_jCtG}qxn-J`do;-Vgj9~D4!yq
z=nDm$5<KzqHrh@K#>Pb7dz`G??%<3GjOLW(<lu)kH;ae*r&U=_zd(8e&UvWUq6{_d
zFWhl$ef-B-V_zk4UJg|3J@2dYVQYyG=S4+&yb`14Ij1F`!Ox^*7ZZhK1$ivWHOrjV
zc~caJlc<`fqim9mD4r{+;0dr_Ib4x*iqT(R_oDjWW(tzn6LTbr0dc`!w$T_oxi07j
zjk2V-Bj1L~uAE&-waQ#fGNPthcep&ia|AoC^g)eejTmvYit`l9>!RPqnhJ<Xc5$r1
zf`K(=|FO(5xHle9+@S<po?g!rAhLs~UAEZDs^LlctDMO$X)dy=$T#Frdfv7I<fG&b
z5dX4j^Ulx}6<|Cp-nc@Ho*Z-a!f(t=gqu6%Mt5{pEXNCzQq+CEX(XRity$^^JDZIH
zI#OPqewU}y#Z)jC*zKU*f7*@uPhI0#l~?hbaZ1T|Zw3-XB8Ba(`KaHNz7%~j31T>r
zK?02BXqL<eIG@oh{w1v&+K#5#e4vBRNwB_ysdy*_u5}(*##4NuKzoXFN1)Q;C3HJ(
z%Uio;Kh)CA)?KswK+|lN)xJ4pvvzX=R+)I!&AI9^sIpSoIHd@u<QcKfMsHxxS6X+&
zx6Vp|j~QK3xH%r#i!vK~_5dHZS(WA`UX(l$wvRLzL}XwRk+yaVFhm%r%V3T8#5${|
z>2NZu2d#K3Z2>EKSzHh4X~pn&?atm@qitsJ_JM%sgYBrl_0F`QYfFpKi31+AC#`tn
zEvGy0Hr@hJeuuglnAG`saRUrGs;&pE8gE{mY|)`|W3FJYgm;Ld=y1!$`CPk^FsKAR
z^9~LlwzT`!#}?=d5wAZaLjVlTP^e6b($T#zIb)Ah@yo3{FDUDU2xuBHHje3GC^aiu
zdzgToVl5q%eA=kXiLSCFI)dFzbv;fwEhuK-f(O6cg@L#iGl&Ah?_mRxmWFC2YlyHo
zFowAYc*hMO$@>^U+Kl(@Mn4?xVNi*7FKRkP;Qirl|30Pgfwc72-!Txz<~=IocGgqo
zcB2;4jcIC0CtI%bh2SEb7mXy;*Nq-i>w7PYq&Y85zjT1xu*#D;kj2{Nag5gnEX;<m
z$<8>cg1slIc^y_x^*Ow%hGjajQ}M3J)}3%qU76NJWcbx|t}=!2rjbH;{{)qPmK}?q
zD9zT#sl-&#5zI7Wk`GO6yUeCQc(CuU_{fx|lb(DT^?gb?nX5!*D1ioSBu1zzsXnoy
zC00v*9LlQPwV>yI=PO<5otLCJ$_w1~p}>EyV8YD&uO_}!`^uB%+7mg{G|Z~1U({B}
zhC3*<kxp=mrjg=t&A7klolLqPdmSBHlyk^{ba!yUs4u(e0$gbWSmsy?k(f+U%0bSh
z)aV59yp3r7C7|1v9Q~{-gRs7@B2@7|6tK|N7PY}&YLC1LN%!+9@J|ABiMyrzg0=4C
zj^c&x_nJ6%x#siY#I3WmLM`}Y@D~2(9W-P^1N~Y|$LP3C(<)<MqHj(Q9w9;SBpC5g
zh37I;`AMD5jW?`p81vYvWM<V((t!63kI)ID<cvPie=rh(uh}uQupveCemT9>YVOV%
z11IfwBh-7RNd4?RL>Faa52LRJG#5QeLg(A@u$_ty&+b_5VR-?F=!eC0in+RbhiQhc
zhgg0$g)Kw2?azi==zd<8#r!sUkD3rm=5vf|?=g3v)S&-{MvieQ+*yuk*YYGM1)Bk4
zpZRLnwiN20KxaIum!u_*-O^7qxVh3CQEsPL5+!ezYw|KG$N|6&a%i(1v^u_C?WMwy
z&U%9r<^oBj>{h8efn$z_VrYxeU=y+KyT9)Nb3k@+4VB{;wkM)BQRSrCfQotpU{0H1
zqW%7CuzA+$sqGQHus%G%?n(Wx$-lbCKzgl-2lE80v+VyGM^FB$adr7bd?0koFo*%=
zo@XM~_%>?@{5?|>q1H*a@m^JMjZR7vW-D=)P8_i0@QL3~j2`dAAe%{6CiGUFVyDHa
z`v|V4O{eHEM$emF1&{$-+l>uT270&@zlUPcZ86oZ9H}vJTt<N@sHQwcy$Ug8^drlW
zr$)sz@YI5(?%?LMv`G%bsX?Fv>IiQ%=n0$0Sp}DR+7^HQQe?T}*756(120Ei<kNa~
zRxuUpn$O@adN=WGS&}*sbH*uGg^r48Q4Stlr~+Q-ZLYC?KIXzRw??$+>O(fqjnC&?
z%|~N_iD?Dywbk+2#C=Q@<vg@Q#HpOt<bgs>qOF6s2EJ~y(m9;04BoQ;(o-vmBbM;l
z`{8i#76_ks@9H0M*!E_;`Lr8t#(#nT#Rp7}Hb5ms8}7lBt-f`AnI?7XE}1IR@rCH*
zd<x|LPMyG%s`_@C4q8-itUFJ}gMKTBRW)#V71HoQaud@ZJ2$Dvu%2B6^osfkeG5mN
z>lni`Z&p4wC?Gwm3Y$o6+D@NR?*_L`O2nVx;r@(i{NS3JGIi9}9+oqL+jZ7(JtUO)
z(hvy^8wv}!!D|R;-T#Q5TGJH;n(#uP-v?q=#4$Kfm*2;!nD2nDx6o%)kdda0P6zNY
z>CAoh1?khT05kK5*j$~S&PHi)P}zg3`~Bc(_h|Ql&#Hk#;!9;-tmvRK|6hXbY&08j
zpoWIjK7rBRy2#ebhrnjdtJZeZPoH+ZA6zHXH!wVW^2Ap)v4y@t_?8wQ(_irqd;3T5
zqwPCSE#S${0c)WN2fO{nyFgG<v<C+(XS#Jj=@EMG<fB`zEN(1TJtA3~aQfS%%*Keb
zEc<d+R>%P5<W727T%*V<rOzN>S<2J#(DFcXyt2gy^KrZfzrMxI)I<OvV~bf{4J_a3
zCBveN+)!kL^$#u6Leol6e5b@Dnr`_#)iu^x4tS7^k+T;ae(ml8?nBgmM(XA08;qtj
zzt~fo!jG@^q}{R!nDu#F-_DT<YkkLKK3dJ6Yez$Y!Et#}(9_zRGsCyeA*_>9BwCzi
z=S(}eA<wX9j3KI=2HJQ$!c(@fc_LF@mwm+2@-rMyJigQ11awzj`-e0W5v#`1vJjyT
z<_FP>g-bo5@B`JNG=v^Cp-LrM7AftzqjNyP^;NB$d8>KBf_qrk%u9A1TShsn_!(mc
z7LPGST2Zta!$7fZ0|<BXBU1ylzwRvQFlZUk-FRhKFI%&8eG_`*u^WYEdlI`!XxG=+
zP;M>Qo+0k9J1VrH9$DG<)l|sd$@#o`)%L-~_s+i~<{R5t7qdw-#N&=;v_;f1Ha&QG
zUTTww+i8o$d?;l!`bee;T)w2Q<@53An{D7z<E{li_RwuiK?lsh2V8D)J!G`R$8<6X
za@`eKZ+#!J$qs~rfk`lbRw(Onu++dV0MIr;;<`sVZ8lg?dQ!Fna2bz_$)Mj=d5>L(
zUyK+{fQAykM#0j)O!pw}e1-p_pXOsiVIpeeJoffo^d<h2DWT8>t`q3wfO6?Z{|_e^
z^h*O2_4>fuxYcVtdh*o&>R)+_3F?#9+w5`w9d-Nm4aj9F|NRU7ZV5-<_M2|>%aYQ`
zh8OUEWpitL)|IjB&FffJ2Y!pmLArX24|_||5JL>X9G=wU=9|ij6X9`6jOHPwz@mS6
zl9|X;Q$F3YxG^n{nvZ~VLe?*=*{D~hLcD<H==C5YnF+~b%X<Qo+K@;FTwKBvN5S`!
zUJ|BiS*wt<TJQQpd?gscz2%8uBy{F>Rb0PbnuMAnES9#coS4<B;i?l3zhX=Yb5)b$
zQVT25RMI4KC3w@|PiW%FqB7)Uen?*zlupa`?9pdX79hXUG&o(f$sV3P?vL!d96~im
z6q*SWl(ANToC=)5hnQ5!TCcf(D_%7W90Upw8YMoSwy<^UtgE1S%2oxBj2ppg^K_<!
zYk1!oK_6m5xR>k9$00xxPF*>_Pe1w7d;g0!;*I|}PHw&jp(-u^^g;e@vVWofZf<UU
zp?>4@{^nNy$)BQ|Km7*(7MS)NhX2!l&wpE6(X7s9>7f6Y%`culf&cp=-u&cGAOHO~
z`FD=eJaII@4fOi)X1uvkm7@*g`G#Z%7%N+CkknJAK<yUOG@josJ^x>P@kQYL_qV?2
zfBvWb=TDzL{Y(GJ)6e1g)|01Cp8hG?{CNI9_`iprZ7iyC<2=hZ()=o#-`1B!{^Zk7
zfTMlX1Bnt<6Wh_Ep7j2TPd@#Ghg*F5$u~MPBQxU2+?7B5<1jgoPWxx16nHK!MdT>@
zadZ+#M{om9r@-_hbMOs3T8!bU>#~NJrIh?puZ!{xaVWY<5N^-B#>gD*p$5YpCG|8p
z?}Fe|jN)hux;)717?_oO65}L)UQAKSKc|GpaD`^on`kFbrngnbgy*9qPcCQ)pMKIl
z*!}tW&e7Ip|ML!NsepgR$`vrTJVPPt2LQ^rm~|uk0{-{u)6aWbPrrCVOWiV8_hTF#
z{(t{3zz8mGdzI0m0vUdm<O^VZ|Ih#W-_fy?zg6aK1)lIZ69yl$#=W2W@#nq%CQM-W
zGRY`r373UFc9RsKgsb{VLG9B|{$YTv8|}aNKKjoEX29)|-~NB4<rEs>vbOrPmYoHl
zL%GdW(MQE)GMgpi3=`6xjNps?-cgcGfqlmJ@l3q`kL<d_oO8XZUextPQr7Vp*ql8K
zVy&sb8O-(jTOchnBZ(~WP?QB#ysvJnI-ONf`|x{|x(`4S$!g?2nZu&^9KTH#(`>|Z
z(OzX`k@EoU6Vb)NKTkTeg|MAfh5&(4PQ(nm(e?Fp?3?(;^m1$C(@%~k$z+n|*U5BD
zX??%wfBMN^HlKd+H`ND%Y?qX&bTGaxv*_=@*!Dk1h;H$-!%Z+C!p<qGET4b+2_=1@
zC$SQSw4cImgHNu|UmE`(%82-Y=<ujWCzGn34`&!mLyu9G>N2}bWjMM^ab;I`X@_2b
z%9ChEon@m8MGIEJ8SB*n-b|x^W~eRe?Nq2sf+<}V7s`WMO;{x}wq4t8Ntb49=lMOt
zWaW#QE*Xx~i!v1{f#mu;acjq>G$~pz;}VOP^Ld`Dc~X{vtmKs`Ur&>Z1#tf9YZ{gH
z*_eaJkXenj?tJ4YSFxif5ez$H8&IKhr2+qhRlQ;2^9U6|EUTgdu@(WBO*GhGDz<5Q
zk>)^hT81QtsDmy=wy&cK<`mfi9{0OZ-_(d;<LcM49-e!8sD6dVX2Jp4h4bC%CPt2-
zbEqfmTcwZ0?8%S4ewnP7jQ2u*rDfgj#~ba>d)cE$y{%5iyFhB^#Oje_d=F<U-4H3g
zoe9xP*ekdN{yPCQTOW!nyFVp_f&VtaQ=G5W0Gg}-ij`NJu%Z}SkMig-rw~qQO}^IT
z-(tgha>t@aQ9FAq*VOB;e-bvG-2?hHh~BdDAH(ysPFmY`X{t7E8j^JMGZG-8zg5*p
zLq*$S142*ihqS5l3GRHOZaYKjXx~s}l_6XV+J)!bal(XGamQwLIbcAuGQm|skNNjx
zRGeo#H!N;-mZ#i{g05-&X6Kug@3$M74qi*WqTg1Ox&9in8(A|1(qFO!YdC+A5S%YL
zbYs59HM#8e;vrdXcKcivFXFR%-0%2Csw|VI58~#nCb7<DbUZt+eZBL|sg9)*f%y{b
zDxQd}u%&LT44w^!qM3ON19ZKq-w;-bHLaVTabQ(fZys-P;>v3KO>f)no2Z99zv(g$
zJ^f>>LD%!-0yYzFh@P~TXI<W9Uw4f3&10OQCckaIVy%gEiT3<RYEaoe3!KhjQl4jZ
znUuHgIb~b8`bmXz%A-9P=H2}J96q*GhwIbjJe}9$s(sB;z!m%-@OYv?#1g(4Csi(8
zfxmLkX2XF}V|Dbig5Z;<MSbHvDe&rypM7}PzA^CU6yJ8@7uJ2D{7^KkRQb>Sw+4VF
z(Yp3aOA0?13S=d2c~zm!HNj*Uyk5sR<G~lz9dOmVb)NbmFG>*Xsd$Uro)wUEsZ7<F
zRYYVIv21osOhhB2a(3eVChkFqyaL$TXX1`h7$F0u!CwvFOHYkXGz1hFWSQ`$xqlgl
z%5&A(^{Kg8PRkPGnIN2y8<8T-T%R}Zn}21-8PzyXiFO8et~2o!G0~V|Tu^*F^EZCV
z_kUoZ!|9|oAChN4%i_A_8juVSlODWX*ca+l-@(V)nq{ZaM|sKA+zFYW>R9wjcK7M;
zVs-gw|NChF6ZXHq^#SaE`|C&h-$(o3Qv2V{e8|Got7X=|W%j?%`%gaaqy6uTzkJdE
z%TxSr>&e#BCm-#9e_Z=tuvB6KtnesP&a~v%%!&v~)1l7*!k^8O`cnRZk^6N~;LRwq
zK%q?xvlH>Zt5G(;jVW_Ac7(NfnTqDu@l2RZ@V9Jm`BZCNkd#0TV!@T<!0;}30dw}1
z`$n`OPJhTMJMxbY@DWv=#jfwwD-dNF3Jw`E?SC8iLyXK|e>uX6mAeQVRxFkTS^;~H
z6+M~hmer!LObW|{71qsmWaTeBE{l1AT)Ij&Sfw==1lSPJC}%00QO0lu3lhVDvrAcX
zYhGcleiWewvCssH1Ih{!SZWvFaTW|Cu!z%RaXv0vJ~G0z9%7Ik+7MIGFRrAovCmzh
zMKyd?&7EQ*-+)1q^26%+UGs6KHqPL?Z=x@G@0c40M!Si=5)b-k7Oy%|A!8oh@5o?8
zM@(iWQ(q)K*Q>U|X+M&&2EvKHQYmGGvUCmt@~F(tF>7~HlH-3Rv$xEYN1!GrEh<qs
z^)(Iunip`CTO%_r2k?_x0Y*SG4I$_u>j`08bMe{Cjw$B|C=#bJ#TIKq1%|26(6~LM
zTSXbqc&aS~#X(Wi7ZC4SPmG05utTz8YdTs@y2oeIbJh5icOWRNh|{w=pxT+jfz>3&
z68xT2=1i=f6qusl&=}i&P(td(d`{JX4Ds+$#8QsvPcZdOKD>n4m!pLqu=XgbigW$k
zK8h}rX{~1-!Y4C0Mb$LD)t|!YIKQnf)2sHkFFN`QmS50(hQ1c;@5}Mz=@6sIn;?cC
zMf4Fj6cD;pL8uAwT(6;OX<$_4+BPry)In0hkWkhDmPYaA-ORX3$KMGYt>NLbXT$mu
z4&Y@m_4qlzU?-63Nk-EYs9LYc;s4;6jLM>_^!OgtjntRw<rscl064S?<W*hF#Y|v+
z<FuGeh867C%*Awk-NTfT{5_V)su)wja}sk)L8{8E)3M_<->pPd6)RZEiL)~`31iH-
z<gYXAt1&Er3^y80$cXb2gHB}74V!BkIGp<F)>mbnPj~X6)5*E4##8xoQ5JcT-T9yb
zHtqhbKNzFqQj+IsGL@6zd$Q`f`R=W@aNTL0Ltv$ai&)1!D?=E6vorF%`8g@QEqwLA
zA4POQ_Au|11O!znM;461Afg%-m38{?Bdka#Sv6k6nx&pj6+m8_aD5#|-?BGaRwjAV
z*u7R*^Evvlz>IH9FKrxx;YC?2=G7{vfPEQ9`xyO4e>9zvYk-GlS|)=Eua{P#GZ_4u
zQb-#9o=#=_NIoo6@^&GR1gE8a1XFq@LjOlHADg2h?n`C2o(8}g;Ge*fU1D%u!LrVn
zDN=)E#TrZX_ZoS}*W%Ky(|K@Yss^q~@nOXKU1ni;D%eZMn<b&70#eUk+I+mQ6(nlJ
z%*n!SFg;!5P2c3jb;Ar^pPmUQB;#aW4|#yS!@2u5fQQGD@fez1RqS5ITTsU3inBs>
z231B3?QU$k6j&2HBz~UKAiV5|=_gDK{^*fT*B(962^2>Ghe|z!e-@F7+HlS0ESn&@
zsEV`Ad72((If>d0nWWT*$H!57ucz{4A!X0dr&NBjgP-gKKGCKKG?CnX)hv5oMaDca
zS%#de=w9T7YV#i(M@J|78wtxK2y91*8zd*QfAHP6@F}J70zz>x!$>po%q19qH!aR#
z+a_5pThP4}wGRyjW|tTk6~t(+#HQjsR>BnlrE0<|_^@|7zIQ*ccR#rAG`~EG=Nlr|
zHyrW=uq2IiZPtF!nvYU%x)qDtABeQ0UB%Ca0%A?J6a!BU29q^&`NaX2A^yVGpT*Ge
z4s?zR2QfJyHDOPO=H)hM8P_Q+QQoD_EAMKxOKtFZ*St<}W(N6ZQCMD(>$Do%DcU(W
zJ?olOjCVud;7o^ET@I}5s}w0l8~^P57m&^Ar}f$v3kNoeSl`=H;K0Td>w6{r3>4;A
z-<V>|z{DTtTU}Nen5bp1%@IikF4|dbEIxW*p_T7z#eEJe%(A{#EZRV!nDvb<ybNsY
zci^GP6$9f?c)t|P*TBSE=Ubg48ki_;eQRfB4y0jb*kiCG0~6)#-BX;yKsyT7SLW3|
zFb|2RM{!nopW;tnvh9b_GvW*E5wYy(fXx3k)W+G&d1L46w!33mRu<(=I$k45+y@N!
zcG4)bi$b&92k(FeqqoWk#_}PABFMjUG<c-ZXF5|$_j8xr^?692Ve|B&#GmQa5>$AF
z(#W5fQ+7mw*8+I2^%B7XSez#fb^bzp$Ic@ra>k@&Re?FjD9We;)<Sis%eWSUY%|HU
zORv{VXwPA_t_P96?_*PQ%N<`i-<@fy({Pp-H+iDE7P#$Zm0aPNJ3_hZbtBj$5Gr}@
zsWQ8u^sC(ny)6d8yUr8a?afSR)xKmn!3Qtf=2SFqGu?%n<LL{gKzB1xepq6$zvt2k
z>&yqp1Cm>eO&_Nh&4n-&Y4B)1tEjCD{T(f_4v`Ly^uGzo@GC<9Sft~inMk;UNrK?n
zHwKu{P(qByq(`pXpED5R!0R}aLv=Y3V%KZ{hp0T2*v>EQf)2266;?TzE?5oCc54Tu
z_=|)jL`@^Y+XJ73g626Uc&Bi?I69tE$s?YX1bLo}H5P;FN!|47v*r=^u?aYJ7|p8R
zkuz~l_HNS^#<l6+5ZfjhW1S6=^%$hB2ELa5j#|}Q9}`PM5NT(|=(dI*Mx2pHQt${6
zMs961?V;hMC$0mT=&XwN_Y4p@y$_-mHnqJ0{m^{b@<q2Wv@px7wp%|r6T-8~aRLxv
zl%j(r>LTQHMY0-+C&`$i>SoNcAkCOq<xh+jy|nWH@~IQlUUKG=Ra#qZ;x8_>v^ipG
z)g`8Iyu?~eThSP4D~0>oV_WysOE0#mkI)O-mu~E#sNBGFQ!1+^I`w+!GDU20JVSp6
zRP+pqvO9UScM-4$2ix4x%6-GBLNB>PPjC5^aDL0OJi<Q4_LUjHBu;R*Ln*;R7kKVX
zx~3N98R~fVsD(PwK*)u!$>9mKSw6S<$TP8S6gsZ~2UHGhB7DkFFEpFUs79j<b4y`%
zbm=zZiUz)9*DLaEY5{lkxHT9%#?;-pD<3mR?3$}-3Z$p@>=(=PU#9G_hD#<NNSGuE
z!-~fQ#y~nIlI4=>+&GZc+MKk8QuhGaAVjU}=cTIP?qeO0R|QlZ4^gThU6ZKFrdJL3
zvJB6mnKa`nL?`hiQG^tox|pS&a!&yv6#?y3W#?{%)?}&pB^6d*hy;vejX+u_lCJU6
z^zGXY&1o!N$a&+G*A(-dQfD@cWo!7D?!(J$*_V?7u;Q+8<6z!ceZk<alyd~7C2n=>
zN;1$)exn&}QRN7`6@M$xK+P3+fby}oqy4>?JdOe`=&Z&YTg{b;q>Un|KvG$<VRHHa
z`rP<2y=Ls$gEL1+jDAROeZ|~^ovDuW`LnEy8UxGyKBAB+)nXSpry(K=-xoO4-7B6>
z)R&y~Mutx<f#<8!2RA%T=K~CZprrYpFzl<OJQ&|#Kx1h-!+B4??iZ&4cp}Hzcncl9
zdvMkXBw;hAbqe3&+-}y|Tzh5uMynS#PgNJC9<gI4*IOS5{)_q62Fw25D+ZAOE-zp5
zl5YK7ayW7DYJ9~+rTKR%XX%Oq!Ld|=z%ZIl1m&$s7T&d-&W!ZjqNL~t)U;R}rK4;S
z@pg>X25&P_*IRc)W&VJCgz0VkJLy$Tao30+T&7xBN0&hEqUaKWSIdHz2q+hMWJ|DJ
zUeWC`VFu-M%q}@Ll=v(XtInqI!S!#aEPbK-Qcer*dm^rOrX{5%G~Uu4`4`h3BQE&$
z2woANi3<&a0*)lkHJ6RpARQ*Vel}Q7bhWS+Nt=WPuS7s%Y<*n>1X)cH;tNcaHk*#q
zMpQgE*1<2vzI8GFgz`Ol0i9>LOm8cu9nwhY<7~SZ**qCy4G<X@six|c&c-+#JJ!$!
zZbzFr<__`WA*-TfR_Q}3x7pQaq!FBsZ@fxgKyrID>&>!>@SZVnk*)L0hV&_W-j#Dc
zYs9=j6{SY$%A;!c++!wQt~$tSLx@V3fEXCVD^*>Jsn=_-dlI%FfcPm*YxAlXgL-Dn
z34#&aA$#G6!YFqi5MIO5Ji9wtXa-;YdKMY|U*9KPGHFq0X#ldst$K?hHOb%piTbdm
z-!uj*iR%K@?D;OrhLtyC`G(x<rrg8Eth71zv_XR4a8hIS7`Cx}P(DxB&V`t~6-;C!
zbP?o!8q5r3=KZF8+|rj)7-e*aNlG2?d+~yS>=7={CoRP1w@i~8{S#TcWLgzAe`m2J
z7Bj=b(h?852fu)>rk8-cYqEI2v<_h-hHL}w!s+cg+Zbl)J{WVSode#5eKA{hB-mCa
zVUQqJ)ncziEw0>k%tF*Z8ZPE_0sGswaGV`M)T3EN<7GM*BT1Q)%f;*?PUiD8AGa~g
zGiIe`0reFx^?2NgHa3<~x?uB8^eD34jXQz7XOb~yGt)kL$fQgK60sfbH_!>i!gxN8
zzS20h(@j%haDm%2+#{KBjXo?5(1>HV!A;AqF>#Z@Pn$cgvaK8^$ZZ^qZvwds-2m9O
z%^G5BmGHtC%StB_WSjLQF#~gUS?5$+)!FIxcF#`ICqN-nasWVpPn-=IRFJzau-K=b
ze#%neV5(ph{EOwqm^&|$K{!J?Au!@y(Tpl1m1DTc#?VEZ&`X>OHT$hj7lRhkq#h_p
zXrfM0*28gjf%%GnLSv<e^TMb$i@NCcB%8v3?P)SQA1Bexc638|kH;N*P0y434GL#v
zIwec`^p*-5Xz>Q=Y|greA2mpcNF4{5;7o^j$K1|eR@hF;^m1Oc^^N)EtS4$2OWgV8
z%s!uLN}O2WgKl(MooVHAB!|U5#!pPkM27WjhP%f+i6y|bT~j|!7X<T>ev%j{+9nV>
z4py%M22Ll$Y#0=5Z5o>Z@_qtX=J)u2-X_|duc|s3y=k}gH(l|ajTT1$S)OEhR}+%q
zj@V^E653M>P#(mzF-HQ!#v6?ckagON>>US_*s%0Ea0mUeqF8<cUH!C+H>d`s(_gxQ
zbB9?q0cZOj9#)@}z*E+HC-`wDmVl^TajlHjPsh7%2i(R^8}Ga3NcOHjW4WYQ;W?YP
zag9ip<u9HV*FbYREM;poa!m0i$0dMlOt4X9utNLO&25v3IrB&gW<Q^POl9*i|I^3(
zFCX(ief)Rd`JY6{`upa8dh%rRi%mQK6FmEv|LG5%|4C*nATy8(r<E?)dSq^M@+UI>
z@?1{yq}J+Ppg&ZSvusLQY8o-NQ_q6e-Z1oSC$f|wh0x*DUVyB2Ufe|Olb?>Bb!3qm
z>;isA-xXz6RamfPFYW>}ycoS%+}7Rb0RGxbXDO=AN)Wm5&&wpm(8m`sd136rO3u^K
zuifZ}SW_==!HpJ2J6cu6nV1T;q6jJURhHJYnom$fvoGpY3y5ON6Y<`EFM#a<kWoI6
z(IplL7a_H%c7dOx%ec1ybU=&03&LWVjWO}j5kOI&T^|D?=keC&(|!;B=V=V{oE86p
z{}ZF|JBk+<u(|Ph8ug;fx}I-uY<OO-nwm$sr&#HEY`+yFMZwBxI;>o{qv5E`D5yEN
zQ&F6x)0C&jDzlV*<8k$$je59HEE$vbK)(N_S6{YX!*=Ec2BR4|N6H)KZ6relt{c)B
zFf$$JfVs(usq|IFWv+C$3wG)u81X9M#}e9I9l}1Fhz`Sn9~`>0WmY~0Mqd8*hCiBd
zlD;rWV-E+$bgVD0F-QC6roQTnv>s}gl@VGOROXw>(^P6JIm1e%=^zy1-sb7G$bAE!
zX*+Y7W~H7OPfz#4vc{~b+)k#`p*9ttc3W*|NWZ<K{2Cn_fB`U?As?gi4`Ow_?^~c3
z&`xZxRW{oW^|f7r9{ekR;BhvhL;Tik{H}Mz2FWU8+w259E&BwtxB%=rBJt4;+1XwZ
zNXtCyPRnrwmQf$VXrt1e@ivh!s?FT~u4lIdAQAbmiV`}Kc;D<X(m1E1-Fui0oL+4z
zj`Y5~vkz(l^Z@Kmgu)5ox#IgdN~cp<(kas}53?|axzyFDf|+>i-;N|`7D$ucJ9mI?
z?$mD(`%*CjteEEO*>tS&84n#}STP+B_xFY`T~ee4-0-M!@om5hNu>v4RgxcK&dB6~
zIj-osFsky+5u|Gq_kjp;7uC8`@oC#*O1$pMS($GwUEpyxdcz!A&_<|$XynMzaaM0E
zSrKY1O!nG5h_((5UoWLox81wW6aCZEGsk#DmFW%5u>rA8$5W#k&at*CyqS&IX0b8=
za7CsO9jWLvIvc?4sfLFB%s+5lppR?r@}`!{key^hedl%f7LHzMWX}ti<Avjhekm-P
zjq<TnZ?bum)t|Xuf#%4AXw#!D=gzif)+5e+qNSd!^f<&EC9ZUoEGqIqD^vFlm3Qei
zzHXX*R>yJN>V&2vu$ymy=Q|Cbgl3+;^?Zss9kYCqx=@}#6FsRJdM6;IPfA!IAboNT
z#FfRN<hMW@F_2e6$LtJ^qO&X?3Q-m|dei&@kC8#htoPVjvfR{<<v*!yrfp5bw#cNj
zBe}e7qjAOj+BtUiC1jW@#Kv_U0^ts;?E%c6lmL`2OR57n=;0uc2<8tzBHFqbO|vo9
zaOX>>vphUsWK#x>AuBq?j&;DtN?TJ`JEW&*nHv{Fta3KM3~wF`L-0FuKqF`dRLpoD
zXl(8~42c4e)2SbT(wUJcLdIBKkunrIVZJ<mka~SgLr{hxJPZA5;NbptI1hn!M0{)X
zHFeI;FMe1Yi!-39lstZI<~b3c#v}Z-3U-_t=mGfAk|}CifgLtSWjo9<#C+B9M9P}1
z637~^-oXQNmQ>Y6%+2^ImJKw|`Qv`)(W7>MgE`prqJAgXp#wnPq1~S+5EZXXb|9t5
z;Gg9q&#_pgzsG~XEFXDx!5b>P9M-4_@l~0`RC62}$QND$Cy`Uq49X(q0mm4R0~V$f
z=|-cCPzo*$8Q>~vd+7a_th<n-s=?}DEjS?0b=u%=s*7i*;qj!qM!2U=z>nF34697c
zMT{jA40m;hnFj*3Lkn7l#ewcva0ce7W^oz*0f}il+U@O1e7!9en%AeDr!fvNY#o;~
z+F^MUY{7x&XBP6K#&Iv<b;z^nf;s5eVCpbeuhVT!s5^6{9*?xanGSbRZToxULN_Pp
zV=g;R^s~wE*xh?m*;%R)h1!-RsByBI-;(PZZB3&a8ta2=PBh&P(=nT}Y;%sGkSc{M
z#T|_bPXl@cJbW7MCgrs7<iZwu7O#4xt2)yyK*9=S*m9!7BrRu24vqGisYh9Ux@h0&
z$I-s1S!bcBp0ARc=0)B^7o22XF^#>OI~~`4k+FQ@R(HU*F_E>$rY}rMFo8u`%;Bcg
zO+wG+DMJGI^EtN$nH|FP`=7U;bjS$w#b2NFzWCc0R^Z;=vx9B2{8ZZ;8{;C2i}GTl
zzZv&8`(JGQ5~sfuWlS*Gd>U^)+0r3-fZK*)pE7$5%bm`QT6`xgLf7v^&fRK$vE})q
zhoxiGXFKJ^g31JXJm#0F(i`(arN#oR)bD{f$mk#^F5K2KV7^e5ZI1;;UdViG=eOQ0
zY_dQ_{NGAJcvqXj`f5cM|E1#vFD|)lD{xj*KYH$Rv#B{+Al(0VP+<}Rp*N^R^*ms#
zv`}qT2x|6*qg`>ln#>FCqE-l$vUjE%Od;#x@0j}4o`m_pgc{HBko}jXSX`>dQD>uL
zwrbSi7_BVbsm9)z;fUt^eJa%ZRe>F4`JSNw^A7hO+Mf5;^e6?Dh9|murx8ouOWEU5
z^kl0kdhS!n6HxFpDR;D8^~fsDwy(0DWr`Z-DBR0FYigR6w%JnJWUDD{?xe5@DQk?Q
z?<#4cN2qV`cIAN3>dDviJ{2_gsGPaGVrD(1j9($+x$5t%ig9k3Wf~9LQhs+;%!>NB
zWfNLk{j$!a0_u=;H7lNOY>mqLx|3x}0h?D?t`cyTwY<>=ZBPyp>SE^6r2Jv^20p_!
zQyyGP(I}`h*va+wQF}(pfDtMKIuTZA<e_{=Nez3|s+38)n9b~<;?)F3OMo(Q30ZSb
zhGgz6MiJ3T6^&3DE(PaI38L(}TqGT$tH`>ZVP%>G=4QZ(!^O7pS1r~<91@UKGCU^Q
zEfEii=R{8deUOk`r)Mt};-!*=EWyS6z?NH9kXZC_WysMr79$J}tqCjF>-cJFZeudG
zX^9LY=G2vmPnSw0Vp#?pj0$ym3Y#lEHV`RklGM=do$$z;rKe-h#Pp0n9}bVdhaCW@
zx@*|FF#D>fNyboJA@Um9vWIMaHLlrOB+u5O&xcLl3E;G<{Ml2o*aZHxx&vP-sj%=c
zp^)me1Q4+kGA{>Ht|M0}XBJZF7X~qyYQ=a(CIO#hr;z2;99$+4^1pl>^=-C3y&5{0
z<nz3-y7J7Yxxq3EfoCSho-DHpJj4=Qm?M)XIZkH9A}3(-WdW!!xGD#Rpqpz9fS)c{
z?NP*8y*IxXmUc?<<{!GJpCuel%Mlk%lAE^H=n?u!yNnB2ASz?;LYxD=?D~k&2jv%n
zCyFr*<|*n%qE3nwQd+BI{Sl|GxnPQ<t`$%I#(7RfuL_n(N?j6$7aBdjQ<|41E$%8A
z><T}h5?eP_fRV8M1!LRrVh37~2|>U_mV>Z>y`*Y_RuYh7p(3^zfb{DE5K<`(s&F+%
z{CBq^D{4e9VD{wL!9+3o1z~xYsC^T;9!?3mXB~mX7MU3reASdu=xamGz+vi7=piT3
zEEicHKYB!VosSWdALGA2#(#f||Ni*zzjgfg#WcMkx9a85-^=2^`=3AAdSb<YZ~f)T
z*2nnoKW_Z@5inJm$R|tE^sL~;rGT$2@|2~^+Xa>^tGA=S13y<^CVBL2n&szddGYBd
z!kgqGt1lPl@u-+>{6b$%HZV54nBH!jPmA-7S%SgV@);KCG_lVcS#JIn*SB-3%8vh!
zoRviTHf}!VSgngWFJh|_7h}QK5f%|1*5x7>#baYffVe=L!B~`k1@RI1_tcafBoin#
zNUTwW6B!lo$NVxI0rX}F*q#npR{PomT&}a!oo8q@QTwMHW_n8U4_kjeCXVvV)@~q+
zOnH5Pg&s2{8Ixpa_&7S4C;@}I8=)*qRg$Qg<=aOA98&p(+I_tM8-O@GwJv!Fcqb@~
znM}gT^3ilbg`8k=0F)ZbXtU10>@fiRUmOnbw`E*7pk<&6Ulid$0k)>~B;^HaIau%x
zfx}(Dt!$u{i#Z5VOBSz?D#9ehzGW&srS>t`E5f!X^hE2@#>}WanHCAa^#$goJ>`GU
ze;lx4g6zxNIhJIdW+T>Bq;<tB-IA;|UW_b7s%6bf%NbMj%1I*JFbB9+07}7|3Wneo
z00uFr*{P$&G`c*GFFBSbHR{YYSTh93r?(jE#D?m!_dcw|3hGt0n5DYp$<JUL<xU$!
z-?4Y-m<pt$OL_A|Agnl^xUgh;jV?|Vb6MazAEGT5vMgqCv>#u@(d*akt!{+>zw`Pv
zMN%dIKYQ=m-L`eD3x9vdIR62*_9y`vghV-Z8tu?M@+IBgwQR+5bCjc~Fi1iYVv?W<
z(iZFE{`PlX)*TBVB|B}phddISB(Uyl%{8yj#HYCO4`cEVBu_Akq(b!5VBjd-5Q?$D
z<4SuLDFjf~LAkiZs_(~$1Kh)^u`2kbN59;MUlLQ3LsZH?_SgKdPMyST7?Y7LL=#wl
zP0W@(#fh9`<FV|l=qD73VXlC{XCp~_${A(u%SB&>P;PV(Rg+GCUlH+=I!j5tB-(@k
z``1MtJ$e*~`y|mf!8cK|C<$KCP?(&a?w-Zy4GMql;a~U9&em15XWb!t4b{d0*DU^p
z@4fX^6J6`?iTmpQCJpWBhI*o*z4Z;@>rnFkS#$4=j_Z%Rx|M4-<hg43f+(ivH}6Ex
z>5uwVbxqyOh<k^(NZnD*;1SSaAm$(VIo~vWahVBFKYo9Nx!U_k?UQpNUsKtCQIlP!
zoV6EGeTaip6&}pXY%~NGDP?<HXuZepuqj6G6zwKfyF&W4pSHOUn2;*mi)6AD0V7#r
zRx1klBz<P8mX+UN%G;;Tkp5YeH|{r)rw}_G`6hHYE@pB}8F~%$cj-fR#m?lSLYy{L
zW(o86M_f*B|NCI?!9!AT8P9tUxg;5XQe!R17(@l-YS7=JAxZfAq9EdP3N|MHh*!Qc
zX4MQ#deGgT?QXXfyy5AF%VZ(!hKqcLx9__g{+;BLa*hz&*#&WHmZA^ZaFc$hK8jDL
z)|z4x1o}DV`RRI?T%dPRE!Px+)(U885C(w<CH|*IZ-wCvFFH7Cy?E~|=spr3WBxly
z68>~%;kAlh3y!@Q8$FdP(u-<&m*)8bn3alWxe8!Q&V{k_quyxi`nIe3)T2+>OYW!H
zN7&@kg<AWBaeOHtU34_s*`ba`&HF+)xh;7rC&VT#$_vtMC(Y9_MDx%z6|uC`kyyT@
z=Y*}N=%PbYP-Ey6p)*flKj`iI8kW094h~)Z_@D-V2-J_?^Dc_SG&y=?mMt8rVyr9G
z=iPz3rk#zszP4)0S<^ml8ya`-Z(P4xB6seYK&!?K8a>ZRCJC`^gTAV1mM6nWDYFIg
zoJ~2Aa3XJ|-=UO(s;6e65`1D%tOX|KTcmrV5FbPqaS%+NLm*@Y^sfn~U7M;D`{c|6
ze-{0vHq*#%mAPs^RgyKDUr%NA90lfyDW~V?*Ce_eTjez_baZA;RNKm1<;7LN_70UJ
zhcyMQv=a__Iaf%-bHz$1dakLXfYEu8&Zvwy>PbR)^~PMm3*l7+J^cm7n-;<UyGo0l
zL0bI3zs-Wbed_JZJ_o;y^Y_wxPRQ}WK2`D-UVBS233X0nq@?>qyl{mth(t{c>EKr*
z$UcY16qz*4PstADEa`Lc7CSe|DKh0NzJNoL_FJoZBU9_>05H5jKNgGOr6q9<L3j|U
zDGlMrXc8w^Ny4g4zZ8tEzMZ|5LiSsj`?p<Mo!}Tq0`63yZK#BJ^&;IBV%3Wj_|mtH
zLds*1sxT0%&~x<mt@r|R5~XHBmkzBkV3-qLPY2jdQE}nrngy5Tc(g{q@l!GWJh_+`
zqtpE!@KW&CnI>ewiFbQeS|p>YMm}PX&jB1j>--5LrsUHF+g&%mOO9Ny^Cro@PV2q(
zt#8tS-c9;&DnyQT9cgM@HK<4kCIv?IC2r?OBy`-J<`)FwuZRfyq2e*lM|W+OTda9?
z&aLwutz`<6t5=tl@T61R1meVSHB(Uo-1fWXA(E*y=q=$VX0k(ZB(F=-GPG2T5Hx1Z
z<<hu!#$#C{l73y_jQ$qXzuU8KA*TiR>|+CeXyPbzGk5wVRnnJJ>nB=jBL8&+^5w!D
zDz20Rv11Mo?={6@nZpUb@0(syQ07z*N|z8fkt~J&O{99344KgdB~UkX1!Xi*@s3li
z(HV!wbH`%W5N&T`>$)NgQ%H7b;zXVHhF42&j8|^}XT|kqHK+d8sCR4szqS9}+W&9=
z{C4(#ibUshWnYZ{{@}rbZ(RKMy@$8>@4sXF|8t$_I6#No1?w?TBRHEEm=><67zG?P
zBCX)>;y01^QhgV2@QaMi%^db8A>gU`V{F~EuM>(0zoa2VoYc(PtS;f{x%W!!vO2F!
zmj)8<Tk61DjZ?lE@;T3%gL=+Ca!rItC~&+fJ*4yp)|d}*P$YR0VAlB9_A8Y=4oDl0
zQmXZ<s*C((Rrs397c2qOVNOy4%1@~t$CdXD{w7Ry&Z}w~+HuEeys=xj-<b{bI*d5I
z{A%zkaG3PbMFw{|+jpz=X@j<cSL8|TI@8Z6M){?cWn86`sy#95dk*7stK-D8L_ky8
zdlvMv`-xOd^%8_V+<oL4q--yA9RMyfbgNl|vq1Hs{sGZBapRva=QMm7*MfW85sv-~
z4_rCPgG0IN+&rM-XHNIcP0+b_1H#5*<7MugfZVP!bTGh0lb+APhbpLrOs{v!=4EM^
z9GVFg&miZ;zrHBB^81PEbL3xeHRnf9p3!N-e>F8R5XLRb!<$!6U+r@|5@kun<)D54
zr$5#6Z@H(-%)f<uwc|@ADmJklE#z_THeVLfq6J+n$2V~nua@XkPvE0|&l_2LK!P3Y
zG7(<;B%6{355qVdN=tPbZK5ud+I6uL>wVR_PFY-Semm+**ItN&Vl4N5Esz^Gg~r=q
zt_P`Q)P$OAPFg}yDomOBKKPUNnR?0U9iz*Ho~!O7K5?A}6urSb&)(IMe`RoicTg(m
zk*<%>H;_9#kf;%x&K<>L)SN1iU-wqxyGpKe5*(p9Bgb(368@3yJzhXy&j1_GDc}Bu
zL4zn1o#3VA=o5!5=qD+c>IdCkeKT#ak3<$oZ>?_DzvAz?neP)@@UD=zh4aoanwo-7
zSj}g5(vY%X^^|gEP&M=iR=Bs{fT81=lD&-<2AU3uTtNE~b--WWBk!5|;<jbe*Qk5f
zJ9pQppx3rFC6TX}gHdfnMxR=gb0~<a<i^D%%M^ld=}~cho|D}k&s^mvsXn|ZVNH^!
zh_g`RksTFtxZ}nvHxc;$`f={pai)t6lX${Cm?||lL$0{T@3c5G&rH3J<;<!gySjcA
zz5)h|-49MxM`P8VurdFp0g|<B;>>o7+|2D5jL#)<6_9Ezd$5#$lQ=~ymwZ!XpPf~{
z2|>?np3{7_F@-8yx-UYK!y;oNx3=T{XiG%Ou97a6BI;6wM=$GmNdg4HkmT6S0EwRA
zzDjZHdc!n_45IH1^Eesd1CmSVd^yFc@7z%6SjQGikOg@Tjfq8=oB`UA((`e4A?30v
zd95B;h<oBbdw_}_$8`sZlHOz}(7G@u@HD00oxHzDE;#A95y0<DLTyzdvjNmjuE<}W
z;9HJ=5%07hW54_l*}R~r*LgX}2F18otSoM(&g%~^v2%>gwLsSPU9JvAIT#lg*`l1U
z{)CQS<i|lV!YFxFvwT>bqi;5_LzOKa$O;4!jfb!N@27;mL)p%W+RMe{;A#Aweh&3=
z_MMfDJX0yE<!LqRuC?c&UTty*&^w1uj(NlQgfZZHnrX6?lHNwHfSnPW)%)`tY_g#?
z*}2+oziQz2t**-5XEJ@w5$cV5xAm^duZH<-an0W76<3WgxeUdzj%*;Vwp>z87`}QE
z9erWj$_h<k9W+JsYm)$ff6zaIxAgV69a`}0YBA4-i(pbZC_$U<p2<@18+kSjz0>aQ
z>4o4>;KODGOmgnoC8Fy{PA5zBn;X?IM#YL5jGtO9=1}=wv1RFoi?U9&i}o1;QsLvP
zEHS#XoEGPuHH=|9cUBXh9G-AZX*VH!12&n5QMK2wINOLd^%U|%xQIEImt5%KKBGF`
zhqGMtAvAxCep6o6y^few)i+Ty^agW)EaX6O5gy0)5j&*K(4&AgpVQ()3cI+dkTt3#
zNQD{5MCtAFMMnBISFL{?j#u|x;8{4VrG5;=WLm+Q5NKR@=Ux+zqMR2O%;Ph#-oEH~
z4X%R;Cg0N1(tuja;38XARgq28cca`+y2=<J>WXKU*RJ@5V1jG&FU>2PaJTC?Q5%R%
z+yEw3p`Rsva7hl$(60)Y>dZSf`$0Qdw<Oc`v`f-mO~Cvx$aEffY#ZG0e(*?=Sj6dX
z-~$Xg>R2;c$e3AZ{Qx~#bx7K3D?BsKR&7r8f%2s&6O;pL?_B2dNwLT~{t*~_!%IAK
z<1q3jdD&#8%vQ7cpn#J+Up2Su8Yom-e3JzN^VrhzuW5Ch1M@SQ`vxntU5b|!7(hyI
z_^7{kEhV(6XQE)mMOg;t`A6e)-|0kaC;hK5RG9aW#@FXgQEVNF#PWyUnSWI}=r87<
zDUz#y@r<Z5E-yMX7aLCS?`7t`WK&O;=G<1r#Uxuhw-?!DlHF_@W*5tRe#32e|6j2U
z|BPb|sT0OTZ748Q!)%=E>D;*3WpTcc#rB$3VF27m=j(0NZ~p09_1A1z4vO_V^)U=I
zBn)M9tK#slJ+&4w*>j1>{idk~!q7}lZhXu9UdV@(GGUaVn&9p2y&aSz-_IA;kHNh_
zO*(vyJoOO~5n?D77ghJ1qQOpJRvfKAi!aIxMYRO2ihSypmSWc@b_Aetqm`G?8^GuO
z`&_%vbf-g$e9?@VgsE}HKIYJk@%Tn_Cd>ZY+z<bUJ6yXVZc6(%q5fX3dy-ca@7oW;
z^Sd}Wa=iMa7T~iKPk3;5Tl;f<_Y?El_j;e}_~mz_CR6@T2&%yoEc)ZiLFqSSA||7B
zGPwp1?Wz!u7v-D*HQ9N<R54re99q&-LQp-?hAu5%lKqJ4NoTZFY1WvEx2<sAc|Jpf
z=sEeFl*<J=8z`VkOB0U50G=efftu#@Wrl%1gx8%7FZY8ViiIJr2k-LvG#^XQUgZ!{
z7Y?5)Wq4NcL6f3l<f5+lwAHSL0V9ADQ)v)Btyk5kZLV!9g4&l#VKoXdD(yEWA{GaE
z(0fWQAswIElWg7GSxjtX_q<h6T_2?+2mc*z8yjn*Df9HjoinFBsaEfyjV!wPWGWSv
ziMxYGo$P^eFk{ym^wCwm06drV2qwFSKFiK;q7PI$F`)@Oi3L+3u(<@9Z+jj=Sx04|
zgnuZqATP-CF*DV0s0W(@Q7n~FuUZsCGyKw2)7?lXc+!f~+W^yRq?)7XTG0#3qtHa#
zC44gn>&j6vir3)+=%AZMM?c4jCWMS%mxz^)-qR=7R7Ni5>B$X-`9=@S$SK!o3TKBd
zy!zPBg;bg~r$o5_VQJ@L6LsjNPULs1kuKZHJ8?YLLCCHzg#fR@^nYv^7F9QUCn^uL
z6P)+Gk=54|_ME)a==0-r?nIJ8a_B+m2#q?hQ76b6Ut(uqOAu^^$HSL8CZj>`7+wGy
z^^r(=J}k-6Pjrr()MQ-w|FSz;<>YCwTr}~KR10^!&J2<TQFgD_O)5ir4QZROSEo&8
zamMbe=fM&g2m9gx`4ZC1b>cm01vsM*q1|wD{&5tZByS=s(z=BiL=SU@>w=w!)exUi
z&4#!oyImFOATv8Wd)nSH-4oThmX3K%yXxFp1lB`I9U|mrL9C&bLm$QNbI}Ic4F&Qd
zXCsHX0!=KSe97;%AcX^;jli%Dq*hJ~LE;*;=oT;O(}t&}t<#gJ;Rp#cCXY3)10>y;
za7yyf<dW`pKzCSBJvzy&Uzdp&+`@_5)8Xj+%5uRFD;m`}DB6?W_pUYDz9S>e^|mES
z!OZDXlnwLd^%6MkG*4oQyuz${zu76r5joXnZLGE>qqt>SXpyb--fnCK7E7eWfc*lU
z`g<OZv&mqT1y}pjP6BU)SM@d7WSSigL<EcphXP-9*x@%`J5q6O49v8F1R~tbau7iM
zUc=lOUJgH}Pf4<*9{JzqO_u(QB%n)ha?LA3Z^48um9b;km*uaaXLeN0P_}p0HR@?_
z_y<~^u3gOZ=VUf(Tu##XShFnkowZ4xw#=|OXS<tC+0-{*^=bHL4qaI{YGY@MNyFfa
zb7k^ujrnM1p$<l)LS*bVP89QeBn_YlCyngX7t4L>PE$mPxh%a{Yi~DL9%|)Zx;{EH
z)RU(_q*vid<RxWSh~2@Y4&`{DTqpXMH5EfIvF=rf^+bLa%Bo%rE41773Dj8h_X$jK
zi~n$o|8R@{aEt$N6a0s2ljIL;@gH{Yf7{!0^FQo8d~l2Z@H@wUV2jK=A5#W96?f~0
zelUO=T+D_80oVY{4M&m@W3!wN^Ermlh~Q0+H{m+G1#LobFc;(l;qzD}!Ev4kZwp%{
zSMYw#KAq3c@oAG!C&+@BH@HCjbIGrbauokD46m>O4LswM(0<nF+@T>P;viQscv&T&
z9dX@cMTlW16b{*Flqv<Aj1DrMThSI6+JF|ileE`qf7A35R03Fg5j$G0`ydV;C~x%V
z=Zt^P;xwh}h4~~GNksBTx`ejN*nsw+hhj2Fdm(Yf{w4Eagi7`U*L73vd=k%xWqy8M
z$#5zT-ETn8sNrMiZD4Dt=%KN1ohW}wylf5Jt=&$uJQ`t=0)$j{PL4Fz6$Xe&a((=U
zW|hA^N~t`PKZGnRAhmkQ*|~|5NNOWrs}Y=O1Kx^^x?)reg>#}NS)p|*;w08lZsm<h
z6YZqB;Re%?%Vu?@wUEfpe_W-#|L8~pr4L_wjV5Joj<d9hQO))Hg^?Q8cx_mMs9ep;
z>4+w?tTjl^;nb=DThq|Iae8W*77rA1jXwAF!JMASx@62t+c?BK$$I>n+6j~-fmg(N
z*z9mELlZRx^gOqSNbPv*U>+`dEC1c{f4B1A?Vo>&{8!FFPPjlFWlE-GAo}TSIj@$*
zqAnD!!T;O6|II^3{(A^tZsot<ul&dHcN7)+!@LOodpQnz4}#t|`@4VKfAC$f2NFD9
zq5nHuLUr@~;5?fn5EGU4fdDdk;Jb02&8G=kV*>HkYXpU;s&al<Tp_n*y*LJmC!6z@
zOj5Jh3wlD)d`=lkAQ&jnLBvZqNmn{GAo4NcRUE7otUqgT5isW}8I_B06-COII=n((
zXTm*}tSGaduhjWC6vyL;Pe*8UEp~<{o9uKt5?G!dBlkp*@w(PPRvU<VF-eu;K3&fw
zm-{>ieumPOqjQ-MYV&NW5jukK>9ZdW{`%sLkVp(|_(z?HTSCa?I{!IU$g(c5VZ1UZ
z%~iR<dO^{1PL>F=oi0Xc&p>)xqInSHeiNZi7Y-?ZixdShi#eLPg=Hig!^Rw~f?<xe
z=A%eW$*`QQ(vxeB6*`W;#KDQ`9Mn_fqyHr1Y#hM6PqG#Mj)`YJ&M_UKb`tx53HGXd
zU86r7yf}VlwEB}MhO>v$J;n7}NEv0qn|d=b17*up^3bvH%Ic$E);S#D<=nzPd4hsy
zF)WCy6rAx~??A@33o&|~!#*)Fr;GKej33z$vFeSsRg$Jz!g=0XJEM{#pt}Ql2hdPt
z?_w5bgA#Y*$3hFXuXFNi95&4d91=Jjiy{twPVn^oJu3>JP4E%E{$*ZNm(y%E&#G(`
z2S-T|_P+b>ew3K!PTqEg;)W04KFrk+KBD8Uav&oQ-m60BgXj#yPh%lr;->wWeaOe=
z;TYw^azZ)qNJ=}Jm%}_qDoks#_bpU$P!;16q*7S1sTeO(;^X3i5JGib$17N)3G3;E
zm1_=@_swqin{N_>)--~3T7;s{XJJaw7scOmnemj57C-ga|HLV<6QU|9#wJAp><8-?
zSK5+oiy!;CH!c(opLg}AFS_ZxCXc9oP5fd_%50v`a2=|Oq%rNCx^i?TPl)a?z!};`
zT^u2PUNQNC)hDAaohaJl*|>yLj_CHmE|E=p{>7%BTfT32O3Aq}L-z`F5`*0~FRqBV
zG95(H0WXZmhZfGMnl#5H3$<s}LJ&IIr<i9IMQgR8`8E`;wF@YZK3;H#=Qug&ezYh}
z<0mG7T+ElkKST@cl-d*1q|Tx9X?cDQ_XEtP`gD@g^Z}7r4lm6x{fuj3AA5VVp({~%
z5_DCCQLrN`GMsVw2b>SA=*1pqM<5SHT{LmjQFU)a-c9@UE}YP#kTS^jJAbkLt5>!&
zz9ftNlVnu$`I0SJd%$f_^jB01dpEnt!xNPe$?aaZuZsBNIMi|ScrD1OqqZn_i1rh$
z52WVOqQFc<6d5v2C%`_b&hAk1kdvs}^F@z|)=#mXJDmLk>(t*pMS`7veFwC-R^5Dr
zD{CPL;brB%0?$^k$5yok@bt!BSay%wv@bUw1?+;AfWx)Z!g+=%YQWsudFbx6hWTB2
zMzw0`Tg{IGNp9RmF-)=z2d9BjD+U|?{7?Y9a~)3mV0=*mr*}EIMl^sK4zM45qT}Bk
z7w?e7nN<5TInUz-7h2A-9(Ikr=UI8pG?lBs8h|YxXHx<f3kbDrFt*)WSAs92KBzWN
zy=~ej>ka1JT8J2v>#6#2F&a=xL}4aE4GV><NWnQmXbX8a@TP(NL|MKbeuKLLgiE;g
zhJA;h_YTkWIJO(ZT{R)LeI@>dT}EV#x>;ijXq!0<MtVg>(bb$QbDwk7NE^J5uSAQ~
zvK@BzQZYlE9+DWuqY9YYsx$zR`5^^Gav~rTniPMLPPk(<a$K-0p6?1dNk#dS9WG{f
zM}tSgX|0OU63!0$dXd1lL?mMpA=pk(F%YeXe|wFS;dF6j-yI}}<7}JRm#`T3Y~`T%
zsi+a+N3!Wc$~6{ZIXz5`#A&mH^09^gz!r=1b7NL036o4aXyHkraFbG1#GNkeYTfju
znC#Y&u{g!RtD{3;1qG2Y1P$P)ii-2}?i!U)(gpcQ<cMS8LjQ~c+Z;1_8};Sce7s6U
zGGZGt9OE}qdM+N-mKfhCRHi<D6|1tc2s&nq0)D!p-z@(-PJLINE1z|_pRKwUrQ~v?
z|F@!vSji8#zqlsp9y!k^f;`shdpF6jBZ$f6C$p5_SOAmi$P}DfS0C}>)hq7T(esx-
z1l#J)#U>&h!iulu=-;W5{Hn^Vx}Zx&b@ez9wE*xU^^OQJb`A=g3kcG(4TGo#7t`>~
zqcbd*l-bq*J${E@lV06h@(3Bx=>mh3%^3+UK@`C2#@!C)et(=_pg)HyL)m)uQKXd%
z5BU~5v2;$w7;i+IM!k-LM|z7h5=_WL`MytM#KdJ@e3&pacDU>?9fc_PTksG4upV8o
z75sHJ%E(<*ZnpAVQQ%chHgVp`HRg&tcc%e4-q_<K)Co{NI`}E(gX0MZ`2o+EpzkEX
z5Y;Ee8$r;{rE?DPG?FUYwOm4rsN57(bsh^k@ABaC@X`sqKw!GdQ|_PZ^($WFho@BT
ztlsxRZ)NHoWtOgyX9{iso-4m1M|}|xMqqArgmJNrF_$1MyC%inpem`d%VoeUArw;Y
zu{3`0tdba=Mb*84f}2ePMRrN?*of8SLm<Rm4m(umCJt~_=M)R=*0&U!b5TjkU`&~^
z+t2e4MY*hC^B2@Usq$UzjoPpcsSXa2l>ixA?3@#knD+`8dHs1a!9Z**vzoKyO-nuU
z!oN=2vd<jyH!g?oP<5CV99tJ2#=#Z*ht1ACXYF7#LJycRQn)2=4><n9(Lvo@^lM%P
zbpd-ilHMx~hxwT5u*l(ZKos$4ULwa#5j5}<2GO`8A6)Lpn8~@T*ZyiizI28b72(=v
z`5gBgF&!_CozUDjdVY}=h9;a=5wu6Y1M~Ohjq?UIhh!q=t%O25ma6wpI^nB((g7QM
z_3BjBKnpl7%amRWv1f5`NDMnx9yQmFE=-C!9W5!K0)LyS8Fs`HA+yVEMlCh8q1kai
zp(W1rb)AoW9S5&nVvAe*H6AT0(|85ZjG#Ud=@s2qU1H}H6Ii*+0q+BuLDmNd_%3di
z1(VJ0yKR>%us9NAmH10+EQSvcq?Kk^j3rix$wuI|=BhU%7Iqz?W2bU#`}E;n?<}(F
z^{cytveyStA`)smmqZt&c2V=^7zLRG>wfZ+VCwdRPE(lAdFPX!u>E9@6LYk$sAt)N
zdq%cYi(=@_%QtBS+i;9YhA6vjmyGy)t2hUX>QxLU-s)m5F(-UfQ>ddUsV|Q7eymEn
zeo1Wk@xDo{PvQW<BZ*V`&GdTuPpfCfmmMiD*Iw2wDUsVa-$KmSH6<oRYf7YK)w!E^
z^j7$qshWZd7#Ihoo@&%h3A$Cu0>uZ%sg6voxYn$+{%Y!|&xZaf#RgiGP}uyn6;SK7
zMXMB;*?s9JpnxT8-5B?+e$hXx7HD;Y8(Hvn$oY`wsv?d^Oj!_8Ha%g}tku#m>yfis
zPM;07#N0|zaL?T?o}EH9m|I2PbG%t*sNliKGoW>;H1(`vK5bPqVzRW=SAx@?&zWP<
zf+@?)N*6)a9>I&4DLrV3-$FOuvbZ792S%b|!-%Y)!_n@Fchu?LnYByt_s7ty37)z?
zCvmX1yZc=NyjC`&Sr@ud17et<d2oAWjC+B-w%1Mh<}Ox98fV!B6F<&6G)2Se2h7|6
z+jG1^rZ{|BqL#|NuVKXzV@z<|PsaItQo=p?156fHwHMhV*|SDMtsO5%bJR^grh)Z(
z>j&n7|A<lQiR3I1+4N#;+UAqszf0IW=$4K5FVc+6multc5(Yf1pf~q-!-o-UrdhIg
z|C<Nhz55Tpy%DwzY=c&G8%;HwGioT<ad5Dtcu;8@386>B!I)vBde_%ogBd4+kurp&
zQ<td{(;C;>br-6J$w%TSSt@@&1mzydRjNq)sE)=po#Yh}fW}D8?WE8tN5y%;hgq^D
zb)R}mcRYW_^NkY|9t&I;^ro33YRdxIW8h*hOj`Fc4vr90pepL>@~z-9IL}Dg8<NCI
z>Rw(a0g0fzg+_Ecz(g#@tJti9(!B}TfFYV_W74ItjeG)EiP4#(syNhy)<YC69_v!D
z-OCAb-IUB_HeQ8T2~pHYD0X{>IRS0)qjCusmjn04i=!!OKyll_i?SSk&>;eFr&O@*
zaWdhU12qclCB?ixEGAUuB_L!Idt`#Pqw&!0%1?N;PE7y{xNLtGan{+KY>*a5#gFT}
zISCs1WF~f5E&rL`fYu`qM^vjx+C$~o&|_`~`{6RdW*f2)$x6#?o~z=(+l9UEWfY-#
z-&f6+@QRpoV!@Z$OV~#QY$aX!Fr@4p^-rc+Hbjp#x28)H*`Yi-oyF{om0^qp><!qf
zOOW>GBP5d(3cf<*n{hrN{FQS2As<D)1dK&Bg^{Vz`jeblQvuy|EE?+iex|N&RA+UK
z&Q#?5V<B{j6GO)fr#4&AeUk2>Qo=esk?muGMZXVr2`z0<h4>wnuf6=?eht#aZA7SS
zXNWbVH#MqR@Zd$x;d<pA1rn!c@0X6c9Kmd$7e`JX-lT#O^@Eo?;mht(6l@0sSWEr3
z+xsgn;ie6Gw!6_-eWCO+Zuixj9;Q#dXEJ_u4HYwh2k;em3Sdd%;AtbjhkkQtOaclu
z5BLa4dVFXG3*u^>o*o2`&;BzweG)u9I}3~(4qG8s<|#P`k-JhhU1?~q*uZVP%nQ8Z
zCIeV-x>vXv<4|R8Pj19>-~`BfkalGK_AP8KfLrZW%YkV4z#QY6x}Mk7vh2tQHyrDe
z1e0v~d9A6F;In2W_wcb%@aY$HMkyh+3h|n3#^KF*s)lVSDIPj!z<u;I+V?8JxpD`#
z>?y8uKj`f|t*J_fmrL4ez^LH`>p&+ykp;u2TDFJNB0h-&gqjJAoH~mf3$7RNY;89+
zr6bZGKBe~Z6@2oeUu`-Jw_+O9lWiQu_T$LciUEe%)0_kkTYS-@kGUbUcW?G|sV7Wu
z9IA=%%M*;j<Nd%>!+XKpP0WW>i){EVbh4VkgG=!M>%aG=xBX@>4!Uo8Q4}}6+6}q0
z^lq)+_{Z`HVCs7Kzi#81ZsY%M<9}}B|8D<$t@uA+F@T)~(IK0U)}p|)$Nzn^_w6^|
zI`Mz^?|=L7HvaG5BL45qlCau?y>Eivcl+P;_P=?6@qfEFi2oZFvz6d=WP~2EJm~_p
z93;w!SE;b`>WgA}Fur)kV$^RMP$+);2t_b1XTu1?#xbH~e5oT4*@Gihj!jkgV|*OH
zkE{3~X4_i4D95o@R1U}I>7dA}TKFK_2hre>2SN)g<K#vlvF*(Bq0mz9u)`6B(Xo@J
z4bqvPpQEL*5J7q*2g2Gb-?MRfp#gx<jx;VV%W{-B9nxMuFtfrp^s*ciWCW*5CKsQE
z+N5YaUSuy!*R~&3)(|6GWq}ww#rY~wZoXu!5CAbW{vZzY43Ly7XQ7zNFguPqK|co-
zAQZgEbT{2V2vSg$8D&K3Pk8+iIw5Tbp|pky1eQn!m5O%P6*3%{xpS(ae}vfbJzbSC
zcv(4F8PL}8hX5pC%2(Y6*`cON;+ky1*ynj3T+GYmtdetziso!keh^y6AE6)dv6_Z{
z@Z%|bLAiqd6)!<I<752m0REzvRLAL6^V@;(O%%sy*<<|c05>NV=joub>f?d@Sf2`%
z)G6y!rwT&t7ptcuA|;x=Te5ks>VX@O5+<n4K|4l<h3btYA!eXGb6-@(m@pet5c0k<
zeul+FFLo5KL+Q?be=kb%Srr+3Kc!)Ctzj?Op{BY4euBC@yFer%-|S$~nyFrgw7R_C
zX^BzkddwS7B!a?kJMyi_s>&EzpLPJ}i3)KaKn1-7z8FJ_SXh_YrSPq7^RZR8SZJ~1
z;G3))SEd`_HnHl1{^OG!eqxalYn|k8J33fE&*(Y&TS`jG5p2VZ!j;Ep972~1J=f0k
zief4IjhTLw+@!H^(~=K6gAmP;#1w|tF+%6-=ml-r5F6?r#<UYb>66M{?Fvn2^)axv
zz6_#P8@;Gti;v5XL=7q=*aF0;OsVfW7*livb;N<#mvr7ySqzs}AUmV<^wwDRh1X5J
zcqEJP3wP})=m(!Z>(1b}y9ukPLhHp?zN71-`x!loKIE`~(_y~PNhAzHP!fs9IA<on
zWJg998@<@FHXrD&HOGRAm}DkLZv@mm!=*2Swz)SW4cjICVXR?u_ymt=-}2p8#ci*C
zp$|Ddh}Am=r4-3CbaCNI>LHeR<$uhJi%W$8s1qHr@dfswwc%+i8l60FA0vgdf8qn}
ztIJX^@?BZ0PE4Jn$7^`mm?ua1w}sYdHo9ZJ4RJBcCD^PZ6hA?W#m9qRgd<VU`>_>_
zN?L_^K7k81K(>yy2jL+gvV?*sa|?#W)b!pc$Pb(?4$RTO;VqXJmpUfcW1NDPLx)gd
ztO<7B<uW&!R%7>?e`<@b$p=O~$Sv8lT_whbj@kPG*5}~$(=Wo-I9M)9RB8|5$vFRj
zR@LBXE{`Ue$*~zkXAq$}a{Z6lm*X)?%$S|)Mb3hOnBU9AVpi?%>|8)A%RvIPdFNn~
z{XMH@$Ct}V@oq;54qXztcLw8furtXJTmhc&Z}Sfm6tt!AO@cT427>pdh#%7wJwo@<
z(asM-(c3jcz1l-x#5IJ$ON^@KuaeR<nW?^s_19(!d5V<v0B#6j{b=Z8-o0<5bHGhe
zj%^;tN3M|cN_y3FnQ_`T5p<<VvhLk2Xv#kYG)3?(X*I;KDQ-HJ*nWM7qNuhcuZwfq
zz~$BhuY^I%$QKWzn+RBhTND>M5Wsk+HK(Ioc~@EyhLpysG~ihNH6SpYR$$W!^NdB!
zo7tQqdzfdoz48WMS>4jG{2&~643}AKP!K7#pGQuNPvc~$-r=dKRQGgvxAbnq&7a=u
z#rA=f&52V7G=h(nQt-5$tB}Zu=^}cCrw9m1<B{{9v`x@^=nnSvf?KUJ<rZBz*D_z8
zTvPa_n9cAk{sAgnj?wM(eiVdwRl`mF@#Dt?E@lwG=q0+cXE1;BY6tF<E?&RYV4TcG
z=VBJo`SU=!lj3YUO^*df;A^*MtVCM};HslcJ5_SIm~3I`{g=;=j#c?~ARk`%{{7wE
zm@jUTQ4y}(K3=O{s2@*6Lo4`?O}phA?f5_3F;fEPU!2#d&$dDZe{tSI5z`ju7baRa
z*CbAeeg`?2F#3Jr64|#Pzh?Y$i(+((VswjQbc<qii(+((Vq~Bg9lU;8(`y{(@0Vz{
z0dC1uRNSH%-J%%%E>Mg}>=M%ZNoV`#AjW@A*d#$}FhlnQd~-G)mqS#aa7c(9{{_`~
zY@TdsHrDjyXCp=rJ<-hppPV=kPppT6{*Q|6B0M>9{31>Jwj~;qSd6_Vzz$PxGXggx
z%27RXLmDcGbx^V+1=a|r>&~4!ZlDKNSC8}%LHoe;?|DL5WiM>Qfrk*vY7z#3kX>%9
z(8@o;S$}+}IbIr}B8nBcHDzsea!{k-<vmz<Ffy|W(@8B;xX@SBa)omReWNy4`_6BW
zi$})kXT(p8yTLoq$wWLaf5jv5GaEW!XL)TERLxm!Nm!4r9ztpHWhcan<&!RN1Z!Hn
z{>`L1WdG)DOz<&?4S`M{6fHF@$Lt(~=Ly#SV~!&%Cq7$9ZAPTUbk`RRLktw0s;;0c
zI2+$L8^RqslAa1;rE0}`52;wA$zu1+$+@%U?srxiBP_<bwa9$yN_(&oo9|C}D}s`J
z00iyZ!`zqJmW-vM#?iLyrR(jd;NHf$Y+GC!4E;FkM`RKtW1~oM-9{24q#m6M+R!A_
z0}TEP4*NLQR1+yk0~!Plt$A~~S6lN|@*q|;m}C_>o#)v*cMqPNWV95g!tE>AKSQ^D
z{^m#uCqL7>Kc9M^9^k_R{mh*sy1f5j>LnX>!%pNWuoJH_Lh-T$hUDE!a}~01iiN|2
zjB!(998`?GtuyVi4$lf4<8`YY3|<!`ac#)28-+=zmACin!Y83wThYbb;MwA|z2Zeh
zIn@-LXT`XVPGxU#zhELg3KRSghIgOHDlJfOY9Jro(JVX}BZIqZ&*WWlHI!-#j8ZHv
zreskj&!%cze#CTB&q07Hmz&fF?FO%ZdM&KY+2})2u~|vj<Lr#!8&rtejCc8bnvd_?
zjXW?Zl2eqd-5)f+H}i-tL_y-<eH<Le!H;Tl$}*{_1}-4nm?PYnv^nDdH!N)I%~N*U
z)IjcbxhXtDX>qS$H9u~$=94$CDt#bA6QszX5iuOlD8h(aZ<H)v=<1#aK{JTG4k;$a
z8a8YAM(r?r0XMc(z^>iaoo58yV_Mb~5|iB>Un1*`NvRL0O7@y3%k;&#Cw~A+xy=;A
z2X?j7$9Z>WQa+-|xwqMkw{8zq4CUaH6o{b5)=c<(hH=Zo#)s{H5UX7&SLwPN+mJb>
zAFKZkwbay}y&&#$iG?Phye?EK!W2dMP+k&z4heAU#u!eah2gaOIX1i+l=R$)-V}5M
z<{+7mpn~4#P9(j{irCplkslCCE);Ps{k~=S=z4Dgs~97fv<_7dx1(<bRf9Jre2wMU
z3bJ$T(HKo^VrP;#fbOP$;9V{^tcTWaZNf8{3){lm#0Do5n~aOecN|Y|!gs{3zk-D!
z<&iMGugV@lkwaJ8AuPmw=%5!G3m@?(=dm<g1FPm1WIMbB&vW{t>45s|Q}s6=Ov^AP
z#0k{wi=U$Z@qk@x;JWGrN@!&3WXiFmN&GHpKX+1!nz0gA-1N(s!e^lYsnm1yCd9!W
z*f@lED0uVT=wMu**CA1CIYy9G-~*(d(hP&|OMi)ba>?Y%!N}gkfrAfm5Jl7>s}{C_
z_T378_EqqNDgfysxc5sfiWoh?_4}iIfv-c)Ua|rTAMf$US>e;$cXyKq@LX768`0ci
z@jh<oy4CxO@ZFS}zU6gtxFFLk!_E{O0Hh!rOi-vlXUmE_6!3~6%#?HC{{aj<dK<uw
zL5_Z~Vi6HvQzD`rA|xC*5_Hly7AlYX$^B&Se;6zsdpuImQ$D>w;Np*Ea7wUkF)2sj
zLruc|GBCTZh-kM4)~^1(7||DX{U=kY;BVX7LzWXz6AB1H#}(COIg5iQNpP41;ok1<
z!zjT7U$DWZWVpYW77F}rl3^Z?TzTUd=cz*8=^~#ezXh@%+>4qZG^wZXRv<;bI$o$$
zxSpQMgb<7sE5jES{n*Guw;IgR-fmfePEBT}$zp97e%js_`-@BoI|i@gf9sx-cu!~<
z41Nm(=cLO(&$UD>Mtb?kaxW@$VRx+w6s20(i6+!W_qr9(J^IdNRfBPPuDK{Qe4`@7
zUN6FR47ejzHd!gPrbWx9q7l4f8si|28MB0B1y~Qmt%;;!7%eg`cnO8a3XcR?8T^Ll
z=)g=%pm`WqMZXm+fP~8zBuYGK-hzt~t|l~=l`^zeqf@PF=Z|8#^sLeB?gy{8Ung(2
zg-sJjvwZ^Jf36uh2$ALW_TR0yqM-kXs{MQak$gqh_V-9Ut!tBCTW~LObUZiqoBtO%
zD<?J@k)*j3KgYtp%qVBVJCC$`t}&VUMsID%Uj5&en*54@EdeRdF9o_nwSUlbGW2RT
zo!KZF1}PA44s_hF(@XX|+=QQPoD<kk`f&P`%2RzAye2fW*G1Gwl3j6iBfYgz)>N?@
z^yrG56OF)krxxP?c|vh$2y{YxJM2*Hd@)KsuO~nIX=Z(d1~IJZTc!Bapml9hqHaJP
zl{y$Hq$lWEama#pS!hX@IdH#=VhDoS%W`3^N!R_OAN?014Sp0682<+(xxo@h({qZI
zUT{7XJ`tRW#-Z63WoCY^fpR}#&(7Ws5!ZY0fi*KgW;^(o@#f#UJxPSS^v~Z=t~o)?
z=9J!Vn8(4@N)fW3&7ZH-&nLEOp}}Ue6%hU+oZzA|!PuH;LP^;PFeV6yLrv#z(+d0X
z)s>kB!q$D`xpa5y)yh1#K1<Xwjul4CGl$mKZOL>%W7^fvWio!Gax1qSHU#BjY&u$F
zgf}LwWusW8!A5)Fu{kZ7hdi~6^W=D&%3NcJC&%twZUuj>FnnVSi;f^7=9BQ*k5GB=
z9#vi2KkkGF7@r*l{~1*D%Q5`YJ+|tqO5ARy(+C#+F{Tnm=8E@)Vd?wP)ZLj6?7E+0
z#lk&A)GiDmz*B&Pu^f-kmsq%mbd)~>-0o-q<H6_(e}tJk&KL+mZpGue3iEwoC=to*
z@MDG%><1YI+gn|C-#m8StjG8wZ0!4j?Otas0BE=PU$^*QxA<ST_+K~0{~BkraW>3b
zF~HX0f9>tL_+R+jE&kW<6920izRQPyl~pO3z*7M$YrRQ(Ddl)@!Ytr2fBAuevYP_^
zU+|6#>u^`>f<AW~;8heJD%<JFhdek&T{uQ5Rfuyx1a6zdr$LZPWPVhR_ZQ7Y$i52w
zwsfFjW9SH3{0VTg6^piJjtf#v?d=O^4y{TF>A~Oj#3esn2Is{Ud(J4RmgW0#HrN>#
zgPrC3R1a$B@snfxWl#;1$=3}3zi#~h2GRQ8ezj<QGkQvd?cYZ0{|=({Z-e7+gX8~g
zg5x)dFMpg3vLSBc<0N>R#KB>S+E@^u;qxDYzhsLlySM;~gl<(y@E5i^eHSI_nlXWx
z^nFu_`-T%di#r89b`YdJc>e6!Gsc9^h642qZ^P^&H?u!ej1CkBx%|7Z7sbJY``w54
z6CHy7bz<Wi<KvUf0@Yh1<u{5_$IA%6{N)_TJbY7O>fgq`SYn@|7m<omycSesx)G+n
zQABz}AiC%kJ*H2d5OiPIxqTnCN0Y10qq;?Y_Q#PogpW(I><<LztD`Ll9G1z7^0p(`
z(0|F|>(CsE{m?!=*eAzrNZ1&~F4Srj4CofDfZpSGtXO9mbh~jF=w?yJ!XUFj%<;<e
z?y5aLxo+KA;R82_z#BEI8@pc2tm@`&;zYqxJDY{8HUz7BLI~T#RE=EXsW>vk*`w3I
zY+Nj=zImXm&Eh06f<BJQ^L{T9Aw$lO7&p!1>m_F72=U&i0AN6$zaGSZ>{YalwQh&K
z-R*6*u~8!3u-khW2i;z59dFx@lTx!(RyK94QZx|Ni<&o5C492>aF6E{(h1j`D(tL1
zmUtHj;R0h~WAf^V?1DU*2~8o#YWfh_)b0;YPN^<f=#~1!jeE>hJknE45r1acA}Qgz
z-=GH7LN75ih#Hm7P$&FzgG&hdr0?J)(g@AT{>%B3pN!N;sE+1dDYg5KIo%?Ylsfn+
z2m()jvND>g8f~vKtmGY5VYSy>!0-(eGfO8!Eaa(ZSz{23Q;{0SLe0E&JVai#6W7yk
z>uX2)nsoo49OZl6@KqI3>I-4LUZkoo47C=;y2A_euL?~LPWnexVP&^_(-xzQ&yS=<
zV}n3t@xF5-%V6cd=d39qoRe&7<RFj9W&NBNj=TuVob4p}72^JxwNVuWYckEbz-1;J
zUnMv$0#38E%Ws<pcRETh#^oRzr;9Shc^sPr_sEFzg$w=uo;wr?_gx=DWkI8Ym@yLY
zyC2cq7XE|g==|Kc`x98mP<-apKy}VS^Jwgf!xbmw7I&w<WpQ{iT2sD!>Y{yk=HOHn
zJlkZif5q_BhR{?ImP(U`dB2PdeY9!Gf47$R&KP}btm=j{V0aZRC#eb#@3|r4h%Z_U
ziDVkp=(hykyZM^&bTF*aS+=-rDt(bnCYiNKEvq7{h;n(#G?p5b3ssFUV`xRhS6}~h
zD3ys8;ZRM5@Bc>7^9TwQR)5>G8WcXf1p3McP&AF9eP|_>MtoSPwCM0Z>?eW7%Ajvn
zX1@5dGiG~9oDE<+j9SWd`;sl%mefo?j)TVeHtX1N8*i+{q87*4uEZ_~g{#_zeX#!^
zhWU=$Evv1`5+|M-<TlLtfuHPnj>Tlw)82EnE*bqe)~~rVK6!&yVbP?XAFSInQrqqP
zOr<8vZTX(nsXasdI*MqojnTz6ql0H89IkEpDt>QRymuGHc!Vd_oe6`Ju-@5VMX-DJ
zhLlyn;))hA0201B!7~;aJC3O)V}{;a&ws2ve?giwcxi7tG{{gF9UFEe>q?u$n$D+$
zX#XMw3ivM#YNwsqg!0rFlD=usT}SI05s-a%KY5sc{$<*ck#jtGz6jqZmd1TnE(Cay
zG`1)*N?u&z@SW9qDOU$&;oW^oxBJs!y|+|8)vfT9$o#+!5>F^IfFK%$7)gV-q255U
z49EZ+kLwkSHgXKaDHY?}rrJD<hvxuWya(Hp-`)0rObm5R_W}%yeebQUeU=&~X^0tP
zqu7{&W{1q~Kt$l8Z}xy^>YHt|5p)B@*q*)Zta<L7e8Q=`HoHdjk2FG2jtR2`dKp$H
za!~<fJ@*{8N0od}PGNl1H}N{Xe$!pKOxye&gDod$iP1Mc5Rcx@p65K3Qym{9kBvM`
zf)$C-w)8>~D^Cxs#aGi=AM4r)))mos>ou%x-&zK54}1%wEATDR*Xe8XqO_05SzZ0a
zvzjDFUzQ)$as?krkt$89n3w@%AF?kP>pNK4NY`fg`N*B25c8ocN7(`>J{o5i`PlJW
zmRFg%H$0=0ztu;h^DQy|EtJA2Uk(URqr!aK7g;f_(Rb7xIqZAn6TDcRNC8u@aXCe^
zfj8x=#<S41k)Aq*kngC2$vOAE40?ygr`Z%mpjy3=M<9~C0v8j)Q@y0m5ucGsI2k&{
z0fH#d)1q-;#<r6rfEm8T8hot79LNRpu7)jgMXYwFI`tU(j$nqMIlt7V8w)wR=u}m5
zChzo3R>>?SSHHO&ya%j=xZN7&xE82yFVHns=Q%Jb!KmQvE{?B_$C6D~lX8x3@O4NF
zm~^Mx-_x`N<@}e13%Nk-ESg-@Z<v(Ys;S%CZ_?U2f4&cp#We~?+tLOWMW{L2xF@85
zk@9xLeGu#HRd26nLk(8+`E&^LSCNtF#lB=lgOGK9pL+Yr{rq$EOWTmUU0wr&gSBU0
zBWpyPveH|@(VXIQbvo@Y;I*kc!3)i72aSELbKM^=vqj2V5VeL_sg`Q3Qx~-q>>4H8
zPHyxY%mm(hW?#PdUIcp1&O4uqd%e$aIlbmgJDMhLeNsJ!H<ASA(Pt?cyG{1{*Jx9!
zm9O9In%%A&IT(%BR`0Gu7dHA|8R7#IT)kgr7{pQ_VvZH_;+ygYOsg$QUxu!Bx;MT+
zV+DpIcm=NvViWY{o#tzRR3d3nkmhF>wFB_aPOaK|!Ljkxvjp`ZLKTaW7V`2~04LcZ
z?@(W2rQ)zJ#aLe|X9+sHWivEW*26JcSWzQ9>?dSPe9>Wcw@oEgd!Ia0GMS}45)7m|
zO-?pO<wE@pt6?RGZ@4Rr?C+tDIx*{)K{ja-V_UVDAHU{KECF>rVlh>)XY>==Q&c0H
zN$adt(UXaLZ9Dk!v~A|37DmiL2;cQ2Vr60iWB%d|(MP=$O}|{aeRH)$u#VCN=K0^7
zByqiSjhgIza7I(S`FvnpZX0Ffz9SpB(+~FiW7C>Semi;N*-oV2?wS*CsXl!Qmmf*%
z-Q_jo%6-#o==SbeHimUZ3q_}N4Nc&Ya(d+!RK<PFgkY0=@4D)v4ezKemA}+;`SJ^7
zr6nW-d843pk0mt_(uAzGauNBZIk#M_nWl#Q!BC6Oh>}6i!p@Uy%K7o+4N58GKJ9)+
z9u1#B7k=3c$!3NIDGA7f63`vRFzGpsT1i*7pxl7z-YBGBqvjKCygFG%O**zB#k800
zS6fp__OWp<w12@(yTyxwb8Cs3G)48-X)E9fAl9>OlXQE&=H9_U$cXIf8(mB!rp492
zu$xhHab^p|_vRPx3HpAjIv(pWoV7O4R=pry>5x1KxrKeZ=H|V~|8`Yd+d*j7w{6z$
zy&%QKHCdjv4TUx{-%+|Hc}*EjV8;<?`>f%<>e=QixV`;H*6g+9gnH<%6*VzWXVIxI
zi3Z;f_l)^zw2ax;!#&?fv4nr)PfH`X!7)P9A*gNdYY=$o%x==#jLdg^ytwCN@BR)B
zu)MTW1`i>-?6pqVS2;RXFV>!))@y;)WX)WbcrI8_W01n>r2bpa+oW+Et%GYY$SL8f
zG#$>S%Sv=`vh?+C<1=lv*?e4^&#JQ^n;%%g(7994bZSlG?adgx-DKCJb#6HguBX!@
zW3S?+wRs;}Yw8Cd>NSz7*Ve+eoh?Vvy$Of9<-*BLfka&tgK@r|*maxP*)7U{&G<v(
zF$ID#N|s~Tgmu18mu=T)WIJ}Tp*(H?_nmiqt^oHv{S&GawtL$TZ|E(6sxh>_<RVn5
zg3;7c&-f8ckJ^AsO}m957h0UD17>Q|elMP>tkro<nqeJ&lb)%0dErbNd)cv$)`NO#
zIA_U^|Km|g*W#4k0#V)qQQiVkejyO$?-Yvie>M<h9R|{-`TtfRkXylckMeN}&X9e9
z(eGS2_w{y>6mJ=UO5CrDe2Tu?i>pQ0Np^5Ai^zoZ)9&YFSbfkcs?pUw6oY|3D<%{+
z*@@Alvn2xrQi}FztX92o&6<B+Q}55Mwb~s-QqkH!_;CI1wblA_Tb&Uv9y!L1&!G-K
z@PrH>6_NC+07nI>d5z^%8Sz%)U5rhxWKf*HqsBYyrjiYRcew<IaIuy9n~TqPWf>=w
zYp8gduGyVP*DAK~y;>BQnBguBS#lh5wIUVR#jm-Y6ty;VlAmH9Zh?9JNq=tfzi#or
zZt=fv@xQK#|K<L0bNsK~w|jegJs1D${@(pt{IB07{uiYwd5*Fci18xo2gE6mr8D71
z3Oq<b_(?}kfE7P_f`*SjA3YOA6@r+|3g(9t6TeMJuOq%(KA5e{C$n+67#9QBd-w@U
z&c+J|W6VO8kO*Uk#q=<{I>uDnb5&tAEV%hfA)4VY3E~+|P{@=G7gPF9N_0+&`UE37
zk<|l%;f3>ysB7_lG?6vnXQ!KXf<N)z(USw}55EK7bYDd=Za!5LKX&WA5PFqVOoFUh
z>RXQZ=hxaZ%W1)_AmQwM6}BcmsxoCITK}~iz`CemWb<r1&d0>E16diV0*DiWJAw*<
z&^lW#QbJ{rMW5qO$M}nU>0+E;v2jYgqJEHX<UqAj5V+I%a;z$%AD7eOLrTH0@?8aE
zn9WD>Rh#KQc>T2YvcNp_7Z6f{ECerHmIQd8!j46$|010&=hYHZCUW^#>Xk$B#u2|D
z>mXX3@><J6KRxD+rW=ogXSLsC4cV+5uTly;o-Zx1z50{?_5Z1Sedh&+`N^;0-QoiH
z^ZYI`^)Jed;~an#RErhr_Tls;^f6ghd04`wfQtQJ%VMg&Fl<GdLd|)aqOG<)V5XBK
zJDoV_Sl3Y^o9wX2CUF7}fd(DoQBc@`DyI@Y3PN>jMK;FR8$@#BI_S_=7&|TBRS<75
zSwqyRz*Cq$+?hfXAB4Ph;aem@$)*dD)E=j(n&m@s>s}aOV5ZiIamkm{VLr!lwft)2
zDlOdfMVl1IUFc;a<r@ZuEXrWMoC=68<<pam#W*&_RZ~OaT=xcIoH=q8^l$j?k`_=!
z74Ssq0SUbp-WN=_Qi46(4`3ule5*bC0lrfuuuy38seBIuG?G-y8Sd)b)NbJhpOzC0
zMHyaZB?0PXQ-)!Dd-UYl+X!x{`JjN6pRa)CK7*e#`W>g;qoS6i)BuhP-@*m=HcEo0
z`FVyW8rV-zg&6B>m?MB=3S+)3NA6IuMH7RLUqb9b;o;<ht>H8nz}lu&z^R2NCHs&S
zV^lC(s3W~3c=h6VAE*yp@i_7q=$;fDlZjQ7*KhA7!Q<Dj_cdbJ>tb|~2PUPFuij6B
z7YDDQX09+J2!gri#cz@TsH;&YeDl`$APEF1-e-)nwIv=VNFeuF;VZVkD)d4qUG?kV
zCP+K?gJ+XjJ}3D9mY0nsehV}BRxI+MEEjm?&l;Bbv3!n1fnR|YnDNoYDSz8xgsje6
zI__^fc-M5^@@kK`QA3wo8E*h-LOpvWqy9zUmc%BJp1>59R?I7c@`xSuJY$!^CZis~
zB`Sk)M+Ury+%V+N7F6bjNhI8;o((uxRY1t22I7qeR#b(KtD*s^^iex*P63U)_trRC
zqx^y!Kt@yAM6C9XGzl2tZpp(cBFxT=--jwMiVrzlp=eqfWiv7hXnaw4j)<K$yk{Zc
z76xSBp=$LcVWI;BkIl0WdH2;6{s$sBoR?MArCZGLvkYIndKIx&EC|IsToxCXWJ|$x
zbNyZyU$F>`Jqzzs(yX~?l)--iahwmzaWSNt-*-vH2KOA?HacdXtmIwgsdUaikO{!d
z0<39i<#Z+4m7N(C6QFjZhGXzlTmKOM23{2Wh<IJ@f^xaQdxN+^UVlZ#40Hu9gCMcO
z1W0;DVB3+{BSSr<y$p@55CSCx9y;RMbmw(-cml<NL$ZlR_m3sWDXR(>oN<Z`NL<xG
zwU67wm?gBm00VH5%e$V;eQ;_9X3V}uvPFmQkLX6rK~X&mi#KE<5gbrR)u@<bFA!LA
zI4@?Bq0oKl$R5gak8QLm{PXD1Bh4(qYgOVbT0iK(RCU&sLIcWdn5B9o{ouRrlJCCz
z4h>4O-|K3bQXP5(Xaw5R<sS<cXf{)fD#}%rz?`+9Y|-urOSM33RP$8{ZHtu@uu(?K
zIq_z5X3~<tWN#SQS4K{%mpI8fQ2a!_gLzGgQu125IRl?2SHQj4H{1tsi(_zKA=^Q=
zvjI=nw6jyi)p9aH(p!hFW!DhrRFcjAt*RVz+E~n<;C@S_t;LjwD~YaVAq1UM^8f=O
zLW#h{^MbShNLMj-QL>H*!2~<Jy3Baqx&BciIBh5_fgcAewL9xOBO8rUEdqwhYWN0@
zrKfcum6!RX#tPzX*M;o~eEM6yVolagk^3rtzZ4<M;amL$-sRdQ=lTP>Hh3DJqqP{<
z`mOG=o_nGDXaGJ{%y#TD#(g-5&pJLK$>u8i9oU&5DY<M&pP2+-1@0}~@5S;yl^^+1
z=>v85RbUg1K2p!vb4~^7S{!()s6nwhZa!KH>FY^y-f=~VzB`A``wlzacV^OcG;;a6
zer=9x5U~#T)2~gnr$@-e^yU0}o{RgvZ>)>+R*i<+Ke)bIo^`7IwjNeC(0h8YEXLFr
zD*`p9FYa{Vo&iPC^)w2)kLZJ)F4^Fgu`WiB!!Qla9cn#eD43DFijyRXnD@n#k2DwA
zGV&l2xZ=t6d~2|4P8k2Q<0r45AH8}0>ZMhe)oauL?9cg>06T*V`Y<;1ovs^2eJKz(
zw7yjnN}m-{Xnm&~SDk$qn~lPOHTyCxC9bvv?$)BSQZ`_&G|5_Ze9D-PntT!EV_Z87
z_Ff6XK0$zPlndz2E2?UlR}EFd^@nl=jB}CCs>F<SHD?inST#AtTGb69ay}|V!pjVK
z_UX#Tk)#Jzwph%?e(dP#I+4%q&S<W0v>n>?C~7U4)ONl7LwjL*lQbr*4Gjezfk`Xe
zz$BLj5%6!lTkx}QqWv@Vq+~!#Sm7h|;HzpA#^G5?l|)gdVdrQ};r6q6iT<*IoCIXd
ztP?DbDA5hl&M%Tq<FK|EARV<#LZ?)lOL*ng(^vb<R~6{XSPo!UuVh>p15geZcY?$0
zT^@A!RCSaCo->EF;d>@@A^Q~ii^PGv9D#eBccQ?g9kqLd)?6YM+xsm!6u9nHw1(ao
zGFqL~I5(rBD0hIOT+H&0??7CLnp30ZrY&iuRnzdIl>7il5whL|$$K#6`8E32Z|mQT
zdL~yVTQ17<L-9c=TtnYky-F)G|6H%sizgB)czvNJw_cd36er<&E%S|kgMz}l=lTW7
zP;!HEq_4P1N5|(kSjX$Eh}>j0Uu5kukVL(E41RBI!rb>yS(rjq5;>_l#sF3oRT~f+
z!jnOGeg7L#jecmfXoLLM%DFhxDhW6i0z!cQZbau%k3xDgMdC*#(OR-}(O~(p)|tqR
z7jG{0y`WP8WWnB3*Ryv^4Qh)EN1gVo56>6ZA#txoim6oS{&0M;GiA;aDq>3VE5Yab
zLmS}vV|@HvA;_-ZhR97Jw_i-14m*?nE??;i?RFA;7S>Z1W3?P>P5HLxB{@ij__hR4
zCiP(C4FXrAX5>YsGi{gvb3E^h(N!G0cQEd4*(GX-@n%A!F?f|lHzY{U{w{jqma{$#
z?p`n6EVkPcQd!}s@(u7Ud+mxydz)&#Y5)DTtC;7M30I^o2`=W@C?EN<Pq*!{Q;-ru
zyl&6v)B6Zxc&s_}^B7IJ8e%fep@ClDE2{)4QgV-)q_iK31<Pn!tPo5dpL+`a2<174
z##pOc1uHX|463C#^(~2T3xhPA7{o#7pqSkPMnb+L*i1vdWw1N1lBPJf>tPMJ(C>5L
z$u+`6%$z9uNW0kjFjthD$Qb27GU*s>NoT#^u$(IrpfSYhu$&AqsTqlGYG#;o{)kRC
z_!^mJGBYA)1wEn57n&A5B6*VfQ~5iD4uB+wdAbFX9_AlI4H>sB+9k2?`cieX@O)z4
zcqb0M1PX?<WhDsTzMJ>tgG43e;;U15@2pNA=|y?b?fD`X=%Uk*Ip!s&Hm|WM_nx}9
z5jWl18S|!j#deNqCiRwL`{#iG&!<}ehwj`w(3_M?^=XkqIX4Xrkw%4T)DpB%U03KL
zFs#c{jf}~OzX_JUDCL+?4D7@9A@MFi$_K|1HrhI=Q(cFC@!Zui)g*15ET=`5d?pSL
zjUWAj*wSSRPdGcK|IwF;QdEQ-kY-}D>R*_6e?YNnEvIJ=0@1o&l;sR9N#*jw47Ovn
zJrH__JX_}xM&d_Q6HsDzf0;f)rTm+@!M=X-4gRws)!+49Wq0W+M+?CB^yio9U5Dku
z!91>+e(eq;nk`B221lvQ#V{Q7n5)zFdH#M`0K=bLTNI#KY*6af;m(@0zrEdEd#sG&
z3VlG`ys{#N0+vMta*!?g)r;c@hrk@HcL=UF+8P8Y|J@uz=)<O`%}pyoub`Je3M2;c
z1V7u)revy<0-pKQBk|VS5(!wm5zgcyshk!jSzc9E3Ov1ETh`@KZCQ{Z6($61BDCPu
z?b7Sun-?YU5`AycH6=02Z2zv>;4_u$Ql6W;Ybwa9VNtleqG!g9(Y6iLUIL;bnBUX!
zz%aAEn%D6lEhMXtZSB`CT72P=7i(%@aQbjXLLW!vg#JOA;|j7U1NLj8+6IV-PBR(L
z1?tsK>L_X_B4!Oy(@#!S(K8SF{rU~qfP)BANvb{Cf~$nSh7`tz8{-F&{#2{h?`}3u
zS*@zEj!J+A%dxe<+Sa}!m>N$OvK%A>!JWWTQ|<eHu$V@l4kBSdlVc)Q>;&ne2VYMy
z<3A%?!C!^39t1_GK+q_asEK#<Tpm1sC4w9q)C$+Qc_X{JTi1udpp(P^Hh2D*Qsdb7
zRztw_5FraG*gBSj>?;jbYFE*MKgsKA>~tq7=CyDpO(ogsd1sJ*f_m@uK6lO}D+~{s
z$6gAi*XLOCwN|hNfzY|q6!%U)T?>Gf*9FSfX4ijgq+dL-X+8(ZFI0~EF+|vQ!bk)S
zYbh1uW|&0Su<$1-!u4<w3V_uX5k*y_rmb&j;{98v5<wab>n&9mEsM)sjGSzc7k(`V
zRUh$G2ifplJ{|Qtj1i;4L^@Hp?lPx1hESlJ*bx$Gj`^ElTd%7{OrJc^1lu{fI*W{G
z@%9S%PUF&A=xJD}mW|7*De@me=ZS2#W?&oZy-w8@INNr*xp^J->})(aYv<`~)AG@}
zy+z`;n83!pUV{K|`?Kzf5oRvWF*!ZIyGlP%RQTt+cxa$or#M-@zO?LvHW`(Eu}IpQ
z6G^l;METyK$%1`Ek{taDkFE6kN;Tz;G|9TQYS(`m$X)xz)j=g|P>t8_uSU8c)G%Lb
z;XIVYGuQt^XvoH0xYn9nleCyzl<@C|!phLT`pVprQVz&F8pHUKQJ&B6LkJHVd#WWv
z)=ydOH?sFUM$HD|%j+hNDHkbJQZzxaQe_Q+(8pgf+4f-Dsr!v+`&RH|o|CDO)%J@L
zPGZHnUjrGc^=gnV1<iMZbu*lucGwi!Ia^b);dQWi-S4r?+Nuq|gw3nwLTl^Rt_z!2
zO_zzh-qyCDog6lA;l-<0PkuVag6k(o_(EJ`W<-g$s<%v!iEH$m3v}E(xL;p)>{lHJ
z)e>*`^Cc!SMX8JcDSevcEfd1+%l>F<X`P6ewo^322vsOKwl<kc85)w0HAejDP%E@O
z;sjB0rgZ^Uwi=Gpj2S9q5f?Bp)r>c`XSx<lD;qf*(Or9?5hCE2BwH(kN%WZ>L&MxF
zB6i6fSx&3GGU``rz?RQ?MJjYbL>1|)QO+1&n@ZgYd_s~Z4y@>9J@QmVE6odX&hy!v
zuW2Mb$9mGWLz1c6nupeGY29M~$4tcEH?-`z*Xcp4>X?mBjX6zM)|Kd|RId8++0_C8
zATd`w;nKBuqfDoNPgQzv@xJP}b%E4T!bKJ{QAJ}eIoDams=aoH_|!g@>Cp_cbB&KR
zGoU8{6y<>DD}%U*6n+V<KE|#P7x}yO1bU^?t`18TZESmui?DgOxY&)U!Fy>|zk%t3
zo&p6Wp8PBhoIckATN)%*1mi3gt2V7=!+8H{8QY4fT1k(ClXWnxeRL}J3*MK;B$%8^
z4Yk#-?l--=oB%3Zn8q9GIY(_3bWQnuXE@~ltNMIg!{L5INsE|^LmfWl<2Zg}e!*kX
zj|1bjn`Br0R$f6;h|2U#P(b`ukc~_9mY*`cpEJg7b)$P;x=q~Vwm;_Q#x+S}<5=ui
zc!0PG4i2-cVzQi=?u9Ii)PuZnj^18#yXnc_XhdQ;?zvc@2jg+W+%IP+p;4)%@p_e4
zmN%gt0j@A)MLnKCKeP?~VydN!uIsv_y<g;;Mz#zJCJ?sM$XzUz?9%e&C)XT$4hw<n
zKx#w&$-e&OP|0(fiC*1B!HR9c^Nfxvsqm_r*WSsX8_oa;rbf_eY4B3NwA#b%k{sCO
z^#n53TVMp$Hx<<2API%`!H$jPjm@?r&qZM35&eb)$>0o_7CCpV!5TDCB#Q1jUryPq
zn^jdl83?RkaXAQR>my)D81G8C%_)8@zcLB`B6$At>GLPgj#YrM*A2^bH#B;BBVuRs
zZaxA5bxfKG-eDU^f`yIX>>XC88x4L<TJ)BRIfN437;CekziS6a*SCjefRfkDFP0Nx
z08OX<Ym#H+PuCD)YS1^)^#Qj@G|HvQa>p+S$%zo#YbH<0J0d-qCHzuSRQ$o_p`92K
zQP3eYu4G_+PZlLRgEqxTWeOV{<3NjQkxz#}P;pdNgqyaK^ZoBp5gKu1#(|N7m)-s`
z##m^O3V%F5SE}!rTCKZIt%i3V9e!G4vo79c?i~3mqdCwH&}zUO@Tq2NLB2xv<)dce
zj=!ninK#bGaR2oP19vHQ1<O}zglgHtPyTj36XJl+H}d?hK_bFWvdd6lR8BkM*xk8v
z*PDIbaihyvU}VCKq*zd8l)_<ci5Z!izN{=i%4F!igBe-TxF+G(_1oh9M2q|Wxp1}N
znaG;-5xwv2GAJER&d^>^7ufxISgH7P(gD~tWte}#qZ}O!tQ}1)#;O{ffORI)VP<DM
zkjN{Ii&q3&B~#Dn2T=3Cb?kD}v=V~meBS7t4$o?=1hnv*@(_kJZ)!iArS_KuF{R{l
z@>^^PBQayjSsm)&n+Q+?8SL@fC^h{FJhWQ#1BhDXJPv558YHo(91kY>zygqC(kI)f
z7m`ml)||TCWd9@1%8~qQ*&Y|}u1Op;VHI2fu%<d#&u@tI7KZlpX-%Vpu9o>o0wdsZ
z;x5g3l+Te(l}TPs#C>c)?=^BDoEkONh*S=`g)=FAAHIWG`Jl)uS{M0hgKy*=;Tvg|
z6~KWh8L5PSx0^$q1v-y6Hc)R0Qj=UkIjHjahulcFBk?OUGeHO5wlf5T3Rhm;smk-z
z<8)X2W~=0;i{#yhb|(p*Pcn8Iu<hu{vxXH}2c2OAACD)A3!@rZ$)W^UjoJ6+wg&Tg
zds`A|kN_1ingugj^mcc5V_@#beHi<<*YF<kbo3gfw-_D?eXc1GaPyMZmg`&Ju2D-i
zo?T{X4+p*bO)S3diLaYi!}-^s9^OH*{9#Z05c$_50bUZeJ|)l;F%g@saW)@y>Mrmn
zil6;{vq&?&-`H^)!7ULgiwuK<Tyk(ZgXB%v;{%@W$?K*QwQV%0u6;-U8*P-@G1*NJ
zO8#bB?Y_0s?tk5lw%HCsBd%u`84FlLrT)Sd3~s!FUug;D8s0Z3U2k{e85<0%_A}Sp
zz0sk2{N#9}L)Y8g;MDo^2YunfDbAp;KHXs2?Hgv}X&=Ll`<u)=LI!U%@eekmb}zE3
zDqlcvpZwHtdv7-2>SDC!0zdxqQw5*@WotXhu6S`C+-z~+B(`+55oZ32=I6s;+|p(p
zrT&W=%Uc_(!{3K8cf-8j$tTUYAr8tE3wyLJO;9!j^+03JFhfTqLIZuVw*i{leuHBJ
zhf5P=se>Pi;-&lcQw`BMrMwe~QN&aKbUb8xhnu1z<Vz~J+Go?x0>ruSB)*E`ZZGa7
zJs-*rR|bDK%^>lfOUw$VYj<;wnXLs^Q5U_Z=E!U9QPwbjia`qQW?095htddkya@-%
z-&kbk2%#rB?O<pGgiAOs1h`$ym(`-nbJ&Tk`Ij;m3^)qb_bceYip&aM#}sdgm>Z}w
z@u-bRz(jW&&4=da>1KnZvNsr2+d9hrC;$W0NnABE&73BeN;SN5Vq*qa&+z1~8lKr1
za9fe#^qEB;6m!^gQ0vHru0Nee0QD`Nr95XuXmKRTIVqnm5ZH{P*tFhY6Z?VJdC8t;
zTM6pvu~mnNd9|xh7qv5%Hv%(HK|LaZAM3dg14&|EpeG`BEM}~f!OE-yUo=Op-nNDx
zt~i7Macycy9@YSgX_e0@bz$>#!~xutHrvQ*fwa{gqx^Pi#b~dhwzmF;vLf5v@YLl$
z9hHLu(CGJF3(9r%T74>G#^gC~m=3{4hg;rvm0(oPH<jz^II0S>>`7Z6g_D#qL5^PA
zDZ<ZhZRNMN@>^T^t*!jlR(@+MzqOU$+RA?yw(@KEWY>aW1+Ve!X?j9`{~Y7deU6Zt
zwFC)XntmYa9Iwy<q@#=k&NhclWpkB}4t2G3B1gsfIqW1eS(3n~r8CIcf^Nh>;9zd#
zR{YExLSt0>^No$-9M%KGkAMt}v?)sLX>Xht#m#2L&hwa(QkqPOeK*TlSzVfJ;gsWr
z!dFF_mzqVKHo=Wd;y-1Sg=-UQxRK9FeBoUX2ReYU?%2?7CHLHov3d96W^rMO-3wno
zc@J#f6OtF=Kz$@Eve?2S!}kPtlO7YRLhsu@HI$PQ6O}NOjJ|iLZ<rNU9~<5ift2*3
z#;ulb`n;+|S~1@3bRa1S2z|FstU^3iN}JPdA9(DUgaL-FW}KAwBDaXY5aG<C=1?_+
z`h)1q<&VlqE)zn~fqItB$E)K-X#{K2pr21s%&PKA<H1>>>@&6ffG>efClaHnOb?@+
z%Ogk+F?|BrncyWY-pPL45AN4mdjFEPc7dWF>fRYL5SfYd<@mf9j~Pvh>!Ft$Tj8hu
zz|dq)X<{X!KYcN09Sf*QssZsKQzpuP@3eByMHs79<?-z3e~mw8h0L%j_X{`CCW3#?
za>0tqOoBMS1H^KrCA6TNw(3D4z-%mvv6T)l8xEInHdYMmn8B=!e$zoxx+hGhD0;*!
zHmbUqEb>J;yevxrK}ICg${!=H+kVg`rPg_{t=LF)qD;M<wW!C<g~jf$cv}H3PP~Hv
z8P|Z^s<nFbWh039dkNj`jb#*cstqt)>o{g``N19OtH_}^^;f(o>+68A{heORnX3!g
z8(nEO`89$oB?@FyY$XOqdx9#DFnp4EMYBiE9pFTs3|xrPpkf5E6LhuL3$jr^aFQ%w
z8kKZhmctJz{f;5Fm8o&OIou7`r1gkjT9ec;-n<^1;-*Oz(l$xf<T)hV<EACb9(bfw
z3ULn-iZ>ZqKj?@u3_kmdx8v@FC+xt2_rxqY$FMWhniJIj{k<s3XH^sAlW-9f#KkvG
zg9@F#7oo0H%=4<^#`pvhBvmA^B&s&}jykL6uc5A`_SdBVH2nQ&0n5+~om<vQz@YUn
zz6vD9hEs$1xq+6d+ncT!-z$t>FOqv@3jq+}Fd)(dm7Om*DBo9;yrkmbBY|elygSt4
zDw_8r2rRhais~HgqHt;GJrhO2Bd3dvIEBxJa*c@1WmZWxfuZ?>F^(2fvT9gbP7QI#
zU!l>9bXrcIPtmGrZs(>hH(;m$d0u1!P!=Ua95M>fVZEUyWG6iKFjrjDX}8AJospx%
zUMpS@j+o|y84$*Ht-B0ZiW`{4citryuDC?y+JK)#4X#&$2A*O{QcO(+1EN<sieK1w
z>3d=0%%&imW9+Fp_m1}gFBEe$XU_6*Wwl%0CPG|P96*OaaRvCSR)dEa)oSv*(^8{F
zl4^BXPg`m+NK>s2<5^1$CT*%!VLmkLaMxX5eJTK5sm4VX>uCU7#yq>oT^;6@$lO;b
zF;FX}LB>|w+Xp^vAa#hXA-38bb%&i<1c$ly{RY;?pbL%Fd5W4ez^<pO!l~C$131Uk
z_q$TG&FkV&qsWXcGrh%sDj*|!KsTb`d+2j-+ZnkB!rHFTrx*60bzXDw&%=mtLq-?K
zw?c!CgqxfpWW+$N2b&O`S|@zE+Td6xM3$<&Cf#ezX9EYJ61ERis&$(~p5}->MNPOy
zc{(4Qw$-9$y!6q%p2u^*Lw`tOX)?U_gBCjDlr*$dUV`GhZfNu50F2%n3SX|zB`$7l
zN<{Ec5$9#4kI@Px-wdAB6;#C1vP>$;f3$X!XvOL^w~G@qQ=?<TgX8G<xl|?&5dM5p
zeqb#3(Gqx6Xxg#qHGBIoR6vcbG&xVLwrp}}@#3*{1eCip2?n27nSHPKdEa%6)Olo$
z^szW+25=dRuR3BPTbK#}Z_?LufjSs`(#tM)N4IzOnGa(_(N9S7J@XKK=GhIxV`-x%
z_X+fcYp7{n!m@--_3a$R!TsoSM5##qQ~3iyeI1q~t(z3{l<x<hdR=1+?<e>3&kghY
z%d~T^)9q|Ou-%u|AnSGdqJ-yB(An<o2KR!FZ=*N#qF?KPet4G|t^Hr7cjKV)@n_<_
zr@XLe;#G8X|8DH$w=-AUU(d@jAvOY66wBix+1>^Ia`xI|NxjH+d0t+BEswG9Vmt7d
zUC#!;FQ_BK0}p)KdHniyCk{F&$Di%{Af+zifZ$hV;|1K~gW0%TR3ZFK885=#UL5px
zeS8DHzbF>tJiIGY!h!^F(H$4>@^NulmZQ53rLYO@K_?g#bP*kw^L}TVeON({(OdXq
zF<M;q_Zq5Mjk7^M7JH+qtt;`;bH&5?-t<qP9p^EaH{seZ@+<U&JHPuMzx+b_kNzh$
zI@X8mC%yB}{{bu60NRQr*<aZxDq?ux<gO|E!YCwW^AvVAH+uJasRK41%6~NW=a*mB
zb*N!8LEGVO^6i5-*iGOs{2mAACD3==ioHL^IyFT<yW*6`2Uj&xAnU6x%a36rt@-0_
z^o3rKWXf!EfW(Iz`abfj5(t->5n`BquL(?Bl#=jML$mAL8TmB_C9s~o(O1X>k3~(A
z`r7ci#BB9zoh<K4Eb-d{5V`tdc6S)!UxWfU6vo_Dql1IeUspokN@>7=@6cB;s$0#>
z^i%B(r|S_S%3EmmE&EBy?A9YuvBBL8Nb>|0o%YU1DmdLcvuxY=`(x<UB%8*;pOZM)
z+ui*xQQjERo6a?iq4!wEIZ3hC_EA}az%<8v!El?+=H)EA(C%&(rrCj-@umZ2dAve1
zOZc=zR07wVU{qA-JuTo&pN!#3g(Kbl0VWHp+KcRw>{%nB){d8>IYMARrh)Z(>j&mK
z>xfb6iDZRkHoX{|;0{Uf-zDtBY%G(%EV<h?2Z>7<@DTRN-u>P1VFa6Jmh9dC=0SJw
z{=;u?<fH;yFxVHshV6b4$+a9cn}8%-#LrU2WLe;!q1w4-o>U=3il*tpx7wGDx-Kx!
z1aCSrEl*zyWa2umD6i8!Xo^HQC(<9O8G3aUBi`&Rh=;u{KfwbYS?Cf~F<RPg!s|Nz
zT9)CiV;Djg4p@%&2p)bKR~0q;%_{#t_OBqqWu{d2UTfDC4L24GUAS707Dd-(m4uF`
zm6Rccx1CRT+zMXKg=-B=FJdcHwo<@s-!Aq{`JsbYCcI`n9<xTNm)w^Iw^p6I6e!}a
zLfz<EZI7zr&5>NA5wN;?pvjxq7`7{I=`b}AXGfu^2epV8lDxMcY}E}PTaBf@4|+S*
zs$%0?il#U;zZK(hT2mij)VG`4$nU9n$d1>d!Kxg8$YqNgST?oGcD_>Ow8Me+xAuBl
zHu_e2(!mSU)S@hd^ZcWN!BIg)>e>cwpga7)H+Mn+KpJBhFFR@5$|_jr=}H6YR60*`
zh1}wjye<kW8rZ{Vqtyh}9?onZ@@^6HIxD6h)I!=j@DdgaZ`mLuwD9G4ELn+yeUhYq
z3*H%5voTQF#dsB)RVaLSbeYJZ97g!EqHzLrVQz*7LI)0g8#xXRt&Uw<>R{zVi^Z)S
zkFsV_^?Q<H;h4+m#i(4uoJ3o+S9^PZ^s3Sf-vM^HeSw@Dttt7%Okknl+QE~}@ZW)}
zbCsmmpda;Zu4p=O?1`YvIQ9q#wZXl6ccoEUPnu*bMFnP;eyXXa;Yv=01D(XCv2`;1
z+(TwDLq<NCrBVUv1S-N-Z+fJarYcxfLZrKAQE=?NuBtB9jD1@~pxZm#CwVpv<&14_
z??pQ_xVpyQ>ffk9<SkV$#_Rdg?WQiFR%C}?Y@QU^BizW8>mU@hL=hAgU28Ffzsz33
zS^9NZE#xd^Q-t0nyRIQ1gh(gcnqaPHIogr)<RT$tuPKJXEaq@LFv=Tl>G=q$&7_#-
zgg*u+W<n`>kqVC@pK+t8rZ65gMqm0DX967G3zg^?ntZ0D8${Xf8l8b_W|2?KLzN1u
zK_n(Tv}1AT@%O<lWrH77A%5pu?%}?B3E34nBvPL+aj`~`Lpfe~dx~tpv-eBKn-_Za
z5_)mu^x;h^C~iM^xf8zZ9!0@6?4;lx_0DPUueeN`Ht0DeMq~BGP`6fUE}H(lt%_Uh
zFfhA_cGdNj_sHV#e(Z|szlk*NNfgTRxJ-GpS;)Z~Sp`evf`1M|p|y#E4@E|nmtipj
zD)2<+^b8J!;Cd5zO&A*gN%9KJ6ibI*(Xe_4EXsU}DaREuzI=s^Er-k@T&K4>;fWk;
z4bHEYYSt5u@KPfYip1fhg5#yoMcaY|RaZ7xNaHA9W>p4!q1aL#q~5gQE-^N!9FICK
zF|55Iy$ov)T2HXAn%?eivfB`PpkNsLftjq!x%8dUxKm#nj3*7SH$CtgUX@cZEDGtb
zi3wtB+s%!?z!?xcY2SD|oEq(N-ht+lgml335!L3GiQ{lN5$q<LSghj6`esOgQ)&B8
zN&TO0>VG-Phs6ZwPX9r)4a*nRa#<H50NR@^82D+?1GR-<2HvPG;rbqLHMl7Fg|MlU
z-&scC;nY0p*LNTa?C>AlSUqnP8#yr2?Y_P-xYZgpOl!AbW8v6prWPY9t>zgz@|W|)
z#x_d7_P^VKtFpm#d43l+wtX!jlciCh$T<AT9o^@Klwk&qyL<iKI5UCbDFxmulBnE9
z6nXXSNBbseK9ugD;-LC4N{PRm_dD5qmJQ#T@-5{=9;GVZG?aEyFRLS54}Qc9;#LK5
zP^V76A^XvoRoDU+o-QvK<BV~H5qgO|cdmh{#SRYbsaAf#Xvr-2v*;9Tdg@g0OmqrK
zie_&FO2fiPDQ0wpb6@={(yghMzlus%Q4+BRQfJ!~xA<~-f#va4FNUv(x-{`)S4J~>
zZP&y{>P%x5Mw-3L=To>ng<(W_b)k<zW8kDOkh7_|H<T<LT}k+Ce+_)%i`nMphUO4T
zxh{SE5#>(#nNzj+)EIveQ`Qeo)!W;0B9qsTe?W(ci^U~pWbgNq-D|~7n1#~;atBpE
z?B(6P2Qdhly$4a`ug=#OVF3DZnT?Qx7}`DCP4>PZr7~OCVRW1K2UvOd^kvi2GDAI@
zp$E}*>zgj27Y`ywU49^e>cqJ)oO5|R)<v$*pL3UA%qTk5aYZ|PhTG??9z9mXqINre
zOzb(I4&-8XTA+biQeQ!QHJT#V*XMt3vf$GcY69B5s8S6Dc>hK<Nx0N><pC|Uc?GyL
z5FA?o*~^SqFO-9hw8&Xn32RdPh+{st`@<K{P$`bf(h!sbj}lUHjY6(_TwVab&aOkL
zb{h(#T4S^Wpb#0o$TJfvvv-?EexXw9)>Lc@MDV)KYJ7zS^{u4c{U*G(Ck%D#PBbfr
z@jel@01x!L^s|M^sk9FZP%)-W{(X&3oY!#S{PR(h3uoMEXjCz~um83uT>FAgG$8Vk
z>a-7|#(`eUepCLE@^h(-!glt(4KnUw$u7fYh{)x6{vP-u()ftnYQd{%PFS_DdrIX%
zVqDOb%8d70XXI;QUPNv_l89x3Gizu~+>AShb{^?vE#t*PPY8tuOv~V{c=nbAI+?%&
z+jl%x8aqQ304?1prNkQq<&-^wLhHL>v}{+^L06e_(n)F<c6xT&I96l`r#kiHHTrQQ
zs1LNFq-Z$vW(_{fGsOl7>zYU#sit``YA$}ffVs%#Bk1IZ0?A3arnW|XBJ8IU_*1dJ
zyZR&zBn)2WhK^}GE#-e9k8~r7uV~HKm<z$t<6TG~SeR|l*NB^tPz%=Hw2RL5BlFfS
zt9<VGphv#z#7;bDn9F%GL?iX61ShMhl@jD)mb-4V26R^62wOKYm3ktbyaVKm7}J>g
za!0NK-%^`29XRZz+<vtOW|C=@+T0r+hBG%vjfB%e`>IN%#bRF7(F`<_0n<mFMceQ3
z-8A)m|J2jC>1~1DNtIm8I6&vTGe|!{ukZCfy8+1!F2nV@*wD?enofN+qpq6EnuD!P
zMC#1HA`taQWS^w^V(DQ8i=aE2z;aEh&<`YmZSvX9C$q&W49%3K90kC2Mm~>t=+(^&
zvxC&G$3q`QnrZqjU&X=mDKJ?-sZj|{N!y5|3RMzD)))fw)f^Ul!+oVxNa-878Ln8s
zoZCC$l$Ynspk{R%T9-yzcdM-PlxOO!xuR8yiwsO}%F6|euypZ&p2zB5U5q$aSkt7o
zIMMs(+{y{m0tV6s1Y*VJ$q8Y?SwC@`&q+)6b_;2?X)fFWH@_wKm!^Y)le#04k8}b-
zIa@-&b(1u)Z2TT%bD`fuPprDHz3-0^sK|qtp^7)0$7Ykwx?QNkCFEY*OpqksB4@{L
zRx)aTO<0DIg>5nn`G@2i%Z$9()|XfGlRS_}BM@?}EydTIL%mPXfTXs{ntfsAcW%7W
zuD3DN{CNDr)|G6X8Gm7Xh#s{9D23l{z+PF5tT*9%!|ecf)V)4h5xu$~^dK}?aP67L
z^VA%6)sRxIFH*`iEH4`BRQkHizckHrL;Qn$4eEi~5|MGvuwS)oKFNa^btvaKEY^rP
z+OlJP9cx`T_hnkT8a77b!g!e21|}>WoWk77{?Jc;mQ24We{pnIm&gxuT-AKUGW!%&
zL#7E!H;edYHj_vE5OP0NIbY<X(CDJ8+r?Mk0UJmhpfQr-%;1?koAMoL&#Xewk9BK?
z@0`BFk)NX4;4IiCp3m%`pW`%3vm*L~U*AUvUFAdqc9}~`e6%A2+jGX7B#<r%h-5#_
zCN<oJt>D#^v&!h*QSn7992uGGbY_|!rStNmkc}0b&AK6ry~|k`$T@h=`0V1U3k)(8
zzAF~L{k5GM9XhK;%Zur(pDf?`lU||Nv~XXpyMri=tgD=u`sFrP5=8IEeXm5}=??tb
z)|&?U;-?nonbkzIrYltw6W5OQZ<+ppB@i=KkFD~D_<J1S{_S_3WaDDcS!-E8de-Ie
zFP5y?<Nxc~$b}4xDp3M?cQ19);&8bnc)lQI)3BUkfL(xr1#m=%Xr<x8$OR8eAdtnB
zw81s7!kpCvFdVe-thKny-udFdXgcP3H3O27E1l2b!I6$|z|;?>P8NP`*L~8O%!RUl
zjwC}wRMqs0Z^in(y{3*4w}an=eGE^uCuGB};0bw&1W+;&iXz&LB(zgr=NE@6w-M~6
zj_)N`o<NIc`l0wxoEJlOaE*M$j!{T%En^2UWFsf%txVxHDk%g7V?ZcVPF0L@1?%Xb
zIv#Sg#ONs+2>X1Z5;%3EloRc8+n9Z)kCRgOM8X>zPStA>REaqNUF1k{?ckkb%(v^R
z2$Y4DAOBDu>t^SWDxK8`u5i?_=b>5t8i`T5^(K6oor+GJdHv(;_@ZmnZdB{`AB5#u
zCQPTC6v8L-v-$+9E~bas6>XHc>(xdWZWuO>y+Cptthn_WdJ8Pkb0#lzk6glSHGCI_
zC-izOiYf5?ZZ8gcHC@$7dQ2cy*b~RuOV=NH46&@dX>_`RdSf0+O{8&73J>n?D(&4p
zQ(b8)p+tJSkhc99mmHS*Czu4icN$%(8<+((uo>0!Wfz#|hO(TunvF0=mZjw@+lGjR
zHJwHD-E9wjBYRPuC0kc*!y+^(q#b3nP5xdj7o@pmr7gnVD-PV+pN&e~84Jczb6_a|
zh@ImW>Nqw~%&#_ZO4UWC=To1uS&yJq;jGWPef~s)r&PHQ%TAIy_q4kOeX0j-Ws~-1
zc6ss9206-KW11w}NywR<29d9skcxs_^%_d*n8;1Kx&ckqhx8IXB0cJCRj*H}?bRmA
zt#<k=D}pYwMa!G0Hu`mq#%Rl~^j<%3H3GKSCFU>a>vDcIgZrkV`1luyEyFS5#njn1
zT#H7+{v4NxV$mgGn&tCxry|$6pNwh(;VN&Dr{i)tALjeP<zg|b_IGw@8=@xQd|ZA^
zhUH`@+j;n)_s4I)+v)vr_m6wuJovWJnG^4)G`+|dc(~J4Tut~T+TVo35;IOFwuXAV
z%++7=RTEo9yqTZ~r&t57HkzZT(kP4?dqE|CV<?%~DmAQRAO=<3t=4pW5}6AnQgnDK
zrs({nv-B-Ws@Z1RWiE?r-n7&FV+t}$My&m-83kzh<fS(h5I-vr8D)*sBn6F+2v~!l
zF!_9e5YN?Rxg3wsZ(;)5ZzO#jpa-JLKectMPh(C-c|OArJdG^{5@MJ^PWe?{7wU&C
zqLr`ANV$4(?NCv7u`tHkR*(&(^aKS$1f}R~T2ywi*7;2<n2w5xfW5FQNF)$j)jEAf
zzMBo}Xz$PE==?!?UXTp?k`hkT2yw0aZ+bY>l>Lnb^sJykIBl^GzH2<FELv6)98_Tj
z+0kAK2x2-iksug%+LZF&FLR8bGqN~cpJS&L_k>qIMI4jLU}+7F4K0l?7hrpsrpIG&
zJ*iR1ieACufYeb!*(Fnd(R;0dpGKl_Rlf<Kb)_+HTYP^e#Wa1Y-Ycd}@70MMf#e8F
zg^^51ujrr515zv~E>X>Lx8}Tsib0T<l^az(6DH%R+n6xrg4OBB8D?|c`}e8Lhf))K
z^<&FV%_-UHI^9ywsq@U1A@KMerpwu)JcKoYb%D9B2d4sSGAJ#B8NcXt=66_h_NM}l
zn#<e+#=0qNaZs{3Ws)r}6ON3BWef2%-5W)%g?7B9F!bN~dF02(5LMYzbCKeW*q3|P
z*Z?G_;TFOr$A9nigIyU>bgn7(qu>FD)%48WLovqY54h<i`mFGohn<gD3)b4u5$NK;
zw$;Yn9@*(5b+65d$C5Dfr>dbdCtz6?d!!Da8cYMort|;-QQfup*7Ax`PejGBY|Bc*
z>)F%(>|~<wna@MqAUj6fpztbA4WKk}!-$EgMf%C}&BAq^)ez_03G&u&*225}xJQ2$
zJtJI=(a2~DIJ0x~`xkxZP(9Uq*Kyvtcj|7a__GIN%nKrCJNe^CC6KwDjJ?h6>`tGY
z&Nv;bB>ZNK!<OD%nvuai{QvEJ`*YhylCXbPmH!7st}9YL5F)5oqFQZYOY*M2*s(75
zrgH03ArLSm5rY60044FR_J9AnX9mFFNsyu>QX15*VqzYc>DP46^mH5Cd1L6Bqe>A6
zb*wCHUs?VH{6YmR>;JMRT=-B?pekl$ISw_b=imE)#os#FX2*mGeuR})0#meICCh|~
zz$i`r(~IJ8O09+(XQ3k2Li5JJJ>suyPIdQPO2TQ5+fV_HUx%H*_`e{D=}iNF3KMAL
z&}y<y^+r51LzE{Rm<v_N6Mk@4Ws7)b==k4dv9s?-(PTj)K^9neSV>Rjo2@{8P|P-R
zhfc^7$t0)xY}<-wO-;@#)9VrohYcZg2PLZ77X6ZbpM6_lzAlL(nMOg=^8H1zUE5tm
zFallLUwPxTviBN5Rptq&DP_Z{I|K6>M$=3CY44O?<H1@(dK@iySWCmET@fA^6!1dD
z0}KMY&t{a9rp5EdrnANrTP3&i1nz0U{d%O=d1>eNB%+F}tIS7kzTaH3e_Y-KjH-RT
zqd9)w_$B6r?s&PORPInGfX+obLqj<2rgEzmo29`=YFpL$Z-a^?`lF|f8XS=_uW-D_
zM!V3Wz;)to$LQ=}PHr$V^y+qtdoUJtxI7hn<gT4Yzt(S@i~s&5)m<B0G86cJ<qn5w
z!=)Up7xUp=U{pyz3^bru08`<PrBMf?-zaflOd38T{rD1t+SP2KGjd8kC*;y$q35zb
zc6OeEzhyMn*D}FCy}#xBJ}CUcxXYotv$*@E_>E*ltp-&5^h*6Og~dVi!;H$c*g_%r
zU`EUOU(R>E6h_tLdMOTmG1tpEUYFGlm*S{uyblY$mg3=3y8*p2Ld{t-D9@WT$3^Qo
z^Y0(myZhG9n2)I1@2|nP^sR~c(+cs9hLqVcwj>WLq*drCYC6Y?G~;=XQ?C6S*AR7u
zw`GbZUR2}jn2eyJhh*O64k}_1m8SG^>xFzC^3is+-JdSZ+rJYNq|*I-qtBkY|MvTP
z``N$oz1|<_gBQx}LjYikTVL?`XNv#9_sRql+Mno0{U7wfVDIoy+dCS-k3GqX&)~T-
zXC$J2!l8fL)3v_V-;F|RH}_s6qfrk-+3zmR|3^nh_52V2*7pw%j}8a={?Q@u|Ka}8
z!3*U;^8fRY|AWJ_{NI!E|3T#cpe+CUQu0grUoHPHe*RnU{V(r6EzJLihlh3if2i*j
z<^R#a5$Jz?$^ZBM;3*HDPW65xA$qhND+5h`xrH}Ij+Gn{yxdBwP%?SB#gmL4D{sKm
z42ai@sAHtcozHrHh^LRlRxW1o80=e&V{4AsU8i*8zGp@>_qPouF?`KrF3(9KsA57e
z<}8l>D}VPVq!&PUnIGoJi8bdY!niiQD4xdg$P8o6CcAvYinevU5hHHL^m^F^sh3-C
zrZC9xSb1lLj;(z3$0p3qc2wn8az!l5RDSly?$|V=q-u{$OvgP|hS)%xBn^J58Up$P
z(_XZ1yqC&qFSkz7oz)pS1V(ZJ5DKAFJe&GpdZQ@<@Ax<YC;V$K-{6B|5xh(WC7(XO
zSKb5|UJCv5kCl(_za+)^HlKpzX!<kDnLr!U;M6=$(#x%1$-kyfNTMG5^0D&QlRfSG
zms@8fvO<SC>GV#04->feFm}N)O(MAq#PQAcZveLZ%e{Aw3xyxvpPv76dag}uq3bgs
zf@2^N&o-lz6J6Wa_DiLI`d6-Vg{3+YX`yst#+c%i)_~z5g%)Z=#JuuD@8qPf=~X>M
zmJ`gd`y*|ys_>f=_h<-btg~0q0UlPLoa}3dRlU;`dD#Cxv@Q2fCKe$$^1e1Gw}oMo
z@Z@Bm4JvvbA7Hn8+7DH82G)rf*U)+IRrJmh2eGR`Sm_Ar9hPcMCs#n|BZ8A3Aa-7E
zy<$vD8TMBc5ihs?VmsFg{2qBH+ZM8axcyh$`T2`887Z)`oNTXxU3YuCp?L!oh8eVW
zZQqJ^U4P`~m#rSY(gJU^4Fif#wsVD;G}qtWg|Yo*7l!;-9Q$L)G3uW@MEAiPKm3*i
zghK~M>eS8>_hAt_mlr<C4yvoNLaJ;O7}DCx?u4q;U*mf2u-LtG6Ms!zGgO$%L*fSi
zibj|P0SJ<?r+od1%D>C*T(r`9JG=*);=pye?ES(6H^cNzi)NT~Xv8)vNe+AMgfS))
zA|Cw9_vlGC&c$Kfa3@TL7I=pjFdgPl{Uz%K>eJp$ey6{x({Rch8iu|L@J6u+0GhB*
zvB2Ln#f*u&#ee+q2Vh`~)E!XD*i(8F<r=zUE2jbf!a~c=|7PT+TshuuG9KF5%dP+R
zr}WS}B!P?mLw}_0{$MznK~p@-7?Kb#(coFOro5w<s6w21wwVn7tC$~dEfeJ{y{-zp
z^({jigC0y+V{_5`aI{OjU7B<?-W^SmDt9@&p<0jnO!NXL=mB(k{98ToTe~cq;xGP<
ziUX%J6A{L`hRI^CH<J0%GBm;jE@l_+6LeC9A!rc}1>ieD+HXN+p->eFC2{k(W~xl$
zU(Do)0&L=0V>-?&qS(Givo>0;&)sJMXqX8>$#<9^43Ix<L6%fYQk@%`iNur5DL-M2
zPm*%gJD%9#<<@KEbDB^u`2Y`yl{r71(rmQnNuE}0^*7hQf|ANiv&>88vw!8gNt$$Q
zQYC5Fr#_y3Jmszapv&7I33hYs1c5}GvD+6n#2Jlac3MXlC?2~sA~8EPqci4f)P&_N
zc#WRoYzZIGh4QtaQP4H(wV*`wS&h8ju#fBG>$0lwx}X5SD$T3r7yZ|zb^di>X@6Z@
z&0nVrIR9C>X4C0@A-5k;JR(poV9;@j)@&A_8)IrT<T#Ju`Y;6*4<n0&N=QK}&}%fc
zft-W5hY|b}lCrOz0O_6Fy6~+YvgvJ)Mm|X2(e&WGkf3lxEzwr2NsoSrlcTh(<^G@C
z|C9TF@_ADGf1f}7<JZ&k&suyNuWbKsZ?Iq3{~Pr84*PQd?;)OSFJ|l4^EYQ7>A!**
z&$F$80qSmn#x)HzYdG2}QO2&pZdVy#HB^-5sb~_oQ9N_WR*Mbht#(A%R4v)SvStRF
zI?=rPA>ZzpS1<Mh!zI_mt**#^Vdihp!aR$#0g+I<@OzO)%<UQtda$%3FyUIYwVlYq
zs5k(pb`2)h#Hy|t_)(mMm_dW4dFI91h2J%eW{AsqJ+n&eKh;&zO%w*TXikr%>ZT1^
zGiu;%>zA&Nv~0+r4-)~iqh%A~fgMlUR>t5spvW8H)NS47HZa(=+NgaC*Jq|-%N8S-
z+%o6ucJ<i>BzFCFffj8ikX^FET$k1uvrz=oFq%4TDFE-@;YB;c4DCgmxF+2XYT2y#
z0-NDZOSFrBjk7$%3nk~b!5O<PoqtbnGgZC&oPb*>Nbe05-e*)Pykr-cfGLXa6V{a)
znza9<{wM7}`K*QgSEW<m)&7G=FW7&BgZ({e|2@PbSzWIGa{ZU<zg++2`rl&x7x&0k
zwg21S+uJX!|9T(&|K<9B|4+{0ZY$?tw^eee+bW!jZ;1|bTNMs+TjJUHR{mgoOFR|d
z${mSs2@Y*r6^G$ll@4oL1qU@L#pL=g`CqR8@>$*bpF8_n)%xGp4~y&nV6ZRO{|Cpv
z<a@dPKgRkm@BcsE`Y-SQ%k{so{<Cw~)$;#h_x}e6{eisyFYP~x{|C1J^t~Uxm-e5O
z|JA-ici{g9`)_b`u*c=Uespk1<G<`3$@AX_d0r{K*FDAZZM?{)Oygnidwlb9i$(?h
zg7@?Af}B7HJmZq%<?p3wtahYedPVcvbX@bw&GMRN_bHR4I}yK!F7|!QL-&qIlCS~q
zJ<*{!lfVySC7MAy_@f1w55Nr29ih98I~LnJX{BI>#EVUbp+6~%v*=(e_`fQ=y)dH6
zE=mq|JbNr}PF>^Zg{}Yi4~2qI1RuJYVj%{yraUGLpu3~={>oyjGeD%VnNJhXQN_&l
zCT12&D`St!iC8a*`dd9FEf4Q>WZ7%O^XVMx2lzJ<D<UXaJ9;;b<s!wHgixXVSn0Jk
zhDwdbiH^GYK+!vj8DTyv+?f($9Ab~cjJioESfDk66cmSD-iW0kWf53US&(ynR~U0B
z@XQRV60rNJ{A#u^6*?9q<xT?K3eYVIvO*>J8i40L1Ah8SGi}?LBq@W$f;5lN*Zw8a
zc1+`NW40vB8e+PgnyNxFofGA2U)3*b@k0l5#@<6e8;q{<UkB3|;k9bRpx-e7%8q9k
zNp#+gK}Q*^Qva9hztsQbvxxpL-Uzr8|JUpPgZ;tasHp$<b?N`HPWpdJ2hqyZf4-m^
z7G@bSXmX4wNvWdngzT8;92ZS7_H<g9*bmv*GV_7wib|8d<=&p3u@|5_*aoWiG>amC
zXL>fgU=4W+<G>PLe1hqOLgr99>Db4BAwz%4;>2Kzc`xRW5)(?@$(rFmX@m<@rM#fV
z!f9uo@LzM93Rk_D`WCeNq}YysDZ^9FHWKllvp&Rv2E&LZsR0CFU>k>qkcmyjby&g=
z7zPcbt|a~E+OoQ+Uug%bt_?0z8bDfv_f+vxzv}AX@-nBDLcdyWe6%Z-H>D~nCa5Zz
z1!5bpL9dsZ7Y|$Ca&G!P%<Ua$sW>?SjlxPHvWqZx;XO4cod#=?CDB%{9ZWbh5v)iM
z2vjv@H~k8&c-}q6!x(ReCZ~XTsyZw*J1fxO*)2m1m<WAvIRx;69~pIbGbJ2BDNMV&
zuMT!fG$FzTgrrSI3&EuX6qb~2f{|QuNyXM1OvPDrmj&V28I4y1L+t5kf^dHh6-L%%
z6*PzyK~Rl&`t1((+u3wT@>wN`G|`k7=Cv@aL-PiQ))w4q8PpDr$8utKreZEjkkdB`
zcDV_FPLd<G8QZcOOjRgJcYK<6na*%%S(bHE(a%^0<xcI&#_?(B_G6;7%OSS`Xl+DK
zY1cYaGm4tJfRB99Ts*Mq2<rw!Yz8;w=3OCq@-<oupH>iJq&l#lmH;YnQYDxfq0R+!
z3z4t@pc5#o3J5@!CR4Yi%+F+g8BJ|O{+BeHZ318xbeFIC$!ajhq;i--)^;X@M$gPn
z73v9LNLJ2o(51+80T@<s)S3(qHka*=QnssIBFg(fxf8m4VbeJsPkhX4DyC)mp-p4L
z`ieF7BbxhoOePy)kd`LCPlGZyLlF1Os(^QX)&g$lXUL^av{BH3nN=*HLb2hI5d)z&
zouo+yD`$@q1Zv~n-4)CInA@3LLx`wKg`I;jWILI1Rw0~!LqR%EFpD7W>8*}?-yrx*
z8X3F<!z4dBCW)rU!M3Jc<j;TupGGr=#tG3Ej(}@0j1%_!-|I>IA5ftOO&yQei0EXa
zYJ<^x_l38Lg__Zu17o_>a=g-ep70*Gy!{mE9JLv)0YJFvIYF6BLv^$5cW7lM16O|U
zG@|-<T+G98F?1_8p-=S!Gaz^5?BDB`!-4t;7TsJbL2Tooe}4?3K}{{8f|M+xajKmS
zs&ung%9l@PpN<)Y*goG)M}<!@y&0@g(Ue3#{`g}Pm8>Iu?5#g_)UU2Nq`1ES4U-dm
z!~K>3!-_M7kKv{UFBhjD>#+ibB2qR>D8t}8%c&=9M)hF%m6mTVWZ1ZxfYZRlacB<x
z_Kdt@Ze3Sd4ke~RgEyvbYNLR37G&oJYJ32&uQNe5zz0*|c=eE>#l0~gCA6kjWOIR3
zn8<u+Rc-wi95=)bmt2wI`S_o*arTR^U;*Fh0gVyc!Azg0!k!0O_iN|VH11rsQo_<v
z3RW10WyuqFY!43@*oRIS#bu}I0`jQhgyF2q_&fE(kXSM84L8Xw{+nf;r+JY!7nO(-
zgcXGX7BYvtv=;+^V;Qa0sm1@#WOjd;-|qawqb^>+JeV|%-$YQlh%gXfUQ9J>>Jzoo
z*`9;zGKPOeBBGu~$!wn)LMzep&lEZHUDY>Pv6YUw^8T;%|Cj#%@_AhU|0MGNo%z4v
z{;#g@9~Aum4+aC7|7V^2|0x}?460G&TMo@io9F^;r=6_wPH@(>@5P|i(5?TpmeRwY
zsq6hs=){<smNlhAO>P3@^cXZ0e=>QR?{Ze})m}56<Ak3~P|a=&3(7GJThPAh_5>Jg
z)2j&VqCru0<J#o82Lr+^s%EUDrFhR;VUr{F9qyfSL%}6pH>V>~LqV6xAf^5%^*^cq
z$!A0Lza-1k{pf%D`o3P&{|*OzssF8={zvIx<>6nlt`nt=T|T2d8(eG3Zhe*<BE>7A
zS5wEeX;LvfzKZhqMdQ%)BHRNjUM5|iJDIH;X3n5#>(A^$T;+jD38EDjErn8JY(}W$
z)vKOoLO2b&&x??c@tz_m=9zvOk$ApZS#@$j6-wy4Y5zK#wcx_nDryKNlYB^!($JBP
z>FH2sKCFV!@xy3IXo<r>fTcVm*ey$I<_`y3rSl6+j9ofRV0eRh=u;OR2Bl8Tomdg1
ztR~H~xC~I~jd3ObY@ZfE-=p<7+iTfcZyzj1uMZaUfSh%fW2WpZkRb)W5>sz2#4Gd8
zvYjD~z{N2&kIUvdY4cM^_C0<a=*~dEuahfgd+g_1|K}iC!e;Z(1h^qVtmN@5G~(ni
zgcZ8Got@rN@~{idl&P0oGIwX+ZSyZA-dYq%R9an;I7XM;fMGxO6Jnv;4ItwP?agHJ
z!M7|}(~=E`Ie5SF<MCXi!xtFddzp*AXJ>MN3G%g}fR-_#9*}$A0+dUjONA>PUAi8o
z4!73W_u)VE<`LjVqb&TY0hib*hz0*!4xz77th_4maeA;%Ia!eFiJx^Q`AQuXQ1_}9
zcdr(}%R;^AjlC?2-dDo8ebohqSM$;Q*`JVt+QSz}Dw3@+Kogifgc^>e(1xwnYwWUs
z)poKu!|0kty?fAQ`bkRl26FLyGq9GR7WgDIe9n}Trwvu9@z>m`F#`+cv4EsPiNS`~
zco*VrYDA^80UX7?Pp>4>8LS6aUy6<?mE5)LN{fCg1;}-2Xq<&rW_pz~2Rl0a@A7JK
zHI^1a(VB5j15)Mzw<Kta&_R{1H3ehk2c%#6xo1wj2!jQXvj(_W5#828WMobzW^0nA
zwI7gX+4r6~(NZ8Rf}eFj2O72grEt>fKJW7QxJZV{jSXsLqCG;@#ngd3qJgscevv!t
z%w;4{UEA{!R(?P>qMv)_T!b)K06A-b3mPF(9jJS&Qe5Uxu<MUdqiRjWy#52yaq`7y
zPRbk%i(zR!kn+$p4V?gd+EO&FE#>lq5;KMQBIvm{Y>3^j85!RYfU(<tbA;D_KvGV=
z_{^!9gJCf&tp`$8O4AJ>GQ%q;4$W{j4@lTV>7>=yDT?bdP-y^3h~d1hTr-Ya5e{XJ
ztj#9E1tXQu-<+6x0xtlC`Z29v45a(nXXNu7*b9S3IIl+GM&nJ}&ZK>m*TF-?kyw+x
z0u1+44F1GoJ(2V_MI;34)z9eBbZ2fL*>X^M0olbu!}BNRt(`=Et4H~JXvmf-h570w
zN7g)`I6Y!x;RiN|QK|xCw}BRt{z_m~kwI<~l+L<<;z|?n80Z!R+Y_TNKAV$@p#L@S
zT7uMS=uGM2RwRea&QqYeA~aiqv-zZEF_7LnI2Sn1Sv@r8EH$YMdWo3R5)hfpI%Hy^
zHa!gbTx1^YiNG|u+r<AnE6d4N3_sftWutX$n-oUfSxXmj0Bm}vvUJ;kMy{b@e_7^9
z3g}Mq$x4K>ppveCvk*2@b2kfO_#_1QYH)em3YWjnkJddGZ>W?nj<^X1le`<|W{u(F
z9*q|t5XrMuA-Ms){l#$0VtM|q(P*AulQY#PAY#`Jt<>BjmQS6LIX*Fb>hFSy7c-L(
z{y6_jl1m0zeCX^VisNOs*gjqT*(vS;WmM5-?fsI{L^QQ55=FzQyIg{hCLV`cs=l_Q
zP=(S;fVSCA`;up}yX~gk6{f$YUsuE=ej5UA3_I2u&`Dk{8msJC)32&jMQ)oz&<nvj
zYep|vJUZw%fy@`cN&<iu=-^eWwvrx(p6}cHaCOqeYY9^3{JSO)UBCh2o*|mtskp%l
z6DAG6L-58FO(D!Q4F&#5^6};~@KYB*z8}yA4YWSnXQ8(x#M@Dijg<x574>%RifgB`
zzOiiwcMX6Ogb6XTgeHg<Z+B<7N)EZd>(ufgt-Kr=FaStGC@n3V|JY|(R8<-ONydMY
z@t<V;CmH`KiU0JG#3sF8c$XNUjq#uK{{CQJFUJ2oI?`qQr!|WIl+wY<!#|}xC()mn
zhX~$mr2&tG08Z1urg4yxqTJiSB8KIY;gG91?0m+!G9xE49M5(v^tb7SWa3{FTUGu9
z#491O9lTx~Wzj-~NLTZpUNSPzx#pj`P6PJv5d@LLeh-~E`HUH0Haa6DqmV?nv&mcd
zVQ9F%Z(SSoZZ&%wSZ1aQnsHitWKN;2=@~RZS`-;%G6j1+X+~fqMOg76(Kq5JSl!}V
za~k``wR7zZ9ZUGG<p(p`F>m1q3H%HrA})Xv6|a!+9fdToJ39S%{=`WEWMHa*DaYN!
zpO7?jX+9!XF1pr31!!GeM6Nbn6knuaViWGj-RS0TF*q)m4^s<puHa!MjNZTTavnLH
z02)p?l0F0;Pfn8=MAq!ea5_xE0w}`<R;+>`sqlkM%M8zN>E2idl7+I3@kfoP&0|As
zGJ)S!U|ZHWo=l3s?Y6t_m(K^2`989EXQ{})A|UEm>?oRL*NL@GtuwPQhL)(?6!ZX?
zPc!1WpqZVq>DRtCxK!cq_m^oGbqHyqd9<0AB74PS-a;Ej?stM`e#+2nFdxFBfV2lJ
zRNf#!3hRG4Q`~9@cMfnt?#@fsT1mTNI?fH2-Ayo71qeU1jK_2PoZD#<Vv2@yWLuxK
zI6J3jVF5Ko^zhCO`AubT24+QQ&S>^jJn8Sg;?-2;6>GfHDWovxAk(U~&#D}JLIqF<
z&ToSjiOq#JsXjox{YK9~1*n2tI~qyhhvB1aTZ}Humww(iF8)IAo%#%ldF(a1O*dkX
zZyRq4`jXgPpGiCoQo$g=vu4XYS4k_cZB?~OTkJ4;09O}kp#cX_+sS$dgB!<O_)dDc
z1#1}T4m>7~fH%yJ@~{3BGDwIwGTBrU^gSZ|3cDdJ(CMg(q`1=&7lsz`Ob}Jyb(&Jn
zi&VTE4NC&vFaoY4#QmKbZ}Mbi%WqZ{)5i!!^;GmGFMEMtH_=8a&bDDofNsIPQr?*n
z#+*rHK0TGE`4sycLMbGnK?!_0z~J-F5jRh2Dpf$R5sC{!av^9oLX<-lXP(#CRptLW
z|Lh#&yM6lZ;PUd{CHfXYM;XL*D@^k_Fa%0Wnm#XZs_DGBS5`B8uSt02M`Tg(@@CHs
zw0zoikZq!4%Ulx1)Y*?IDVF&}01k_w5fWR-<qC7_EPxX=#S8rFunlf<*r?i8c^*Q!
zao6Pzg1cg=ES_E!qxz=B4Mz*?iWJo8f$<+Yx0|9=WO^hH2e%%&Ie<2A!Q?iQ7THOB
ziwB7B`B5cRJz*sfUw%b<MBlk&|JC|Cm-*vI)~Ik8+2tR~1})%D>|IU!nWK6Fb%-aI
zxuo97>w2pAHLWAuT4bVR<LM)z43>sIUjf-J2iT1yA0Qf5Z80K(1_EuQyLTK;Dt5b5
zks>~6VfamXFf)oeR(v;-RP>CgNL8SQpqT(Ybmmc31gMKhYx<%p9cv>=O_ExU)Ko?o
zPCUzNvemR_hITuugNjc~cd8~hv<xKCEkbla8J&4Fwvd2>`cFx~NvgXqs#{Fd4Ndg3
zZ7J?}Wx2Xal1oXB(?q8Yl`SOjcpItJNxEyIyT!!aRg+tiNPSRIlA9#Al-z(GI`b%N
zA@K(FrD&71R!3`fVy%Gyt(IGHP;tY_txwGdo&IH}#2)>doTA|0L;ClK8yfichdD&r
z2XjMLnVaUG%FPcv2!sD0q3foKePrA9+&2Y!*?UZ~7QAFOg}$vhdw)=2F<ELoq0PT#
ziCg^&h+hSXtGcF3dA%S#*Q=rRWbQ(Pb_9+!C0R?deo(STqHC7ZSYshUjSZxvmL&bC
zB)zz59gXak+Xh%u+H#WOQi?-@XcnSaV<91m4dm8=B)3{}YZh<~E*F!arL)B(kW?qB
zF7vRM1aq8uCc$FfJd;35I7w@Dw6?5(yS{SU(H~TlL??+ZB|4yoW&sBk77}n!UkW%$
zYjw1?xQR;IiZ&uen<O`Z+{ir9MivxpM2a>^YxT6&Mt3V-Z=zS5D&k3Ik=(CII8$85
z`<CS%HLZAR(nLW3TyLalT2&w#?6?>tQs9Fk<mTL|izukf&?$s~kQeM4$$Tz}R^qKT
zT&I!?xlpQP49U&h@M>>(J0)wUV(9)TAQr~m<TvBgG#y+rD;iehiA%lj?@#I_!8pz_
ztP64xvT`+3VQ5V;&;|v`u;Mzd=K41@Bg}XFUT$_bIib7&sjgaBR?RLq{X2xZsi4jZ
zN!o84;0t5?Vm6BcNhZ;LF^ny-Vi1w8@As^0g~pWC=#}g=&?!RIj1`Os8RNKV#=N8_
z_$|;F2-<0c7;?m{ea*~wXO4R!XNE0K|L#|{*eFI@m{r;lV{M#{#g~C3P68W=BvMJF
z4MeIo*z{Fo+IY*ll0-NEy7dyy6?xVdpG|YS*fhFFqpW=Cc1hV?9rmOYf?o`JQYgf*
z;d6E@PDNcPQH7op%QP~J^i*2uR*BD`cDLO*{Az*DyV&v9j^z$%h{Db#?F`-^^^=Rp
z$=<LnlTH)f%MTw}^+Hu*y2$ZHE}1<qo||VXORB$HaFF$<`ehaxh2|b%O=0fQ)Quy#
z_{ha){T81l&Z*wWe{_zm)E{e-H3^(uuF+s~1%W$LE>7QU3e#3`PgTT6QKc_|BLj0I
z*jntWB;d<OJbhkZRBLXlR{_)vtw!g}44`x~8uQsg8`jkrcz*1=_S1!NX{yjxZ%4@U
zVec&rd5?C3&7i)wISc#ZfG^IpJ=|-KVjEKDiDCKcYRwIK`nQX7WrHH9*H5$<NLGxN
z)CN+9HyyQs7RxF%9gRr*wBe@0Bn|k=<OH>|mO#Wrj8Rj9vwW~)3QXPxOoAs4P@L#z
zI?)rp(6xdo2=n*$Z7WWu@I>M7*tbs{m<%c>dXtJ<Oe`vZJ6}`}T-<MF9y7!)Md{uM
z;KCAPc+iSA>HM>k<uh9>{x&oy0F6~E2-tibgU#p4_v2_`ATv=*J5Xp3P$k$P$fsi#
zEXdqf9vwSlXEeSuhOiS7W*F)@akxpz={zGt6b?O?l;?5r)0mdA1}D8ImY;D64CJ--
z1Qk2&aG}(rqbj*ZD;7yJYHS2XnzClxfh9`0G+LmeMc#*8GVsLv6;r1xEJZJd?e~Y&
zCo9eAUSn*PlZz+)AvILwBv*z`$kKLKd1IE7l4m{*#j-i-9p)f_Nno>{*i7-S{>-w%
zZ$=WV^3)qox$9E6=Z+xCm72~Uf>|MrGiB00r|{*-pAaRA&CLR7WnMA5?OX+z25=h&
zv&1lXR!B&Wvd=vjCDG{a#+3V=PXJtWLr1_yRfQ&O(8>z0NU0p;CF!O9mm7qLg>`~z
zSj`1z^GY-yq)%{FT6*nS76pyEStSaf#YR$fqlR_|U++4|w@7Qs!D^YX0($NYvoG_`
zZ0g*N)K15m!aGaJTXo@u-J}t=3SRMb-~2{Ab~a6@_(QsIhKfNoAj6=eF#tu5XBdh9
zn7r8aEOP?Akk5TS+OD?y(}j8acVgNkyq|CM*;Du5et&S7{Ttuw{eiysLb-hi08BBw
zH+=q?;(stuCb2UiC;CzU2OYrv!}nTWGTpOzuFM&UK&Cr(=->8W0nqxpsC7iUY^#P@
zC&BEl{C{|OSk3=>|6u>{;Dx?_aCmg6A01#l{a_!8mA>Tvdw*Ujz1Ka^@@&T&9V^p#
z*!v#eyxgKXG%9ovyVK`_?#PW*OwXL7o1`LM3N)`x$2G59l3oWKqC#^6{ihp65m)KP
z#C7342@TZDOwS_lvZwGvHrF2o_ORfBm6k@+E89m6u~f<i$v=VEO~|<13Ef!arzzr_
zi2Q2`s<4y1EgpI0YJycCIU{}-cN#mc@tYZf{zL4uV!>O4nqzuzEWD;rTPzlYQ_rA<
z2t1w)cM791J&%|Oy>v9TKO7o2#2JkN75RBuW8aU6;gQ=!Nz6=!!1ZI$bA@5Naf9)v
zAQneKVN}2d^iDlf+{kA;gXmf3*d(KuA9-Wyr#Cb$Ja>Kj@m%QR7eB_kowj0*5S8#h
zx}!*2^TvUX$=V%1ynulg=7qf(r>1MMgUv!AB~NNCl`+q~YCjUwGd{<5Iw7<~DCs+s
zzz0)lV#ee2hc}%BGc>%sAbl5@o^7%lt2Q-b*z0Hp<A*<BNW^2!3<BcW-R&r}cDF&P
zeU&t(79KXE#!!x_wA9FAR$`<%9yK6%TWSNfYe;SbPNZq;^V@*oH)_{K@kP>4Y6?|$
z6U;DH0{>6a<YM*#e!cU<8#A=oc0NGw;zP^1a1q3(!Oh3TTb5;y$|a7P$(Dix5f^_J
zNU%7ZTHIeqJlUWX>NhanqTQBslVrbTXkE9R3%VI}@gdvxtg(B6wrpx!W)vlqvZ;0_
z6BT<@dF`F59Z*qH=YT2}NYG}bY^o>qFwSaec^FobwU%EDlQSB+)c_q^-}uQ->?^iU
z#q^ILG3gObD!hkN*X_CRnRpDtNH#4hcDwV^H@_{IFKgZnM2$n(4a~M;F)~m{h9o4O
zwP}0Z$u$RhEkW3CJt%H<Hxo#S&U{;&SVjt!ubm0NxN}Jd2i?}yiefvd)E6p!qWd&j
z#hK-uD3f*%;{quh<M>G>#S}I)v4Qdz1qP_Im{8F4FZippJDCt*rW@pmFMHoWcwFHi
z3a!nK3^(eM+5Hk4tF2Ms2z!#O=gW#ZY!@6fU92XI;_}~*Ms6Y<lt_(!8~~q?+$&CV
z+51M?%PXyU`pcKKp+RV|p=8Ght(y4u7b8JVP9x%x5LLeOv+h7my~3$us5&uk4W7X$
zb#Sbbk@rYx{#9gvyf70o&t;)KxZL6N4zrt>gEPm9t19+uDh9T827SLnq;#_q74%hi
zI`PuE<cC?LxMYZ`(D_ahCjiVfa*WshnV$xlHJE9GMg($zH_-M}Of(3u2$-oDA_&OF
z4`K(s4Z#X^z{E$l5gsfpJJ_h_0VA9=-g^`oft{Pt&{)c^A3saRkc@G!jG-z>oXJ@F
zn*Q&{Wfj-?wgjgLS`2hRTxab2EFUiJz4Ri_45G0gOJ<VHv^X;{kRc%%2_f-&yy96f
zpAAz7Ga_Ut*YwL}m#ju^8QAy7EUGfF+4jSlu}#a5>FqzJ-;qbvluuQ5v0ta}epoMl
z^$Z|0;o{+1u<!&2MZ#-Jk2r>;-E-pb)V?Zj>E_tKTx>WnV6Tl3_<Pb;N!s?HWuf{7
zoeQ7If8mdm5AYCxeXKl+?uGhaJLNmV6~pZ>In;jTKvi8G<OZwu(-+*>tr;x(X94}R
z8M40Qko8tW205YjTMt>!hFr4A`T15cSVQu@eik1DQoh1NKMa(XT)m_7DKuDf?P>8n
z#d<Nw{?3fCiC5lnv=ui=baatR<WOw$isgF!gREYYc+?kHaAH3;U4vDR7MFl4lKCW0
ziro}$Y62Hk$pm3;%;}wu3eufpKhLLMru|?ql9DAQ-!moGTGX|YJLeye*Y|Tp^(;Ug
z3(~r2;l`8rs?^^UGzI78TqLf`WsW5a7)pPsK$SFD6A(dJGs?GWG~@E9r7lSW7Fs-7
zLp-mCZT7fCrE;=Tb#4_7RCdpF<>Up_^=FaBQt856L2}}Ry=tS+Ld{;HOlIZ0eL=#j
zx$6Lf;tQy;Q!DxkepT6_y_a{Z!cE5?bLa3m50}i-B6&MEEJj*0V%IpX{HbRb9WAo%
zzIK7`c2xQoAU>$~K(LA6#g-dfvCzQb)erOwudXB>aXjvU5?Nz1Ap-Y5DK(}Ye<UzT
z&pV62?IK83g$a4R`NADHym(7I{zUVTZiyxDbox#&N%9g-$-kziyC_mSv{gr9wiRYM
zwC;pl?Vtj^@U{hlgUBXUxHyV~$Z>tIhmDDeA%LG6zWKshzRFc7#>F)&-OH8!Ue?gE
z><afsP-Wbx>4+94E?D;~iK{=KxRUtq3sYCpP}0=8&{T?%J5bUc#8mQmC)V#wOw{T?
zy)aaq7?ox`JL;l}MsG)8tc#tsBe?sZtNr<Ol?0W^)%<VKD@=^@yf3(j%{ghxh$!jw
zP86IN7<^|VB-g59uuHIos4IN>C;6QsA$&DPy3F_5k{b#WDXY^}w?4%99;M<2wV&3F
z6faHp1@;AcC77;@cWgY{3~h``8pF#-L2$c1BXv~);}Glsnvl^Vy`e)l=v7VAcGm1{
zbkm5QS7pE^Jch$bhrzp3t^>nP>U<V)k*}KGHma_5M}Di=Kd1z(suVUA6meGpU<g0E
zJS7ag*;xfU*Ti7D1(V$Lvel}JadOMhtrm^VfB<?=PMAY-@}|xZDtBqvra>77iPd$R
zqAAugCteimr5DALY`FptDoN)j*>SDp#?^9T9tn~wSH+dd)nrb=IhTX`;@H+q>-i7D
zwlKqYz(taCCFgE5=L$spIE$_8`;2jyRDnNy`qVr9t<0*DG4F#h8MGxsZnlDZ&9Yi8
z#dFZs_k2f{+;;!m#$helZL`_UHDioda)(8Z_Rt#xPlVR=ib&>r0Ol*eTr%OhGhr((
zG~Hl)wkj^f&de(kJC@QtlUu5f&xRR7YI*Ri_bG@a@743(wTX^lMZn!JNB(>t9Qm{4
z$c^C0pCv~=7)Q2c$T#=FkZ&YIZUjTVkqo(BOXP26_~ygMRk0-W#)us_^20~TlPlrL
z)N_P;9-Sn<v8&@;z*`y{w4QfAe3YEKR-AkO{%`N@fpxK4$-7UAcM+wMdGDQhTk`FP
zPoM6AZK2oG-z48YDZZsRm5jSK2JG8UXCL3Mig#&b_>=~e?#K6%dpCu9^SG<!-;eJ*
z&(;;F5XGn|<k72o24^7zPT18&&#-`fF;o|cP7YMe-+t0L`}sq1*Y`nUBejr9e9&TF
zje@GR@Oks$^uDAIbS=ft6B9pxQz?I>{Fx_zS_&VS=zR$v=voS&CnkK5FQo91!lz02
zEF^v|F78eITwF-;^VG!8#f20<Qv9q;{Jgn8@$*KCpQmSfN%14a&kDtl+;*12M{YYy
z+4JzS=gryMkLN2DK5wj4_(0cE`fQZ+Ns(78eE_FY{5&=B!^c|4n6h%A)Jh($dLvqp
zM{*<j>B%Fx5iRA>vht{<L~^|0xfh3iFuO0olzl<Urza+#GSo^jCB@XTwj3C9&QLKZ
zj12?*8asx8f%*-jb1Wr=3?U|i|5ZBu;$IXPFP_e$?P|L}U6{9jC$PkY_w$WDd+Pq%
z?+*^Mf8%?-KhXDHD7Oy*fGH-hg3rHrp3h*QOk!t3PV}Sx4?2MThwrui)(iPOljjQc
zPMVZ8^ly8*2HMOnnlKUlYXrVKvp~;^wP1E9{y#iCtmFT^y}kVx`u@S;(V>2Hfc5l4
z{ovq*(wF>y@6Rix_qqoPsSVo1u`-Q^z3=hO%Pnd%(rEISr2zCv$Dc-u<>Hzd(M1yV
z5vGI(p?~ez#HN`JD7l1F3nRhPwn#&sKXE|g(^!|#J|v2HZ91-b<+20`G#7&Jy8aEe
zOZ(>PsEM5zS=YYDeBpb_Evqnc$*tqr<W><MR%V5M6dBOavp~H;9hgqenFKzp{?ROY
zxdnefnTW@l83e?$yW3G{?QZXs3p$#%i)CmT@D73*vP$~Xn@)lms1~4H;-^@8ERoB!
zXc~t82sJI5A|TQ#X2&}#m$d6K0|)OYeW6x>BTXDo^*Lc%qg-s*uy%xDQ&Kb!GhL)@
zSdNUF(8oB(&P4HtN|vuWZN)QUf=bC^*=tkJLA4VTh^gH@RR^(=sF$=uh6AYW0CjfX
z#Bu0cO=EHnU<r2+8RXUi+u1)(<2YB$wafBR?&Y;ZDDLvsn7w*RXfELnT6Rh__NT6`
zTmdmF`k6v)iOw95HNYwb5;me-VP2dr0vZaJI*t*hHwUL=)L(Rz^uN$BBt+@2ddU59
z4#<jOdt?LySZd(V)WrN6$qafukVj6W-1y<Q=*J&dJ)hp7oU<ck(9%@XlDS#=CO(t1
zXh}gWF9sH~iutFr?)v3X6lFv4zEQ5YyHlGOhgAS`9|i!+#kFH99|8rp0t!`M6}lAm
zRcNyB9aR~qN|HnpKZevhZ})=LliHoM|D^sW?LYY}Z~x6)$NP2{<8QA0ryuBtMf>ky
ze^1(f>tz33%sv1eyz|2wGqmacGBX8Xg)f*Q3tM=Z77r^pC{!b&ye{M~<|&Lt%Z?g;
z<x%nVAJie+5YCC_^_ZHGwBf`uLNbJ6&zd)ud_#X42h&)2O{@DJD49NV!75AxJY6yz
zJW(-N2J-5Ppb3G_IGLb{U0Y$u=1i@<>@EFiB%9*wy%EKDAQ3qe%uP+r^~zrV*9mC;
zXD3W5z)aILc5L!5w?5GsqV2;Woocs$VC-=*>pHQZF@IcyW?EpE%<4*mH<e5HpTx$k
zAX4b^;zRo$L@LY~#=<mn*3(6UchTFa1GZtspztd|Dhj2AV$y7OPLlgL>q09IE**qe
zV0b>$pKFKp8``imL%8WNi?vW;S%X>}Mi+E<wVfgT1;oM#Y%fotFQZDMT2<idRHned
zJN2_4*pEH&uk;jgaC!MS0BGEd$sho{&FLstAxWc}n~=_?f@9@*dg?wN!7ppfa>3V+
z;xnxU0%iiEKq)sdZ|7rzj=en=V9@&uz#kSR64Jp;K}3Z-pS@!H6k2H911_lSU~p4$
zHJiZ<M&lR}74bRMV`d&!7Q{%OV<bEFEDMa{DhM@{FQ3jn9V;+Y>l;kLWUD(p?8Jne
zxE*(cZ=Hy)XmkxQab^*|$Jg+NrLA>IBuusVEs#TA!<>J`&cy|~r*td;?^P$2j;1%m
zIT!Ov1a9syxq?#8Z!HpFr!|BUxFyJR+Yrk3g-gtcB)i=d4B`~%wh%63bk_ymnBk1>
zeB>Nf%tWFnuwn+NLSa8vbd6{F&^auVO6)5i;ZxI)Oo$i1!}n>k?b4=9c`{2tovx5L
zf@W|7>?SZWgN_w=hnWg|ur}%i8cd~=R;~kd6XhTr5rRuo50|E51wc#1R#<s*RlY?u
zU+=%~Jf$IZ1A6>9<-C%F3D&tKEjeW>EEg97wAmvG0SmeNBpJg~5(x|&OJQOO!N9&&
zEgp>DuxyTys6<4Y87?O}1e#kBo23wFB|s{t+iI>$$dr4W>?$Nn4J?|VF|p=AU}AwX
zGhBp{=>|Y?kvoNVkpaZ)eU95wFzc!9JX(@(em*U*mD|ujqQpsX?5G`>O?w&@CNzI8
zZOBucOxk88^j9~@JUP-@szNKYA<QC1n_$pl`Qy{4><FgD{Izb8u~i!cbPK0~6zTis
z;YJGE4GO0qHh4cxgMF2bmUqryCuRscwhYV=<^rS~;%=i)4PV?egn!0t1Ja4=rc|-5
zmYZ5ddHodsK`Td!;FJvJ&U}%P%;$>4&`1H(5t_zmP2MmQ3+*O=9grHGpzYUUDBJwY
zV<>yyfT1bSbh34b{|_n3LK02g(mqqo6y7<|F6je?ikL<`jA5ZjEW!*R<p~Jeh{EuN
zxAnvSrxQd<6cEc9I>c7eUgmIv*f$f^|HF1EBjYpq*M%PeN*q5t^(TN<C-Nz`|Fg4G
zwZ&H6EEOhEDr?x{z^X8rv~D{jG*LxyZ`T<reg4@TmE`M{hM4<&N;HhC0k!$P<5K0q
zcsT}e2Q_u~XG8AfD&R>ymc1qB<cGj_yg15NVC>1)+iiwKIwLx;|M~nGTN-C%=n|?%
zV}tFA9%3>HToftodWl)XZ4vsszi3QcH{FM?>TWU<rlYhNBBM%;OS?p`5WA$Jvd~Yb
zs*lVgKC7sFom?jF@%h?RCr_Jfmxl_{c{|^-f|Bs^gRb_q!GXGm=XT^j4^^G)r<Wz9
iUxs|-BOm$5M?UhAk9_1KANj2F^Zx-om3qMd@CpFZkD}KA

literal 0
HcmV?d00001

diff --git a/src/bayesvalidrox.egg-info/PKG-INFO b/src/bayesvalidrox.egg-info/PKG-INFO
index dc88c8335..279feebe3 100644
--- a/src/bayesvalidrox.egg-info/PKG-INFO
+++ b/src/bayesvalidrox.egg-info/PKG-INFO
@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: bayesvalidrox
-Version: 0.0.5
+Version: 1.0.0
 Summary: An open-source, object-oriented Python package for surrogate-assisted Bayesain Validation of computational models.
 Home-page: https://git.iws.uni-stuttgart.de/inversemodeling/bayesian-validation
-Author: Farid Mohammadi
-Author-email: farid.mohammadi@iws.uni-stuttgart.de
+Author: Farid Mohammadi, Rebecca Kohlhaas
+Author-email: farid.mohammadi@iws.uni-stuttgart.de, rebecca.kohlhaas@iws.uni-stuttgart.de
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
-- 
GitLab