From fb6e2cff7acbab28c7b23f26de63d0b2d15adc26 Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Sat, 28 Feb 2026 13:51:54 +0000 Subject: [PATCH 1/7] refactor --- experiments/llm-bitflip/pretrain/run.py | 9 ++- experiments/llm-bitflip/transform/minimal.py | 2 +- .../llm-optical-transformer/pretrain/run.py | 4 +- requirements.txt | 2 +- .../bitflip/fine_tune/__init__.py | 0 .../bitflip/pretrain/__init__.py | 0 .../bitflip/{ => pretrain}/arg_manager.py | 2 +- .../bitflip/{ => pretrain}/pretrainer.py | 8 +-- .../bitflip/{ => pretrain}/profiler.py | 0 .../bitflip/pretrain/transform.py | 67 +++++++++++++++++++ src/aixsim_models/bitflip/transform.py | 53 --------------- .../pretrain/arg_manager.py | 6 +- .../pretrain/pretrainer.py | 8 +-- .../optical_transformer/pretrain/transform.py | 6 +- 14 files changed, 90 insertions(+), 77 deletions(-) create mode 100644 src/aixsim_models/bitflip/fine_tune/__init__.py create mode 100644 src/aixsim_models/bitflip/pretrain/__init__.py rename src/aixsim_models/bitflip/{ => pretrain}/arg_manager.py (98%) rename src/aixsim_models/bitflip/{ => pretrain}/pretrainer.py (97%) rename src/aixsim_models/bitflip/{ => pretrain}/profiler.py (100%) create mode 100644 src/aixsim_models/bitflip/pretrain/transform.py delete mode 100644 src/aixsim_models/bitflip/transform.py diff --git a/experiments/llm-bitflip/pretrain/run.py b/experiments/llm-bitflip/pretrain/run.py index d72e4a4..e299d52 100644 --- a/experiments/llm-bitflip/pretrain/run.py +++ b/experiments/llm-bitflip/pretrain/run.py @@ -8,16 +8,15 @@ import math from pathlib import Path -import torch from aixsim_models.llm.profiler import profile_num_params from aixsim_models.llm import register_model_configs, register_pretrain_dataset from aixsim_models.utils.logging import set_logging_verbosity from aixsim_models.llm.evaluator import pt_evaluate_ppl, hf_check_ppl, hf_lm_eval from aixsim_models.llm.utils import convert_torch_to_hf -from aixsim_models.bitflip.pretrainer import pretrain -from aixsim_models.bitflip.arg_manager import ArgRandomBitFlipTransform -from aixsim_models.bitflip.arg_manager import ( +from aixsim_models.bitflip.pretrain.pretrainer import pretrain +from aixsim_models.bitflip.pretrain.arg_manager import ArgRandomBitFlipTransform +from aixsim_models.bitflip.pretrain.arg_manager import ( ArgJob, ArgProfiling, ArgMetrics, @@ -32,7 +31,7 @@ ArgMemoryEstimation, PreTrainArgs, ) -from aixsim_models.bitflip.profiler import profile_stats_hf +from aixsim_models.bitflip.pretrain.profiler import profile_stats_hf register_model_configs() register_pretrain_dataset() diff --git a/experiments/llm-bitflip/transform/minimal.py b/experiments/llm-bitflip/transform/minimal.py index 0f6aa17..aeac101 100644 --- a/experiments/llm-bitflip/transform/minimal.py +++ b/experiments/llm-bitflip/transform/minimal.py @@ -13,7 +13,7 @@ from jsonargparse import CLI from aixsim_models.llm.evaluator import hf_lm_eval, hf_generate -from aixsim_models.bitflip.transform import transform_model, TransformConfigManager +from aixsim_models.bitflip.pretrain.transform import transform_model, TransformConfigManager DEFAULT_DTYPE = "float16" DEFAULT_TASKS = ["wikitext"] diff --git a/experiments/llm-optical-transformer/pretrain/run.py b/experiments/llm-optical-transformer/pretrain/run.py index 9c66783..ad53b27 100644 --- a/experiments/llm-optical-transformer/pretrain/run.py +++ b/experiments/llm-optical-transformer/pretrain/run.py @@ -15,8 +15,8 @@ from aixsim_models.llm.evaluator import pt_evaluate_ppl, hf_check_ppl, hf_lm_eval from 
aixsim_models.llm.utils import convert_torch_to_hf, convert_hf_to_torch -from aixsim_models.optical_compute.optical_transformer.pretrainer import pretrain -from aixsim_models.optical_compute.optical_transformer.arg_manager import ( +from aixsim_models.optical_compute.optical_transformer.pretrain.pretrainer import pretrain +from aixsim_models.optical_compute.optical_transformer.pretrain.arg_manager import ( ArgJob, ArgProfiling, ArgMetrics, diff --git a/requirements.txt b/requirements.txt index 7a2ece5..5e2ebc8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,4 @@ colorlog torchdata tensorboard lm-eval==0.4.7 -mase-tools @ git+https://github.com/DeepWok/mase@cz/mase-triton-bitflip \ No newline at end of file +mase-triton \ No newline at end of file diff --git a/src/aixsim_models/bitflip/fine_tune/__init__.py b/src/aixsim_models/bitflip/fine_tune/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aixsim_models/bitflip/pretrain/__init__.py b/src/aixsim_models/bitflip/pretrain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aixsim_models/bitflip/arg_manager.py b/src/aixsim_models/bitflip/pretrain/arg_manager.py similarity index 98% rename from src/aixsim_models/bitflip/arg_manager.py rename to src/aixsim_models/bitflip/pretrain/arg_manager.py index 3551f34..7059279 100644 --- a/src/aixsim_models/bitflip/arg_manager.py +++ b/src/aixsim_models/bitflip/pretrain/arg_manager.py @@ -2,7 +2,7 @@ from typing import Literal -from ..llm.arg_manager import ( +from ...llm.arg_manager import ( ArgJob, ArgProfiling, ArgMetrics, diff --git a/src/aixsim_models/bitflip/pretrainer.py b/src/aixsim_models/bitflip/pretrain/pretrainer.py similarity index 97% rename from src/aixsim_models/bitflip/pretrainer.py rename to src/aixsim_models/bitflip/pretrain/pretrainer.py index 09fd7b5..ae204d1 100644 --- a/src/aixsim_models/bitflip/pretrainer.py +++ b/src/aixsim_models/bitflip/pretrain/pretrainer.py @@ -22,10 +22,10 @@ ) from torchtitan.utils import device_module, device_type -from ..llm.tokenizer import build_tokenizer -from ..llm.pretrainer import train_loop, build_meta_model, count_params -from ..utils.torch_module import TransformConfigManager -from ..utils.wandb_utils import wandb_update_config, wandb_extract_and_update_tags +from ...llm.tokenizer import build_tokenizer +from ...llm.pretrainer import train_loop, build_meta_model, count_params +from ...utils.torch_module import TransformConfigManager +from ...utils.wandb_utils import wandb_update_config, wandb_extract_and_update_tags from .transform import transform_model, make_transform_histogram from .arg_manager import ArgRandomBitFlipTransform from .arg_manager import ( diff --git a/src/aixsim_models/bitflip/profiler.py b/src/aixsim_models/bitflip/pretrain/profiler.py similarity index 100% rename from src/aixsim_models/bitflip/profiler.py rename to src/aixsim_models/bitflip/pretrain/profiler.py diff --git a/src/aixsim_models/bitflip/pretrain/transform.py b/src/aixsim_models/bitflip/pretrain/transform.py new file mode 100644 index 0000000..b9a7f26 --- /dev/null +++ b/src/aixsim_models/bitflip/pretrain/transform.py @@ -0,0 +1,67 @@ +from typing import Literal, Optional +import logging + +import torch +from ...utils.torch_module import TransformConfigManager +from ...utils.deps import all_packages_are_available +from ...utils.torch_module import set_layer_by_name + +logger = logging.getLogger(__name__) + + +if not all_packages_are_available(("mase_triton",)): + + def transform_model(*args, **kwargs): 
+ raise ImportError("mase-triton or chop not installed. Please install mase-triton to use this feature.") + + def make_transform_histogram(*args, **kwargs): + raise ImportError("mase-triton or chop not installed. Please install mase-triton to use this feature.") + +else: + + from mase_triton.random_bitflip.layers import RandomBitFlipDropout, RandomBitFlipLinear + + + def flip_bits_in_linear(model: torch.nn.Module, config_manager: TransformConfigManager) -> list[tuple[str, str]]: + replaced_layers = [] + + for name, layer in model.named_modules(): + if isinstance(layer, torch.nn.Linear): + layer_cfg = config_manager.get_layer_config(name) + if layer_cfg is None: + continue + new_layer = RandomBitFlipLinear.from_linear(layer, **layer_cfg) + set_layer_by_name(model, name, new_layer) + replaced_layers.append((name, config_manager.get_layer_config_entry(name))) + return replaced_layers + + + def transform_model( + model: torch.nn.Module, config_manager: TransformConfigManager, transform_flavor: Literal["fc"] + ) -> list[tuple[str, str]]: + """ + Transform a model into the random bitflip form using the given configuration manager and transform flavor. + + Args: + model (torch.nn.Module): The model to transform. + config_manager (TransformConfigManager): The configuration manager for the transformation. + transform_flavor (Literal["fc"]): The flavor of the transformation to apply. + + Returns: + list[tuple[str, str]]: A list of tuples containing the names of the layers that were replaced and the configuration + entry that was used for the replacement + """ + if transform_flavor == "fc": + return flip_bits_in_linear(model, config_manager) + else: + raise ValueError(f"Unknown transform flavor {transform_flavor}") + + + def make_transform_histogram(replaced_layers: list[tuple[str, str]]) -> dict[str, dict[str, int | list[str]]]: + patterns = set(layer[1] for layer in replaced_layers) + histogram = {pattern: {"count": 0, "layers": []} for pattern in patterns} + for layer, pattern in replaced_layers: + histogram[pattern]["count"] += 1 + histogram[pattern]["layers"].append(layer) + histogram["total"] = {"layer count": len(replaced_layers), "pattern count": len(patterns)} + return histogram diff --git a/src/aixsim_models/bitflip/transform.py b/src/aixsim_models/bitflip/transform.py deleted file mode 100644 index b022eab..0000000 --- a/src/aixsim_models/bitflip/transform.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Literal, Optional -import logging - -import torch -from chop.passes.module.transforms.bitflip import bitflip_module_transform_pass -from ..utils.torch_module import TransformConfigManager -from ..utils.deps import all_packages_are_available - -logger = logging.getLogger(__name__) - - -if not all_packages_are_available(("mase_triton", "chop")): - - def transform_model(*args, **kwargs): - raise ImportError("mase-triton or chop not installed. Please install mase-triton to use this feature.") - - def make_transform_histogram(*args, **kwargs): - raise ImportError("mase-triton or chop not installed. Please install mase-triton to use this feature.") - -else: - - def transform_model( - model: torch.nn.Module, config_manager: TransformConfigManager, transform_flavor: Optional[Literal["fc"]] = None - ) -> None: - """ - Transform a model into the random bitflip form using the given configuration manager and transform flavor. - - Args: - model (torch.nn.Module): The model to transform. - config_manager (TransformConfigManager): The configuration manager for the transformation. 
- transform_flavor (Optional[Literal["fc"]]): The flavor of the transformation. Defaults to None. - - Returns: - torch.nn.Module: The transformed model. - """ - - if transform_flavor is None or transform_flavor == "fc": - # *: use the bitflip transform pass in mase-tools - pass_args = config_manager.layer_name_to_config - pass_args = pass_args | {"by": "regex_name" if config_manager.use_regex else "name"} - bitflip_module_transform_pass(model, pass_args=pass_args) - else: - raise ValueError(f"Unknown transform flavor {transform_flavor}") - - def make_transform_histogram(replaced_layers: list[tuple[str, str]]) -> dict[str, dict[str, int | list[str]]]: - raise NotImplementedError("make_transform_histogram is not implemented.") - # patterns = set(layer[1] for layer in replaced_layers) - # histogram = {pattern: {"count": 0, "layers": []} for pattern in patterns} - # for layer, pattern in replaced_layers: - # histogram[pattern]["count"] += 1 - # histogram[pattern]["layers"].append(layer) - # histogram["total"] = {"layer count": len(replaced_layers), "pattern count": len(patterns)} - # return histogram diff --git a/src/aixsim_models/optical_compute/optical_transformer/pretrain/arg_manager.py b/src/aixsim_models/optical_compute/optical_transformer/pretrain/arg_manager.py index afb9bfd..4f6c411 100644 --- a/src/aixsim_models/optical_compute/optical_transformer/pretrain/arg_manager.py +++ b/src/aixsim_models/optical_compute/optical_transformer/pretrain/arg_manager.py @@ -2,7 +2,7 @@ from typing import Literal -from ...llm.arg_manager import ( +from ....llm.arg_manager import ( ArgJob, ArgProfiling, ArgMetrics, @@ -54,8 +54,8 @@ class PreTrainArgs: Communications library settings. memory_estimation : ArgMemoryEstimation Memory estimation settings. - transform: ArgRandomBitFlipTransform - Random bitflip transformation. + transform: ArgOpticalTransformerTransform + Optical transformer transformation. 
""" job: ArgJob = field(default_factory=ArgJob) diff --git a/src/aixsim_models/optical_compute/optical_transformer/pretrain/pretrainer.py b/src/aixsim_models/optical_compute/optical_transformer/pretrain/pretrainer.py index 46091b1..6a1ebae 100644 --- a/src/aixsim_models/optical_compute/optical_transformer/pretrain/pretrainer.py +++ b/src/aixsim_models/optical_compute/optical_transformer/pretrain/pretrainer.py @@ -25,10 +25,10 @@ ) from torchtitan.utils import device_module, device_type -from ...llm.tokenizer import build_tokenizer -from ...llm.pretrainer import train_loop, build_meta_model, count_params -from ...utils.torch_module import TransformConfigManager -from ...utils.wandb_utils import wandb_update_config, wandb_extract_and_update_tags +from ....llm.tokenizer import build_tokenizer +from ....llm.pretrainer import train_loop, build_meta_model, count_params +from ....utils.torch_module import TransformConfigManager +from ....utils.wandb_utils import wandb_update_config, wandb_extract_and_update_tags from .transform import transform_torchtitan_model, make_transform_histogram from .arg_manager import ( ArgJob, diff --git a/src/aixsim_models/optical_compute/optical_transformer/pretrain/transform.py b/src/aixsim_models/optical_compute/optical_transformer/pretrain/transform.py index 2abcfa1..c548ea5 100644 --- a/src/aixsim_models/optical_compute/optical_transformer/pretrain/transform.py +++ b/src/aixsim_models/optical_compute/optical_transformer/pretrain/transform.py @@ -5,9 +5,9 @@ from torchtitan.models.llama.model import Transformer as TTLlamaTransformer from transformers import LlamaForCausalLM as HFLlamaForCausalLM from mase_triton.optical_compute.layers import OpticalTransformerLinear -from ...utils.torch_module import set_layer_by_name, get_layer_name -from ...utils.deps import all_packages_are_available -from ...utils.torch_module import TransformConfigManager +from ....utils.torch_module import set_layer_by_name, get_layer_name +from ....utils.deps import all_packages_are_available +from ....utils.torch_module import TransformConfigManager from .layers import TTOpticalTransformerLlamaAttention, HFOpticalTransformerLlamaAttention if not all_packages_are_available(("mase_triton",)): From 42b0702d3eea3d6c923079fe12ffcf2f6a19ca6f Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Sat, 28 Feb 2026 18:16:18 +0000 Subject: [PATCH 2/7] bitflip lora fine-tune experiments --- .../lora_finetune/fine-tune-bitflip-clm.sh | 103 ++ .../lora_finetune/run_clm_no_trainer.py | 983 ++++++++++++++++++ .../lora_finetune/transform_cfg.toml | 13 + .../bitflip/fine_tune/bitflip_llama.py | 45 + .../bitflip/fine_tune/bitflip_lora.py | 160 +++ 5 files changed, 1304 insertions(+) create mode 100755 experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh create mode 100644 experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py create mode 100644 experiments/llm-bitflip/lora_finetune/transform_cfg.toml create mode 100644 src/aixsim_models/bitflip/fine_tune/bitflip_llama.py create mode 100644 src/aixsim_models/bitflip/fine_tune/bitflip_lora.py diff --git a/experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh b/experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh new file mode 100755 index 0000000..bfe75f5 --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh @@ -0,0 +1,103 @@ +#!/bin/bash + +# Parameterized fine-tuning script with proper max_train_steps calculation +# Usage: ./fine-tune-bitflip-clm.sh [num_processes] [model_name_or_path] 
[per_device_train_batch_size] [learning_rate] [weight_decay] [gradient_accumulation_steps] [block_size] + +# Default parameters +NUM_PROCESSES=${1:-8} +MODEL_NAME_OR_PATH=${2:-"unsloth/Llama-3.1-8B"} +PER_DEVICE_TRAIN_BATCH_SIZE=${3:-1} +LEARNING_RATE=${4:-"1e-5"} +WEIGHT_DECAY=${5:-"0.01"} +GRADIENT_ACCUMULATION_STEPS=${6:-2} +BLOCK_SIZE=${7:-2048} + +# Function to get model parameters count +get_model_params() { + case "$1" in + "AICrossSim/clm-60m") + echo "60000000" + ;; + "AICrossSim/clm-200m") + echo "200000000" + ;; + "AICrossSim/clm-400m") + echo "400000000" + ;; + "AICrossSim/clm-600m") + echo "600000000" + ;; + "AICrossSim/clm-1.1b") + echo "1100000000" + ;; + "unsloth/Llama-3.1-8B") + echo "8000000000" + ;; + *) + echo "Unknown model: $1" >&2 + exit 1 + ;; + esac +} + +# Calculate derived parameters +N_PARAMS=$(get_model_params "$MODEL_NAME_OR_PATH") +N_FINE_TUNE_TOKENS=$((1 * N_PARAMS / 100)) +N_SAMPLES_PER_STEP=$((NUM_PROCESSES * PER_DEVICE_TRAIN_BATCH_SIZE)) +N_TOKENS_PER_STEP=$((N_SAMPLES_PER_STEP * BLOCK_SIZE)) + +# Calculate max_train_steps using ceiling division: (a + b - 1) / b +MAX_TRAIN_STEPS=$(((N_FINE_TUNE_TOKENS + N_TOKENS_PER_STEP - 1) / N_TOKENS_PER_STEP)) + +echo "Calculated max_train_steps: ${MAX_TRAIN_STEPS}" + + +# Generate output directory name +OUTPUT_DIR="./output/$(basename ${MODEL_NAME_OR_PATH})-bitflip-lora" + +# Generate wandb tags +WANDB_TAGS="${MODEL_NAME_OR_PATH},lr${LEARNING_RATE},steps${MAX_TRAIN_STEPS}" + +echo "============================================" +echo "Fine-tuning Configuration:" +echo "============================================" +echo "Model: ${MODEL_NAME_OR_PATH}" +echo "Model Parameters: ${N_PARAMS}" +echo "Number of Processes: ${NUM_PROCESSES}" +echo "Per Device Train Batch Size: ${PER_DEVICE_TRAIN_BATCH_SIZE}" +echo "Learning Rate: ${LEARNING_RATE}" +echo "Weight Decay: ${WEIGHT_DECAY}" +echo "Gradient Accumulation Steps: ${GRADIENT_ACCUMULATION_STEPS}" +echo "Block Size: ${BLOCK_SIZE}" +echo "" +echo "Calculated Parameters:" +echo "Fine-tune Tokens: ${N_FINE_TUNE_TOKENS}" +echo "Samples per Step: ${N_SAMPLES_PER_STEP}" +echo "Tokens per Step: ${N_TOKENS_PER_STEP}" +echo "Max Train Steps: ${MAX_TRAIN_STEPS}" +echo "Output Directory: ${OUTPUT_DIR}" +echo "Wandb Tags: ${WANDB_TAGS}" +echo "============================================" + +# Run the training +uv run accelerate launch --num_processes=${NUM_PROCESSES} \ + run_clm_no_trainer.py \ + --model_name_or_path ${MODEL_NAME_OR_PATH} \ + --dataset_name Cheng98/fineweb-edu-1.25B \ + --per_device_train_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ + --per_device_eval_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ + --learning_rate ${LEARNING_RATE} \ + --weight_decay ${WEIGHT_DECAY} \ + --num_train_epochs 1 \ + --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS} \ + --lr_scheduler_type linear \ + --output_dir ${OUTPUT_DIR} \ + --preprocessing_num_workers 32 \ + --trust_remote_code \ + --with_tracking \ + --report_to wandb \ + --transform_cfg ./transform_cfg.toml \ + --block_size ${BLOCK_SIZE} \ + --log_train_loss_steps 50 \ + --max_train_steps ${MAX_TRAIN_STEPS} \ + --wandb_tags ${WANDB_TAGS} diff --git a/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py new file mode 100644 index 0000000..4595dfe --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py @@ -0,0 +1,983 @@ +#!/usr/bin/env python +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# /// script +# dependencies = [ +# "transformers @ git+https://github.com/huggingface/transformers.git", +# "albumentations >= 1.4.16", +# "accelerate >= 0.12.0", +# "torch >= 1.3", +# "datasets >= 2.14.0", +# "sentencepiece != 0.1.92", +# "protobuf", +# "evaluate", +# "scikit-learn", +# ] +# /// + +""" +Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) +on a text file or a dataset without using HuggingFace Trainer. + +Here is the full list of checkpoints on the hub that can be fine-tuned by this script: +https://huggingface.co/models?filter=text-generation +""" +# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. + +import argparse +import json +import logging +import math +import os +import random +from itertools import chain +from pathlib import Path + +import datasets +import tomllib +import torch +import transformers +from accelerate import Accelerator, DistributedType +from accelerate.logging import get_logger +from accelerate.utils import set_seed +from datasets import load_dataset +from huggingface_hub import HfApi +from torch.utils.data import DataLoader +from tqdm.auto import tqdm +from transformers import ( + CONFIG_MAPPING, + MODEL_MAPPING, + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + LlamaForCausalLM, + SchedulerType, + default_data_collator, + get_scheduler, +) +from transformers.utils import check_min_version, send_example_telemetry +from transformers.utils.versions import require_version + +from aixsim_models.bitflip.fine_tune.bitflip_llama import transform_llama + +FC_CFG = dict( + x_p_exp=None, + x_p_frac=None, + x_zero_out_t=None, + w_p_exp=None, + w_p_frac=None, + w_zero_out_t=None, + x_seed_exp=0, + x_seed_frac=0, + w_seed_exp=0, + w_seed_frac=0, +) + +LORA_CFG = dict(r=32, lora_alpha=32) + +logger = get_logger(__name__) + +require_version( + "datasets>=2.14.0", + "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt", +) + +MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Finetune a transformers model on a causal language modeling task" + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help="The name of the dataset to use (via the datasets library).", + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The configuration name of the dataset to use (via the datasets library).", + ) + parser.add_argument( + "--train_file", + type=str, + default=None, + help="A csv, txt or a json file containing the training data.", + ) + parser.add_argument( + "--validation_file", + type=str, + default=None, + help="A csv, txt or a json file containing the validation data.", + ) + parser.add_argument( + "--validation_split_percentage", + default=5, + help="The percentage of the train set used as 
validation set in case there's no validation split", + ) + parser.add_argument( + "--model_name_or_path", + type=str, + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--config_name", + type=str, + default=None, + help="Pretrained config name or path if not the same as model_name", + ) + parser.add_argument( + "--tokenizer_name", + type=str, + default=None, + help="Pretrained tokenizer name or path if not the same as model_name", + ) + parser.add_argument( + "--use_slow_tokenizer", + action="store_true", + help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", + ) + parser.add_argument( + "--per_device_train_batch_size", + type=int, + default=8, + help="Batch size (per device) for the training dataloader.", + ) + parser.add_argument( + "--per_device_eval_batch_size", + type=int, + default=8, + help="Batch size (per device) for the evaluation dataloader.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--weight_decay", type=float, default=0.0, help="Weight decay to use." + ) + parser.add_argument( + "--num_train_epochs", + type=int, + default=3, + help="Total number of training epochs to perform.", + ) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--lr_scheduler_type", + type=SchedulerType, + default="linear", + help="The scheduler type to use.", + choices=[ + "linear", + "cosine", + "cosine_with_restarts", + "polynomial", + "constant", + "constant_with_warmup", + ], + ) + parser.add_argument( + "--num_warmup_steps", + type=int, + default=0, + help="Number of steps for the warmup in the lr scheduler.", + ) + parser.add_argument( + "--output_dir", type=str, default=None, help="Where to store the final model." + ) + parser.add_argument( + "--seed", type=int, default=None, help="A seed for reproducible training." + ) + parser.add_argument( + "--model_type", + type=str, + default=None, + help="Model type to use if training from scratch.", + choices=MODEL_TYPES, + ) + parser.add_argument( + "--block_size", + type=int, + default=None, + help=( + "Optional input sequence length after tokenization. The training dataset will be truncated in block of" + " this size for training. Default to the model max input length for single sentence inputs (take into" + " account special tokens)." 
+ ), + ) + parser.add_argument( + "--preprocessing_num_workers", + type=int, + default=None, + help="The number of processes to use for the preprocessing.", + ) + parser.add_argument( + "--overwrite_cache", + action="store_true", + help="Overwrite the cached training and evaluation sets", + ) + parser.add_argument( + "--no_keep_linebreaks", + action="store_true", + help="Do not keep line breaks when using TXT files.", + ) + parser.add_argument( + "--push_to_hub", + action="store_true", + help="Whether or not to push the model to the Hub.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--hub_token", type=str, help="The token to use to push to the Model Hub." + ) + parser.add_argument( + "--trust_remote_code", + action="store_true", + help=( + "Whether to trust the execution of code from datasets/models defined on the Hub." + " This option should only be set to `True` for repositories you trust and in which you have read the" + " code, as it will execute code present on the Hub on your local machine." + ), + ) + parser.add_argument( + "--checkpointing_steps", + type=str, + default=None, + help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--with_tracking", + action="store_true", + help="Whether to enable experiment trackers for logging.", + ) + parser.add_argument( + "--report_to", + type=str, + default="all", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' + ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' + "Only applicable when `--with_tracking` is passed." + ), + ) + parser.add_argument( + "--transform_cfg", + type=Path, + default=None, + help="Path to the transform configuration file.", + ) + parser.add_argument( + "--log_train_loss_steps", + type=int, + default=None, + help="Log training loss every n steps. If not provided, training loss will be logged at the end of each epoch.", + ) + parser.add_argument( + "--wandb_tags", + type=lambda s: s.split(","), + default=None, + help="A comma-separated list of tags to apply to the W&B run.", + ) + args = parser.parse_args() + + # Sanity checks + if ( + args.dataset_name is None + and args.train_file is None + and args.validation_file is None + ): + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if args.train_file is not None: + extension = args.train_file.split(".")[-1] + if extension not in ["csv", "json", "txt"]: + raise ValueError("`train_file` should be a csv, json or txt file.") + if args.validation_file is not None: + extension = args.validation_file.split(".")[-1] + if extension not in ["csv", "json", "txt"]: + raise ValueError("`validation_file` should be a csv, json or txt file.") + + if args.push_to_hub: + if args.output_dir is None: + raise ValueError( + "Need an `output_dir` to create a repo when `--push_to_hub` is passed." 
+ ) + + return args + + +def main(): + args = parse_args() + + transform_cfg = None + use_lora = False + fc_cfg = None + if args.transform_cfg is not None: + with open(args.transform_cfg, "rb") as f: + transform_cfg = tomllib.load(f) + use_lora = transform_cfg["use_lora"] + fc_cfg = FC_CFG | transform_cfg["fc"] + lora_cfg = LORA_CFG | transform_cfg["lora"] + if use_lora: + fc_cfg = fc_cfg | lora_cfg + + if use_lora: + + def set_trainable(model: torch.nn.Module): + trainable = [] + n_params = 0 + total_params = 0 + for n, p in model.named_parameters(): + if "lora_" in n.lower(): + p.requires_grad = True + trainable.append(n) + n_params += p.numel() + else: + p.requires_grad = False + total_params += p.numel() + logger.info( + f"Number of trainable parameters: {n_params:,} ({100 * n_params / total_params:.2f}%)\nTrainable parameters: {trainable}" + ) + else: + + def set_trainable(model: torch.nn.Module): + trainable = [] + total_params = 0 + for n, p in model.named_parameters(): + p.requires_grad = True + trainable.append(n) + total_params += p.numel() + logger.info( + f"Number of trainable parameters: {total_params:,}\nTrainable parameters: {trainable}" + ) + + # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. + # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers + # in the environment + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["project_dir"] = args.output_dir + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + **accelerator_log_kwargs, + ) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if accelerator.is_main_process: + if args.push_to_hub: + # Retrieve of infer repo_name + repo_name = args.hub_model_id + if repo_name is None: + repo_name = Path(args.output_dir).absolute().name + # Create repo and retrieve repo_id + api = HfApi() + repo_id = api.create_repo( + repo_name, exist_ok=True, token=args.hub_token + ).repo_id + + with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: + if "step_*" not in gitignore: + gitignore.write("step_*\n") + if "epoch_*" not in gitignore: + gitignore.write("epoch_*\n") + elif args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + accelerator.wait_for_everyone() + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called + # 'text' is found. You can easily tweak this behavior (see below). 
+ # + # In distributed training, the load_dataset function guarantee that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + raw_datasets = load_dataset( + args.dataset_name, + args.dataset_config_name, + trust_remote_code=args.trust_remote_code, + ) + if "validation" not in raw_datasets: + raw_datasets["validation"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[:{args.validation_split_percentage}%]", + trust_remote_code=args.trust_remote_code, + ) + raw_datasets["train"] = load_dataset( + args.dataset_name, + args.dataset_config_name, + split=f"train[{args.validation_split_percentage}%:]", + trust_remote_code=args.trust_remote_code, + ) + else: + data_files = {} + dataset_args = {} + if args.train_file is not None: + data_files["train"] = args.train_file + extension = args.train_file.split(".")[-1] + if args.validation_file is not None: + data_files["validation"] = args.validation_file + extension = args.validation_file.split(".")[-1] + if extension == "txt": + extension = "text" + dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks + raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) + # If no validation data is there, validation_split_percentage will be used to divide the dataset. + if "validation" not in raw_datasets: + raw_datasets["validation"] = load_dataset( + extension, + data_files=data_files, + split=f"train[:{args.validation_split_percentage}%]", + **dataset_args, + ) + raw_datasets["train"] = load_dataset( + extension, + data_files=data_files, + split=f"train[{args.validation_split_percentage}%:]", + **dataset_args, + ) + + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets. + + # Load pretrained model and tokenizer + # + # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. + if args.config_name: + config = AutoConfig.from_pretrained( + args.config_name, + trust_remote_code=args.trust_remote_code, + ) + elif args.model_name_or_path: + config = AutoConfig.from_pretrained( + args.model_name_or_path, + trust_remote_code=args.trust_remote_code, + ) + else: + config = CONFIG_MAPPING[args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + + if args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained( + args.tokenizer_name, + use_fast=not args.use_slow_tokenizer, + trust_remote_code=args.trust_remote_code, + ) + elif args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained( + args.model_name_or_path, + use_fast=not args.use_slow_tokenizer, + trust_remote_code=args.trust_remote_code, + ) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is not supported by this script. " + "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
+ ) + + if args.model_name_or_path: + model = AutoModelForCausalLM.from_pretrained( + args.model_name_or_path, + from_tf=bool(".ckpt" in args.model_name_or_path), + config=config, + trust_remote_code=args.trust_remote_code, + ) + else: + logger.info("Training new model from scratch") + model = AutoModelForCausalLM.from_config( + config, + trust_remote_code=args.trust_remote_code, + ) + + assert isinstance(model, LlamaForCausalLM), "model must be a LlamaForCausalLM model" + if transform_cfg is not None: + print("Transforming model...") + replaced_layers = transform_llama(model, fc_cfg, use_lora=use_lora) + print(f"Replaced {len(replaced_layers)} layers") + + # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch + # on a small vocab and want a smaller embedding size, remove this test. + embedding_size = model.get_input_embeddings().weight.shape[0] + if len(tokenizer) > embedding_size: + model.resize_token_embeddings(len(tokenizer)) + + # Preprocessing the datasets. + # First we tokenize all the texts. + column_names = raw_datasets["train"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + return tokenizer(examples[text_column_name]) + + with accelerator.main_process_first(): + tokenized_datasets = raw_datasets.map( + tokenize_function, + batched=True, + num_proc=args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + + if args.block_size is None: + block_size = tokenizer.model_max_length + if block_size > config.max_position_embeddings: + logger.warning( + f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " + f"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx." + ) + block_size = min(1024, config.max_position_embeddings) + else: + if args.block_size > tokenizer.model_max_length: + logger.warning( + f"The block_size passed ({args.block_size}) is larger than the maximum length for the model " + f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." + ) + block_size = min(args.block_size, tokenizer.model_max_length) + + # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, and if the total_length < block_size we exclude this batch and return an empty dict. + # We could add padding if the model supported it instead of this drop, you can customize this part to your needs. + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder + # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower + # to preprocess. + # + # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: + # https://huggingface.co/docs/datasets/process#map + + with accelerator.main_process_first(): + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + num_proc=args.preprocessing_num_workers, + load_from_cache_file=not args.overwrite_cache, + desc=f"Grouping texts in chunks of {block_size}", + ) + + train_dataset = lm_datasets["train"] + eval_dataset = lm_datasets["validation"] + + # Log a few random samples from the training set: + for index in random.sample(range(len(train_dataset)), 3): + logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") + + # DataLoaders creation: + train_dataloader = DataLoader( + train_dataset, + shuffle=True, + collate_fn=default_data_collator, + batch_size=args.per_device_train_batch_size, + ) + eval_dataloader = DataLoader( + eval_dataset, + collate_fn=default_data_collator, + batch_size=args.per_device_eval_batch_size, + ) + + # Optimizer + # Split weights in two groups, one with weight decay and the other not. + no_decay = ["bias", "layer_norm.weight"] + set_trainable(model) + optimizer_grouped_parameters = [ + { + "params": [ + p + for n, p in model.named_parameters() + if not any(nd in n for nd in no_decay) and p.requires_grad + ], + "weight_decay": args.weight_decay, + }, + { + "params": [ + p + for n, p in model.named_parameters() + if any(nd in n for nd in no_decay) and p.requires_grad + ], + "weight_decay": 0.0, + }, + ] + optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil( + len(train_dataloader) / args.gradient_accumulation_steps + ) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, + num_training_steps=args.max_train_steps + if overrode_max_train_steps + else args.max_train_steps * accelerator.num_processes, + ) + + # Prepare everything with our `accelerator`. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = ( + accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + ) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil( + len(train_dataloader) / args.gradient_accumulation_steps + ) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # Figure out how many steps we should save the Accelerator states + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. 
+ if args.with_tracking: + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config[ + "lr_scheduler_type" + ].value + experiment_config["bitflip_config"] = { + "use_lora": use_lora, + "fc_cfg": fc_cfg, + } + accelerator.init_trackers( + "Bitflip-CLM-Fine-tune", + experiment_config, + init_kwargs={ + "wandb": { + "name": args.output_dir.split("/")[-1], + "tags": args.wandb_tags if args.wandb_tags is not None else [], + }, + }, + ) + + # Train! + total_batch_size = ( + args.per_device_train_batch_size + * accelerator.num_processes + * args.gradient_accumulation_steps + ) + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info( + f" Instantaneous batch size per device = {args.per_device_train_batch_size}" + ) + logger.info( + f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}" + ) + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + # Only show the progress bar once on each machine. + progress_bar = tqdm( + range(args.max_train_steps), disable=not accelerator.is_local_main_process + ) + completed_steps = 0 + starting_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": + checkpoint_path = args.resume_from_checkpoint + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] + dirs.sort(key=os.path.getctime) + path = dirs[ + -1 + ] # Sorts folders by date modified, most recent checkpoint is the last + checkpoint_path = path + path = os.path.basename(checkpoint_path) + + accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") + accelerator.load_state(checkpoint_path) + # Extract `epoch_{i}` or `step_{i}` + training_difference = os.path.splitext(path)[0] + + if "epoch" in training_difference: + starting_epoch = int(training_difference.replace("epoch_", "")) + 1 + resume_step = None + completed_steps = starting_epoch * num_update_steps_per_epoch + else: + # need to multiply `gradient_accumulation_steps` to reflect real steps + resume_step = ( + int(training_difference.replace("step_", "")) + * args.gradient_accumulation_steps + ) + starting_epoch = resume_step // len(train_dataloader) + completed_steps = resume_step // args.gradient_accumulation_steps + resume_step -= starting_epoch * len(train_dataloader) + + # update the progress_bar if load from checkpoint + progress_bar.update(completed_steps) + + # Initialize step-based loss tracking + if args.with_tracking and args.log_train_loss_steps is not None: + step_losses = [] + + for epoch in range(starting_epoch, args.num_train_epochs): + model.train() + if args.with_tracking and args.log_train_loss_steps is None: + total_loss = 0 + if ( + args.resume_from_checkpoint + and epoch == starting_epoch + and resume_step is not None + ): + # We skip the first `n` batches in the dataloader when resuming from a checkpoint + active_dataloader = accelerator.skip_first_batches( + train_dataloader, resume_step + ) + else: + active_dataloader = train_dataloader + for step, batch in enumerate(active_dataloader): + with accelerator.accumulate(model): + outputs = 
model(**batch) + loss = outputs.loss + # We keep track of the loss + if args.with_tracking: + if args.log_train_loss_steps is not None: + # Step-based logging + step_losses.append(loss.detach().float()) + else: + # Epoch-based logging (original behavior) + total_loss += loss.detach().float() + accelerator.backward(loss) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + completed_steps += 1 + + # Log training loss every n steps if specified + if ( + args.with_tracking + and args.log_train_loss_steps is not None + and completed_steps % args.log_train_loss_steps == 0 + and len(step_losses) > 0 + ): + avg_train_loss = torch.mean(torch.stack(step_losses)).item() + accelerator.log( + { + "train_loss": avg_train_loss, + "step": completed_steps, + }, + step=completed_steps, + ) + logger.info( + f"Step {completed_steps}: train_loss: {avg_train_loss:.4f}" + ) + step_losses = [] # Reset for next interval + + if isinstance(checkpointing_steps, int): + if ( + completed_steps % checkpointing_steps == 0 + and accelerator.sync_gradients + ): + output_dir = f"step_{completed_steps}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + if completed_steps >= args.max_train_steps: + break + + model.eval() + losses = [] + for step, batch in enumerate(eval_dataloader): + with torch.no_grad(): + outputs = model(**batch) + + loss = outputs.loss + losses.append( + accelerator.gather_for_metrics( + loss.repeat(args.per_device_eval_batch_size) + ) + ) + if step > 64: + break + + losses = torch.cat(losses) + try: + eval_loss = torch.mean(losses) + perplexity = math.exp(eval_loss) + except OverflowError: + perplexity = float("inf") + + logger.info(f"epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}") + + if args.with_tracking: + log_dict = { + "perplexity": perplexity, + "eval_loss": eval_loss, + "epoch": epoch, + "step": completed_steps, + } + # Only log epoch-based train_loss if not using step-based logging + if args.log_train_loss_steps is None: + log_dict["train_loss"] = total_loss.item() / len(train_dataloader) + + accelerator.log(log_dict, step=completed_steps) + + if args.push_to_hub and epoch < args.num_train_epochs - 1: + accelerator.wait_for_everyone() + unwrapped_model = accelerator.unwrap_model(model) + unwrapped_model.save_pretrained( + args.output_dir, + is_main_process=accelerator.is_main_process, + save_function=accelerator.save, + ) + if accelerator.is_main_process: + tokenizer.save_pretrained(args.output_dir) + api.upload_folder( + commit_message=f"Training in progress epoch {epoch}", + folder_path=args.output_dir, + repo_id=repo_id, + repo_type="model", + token=args.hub_token, + ) + + if args.checkpointing_steps == "epoch": + output_dir = f"epoch_{epoch}" + if args.output_dir is not None: + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + + if args.output_dir is not None: + accelerator.wait_for_everyone() + unwrapped_model = accelerator.unwrap_model(model) + unwrapped_model.save_pretrained( + args.output_dir, + is_main_process=accelerator.is_main_process, + save_function=accelerator.save, + ) + if accelerator.is_main_process: + tokenizer.save_pretrained(args.output_dir) + if args.push_to_hub: + api.upload_folder( + commit_message="End of training", + folder_path=args.output_dir, + repo_id=repo_id, + 
repo_type="model", + token=args.hub_token, + ) + with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: + json.dump({"perplexity": perplexity}, f) + + # Log any remaining step losses that haven't been logged yet + if ( + args.with_tracking + and args.log_train_loss_steps is not None + and len(step_losses) > 0 + ): + avg_train_loss = torch.mean(torch.stack(step_losses)).item() + accelerator.log( + { + "train_loss": avg_train_loss, + "step": completed_steps, + }, + step=completed_steps, + ) + logger.info( + f"Final Step {completed_steps}: train_loss: {avg_train_loss:.4f} (remaining {len(step_losses)} steps)" + ) + + accelerator.wait_for_everyone() + accelerator.end_training() + + +if __name__ == "__main__": + main() diff --git a/experiments/llm-bitflip/lora_finetune/transform_cfg.toml b/experiments/llm-bitflip/lora_finetune/transform_cfg.toml new file mode 100644 index 0000000..d19d1df --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/transform_cfg.toml @@ -0,0 +1,13 @@ +use_lora = true + +[fc] + w_p_exp = 1.52587890625e-05 + w_p_frac = 1.52587890625e-05 + w_zero_out_t = 1.25 + x_p_exp = 1.52587890625e-05 + x_p_frac = 1.52587890625e-05 + x_zero_out_t = 30.0 + +[lora] + r = 32 + lora_alpha = 32 diff --git a/src/aixsim_models/bitflip/fine_tune/bitflip_llama.py b/src/aixsim_models/bitflip/fine_tune/bitflip_llama.py new file mode 100644 index 0000000..0ab2d80 --- /dev/null +++ b/src/aixsim_models/bitflip/fine_tune/bitflip_llama.py @@ -0,0 +1,45 @@ +import torch +from mase_triton.random_bitflip.layers import RandomBitFlipLinear +from torch import nn +from transformers.models.llama.modeling_llama import LlamaForCausalLM + +from ...utils.torch_module import set_layer_by_name +from .bitflip_lora import BitFlipLinearLora + + +def transform_llama( + model: LlamaForCausalLM, + fc_config: dict, + use_lora: bool, +) -> list[str]: + """Replace all Linear layers (except lm_head) with bitflip-aware layers. + + Args: + model: A LlamaForCausalLM model. + fc_config: Config dict passed to BitFlipLinearLora.from_linear or + RandomBitFlipLinear.from_linear. When use_lora is True, this should + include both bitflip params and lora params (r, lora_alpha). + use_lora: If True, use BitFlipLinearLora; otherwise use RandomBitFlipLinear. + + Returns: + List of replaced layer names. + """ + assert isinstance(model, LlamaForCausalLM) + replaced_layers = [] + + for name, layer in model.named_modules(): + if not isinstance(layer, nn.Linear): + continue + + if "lm_head" in name: + continue + + if use_lora: + new_layer = BitFlipLinearLora.from_linear(layer, **fc_config) + else: + new_layer = RandomBitFlipLinear.from_linear(layer, **fc_config) + + set_layer_by_name(model, name, new_layer) + replaced_layers.append(name) + + return replaced_layers diff --git a/src/aixsim_models/bitflip/fine_tune/bitflip_lora.py b/src/aixsim_models/bitflip/fine_tune/bitflip_lora.py new file mode 100644 index 0000000..ab1b704 --- /dev/null +++ b/src/aixsim_models/bitflip/fine_tune/bitflip_lora.py @@ -0,0 +1,160 @@ +import math + +import torch +from mase_triton.random_bitflip.core import random_bitflip_fn +from mase_triton.random_bitflip.layers import RandomBitFlipLinear +from torch import Tensor, nn + + +class BitFlipLinearLora(RandomBitFlipLinear): + """RandomBitFlipLinear with LoRA adaptation. + + Forward: Y = bitflip(X) @ bitflip(W + B @ A * scaling)^T + bias + Only lora_A and lora_B are trainable during fine-tuning. 
+ """ + + def __init__( + self, + in_features: int, + out_features: int, + bias: bool, + device, + dtype, + x_p_exp: float | None, + x_p_frac: float | None, + x_zero_out_t: float | None, + w_p_exp: float | None, + w_p_frac: float | None, + w_zero_out_t: float | None, + x_seed_exp: int = 0, + x_seed_frac: int = 0, + w_seed_exp: int = 0, + w_seed_frac: int = 0, + r: int = 32, + lora_alpha: int = 32, + ) -> None: + super().__init__( + in_features, + out_features, + bias, + device, + dtype, + x_p_exp=x_p_exp, + x_p_frac=x_p_frac, + x_zero_out_t=x_zero_out_t, + w_p_exp=w_p_exp, + w_p_frac=w_p_frac, + w_zero_out_t=w_zero_out_t, + x_seed_exp=x_seed_exp, + x_seed_frac=x_seed_frac, + w_seed_exp=w_seed_exp, + w_seed_frac=w_seed_frac, + ) + self.r = r + self.lora_alpha = lora_alpha + self.scaling = self.lora_alpha / self.r if r > 0 else 1 + + if r > 0: + self.lora_A = nn.Parameter( + torch.zeros((r, in_features), device=device, dtype=dtype) + ) + self.lora_B = nn.Parameter( + torch.zeros((out_features, r), device=device, dtype=dtype) + ) + nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5)) + nn.init.zeros_(self.lora_B) + else: + self.register_parameter("lora_A", None) + self.register_parameter("lora_B", None) + self.r: int + self.lora_A: nn.Parameter | None + self.lora_B: nn.Parameter | None + self.scaling: float + + def forward(self, x: Tensor) -> Tensor: + # 1. Apply input bitflip (if configured) + if not (self.x_p_exp is None and self.x_p_frac is None and self.x_zero_out_t is None): + x, x_seed_exp, x_seed_frac = random_bitflip_fn( + x, + exp_halves=self.x_nearest_exp_halves, + frac_halves=self.x_nearest_frac_halves, + seed_exp=self.x_seed_exp, + seed_frac=self.x_seed_frac, + zero_out_threshold=self.x_zero_out_t, + ) + self.x_seed_exp = x_seed_exp + self.x_seed_frac = x_seed_frac + + # 2. Compute adapted weight: W + B @ A * scaling + w = self.weight + if self.r > 0: + w = w + (self.lora_B @ self.lora_A) * self.scaling + + # 3. Apply weight bitflip + if self.w_p_exp is None and self.w_p_frac is None and self.w_zero_out_t is None: + pass + else: + w, w_seed_exp, w_seed_frac = random_bitflip_fn( + w, + exp_halves=self.w_nearest_exp_halves, + frac_halves=self.w_nearest_frac_halves, + seed_exp=self.w_seed_exp, + seed_frac=self.w_seed_frac, + zero_out_threshold=self.w_zero_out_t, + ) + self.w_seed_exp = w_seed_exp + self.w_seed_frac = w_seed_frac + + # 4. 
Linear transformation + return torch.nn.functional.linear(x, w, self.bias) + + @classmethod + def from_linear( + cls, + linear: torch.nn.Linear, + x_p_exp: float | None, + x_p_frac: float | None, + x_zero_out_t: float | None, + w_p_exp: float | None, + w_p_frac: float | None, + w_zero_out_t: float | None, + x_seed_exp: int = 0, + x_seed_frac: int = 0, + w_seed_exp: int = 0, + w_seed_frac: int = 0, + r: int = 32, + lora_alpha: int = 32, + ) -> "BitFlipLinearLora": + new_fc = cls( + linear.in_features, + linear.out_features, + linear.bias is not None, + linear.weight.device, + linear.weight.dtype, + x_p_exp=x_p_exp, + x_p_frac=x_p_frac, + x_zero_out_t=x_zero_out_t, + w_p_exp=w_p_exp, + w_p_frac=w_p_frac, + w_zero_out_t=w_zero_out_t, + x_seed_exp=x_seed_exp, + x_seed_frac=x_seed_frac, + w_seed_exp=w_seed_exp, + w_seed_frac=w_seed_frac, + r=r, + lora_alpha=lora_alpha, + ) + with torch.no_grad(): + if linear.weight.device != torch.device("meta"): + new_fc.weight.copy_(linear.weight) + if linear.bias is not None: + new_fc.bias.copy_(linear.bias) + return new_fc + + def merge_lora(self) -> None: + if self.r > 0: + with torch.no_grad(): + self.weight += (self.lora_B @ self.lora_A) * self.scaling + self.lora_A = None + self.lora_B = None + self.r = 0 From 6a70169a824b3590c554958a2f2798f1f638b4d7 Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Sat, 28 Feb 2026 20:37:01 +0000 Subject: [PATCH 3/7] docs --- .../clm-bitflip-lora-finetune.md | 212 ++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 213 insertions(+) create mode 100644 docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md diff --git a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md new file mode 100644 index 0000000..09bede7 --- /dev/null +++ b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md @@ -0,0 +1,212 @@ +# Bitflip-Aware LoRA Fine-Tuning + +This tutorial walks through how to run bitflip-aware LoRA fine-tuning on a pretrained LLM (e.g., `unsloth/Llama-3.1-8B`) using our custom training script. + +## Overview + +Bitflip-aware LoRA fine-tuning combines two ideas: + +1. **Random Bitflip Simulation** — During the forward pass, random bit flips are injected into both activations and weights of every linear layer (except `lm_head`). This emulates hardware-level bit errors that occur in approximate or unreliable compute substrates. +2. **Low-Rank Adaptation (LoRA)** — Instead of fine-tuning all parameters, we attach small low-rank matrices (`lora_A`, `lora_B`) to each linear layer and only train those. The original pretrained weights are frozen. + +By fine-tuning with bitflip noise injected during training, the LoRA adapters learn to compensate for hardware-induced errors, making the model more resilient at inference time. + +### How It Works + +Each `nn.Linear` layer in the model is replaced by a [`BitFlipLinearLora`](https://github.com/AICrossSim/NewComputeBench/blob/master/src/aixsim_models/bitflip/fine_tune/bitflip_lora.py) layer. The forward pass of `BitFlipLinearLora` performs the following: + +``` +Y = bitflip(X) @ bitflip(W + B @ A * scaling)^T + bias +``` + +where: + +- `X` is the input activation (with optional bitflip noise). +- `W` is the frozen pretrained weight. +- `A` (`lora_A`) and `B` (`lora_B`) are the trainable low-rank matrices. +- `scaling = lora_alpha / r` controls the magnitude of the LoRA update. 
+- `bitflip(·)` applies random bit flips to the sign-exponent and mantissa bits of the FP32 representation, controlled by per-component probabilities. + +The model transformation is handled by the [`transform_llama`](https://github.com/AICrossSim/NewComputeBench/blob/master/src/aixsim_models/bitflip/fine_tune/bitflip_llama.py) function, which iterates over all `nn.Linear` modules in the model (excluding `lm_head`) and replaces them with `BitFlipLinearLora`. + +### Entry Points + +| File | Description | +|------|-------------| +| [`experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py) | Main training script (HuggingFace Accelerate-based, no Trainer) | +| [`experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh) | Shell wrapper that computes training steps and launches the run | +| [`experiments/llm-bitflip/lora_finetune/transform_cfg.toml`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/transform_cfg.toml) | Bitflip + LoRA configuration file | + +## Step-by-Step Guide + +!!! info "Environment Setup" + + If you have not set up environments, please follow the guidelines in [Environment Setup](../env-setup.md). + +### 1. Configure the Bitflip & LoRA Transform + +The transform configuration is defined in a TOML file. Here is the default configuration at [`experiments/llm-bitflip/lora_finetune/transform_cfg.toml`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/transform_cfg.toml): + +```toml +use_lora = true + +[fc] + w_p_exp = 1.52587890625e-05 + w_p_frac = 1.52587890625e-05 + w_zero_out_t = 1.25 + x_p_exp = 1.52587890625e-05 + x_p_frac = 1.52587890625e-05 + x_zero_out_t = 30.0 + +[lora] + r = 32 + lora_alpha = 32 +``` + +**Configuration parameters:** + +| Section | Parameter | Description | +|---------|-----------|-------------| +| (top-level) | `use_lora` | Enable LoRA adaptation (`true`/`false`). When `false`, all parameters are trained. | +| `[fc]` | `w_p_exp` | Bitflip probability for the sign-exponent bits of the **weight**. | +| `[fc]` | `w_p_frac` | Bitflip probability for the mantissa bits of the **weight**. | +| `[fc]` | `w_zero_out_t` | Threshold for zeroing out weight outliers / NaN values. | +| `[fc]` | `x_p_exp` | Bitflip probability for the sign-exponent bits of the **activation**. | +| `[fc]` | `x_p_frac` | Bitflip probability for the mantissa bits of the **activation**. | +| `[fc]` | `x_zero_out_t` | Threshold for zeroing out activation outliers / NaN values. | +| `[lora]` | `r` | LoRA rank. | +| `[lora]` | `lora_alpha` | LoRA scaling factor (effective scaling = `lora_alpha / r`). | + +!!! note "Bitflip probability" + The bitflip probability must be a power of 0.5 (e.g., `0.5^16 ≈ 1.526e-05`). The kernel automatically snaps to the nearest valid value. Due to limitations of the Philox PRNG, the minimum supported probability is `0.5^24 ≈ 5.96e-08`. See the [mase-triton docs](../02-model-behaviour-level-simulation/mase-triton.md) for more details. + +### 2. 
Understand the Training Budget + +The shell script [`fine-tune-bitflip-clm.sh`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/fine-tune-bitflip-clm.sh) automatically calculates the number of training steps based on a budget of **1% of the model's parameter count in tokens**. For `unsloth/Llama-3.1-8B` (8B parameters): + +``` +fine-tune tokens = 8,000,000,000 / 100 = 80,000,000 tokens +tokens per step = num_gpus × per_device_batch_size × block_size +max_train_steps = fine-tune tokens / tokens per step +``` + +For example, with 8 GPUs, batch size 1, and block size 2048: + +``` +tokens per step = 8 × 1 × 2048 = 16,384 +max_train_steps = 80,000,000 / 16,384 ≈ 4,883 steps +``` + +### 3. Launch the Fine-Tuning + +```bash +cd experiments/llm-bitflip/lora_finetune +``` + +The script accepts positional arguments to override defaults: + +```bash +./fine-tune-bitflip-clm.sh [num_processes] [model_name_or_path] [per_device_train_batch_size] [learning_rate] [weight_decay] [gradient_accumulation_steps] [block_size] +``` + +**Example: Fine-tune Llama-3.1-8B on 8 GPUs with default settings** + +```bash +./fine-tune-bitflip-clm.sh 8 unsloth/Llama-3.1-8B 1 1e-5 0.01 2 2048 +``` + +This is equivalent to running the underlying command directly: + +```bash +uv run accelerate launch --num_processes=8 \ + run_clm_no_trainer.py \ + --model_name_or_path unsloth/Llama-3.1-8B \ + --dataset_name Cheng98/fineweb-edu-1.25B \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --learning_rate 1e-5 \ + --weight_decay 0.01 \ + --num_train_epochs 1 \ + --gradient_accumulation_steps 2 \ + --lr_scheduler_type linear \ + --output_dir ./output/Llama-3.1-8B-bitflip-lora \ + --preprocessing_num_workers 32 \ + --trust_remote_code \ + --with_tracking \ + --report_to wandb \ + --transform_cfg ./transform_cfg.toml \ + --block_size 2048 \ + --log_train_loss_steps 50 \ + --max_train_steps 4883 \ + --wandb_tags unsloth/Llama-3.1-8B,lr1e-5,steps4883 +``` + +**Key arguments:** + +| Argument | Description | +|----------|-------------| +| `--model_name_or_path` | HuggingFace model identifier or local path. | +| `--dataset_name` | Training dataset. We use a 1.25B-token subset of [FineWeb-Edu](https://huggingface.co/datasets/Cheng98/fineweb-edu-1.25B). | +| `--transform_cfg` | Path to the TOML config for bitflip + LoRA. | +| `--block_size` | Context length for training samples. | +| `--log_train_loss_steps` | Log training loss to W&B every N steps. | +| `--max_train_steps` | Total number of optimizer steps (auto-calculated by the shell script). | + +!!! tip "Adjusting GPU count" + The first argument to `fine-tune-bitflip-clm.sh` controls `--num_processes` for `accelerate launch`. The script automatically recalculates `max_train_steps` to maintain the same total token budget regardless of the number of GPUs. + +### 4. Monitor Training + +If you have W&B set up (`wandb login`), training loss and validation perplexity are logged automatically. The training logs to the W&B project `Bitflip-CLM-Fine-tune`. + +- **Training loss** is logged every 50 steps (configurable via `--log_train_loss_steps`). +- **Validation perplexity** is evaluated at the end of each epoch on the first 64 batches of the validation set. + +### 5. 
Output + +After training completes, the fine-tuned model (with LoRA weights merged into the base model) and tokenizer are saved to the output directory: + +``` +./output/Llama-3.1-8B-bitflip-lora/ +├── config.json +├── model.safetensors +├── tokenizer.json +├── tokenizer_config.json +└── all_results.json # Final perplexity +``` + +## Results + +!!! warning "Results Pending" + The following results are placeholders and will be updated once experiments complete. + +### Training Curves + + + +| Metric | Value | +|--------|-------| +| Final Training Loss | *TBD* | +| Final Validation Perplexity | *TBD* | +| Total Training Steps | *TBD* | +| Training Time | *TBD* | +| Environment | *TBD* | + +### Comparison: Pre vs. Post Fine-Tuning + + + +| Model | Bitflip Config | WikiText PPL (no bitflip) | WikiText PPL (with bitflip) | +|-------|---------------|---------------------------|----------------------------| +| `unsloth/Llama-3.1-8B` (baseline) | N/A | *TBD* | *TBD* | +| `unsloth/Llama-3.1-8B` + LoRA fine-tune | `w/x_p_exp=1.53e-5, w/x_p_frac=1.53e-5` | *TBD* | *TBD* | + +### Resources + + + +| Resource | Link | +|----------|------| +| W&B Logs | *TBD* | +| HuggingFace Checkpoint | *TBD* | +| Training Config | [`transform_cfg.toml`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/transform_cfg.toml) | diff --git a/mkdocs.yml b/mkdocs.yml index f3abbab..67e49e8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -66,6 +66,7 @@ nav: - Model Behaviour Level Simulation: - Random BitFlip: - LLM: "02-model-behaviour-level-simulation/clm-bitflip.md" + - LoRA Fine-Tuning: "02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md" - Optical Neural Networks: - RoBERTa: "02-model-behaviour-level-simulation/roberta-onn.md" - CLM: "02-model-behaviour-level-simulation/clm-onn.md" From 44b88254627766ff454d8dda0d37b94de269aa4a Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Mon, 2 Mar 2026 12:13:21 +0000 Subject: [PATCH 4/7] wip --- .../clm-bitflip-lora-finetune.md | 2 + .../bitflip/bitflip-lora-train-loss.png | Bin 0 -> 53362 bytes .../lora_finetune/fine-tune-lora-baseline.sh | 104 ++++++++++++++++++ .../lora_finetune/transform_cfg_baseline.toml | 8 ++ 4 files changed, 114 insertions(+) create mode 100644 docs/images/bitflip/bitflip-lora-train-loss.png create mode 100755 experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh create mode 100644 experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml diff --git a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md index 09bede7..4ad90d6 100644 --- a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md +++ b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md @@ -182,6 +182,8 @@ After training completes, the fine-tuned model (with LoRA weights merged into th ### Training Curves +![Bitflip LoRA Fine-Tuning Curves](../images/bitflip/bitflip-lora-train-loss.png) + | Metric | Value | diff --git a/docs/images/bitflip/bitflip-lora-train-loss.png b/docs/images/bitflip/bitflip-lora-train-loss.png new file mode 100644 index 0000000000000000000000000000000000000000..eabfde2e7afd29ea8f187dd54e0944bf16bb190c GIT binary patch literal 53362 zcmeGEc|4U}_%{qAX_rJ1$y^yC6dB4qr4o`kV!;Y7aU$-@VuLfft$AD3pdYaE>>jP z4lkXaxw=`IF!fA!pEexGG{PZb)%FTj7s1bTGhS01yg{3`~2CHsCq|1Y$X z?0Y09^rPe5|N8?{L3T`=TotarS4MAzpDgeS4~#7E<@}R*f}tR~SZj^bkF3~F)x-#T5T 
za2&+RKTEjuX`j`E^w8q@=_tWo{6hb?&`2t2J2m2bI*a$*A9W#B`{p&9~S8`N1=$x>uwZ?6D3u2e1_7ESrwEl5lM9iZim+#$&Fj2X6f@2!YecElqns$h zW-M34o$p^Z`$8o`w_d%S!Dk7`9!?1xUr>a}nIMFM=?Zb{lhoiq zz!**-e5QCng_$aL~++y6u>7Mc7=+3_G5lpzr9?zF4zizU#=V-SyKwpn9W~ zHa!j7p6uBE_WXLIIQmbJU@u2Ke$uw_rpuy2);H6g8e@4fvXtj<$jJB6@S(j1%?7cG1k<4nA*fII=tMzDGfVVER~`?LJYV@amU$DTGiA(3U50WpdKfmSERa-(+QWX)5_+a)ga3 zCmF*VXieKs!J~NmX2`FNLb>^T6|Ugor8bKm3NOEI;6##`{F6llJEYjmx-|nI}Nx5&il&>?)&E_xkZ=E zd<30%sG!+VkQ&16A-`=Zf-_j~8Iap^omq3T^hQ_15?WIh*H{!0=kvEF9oH$B!lLMf zb4@p7Qal(~w1_<2Wkq?)m$|au@ld}Fw^l09O-&i7tpPBZ!E`;8c+2yg8H_8~-8ORc zCIWc+1j^O=;KQNEL*r5r&+@qykHBHzW*LsPE7~*ud&#Gws3a}V$iwD_OwPI%9WGVB zrwS*moKo$OA`8r6uqqo<93carlq;1~Wb$J^I9Dk4cwH9FHY1=!trA|-@8QesLg-|f|HJ}q4)@c_S3u`?dtp9s2ylz}^eb?B2 z2kpud<(7pm5cV(e^_uig8?U!_MZoO?CCX2%`8t})p(^X* zsu)YdtPviIA#JW^J#xf%Ep`WU`&E^KbykZXI-}guO=zc`i2gMfv0;lCza`wZ1T9yv z)1cHNOKb>s_QUsTeS)B-~Fk(ofl5*(M=ZdbSOC5`=ygcjHFOA**+S) z_*N)0K?bN)BG^c8x=eaOZwHR>>9ZdZ|4?T$&*lmD#@})%-;Tthq-+mmJX6!_db{(i zKTIzVwGcx%OyTPduE^S@wZk@(S=?e(0kir!kBWv%zqpGCJBypE!-r+(;C*k?4ZVA< zBe8-SIqx|RTBVSLo$%$E$`{_YLg%ZWDP_!ak)5-!1!4)gx2!Wo2z4@z=)ZlPfo?8( zW#8c6{>Iie$|DQicp^hQZJol#(7<#0^$9k5;~_5<{^2`b`Hx<|T*k4CLn*HgYA#iCF(FgeHygP-=l^Rbd`3>LVfR~%VC`phT$3=a ztHbx69-*LfxxxYd(jsw_hMbQaxP92v-{I+L_;e~;2m4~uXnyW*v2HDsmTNN>j}ZQn zMK6E4pThc1$W9nK+dnsVN9tF!PxR8{iCd*na|Mrv^)*mIK0fT}#Dp#J-I3DYylnS~ zn|EY#y0jQs|49=&QH6vTRw8TPPd~TEI9cSbjaIQ}dt2l_yOz6RitU<4w}!;^0M8BH zO=2|eljv(sZ2AiVXt7c*qGn>$p4x(aJa3bhUatN0%t}u8gkI<*z3kUN&LfDU$ctid zk?~v3XE#@;;1x)LR4j&VF{!mSk+@l^SvlA%TPUSr{;2IBkE6g5xFU8*7(Q@BQ=^Bo zs>-jwh1)tZnoyK1Z-6)dp;fYJKsJQJ2jTBFSqK5TK4XH*nz>!sc}2Ethvo2*puF)o z6VD+^eS5E+#?5_Hy65jY$h(tW3r>YP2*Ch7=fnG|_ySKk_Ftt&dZsvoLd`kCx_-rU z`BLsc+obhITDWuXtFF|j7hM)baH0)$Gnz(XrWl3y?5;y8K+I+-}+)a31kg|yPujvytS4x>+o zD><+w8x9RiZa+Dk<(H^@{`1W`k6gi*?b?${o(>b(F?C<1AGIFs34b#r0F}ha_+>G{ zgS}fupD=)MLDA}LY-*nPW`nEkC6flZs=Pd_pKm5u6h|kVt&d=|)+y7kKl(mcF|K%t z%B>u9FW@S#z*pzapw5S|rw~oWC1eYeJzZlm|YiAHJzeD${%*;q^w^B^MBE%^q;{6sjX*q9kTfA)hzE;sP)Jl zrM#&e!c5oa63la00=)1vm=OIc(Q^MoVe9G27gn~gXV{SBsI4Mb19FUi%Sm!FrFwQRe>j{yHEx!g zXXfthU_bfTS3yM*Sm|-8izX}_#MpQW35cZ-^RtYDb_el92 zh_9^dbzrGB|M^DlL5t!g-1g}CBM|KIH~!$Ot~G3y^}0PO#+pZ<8&0HN6w_zzqP|>Y zo4dBrBQ3Tw(K;9r6uy-ulcF><{2_mWC4Mkuxj&UF`0u#SpR{+ke3|dD>&&|^FFgst z_YU!>ihOPrcrv%z&?d=gUj8R8WbIQ$!16e<_&M^YP+;6!_vL-Gocq@ijw@OwQUZNW zPp7)Fa1sK2AAiWMO@GKrLf|StMHLyv_uA5|R`x}1Uu0T{%Z7k0WH|Ypskm(quGk-2 zNC%478Oia>#kel3xj!<8okg|Q-5tKeDTfQOo-_46BMbafUTsqnMd_XsNti}>0Bk*- zU1}LVCMbY1EZZ4%yhX%eA~qAv5ZsQUIMp;=*s@vu6=}j~A1%XZg|mO{er-6dP+z>6 z`#{*9f1aw@6C!~qyQ34eQzY6LxEQKTtLi<=5^%7%Z!t{~_%Gus*_Tn>263(Z@!vX# z&e0qKFYAS3+xw(G)&wD|M>Z_NkMK z5{zcoVP?X4xw5drS37jZdb_4wJsB-mAyK{;=`kB6m;1%SNm56@aa`QYsP7WhI3}cp zUdR2QnOT4IASbw6x8$$rAiJU(>{OJZb>-G7fF~UN^g5gD_CT2haS=UZ;#%AcPd_zDkT)_ zp?4kY7bQk!juu*NqpU6Q8JNVXxyF4EDu&aS71vD!gpK`C-SC>ZU#DBAQsMK0h?;<* zxJuh(Q+7eSOjG>u;r=shW4zKItk1z7_#Ho8Nf&9{l$JWtj5|~hMObD&KSvz7(?QR^ zTzsk#6hl>&6{eVJ`b*icrt4Q>WM2w4%j(YMU)l_w5qG*Gj0D9`nZS?}HhS>q0J$QJ z>xQi7&-3SC(p>XP3!b5Lw#<0t+6?p?Hvi2AE>`ZxHuep8SYDCR$Qsuk`^wpif}u07 z{)M_mkQ1`5D`AJ@8%Iq(O=zAUAD5L{Ix#SL^|B21OASAx;#7N@&*_;|8;SK_ z#t#-Y_Jpmq&~ODKD|%V$Lg5jE|C9|777Is}T}LN$)p(w!$yD6go%~rPnF%7Z^@P9e z#6A8=vC)Kw?Cho~P0h^qzZ`9irdW*n1}`bDTw-OfvB!+}^aY*0tN+0+(k3V8&nA-% zK%$NA2-$DNRoH5dPJ<6>uR_iIl{2vKSeP{$DmPdAD$$@4lqu0Wj7L{|xIIx-5OB18 zx@fF%Zb<;e_Huq^e>S(W_+J+_Edi%odAiGNFMDfaCdwG$ykBNtgN?ud>FsSzN^JfG zn!D))ID}2R(^s`c+4)JHUx%07!l)E(6UMV>sqJc=RQxA$d7}=a+i~lxtFM=jU`y%n z3FosSlwWSTQIdUrJGXB!-U3CKiR~-rOk01?k@1Ec0mk9fs#SCCBX4yY0jgS!*;bnF zc+_~M&p_o6SMc~k`qy)Q>sz*HJ(H4aH68<%LFu7UELQ18R2=;Q>4zKHUBQAFLF#3b 
zI`0W4iA6&6XxPbJQ%`Ao9>2PqE*d|#rBhmU zTCyiyVRN&Dw{(mWL%4%kn`W@QeYbKy^drkjiKm-?%=-S~RzrGpvox>&)(h5AQ!+3% ztMD25q8(rLp-cXfM>qN+u=!T;kjcxH2 zOgHLWn)lZwg6ZO>cBV0i17TjLR5S#OfKK*=O{L`0r}rzdYQwMw*C z+0W1`Y8RHx&hlf1>o_>1REWCj`ZE=N%dr?U1b^^HshZg#Yy>l-*I+ZXUKcgo8?!$_ zYbvg#?LROyQZenhjvpyXnw5nRyU+)X(icveB?$bA(VxaI0?_VsFue0@9zPmiW$N2) zKR2CRR4)}w$3yT{f<1deyh|^)74)Ecl*k;3ayvn$k;<1WGw!Ve&XJ)XJSXvs=2|pO z56Vb=v5t3<{|_JTphF>p8eOri+4AnDQQ4SX1~ob8*sb2+kFLO=ijI0q=n4pbW^{FR zF4i)Fy%Q?KzFp>7Y41Vcf9$dPBi5olv-wm0I8IJOT^7a9 z>?;5OprsmM>HD>DIz>;%+P`nXNRyl14F!1tQFrJ1kYp%4*}6&y0!WDuTKOiHJ>_T0)v(ssW4Dp%Uf3z zciaCa2DL4l7_Mh?a;Ck@^2S(cxfu&!kG!q~|DBqlFI9jvPo_OLsvzIvTjaCcA0EfL zTdHg_1t=sAjXI@-6HA_KTf#ExRRr4CZFYzPo)2KFn05L+c^r; zu{w$1o38ipw~8h!>O>Rs^YZc%m8Ili8I5<(muA$>6w8CD3H)spV&>*fPD7rA>lpXU zLSzWhX>-HyySkT^ZyOR(yDLyr#FSPuJI`@we#4_aLq1oWS@Avw6Z~Ki(n-@THYwN- zQ1<%Hy}_9HduJ9pz#UO?`A0~T83UPTj309~8#klBq5QB$nS4Hkw*`3!;_L`wzaa{~ zr_`2s@>id2WtQ%W{v{z4Fg1X{?WgrLlv#g3h{UX}`dOi>1<6jx1dY*oY)+NgPpDli32P^+ z1Hw1`RkUt@ulQEFFCC+CyPvJu#Hlx@_zeeD4o5c}Y-ssSr2T`rUN2)_zq*?Sgwno8d!u zD_yI6ktu{+0u+g4+Vhv#$d^+Zwz7$CQ$Y3jgBZjgyyaMnWA)Kg{9>a6YLrc-4=Gaw znv($|Qb{i)0$lfLT@>VXb%#`o4jWv1?_h(6_e5XC)QJ?G4V|MWDgmm27U8Yx8v>v0 zCp>RUbRQMT9i#sMBZ-!al!N-bnRMTbW*L7(npzJ{{aXC&%8DeQIl3OTnj$0>H!!bTQKSa91OMdoteTjlX&Dr z0V+QpD04l;urNZa4)xs>f7BGY^jyJiaLr>l zfBEC!O<>Xypy;1SM?ygPw2LNmA0tLoxE|74+R=bjBu>6m=JK4;@^s`mR>n>NAWQeF zt5)ZQIeeJZSo~A<{4B7|B_DA2NANExiWrwy^Npnr>sq{YlG`POQA zF!HAeS|`>Hp?GWg{~B4#qt{|^r`VQ)FS}k8#EwwAQP?dQ(7htVH>~e(FhKUvtmP6(^DMPk(YHd7iL#TyerxkUi;0bC<|NZE=mJuw zqG&fzr+a82H>>CGhw$BpGm8tNvXckI;8O;v#?4Gq{61-`bg&hS>)QPL#-9NS(4+8z;G>mqiL700Ehqk@;S6aSvvnY!1(O>}4lO&#*yp7D|4zg5b;h>Io^u9H+ zMJ)TE*--300n+-lwO72J%^>lIfzl-fTnz!ue5#5J%$wPd%pUtNV~h`)tZm56UCSE5 zP1h`^$gFnb_qQ#@gCD`8E`iBpJKX6;C*n-YEkQ=uajybF3Bno9V?SE*WArx^_c@BdZZkAX}uF6N>MxxZ2=Y(ga2AMZTm0(KAK z113CnSl%2;C>MzY&(1=-NqT>y1Gdq6*N>NJg)ph<0sm`~=2f(^XMRE+=2ZIdDhxJe z=V_Kr+DX1$;4SBuyMX7aXRo2}`yX<`?hjp4VgTqD4y~BI7Ap^FTv7b1@4&H}JTyH` zNiO;-T8p9ogv>E&N#5b61C=?nxBm;s-F=(^ z(79(_3hvjSdDp?bU|^fCSj+?o?Rax;1F58MxtSL`{y-eF`BLBmNe~cFGZvm;9a&1m zh0_$dF(mzqpCVD>`^5PL)W$1FH7RjuipIUgZL?ZL6^294ziKzuAAXR5xp8;Bi|;am zAO3GSncd4~YV=cpn()(|G*2^+gIHevi@-(xoZaXRv0I>103;If>0uP?hwYjZQXcxc zy5NrBf1d?*z*zuFs_$yCKBZ&%tk9M`n_g8^(!^iGACdzOL#R!S{0BMB!v;r&t(8`g zy}|WMkQyF4L<+!0XC~@iOl9pZr8FMw7v1Sl#n*Xw^2E5iyNey#p8|<6Z#|RF>PWFr(h3D0s9?CY6Snf3B4;MhkC6GDbZJyi(qu*_w%(b^*{Z2;bA z2790?=eA?m6p=KUL(Y5x!kbx3)~hgZqVk+NqH#3Fjl1@kdUg3W{NEe{(%BN)_uLvw zJ&n`h0M(s>-x3-dksoPNr+ndco8LM{PoI^9DuS0hVVQRd1Y=`kjM+Hmt%M0LjCu+9 zD7nM%wsc&}3N7jo?cc3lre_LQH?iN;GM$*by?(!$;?=8vV6=_!wZTHaxhrwo%kS5^ zwKYqYLB6Bcmz(LF+t3;2(9`eBMx4N>C%l!N*4+awQqfbM% z9i7T5ft?NVr<*Ui=_$YdYyU_CDqQn>V1(?EUOOfEzSw)dGt8tZP8S~XLJ0~K(U{=> zraS>ta>l{IIj?v7HAKc6khzv?o5dH$Tuc&>r^xaRs0sJCYYV5{j;q}ZdLjS@Gi1}G zZ~A?K+k833f*k&Dpg=34*1C^jp@;U5(Rm0m0B-gtc{WL~h0jmuKIT3Mq3g$EgCe0B z37@0SQ2cw(7uN5;L;}WZv0w@~zHog_C8hotT|feq!w}dXr+1$3pWg(6;)wy&)tZ0L zllFN6CY3@u^mM$d{8k8RKs?mGev(}D-+Q`JkTNer;bQLc@HzzXS)hROEsZ3VzYnhG z0ObRe1r3f;35XWYH$Z+ZELTVR@DX-tZo?_V7HBsx8WIKe4A~X;^Mn$(oAe-A^x9_w zy1#d`xEWhKJlrj5&O-4+um4=3X;Q}(_RLzpj#QBFWDLFuO#I%KX``y|JDdRRBI(S{ zNZIwE;&-Njk$z|@$$qE7Q2EyvEt#W{g&wE=7k#?$=dMq>P5h_)mHgipl0?&df0CRl z_>O3@(mT`f5#%h1q82x(|DQhB8XUGi#^K_AYKo;EX}r0re1-r?25lb&dE4CUj4Jjf zY#MYVW_5iOnwYvDV|!F|IQrkDLkhRN#LaTPz07Bm31e$xUBP7%v;>dlwFB^7s4~yOg7Jv z$^!Z)Nx-6a7+c0`?`}Z89Z3HqT0vgzmB)>+&6QUi!z7dPW?l;lG+;=Md4*hCN3yI5 zxIsO%#}B~{Fs`n|F;>OB{>J~bv@I~k$vFh}sC1|!6%~}9U(wz4Tv|OPMRCUDG+6if z;}aT+r`qZze6QBmoZfJPAyxz!>XEc}fGMbgN3s-X$}9eP(N|A46HQ1pR?~KJH0SIg 
zQzfBg6OQpDVp$T?@Y>GA6mL-@D%!IZ-#x8^Za`}z)d)d>_=X1C5t+L;Z%PiO4^{Qp zyjLknHDOC40k!0!9L#HhS8h`SQ@@l_W>Jze<$2Td9?3_r#hfG>nrS0zMy9y#R&|75 z+r+fA>plO3fBT0i%z`C+<9$Qy$0yWz=)|G9JWbpU;^?-wKTM^LU)Zix72zFX4Sx>I zN_ypGR&r4k<~5t)4(jm=j{3Wd;X7UvS6@Epj#McDb~vsBoO8QIKBOHSW^YQ37vh^x z!$ZUDyZ`<>F)(g{LyMwh3FR{n^=7qI^fG++xxf3)?+)O9f&C2s$C-hV61P?(7!(wg z*SM=`0r#4i@v{PjH)y3;&HGUPe`N(+@U)x4dpxk7Ga`ex3sxWSQyk#muSQgpmP>?(q*?8kr^pL(vJDtECr5;!+9q)RQ&j=^JcRR)`cm8O&X?D4 z1p{L>kIiXsc|bJQ9@bqwj)Q%d7x4uo@0NZj1^Hg`n26h~yy^>0@joQnUV11Cvq|l{ z@YWo>^#%MYDw-CAolPW&Y|pm;Z2N5?DDaLb2+s1dysO8@jIHNjQn4yl_@R=DmGUJ6{*^Bds2rZEBPFR0U-Q5khV-vv^4dphjPwFw31YC zT>9O56ncyT*@VR|AyWTkb|qU0oRAYlv#eH{UPpF;E)WDoP3P5PIL!en@=@-R%Au9) z2Xr@qGpdvTKAtH+La>gxou+sJfWpRFU-dYpMA{`$lDk@$2h(kiVoFJwKr-y*1QkQ*6!0spR~l zEC36vDt+M10y~fDzZHNN`AK_Dm~?(POJ6c zAR=W<%#L*CVf_iFDZ*S|l%HjjEEX8Z;fAL2+%9+%SR-nbN!LZ{T3k)kQ)o&7DhC-M zk<<;LPc~I(iik5{IDS+WAuV{S_|C(l;<#TzpQNWC?~2dw2n$P{A_cJd^8tR?JC%~* zqb2`_sRkwgQW9H}T;tdNgke=YI2Ouryo`QZ_^h(JGTAJ0YTvTnZdgd5d|<%S$AiN9ZCqj&Xlpy_u7fu?1i?)~C)l^vDtly_>_g&yrMs`~O35 zD<3Kl*KvGd9&Q!1r$`mfR&K|9&C2@Bk({s{-X6JL)?>W&KFZ(bS0{vYr`VrLRI_seIs{^P{ z@9qviYS(0asV$!?dswL!*SNds_&#$5ppO$)I_G9G748M6}KG>h$(C)YZJ4Qe#P5 z_jBfymBf?}SoE9+4r>1N_$0!c$iW(ACWG<2q9rxA(bUn=;oPUfpt9}dE^V4R!%mbyT00hg_X*(GA_rPnkV`<97N|{=GGyOcZf;pm0*6XpuehxH zuGyqFk(Q(9MHww&?fmIRqUY;d3JA8T)IgJZ)%hHj3$02utT z!RC8U-a$w{`#Uej9thY+PEIRbj(nf)c-Qr3y{6xWk`{vuI6;CzAg z^-B4o1NQM|2~zWW3G+~L;IkNj2bfyr@AwzhNr(Nyq??M~meyHu0bhOjm<r4xVqo{7ID^V5D4Q`T8&Z-37t8B~tHS0^X%?|be}OLG0(W#7K_NZo*$eme zmoD+>w7?$qtB5c{VXo2OLb`xN1#}yQhK8cxFE;;fy;h8e7I~G*@Po>P zQ$k*zx+;rA@3jnaQgrgsTCq{ykj0Bx-|q6yaGeNGod_Y_**>P~xC{8I$8SjJxA2eH zkC{J#6uc-;SA4_N5Wf;Gw+^-YphEoSVh3oK6+IyI;95?cq9WQ)Af!|whF%s;LYrDy z3l~M`%HnVqO3kFB(#`JE6u+WtmL+&Dr>%QsH27ki#EnRPWPS#uRfbGpzOSX3J8j8C zU6L&Zl0o;0PA>(Yoog`EENpk_(&Zb+Tfc>}|KHDc#{E9igx7c$PiYHFPQcSZ0_W@( zU{Y7Be3z*>n&WLhoTfPUw04#zCuyNI6){-{Hxs|a430WYd)Hu+KScCO{^d*PDqmc$ z#JOz+l2o0|tI(8J=-2%%wj?Yx9d36Y>LE7W=7xFc+U6dLsZ(S2HvF^wcge4w09sCn zq$CgZ!j5(3@$++d2LzQt&YBPh2CMh-!`E|PvClN-)XDwFVHD>q-FDcTT2H-~WU{~8 zIz10mMBT*0C|c__Qq*$!`2*??Z;=i$2?3Pk^QltUE>K)lp=^Btz(Qpm$+CVIC?CzH zhR5ma>l94vjQ%a2g2LyKz?;C+`I5GV^_fRzRkAhk;-cf4NA@%Y&ttO~T5Ya%b&+SwlnbG=8Tl&s%3URS)Ob!AW*oW#%ZM8 zO_HNW^TH&PIt*GSz#b|$&|AAvG{+bQDmB*T*yYA={CX7*om9s%Q1S1270IOfqK;bE z9%*VKCouKc(r-+H?=$(L9G%xHvXgr4@9wU~OUMlna4ow+Ki(4dKMciJ7 zgTt*$ z^$VK~;*O%~(G9?xNtfO7cfvAlG;GzZ7O%;Tr{{v$-I|SJ_dbe8n0n(Y%B;mAIw?Y+ z&$%%>&}z$jwiMx2|HFF66FGxfwna^*uGYWUMm3@;^mX*Xdp1{qayqo6O=VW;k9O+C zX1!)AN?nZIK!-=#)L#VNe*Nb6>_fOQp-rObFc~F_Z?2*Fj&Pqt>F6;mkpoeh78dLc zW`pE>7M2jzyW70*Pgpc`kjH8x>>QHB@|^voUvnD*VPv%#PBKyIvFpV5;$n7sPIoyOC6KQa5L^~bw5R*1bH_4YJ;5=7@QF~_G{ zty3~ZxW%hx4=~neOrRbKE0aRgwU*2kM5Fy;4Y<~;ww{>5EpVV|(TpU`NQx^oE$Hg& z_!D+CyHm$@7SuH1yY28bj>~**Dqy|S**AePB3ao4VBL8`cNz@w)zo-*)H(J`{Kwye zK7Ndt^}XG0)Iru!((e(%rj*=7Gg~AAcZ4L;UjS}2)3Sc zOr+ft5;!^Kru|x3iu{|0#y&Re&k3V8rp1g|Lq#CbB(*9fp~!Tz zVA8D}+(_7u;V1Nc(4c4ML*-L8XLF}>qUR|(N39SLkgjFNUVG&>Ivhj79lEZr`9?PO_0}3tZI9{?~pQUQw1e zkB~mlpE(%An0X%6P-`YLg?B**Kl2VlKgO;U=I)UjHK3E?ziNhRKf57JLb1;$738@Z zmMh{LUZ4$)q^jh$^4y_Z4q;;8swUmz`z3rwF>1u-uB5>R!~sHhW6Z<+0JP%(*XBv| zlne&o*cWAj!|Gm)g=2~^-~#E`0|rt!zibJ~Eekp(?2dzzG)fMi1um6IP|)PY1LHd* zVX{*KOiRRdhNjQ#CZ8CaJN~kq8+=JqAu3z4rvw?#caH^}q)NeCZKO?oE>JRrSKX*T z=%${omfG#8l9-;67&Ieg;)FCxa+L)+#9^~w&X}#Z+|gcVa%c)C&_NRe9VNr3+BIT& z12)oOyx*ZnseSTBgRFs3;^XQoHJ^JIxHz_qTbmFW4GCK-d_}mQ5(tDTT)*4W`+Ha6tmnE z3QZUGo##hp+>H6(cVc_j1Nbi9M-t<%&K5Ek7BLs1s?LSlo>^KG8D5Vscz9hqY;hp| zh>bj6U}67Xfal-&pZ;kdd+B#-m1M1(rtFzHZ7y&zWc@AF5S&S96w%0-cd70rz62gd 
z`grwl!VEI+Cmr=#6Q4ER-I$*kic|ZD#?(4P5&0FEFr{jm6ngKzo@3-o!zwin9nyi(^ zUbhm-Sa5cyyqDT~I^KKIUc8n^oxSsih2kIjjjkLYZpShm&Vab>|Mnv@u%V;8@lw^O z$8XF-{Qf@}*DbSpk(#gk{@x0YhU*yMH|A+hyD?r{I?z#W{Y-izszL46DC0C~U zVzcix#Ma05-A)Y>3%&=bT=Wm;WBZ&tRwTa(abJ7e?`Y0RqC7_> zBMFsp;3EufW1FthF^t{rbI!oDeVl}t|3Z;Sx#lB)HYYMeLxGnVb8tat71 ztB=3re)!od9d&T)ULkdV07Z5x1u`!UD*G(nyTUWfFVAR6D!%KTUtSl{&;G2(!+7tV zE^ANO@H9vH2SPR>Ps3F%y;JUSvA@1X>?6L^sMm5+5^V926{)UNGNEdi&5kKfxM>tM z<2!+sJN>I*ZH`88xR*~;2e~om8 zla-5M^xZfq>=~TwB)lGvOGK^*Tq%~WS(qx-r`NgF>%HSqQnWOXP49u&m7hcF?^fmh z4qHB4F=tn=?ndO}Gsi|%Zt1+xS>q`15>QL79Su4L7js$geg$K9pnZ8j{^s!gpgyq& zw{9!)X=Dhf@%?yoqw7NtT~9vRwA%^)U7eR_GLFyicbyeBp7&z-L3o)KF)ibe2_swO zWp5-hQg2c#@gz53&7{5>(LFd|W9N7obv8OfNBy~}+dU1pdyyK#bKz;t2|rJa^ZLSd zechVGlbB;4{{7l6s{6^{0H{M*x9d4A)Zzw!?R`Rl%)pPHU2VWib<@E1drkV6cXv{r zITDtXRO&8X8LN-7*L$iiL-bj1e$YFZ`?*xkz~jEafb`CbW^@UUg>_I#gX619@UC6; zfon@^{blZ}-hHd*FAT9VKAdHkYdMDB8fV7?YAC7?rK@|{c6=0NRhtB7PoH@%()n)} z{FiB&bVvFxru9R{_*He@4-UH_d$FNmo~I>oaKx^2Oo^)*A+EtYSxodkbmIU%UR?5Y zWN~yHNJnuGQ;uBQv=zU}vxnqwOUC4WNPf(I@K5jqvmL^H!R(bRt8--h??wD-Zgbzx zk1c0mBr0K}fWy{S#AEzK;a1u^FDtiab{ZYGhf*L`BXuzfXM zzWfUKHz&l@lH;<}qO_FH-(ICTtXFPg+4FWvXTtb8L;q8h%}zeFks$=N3Ak<)=Dx zo+U;5^C+m*1Q~@)fsAa|uFLA^doqH#_`{mmgE|+9QC!ap3)OV@Ytki5+reYC$QW>1zp_V&;shqj?#);G=0(+)h0KPq#f4}y#?wu8F{AS)mGiPEj5hN|(zQzi@97TS< zRxa1mJAHrUG)<;#)VpNYJ>{ocR0|6Drfcd4J3{Vqd{%35-)i(sk}Yn5(uEvhXj<|tl^r~c~lNk=HU@Wd zGmA{mD1_jKPqqcTlLxNGq~CQ+-x-6O_?CO*s|#x3ivO9bJnw66xgJ?%B01)6_=lb_ zmg@=lSt2IZPp-91MBYx_sEV9~-_*{Uu%P5VmXW8O@tGk!kD4a6GFj{ zosE%=I6?0|=BQ3#D*F)P51DbRRkiT@M{}hkF7B&Q#;WrDgN|#6+J3kiMP1Y(x!Do!|+XxS!`k`!p$kRlWcCLnJf0tm4S7Kvp+q zYjXSz3Do?epm)Z+E`ZAF8Pc8=b-R`~RUt}Zf%;>H(}y6{XzFiut`f*UrW{;a-SanL zNp_<9#vHl!td5vqKUjvrTL++hAAG5$I}Wc;Fj3IlpPky38Bs@GXDl%sFE_cv!cR}l zksv?(p(0?=$?x|w<1`(d#$jIj+5xY0?I{d?56$aI(`~CA&&EG}@%v!+ytZ#5n{>vsSG`mAT1Z{D{{%NJSI&b*G≤# z3>*vM1T7|84kz*B&r}$^ zr@K4iiqETbWh}DSN4B0gIl(?MGZ#Sbu#1=`{FoZ_{o#c{PJVb*@{c*4X+;DBAKa>F zvBm~jh3b9#EHf9!6ZOrt6yf`-xKr93t?}vhMOk((y=Q1GUnY(ZZiQ1rp+%N< z<_$QH_`QpNqGqfb{W>uv^OhsA4!dLuF`xS14PCd246JJz{Gffn(&AfbRgmgE(jM8~Xg0(q zCaKP$(^$#n-x8ghu=VRS-A^#sFbT(+&!HO*jTn9?z26Kg|G5vUek4i_xu1ImQTjHJ|W>-0Q>;XCh^|IE7Ng(y&y1MBw5Y>&A5xH zXH8n4%7MfF4n3zLymW1=^misx9p8L}v0v(=UHI*uw^5Ki)ok*1Wyj zAW>;mIAvC>epOfacUH=J4Z&*NkTIkAoA1bX!Jwo!;VsNgAbS+<&G0b0z<;7++rNd& zT!Hiin$j2qVT4#aY5`sZt(v$R^UzD0osyi)FP$&AbnyYVVeReYnU_u)lg##Osh7h( z-?RCeyX8JBuHD;jL*V!0}YcR^-2V9w0J-^=>zb$Ke?=ZjAQTH;}Eczg?*)2mVl zx>J3&L7sxL$=y)3PAfk2gV9Yl=PzH4u7Fv_BSW z-Z!gAqh>m15c?$V!l%Etr*GLv(B007eOYN)IF?j_ZudPE7x@f(pot0?w?e|dZw#@G ztutIXd>*xO-XZEX`%Bq7Yd1AG=A6BA&cBa+uOje&QTG*YQAb_3fS{B}halbEpp*>V zUDDkh(jeX4Ee+D$9Rkv!bf?vZfexAPpGA=|Q5GQKhvYl^AozL!TA4?RfV%U~3!ktKUvvpxs+WNha?l(1nh8v^rJtvzk{Af{WGj{zVOcdsH;|I#{e%mx+#PrQU0k zjOwp*WO>oJZu?6>qOnM8qix2}5+TS&Id@6PqK=^ueVe!Y6N-Sm7+c;Y^J4lQb zWasF&J@wujnq(g>pFb7YH*<^przN!i*AnjjYYDS?wl3EG^@qq!A91N9J|q{&j7$}p z{JjMEDRA*t?_52qAp>00Pf{1DorBDeD$u0rmS*jyW_Vv^6p;f_!;}t&H;`1xlZ}K6 zcpFjRvpKVQXWfiN9QG{(c{UPSc27eb$6)Njnx=gIrjLzKHX4H(^;d)kPRn~)eUVQH zblVv@5&`mcyfy6nK5mKTHdhw*F75Wd@?_Mllvqfpt|6_aRxA~TztR-rxBSRq#zsK9P|sF$;1>1vSGN-) zF;~H*m5pbs1lqWlw7|J(!FksQ4>?nh6uVSi_T8=*E6vwUl zJ|h#PusQGUjr!5`j#x$KQc4y+){jc*X;_Jz7mPu2qEkj!UCWyvz=17}QH)QKaeRR+ z8aK%z8FuOTksrQ_r9YID39<&DhQ*0)4H_{>(b7hY>P&DK+LRrMUm#@k`KaHygH_kV zH}PsG22Q$(LAO4$9M1;z9M;2u%X(br>x@t$JyJlP=JF#w8**_qC&o; zny5(54_qdeq`et2T_PxbXgW)W4?QoRY8R=VN8%zsjqpx}Iqk2+I$FwTUchtG-D|&- z8Kel(pUY1oqMgr+H^=t)XUWSNDrp%S2WhlZQSOm+<_5S_YtwhP_iCOkx`3Fnu^ga9Fwp#a`gpdHC{#IIy zaFdmZZmi44!{b<W|TXE!U7+cqkt%2EQyyFS~SF?rfr6i1gbx^VIy_7BL+wrwQt2Ig{BfNLzPD?wu=C 
zJsw%j#im1+RqC|PA7uSwjaPM-In5Q7Q2MW2m;Wo*5`bBe1flZIF}MxK-8SpE1rbKEP%)%kvrFzO3_9@)5uWbY4BL`zsEWsfk1WVa<7I_d<6D zp{Ise=*7Jd!zDS`-obuP1skXP;jJmq4&mwOpu~MgYUh%3)~_EjkNPXY(QCW>^ZU?!=l-rd=u8PC*Xr*lwG=`~5&j4)Leq!7POcU)&yB!BiHgN_V zGyTK$W-XN5Pmh|v!*^Nuf~<8P4X-*L%UQzk_*zi|->HjP1mPbHd@1t0PK}l%O06Ta zd^+DkPgG~=ZM_Zcn*G*V^oc2*SE+dt4>Yt`{X4vmPKVy6Co^{0&aM+CxGoXRep#ny z01ShU8=%Ae+Qi61<{R$?*BY-HF4gbamAVQAy8Q9&grbdi7duKbvS*h<;Bb4PBVJlmrq#)6HsFOmTy0`Eq*a72c{i6fboDgh^$&ep$JGn{}Ul znKbQV9+oXa^To7+tIh;6-?k%YLuMoEQ5FZ2DX`jAT5d#2TwuEn{h-p9?}&)JzfX&H z_}s4buJSp4zt{1)xb!J3B!5~fe?>4BL3#ZT2ih!2Eqg-e?>Dyc=p;%WS>W00W zyNsf-!Q}bYhO5Zsru&+6>VtqK|1AIf56qDp94mlnbocy_Kmv(k&jyT@vH3I#)(K2Z=Sfs9ITLTdTPaSK4mEhnttlSt(3&X2y&P=n^y(l%&PSqwuXN$m)PWcgR zUJ~2#?^jymS0t_949v={k`t!`4uxzOb#hSAmPBv%vU7xEU5V7}f}g zqSdn)&VtpM>@q7IgIu5Pjvw)%979{Q(e1uTnfMz~wZ;R9-&8tF?~! z56Mi|v)nKhi8KWO1D`K3HfVY_Fr|$LBp03x0Y z+u>hWo*^frOq%10$j(hxx{2N*+(=zC8wR?gp;1yMmCm4UruTbH<;7+9>WQZv)S_hW zcg%6GtW&`lbQE)Qnu=ZDx21?3;-s9<%em-uEcj3fW6;G(qF~y{d6;25OCv8JgG$kA zYKC?Z2uLqLJ?CVenb?MXe?PDg-H>^d{$4gJWKp-u%USZys*zJ3*}3K70Kh-VjSH0I zBPsd*U(XW0cfm@1pxYhM=tIHs8Uglza#6f)q^0IQK;zWu&D|q z+X#1oEc1ehFTs`qy9ODJBiA;gS^?&qqZ8QKe7rq27kV*>BmuBm-lEEqZV+#CPwxnb zN8!T)V86xFh9?WW_gMNm;~5al&gFPkc$-*G#BZ)vXuCH$R;2){aO z*Y3sQhp}M7G!giJPLtaY;j44T_-6IjxV)4ZJ!lY%SXQOXxXMvd^O^y5frG3N%pZ*f z6*N;jVc91G9%0|{M~{+4ZDj7tHPU82l>D7m4kd`V=o+=b!)J_Knvx{@TA)%uy_vlW zPqg1qeYPR~c9gaC5}!5~>5$}iL2MeOW0CJjrKDB|ZHF{tN;bnKo9VTtrHpmtj*I3* zD5|7-r<*E*Rdkj{Xl|inq+YHQnFdt~hSk`b>ymfv z6OTq!HZ?~(>K}siONIk5RXj^s1?eKU1a6+w!UJ=LUOPSveWOFqGP68L+peZyYfcV6 zU1sG}jb715)yy4PjBSu5^TV2CgRss7{8kj0pVL38efd3sq*!Tnc>a}U zJSqgGqKKUTtIbWOIV!3wv|>Dl&*}9yJe|5qTZ>lrr?GddZcppPzPZ$4xhDb!4bmkl zlHX68y~E(3d~pI9)~vD*tuyxa*l*lUZGRvGc7B!y>%pP1k0wiuu0!m-V`_($z;8XP zCi2uR*q=(p}E`Z`d2?0_>AT(cU4Izup77;NW{@rto)g?I*2xyn*U8-sc6e9CDR+U{-g(JlZzgxri6;&ICM z_dmBHiW87!hCRW_D#RI-7NYN?>M~-^`yVbERQ65J9AemKO^%;(uP$SH0fX#bYxh+` zYo+I{vyFHVs~dgfKvZGqnEoK{n@wtA)eb}#D{a`ETT%f3`cgOFws^RX6?Ggfj_QUM#<&p+W0OxLTAxku8apVpavKbw8m zR09`k`Qct;Io3`J6FNDYFjb2nT@yY1Ve3hr1ksk=#dE=-=iqDK&w5+TBb`*(#hfgx z{)u`+hU|uns@tA#XjBSV4?dMoy(y_!wD&p&Xb&n6%}k5q&l|IyQpy6xmg{7r)bQ{{ zPz%&DO`&VZa!>2AhJ=+TPra2x`Es2||2Aj%rozW{+l;J@es>q^s%Nm&)_^=O4UdhJ zWsovnFY`{?4;|KnUcwvHWLG|NiTF-yZf`)o;e#I~BHFLPq#F5-Q&)G$}`PD03d%%ER;-LD-jyE zT^H}bVub};b!QCvflSfrz`3^-Tl?X24D#}Oo7 zYxUQoH_oqBq!L!IuEb@cLX_z3@S=8K7lr&4swd(gNJb$4*N!AqTcA)OG%mKye6c>qzqg37fqlZEM5jFRNJ{(MkEgE|q6d+TY!Gn@)iMd1 z)tG8pHVc>JKD?x>jSla>cH=Yh(Z*&o`koff>LhZgc@qP4dGRy$)sot6tdy$X6URG_ z;6aJ2EFWP-eO_&v#^fK{w5w;85tMcjG0^vx-jN=&y5Qk26VPE()FnuN&@FwIuhGK^ zcx7~I@v{65KdPEulOVfQurrsKQ;H?M+?sIdYu)xQpyYuM>;~z6M|(X;le=plKyRQe zcrr9LvfENAHar6azKYu{RqgM1?p^%%e-|o0jd~G8#1p2rQQ1!SucUv7_j+G)(5d{S zu#$1G?D-Bi14H1`^Ka6Q(>=`>&PIWJxpy$K*_~e&CvvAn`7>ZOrFZ^=bi0tsK@SRG_$bD5tm2iW2+v%h z${gO8=!UFB_&(5-F@+3$IfUlllYBV>912GXmZc&cHlMWHpC-$52cOX@FHaT8yR@0G zKuxIq-dA9fGWVLS&ea{9_A7CY=tIXh?VG$w?X0V=g$F6(2jht1?|@u)=Y_`Cbn;`) zv8c7Q(WKn_htr8PzkXx7do+7CNav$7g%z?i*>>r+Z@F$mJ_Djz?zqs|e8i|HRp$qD z3srP|W+%To9hM|$3KrvA&(@pU+4lAuQEt8jy*>&eSxh104o8WFCn1rGA7V>f1o1T1meIGH)>nqs zuQ-KVc@mY0$EFGssW1};b}5$BqA`l#O((Wd?%M)@yzdI(e|LNl&{mQ2`>-A_17rQ3 zp7lGsz^7W?>KtlL9+v^jO57m(Kwdy`f*8&jW!|s<;^jgZjGs4A{#&g=no&w|aP*)A ze57dOQ^nPCcSDaD_^`WNSe1wvsxDN@ErcFxORWF3s{B_(VH{v_r}uA|gkChOG&XBF zB?S4q|GcaikpC79ve~5)2?*rt*+VHK?CYHP85p9C^Lw1>OV}Btg~23fAxdQEEXew} z1DmJ$p4D2^Z<$JBfSwi$^t7Qs#b)t=ZM#7NjAbQ%?VWGn4VqvWA>uzV^3CHM@}?b~ z_V;6l9H~RN*Q+1L@jF?$sY<7kUR-C_=U2Qa37~%iKCSX{RW)o2G=}YQjczu(-*Mf4 zn%n}SQP#0ji<(cq=_#>o)bH-r5|!9**Ta^;mgnTY;(-ZJa3N#vl^eeN=aZJLI>N{! 
z3!c|<>2f;zouCoOXWz)2YR6|&B?*9FK2P&xgNGS*!oAzhj{t{+vo@J3nBKkB5Yajc z^i7HA{D2Mk(dbJvt!10 zi55E9*n|Xn&G`WR3w4@%f%a`0oRaLUTl+?<;LPpXcVwf!UDw{ualiuFmPR(9tBz%N zD|Ac>$Sdke+VNjv^QKonSlh6>2#Y5GLjzDBG(+BfdetyFo+kHJT}%hCj&!EI7&dsM zXO{jq9Tq&6H?67#fX-P+ERHI`Yw@r+1^qpxA=T&v5PkroGQ4q--PY-!U(!;YG_E-o zu&C3feJ^S5`K%Ny$yPUS`L`a$IrYtg^fHFc2y!-Aa<5FZ^h#!{y@tVLk9K>8|32nt z+L*RyE1E6>ZkS_AL$OU)kD}&PLc8z##xMwQ#%Fi?wOW*GAYm6~1yx^jD#^LdGe`H% z4wFi4UeKZ=)jGS+AO4Bk_ABk}9V}*_@{IycT zc+vQ~U1qgi;#Nzi45FDNUqx9@%4Sg*u1h$G=d}9XrWoJS5jFz|EWr=GKMb=Z`!E1I zZe*@>6*~l$APeG=^sxv_-SP>+SDC0*Dyk)uyv0L=ySAoP{PV!ScYIiHdW~SrWu4q@ zoZO>SvjO%Q2%<+GxzrSUK0X+y4*DeDWUc!=XZmsRm01v8Rn+52Yc{a101eHjX5;8F z<(Db1X$-Q;yCt3}&nf(KphWq|d+Uwyw4gyq5Ib3vZ;VG$B`Q_Aws8ixZAQQF?5Ezh zJo)jhpErrNngiN}z?Trx3!+Tj6x-4OmvSc@J%Obf(Yj0#|Hni> zO|GFzi$*z7YOn9Olgi8EiYb_XpOvThkJ+X!+OM!t{<9vRCM*t~0Zm9aLJTnj6v6K> zmw~ZE3@UXkOrb_;Ct#;(S!50Q$7+Cy1*v*C?X2bLV-vVGiA^pv^li<8N=(4*oO7yX zZwl`g00K&cw|gw(E#NqZk2N6#xu^Hl4Vy0j5Lu62A8`nz9o0KEtgp;o|Z1;hm)JHC{;9(Wb4vjfG1J))ii zsK-sCKn(@v0g#es<1{k1pvQ8G?AzK+nmSA^FqpnrzRz8yasSQpe3Rc`!DZ&W4!2! z>0Lwi)V$OEg|kl}eyf`pLLp8T;0vc|&_RDrA;rbe_tM>VKGX)**Q3gU| zsFSG@6DYQJ#Z1R|iO9`rhW`Bf&zRA!&C?pE65GT>hOkl5Rcj3HIqzqy8xB8GBgj5J zMXQU63W7=PpGbq^tdj3i?Hu(%*ahm#o~sol zbSgER#6EOV2X$~QJa*bVrx_K`(rmw&UwPENXYQ>rIhiRLqf@)Lee-kT(jHkKv6@!_ z3!^9^O6fq66x{%8VINKqc_giU4xzDss~Yg11IzL8gyBPc&M&Df0V2aQ59f`5bGSJ4 z2q)y+u7BG5o4tm4HuB_iL*J?UkDh}{F&)c!aD9M_U##Ks89`sdgv#o|y3{(TG-q)E zo=M$G-Hr*4RDYvA;lYAD>s{5Ngm%r2m-RGHAbN|X$pO}*$Q&K;UhS*}hq1f%ulayd zIOe1RXd*FelpKvh=iI*+No?9X&vifZLEwNwz0E(fWv~JY^^cNWqup&rM}htcK(c~` z%9&s3>L7y%ECZbLN&<5$`p0kO>`#i4sOd1mIrr3z{!Y2F*;ua)zWT89{8#7(NLRch zME^qIL3HRCfSR4_nH_D{)X(wNdDBZo9q=zkS`lPel30*b$ujXs@nUh>UtyBzch@l= z;h8YZSX}_6#?K5v?o}ogiIUC6kB|K!C8s*b)csa$mCBpcwcuqAI3KcdBJPF?iN;-i z^!EZU#N=Ep6%P}gaE;nIXQx7&ht7bTKpbUj(d$k#{%H=5&x+*#s{Gd8)CdNmEEemi(Vp2a=BGo2%A%Q$%E7H+52knEf>kX43Xnf zv*ZIb&h}w@2_d)hV=EecvhHP+<1+ys4pW97>LOjz9?&4H-g+0~GR1F{FWVMk6HKbLJQ_qskjKU%**Y?NU-q%uF46Yrf&VgnvB z5y*Nvg-2G)&Fo$p@4WN&j(jH04RG_^Y*~NFZUC+pAe1@s{g(Txg2CQQS^ucl9pyUf zse0s2>qiRcV}NggrbNP;pG2)jk!&;aCAsuY`{?Yn|HWO%a?|yp%8<8hMUy09d+*X1 z0gmFV8O=Z?+xgbvrF`HFGY}x&)SN?i-aVO z#D)FdPd2ukPBJ<3Lk)emI0&fw2Wv9Y4sqaX7IA2nFT@=AXLhDm3j1ev=6oo1z5vA1 zDf~lWP@WN*+hl6<NV$d zK4ymTW)n1&fUL9{`SK7DAJ0B-bNo>JeCulsyxBN?MjdL;dp*z~^F=l49<-`(&4rh&tW_s`O`e4$^gdE|i#VpsJ0zXyZGkG6$4UlpbkV zHUihjs`~))2+I81!G*Ek$iK+~xN%J_=N3S<9-zs-s)P;ygYWPBvU;UU{$tap`o%W; z%<4Z`2uhD-HoD$x!O71gl}k!TBUQ@})#3HBX=`QXaj6MfD~IFg4>--)4LpX5LcA7J zi#>qX3PWYX3R&|Mkqm1+ih_WhaZ_ay5CA`>m~b3$b`=>$C&*+s=Z`JFioOS~2HF^q zνADey|ob88LH7sFq8QJ7Rq=)%W;h*SRxk#uw2uHst*p4@W6rfDt?X_h)^;-ToF zg^cP_YDUw0TI~LbO~&cb?=7H#-mH)SC6byhV)RRqt`2T|drtBww5tsz-<(hDlRjvU zoh{~Hny>voW_-&D0i5ypF`<31XLHNti{UI!^*FCe`p2;EkrhMtXAQt5c{8Kun(FI2 zG9EbwupRI~|9+p=`@{`O3ecS!cFU5_P3t7-gxA#`PCO?a76|P^U3((&=!%fuOdwld zsxw|Kw|!JMeb>Z{tQ4Mr&#J1tua>{mNK4YzvF7>^h%Z!6E&~t~C@UTpoa1UNkg)H; zhp*!oS&!2)>@=;QgJ*XE&mPFz4g7~ddr4POz)CW14|vhFAb5Ga0S3 z(0v&2G={h>oFaFjch?-ocpVZ|JpBrP__}T7+Uj&tb_|?8MV~4O%=u}sLbwVl-5U*F zU^r?s5Hd*L5AwW^{0DF-vBX`r%2ty*RX9c}3RbaE%y@hncHax4DSmU6JEkm9_jWAq zSY-7@MYp>H*-4Xij$V$@?NCD9Wq6l+THC9nav=P{l|V<|Y7Q)QAn=>;qX!w6@wZM| zv$RmjWR;*jGz}TL={dfAvq@fRP*Zmg#$aaxg%i0bG#bC-u5dHgm&&!$>Sg6|6rLo_ zI4v^MwB<+9SLiZ{@;`~SBw|I`i+R7WM{15^Zlu3n?N?YZ9rrTRt0i}I+AY@fZG6l) z@L74hs{hvNanAX?e$JKc_21ZxoqvpC`ZA`q-x*JUCdT$FW?3FL{YiC*Zd|cu5lH0t zZEBW4S)*S{s<-+6_rtHG(UUe#*T&hI`1~;Nu=`g3OX}(RGjQ${bZA<~^G#>2fD3gH zUTUc`9*LLC@?xQdlEUtnFE7mUDSl2(VY<&bzp6J+WNXWt-MyV(Cc8l%eII(ze*Oxd zZKv~PX-OWLZ=Lh^`!8Rlmp^IDeBaQXbj-vh3&2fcX&KFC_jdHiXYmXeSs$RSOJ`H7 
zl+)zzy^f?e;3HY>??_|QR?7o|-}W*ms;A}v*d;pnBR78btQzsi^N1%0_rPJb9E3bebgyRomx^fV zjtLLLB@Vn5xGFM_<%Y8QaIDbRLlXrZX_Y*TU%XI|^+ON-0L_qVtm%v-{12t!t{med zQzrd6&clm~js@y}qvE&zF>nXgjLwklgqL29=+Z3!PY||;JQI5D?=O6Ijf<=~(b8Yi zzofRXCka4RO~4rK?Z_W>uG~}0mOOOMz3A@<$p}#gHQarmyh#``N|27my`#9Tt1YC0N+WAUIjFrEZXnaCz*$BR(7(yR|B{~Ljj6` z^0XL|90n>ilq9kx;YxdP%GDcz5ys|0HXK)vH-80gf*#>XPzEHTB^Tmbnmb0>DHAo*niy? zu&JS21pQ^YTmsJbg}8cRhGZzHRVlD&)1$%%7Z~$JD4)#cN&HA<0L*1GK2M=qQUWE) zZgtQchEYJ_gC=UcSFvl6C&@#*4-**ato@1|M>pVdW=x;~EWA7|j>PslQ^(Vb2;J^b zO#RNAMX&PdrIX&Lb^mo^7t+(<7|2Rhbq(0PHFj zn+cOFh}M0-!lGfSCe)KS9?JYVg~n&}qK&HtS=pZ$ko7_H2`rCN>7uM#FHO%xh_R2q z-#qKEj98$_{qubP=eE|*30ma4_VEBM!8Te-Q9`UDzmEd0-Blq+fJ|0@{l+TKU)8_+ zs+9p3Xn4tg`PBs~@d^HO1la$)KvE18-eEN|FtRVmw13jr(k=7y@g9Ho-`5BH3;H_! z_5Z#b_~-QT|6UywRGaYs^GZ6T|5LmF^IRr0@YR0*d-F|(|DUP+cfU&t7FfIf_x5kk z)_;EnZ}g4$|K4#8{r}``0viAPccTt)H!uPlTe-Cvf)7KYxc8+$7b(ZVsr`@9Ghj?X zy5IYM#%Fxomj7b|+eVA27L zDz5VnIIi56BEqO76jfojP!y69#6+KTQCpCb(E6trW^JB1gmb++oKqH7w~Y)PqbKSF zM*;}_DU)?x7cF*(zNicBUYo~9_}RO-|0azj!0b(^?$@baLGn!pvRi_;L`oM;@A<_> zcYijA_%mEXb@S@+V(M(>XC~OC`X!TgyE1K!NTxz=IjA_$A?zdUZY7MJ#B!ACg#rKe z2@-a_HY?@&%0^@KPZZs~IDZOU_qfmA zU)to5&gPH?S&co`LPn1t!3f;Efr0c{}&)3@rLl9g>H3p80&5suQ zADo_?nVjeUu~_xXc{xQG&%60ZI_lxF9n{4_zN7YfpNzBcNie(kRmayw8WE85iJZp= z_E^ViaS*lt^l8My$7?eyiX@2&8lQy!cfBLK!l%s&o+*y}ru6-BS6LI*RQP&?Zm%N7 zyGdOMII{7Fb_1rr!Shr4JFUS9UBjJPyx|ie4q%2BlP^I{pyLk1p+~0R@%)}HJ0N<_ zO7TcfW8E`EDDXFr1?|lS)Tu$9h6MrDar4l3s9clNp@8VKIN=^vI4+e4gtbq9NDZxw z_8P)j*8L;{bCj$9^a|4VvdV^8ouPl=pOYbB9GFFbHAc+R`PD*`J1cd4uOZ;i+!wlr zglPOoKtrWi^(XJOUSSGUq^>jSsE+h6tc0H$I3m~gAMGJS-bY9`-8%(X@UMNYCKW~! ztTo)uULisEtj0+cOmn~xay#5V&PG*q*kMlya6itg6`?_F370az%e~!&XH7Mt)aXKY zuVf|+L#g_OIm9io0AY5CJCoo0{_fS*=NMM$5P(V&hfS^AKWm1r!SNVUz&5jbXC>g% zx$|pdo-Q)B4fLZULqf8;_7O7LPFlf0Vs!6Xz!AKS&Rch6_l(N8ffNx+;c*c3{m7gl zA478#UL?H}VU_-=&xg60Z-zhABEBHWDjU}j9}$>y+)6gEYsc;>yXD2CN=~%NG#0Et z`I${N&RM?B-{^?^h#U_;P(3M`erH$QZQnrpQIV=o+LzL%HAIyOUk{eTL3$|f)8j1z z4qp_~?;mbwkUs4$UFHFaI6m*1(ZO3Y?*`!}t^Ubh#uZ~hxEoPOw&zvkbYIFhf|Vi+ zn(b22Z7*X!@V8A0Io-p$u|BiH;qXuT;lPh78j|GBu}#AhgP80l=D^*ufO|y$C?JMN zv{fi5(JRcJ46asdF%0Da?SHWMpm_Vs;wy3R<`(C8T#e36U1FE#V|6 ze=QV4xTh4}g7|Zt+dd1g(`e|by(5N*AL&P3Cef;n_w>f}xeUUmmzaDKU?|SU#C5__ z-cE@JppC9{;g}40FE8<%`0h%RV*8vP|LwfrGqVj^Gd;4yLD*_{aolLGu`FVEh7WHlt(`^+?L;BY? 
zMOY7;euZjR=vwgVoNFeJ@)5jF4Mrya&@r|2h>spxw{#U|_^Q*!*X0lsG>B_Cex);* zX2Fh#WTs6~`Wc5;Cg|@n>L)1)5I)9bDzuT}4fOVef54?13Q*%yx3dVz4}zJ#>B-m9 zug=gdT%#sIOW`U@Mp?E)|DYpWJvK32X9Ik^$QhZ zRGEEvMaDP~iF00?|0sJJs?HKpuy;Cew zB}qdaH~wJpDM@c{5_NK;K-AA_Z&AQnI4}9|@&d`I$5>LfkN!E+#LB2@LGT?cGmTms zgXc+pj2@LL!s5x)^L;M3QXFv*Z zFJb#3(oeh~F||qJPA?q284LGEN@$ z|K-aNf4&$u{sSd*eS1dv&X5!p0>)66WD{qoSt0sX>SwG2oQQ+qw+MTAqu}VhONCd^ z7R0_oO@3ObIpPDM-?-4#9n|9r?q78~atbrIu~)l(Rldb}i{U7e&YlwEzv}qV$t-pK zw`tOjZw-zZE&Sy$dPQip{O;vP=iOki+8g2knC`U@ecmrP9DGN~A^of+m+RJW`WA^W zBwJ2yS)Y6<>Nq}o1^51jZ;pkLCC~nW!TWs!ZnMyD*J?%&m8HzCUb2qgK-CeyW|gRA zO)rC7Yz)IXbgeL?WCdz|JrBA2)G7>v2@~@tY_o%OPikc-S2!EU)ivF{ar8`@v?~PP zN?fDEnbieGxv3XKnK|Q9NsD8NrK-VkAxX+jTHWC|40~|DlA*y^Rer=J8l6Q5z$bI1 z*=V*#Gs?iUrqaL{)a)fe>3+Tcu)CEK(;JEAumRh~jm9O|7wcfs`rWrc-6SjrSHDWJxaT*E?qMR*g^?lf_RcUmEh)(q z+agGlH(qMwQq02T>1L;_I$>bdR46U`WM&ko*Yt<4I)f~jOoCORbY>86asD8y3171; z<#LPn4%jNG-5#wU0RQ0KQOK6#a84+v+Z4niyI5<86wYQpQ3s@Jjj;XnL|E{ez` zf6r$WoPDN{5>m^)3AG>yZ~@3fk(*Y&S^Q(M7qEY4{9y0~Ua~sbl2MZMaOouQAT#PA zS@6|5_v?##=!{Gv!}=AkfFn24#B zPL(Lcp=|TvglaFULgHns@pueK=bJi7>mz-a4ujGg!)#r_B6s^l>)~Qc!#Q2%-Xlg@LT&K(0kT zz|4Y!;RwpGB(kS&HM>-FFa+l?M8uEvqkt~|#n&0X2&$iJ9I}sD_!#!EScnQLeg0fs zc)2SWr7m_eyl4CjOoEp`FMJDV+HP0ZojTp@zItl?dcub;2r zgcTSa{PoS&t_gqr&-FN1*;zSV|2#5T8{ALRf2!ev3$&S>2-ZbM^q3y&)B zcXJ6R6NUk5cvCe*u-Px0t2KIrzFRR*uyj!FEiCg}-5Q>-T0JD9B-RD#0LDccA^1&w zKP*Z8KnL%avRc6)`=uw(aTNq5(c$Vz9;lb$+lAvjDMeTamOm5FaBF3AP) zAeXM7aQC)~%e$sZ*ZE@f?+SZzU8tR#oTCLapep=qNM7LFEhnjE;tlkQtD2!q2y)H#0{U-5YJ+kVR}OQR#} z>*)%995FqD`w6+CMM;58No!H-;K}vjz+q~86FA$|;J7oqwJE$kyy;K+pVXE8tfQ>s zq#4!(;jr9SAH@BLKcfbawGCc0c$%yb!SR11hVpwIRVGJ`>3LP-D?Q_(a`&)vax=Qi zB>kC1#)G){9R~Nem>g&{LudUL;tzi0zJa-l8a*=8=Jc^WF*a#&p8DXs*8j%~;84MI zH@oI0U%ily+KH@Zu1Z4T8dlQauy|;B%G(bGr@SsjM35x@W;FYhIWZ|vvsB$YJXH4q z0m;~#E0R&8mNfGU<+Zx+LKCtW1vgDE0zwM)bB-m)@t5#;_kkq;2}k+U29&-_dM3{nCC|(JG?s{zL$?DliR-^Q?!_Z!v#QmtR ze(N&Ttjrs|kv2zV;e}AIo1y6;p-w%0qnNs38c(kJeotj4s%ZgjkaemZU7Sm%$$WS; z<{0N%GA71}p{!1{IwZvb$>)zqKJ$+%oo|Nn=}D6TWswpKDXp-6V1Gtz458^ z#PI9|W`^hl$vL{S;i}*qo%HVxZnVjH4&1DPUfqdrFz4!^75-w>>7$LnLw$C~of?wg zM@}47YF?t#6aU6hwiHRj)k9r2xsCno8$PU;jGIuAL&uNWLVuuNVst=7kY4 zw2$fBbuP8KyPSM8E;)xdDJs9{0**nDU`-q!7s0vAZmCA{N=5L*J;WrOXA%#!tb6^Qfv}}!~_p}Yw;N-(tZ6DNw(U4(SqvnYiZ9dxSY4} z>d8>=52a_rC846PR3Qt#)Q!RuDG?-;R_vPQ`WEFgBjOhWEXCId7N!Iu92MPTGmAQ0 z@eqPQ2TSEqt=*kies`?-|3aU{#+uygOSFFqhf&o)XuOmeuhsXUj~RWcy?l zCplt~7kAX2R+i7mMS@0H5O|vF!W-NvC)U9Ewh*JLTs_~7G)~4mdpy~nc8a7RBAKVL zlIl57Cn?FfDZksp$x2Y)3!@W2{{#%;mcelLOLUN5+7!CN@c@+AX={e`G8-?=&g;{c z+rQxakhs_vt*>1aW0IUU)+aOVuaci5@ipNmChy_)e?TFZWWNgP+DTUIeS9_PLdl@!zZ8)gXi3b`0L%4tFwMQry>nek?Z!1hWI@;DHk7jwy6 z7*FH$ZNDJFv{z8UCbZZqmynJ%5!)r$-@8p<;zyUa!oEzGy@svmf?4UGKGiS91UeRo zU|r=H9LI!PI+fjR;3vh-(fuq}U?v~X{Y2T zbMUcf*eYjXQQWcqWg9q|p@q6OUp8!!2|R+--YK`j#PDC$5q0_sM-p8m2qGM(X5Icy ztje#Yj2-WNET89HRpmb?GQ!q-$L7F;bzV}9zGK{vYo{E?7_?R_%)LKv+6A%95)a@; z?z}Jyv7|=Pmxn{79L3bOFtM~&=4@PRMtYq3I1lYhHear4vuowz%Sk@GE3^W0WFwq`0!~^Lg|iI+n^9h zMoQT0ATz~@Az(9R8GM`Zhd#&t;Dsu3-M76osl%IpEbz6>+*1*bm|kC&oF~Tcr)PL% zRLLv97Be38zWN#1_nrVeJO7HJjzl8*6MR&D&BvCdUkMuGzrPmg!RbRoBaomOpzYfu zVUwVv%6|4`NgP&m1Nlop=}aIBQm-q|gJGxDg9t;OGxf1>cvv|lTW9^88VL#|gpM4V zcTOc_vMzTiw6q<8p1231fJd*SmrKVmUY!suQv6oRLd>&b^-XuIbY$YvI9xTErf)v0r8H~H(ps;Z!6M0be2A?SO&P_W zPN$)}f)iFh!YN?$F4MOO_Fp{Gie0VYt%``z4TQ#eP#G=C@eLDNm@0vl3x*1aGyxf{ zAaHC`t#by>5*JRnTuuG~_SWp8e_JR9mO0kwy_6d+EeLImsnK8UO68h%1YyY#3(JJM zsU<-iO*ycL2%n)?Gbxsl6MM#vEgPItNxU&lqDSl?5R73@ zXX#)lSg|Pz6PNfvM%Gu}fdaGepDS}^#LX1H#j|F(8y$*3`H>V8WPzNjl{K~lk5O`k 
zFx8KGN$U%xOgK*DJpFMzr%yj%J%)TW!t7mO^B_W$(chybo56S;T*c#}IZ_~@e-v#>@Wg?#JMKy3Yi&rCq11-AYbChXydSwwV~{IHf~pJxGmt42F+U-mzL-I zL&JiT)R}t31mWP=F_rb#S_{s1eJ!1;9x}FC=8dp^;L3mzRt*-0L3P(*Oa#Xfb|HQ< zKpp#O2MPLDiR0O{p`cRlwZ~xAbH0UDhxi8A!9ctM$)Ob>y`()b!Z5H10W8E#^R$vZ z!ebT4DKvgmyJc800iiJTXAW$v2~ka+9|QboGi^8Cwf?k*TRE>yEW8s=Ocd%5BW_&*%h}iOG*d+0D5* z8l}G&`Gu$%F(oir0(-er+jc!ycDt`+t zshWU$yfCL=f=4vB_zF1!1_wA^!+fCvW-I^9l^R9#E?#+Yl}{KJV#s&O05C%Do20NJ zY1`zoHW!@KsSARsI2o+>7S!b?{lKnmV#28*uN+?)q#$lg%kU_8b1`q1Q&q?=wl%lM zW|eTNg~}^YSgnjLX6op~D94?B#tB!=Njjt!?*hf!ICwY1!MCTfRT#rbUmfY!tuXz3a24vPt9jyXzjPtERgAU_ryR#!}Iv;dn(3bn(^1q)5Gp07dL zwY z1AOYPP8&(G%%0k#DEqpfBqL!xAC}69yWI1464j7Z)Yv+R&S3)tj6ibOHzMU!-!CbA zBcP#RMdU$!u>s(2gvP`3J_b#6NPG7cZImEA3HL>@8f;E2H*0zx>qpx3qABeANO(GH zmJ*tPJk%IiZJV8UtyyUlVOaSq;UkS?KZ{^sP_e_x8*808Gd`b3RxRND_Tf>p&2Am6 zB@Xi=ev2IG`8?Y=M0B-00FB!>K=yLm8r=qg2d8h&%%Sx+yzrkSXM3_Tv*&WTeEdMv z@uF(uQb>4bTSEW$@K{L&3#uw-bIq@f6yl4L785wl&z*-#&`;#}3OPRq0S0m6HE=s~qdTzjYOD&!RI<;r2nIN%u`eOzssYJ5By08(YDSx7*F=JxVqn{og zO{RT4MNk`J7Do027zvBQ5eDVq+`v~0`SjigBG8eN#?mMGig-+eG}a$;QVLxtXBu!3 zi*XJ7C_p4@TIM<26;Z}f%t8|=)|=x+d8VV$7IiLPTb4%tM4Dyb%lx)AVWY0V^q zrdd_h4zLB8^E>e2_X~&ElP;N+X=|m?V8h`9IWXn*(5$ihKLJpFi z!k|oY#IDlCwr$eVb?MnkMci5HGLY!O!l#s3r{vTeSYEAwcj!pYTC9z5CkZc0XH_1# z$kS%-g~rnw-iWw)BrR8- zdVp1Zs zG6hxA?bh2hP)RV*xXSB7f=9AlG$Oi16i4jZebd_}pH|s#G=oS)bwrM&_r|~>Ha(C@ z5lWNrL`G(`rNdnfeC#U%($bjScg@+e2}n^RQw2Gk#EPi|jjKb-ZCgv)^kkUw>!1;n zvXd6=KB2q`NX>V2qYW-{;GXJ5W&cRG83vDpN=b~3hDt2?Imf`4MHMydbB3=GV(*Fl zSp6&4VccNhGfROS_~)E-MxP;x89S|{F<8CN`hO1}Np4!^Vg6G1Q{=x5wWtJ5N=q_T zcbqczh_ZSyFJTodq8WEik1tF+En?|y=)*t{sxPxr%csxoW>X^ZHbGl7){0a2!p@&3 zh=gFJ6h9Iu*?bSX+g*vn{sDilOfVCp_6-{AHRhHY5k{x@XA_4o{< z72-ySw5hah5x<-l28^8+I{&o+LUkm{^C^aVYS65=U^D*oJ#dE-LuZbHsCADIToXrT zIj5m6CaGj(p!r$J_-VTk{!=_fts8B8K`oHi%Rf0W_t_~=xx>|~O%z%ClJ6lWF>*~8 z>`)xW5WUbqi6~=E(Pv!Lu$p5F7#%AV7fN z79hC0gy8P(?(Xgo+}#2M5AG5)xI96CkhIQUdf;`+IXJD1vcx-J3s?jDnHf=osR11F<1 zM;xXC-{~R|3Y_dksnS2GQC3&HXa&m{V2fApey}NqM3N9K9bvA2QcDneLlo~-)NI9_ zB|`D?klekniAgD|vunY=S%j-3IZ&Mr`T1U2L%8ku-${8_Xy!|J1{wWaZ?ifwX(yVJ zkVMc!JBvhwSP0p2(r{8pOyLTB#>hdl{Y)PSKOe7O_&vf$LIp*MU_aNjGc*ur=1;sO zTKlZr{w3Ut2r)Hb@JulYiglYb{Gp+QFY+$s08}~vw%msr)3aXh)3Z)sGZ4=a#iabxIhHd#NAIL5K{H<8#Tdeu>Bh07Y9_9+2^xPwz;2`~aHYyuhrt7@MKQ2DvvaUS>z)LW zA#Wjl1B6zUP|17|M`bKI{okn@Pq=?cr=WsFQgztevtwdu%c@+--k~0bMjy?uUgS@) z5nO?PWm3z2$6GR5eJ}#K%%%zhVVDJZ;%hCn3`LH4p|osBk}zwXm!JUe%N7#VV!ut} zj93Z$06ACDUs=*dI$=|%6@mT2HQj5Te4{El2KG=#m7>wH6+hgRuB~<-Lm10#%a5S@*?qY8_2yf2^^UW%=PRG=9nn8Q2@2$!6CGN2}Gom4TeWEJ(8MF97xor z08A1*BGn5%B{jQN_JX$+`ql;SVkLz6Gp9t!Vx}*VC``tb21Ar@k)WNzV*WodqOG=@;n|uvgwi&!h=Z zke2k1t{gRZ$qs4oBx%0S&d#Qps6UFb9RTx$Aq76=5U5Vq&2 zhY-4~6>jq5#Lr-+*o)O7!N34P3*31U`~eK-KPFpTX+YL9D%OUrlu523ykbY)JlUF^ zPAJayX!gxA=BqjTtE2>-Eb!%xZnPUNB$ezU zocaEZCww2h?ZBw7$Fmbq)0il&ykGC5>MN9Z*o2GgWl~bUH1jp)ehOu8<*sZR3<%wF z=g#ng%qSZYhF_ija$*&kf>So%cH>6AE|1;-7tw?mCjzU>mtvwYT+m%7dJ&b^X;xy) z^$03butTS-Vs2UWBUKKn5iF~=ZOQhT`TUD(4c)FE;Shje8j{>=WU{V-Z6bH;sg>`0+@hyUAIUHT@(sJvs zfAIHn#m=pT%zRixWynPrV=^_h?1fdl)4J61< zSAcG$W=!qmgH0>fqDL0VpY-@lX!}kbF!{>d2ItQJIRyi3C9v&PO<%hY#=x*>df1J; zq`7y-Ff#*Yvt`)5l3(3O^VJmrGW6N8(=Mj0&WZQ4$&>q+dyNo{exZ{w_EoC@yjI^vYS6TJ_?+? 
z$X$YsUz+)dEBYHQO1P1=y5ak6p(anbdDBul%XMLK6TaHA z(Nzpm)UPo!x@LDpbN1jy_zI*68m%-?Cri%wY}VIFm*U?bfFt(d7l{ujYj$kUiS|X` zY#`G|FXKn1jWNCFS)x4-8rNgq_(`9GQJ6n+=HvuP+t+B~`dcIQA4~m5MMK8kW1*kC zZ=*PvdC*X{>w_Z1NTV+#X6vr?-*V|+tMvjyV0oNmnM6|Kfd-PaamG^PE(wu3Kg0ap zHt)Scn3PwD5Sx#8TjY!)90|AJ`Z0Tve`S3kf!PcRgGcg6lrk*aQJbQBll(3$3mxMn zcuRuYELqaxRW_wO9ZWm0GzjpCav}Y7C*TA_0ZJLDg`C>lI-jyf{C*-K)yJ50RV9(3 zs2>Tmw0SVudCfH4V&RkU5{YE=IP zPWJQ|*a|zo4`;&$BK=UkPeMYtSqD<1QuZ^k&~g{%U$-bGJfp;JvjbGvv&jqTUYuct z_?AT(JOt|YBL{y0OVI%KFgK-Y4YQfdaoydug1H*RSeK{024>-FkVA7@7~%@D54k2z;TJVifY!< zk!TRoC?E_Sk>|miW#+s+abBUI`8mAUE2aJ83;VUb$6s?-em0GL5>P32>lc7T2Vj~^ zs&~0#K-4^`V(Z6ggl`~_VaY#Rx9UF#8cGCUAb9_bU3+FAg84HEVAWh|BS)SYWIXj8PU>1&Ad8u+fz zhhIjE#;YRbi+0veG+@IY(CNu9>AD%puBiD=)Cdfa0s@!GX~qq?NR1;T2?pjMO9;>S zcmkjB@H+hRBdGLI%5oeHN)FJ_5kBsyBd7l4)O=W-pd^e`1AnY|7c_%8`~6$KVXtKU zJV%pP6AP@|UnOOUV_Zp$@l$7(tjaL)!wL)pt2`2Yls}&`7+9|nbyWs!e#OE4Y8Xr= z_y7QQ+ui$Cy=GS%dh%j+Dt)w>VnEyHJlpBNVqwqk>_q^U7B@?VT1n4`@ni5UvReEC zl^9(XCtPR5V^Yj^ZK#T74HwYk6E(y<2-^GJ$bJP^Da&g z)&>1$e%C%Y8l_piyS7nA? zw86rwJX&d-V*NuP@c;W#S-Q=oVkh>q!jLD9vok})3{RD#hD+m{l(tIi2VqBvbcv>k ziPOy0u+PyFb}>NwNH_zMQ~+S>=&(&4UuUe)AFJ+ser$kiU*A9g<)24bdhyOnFfn`Z zGlzhJ;itmBT{aMhs;t(>OzbY5y0!J%%$X73;Z-7Qe7l+lXRD;+(ojThFy4F)i(t$;N%{+U%!$L)KMNSn5g(U!Xs4{a17NbRrp z12OyejNABy!P(H06|>Xoq9|r$ABKdFF!whu!v0{6725Rz(v^nqlhmThYGEQ$V!7mm zf=K`B2>s__ znS~oOl*R~W2p^CYYD)KEmME!%ez>=0DCvk5%9kB=Qnqj_@&+NLnv9H}a6Csp$l~GQ z9pH%s1pe3fGb;MW$y>!XnmGlhX}VVAS<9A7K}Xq6EM*T#*Oa|8L4>br3`{3+8X>)o zAW1#`*`ag!yB#pok3Zi>j8xHNj5x%g_mTGYY?#*Sg#;Xas-Yh}E@J%8YW|0|O@FAdILpQ9EydLqTIeBz%DCm8~vz_$tS41L70SBz^25J=Q?rA zT>F>DAf{+k+p=nBp6>c|s3lNd9!mb9w6Au02$mErthc5|2rMGt-?QsdcM>-F(1Yjg z`>INSDmy#r3zWDd#?RG%D;9w3V=Mh|^$T@45Lg}wTdWiI=m$we0EtE_3`B4R>)~K7 zE17p{(y#0U90{oX${O5Pg2ZHt!D|S!z@=zF%7b(x6 zN73^P3`irkfW~S&?%nT7f%x}=!2K-?_J&9GJuGNqiXfW@pf0KbUKqvjts&~O$Q0T; z(7%H)5)de}kVz7=zv4ou;8P|^g!;ys*fe&`fw}<+n^-;lwbe>HO{USOy^k$46p~Ja z&nRQ0l8FXO>VK^8`Pyh|dNd@0JlM&wgDCWib$1@$U!~|Xwj=x>OZ(5mN>~D+3a0;i zcxe8=di3AgWF0~Z_kVw~*DRPu4PcdacTG7p_ia1zoHR9_O%$&9 zPhB4#IwK29$V;2*MaIzLl9NX4GwRy6xs96Dd#y|WZ(?;m_q#9E&&3jBren=1`~|!; zDxA1;%&9%;2kCdz@zNU6ayM7}K1e0-dF-Fp0}60 z*DshcDJj_1s+@W`k(K{#!~VyVDSNsmr{HdmHK*CHw4sv1fw1@XU!gGjZv%C3$`ic6M@9l8J>BFLxMu7;!&DLzI+O1Yb6Cu;a2wkY4)NUpKD{5*&g0 z>QT=_f>1rJC~<%5s)vVq)|}POsKN>U^sJ=YqWiTikGmrI?lWS%1(LbAERWZ|`!!nH zv0@sj2`0aoHg)3u ze4fptrIOxlC8`mx?Y>;2)18yK#qDUK|J~~-$X4sjjZL@xBU)&Zc+;Z#Iiz#-RBJnQ zNrV3KwsrI+xG((fyu50y-}zdd30v&(ZsvC3?m3ZSl3!0J-T&l5ue8ta4pQ>0+nHH% zIP}>K5?S=jsh(Oj14{pggMw z3K%-&ar3aBFS&x(Zdy5QaVh2|jD!VUIn~D!{!>;;#sGogE@$Smnzhf%j!rr!teA=a zwHs^u-depkf`Jq6SoSG>RI4Z~#}-pnS0!3$@7s3cU#hF|KMvl0-!4=#^HB3I6Kv@-8cqD zPET2=>d%PbFd)96`FT5THw(u0DOOVlkCU5>YgRo$P|vp8>H6$H1#Ngxyy@=R%Ngr_ zmWk)x#Gs?i>B9I>+zENlGT-_~)l;9T=iQ~~MEZB2nOP+{!X!wA9=MB#VSprg?$0qt>6CsFs-tgSKY!ipO(_goxnK9Z9!5Oi zA;pY&85P%u6WLTA!T|PIV2K1L`faa>@ur<{(zzY~IF9h&d!nz;Z-B-}#xbCXDTczg z3cmhK^<5xPF zp#BTwp^vX#^+|B7n?+MG=$Z= z{<xno++V=M3gwNIE(tVe^(TfGl zro;(`Kh(;_2bQ#9!B;a1u*x-S7LkKSP%O@4iUbSjEaL=1-zLVopsGeZCp)|I)5(A{ zZp~-j2JuB8_X`N=>6&7OuW}F2;Ix`G z6gW|78GVgZeoyGe@HS$S_}QAb!|iuzIk&bxyZN{|Rr{s-dVzs$y-OlNTv76Ez>U0v zd#KNQ%_m{pe+|wpIc|ml9ZR7qsNPyRkLI-BA6u8dUhNZ;L@?-fdX|-a0x`*>0?NCU zvUj%j>f7GDxTq#ZHjOIF$-uD+0v%`pb-FFzK{G?$Z5BJ zN=*2&R#wEWn$_{zvs=MAsoT+p-WU*AFkQdSQS71fcI|)9Nrknz3h}>`y5{~7_j!#| z9W~yxxU=Z^s!=uEs(qyZJcfQS)iaf03N~Ke+qn@L{5b0`3|cP5g?O66&op@$T8prkW<_`C3&~ zJ13A23#s6f{)U#bv=X9zUPR*DvYWq)&y0~~z5^y18z0N-vwYXsn8b_zYiW0}#Fd}= z`I3DN2=t!@um;Ro>t5FmvhyWFav|2-`~7&~eF?xtOMTr=qR@=btIwDD-6RxZ*Sto6 zPgIPX3C4soY+icJ>VCXFUlx4;Ox8V*H|>boIRb!Josa(A4N?02P;Xcq-37FBjB1t< 
zXsVdV$ZxlwI#PG{y!tA{#cI4cFOpN8>!(k0Gcin#-X23SOpfU0@Ab6Oztga4rYo2r znwZ-KlPBo0b>LUp_lN1Gz?T;QCs5$evzrG%Q}*ldSmL#9Coa4&ffn~6bN!sW>`ce@ z?Bk!<)Y!3looh-u$=$}#?>Vw|sfm5$ z)7}Bh5vPHs;zX|fkY(GjL|4pPl)0#T?RCS zo=Qj-UB1nSPp3g0C&AwC)n?-yeN5qZr#ubzi`jmAa&qGyoiMc$9OKi5;{bPo6)u;< z^|M>%vL*WJ`mDkA0X7iYakBgN-Jiq*(159ne`~ssrg`<6m2qxfx@~j8C%-FGIkoSG zdJ}*Dg-jCbC?IK|mzOjG&fM$8&Cu_x;H6!OZ}QPOHOw&GbJJJY?(X7m`r@%XQsID& z)=9qt*UZwYxZ+&(;L{xTWQR>AC6YLqy8TM)`h0t*-EkLx3@U?T4H!NA1@Lx>jH?Xh zmX5;LRqa^Htnw%@zAvKszRtrYTd|*)+n;v-41{{V?bY~a`edYJB%~$n-on2NRM;cD zjNbk?Xv-c0F=Heis-qNp3>Ga?X4QLCv>J4Z7{F>iZ&>e`)oUz+J|y^FtF`Lms6{_+ ztmPoypUvqnSEgRg^a5ZaVLEbOZ$BLAKSvWTZ#^rPe2!v7xi#rh)RHn1?xMRNB(F@% zhji7H*tqzPZXccNalanoQkGTh7c3I<0U*atZsDrPS2FQ@?YLuVPDd;IZKBNBnliNC zC^D-V0`3)qk1nA^5KUP3cjCc}$>H<+Wp&+qs7r?u;@~vnU3%(g>1fDB=vxOPmiV7# zZmz${UP(wQ3R@io*Ir6WrlxhD)L3bmtDocQ>;D~=db`BvZk`Ejrl(V{UQab`>}arv zY*R%%`qShIZv57+ zuV=j(Z2JT03*0mzyE!6{a^g#sxNMQI^0R+=jUTrf9+4@vL;-EMa7hFa;^EKd?_DF> za!9+m?VV`LA&M!!3obgsuM+2%^d5RAnRr~@oG!x$5=u<=&~a!b#O4>|LPqEV3_CAt zu;C|AK%ZSHjgy53G^6ChaUg)2`F-Dsqn`=)jXcQq*8_%7ag!|ix)UIivM#PCzaybLR9f9Gcm zFD(c!r+RWOSDpPhU_8{KJuelU8E~S9Z+;b%L={qGc*N=}mB}D&o$}t7l=xD|o~+Mn zbZ26I?PI}-wqzz%1&gW9)0~h?^SRRBUMlJ-4P%%0Q?_EY}CpMkB=l>N9N$)+kz@hylXx8p-hth}Ce z*R~_?I=`z8e|m^yB6c+O7**2zRkAnLP@Nm(WLH_0oq+-`vq>ZMDfLU#@+xHAue-gr zVDFSOt}-6wf&yF+LLwYO7z;nMZXza0@vUO1&IOH+|CHbV(yM&vx`?==(}}hxi6#mk zU0jCGzL_DV9XGPS%^Q)0RVf<92?m5eS6GWDt9(b9=UZM5XcRCZWo6aJO>g_E6cmzC zCwKj~e>;FtXd4g^nbv@=3{(J!{NSZ|;Tur(zaOi&xMG~B+1dMLp|lwR=$);cJj9+A z`2+mpTM>mMeNGrMX=Zx*>mGDIs&1?MVF{703f2OACqM{ziw6J6J4&hsyln#}8|JLE zvp9<3JRd(CVQ9@8bf_X9fKe*4&rd6+S>tW)i%q)?;&|&Vu8Z4lc1tPRciHuEYV0~5G0QSbny>fR%lJ*(A(<#BPJ!PoZ;69nKA zV!yZXkt?Mrig$rbxk>YXuCoV>P~iF`IRhKFfT-aH$`)q0#_#GfTH=&h^6n4VtD{*h ziyJRWC?co;N#vronZ87UB4|N;LW+j)^yqMMd|XCeI)flZx5Z~0T6_#pA|>lQj=$Qh zutr(_C}_-f5ugs$s#Q>8#t7Sv&t>czxcC%y7Fi#sMpbFka>mElQs5u^a>22iFlXI( z=mlS7U8@7}BpPUg`>cvH&Zdmsm?-YRZl#SNdI&&0Ss58k8*#dHCFVZr+Z-D0T z`FZH?ZD&9*5W}8{(ACscRZ`O`<@L`jYwSH-_*nVB(vkCj8nez^aCx~auHb|L5sngZ zx0Dt%S?Q%Ox}66u?yq+i`SI`I$;-$A(KXNs-IGA(!RPfE!)sPq8Hh)DKU?d>7ndLP zY)Fvc(S8}x*LuW+bF}c_!;jG(Gvm({M_TQIOs~u!fDAsF-N%Nl@)C^2h3E5kzK|PZ zL5`a3%IS3<_N(#qYWxi>u#IhWP1=Z2#04I=#;EnPVE~+FO1RctQCR6=7d%{+fw=0M z@u>_#P@bld>VIgF$-yJQ&CRde*czlMAu)p-O8%~ooNy=w`y)lLC(`sw_1fJta4*-2Xi& z|4Vsc$Ie`&sARE_xsz2%XHi8%i$R9+T^ekNgN4V+G(TYrjvQR(X`_ka^D)@B!kHBVN{G8Jiw{!Wovn4{M-`AuDZOnth=2ib zBad*6w;frQ@pco}4FggDfM444YrFl}u@4h=mtO@tBj@o!0Y4p2JmnM#A^xXsrx!O( z$<0*R-K1Gm`lFFL7Cs&hW^Uh;sWne{;fn8kxm@m>Rsb!rHcZ}sm1;>UGRxZ4^}4y$ z%a^;YqB?*SN;drlmG6EBil@j(-FiF>KX22C<0li7e7d`8Y*4v}*c0g`B#gn3vT?IJ z??LDuJ(qx}@y5W4nIr_G?fJFu?a3bm!pmG+(s1AU``Y?mMYY|%8?uk#cd<4+e`zzr zc3M%NR$|sc?DMvRSzIRx7V*VEq%Q5WCtMX&p9Y&gc0(8Afb)&Q|0)-{p^u2yY>q!J z4_R>u(IJTEIX;I)8u+3}9zUvcUk@$Ur~ACCQkP1l_3Re%d>-t+YoQ2(-)`^oe6Jok z&pY4K(n-O9aOyqT*|T=zlX4z`jWaU^Xs5?^`7Y?zn3xb)n|2fgGD{jZ;$YOEwdcqf z=j)a$KiFsHU@o)L(!EV>W!+ZKZ33Qh$E5GcQ5-;e~)>Q{vm&nXC$x_jlsvd+~e$Vn}&JkQwOdNck= z1?oxV?s6G#hJ=X^-k+}N#b?#a0p=J~SJ*;$_u`EYus3blu|VKu&!F=NPywx4JvTqx z4J_E`=!&YnMxVw2^D86g$&M5qeU+F53Nr=xd^ek#uU}||z`5N%FHhsV26D&@p!}*L zYagSwHmli^tC_}e;8Qd}e9uJ|p`<&GbDv!%BLdE{Z~hWh*Iw&4?hH&onYF7Rp%y!i zSCG^DaQA6*`f@sQxFdTkyAn9q)W>va`NfnnG>dK&G`L9oarWLYm)n%b%Ww8XuX0RTqaEIS>GHW-kE6-=Ol z$MUf_(&>s`^GFkRmym;jLBCaFGWH&0tqx!!3a2$#J_CMb)&-zhb}oZm*|jBtERGMX z+T2h<4i7zNJDf%qCiGx5fKWXBr`H_T3TjjCiP3fkAT+?T+>Jf{1HwSD!;>QdqEG9s zop!()fo@mwd953_@KRA%{x`4M8K2~r_8Gxww>P)#I(H*cMj()wqFvhYMpq8Nt5`%G z9TISPcO8z_cErfjVCxZSjMZ)^L!^K62_WXMK9 zNL?%HJt`gDn{Y5s@D-2`1Q10dNI02?$2&}X2w-bEty)1qoGC9L$a{JVO#fvk_#C)D 
z3^>6cX6ZDNm64FCk|~ks!gT^n0{vw556jr!fBokAxz7)?tmoa&-<^yI(Lxz@Evb;V z`V(HPEr7mh-vE+gctT3h(WRT7F`l>_LZ)ZmrLA9YTIB?Tt$OOo#l*(rw{@}B3tibv zh}_Mlly*Aql*=q};Z7ux%oo3i{LJV8P{xe_3hLJDHa8vkf2=A|pc zm+&y8|F%i&cl-PwD==)u8!`o00chZq{>vd^1+hB5NRRss26`UopXICH{^^Ah^^w#>@KFq=g?)zW5n2J|RsbWD_HkezLj_2lFI7s{FjNz_c|Y4XH53qy>gooj-HGH8U?3obR-s-_U6ClQYm!Q{v(fm|tBa zj`}vE#18^#s#UW$(?(&Ui%Umc0sJmc1H2-(sn=jdS>}L^+;>KzwCjUE;-f6ss_LD< zROGyUYdx~Jxj=x?u(vSLQN)flzILi}a$XpF)-5e=+y)5GsH?55Ug!=L1b^;q4mP+%5g?igFYo5iuu?cXzUW3ONA+Uk*)jXl(iuxopQAz-z4ur>AV(1$Q z$~^?)7zbv7ZU;Yk1f3_IdrtB65b@<;$3(m4twKu;3 z#9~5DhUeA8pMgZ!AX0uuk_LZyV>2&&P>;#haXBY7!OpJPqKfSi0bcIq)0ML`2vqA) zSYN@ID**`D8gP5lQEHav3mHWYtV=zRI1zk(dF79B)};l3C?w6yO#!fTz?g&X4baSh z`bU<1@@8SQh zZhYqtaF=$^!67t6yd7!#E!3f)AAyWu=pOW=rlQvqf_0Ofman>2hyvibx>nVa^V(gd zcUh!=P)c?!%`B`!x+`kCm;`YFjIRPj{#VSKZtZvHOMYitjnkLIU$aL36fI{+unpGQ zSbLk#=sScesGi=fdcPVRgr9d9Zau=Q;(ZPxA#{6A5WmI>D`{R-zrV$ZiIt5={00uu z<8lBz8q#J?`)7d3Q_~ulx`*ZRfAsRxn*jWV$0yAhJY!*O@xj4U4(~c|IAP%K{{8(4 zEm+O3soUarFPm}t7%@MNL_mWrrS>F*k(hAPKJ8x*X&S8XM0)6f>`C|Yj&#KUG}wLJ z05|f~Gq-WstjIKO=u?zWb8!^KaAb&vO1yO9G36^;23N&+;m{;eGaVR>ay^lPn+VIP zZuU!*OFQAK5Pci_1S-3`B9F8@?4ib3L=j+au$8&An6y=|q#QR({@N#IWz$khAuj*y6r!c2d09 z2k0MwufcT2j~=k$$lBjCqJU!-er%^0J!6bRSW|YgQ}{j}Glu)ReV5EhAHFJ&cx*IWM7@`i8mN zHfq*1H#v@J6m)i_MPAaylf)hC^GUg>b$gsg0I)U|9WSQid`1+%Vb=};hy3nnpc&Xc zJ36y&zc;8Nt8JZP(^b<-2LI%>{p|Y^>`b{x5omA#c%W6^cf9yk1UObe8$gW(v-)va zO(m6iqdj+<^7rC}_h;_r<|U@4W|ebHK$)c9oE&rgMweaTqA8}7ZqG^+*z}`gG0mhw zU@HLrd^#)xSOv(LD*#*C`wtm$qYAH_>SBpjybBJ$o6E1C4L(sr{zkEI1C1AG$Mo6E zb4x73`vev%x5*U7hi!Skio zswOX1kj8Ao+)ab+tv2B3yIgknW~>+K0ueR=##IoUnR&O?xtPKCI$f%-+c|xegt6vX z*P)ZRL(r_9RlT}gSLRdekxLAUq;skEC5Z~HRx(13tMA^fD{I>XEaeVl8%i{)KsNrH&rmb z2}r%aTA1QW4(71kt+n_2`{z!yH<3bqx6s6CBAQMnSg5EaV>eqzV()&cnIuLtH#b$n z-gV=}LUuH=(pqOH!Hf}!KtXj{kY5;_Ntfx!<+rHU>h5%D^mq7YS6}U%1=59|%D-D6 z2%nwBtrE`3y!&oc3x(Q*6{yO-1F0l`B<5`xGOI5yU%q$v_4BuZFA7Xuq0G3)hhDSU zO85LeL%|k%b1}<=RkQZ_837LFUB~u?EZKlF)O17U+GurgZ{I)#uEeyYjAVgm)NAxp zNJeg|)27t*#oup^hOb)4Fl{IsZoC8p<@;iqgrlEV+Ks(M$A2ukUrIRxzRm3WQRY9m z16m>dw+lcLkIF|VQ`=oX{)eglZ9jzh$AJXz3NeTyi}JB*a(rT?yR?>svHkIG6_NkN zcw4!G9InCD1>iDPO|}RKteomnKPr@E?qR@TA(I|{wx~^SUbJ5Y5SNEb;o)*s!Dem8 z%1KHS@|0?T_vh6{ZB9UO=yRGGN26$@_=YlZ@8QwJi&(or<5N_#Ct|}A(o3C11$7-Q zRYeuMcH(ave=|{GsIa~g6TmH>x}gmrBItXd zg!&LonsBh*y8_x&7edME#c9HlF`clv-m!db)C>v#J~*M<*6OAz zwFTiMJiI|bc!vyc{{me;z0(Vw5lKuMC6J+F^|XYRy#Mqa?U-1Bzua?cL#XFDF^xC+ zh(McRXU&LYFwI*q=X{&Jm0yolz;WYm*-5?nAwwgVFduz|ldEfNEQ-5PZ6QvWmnTZ} zXoaiSJ2`M&9c@7mIX#6%6D-WO*6OA)cd3&TGnzQk2P$PrvG;MKky>~oX7#vX;U1A< zyKzMF2QFN~$trVgZCv4DaJ5Tv9}NHK85xSX=BSnSAs05(EzHkz(^DMqAAaTvbrbQY zf8H9-W0;LmMM^3wl$tPN!3{&a_E`BHFJ#X+iSN0cZbnEQaV74=kcNei{g8Wu4!so# zdhzWTaD6|1wD}>+mZQB}&X6O^ekJvc*Q2nkIEP~RXz2Z;QbtOh-Sa{42FcrS-jt70l&xKf98> zorr!nFOQ>nT74>k zp5pO$L*36pEegy^?Hs9dkL=WQAL8L4&rTFmb@}dXZBaTzmB*Q_{hUs^L`pU&vn|4zltD4fXQ)J{jGhMLfSC9W9lU^JuU3s$3S%xU=_pwCkdy zzO=UW>+!=#i)CvEy!6_gV6aDm5aW7YB!|`!$CyVa+e#UX$|jD#7@amtQ4Bq~8+N z1_H{98_9+045X@HsFDsT@6b?@eb$y{m)E$R_irJWo*jQ9z*NDCn#%VO?UfgdB^!?0 zi@%%3f}i<4{d6k&9QgEUi;6()ZZ^3x6o6oDT8_F464EhLh#)B@%kr~5wVYW7nEXk` zBt=6I-Pr7+p)IU$Ph*JWp_8a+EYy0vKS#$Mp+)LV_T{+Wm}bqqL=K4`5J^>Cz-Q+3 z)I3Rc=jN_Wqa4(bP|{N2Kn?Z_)f*J_p*Mdkz`98Tti;O5A;C7Fp1sJO;)eN$&AVM`r~a>bvxg@6x(J~m?w_~Ua$GvHC$~>uT}F4U-RolWifuS*H5$zK)t*QXZ|#ANW308f zFV!a0ggkz}K`2ToTv^f4sLVr!^Qkp|zSW)y7MIulf35}8h&z8;eBfqcxYe$4=nAn)Mh zh~0=wOi@s`Md0!7Z^YDlYxT3W+Q+-j!b&XjL!0Km7gML&oO11UO-y{>3RLDf*~mj? 
zlu{>jywl?$r(99o1J_?}attH-6Qlf_C`2 zkd}c8rKA=@^sr5;dNF&ZcpTK6f&5tl$(f_Gy%FErWTwfXI^bPKz{;X6+aldeEFklIjmM zzJaNVKFnGSdVd_5u60bLxGt{qDf{79+}-bfT0SRBY?7?Dpx5D*MIjjy-#>ZZcUvJ+ zN(V=x$>i^-@=a1q*3t7e^9FnU@vUHC+27X0q_SB)(vT}`bH8S18c~y8ZEWlByyY-n z7{L#&Ff1;){vie@$35IMb#S5x%93l^9Z$W-HWoIF2=NbdzF`(aq5{6(>b) zQ9u}}R-;lZVKd`cy}h*+C`Lm_?XVKE*n!?6=t0g0PrgVP24s(@wKh}DBfG^crW)vv_7cAq2xvI__O zPL>P?utAeXgI3Ox&Gg~zu(WZr{nCSwTG#o8+?2$wd>ju+#C0wmy< zk@@o`o!z}#Cx-VgQz`qsTH5zlzy3V#X>V(*l+ny=^U1uyQpeqYNEhSxmGaud`OM7@ zdzEXzgr{dTxhuU*gsr~Tp?oo%8iW3;_?~XFW!R$MldoE>LJZAJ+?`QOQI|i;{@!$& zapSTD(BF#LR8+Vi;N;h=n6{^mos?4V^873Xx&wE1F)}lNL)=lLK8VytucwHxi`sBN z{{9*?tG8^x@)58I_YNT z3jJ%zWjDx_UAHvYsO6RIf+~cbMUIW6ACn4n*uC3l^!I<#u~R+83IPZIk&ukaqc6DS zK?qJr#)B-ci3w8EOGh}4MKbxxz5x93e-EgCz}){l1{t6S3jOaVOyNQ#|NBY0D9QiS zg$IGgNWnl6?*ATpU?ku^!v7x5NN_+V`oBg1HykXGA^ES-LkJuAKac-^eoaYAp_w<3 gRr&AX5byuaVQ7;IXE^!Oe_oOlllxXJVi5Gd0Dg!mr~m)} literal 0 HcmV?d00001 diff --git a/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh b/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh new file mode 100755 index 0000000..9ca3670 --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +# Baseline LoRA fine-tuning script WITHOUT bitflip (for comparison) +# Hyperparameters match fine-tune-bitflip-clm.sh exactly +# Usage: ./fine-tune-lora-baseline.sh [num_processes] [model_name_or_path] [per_device_train_batch_size] [learning_rate] [weight_decay] [gradient_accumulation_steps] [block_size] + +# Default parameters (matching fine-tune-bitflip-clm.sh) +NUM_PROCESSES=${1:-8} +MODEL_NAME_OR_PATH=${2:-"unsloth/Llama-3.1-8B"} +PER_DEVICE_TRAIN_BATCH_SIZE=${3:-1} +LEARNING_RATE=${4:-"1e-5"} +WEIGHT_DECAY=${5:-"0.01"} +GRADIENT_ACCUMULATION_STEPS=${6:-2} +BLOCK_SIZE=${7:-2048} + +# Function to get model parameters count +get_model_params() { + case "$1" in + "AICrossSim/clm-60m") + echo "60000000" + ;; + "AICrossSim/clm-200m") + echo "200000000" + ;; + "AICrossSim/clm-400m") + echo "400000000" + ;; + "AICrossSim/clm-600m") + echo "600000000" + ;; + "AICrossSim/clm-1.1b") + echo "1100000000" + ;; + "unsloth/Llama-3.1-8B") + echo "8000000000" + ;; + *) + echo "Unknown model: $1" >&2 + exit 1 + ;; + esac +} + +# Calculate derived parameters +N_PARAMS=$(get_model_params "$MODEL_NAME_OR_PATH") +N_FINE_TUNE_TOKENS=$((1 * N_PARAMS / 100)) +N_SAMPLES_PER_STEP=$((NUM_PROCESSES * PER_DEVICE_TRAIN_BATCH_SIZE)) +N_TOKENS_PER_STEP=$((N_SAMPLES_PER_STEP * BLOCK_SIZE)) + +# Calculate max_train_steps using ceiling division: (a + b - 1) / b +MAX_TRAIN_STEPS=$(((N_FINE_TUNE_TOKENS + N_TOKENS_PER_STEP - 1) / N_TOKENS_PER_STEP)) + +echo "Calculated max_train_steps: ${MAX_TRAIN_STEPS}" + + +# Generate output directory name +OUTPUT_DIR="./output/$(basename ${MODEL_NAME_OR_PATH})-lora-baseline" + +# Generate wandb tags +WANDB_TAGS="${MODEL_NAME_OR_PATH},lr${LEARNING_RATE},steps${MAX_TRAIN_STEPS},baseline" + +echo "============================================" +echo "Baseline LoRA Fine-tuning (NO bitflip):" +echo "============================================" +echo "Model: ${MODEL_NAME_OR_PATH}" +echo "Model Parameters: ${N_PARAMS}" +echo "Number of Processes: ${NUM_PROCESSES}" +echo "Per Device Train Batch Size: ${PER_DEVICE_TRAIN_BATCH_SIZE}" +echo "Learning Rate: ${LEARNING_RATE}" +echo "Weight Decay: ${WEIGHT_DECAY}" +echo "Gradient Accumulation Steps: ${GRADIENT_ACCUMULATION_STEPS}" +echo "Block Size: ${BLOCK_SIZE}" +echo "" +echo "Calculated Parameters:" +echo "Fine-tune Tokens: 
${N_FINE_TUNE_TOKENS}" +echo "Samples per Step: ${N_SAMPLES_PER_STEP}" +echo "Tokens per Step: ${N_TOKENS_PER_STEP}" +echo "Max Train Steps: ${MAX_TRAIN_STEPS}" +echo "Output Directory: ${OUTPUT_DIR}" +echo "Wandb Tags: ${WANDB_TAGS}" +echo "============================================" + +# Run the training (same script, baseline config with no bitflip) +uv run accelerate launch --num_processes=${NUM_PROCESSES} \ + run_clm_no_trainer.py \ + --model_name_or_path ${MODEL_NAME_OR_PATH} \ + --dataset_name Cheng98/fineweb-edu-1.25B \ + --per_device_train_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ + --per_device_eval_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ + --learning_rate ${LEARNING_RATE} \ + --weight_decay ${WEIGHT_DECAY} \ + --num_train_epochs 1 \ + --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS} \ + --lr_scheduler_type linear \ + --output_dir ${OUTPUT_DIR} \ + --preprocessing_num_workers 32 \ + --trust_remote_code \ + --with_tracking \ + --report_to wandb \ + --transform_cfg ./transform_cfg_baseline.toml \ + --block_size ${BLOCK_SIZE} \ + --log_train_loss_steps 50 \ + --max_train_steps ${MAX_TRAIN_STEPS} \ + --wandb_tags ${WANDB_TAGS} diff --git a/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml b/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml new file mode 100644 index 0000000..a4b26cf --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml @@ -0,0 +1,8 @@ +use_lora = true + +[fc] + # No bitflip — all probabilities remain at default (None) + +[lora] + r = 32 + lora_alpha = 32 From 7ae68b69180c883740838a41a7d73afedb26a41f Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Mon, 2 Mar 2026 13:18:48 +0000 Subject: [PATCH 5/7] baseline scripts --- .../clm-bitflip-lora-finetune.md | 42 ++++--- docs/images/bitflip/7b-lora-trainloss.png | Bin 0 -> 85450 bytes .../bitflip/bitflip-lora-train-loss.png | Bin 53362 -> 0 bytes .../lora_finetune/eval-bitflip-no-finetune.sh | 53 +++++++++ .../eval-no-biflip-no-finetune.sh | 53 +++++++++ .../lora_finetune/fine-tune-lora-baseline.sh | 104 ------------------ .../lora_finetune/plot_train_loss.py | 48 ++++++++ .../lora_finetune/run_clm_no_trainer.py | 101 ++++++++++++++++- 8 files changed, 277 insertions(+), 124 deletions(-) create mode 100644 docs/images/bitflip/7b-lora-trainloss.png delete mode 100644 docs/images/bitflip/bitflip-lora-train-loss.png create mode 100644 experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh create mode 100644 experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh delete mode 100755 experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh create mode 100644 experiments/llm-bitflip/lora_finetune/plot_train_loss.py diff --git a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md index 4ad90d6..d2a7592 100644 --- a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md +++ b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md @@ -182,33 +182,41 @@ After training completes, the fine-tuned model (with LoRA weights merged into th ### Training Curves -![Bitflip LoRA Fine-Tuning Curves](../images/bitflip/bitflip-lora-train-loss.png) +![Bitflip LoRA Fine-Tuning Curves](../images/bitflip/7b-lora-trainloss.png){ width=720px } - | Metric | Value | |--------|-------| -| Final Training Loss | *TBD* | -| Final Validation Perplexity | *TBD* | -| Total Training Steps | *TBD* | -| Training Time | *TBD* | -| 
diff --git a/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml b/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml
new file mode 100644
index 0000000..a4b26cf
--- /dev/null
+++ b/experiments/llm-bitflip/lora_finetune/transform_cfg_baseline.toml
@@ -0,0 +1,8 @@
+use_lora = true
+
+[fc]
+    # No bitflip — all probabilities remain at default (None)
+
+[lora]
+    r = 32
+    lora_alpha = 32

From 7ae68b69180c883740838a41a7d73afedb26a41f Mon Sep 17 00:00:00 2001
From: Cheng Zhang
Date: Mon, 2 Mar 2026 13:18:48 +0000
Subject: [PATCH 5/7] baseline scripts

---
 .../clm-bitflip-lora-finetune.md              |  42 ++++---
 docs/images/bitflip/7b-lora-trainloss.png     | Bin 0 -> 85450 bytes
 .../bitflip/bitflip-lora-train-loss.png       | Bin 53362 -> 0 bytes
 .../lora_finetune/eval-bitflip-no-finetune.sh |  53 +++++++++
 .../eval-no-biflip-no-finetune.sh             |  53 +++++++++
 .../lora_finetune/fine-tune-lora-baseline.sh  | 104 ------------------
 .../lora_finetune/plot_train_loss.py          |  48 ++++++++
 .../lora_finetune/run_clm_no_trainer.py       | 101 ++++++++++++++++-
 8 files changed, 277 insertions(+), 124 deletions(-)
 create mode 100644 docs/images/bitflip/7b-lora-trainloss.png
 delete mode 100644 docs/images/bitflip/bitflip-lora-train-loss.png
 create mode 100644 experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh
 create mode 100644 experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh
 delete mode 100755 experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh
 create mode 100644 experiments/llm-bitflip/lora_finetune/plot_train_loss.py

diff --git a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md
index 4ad90d6..d2a7592 100644
--- a/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md
+++ b/docs/02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md
@@ -182,33 +182,41 @@ After training completes, the fine-tuned model (with LoRA weights merged into th
 
 ### Training Curves
 
-![Bitflip LoRA Fine-Tuning Curves](../images/bitflip/bitflip-lora-train-loss.png)
+![Bitflip LoRA Fine-Tuning Curves](../images/bitflip/7b-lora-trainloss.png){ width=720px }
 
-
 | Metric | Value |
 |--------|-------|
-| Final Training Loss | *TBD* |
-| Final Validation Perplexity | *TBD* |
-| Total Training Steps | *TBD* |
-| Training Time | *TBD* |
-| Environment | *TBD* |
+| Final Training Loss | *2.50* |
+| Final Validation Perplexity | *11.01* |
+| Total Training Steps | *4883* |
 
-### Comparison: Pre vs. Post Fine-Tuning
+### Comparison with Baselines
 
-
+We evaluate the model under three conditions:
 
-| Model | Bitflip Config | WikiText PPL (no bitflip) | WikiText PPL (with bitflip) |
-|-------|---------------|---------------------------|----------------------------|
-| `unsloth/Llama-3.1-8B` (baseline) | N/A | *TBD* | *TBD* |
-| `unsloth/Llama-3.1-8B` + LoRA fine-tune | `w/x_p_exp=1.53e-5, w/x_p_frac=1.53e-5` | *TBD* | *TBD* |
+| Bitflipped | Fine-tuned | Bitflip Config | Fine-tune Config | Train PPL |
+|------------|------------|----------------|------------------|-----------|
+| ✘ | ✘ | N/A | N/A | *7.91* |
+| ✔ | ✘ | `w/x_p_exp=1.53e-5, w/x_p_frac=1.53e-5` | N/A | *1008.95* |
+| ✔ | ✔ | `w/x_p_exp=1.53e-5, w/x_p_frac=1.53e-5` | LoRA rank=32 | *11.01* |
 
-### Resources
+From the table above, we can see that *LoRA fine-tuning effectively mitigates the impact of bitflip noise, reducing perplexity from 1008.95 to 11.01* for the 8B model.
+
+We also expect that with more trainable parameters (e.g., a larger LoRA rank, or full fine-tuning) the model would compensate for the noise even better.
 
-
+### Resources
 
 | Resource | Link |
 |----------|------|
-| W&B Logs | *TBD* |
-| HuggingFace Checkpoint | *TBD* |
+| W&B Logs | *https://wandb.ai/cz98/Bitflip-CLM-Fine-tune* |
 | Training Config | [`transform_cfg.toml`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/transform_cfg.toml) |
+
+## Appendix: Evaluation Scripts
+
+The comparison table above was generated with two evaluation-only wrappers that reuse `run_clm_no_trainer.py` but bypass any optimizer steps. Both scripts share the signature `./script.sh [num_processes] [model_name_or_path] [per_device_batch_size] [block_size] [eval_max_steps]`, so you can sweep models or batch sizes without editing Python code, as illustrated after the table below.
+
+| Script | Purpose | Notes |
+|--------|---------|-------|
+| [`experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh) | Measures perplexity when random bitflips are injected during inference. | Produces the bitflipped (✔), not fine-tuned (✘) entry. |
+| [`experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh`](https://github.com/AICrossSim/NewComputeBench/blob/master/experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh) | Serves as the clean baseline (no injected bitflips, no fine-tuning) so we can isolate the effect of noise. | Produces the bitflip-free (✘), not fine-tuned (✘) entry. |
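+
+For example, the bitflipped (✔), not fine-tuned (✘) row could be reproduced with a call along the following lines (a sketch with illustrative values; arguments follow the signature above):
+
+```bash
+./eval-bitflip-no-finetune.sh 8 unsloth/Llama-3.1-8B 1 2048 100
+```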
diff --git a/docs/images/bitflip/7b-lora-trainloss.png b/docs/images/bitflip/7b-lora-trainloss.png
new file mode 100644
index 0000000000000000000000000000000000000000..601d2cbc1f28c0a769987fec3e7367afc543207a
GIT binary patch
literal 85450

literal 0
HcmV?d00001

diff --git a/docs/images/bitflip/bitflip-lora-train-loss.png b/docs/images/bitflip/bitflip-lora-train-loss.png
deleted file mode 100644
index eabfde2e7afd29ea8f187dd54e0944bf16bb190c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 53362
zAR=W<%#L*CVf_iFDZ*S|l%HjjEEX8Z;fAL2+%9+%SR-nbN!LZ{T3k)kQ)o&7DhC-M zk<<;LPc~I(iik5{IDS+WAuV{S_|C(l;<#TzpQNWC?~2dw2n$P{A_cJd^8tR?JC%~* zqb2`_sRkwgQW9H}T;tdNgke=YI2Ouryo`QZ_^h(JGTAJ0YTvTnZdgd5d|<%S$AiN9ZCqj&Xlpy_u7fu?1i?)~C)l^vDtly_>_g&yrMs`~O35 zD<3Kl*KvGd9&Q!1r$`mfR&K|9&C2@Bk({s{-X6JL)?>W&KFZ(bS0{vYr`VrLRI_seIs{^P{ z@9qviYS(0asV$!?dswL!*SNds_&#$5ppO$)I_G9G748M6}KG>h$(C)YZJ4Qe#P5 z_jBfymBf?}SoE9+4r>1N_$0!c$iW(ACWG<2q9rxA(bUn=;oPUfpt9}dE^V4R!%mbyT00hg_X*(GA_rPnkV`<97N|{=GGyOcZf;pm0*6XpuehxH zuGyqFk(Q(9MHww&?fmIRqUY;d3JA8T)IgJZ)%hHj3$02utT z!RC8U-a$w{`#Uej9thY+PEIRbj(nf)c-Qr3y{6xWk`{vuI6;CzAg z^-B4o1NQM|2~zWW3G+~L;IkNj2bfyr@AwzhNr(Nyq??M~meyHu0bhOjm<r4xVqo{7ID^V5D4Q`T8&Z-37t8B~tHS0^X%?|be}OLG0(W#7K_NZo*$eme zmoD+>w7?$qtB5c{VXo2OLb`xN1#}yQhK8cxFE;;fy;h8e7I~G*@Po>P zQ$k*zx+;rA@3jnaQgrgsTCq{ykj0Bx-|q6yaGeNGod_Y_**>P~xC{8I$8SjJxA2eH zkC{J#6uc-;SA4_N5Wf;Gw+^-YphEoSVh3oK6+IyI;95?cq9WQ)Af!|whF%s;LYrDy z3l~M`%HnVqO3kFB(#`JE6u+WtmL+&Dr>%QsH27ki#EnRPWPS#uRfbGpzOSX3J8j8C zU6L&Zl0o;0PA>(Yoog`EENpk_(&Zb+Tfc>}|KHDc#{E9igx7c$PiYHFPQcSZ0_W@( zU{Y7Be3z*>n&WLhoTfPUw04#zCuyNI6){-{Hxs|a430WYd)Hu+KScCO{^d*PDqmc$ z#JOz+l2o0|tI(8J=-2%%wj?Yx9d36Y>LE7W=7xFc+U6dLsZ(S2HvF^wcge4w09sCn zq$CgZ!j5(3@$++d2LzQt&YBPh2CMh-!`E|PvClN-)XDwFVHD>q-FDcTT2H-~WU{~8 zIz10mMBT*0C|c__Qq*$!`2*??Z;=i$2?3Pk^QltUE>K)lp=^Btz(Qpm$+CVIC?CzH zhR5ma>l94vjQ%a2g2LyKz?;C+`I5GV^_fRzRkAhk;-cf4NA@%Y&ttO~T5Ya%b&+SwlnbG=8Tl&s%3URS)Ob!AW*oW#%ZM8 zO_HNW^TH&PIt*GSz#b|$&|AAvG{+bQDmB*T*yYA={CX7*om9s%Q1S1270IOfqK;bE z9%*VKCouKc(r-+H?=$(L9G%xHvXgr4@9wU~OUMlna4ow+Ki(4dKMciJ7 zgTt*$ z^$VK~;*O%~(G9?xNtfO7cfvAlG;GzZ7O%;Tr{{v$-I|SJ_dbe8n0n(Y%B;mAIw?Y+ z&$%%>&}z$jwiMx2|HFF66FGxfwna^*uGYWUMm3@;^mX*Xdp1{qayqo6O=VW;k9O+C zX1!)AN?nZIK!-=#)L#VNe*Nb6>_fOQp-rObFc~F_Z?2*Fj&Pqt>F6;mkpoeh78dLc zW`pE>7M2jzyW70*Pgpc`kjH8x>>QHB@|^voUvnD*VPv%#PBKyIvFpV5;$n7sPIoyOC6KQa5L^~bw5R*1bH_4YJ;5=7@QF~_G{ zty3~ZxW%hx4=~neOrRbKE0aRgwU*2kM5Fy;4Y<~;ww{>5EpVV|(TpU`NQx^oE$Hg& z_!D+CyHm$@7SuH1yY28bj>~**Dqy|S**AePB3ao4VBL8`cNz@w)zo-*)H(J`{Kwye zK7Ndt^}XG0)Iru!((e(%rj*=7Gg~AAcZ4L;UjS}2)3Sc zOr+ft5;!^Kru|x3iu{|0#y&Re&k3V8rp1g|Lq#CbB(*9fp~!Tz zVA8D}+(_7u;V1Nc(4c4ML*-L8XLF}>qUR|(N39SLkgjFNUVG&>Ivhj79lEZr`9?PO_0}3tZI9{?~pQUQw1e zkB~mlpE(%An0X%6P-`YLg?B**Kl2VlKgO;U=I)UjHK3E?ziNhRKf57JLb1;$738@Z zmMh{LUZ4$)q^jh$^4y_Z4q;;8swUmz`z3rwF>1u-uB5>R!~sHhW6Z<+0JP%(*XBv| zlne&o*cWAj!|Gm)g=2~^-~#E`0|rt!zibJ~Eekp(?2dzzG)fMi1um6IP|)PY1LHd* zVX{*KOiRRdhNjQ#CZ8CaJN~kq8+=JqAu3z4rvw?#caH^}q)NeCZKO?oE>JRrSKX*T z=%${omfG#8l9-;67&Ieg;)FCxa+L)+#9^~w&X}#Z+|gcVa%c)C&_NRe9VNr3+BIT& z12)oOyx*ZnseSTBgRFs3;^XQoHJ^JIxHz_qTbmFW4GCK-d_}mQ5(tDTT)*4W`+Ha6tmnE z3QZUGo##hp+>H6(cVc_j1Nbi9M-t<%&K5Ek7BLs1s?LSlo>^KG8D5Vscz9hqY;hp| zh>bj6U}67Xfal-&pZ;kdd+B#-m1M1(rtFzHZ7y&zWc@AF5S&S96w%0-cd70rz62gd z`grwl!VEI+Cmr=#6Q4ER-I$*kic|ZD#?(4P5&0FEFr{jm6ngKzo@3-o!zwin9nyi(^ zUbhm-Sa5cyyqDT~I^KKIUc8n^oxSsih2kIjjjkLYZpShm&Vab>|Mnv@u%V;8@lw^O z$8XF-{Qf@}*DbSpk(#gk{@x0YhU*yMH|A+hyD?r{I?z#W{Y-izszL46DC0C~U zVzcix#Ma05-A)Y>3%&=bT=Wm;WBZ&tRwTa(abJ7e?`Y0RqC7_> zBMFsp;3EufW1FthF^t{rbI!oDeVl}t|3Z;Sx#lB)HYYMeLxGnVb8tat71 ztB=3re)!od9d&T)ULkdV07Z5x1u`!UD*G(nyTUWfFVAR6D!%KTUtSl{&;G2(!+7tV zE^ANO@H9vH2SPR>Ps3F%y;JUSvA@1X>?6L^sMm5+5^V926{)UNGNEdi&5kKfxM>tM z<2!+sJN>I*ZH`88xR*~;2e~om8 zla-5M^xZfq>=~TwB)lGvOGK^*Tq%~WS(qx-r`NgF>%HSqQnWOXP49u&m7hcF?^fmh z4qHB4F=tn=?ndO}Gsi|%Zt1+xS>q`15>QL79Su4L7js$geg$K9pnZ8j{^s!gpgyq& zw{9!)X=Dhf@%?yoqw7NtT~9vRwA%^)U7eR_GLFyicbyeBp7&z-L3o)KF)ibe2_swO zWp5-hQg2c#@gz53&7{5>(LFd|W9N7obv8OfNBy~}+dU1pdyyK#bKz;t2|rJa^ZLSd zechVGlbB;4{{7l6s{6^{0H{M*x9d4A)Zzw!?R`Rl%)pPHU2VWib<@E1drkV6cXv{r 
zITDtXRO&8X8LN-7*L$iiL-bj1e$YFZ`?*xkz~jEafb`CbW^@UUg>_I#gX619@UC6; zfon@^{blZ}-hHd*FAT9VKAdHkYdMDB8fV7?YAC7?rK@|{c6=0NRhtB7PoH@%()n)} z{FiB&bVvFxru9R{_*He@4-UH_d$FNmo~I>oaKx^2Oo^)*A+EtYSxodkbmIU%UR?5Y zWN~yHNJnuGQ;uBQv=zU}vxnqwOUC4WNPf(I@K5jqvmL^H!R(bRt8--h??wD-Zgbzx zk1c0mBr0K}fWy{S#AEzK;a1u^FDtiab{ZYGhf*L`BXuzfXM zzWfUKHz&l@lH;<}qO_FH-(ICTtXFPg+4FWvXTtb8L;q8h%}zeFks$=N3Ak<)=Dx zo+U;5^C+m*1Q~@)fsAa|uFLA^doqH#_`{mmgE|+9QC!ap3)OV@Ytki5+reYC$QW>1zp_V&;shqj?#);G=0(+)h0KPq#f4}y#?wu8F{AS)mGiPEj5hN|(zQzi@97TS< zRxa1mJAHrUG)<;#)VpNYJ>{ocR0|6Drfcd4J3{Vqd{%35-)i(sk}Yn5(uEvhXj<|tl^r~c~lNk=HU@Wd zGmA{mD1_jKPqqcTlLxNGq~CQ+-x-6O_?CO*s|#x3ivO9bJnw66xgJ?%B01)6_=lb_ zmg@=lSt2IZPp-91MBYx_sEV9~-_*{Uu%P5VmXW8O@tGk!kD4a6GFj{ zosE%=I6?0|=BQ3#D*F)P51DbRRkiT@M{}hkF7B&Q#;WrDgN|#6+J3kiMP1Y(x!Do!|+XxS!`k`!p$kRlWcCLnJf0tm4S7Kvp+q zYjXSz3Do?epm)Z+E`ZAF8Pc8=b-R`~RUt}Zf%;>H(}y6{XzFiut`f*UrW{;a-SanL zNp_<9#vHl!td5vqKUjvrTL++hAAG5$I}Wc;Fj3IlpPky38Bs@GXDl%sFE_cv!cR}l zksv?(p(0?=$?x|w<1`(d#$jIj+5xY0?I{d?56$aI(`~CA&&EG}@%v!+ytZ#5n{>vsSG`mAT1Z{D{{%NJSI&b*G≤# z3>*vM1T7|84kz*B&r}$^ zr@K4iiqETbWh}DSN4B0gIl(?MGZ#Sbu#1=`{FoZ_{o#c{PJVb*@{c*4X+;DBAKa>F zvBm~jh3b9#EHf9!6ZOrt6yf`-xKr93t?}vhMOk((y=Q1GUnY(ZZiQ1rp+%N< z<_$QH_`QpNqGqfb{W>uv^OhsA4!dLuF`xS14PCd246JJz{Gffn(&AfbRgmgE(jM8~Xg0(q zCaKP$(^$#n-x8ghu=VRS-A^#sFbT(+&!HO*jTn9?z26Kg|G5vUek4i_xu1ImQTjHJ|W>-0Q>;XCh^|IE7Ng(y&y1MBw5Y>&A5xH zXH8n4%7MfF4n3zLymW1=^misx9p8L}v0v(=UHI*uw^5Ki)ok*1Wyj zAW>;mIAvC>epOfacUH=J4Z&*NkTIkAoA1bX!Jwo!;VsNgAbS+<&G0b0z<;7++rNd& zT!Hiin$j2qVT4#aY5`sZt(v$R^UzD0osyi)FP$&AbnyYVVeReYnU_u)lg##Osh7h( z-?RCeyX8JBuHD;jL*V!0}YcR^-2V9w0J-^=>zb$Ke?=ZjAQTH;}Eczg?*)2mVl zx>J3&L7sxL$=y)3PAfk2gV9Yl=PzH4u7Fv_BSW z-Z!gAqh>m15c?$V!l%Etr*GLv(B007eOYN)IF?j_ZudPE7x@f(pot0?w?e|dZw#@G ztutIXd>*xO-XZEX`%Bq7Yd1AG=A6BA&cBa+uOje&QTG*YQAb_3fS{B}halbEpp*>V zUDDkh(jeX4Ee+D$9Rkv!bf?vZfexAPpGA=|Q5GQKhvYl^AozL!TA4?RfV%U~3!ktKUvvpxs+WNha?l(1nh8v^rJtvzk{Af{WGj{zVOcdsH;|I#{e%mx+#PrQU0k zjOwp*WO>oJZu?6>qOnM8qix2}5+TS&Id@6PqK=^ueVe!Y6N-Sm7+c;Y^J4lQb zWasF&J@wujnq(g>pFb7YH*<^przN!i*AnjjYYDS?wl3EG^@qq!A91N9J|q{&j7$}p z{JjMEDRA*t?_52qAp>00Pf{1DorBDeD$u0rmS*jyW_Vv^6p;f_!;}t&H;`1xlZ}K6 zcpFjRvpKVQXWfiN9QG{(c{UPSc27eb$6)Njnx=gIrjLzKHX4H(^;d)kPRn~)eUVQH zblVv@5&`mcyfy6nK5mKTHdhw*F75Wd@?_Mllvqfpt|6_aRxA~TztR-rxBSRq#zsK9P|sF$;1>1vSGN-) zF;~H*m5pbs1lqWlw7|J(!FksQ4>?nh6uVSi_T8=*E6vwUl zJ|h#PusQGUjr!5`j#x$KQc4y+){jc*X;_Jz7mPu2qEkj!UCWyvz=17}QH)QKaeRR+ z8aK%z8FuOTksrQ_r9YID39<&DhQ*0)4H_{>(b7hY>P&DK+LRrMUm#@k`KaHygH_kV zH}PsG22Q$(LAO4$9M1;z9M;2u%X(br>x@t$JyJlP=JF#w8**_qC&o; zny5(54_qdeq`et2T_PxbXgW)W4?QoRY8R=VN8%zsjqpx}Iqk2+I$FwTUchtG-D|&- z8Kel(pUY1oqMgr+H^=t)XUWSNDrp%S2WhlZQSOm+<_5S_YtwhP_iCOkx`3Fnu^ga9Fwp#a`gpdHC{#IIy zaFdmZZmi44!{b<W|TXE!U7+cqkt%2EQyyFS~SF?rfr6i1gbx^VIy_7BL+wrwQt2Ig{BfNLzPD?wu=C zJsw%j#im1+RqC|PA7uSwjaPM-In5Q7Q2MW2m;Wo*5`bBe1flZIF}MxK-8SpE1rbKEP%)%kvrFzO3_9@)5uWbY4BL`zsEWsfk1WVa<7I_d<6D zp{Ise=*7Jd!zDS`-obuP1skXP;jJmq4&mwOpu~MgYUh%3)~_EjkNPXY(QCW>^ZU?!=l-rd=u8PC*Xr*lwG=`~5&j4)Leq!7POcU)&yB!BiHgN_V zGyTK$W-XN5Pmh|v!*^Nuf~<8P4X-*L%UQzk_*zi|->HjP1mPbHd@1t0PK}l%O06Ta zd^+DkPgG~=ZM_Zcn*G*V^oc2*SE+dt4>Yt`{X4vmPKVy6Co^{0&aM+CxGoXRep#ny z01ShU8=%Ae+Qi61<{R$?*BY-HF4gbamAVQAy8Q9&grbdi7duKbvS*h<;Bb4PBVJlmrq#)6HsFOmTy0`Eq*a72c{i6fboDgh^$&ep$JGn{}Ul znKbQV9+oXa^To7+tIh;6-?k%YLuMoEQ5FZ2DX`jAT5d#2TwuEn{h-p9?}&)JzfX&H z_}s4buJSp4zt{1)xb!J3B!5~fe?>4BL3#ZT2ih!2Eqg-e?>Dyc=p;%WS>W00W zyNsf-!Q}bYhO5Zsru&+6>VtqK|1AIf56qDp94mlnbocy_Kmv(k&jyT@vH3I#)(K2Z=Sfs9ITLTdTPaSK4mEhnttlSt(3&X2y&P=n^y(l%&PSqwuXN$m)PWcgR zUJ~2#?^jymS0t_949v={k`t!`4uxzOb#hSAmPBv%vU7xEU5V7}f}g 
zqSdn)&VtpM>@q7IgIu5Pjvw)%979{Q(e1uTnfMz~wZ;R9-&8tF?~! z56Mi|v)nKhi8KWO1D`K3HfVY_Fr|$LBp03x0Y z+u>hWo*^frOq%10$j(hxx{2N*+(=zC8wR?gp;1yMmCm4UruTbH<;7+9>WQZv)S_hW zcg%6GtW&`lbQE)Qnu=ZDx21?3;-s9<%em-uEcj3fW6;G(qF~y{d6;25OCv8JgG$kA zYKC?Z2uLqLJ?CVenb?MXe?PDg-H>^d{$4gJWKp-u%USZys*zJ3*}3K70Kh-VjSH0I zBPsd*U(XW0cfm@1pxYhM=tIHs8Uglza#6f)q^0IQK;zWu&D|q z+X#1oEc1ehFTs`qy9ODJBiA;gS^?&qqZ8QKe7rq27kV*>BmuBm-lEEqZV+#CPwxnb zN8!T)V86xFh9?WW_gMNm;~5al&gFPkc$-*G#BZ)vXuCH$R;2){aO z*Y3sQhp}M7G!giJPLtaY;j44T_-6IjxV)4ZJ!lY%SXQOXxXMvd^O^y5frG3N%pZ*f z6*N;jVc91G9%0|{M~{+4ZDj7tHPU82l>D7m4kd`V=o+=b!)J_Knvx{@TA)%uy_vlW zPqg1qeYPR~c9gaC5}!5~>5$}iL2MeOW0CJjrKDB|ZHF{tN;bnKo9VTtrHpmtj*I3* zD5|7-r<*E*Rdkj{Xl|inq+YHQnFdt~hSk`b>ymfv z6OTq!HZ?~(>K}siONIk5RXj^s1?eKU1a6+w!UJ=LUOPSveWOFqGP68L+peZyYfcV6 zU1sG}jb715)yy4PjBSu5^TV2CgRss7{8kj0pVL38efd3sq*!Tnc>a}U zJSqgGqKKUTtIbWOIV!3wv|>Dl&*}9yJe|5qTZ>lrr?GddZcppPzPZ$4xhDb!4bmkl zlHX68y~E(3d~pI9)~vD*tuyxa*l*lUZGRvGc7B!y>%pP1k0wiuu0!m-V`_($z;8XP zCi2uR*q=(p}E`Z`d2?0_>AT(cU4Izup77;NW{@rto)g?I*2xyn*U8-sc6e9CDR+U{-g(JlZzgxri6;&ICM z_dmBHiW87!hCRW_D#RI-7NYN?>M~-^`yVbERQ65J9AemKO^%;(uP$SH0fX#bYxh+` zYo+I{vyFHVs~dgfKvZGqnEoK{n@wtA)eb}#D{a`ETT%f3`cgOFws^RX6?Ggfj_QUM#<&p+W0OxLTAxku8apVpavKbw8m zR09`k`Qct;Io3`J6FNDYFjb2nT@yY1Ve3hr1ksk=#dE=-=iqDK&w5+TBb`*(#hfgx z{)u`+hU|uns@tA#XjBSV4?dMoy(y_!wD&p&Xb&n6%}k5q&l|IyQpy6xmg{7r)bQ{{ zPz%&DO`&VZa!>2AhJ=+TPra2x`Es2||2Aj%rozW{+l;J@es>q^s%Nm&)_^=O4UdhJ zWsovnFY`{?4;|KnUcwvHWLG|NiTF-yZf`)o;e#I~BHFLPq#F5-Q&)G$}`PD03d%%ER;-LD-jyE zT^H}bVub};b!QCvflSfrz`3^-Tl?X24D#}Oo7 zYxUQoH_oqBq!L!IuEb@cLX_z3@S=8K7lr&4swd(gNJb$4*N!AqTcA)OG%mKye6c>qzqg37fqlZEM5jFRNJ{(MkEgE|q6d+TY!Gn@)iMd1 z)tG8pHVc>JKD?x>jSla>cH=Yh(Z*&o`koff>LhZgc@qP4dGRy$)sot6tdy$X6URG_ z;6aJ2EFWP-eO_&v#^fK{w5w;85tMcjG0^vx-jN=&y5Qk26VPE()FnuN&@FwIuhGK^ zcx7~I@v{65KdPEulOVfQurrsKQ;H?M+?sIdYu)xQpyYuM>;~z6M|(X;le=plKyRQe zcrr9LvfENAHar6azKYu{RqgM1?p^%%e-|o0jd~G8#1p2rQQ1!SucUv7_j+G)(5d{S zu#$1G?D-Bi14H1`^Ka6Q(>=`>&PIWJxpy$K*_~e&CvvAn`7>ZOrFZ^=bi0tsK@SRG_$bD5tm2iW2+v%h z${gO8=!UFB_&(5-F@+3$IfUlllYBV>912GXmZc&cHlMWHpC-$52cOX@FHaT8yR@0G zKuxIq-dA9fGWVLS&ea{9_A7CY=tIXh?VG$w?X0V=g$F6(2jht1?|@u)=Y_`Cbn;`) zv8c7Q(WKn_htr8PzkXx7do+7CNav$7g%z?i*>>r+Z@F$mJ_Djz?zqs|e8i|HRp$qD z3srP|W+%To9hM|$3KrvA&(@pU+4lAuQEt8jy*>&eSxh104o8WFCn1rGA7V>f1o1T1meIGH)>nqs zuQ-KVc@mY0$EFGssW1};b}5$BqA`l#O((Wd?%M)@yzdI(e|LNl&{mQ2`>-A_17rQ3 zp7lGsz^7W?>KtlL9+v^jO57m(Kwdy`f*8&jW!|s<;^jgZjGs4A{#&g=no&w|aP*)A ze57dOQ^nPCcSDaD_^`WNSe1wvsxDN@ErcFxORWF3s{B_(VH{v_r}uA|gkChOG&XBF zB?S4q|GcaikpC79ve~5)2?*rt*+VHK?CYHP85p9C^Lw1>OV}Btg~23fAxdQEEXew} z1DmJ$p4D2^Z<$JBfSwi$^t7Qs#b)t=ZM#7NjAbQ%?VWGn4VqvWA>uzV^3CHM@}?b~ z_V;6l9H~RN*Q+1L@jF?$sY<7kUR-C_=U2Qa37~%iKCSX{RW)o2G=}YQjczu(-*Mf4 zn%n}SQP#0ji<(cq=_#>o)bH-r5|!9**Ta^;mgnTY;(-ZJa3N#vl^eeN=aZJLI>N{! 
z3!c|<>2f;zouCoOXWz)2YR6|&B?*9FK2P&xgNGS*!oAzhj{t{+vo@J3nBKkB5Yajc z^i7HA{D2Mk(dbJvt!10 zi55E9*n|Xn&G`WR3w4@%f%a`0oRaLUTl+?<;LPpXcVwf!UDw{ualiuFmPR(9tBz%N zD|Ac>$Sdke+VNjv^QKonSlh6>2#Y5GLjzDBG(+BfdetyFo+kHJT}%hCj&!EI7&dsM zXO{jq9Tq&6H?67#fX-P+ERHI`Yw@r+1^qpxA=T&v5PkroGQ4q--PY-!U(!;YG_E-o zu&C3feJ^S5`K%Ny$yPUS`L`a$IrYtg^fHFc2y!-Aa<5FZ^h#!{y@tVLk9K>8|32nt z+L*RyE1E6>ZkS_AL$OU)kD}&PLc8z##xMwQ#%Fi?wOW*GAYm6~1yx^jD#^LdGe`H% z4wFi4UeKZ=)jGS+AO4Bk_ABk}9V}*_@{IycT zc+vQ~U1qgi;#Nzi45FDNUqx9@%4Sg*u1h$G=d}9XrWoJS5jFz|EWr=GKMb=Z`!E1I zZe*@>6*~l$APeG=^sxv_-SP>+SDC0*Dyk)uyv0L=ySAoP{PV!ScYIiHdW~SrWu4q@ zoZO>SvjO%Q2%<+GxzrSUK0X+y4*DeDWUc!=XZmsRm01v8Rn+52Yc{a101eHjX5;8F z<(Db1X$-Q;yCt3}&nf(KphWq|d+Uwyw4gyq5Ib3vZ;VG$B`Q_Aws8ixZAQQF?5Ezh zJo)jhpErrNngiN}z?Trx3!+Tj6x-4OmvSc@J%Obf(Yj0#|Hni> zO|GFzi$*z7YOn9Olgi8EiYb_XpOvThkJ+X!+OM!t{<9vRCM*t~0Zm9aLJTnj6v6K> zmw~ZE3@UXkOrb_;Ct#;(S!50Q$7+Cy1*v*C?X2bLV-vVGiA^pv^li<8N=(4*oO7yX zZwl`g00K&cw|gw(E#NqZk2N6#xu^Hl4Vy0j5Lu62A8`nz9o0KEtgp;o|Z1;hm)JHC{;9(Wb4vjfG1J))ii zsK-sCKn(@v0g#es<1{k1pvQ8G?AzK+nmSA^FqpnrzRz8yasSQpe3Rc`!DZ&W4!2! z>0Lwi)V$OEg|kl}eyf`pLLp8T;0vc|&_RDrA;rbe_tM>VKGX)**Q3gU| zsFSG@6DYQJ#Z1R|iO9`rhW`Bf&zRA!&C?pE65GT>hOkl5Rcj3HIqzqy8xB8GBgj5J zMXQU63W7=PpGbq^tdj3i?Hu(%*ahm#o~sol zbSgER#6EOV2X$~QJa*bVrx_K`(rmw&UwPENXYQ>rIhiRLqf@)Lee-kT(jHkKv6@!_ z3!^9^O6fq66x{%8VINKqc_giU4xzDss~Yg11IzL8gyBPc&M&Df0V2aQ59f`5bGSJ4 z2q)y+u7BG5o4tm4HuB_iL*J?UkDh}{F&)c!aD9M_U##Ks89`sdgv#o|y3{(TG-q)E zo=M$G-Hr*4RDYvA;lYAD>s{5Ngm%r2m-RGHAbN|X$pO}*$Q&K;UhS*}hq1f%ulayd zIOe1RXd*FelpKvh=iI*+No?9X&vifZLEwNwz0E(fWv~JY^^cNWqup&rM}htcK(c~` z%9&s3>L7y%ECZbLN&<5$`p0kO>`#i4sOd1mIrr3z{!Y2F*;ua)zWT89{8#7(NLRch zME^qIL3HRCfSR4_nH_D{)X(wNdDBZo9q=zkS`lPel30*b$ujXs@nUh>UtyBzch@l= z;h8YZSX}_6#?K5v?o}ogiIUC6kB|K!C8s*b)csa$mCBpcwcuqAI3KcdBJPF?iN;-i z^!EZU#N=Ep6%P}gaE;nIXQx7&ht7bTKpbUj(d$k#{%H=5&x+*#s{Gd8)CdNmEEemi(Vp2a=BGo2%A%Q$%E7H+52knEf>kX43Xnf zv*ZIb&h}w@2_d)hV=EecvhHP+<1+ys4pW97>LOjz9?&4H-g+0~GR1F{FWVMk6HKbLJQ_qskjKU%**Y?NU-q%uF46Yrf&VgnvB z5y*Nvg-2G)&Fo$p@4WN&j(jH04RG_^Y*~NFZUC+pAe1@s{g(Txg2CQQS^ucl9pyUf zse0s2>qiRcV}NggrbNP;pG2)jk!&;aCAsuY`{?Yn|HWO%a?|yp%8<8hMUy09d+*X1 z0gmFV8O=Z?+xgbvrF`HFGY}x&)SN?i-aVO z#D)FdPd2ukPBJ<3Lk)emI0&fw2Wv9Y4sqaX7IA2nFT@=AXLhDm3j1ev=6oo1z5vA1 zDf~lWP@WN*+hl6<NV$d zK4ymTW)n1&fUL9{`SK7DAJ0B-bNo>JeCulsyxBN?MjdL;dp*z~^F=l49<-`(&4rh&tW_s`O`e4$^gdE|i#VpsJ0zXyZGkG6$4UlpbkV zHUihjs`~))2+I81!G*Ek$iK+~xN%J_=N3S<9-zs-s)P;ygYWPBvU;UU{$tap`o%W; z%<4Z`2uhD-HoD$x!O71gl}k!TBUQ@})#3HBX=`QXaj6MfD~IFg4>--)4LpX5LcA7J zi#>qX3PWYX3R&|Mkqm1+ih_WhaZ_ay5CA`>m~b3$b`=>$C&*+s=Z`JFioOS~2HF^q zνADey|ob88LH7sFq8QJ7Rq=)%W;h*SRxk#uw2uHst*p4@W6rfDt?X_h)^;-ToF zg^cP_YDUw0TI~LbO~&cb?=7H#-mH)SC6byhV)RRqt`2T|drtBww5tsz-<(hDlRjvU zoh{~Hny>voW_-&D0i5ypF`<31XLHNti{UI!^*FCe`p2;EkrhMtXAQt5c{8Kun(FI2 zG9EbwupRI~|9+p=`@{`O3ecS!cFU5_P3t7-gxA#`PCO?a76|P^U3((&=!%fuOdwld zsxw|Kw|!JMeb>Z{tQ4Mr&#J1tua>{mNK4YzvF7>^h%Z!6E&~t~C@UTpoa1UNkg)H; zhp*!oS&!2)>@=;QgJ*XE&mPFz4g7~ddr4POz)CW14|vhFAb5Ga0S3 z(0v&2G={h>oFaFjch?-ocpVZ|JpBrP__}T7+Uj&tb_|?8MV~4O%=u}sLbwVl-5U*F zU^r?s5Hd*L5AwW^{0DF-vBX`r%2ty*RX9c}3RbaE%y@hncHax4DSmU6JEkm9_jWAq zSY-7@MYp>H*-4Xij$V$@?NCD9Wq6l+THC9nav=P{l|V<|Y7Q)QAn=>;qX!w6@wZM| zv$RmjWR;*jGz}TL={dfAvq@fRP*Zmg#$aaxg%i0bG#bC-u5dHgm&&!$>Sg6|6rLo_ zI4v^MwB<+9SLiZ{@;`~SBw|I`i+R7WM{15^Zlu3n?N?YZ9rrTRt0i}I+AY@fZG6l) z@L74hs{hvNanAX?e$JKc_21ZxoqvpC`ZA`q-x*JUCdT$FW?3FL{YiC*Zd|cu5lH0t zZEBW4S)*S{s<-+6_rtHG(UUe#*T&hI`1~;Nu=`g3OX}(RGjQ${bZA<~^G#>2fD3gH zUTUc`9*LLC@?xQdlEUtnFE7mUDSl2(VY<&bzp6J+WNXWt-MyV(Cc8l%eII(ze*Oxd zZKv~PX-OWLZ=Lh^`!8Rlmp^IDeBaQXbj-vh3&2fcX&KFC_jdHiXYmXeSs$RSOJ`H7 
zl+)zzy^f?e;3HY>??_|QR?7o|-}W*ms;A}v*d;pnBR78btQzsi^N1%0_rPJb9E3bebgyRomx^fV zjtLLLB@Vn5xGFM_<%Y8QaIDbRLlXrZX_Y*TU%XI|^+ON-0L_qVtm%v-{12t!t{med zQzrd6&clm~js@y}qvE&zF>nXgjLwklgqL29=+Z3!PY||;JQI5D?=O6Ijf<=~(b8Yi zzofRXCka4RO~4rK?Z_W>uG~}0mOOOMz3A@<$p}#gHQarmyh#``N|27my`#9Tt1YC0N+WAUIjFrEZXnaCz*$BR(7(yR|B{~Ljj6` z^0XL|90n>ilq9kx;YxdP%GDcz5ys|0HXK)vH-80gf*#>XPzEHTB^Tmbnmb0>DHAo*niy? zu&JS21pQ^YTmsJbg}8cRhGZzHRVlD&)1$%%7Z~$JD4)#cN&HA<0L*1GK2M=qQUWE) zZgtQchEYJ_gC=UcSFvl6C&@#*4-**ato@1|M>pVdW=x;~EWA7|j>PslQ^(Vb2;J^b zO#RNAMX&PdrIX&Lb^mo^7t+(<7|2Rhbq(0PHFj zn+cOFh}M0-!lGfSCe)KS9?JYVg~n&}qK&HtS=pZ$ko7_H2`rCN>7uM#FHO%xh_R2q z-#qKEj98$_{qubP=eE|*30ma4_VEBM!8Te-Q9`UDzmEd0-Blq+fJ|0@{l+TKU)8_+ zs+9p3Xn4tg`PBs~@d^HO1la$)KvE18-eEN|FtRVmw13jr(k=7y@g9Ho-`5BH3;H_! z_5Z#b_~-QT|6UywRGaYs^GZ6T|5LmF^IRr0@YR0*d-F|(|DUP+cfU&t7FfIf_x5kk z)_;EnZ}g4$|K4#8{r}``0viAPccTt)H!uPlTe-Cvf)7KYxc8+$7b(ZVsr`@9Ghj?X zy5IYM#%Fxomj7b|+eVA27L zDz5VnIIi56BEqO76jfojP!y69#6+KTQCpCb(E6trW^JB1gmb++oKqH7w~Y)PqbKSF zM*;}_DU)?x7cF*(zNicBUYo~9_}RO-|0azj!0b(^?$@baLGn!pvRi_;L`oM;@A<_> zcYijA_%mEXb@S@+V(M(>XC~OC`X!TgyE1K!NTxz=IjA_$A?zdUZY7MJ#B!ACg#rKe z2@-a_HY?@&%0^@KPZZs~IDZOU_qfmA zU)to5&gPH?S&co`LPn1t!3f;Efr0c{}&)3@rLl9g>H3p80&5suQ zADo_?nVjeUu~_xXc{xQG&%60ZI_lxF9n{4_zN7YfpNzBcNie(kRmayw8WE85iJZp= z_E^ViaS*lt^l8My$7?eyiX@2&8lQy!cfBLK!l%s&o+*y}ru6-BS6LI*RQP&?Zm%N7 zyGdOMII{7Fb_1rr!Shr4JFUS9UBjJPyx|ie4q%2BlP^I{pyLk1p+~0R@%)}HJ0N<_ zO7TcfW8E`EDDXFr1?|lS)Tu$9h6MrDar4l3s9clNp@8VKIN=^vI4+e4gtbq9NDZxw z_8P)j*8L;{bCj$9^a|4VvdV^8ouPl=pOYbB9GFFbHAc+R`PD*`J1cd4uOZ;i+!wlr zglPOoKtrWi^(XJOUSSGUq^>jSsE+h6tc0H$I3m~gAMGJS-bY9`-8%(X@UMNYCKW~! ztTo)uULisEtj0+cOmn~xay#5V&PG*q*kMlya6itg6`?_F370az%e~!&XH7Mt)aXKY zuVf|+L#g_OIm9io0AY5CJCoo0{_fS*=NMM$5P(V&hfS^AKWm1r!SNVUz&5jbXC>g% zx$|pdo-Q)B4fLZULqf8;_7O7LPFlf0Vs!6Xz!AKS&Rch6_l(N8ffNx+;c*c3{m7gl zA478#UL?H}VU_-=&xg60Z-zhABEBHWDjU}j9}$>y+)6gEYsc;>yXD2CN=~%NG#0Et z`I${N&RM?B-{^?^h#U_;P(3M`erH$QZQnrpQIV=o+LzL%HAIyOUk{eTL3$|f)8j1z z4qp_~?;mbwkUs4$UFHFaI6m*1(ZO3Y?*`!}t^Ubh#uZ~hxEoPOw&zvkbYIFhf|Vi+ zn(b22Z7*X!@V8A0Io-p$u|BiH;qXuT;lPh78j|GBu}#AhgP80l=D^*ufO|y$C?JMN zv{fi5(JRcJ46asdF%0Da?SHWMpm_Vs;wy3R<`(C8T#e36U1FE#V|6 ze=QV4xTh4}g7|Zt+dd1g(`e|by(5N*AL&P3Cef;n_w>f}xeUUmmzaDKU?|SU#C5__ z-cE@JppC9{;g}40FE8<%`0h%RV*8vP|LwfrGqVj^Gd;4yLD*_{aolLGu`FVEh7WHlt(`^+?L;BY? 
zMOY7;euZjR=vwgVoNFeJ@)5jF4Mrya&@r|2h>spxw{#U|_^Q*!*X0lsG>B_Cex);* zX2Fh#WTs6~`Wc5;Cg|@n>L)1)5I)9bDzuT}4fOVef54?13Q*%yx3dVz4}zJ#>B-m9 zug=gdT%#sIOW`U@Mp?E)|DYpWJvK32X9Ik^$QhZ zRGEEvMaDP~iF00?|0sJJs?HKpuy;Cew zB}qdaH~wJpDM@c{5_NK;K-AA_Z&AQnI4}9|@&d`I$5>LfkN!E+#LB2@LGT?cGmTms zgXc+pj2@LL!s5x)^L;M3QXFv*Z zFJb#3(oeh~F||qJPA?q284LGEN@$ z|K-aNf4&$u{sSd*eS1dv&X5!p0>)66WD{qoSt0sX>SwG2oQQ+qw+MTAqu}VhONCd^ z7R0_oO@3ObIpPDM-?-4#9n|9r?q78~atbrIu~)l(Rldb}i{U7e&YlwEzv}qV$t-pK zw`tOjZw-zZE&Sy$dPQip{O;vP=iOki+8g2knC`U@ecmrP9DGN~A^of+m+RJW`WA^W zBwJ2yS)Y6<>Nq}o1^51jZ;pkLCC~nW!TWs!ZnMyD*J?%&m8HzCUb2qgK-CeyW|gRA zO)rC7Yz)IXbgeL?WCdz|JrBA2)G7>v2@~@tY_o%OPikc-S2!EU)ivF{ar8`@v?~PP zN?fDEnbieGxv3XKnK|Q9NsD8NrK-VkAxX+jTHWC|40~|DlA*y^Rer=J8l6Q5z$bI1 z*=V*#Gs?iUrqaL{)a)fe>3+Tcu)CEK(;JEAumRh~jm9O|7wcfs`rWrc-6SjrSHDWJxaT*E?qMR*g^?lf_RcUmEh)(q z+agGlH(qMwQq02T>1L;_I$>bdR46U`WM&ko*Yt<4I)f~jOoCORbY>86asD8y3171; z<#LPn4%jNG-5#wU0RQ0KQOK6#a84+v+Z4niyI5<86wYQpQ3s@Jjj;XnL|E{ez` zf6r$WoPDN{5>m^)3AG>yZ~@3fk(*Y&S^Q(M7qEY4{9y0~Ua~sbl2MZMaOouQAT#PA zS@6|5_v?##=!{Gv!}=AkfFn24#B zPL(Lcp=|TvglaFULgHns@pueK=bJi7>mz-a4ujGg!)#r_B6s^l>)~Qc!#Q2%-Xlg@LT&K(0kT zz|4Y!;RwpGB(kS&HM>-FFa+l?M8uEvqkt~|#n&0X2&$iJ9I}sD_!#!EScnQLeg0fs zc)2SWr7m_eyl4CjOoEp`FMJDV+HP0ZojTp@zItl?dcub;2r zgcTSa{PoS&t_gqr&-FN1*;zSV|2#5T8{ALRf2!ev3$&S>2-ZbM^q3y&)B zcXJ6R6NUk5cvCe*u-Px0t2KIrzFRR*uyj!FEiCg}-5Q>-T0JD9B-RD#0LDccA^1&w zKP*Z8KnL%avRc6)`=uw(aTNq5(c$Vz9;lb$+lAvjDMeTamOm5FaBF3AP) zAeXM7aQC)~%e$sZ*ZE@f?+SZzU8tR#oTCLapep=qNM7LFEhnjE;tlkQtD2!q2y)H#0{U-5YJ+kVR}OQR#} z>*)%995FqD`w6+CMM;58No!H-;K}vjz+q~86FA$|;J7oqwJE$kyy;K+pVXE8tfQ>s zq#4!(;jr9SAH@BLKcfbawGCc0c$%yb!SR11hVpwIRVGJ`>3LP-D?Q_(a`&)vax=Qi zB>kC1#)G){9R~Nem>g&{LudUL;tzi0zJa-l8a*=8=Jc^WF*a#&p8DXs*8j%~;84MI zH@oI0U%ily+KH@Zu1Z4T8dlQauy|;B%G(bGr@SsjM35x@W;FYhIWZ|vvsB$YJXH4q z0m;~#E0R&8mNfGU<+Zx+LKCtW1vgDE0zwM)bB-m)@t5#;_kkq;2}k+U29&-_dM3{nCC|(JG?s{zL$?DliR-^Q?!_Z!v#QmtR ze(N&Ttjrs|kv2zV;e}AIo1y6;p-w%0qnNs38c(kJeotj4s%ZgjkaemZU7Sm%$$WS; z<{0N%GA71}p{!1{IwZvb$>)zqKJ$+%oo|Nn=}D6TWswpKDXp-6V1Gtz458^ z#PI9|W`^hl$vL{S;i}*qo%HVxZnVjH4&1DPUfqdrFz4!^75-w>>7$LnLw$C~of?wg zM@}47YF?t#6aU6hwiHRj)k9r2xsCno8$PU;jGIuAL&uNWLVuuNVst=7kY4 zw2$fBbuP8KyPSM8E;)xdDJs9{0**nDU`-q!7s0vAZmCA{N=5L*J;WrOXA%#!tb6^Qfv}}!~_p}Yw;N-(tZ6DNw(U4(SqvnYiZ9dxSY4} z>d8>=52a_rC846PR3Qt#)Q!RuDG?-;R_vPQ`WEFgBjOhWEXCId7N!Iu92MPTGmAQ0 z@eqPQ2TSEqt=*kies`?-|3aU{#+uygOSFFqhf&o)XuOmeuhsXUj~RWcy?l zCplt~7kAX2R+i7mMS@0H5O|vF!W-NvC)U9Ewh*JLTs_~7G)~4mdpy~nc8a7RBAKVL zlIl57Cn?FfDZksp$x2Y)3!@W2{{#%;mcelLOLUN5+7!CN@c@+AX={e`G8-?=&g;{c z+rQxakhs_vt*>1aW0IUU)+aOVuaci5@ipNmChy_)e?TFZWWNgP+DTUIeS9_PLdl@!zZ8)gXi3b`0L%4tFwMQry>nek?Z!1hWI@;DHk7jwy6 z7*FH$ZNDJFv{z8UCbZZqmynJ%5!)r$-@8p<;zyUa!oEzGy@svmf?4UGKGiS91UeRo zU|r=H9LI!PI+fjR;3vh-(fuq}U?v~X{Y2T zbMUcf*eYjXQQWcqWg9q|p@q6OUp8!!2|R+--YK`j#PDC$5q0_sM-p8m2qGM(X5Icy ztje#Yj2-WNET89HRpmb?GQ!q-$L7F;bzV}9zGK{vYo{E?7_?R_%)LKv+6A%95)a@; z?z}Jyv7|=Pmxn{79L3bOFtM~&=4@PRMtYq3I1lYhHear4vuowz%Sk@GE3^W0WFwq`0!~^Lg|iI+n^9h zMoQT0ATz~@Az(9R8GM`Zhd#&t;Dsu3-M76osl%IpEbz6>+*1*bm|kC&oF~Tcr)PL% zRLLv97Be38zWN#1_nrVeJO7HJjzl8*6MR&D&BvCdUkMuGzrPmg!RbRoBaomOpzYfu zVUwVv%6|4`NgP&m1Nlop=}aIBQm-q|gJGxDg9t;OGxf1>cvv|lTW9^88VL#|gpM4V zcTOc_vMzTiw6q<8p1231fJd*SmrKVmUY!suQv6oRLd>&b^-XuIbY$YvI9xTErf)v0r8H~H(ps;Z!6M0be2A?SO&P_W zPN$)}f)iFh!YN?$F4MOO_Fp{Gie0VYt%``z4TQ#eP#G=C@eLDNm@0vl3x*1aGyxf{ zAaHC`t#by>5*JRnTuuG~_SWp8e_JR9mO0kwy_6d+EeLImsnK8UO68h%1YyY#3(JJM zsU<-iO*ycL2%n)?Gbxsl6MM#vEgPItNxU&lqDSl?5R73@ zXX#)lSg|Pz6PNfvM%Gu}fdaGepDS}^#LX1H#j|F(8y$*3`H>V8WPzNjl{K~lk5O`k 
zFx8KGN$U%xOgK*DJpFMzr%yj%J%)TW!t7mO^B_W$(chybo56S;T*c#}IZ_~@e-v#>@Wg?#JMKy3Yi&rCq11-AYbChXydSwwV~{IHf~pJxGmt42F+U-mzL-I zL&JiT)R}t31mWP=F_rb#S_{s1eJ!1;9x}FC=8dp^;L3mzRt*-0L3P(*Oa#Xfb|HQ< zKpp#O2MPLDiR0O{p`cRlwZ~xAbH0UDhxi8A!9ctM$)Ob>y`()b!Z5H10W8E#^R$vZ z!ebT4DKvgmyJc800iiJTXAW$v2~ka+9|QboGi^8Cwf?k*TRE>yEW8s=Ocd%5BW_&*%h}iOG*d+0D5* z8l}G&`Gu$%F(oir0(-er+jc!ycDt`+t zshWU$yfCL=f=4vB_zF1!1_wA^!+fCvW-I^9l^R9#E?#+Yl}{KJV#s&O05C%Do20NJ zY1`zoHW!@KsSARsI2o+>7S!b?{lKnmV#28*uN+?)q#$lg%kU_8b1`q1Q&q?=wl%lM zW|eTNg~}^YSgnjLX6op~D94?B#tB!=Njjt!?*hf!ICwY1!MCTfRT#rbUmfY!tuXz3a24vPt9jyXzjPtERgAU_ryR#!}Iv;dn(3bn(^1q)5Gp07dL zwY z1AOYPP8&(G%%0k#DEqpfBqL!xAC}69yWI1464j7Z)Yv+R&S3)tj6ibOHzMU!-!CbA zBcP#RMdU$!u>s(2gvP`3J_b#6NPG7cZImEA3HL>@8f;E2H*0zx>qpx3qABeANO(GH zmJ*tPJk%IiZJV8UtyyUlVOaSq;UkS?KZ{^sP_e_x8*808Gd`b3RxRND_Tf>p&2Am6 zB@Xi=ev2IG`8?Y=M0B-00FB!>K=yLm8r=qg2d8h&%%Sx+yzrkSXM3_Tv*&WTeEdMv z@uF(uQb>4bTSEW$@K{L&3#uw-bIq@f6yl4L785wl&z*-#&`;#}3OPRq0S0m6HE=s~qdTzjYOD&!RI<;r2nIN%u`eOzssYJ5By08(YDSx7*F=JxVqn{og zO{RT4MNk`J7Do027zvBQ5eDVq+`v~0`SjigBG8eN#?mMGig-+eG}a$;QVLxtXBu!3 zi*XJ7C_p4@TIM<26;Z}f%t8|=)|=x+d8VV$7IiLPTb4%tM4Dyb%lx)AVWY0V^q zrdd_h4zLB8^E>e2_X~&ElP;N+X=|m?V8h`9IWXn*(5$ihKLJpFi z!k|oY#IDlCwr$eVb?MnkMci5HGLY!O!l#s3r{vTeSYEAwcj!pYTC9z5CkZc0XH_1# z$kS%-g~rnw-iWw)BrR8- zdVp1Zs zG6hxA?bh2hP)RV*xXSB7f=9AlG$Oi16i4jZebd_}pH|s#G=oS)bwrM&_r|~>Ha(C@ z5lWNrL`G(`rNdnfeC#U%($bjScg@+e2}n^RQw2Gk#EPi|jjKb-ZCgv)^kkUw>!1;n zvXd6=KB2q`NX>V2qYW-{;GXJ5W&cRG83vDpN=b~3hDt2?Imf`4MHMydbB3=GV(*Fl zSp6&4VccNhGfROS_~)E-MxP;x89S|{F<8CN`hO1}Np4!^Vg6G1Q{=x5wWtJ5N=q_T zcbqczh_ZSyFJTodq8WEik1tF+En?|y=)*t{sxPxr%csxoW>X^ZHbGl7){0a2!p@&3 zh=gFJ6h9Iu*?bSX+g*vn{sDilOfVCp_6-{AHRhHY5k{x@XA_4o{< z72-ySw5hah5x<-l28^8+I{&o+LUkm{^C^aVYS65=U^D*oJ#dE-LuZbHsCADIToXrT zIj5m6CaGj(p!r$J_-VTk{!=_fts8B8K`oHi%Rf0W_t_~=xx>|~O%z%ClJ6lWF>*~8 z>`)xW5WUbqi6~=E(Pv!Lu$p5F7#%AV7fN z79hC0gy8P(?(Xgo+}#2M5AG5)xI96CkhIQUdf;`+IXJD1vcx-J3s?jDnHf=osR11F<1 zM;xXC-{~R|3Y_dksnS2GQC3&HXa&m{V2fApey}NqM3N9K9bvA2QcDneLlo~-)NI9_ zB|`D?klekniAgD|vunY=S%j-3IZ&Mr`T1U2L%8ku-${8_Xy!|J1{wWaZ?ifwX(yVJ zkVMc!JBvhwSP0p2(r{8pOyLTB#>hdl{Y)PSKOe7O_&vf$LIp*MU_aNjGc*ur=1;sO zTKlZr{w3Ut2r)Hb@JulYiglYb{Gp+QFY+$s08}~vw%msr)3aXh)3Z)sGZ4=a#iabxIhHd#NAIL5K{H<8#Tdeu>Bh07Y9_9+2^xPwz;2`~aHYyuhrt7@MKQ2DvvaUS>z)LW zA#Wjl1B6zUP|17|M`bKI{okn@Pq=?cr=WsFQgztevtwdu%c@+--k~0bMjy?uUgS@) z5nO?PWm3z2$6GR5eJ}#K%%%zhVVDJZ;%hCn3`LH4p|osBk}zwXm!JUe%N7#VV!ut} zj93Z$06ACDUs=*dI$=|%6@mT2HQj5Te4{El2KG=#m7>wH6+hgRuB~<-Lm10#%a5S@*?qY8_2yf2^^UW%=PRG=9nn8Q2@2$!6CGN2}Gom4TeWEJ(8MF97xor z08A1*BGn5%B{jQN_JX$+`ql;SVkLz6Gp9t!Vx}*VC``tb21Ar@k)WNzV*WodqOG=@;n|uvgwi&!h=Z zke2k1t{gRZ$qs4oBx%0S&d#Qps6UFb9RTx$Aq76=5U5Vq&2 zhY-4~6>jq5#Lr-+*o)O7!N34P3*31U`~eK-KPFpTX+YL9D%OUrlu523ykbY)JlUF^ zPAJayX!gxA=BqjTtE2>-Eb!%xZnPUNB$ezU zocaEZCww2h?ZBw7$Fmbq)0il&ykGC5>MN9Z*o2GgWl~bUH1jp)ehOu8<*sZR3<%wF z=g#ng%qSZYhF_ija$*&kf>So%cH>6AE|1;-7tw?mCjzU>mtvwYT+m%7dJ&b^X;xy) z^$03butTS-Vs2UWBUKKn5iF~=ZOQhT`TUD(4c)FE;Shje8j{>=WU{V-Z6bH;sg>`0+@hyUAIUHT@(sJvs zfAIHn#m=pT%zRixWynPrV=^_h?1fdl)4J61< zSAcG$W=!qmgH0>fqDL0VpY-@lX!}kbF!{>d2ItQJIRyi3C9v&PO<%hY#=x*>df1J; zq`7y-Ff#*Yvt`)5l3(3O^VJmrGW6N8(=Mj0&WZQ4$&>q+dyNo{exZ{w_EoC@yjI^vYS6TJ_?+? 
z$X$YsUz+)dEBYHQO1P1=y5ak6p(anbdDBul%XMLK6TaHA z(Nzpm)UPo!x@LDpbN1jy_zI*68m%-?Cri%wY}VIFm*U?bfFt(d7l{ujYj$kUiS|X` zY#`G|FXKn1jWNCFS)x4-8rNgq_(`9GQJ6n+=HvuP+t+B~`dcIQA4~m5MMK8kW1*kC zZ=*PvdC*X{>w_Z1NTV+#X6vr?-*V|+tMvjyV0oNmnM6|Kfd-PaamG^PE(wu3Kg0ap zHt)Scn3PwD5Sx#8TjY!)90|AJ`Z0Tve`S3kf!PcRgGcg6lrk*aQJbQBll(3$3mxMn zcuRuYELqaxRW_wO9ZWm0GzjpCav}Y7C*TA_0ZJLDg`C>lI-jyf{C*-K)yJ50RV9(3 zs2>Tmw0SVudCfH4V&RkU5{YE=IP zPWJQ|*a|zo4`;&$BK=UkPeMYtSqD<1QuZ^k&~g{%U$-bGJfp;JvjbGvv&jqTUYuct z_?AT(JOt|YBL{y0OVI%KFgK-Y4YQfdaoydug1H*RSeK{024>-FkVA7@7~%@D54k2z;TJVifY!< zk!TRoC?E_Sk>|miW#+s+abBUI`8mAUE2aJ83;VUb$6s?-em0GL5>P32>lc7T2Vj~^ zs&~0#K-4^`V(Z6ggl`~_VaY#Rx9UF#8cGCUAb9_bU3+FAg84HEVAWh|BS)SYWIXj8PU>1&Ad8u+fz zhhIjE#;YRbi+0veG+@IY(CNu9>AD%puBiD=)Cdfa0s@!GX~qq?NR1;T2?pjMO9;>S zcmkjB@H+hRBdGLI%5oeHN)FJ_5kBsyBd7l4)O=W-pd^e`1AnY|7c_%8`~6$KVXtKU zJV%pP6AP@|UnOOUV_Zp$@l$7(tjaL)!wL)pt2`2Yls}&`7+9|nbyWs!e#OE4Y8Xr= z_y7QQ+ui$Cy=GS%dh%j+Dt)w>VnEyHJlpBNVqwqk>_q^U7B@?VT1n4`@ni5UvReEC zl^9(XCtPR5V^Yj^ZK#T74HwYk6E(y<2-^GJ$bJP^Da&g z)&>1$e%C%Y8l_piyS7nA? zw86rwJX&d-V*NuP@c;W#S-Q=oVkh>q!jLD9vok})3{RD#hD+m{l(tIi2VqBvbcv>k ziPOy0u+PyFb}>NwNH_zMQ~+S>=&(&4UuUe)AFJ+ser$kiU*A9g<)24bdhyOnFfn`Z zGlzhJ;itmBT{aMhs;t(>OzbY5y0!J%%$X73;Z-7Qe7l+lXRD;+(ojThFy4F)i(t$;N%{+U%!$L)KMNSn5g(U!Xs4{a17NbRrp z12OyejNABy!P(H06|>Xoq9|r$ABKdFF!whu!v0{6725Rz(v^nqlhmThYGEQ$V!7mm zf=K`B2>s__ znS~oOl*R~W2p^CYYD)KEmME!%ez>=0DCvk5%9kB=Qnqj_@&+NLnv9H}a6Csp$l~GQ z9pH%s1pe3fGb;MW$y>!XnmGlhX}VVAS<9A7K}Xq6EM*T#*Oa|8L4>br3`{3+8X>)o zAW1#`*`ag!yB#pok3Zi>j8xHNj5x%g_mTGYY?#*Sg#;Xas-Yh}E@J%8YW|0|O@FAdILpQ9EydLqTIeBz%DCm8~vz_$tS41L70SBz^25J=Q?rA zT>F>DAf{+k+p=nBp6>c|s3lNd9!mb9w6Au02$mErthc5|2rMGt-?QsdcM>-F(1Yjg z`>INSDmy#r3zWDd#?RG%D;9w3V=Mh|^$T@45Lg}wTdWiI=m$we0EtE_3`B4R>)~K7 zE17p{(y#0U90{oX${O5Pg2ZHt!D|S!z@=zF%7b(x6 zN73^P3`irkfW~S&?%nT7f%x}=!2K-?_J&9GJuGNqiXfW@pf0KbUKqvjts&~O$Q0T; z(7%H)5)de}kVz7=zv4ou;8P|^g!;ys*fe&`fw}<+n^-;lwbe>HO{USOy^k$46p~Ja z&nRQ0l8FXO>VK^8`Pyh|dNd@0JlM&wgDCWib$1@$U!~|Xwj=x>OZ(5mN>~D+3a0;i zcxe8=di3AgWF0~Z_kVw~*DRPu4PcdacTG7p_ia1zoHR9_O%$&9 zPhB4#IwK29$V;2*MaIzLl9NX4GwRy6xs96Dd#y|WZ(?;m_q#9E&&3jBren=1`~|!; zDxA1;%&9%;2kCdz@zNU6ayM7}K1e0-dF-Fp0}60 z*DshcDJj_1s+@W`k(K{#!~VyVDSNsmr{HdmHK*CHw4sv1fw1@XU!gGjZv%C3$`ic6M@9l8J>BFLxMu7;!&DLzI+O1Yb6Cu;a2wkY4)NUpKD{5*&g0 z>QT=_f>1rJC~<%5s)vVq)|}POsKN>U^sJ=YqWiTikGmrI?lWS%1(LbAERWZ|`!!nH zv0@sj2`0aoHg)3u ze4fptrIOxlC8`mx?Y>;2)18yK#qDUK|J~~-$X4sjjZL@xBU)&Zc+;Z#Iiz#-RBJnQ zNrV3KwsrI+xG((fyu50y-}zdd30v&(ZsvC3?m3ZSl3!0J-T&l5ue8ta4pQ>0+nHH% zIP}>K5?S=jsh(Oj14{pggMw z3K%-&ar3aBFS&x(Zdy5QaVh2|jD!VUIn~D!{!>;;#sGogE@$Smnzhf%j!rr!teA=a zwHs^u-depkf`Jq6SoSG>RI4Z~#}-pnS0!3$@7s3cU#hF|KMvl0-!4=#^HB3I6Kv@-8cqD zPET2=>d%PbFd)96`FT5THw(u0DOOVlkCU5>YgRo$P|vp8>H6$H1#Ngxyy@=R%Ngr_ zmWk)x#Gs?i>B9I>+zENlGT-_~)l;9T=iQ~~MEZB2nOP+{!X!wA9=MB#VSprg?$0qt>6CsFs-tgSKY!ipO(_goxnK9Z9!5Oi zA;pY&85P%u6WLTA!T|PIV2K1L`faa>@ur<{(zzY~IF9h&d!nz;Z-B-}#xbCXDTczg z3cmhK^<5xPF zp#BTwp^vX#^+|B7n?+MG=$Z= z{<xno++V=M3gwNIE(tVe^(TfGl zro;(`Kh(;_2bQ#9!B;a1u*x-S7LkKSP%O@4iUbSjEaL=1-zLVopsGeZCp)|I)5(A{ zZp~-j2JuB8_X`N=>6&7OuW}F2;Ix`G z6gW|78GVgZeoyGe@HS$S_}QAb!|iuzIk&bxyZN{|Rr{s-dVzs$y-OlNTv76Ez>U0v zd#KNQ%_m{pe+|wpIc|ml9ZR7qsNPyRkLI-BA6u8dUhNZ;L@?-fdX|-a0x`*>0?NCU zvUj%j>f7GDxTq#ZHjOIF$-uD+0v%`pb-FFzK{G?$Z5BJ zN=*2&R#wEWn$_{zvs=MAsoT+p-WU*AFkQdSQS71fcI|)9Nrknz3h}>`y5{~7_j!#| z9W~yxxU=Z^s!=uEs(qyZJcfQS)iaf03N~Ke+qn@L{5b0`3|cP5g?O66&op@$T8prkW<_`C3&~ zJ13A23#s6f{)U#bv=X9zUPR*DvYWq)&y0~~z5^y18z0N-vwYXsn8b_zYiW0}#Fd}= z`I3DN2=t!@um;Ro>t5FmvhyWFav|2-`~7&~eF?xtOMTr=qR@=btIwDD-6RxZ*Sto6 zPgIPX3C4soY+icJ>VCXFUlx4;Ox8V*H|>boIRb!Josa(A4N?02P;Xcq-37FBjB1t< 
zXsVdV$ZxlwI#PG{y!tA{#cI4cFOpN8>!(k0Gcin#-X23SOpfU0@Ab6Oztga4rYo2r znwZ-KlPBo0b>LUp_lN1Gz?T;QCs5$evzrG%Q}*ldSmL#9Coa4&ffn~6bN!sW>`ce@ z?Bk!<)Y!3looh-u$=$}#?>Vw|sfm5$ z)7}Bh5vPHs;zX|fkY(GjL|4pPl)0#T?RCS zo=Qj-UB1nSPp3g0C&AwC)n?-yeN5qZr#ubzi`jmAa&qGyoiMc$9OKi5;{bPo6)u;< z^|M>%vL*WJ`mDkA0X7iYakBgN-Jiq*(159ne`~ssrg`<6m2qxfx@~j8C%-FGIkoSG zdJ}*Dg-jCbC?IK|mzOjG&fM$8&Cu_x;H6!OZ}QPOHOw&GbJJJY?(X7m`r@%XQsID& z)=9qt*UZwYxZ+&(;L{xTWQR>AC6YLqy8TM)`h0t*-EkLx3@U?T4H!NA1@Lx>jH?Xh zmX5;LRqa^Htnw%@zAvKszRtrYTd|*)+n;v-41{{V?bY~a`edYJB%~$n-on2NRM;cD zjNbk?Xv-c0F=Heis-qNp3>Ga?X4QLCv>J4Z7{F>iZ&>e`)oUz+J|y^FtF`Lms6{_+ ztmPoypUvqnSEgRg^a5ZaVLEbOZ$BLAKSvWTZ#^rPe2!v7xi#rh)RHn1?xMRNB(F@% zhji7H*tqzPZXccNalanoQkGTh7c3I<0U*atZsDrPS2FQ@?YLuVPDd;IZKBNBnliNC zC^D-V0`3)qk1nA^5KUP3cjCc}$>H<+Wp&+qs7r?u;@~vnU3%(g>1fDB=vxOPmiV7# zZmz${UP(wQ3R@io*Ir6WrlxhD)L3bmtDocQ>;D~=db`BvZk`Ejrl(V{UQab`>}arv zY*R%%`qShIZv57+ zuV=j(Z2JT03*0mzyE!6{a^g#sxNMQI^0R+=jUTrf9+4@vL;-EMa7hFa;^EKd?_DF> za!9+m?VV`LA&M!!3obgsuM+2%^d5RAnRr~@oG!x$5=u<=&~a!b#O4>|LPqEV3_CAt zu;C|AK%ZSHjgy53G^6ChaUg)2`F-Dsqn`=)jXcQq*8_%7ag!|ix)UIivM#PCzaybLR9f9Gcm zFD(c!r+RWOSDpPhU_8{KJuelU8E~S9Z+;b%L={qGc*N=}mB}D&o$}t7l=xD|o~+Mn zbZ26I?PI}-wqzz%1&gW9)0~h?^SRRBUMlJ-4P%%0Q?_EY}CpMkB=l>N9N$)+kz@hylXx8p-hth}Ce z*R~_?I=`z8e|m^yB6c+O7**2zRkAnLP@Nm(WLH_0oq+-`vq>ZMDfLU#@+xHAue-gr zVDFSOt}-6wf&yF+LLwYO7z;nMZXza0@vUO1&IOH+|CHbV(yM&vx`?==(}}hxi6#mk zU0jCGzL_DV9XGPS%^Q)0RVf<92?m5eS6GWDt9(b9=UZM5XcRCZWo6aJO>g_E6cmzC zCwKj~e>;FtXd4g^nbv@=3{(J!{NSZ|;Tur(zaOi&xMG~B+1dMLp|lwR=$);cJj9+A z`2+mpTM>mMeNGrMX=Zx*>mGDIs&1?MVF{703f2OACqM{ziw6J6J4&hsyln#}8|JLE zvp9<3JRd(CVQ9@8bf_X9fKe*4&rd6+S>tW)i%q)?;&|&Vu8Z4lc1tPRciHuEYV0~5G0QSbny>fR%lJ*(A(<#BPJ!PoZ;69nKA zV!yZXkt?Mrig$rbxk>YXuCoV>P~iF`IRhKFfT-aH$`)q0#_#GfTH=&h^6n4VtD{*h ziyJRWC?co;N#vronZ87UB4|N;LW+j)^yqMMd|XCeI)flZx5Z~0T6_#pA|>lQj=$Qh zutr(_C}_-f5ugs$s#Q>8#t7Sv&t>czxcC%y7Fi#sMpbFka>mElQs5u^a>22iFlXI( z=mlS7U8@7}BpPUg`>cvH&Zdmsm?-YRZl#SNdI&&0Ss58k8*#dHCFVZr+Z-D0T z`FZH?ZD&9*5W}8{(ACscRZ`O`<@L`jYwSH-_*nVB(vkCj8nez^aCx~auHb|L5sngZ zx0Dt%S?Q%Ox}66u?yq+i`SI`I$;-$A(KXNs-IGA(!RPfE!)sPq8Hh)DKU?d>7ndLP zY)Fvc(S8}x*LuW+bF}c_!;jG(Gvm({M_TQIOs~u!fDAsF-N%Nl@)C^2h3E5kzK|PZ zL5`a3%IS3<_N(#qYWxi>u#IhWP1=Z2#04I=#;EnPVE~+FO1RctQCR6=7d%{+fw=0M z@u>_#P@bld>VIgF$-yJQ&CRde*czlMAu)p-O8%~ooNy=w`y)lLC(`sw_1fJta4*-2Xi& z|4Vsc$Ie`&sARE_xsz2%XHi8%i$R9+T^ekNgN4V+G(TYrjvQR(X`_ka^D)@B!kHBVN{G8Jiw{!Wovn4{M-`AuDZOnth=2ib zBad*6w;frQ@pco}4FggDfM444YrFl}u@4h=mtO@tBj@o!0Y4p2JmnM#A^xXsrx!O( z$<0*R-K1Gm`lFFL7Cs&hW^Uh;sWne{;fn8kxm@m>Rsb!rHcZ}sm1;>UGRxZ4^}4y$ z%a^;YqB?*SN;drlmG6EBil@j(-FiF>KX22C<0li7e7d`8Y*4v}*c0g`B#gn3vT?IJ z??LDuJ(qx}@y5W4nIr_G?fJFu?a3bm!pmG+(s1AU``Y?mMYY|%8?uk#cd<4+e`zzr zc3M%NR$|sc?DMvRSzIRx7V*VEq%Q5WCtMX&p9Y&gc0(8Afb)&Q|0)-{p^u2yY>q!J z4_R>u(IJTEIX;I)8u+3}9zUvcUk@$Ur~ACCQkP1l_3Re%d>-t+YoQ2(-)`^oe6Jok z&pY4K(n-O9aOyqT*|T=zlX4z`jWaU^Xs5?^`7Y?zn3xb)n|2fgGD{jZ;$YOEwdcqf z=j)a$KiFsHU@o)L(!EV>W!+ZKZ33Qh$E5GcQ5-;e~)>Q{vm&nXC$x_jlsvd+~e$Vn}&JkQwOdNck= z1?oxV?s6G#hJ=X^-k+}N#b?#a0p=J~SJ*;$_u`EYus3blu|VKu&!F=NPywx4JvTqx z4J_E`=!&YnMxVw2^D86g$&M5qeU+F53Nr=xd^ek#uU}||z`5N%FHhsV26D&@p!}*L zYagSwHmli^tC_}e;8Qd}e9uJ|p`<&GbDv!%BLdE{Z~hWh*Iw&4?hH&onYF7Rp%y!i zSCG^DaQA6*`f@sQxFdTkyAn9q)W>va`NfnnG>dK&G`L9oarWLYm)n%b%Ww8XuX0RTqaEIS>GHW-kE6-=Ol z$MUf_(&>s`^GFkRmym;jLBCaFGWH&0tqx!!3a2$#J_CMb)&-zhb}oZm*|jBtERGMX z+T2h<4i7zNJDf%qCiGx5fKWXBr`H_T3TjjCiP3fkAT+?T+>Jf{1HwSD!;>QdqEG9s zop!()fo@mwd953_@KRA%{x`4M8K2~r_8Gxww>P)#I(H*cMj()wqFvhYMpq8Nt5`%G z9TISPcO8z_cErfjVCxZSjMZ)^L!^K62_WXMK9 zNL?%HJt`gDn{Y5s@D-2`1Q10dNI02?$2&}X2w-bEty)1qoGC9L$a{JVO#fvk_#C)D 
z3^>6cX6ZDNm64FCk|~ks!gT^n0{vw556jr!fBokAxz7)?tmoa&-<^yI(Lxz@Evb;V z`V(HPEr7mh-vE+gctT3h(WRT7F`l>_LZ)ZmrLA9YTIB?Tt$OOo#l*(rw{@}B3tibv zh}_Mlly*Aql*=q};Z7ux%oo3i{LJV8P{xe_3hLJDHa8vkf2=A|pc zm+&y8|F%i&cl-PwD==)u8!`o00chZq{>vd^1+hB5NRRss26`UopXICH{^^Ah^^w#>@KFq=g?)zW5n2J|RsbWD_HkezLj_2lFI7s{FjNz_c|Y4XH53qy>gooj-HGH8U?3obR-s-_U6ClQYm!Q{v(fm|tBa zj`}vE#18^#s#UW$(?(&Ui%Umc0sJmc1H2-(sn=jdS>}L^+;>KzwCjUE;-f6ss_LD< zROGyUYdx~Jxj=x?u(vSLQN)flzILi}a$XpF)-5e=+y)5GsH?55Ug!=L1b^;q4mP+%5g?igFYo5iuu?cXzUW3ONA+Uk*)jXl(iuxopQAz-z4ur>AV(1$Q z$~^?)7zbv7ZU;Yk1f3_IdrtB65b@<;$3(m4twKu;3 z#9~5DhUeA8pMgZ!AX0uuk_LZyV>2&&P>;#haXBY7!OpJPqKfSi0bcIq)0ML`2vqA) zSYN@ID**`D8gP5lQEHav3mHWYtV=zRI1zk(dF79B)};l3C?w6yO#!fTz?g&X4baSh z`bU<1@@8SQh zZhYqtaF=$^!67t6yd7!#E!3f)AAyWu=pOW=rlQvqf_0Ofman>2hyvibx>nVa^V(gd zcUh!=P)c?!%`B`!x+`kCm;`YFjIRPj{#VSKZtZvHOMYitjnkLIU$aL36fI{+unpGQ zSbLk#=sScesGi=fdcPVRgr9d9Zau=Q;(ZPxA#{6A5WmI>D`{R-zrV$ZiIt5={00uu z<8lBz8q#J?`)7d3Q_~ulx`*ZRfAsRxn*jWV$0yAhJY!*O@xj4U4(~c|IAP%K{{8(4 zEm+O3soUarFPm}t7%@MNL_mWrrS>F*k(hAPKJ8x*X&S8XM0)6f>`C|Yj&#KUG}wLJ z05|f~Gq-WstjIKO=u?zWb8!^KaAb&vO1yO9G36^;23N&+;m{;eGaVR>ay^lPn+VIP zZuU!*OFQAK5Pci_1S-3`B9F8@?4ib3L=j+au$8&An6y=|q#QR({@N#IWz$khAuj*y6r!c2d09 z2k0MwufcT2j~=k$$lBjCqJU!-er%^0J!6bRSW|YgQ}{j}Glu)ReV5EhAHFJ&cx*IWM7@`i8mN zHfq*1H#v@J6m)i_MPAaylf)hC^GUg>b$gsg0I)U|9WSQid`1+%Vb=};hy3nnpc&Xc zJ36y&zc;8Nt8JZP(^b<-2LI%>{p|Y^>`b{x5omA#c%W6^cf9yk1UObe8$gW(v-)va zO(m6iqdj+<^7rC}_h;_r<|U@4W|ebHK$)c9oE&rgMweaTqA8}7ZqG^+*z}`gG0mhw zU@HLrd^#)xSOv(LD*#*C`wtm$qYAH_>SBpjybBJ$o6E1C4L(sr{zkEI1C1AG$Mo6E zb4x73`vev%x5*U7hi!Skio zswOX1kj8Ao+)ab+tv2B3yIgknW~>+K0ueR=##IoUnR&O?xtPKCI$f%-+c|xegt6vX z*P)ZRL(r_9RlT}gSLRdekxLAUq;skEC5Z~HRx(13tMA^fD{I>XEaeVl8%i{)KsNrH&rmb z2}r%aTA1QW4(71kt+n_2`{z!yH<3bqx6s6CBAQMnSg5EaV>eqzV()&cnIuLtH#b$n z-gV=}LUuH=(pqOH!Hf}!KtXj{kY5;_Ntfx!<+rHU>h5%D^mq7YS6}U%1=59|%D-D6 z2%nwBtrE`3y!&oc3x(Q*6{yO-1F0l`B<5`xGOI5yU%q$v_4BuZFA7Xuq0G3)hhDSU zO85LeL%|k%b1}<=RkQZ_837LFUB~u?EZKlF)O17U+GurgZ{I)#uEeyYjAVgm)NAxp zNJeg|)27t*#oup^hOb)4Fl{IsZoC8p<@;iqgrlEV+Ks(M$A2ukUrIRxzRm3WQRY9m z16m>dw+lcLkIF|VQ`=oX{)eglZ9jzh$AJXz3NeTyi}JB*a(rT?yR?>svHkIG6_NkN zcw4!G9InCD1>iDPO|}RKteomnKPr@E?qR@TA(I|{wx~^SUbJ5Y5SNEb;o)*s!Dem8 z%1KHS@|0?T_vh6{ZB9UO=yRGGN26$@_=YlZ@8QwJi&(or<5N_#Ct|}A(o3C11$7-Q zRYeuMcH(ave=|{GsIa~g6TmH>x}gmrBItXd zg!&LonsBh*y8_x&7edME#c9HlF`clv-m!db)C>v#J~*M<*6OAz zwFTiMJiI|bc!vyc{{me;z0(Vw5lKuMC6J+F^|XYRy#Mqa?U-1Bzua?cL#XFDF^xC+ zh(McRXU&LYFwI*q=X{&Jm0yolz;WYm*-5?nAwwgVFduz|ldEfNEQ-5PZ6QvWmnTZ} zXoaiSJ2`M&9c@7mIX#6%6D-WO*6OA)cd3&TGnzQk2P$PrvG;MKky>~oX7#vX;U1A< zyKzMF2QFN~$trVgZCv4DaJ5Tv9}NHK85xSX=BSnSAs05(EzHkz(^DMqAAaTvbrbQY zf8H9-W0;LmMM^3wl$tPN!3{&a_E`BHFJ#X+iSN0cZbnEQaV74=kcNei{g8Wu4!so# zdhzWTaD6|1wD}>+mZQB}&X6O^ekJvc*Q2nkIEP~RXz2Z;QbtOh-Sa{42FcrS-jt70l&xKf98> zorr!nFOQ>nT74>k zp5pO$L*36pEegy^?Hs9dkL=WQAL8L4&rTFmb@}dXZBaTzmB*Q_{hUs^L`pU&vn|4zltD4fXQ)J{jGhMLfSC9W9lU^JuU3s$3S%xU=_pwCkdy zzO=UW>+!=#i)CvEy!6_gV6aDm5aW7YB!|`!$CyVa+e#UX$|jD#7@amtQ4Bq~8+N z1_H{98_9+045X@HsFDsT@6b?@eb$y{m)E$R_irJWo*jQ9z*NDCn#%VO?UfgdB^!?0 zi@%%3f}i<4{d6k&9QgEUi;6()ZZ^3x6o6oDT8_F464EhLh#)B@%kr~5wVYW7nEXk` zBt=6I-Pr7+p)IU$Ph*JWp_8a+EYy0vKS#$Mp+)LV_T{+Wm}bqqL=K4`5J^>Cz-Q+3 z)I3Rc=jN_Wqa4(bP|{N2Kn?Z_)f*J_p*Mdkz`98Tti;O5A;C7Fp1sJO;)eN$&AVM`r~a>bvxg@6x(J~m?w_~Ua$GvHC$~>uT}F4U-RolWifuS*H5$zK)t*QXZ|#ANW308f zFV!a0ggkz}K`2ToTv^f4sLVr!^Qkp|zSW)y7MIulf35}8h&z8;eBfqcxYe$4=nAn)Mh zh~0=wOi@s`Md0!7Z^YDlYxT3W+Q+-j!b&XjL!0Km7gML&oO11UO-y{>3RLDf*~mj? 
zlu{>jywl?$r(99o1J_?}attH-6Qlf_C`2 zkd}c8rKA=@^sr5;dNF&ZcpTK6f&5tl$(f_Gy%FErWTwfXI^bPKz{;X6+aldeEFklIjmM zzJaNVKFnGSdVd_5u60bLxGt{qDf{79+}-bfT0SRBY?7?Dpx5D*MIjjy-#>ZZcUvJ+ zN(V=x$>i^-@=a1q*3t7e^9FnU@vUHC+27X0q_SB)(vT}`bH8S18c~y8ZEWlByyY-n z7{L#&Ff1;){vie@$35IMb#S5x%93l^9Z$W-HWoIF2=NbdzF`(aq5{6(>b) zQ9u}}R-;lZVKd`cy}h*+C`Lm_?XVKE*n!?6=t0g0PrgVP24s(@wKh}DBfG^crW)vv_7cAq2xvI__O zPL>P?utAeXgI3Ox&Gg~zu(WZr{nCSwTG#o8+?2$wd>ju+#C0wmy< zk@@o`o!z}#Cx-VgQz`qsTH5zlzy3V#X>V(*l+ny=^U1uyQpeqYNEhSxmGaud`OM7@ zdzEXzgr{dTxhuU*gsr~Tp?oo%8iW3;_?~XFW!R$MldoE>LJZAJ+?`QOQI|i;{@!$& zapSTD(BF#LR8+Vi;N;h=n6{^mos?4V^873Xx&wE1F)}lNL)=lLK8VytucwHxi`sBN z{{9*?tG8^x@)58I_YNT z3jJ%zWjDx_UAHvYsO6RIf+~cbMUIW6ACn4n*uC3l^!I<#u~R+83IPZIk&ukaqc6DS zK?qJr#)B-ci3w8EOGh}4MKbxxz5x93e-EgCz}){l1{t6S3jOaVOyNQ#|NBY0D9QiS zg$IGgNWnl6?*ATpU?ku^!v7x5NN_+V`oBg1HykXGA^ES-LkJuAKac-^eoaYAp_w<3 gRr&AX5byuaVQ7;IXE^!Oe_oOlllxXJVi5Gd0Dg!mr~m)} diff --git a/experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh b/experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh new file mode 100644 index 0000000..cce79fc --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/eval-bitflip-no-finetune.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# Evaluation-only run on train/validation splits with bitflip LoRA transform (no trainable params). +# Usage: ./eval-bitflip-clm.sh [num_processes] [model_name_or_path] [per_device_batch_size] [block_size] [eval_max_steps] + +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd) +RUN_SCRIPT="${SCRIPT_DIR}/run_clm_no_trainer.py" +TRANSFORM_CFG="${SCRIPT_DIR}/transform_cfg.toml" + +NUM_PROCESSES=${1:-8} +MODEL_NAME_OR_PATH=${2:-"unsloth/Llama-3.1-8B"} +PER_DEVICE_BATCH_SIZE=${3:-1} +BLOCK_SIZE=${4:-2048} +EVAL_MAX_STEPS=${5:-64} + +OUTPUT_DIR="${SCRIPT_DIR}/output/$(basename ${MODEL_NAME_OR_PATH})-bitflip-lora-eval" +WANDB_TAGS="${MODEL_NAME_OR_PATH},bitflip,eval" + +echo "============================================" +echo "Evaluation Only (Bitflip LoRA):" +echo "============================================" +echo "Model: ${MODEL_NAME_OR_PATH}" +echo "Number of Processes: ${NUM_PROCESSES}" +echo "Per Device Batch Size: ${PER_DEVICE_BATCH_SIZE}" +echo "Block Size: ${BLOCK_SIZE}" +if [ "${EVAL_MAX_STEPS}" -gt 0 ]; then + echo "Eval Max Steps per split: ${EVAL_MAX_STEPS}" +else + echo "Eval Max Steps per split: full dataset" +fi +echo "Output Directory: ${OUTPUT_DIR}" +echo "Wandb Tags: ${WANDB_TAGS}" +echo "============================================" + +uv run accelerate launch --num_processes=${NUM_PROCESSES} \ + "${RUN_SCRIPT}" \ + --model_name_or_path ${MODEL_NAME_OR_PATH} \ + --dataset_name Cheng98/fineweb-edu-1.25B \ + --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \ + --per_device_eval_batch_size ${PER_DEVICE_BATCH_SIZE} \ + --num_train_epochs 1 \ + --gradient_accumulation_steps 1 \ + --lr_scheduler_type linear \ + --output_dir ${OUTPUT_DIR} \ + --preprocessing_num_workers 32 \ + --trust_remote_code \ + --with_tracking \ + --report_to wandb \ + --transform_cfg "${TRANSFORM_CFG}" \ + --block_size ${BLOCK_SIZE} \ + --eval_only \ + --eval_max_steps ${EVAL_MAX_STEPS} \ + --wandb_tags ${WANDB_TAGS} diff --git a/experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh b/experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh new file mode 100644 index 0000000..a43dea5 --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/eval-no-biflip-no-finetune.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# Evaluation-only run on train/validation splits with baseline LoRA (no 
bitflip, no trainable params). +# Usage: ./eval-lora-baseline.sh [num_processes] [model_name_or_path] [per_device_batch_size] [block_size] [eval_max_steps] + +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd) +RUN_SCRIPT="${SCRIPT_DIR}/run_clm_no_trainer.py" +TRANSFORM_CFG="${SCRIPT_DIR}/transform_cfg_baseline.toml" + +NUM_PROCESSES=${1:-8} +MODEL_NAME_OR_PATH=${2:-"unsloth/Llama-3.1-8B"} +PER_DEVICE_BATCH_SIZE=${3:-1} +BLOCK_SIZE=${4:-2048} +EVAL_MAX_STEPS=${5:-64} + +OUTPUT_DIR="${SCRIPT_DIR}/output/$(basename ${MODEL_NAME_OR_PATH})-lora-baseline-eval" +WANDB_TAGS="${MODEL_NAME_OR_PATH},baseline,eval" + +echo "============================================" +echo "Evaluation Only (LoRA Baseline, No Bitflip):" +echo "============================================" +echo "Model: ${MODEL_NAME_OR_PATH}" +echo "Number of Processes: ${NUM_PROCESSES}" +echo "Per Device Batch Size: ${PER_DEVICE_BATCH_SIZE}" +echo "Block Size: ${BLOCK_SIZE}" +if [ "${EVAL_MAX_STEPS}" -gt 0 ]; then + echo "Eval Max Steps per split: ${EVAL_MAX_STEPS}" +else + echo "Eval Max Steps per split: full dataset" +fi +echo "Output Directory: ${OUTPUT_DIR}" +echo "Wandb Tags: ${WANDB_TAGS}" +echo "============================================" + +uv run accelerate launch --num_processes=${NUM_PROCESSES} \ + "${RUN_SCRIPT}" \ + --model_name_or_path ${MODEL_NAME_OR_PATH} \ + --dataset_name Cheng98/fineweb-edu-1.25B \ + --per_device_train_batch_size ${PER_DEVICE_BATCH_SIZE} \ + --per_device_eval_batch_size ${PER_DEVICE_BATCH_SIZE} \ + --num_train_epochs 1 \ + --gradient_accumulation_steps 1 \ + --lr_scheduler_type linear \ + --output_dir ${OUTPUT_DIR} \ + --preprocessing_num_workers 32 \ + --trust_remote_code \ + --with_tracking \ + --report_to wandb \ + --transform_cfg "${TRANSFORM_CFG}" \ + --block_size ${BLOCK_SIZE} \ + --eval_only \ + --eval_max_steps ${EVAL_MAX_STEPS} \ + --wandb_tags ${WANDB_TAGS} diff --git a/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh b/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh deleted file mode 100755 index 9ca3670..0000000 --- a/experiments/llm-bitflip/lora_finetune/fine-tune-lora-baseline.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash - -# Baseline LoRA fine-tuning script WITHOUT bitflip (for comparison) -# Hyperparameters match fine-tune-bitflip-clm.sh exactly -# Usage: ./fine-tune-lora-baseline.sh [num_processes] [model_name_or_path] [per_device_train_batch_size] [learning_rate] [weight_decay] [gradient_accumulation_steps] [block_size] - -# Default parameters (matching fine-tune-bitflip-clm.sh) -NUM_PROCESSES=${1:-8} -MODEL_NAME_OR_PATH=${2:-"unsloth/Llama-3.1-8B"} -PER_DEVICE_TRAIN_BATCH_SIZE=${3:-1} -LEARNING_RATE=${4:-"1e-5"} -WEIGHT_DECAY=${5:-"0.01"} -GRADIENT_ACCUMULATION_STEPS=${6:-2} -BLOCK_SIZE=${7:-2048} - -# Function to get model parameters count -get_model_params() { - case "$1" in - "AICrossSim/clm-60m") - echo "60000000" - ;; - "AICrossSim/clm-200m") - echo "200000000" - ;; - "AICrossSim/clm-400m") - echo "400000000" - ;; - "AICrossSim/clm-600m") - echo "600000000" - ;; - "AICrossSim/clm-1.1b") - echo "1100000000" - ;; - "unsloth/Llama-3.1-8B") - echo "8000000000" - ;; - *) - echo "Unknown model: $1" >&2 - exit 1 - ;; - esac -} - -# Calculate derived parameters -N_PARAMS=$(get_model_params "$MODEL_NAME_OR_PATH") -N_FINE_TUNE_TOKENS=$((1 * N_PARAMS / 100)) -N_SAMPLES_PER_STEP=$((NUM_PROCESSES * PER_DEVICE_TRAIN_BATCH_SIZE)) -N_TOKENS_PER_STEP=$((N_SAMPLES_PER_STEP * BLOCK_SIZE)) - -# Calculate max_train_steps using ceiling division: 
(a + b - 1) / b -MAX_TRAIN_STEPS=$(((N_FINE_TUNE_TOKENS + N_TOKENS_PER_STEP - 1) / N_TOKENS_PER_STEP)) - -echo "Calculated max_train_steps: ${MAX_TRAIN_STEPS}" - - -# Generate output directory name -OUTPUT_DIR="./output/$(basename ${MODEL_NAME_OR_PATH})-lora-baseline" - -# Generate wandb tags -WANDB_TAGS="${MODEL_NAME_OR_PATH},lr${LEARNING_RATE},steps${MAX_TRAIN_STEPS},baseline" - -echo "============================================" -echo "Baseline LoRA Fine-tuning (NO bitflip):" -echo "============================================" -echo "Model: ${MODEL_NAME_OR_PATH}" -echo "Model Parameters: ${N_PARAMS}" -echo "Number of Processes: ${NUM_PROCESSES}" -echo "Per Device Train Batch Size: ${PER_DEVICE_TRAIN_BATCH_SIZE}" -echo "Learning Rate: ${LEARNING_RATE}" -echo "Weight Decay: ${WEIGHT_DECAY}" -echo "Gradient Accumulation Steps: ${GRADIENT_ACCUMULATION_STEPS}" -echo "Block Size: ${BLOCK_SIZE}" -echo "" -echo "Calculated Parameters:" -echo "Fine-tune Tokens: ${N_FINE_TUNE_TOKENS}" -echo "Samples per Step: ${N_SAMPLES_PER_STEP}" -echo "Tokens per Step: ${N_TOKENS_PER_STEP}" -echo "Max Train Steps: ${MAX_TRAIN_STEPS}" -echo "Output Directory: ${OUTPUT_DIR}" -echo "Wandb Tags: ${WANDB_TAGS}" -echo "============================================" - -# Run the training (same script, baseline config with no bitflip) -uv run accelerate launch --num_processes=${NUM_PROCESSES} \ - run_clm_no_trainer.py \ - --model_name_or_path ${MODEL_NAME_OR_PATH} \ - --dataset_name Cheng98/fineweb-edu-1.25B \ - --per_device_train_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ - --per_device_eval_batch_size ${PER_DEVICE_TRAIN_BATCH_SIZE} \ - --learning_rate ${LEARNING_RATE} \ - --weight_decay ${WEIGHT_DECAY} \ - --num_train_epochs 1 \ - --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS} \ - --lr_scheduler_type linear \ - --output_dir ${OUTPUT_DIR} \ - --preprocessing_num_workers 32 \ - --trust_remote_code \ - --with_tracking \ - --report_to wandb \ - --transform_cfg ./transform_cfg_baseline.toml \ - --block_size ${BLOCK_SIZE} \ - --log_train_loss_steps 50 \ - --max_train_steps ${MAX_TRAIN_STEPS} \ - --wandb_tags ${WANDB_TAGS} diff --git a/experiments/llm-bitflip/lora_finetune/plot_train_loss.py b/experiments/llm-bitflip/lora_finetune/plot_train_loss.py new file mode 100644 index 0000000..8cacaaf --- /dev/null +++ b/experiments/llm-bitflip/lora_finetune/plot_train_loss.py @@ -0,0 +1,48 @@ +import argparse +from pathlib import Path +from typing import Optional + +import matplotlib.pyplot as plt +import pandas as pd + + +def plot_train_loss(csv_path: Path, output_path: Optional[Path] = None) -> None: + """Plot train loss vs step and save the figure.""" + df = pd.read_csv(csv_path) + + step_col = "Step" + loss_col = "Llama-3.1-8B-bitflip-lora-r32 - train_loss" + baseline_loss = 2.06842 + + if step_col not in df.columns: + raise ValueError(f"Missing column: {step_col}") + if loss_col not in df.columns: + raise ValueError(f"Missing column: {loss_col}") + + fig, ax = plt.subplots(figsize=(8, 4.5)) + ax.plot(df[step_col], df[loss_col], label="bitflip r32", color="tab:blue", linewidth=1.8) + ax.axhline(baseline_loss, color="tab:red", linestyle="--", linewidth=1.2, + label="original") + + ax.set_xlabel("Train step") + ax.set_ylabel("Train loss") + ax.set_title("Llama-3.1-8B bitflip lora Fine-tuning") + ax.grid(True, linestyle=":", linewidth=0.6, alpha=0.7) + ax.legend() + fig.tight_layout() + + if output_path is None: + output_path = csv_path.with_suffix(".png") + output_path.parent.mkdir(parents=True, exist_ok=True) + 
+ fig.savefig(output_path, dpi=200) + print(f"Saved plot to {output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Plot train loss vs step from a W&B CSV export.") + parser.add_argument("csv", type=Path, help="Path to the W&B CSV export") + parser.add_argument("--output", "-o", type=Path, default=None, help="Path to save the plot (PNG)") + args = parser.parse_args() + + plot_train_loss(args.csv, args.output) diff --git a/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py index 4595dfe..60e7929 100644 --- a/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py +++ b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py @@ -319,8 +319,22 @@ def parse_args(): default=None, help="A comma-separated list of tags to apply to the W&B run.", ) + parser.add_argument( + "--eval_only", + action="store_true", + help="Run evaluation on train/validation splits without any training or trainable parameters.", + ) + parser.add_argument( + "--eval_max_steps", + type=int, + default=64, + help="Maximum number of evaluation batches per split. Use -1 to evaluate the full split.", + ) args = parser.parse_args() + if args.eval_max_steps is not None and args.eval_max_steps < 0: + args.eval_max_steps = None + # Sanity checks if ( args.dataset_name is None @@ -644,7 +658,7 @@ def group_texts(examples): # DataLoaders creation: train_dataloader = DataLoader( train_dataset, - shuffle=True, + shuffle=not args.eval_only, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size, ) @@ -654,6 +668,86 @@ def group_texts(examples): batch_size=args.per_device_eval_batch_size, ) + if args.eval_only: + # Ensure nothing is marked trainable and skip optimizer/scheduler setup. 
+ for p in model.parameters(): + p.requires_grad = False + + model, train_dataloader, eval_dataloader = accelerator.prepare( + model, train_dataloader, eval_dataloader + ) + + if args.with_tracking: + experiment_config = vars(args) + experiment_config["lr_scheduler_type"] = experiment_config[ + "lr_scheduler_type" + ].value + experiment_config["bitflip_config"] = { + "use_lora": use_lora, + "fc_cfg": fc_cfg, + } + accelerator.init_trackers( + "Bitflip-CLM-Eval", + experiment_config, + init_kwargs={ + "wandb": { + "name": args.output_dir.split("/")[-1], + "tags": args.wandb_tags if args.wandb_tags is not None else [], + }, + }, + ) + + def evaluate_split(split_name: str, dataloader: DataLoader): + model.eval() + losses = [] + for step, batch in enumerate(dataloader): + with torch.no_grad(): + outputs = model(**batch) + + loss = outputs.loss + losses.append( + accelerator.gather_for_metrics( + loss.repeat(args.per_device_eval_batch_size) + ) + ) + + if args.eval_max_steps is not None and args.eval_max_steps > 0: + if (step + 1) >= args.eval_max_steps: + break + + if len(losses) == 0: + logger.warning(f"No batches processed for {split_name} split during evaluation.") + return None + + losses = torch.cat(losses) + try: + eval_loss = torch.mean(losses) + perplexity = math.exp(eval_loss) + except OverflowError: + eval_loss = torch.mean(losses) + perplexity = float("inf") + + logger.info( + f"{split_name} perplexity: {perplexity} loss: {eval_loss} (steps={(len(losses) // args.per_device_eval_batch_size)})" + ) + + if args.with_tracking: + accelerator.log( + { + f"{split_name}_perplexity": perplexity, + f"{split_name}_loss": eval_loss, + }, + step=0, + ) + + return eval_loss, perplexity + + evaluate_split("train", train_dataloader) + evaluate_split("validation", eval_dataloader) + accelerator.wait_for_everyone() + accelerator.end_training() + return + # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "layer_norm.weight"] @@ -887,8 +981,9 @@ def group_texts(examples): loss.repeat(args.per_device_eval_batch_size) ) ) - if step > 64: - break + if args.eval_max_steps is not None and args.eval_max_steps > 0: + if (step + 1) >= args.eval_max_steps: + break losses = torch.cat(losses) try: From 84b430990aaa8ef3413bf3abe5fd44592e44e4da Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Mon, 2 Mar 2026 13:28:27 +0000 Subject: [PATCH 6/7] fix docs --- README.md | 50 +------------------------------------------------- docs/index.md | 10 +++++++++- 2 files changed, 10 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index 210b5f1..ff92a7b 100644 --- a/README.md +++ b/README.md @@ -5,53 +5,5 @@ - Model Behavior-Level Simulation - Hardware-Performance Simulation -**🔖 For tutorials and examples, please refer to [this site](https://aicrosssim.github.io/NewComputeBench/)**. +**🔖 For milestones, tutorials and examples, please refer to [this site](https://aicrosssim.github.io/NewComputeBench/)**. -## Model Training - -### LLMs - -We adopt Llama-3 architecture and aim to support the following features: - -- Pretraining -- Generation (inference) -- Parameter-efficient fine-tuning -- `🚧 TODO` `🐌 LowPriority`: Supervised-fine-tuning -- Evaluation - -#### PreTraining - -The LLM pretraining is built on top of [torchtitan](https://github.com/pytorch/torchtitan). 
-
-- Model architecture: [`Llama3`](/src/torchtitan/models/llama/model.py)
-- Model configs: [`60M`, `200M`, `400M`, `1.1B`](src/aixsim_models/llm/model_flavors.py)
-- Datasets: [`HuggingFaceFW/fineweb`](/src/aixsim_models/llm/pretrain_data.py)
-- HuggingFace checkpoints: [AICrossSim](https://huggingface.co/AICrossSim)
-
-#### Generation
-
-We recommend using the HuggingFace Transformers library for generation tasks.
-We provide a script to convert the torchtitan checkpoint to a HuggingFace checkpoint (See [this file](/experiments/llm-digital/pretrain/README.md)).
-
-
-#### Parameter-Efficient Fine-tuning
-- For models larger than 1.1B, we fine-tune pretrained checkpoints.
-  - LoRA fine-tuning data
-  - LoRA fine-tuning scripts
-
-## Model Behavior Simulation
-
-- [Random bitflip](/experiments/llm-bitflip/)
-  - Post-training bitflip transform
-  - Bitflip-aware pretraining
-- Optical compute
-  - [Roberta on GLUE](/experiments/roberta-optical-transformer/)
-  - CLM `🚧 WIP`
-
-- Spiking neural networks `🚧 TODO`
-- In-memory compute `🚧 TODO
-`
-
-## Hardware-Performance Simulation
-
-`🚧 TODO`
diff --git a/docs/index.md b/docs/index.md
index 33b4fe6..3556a3f 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -12,16 +12,24 @@
 - [x] Filter out promising new compute paradigms by running small & medium scale experiments (Roberta on GLUE)
 - [ ] Scale up the promising new compute paradigms to large-scale language models
   - [ ] Fine-tuning/pretraining of CLM models (60M - 1.1B)
+    - [x] Random bitflip
     - [x] Optical compute
     - [ ] Spiking neural networks
     - [ ] In-memory compute
   - [ ] Parameter-efficient fine-tuning of larger LLMs (e.g., Llama-3.1-8B)
+    - [x] Random bitflip (promising results)
     - [x] Optical compute (failed to converge)

 ## What's New

-- 🚧**4th Oct, 2025 Milestone**: Fine-tuning/pretraining of alternative compute paradigms on CLMs.
+- **4th Feb, 2026 Milestone**: We have successfully fine-tuned Llama-3.1-8B with random bitflip noise injected into the forward passes, and observed promising results: LoRA adapters with only 1.2% trainable parameters can effectively mitigate the effect of the noise (reducing perplexity from 1008.95 to 11.01, with the original clean perplexity at 7.91).
+
+  | Item | Description |
+  | ---- | ----------- |
+  | Llama-3.1-8B with random bitflip noise | [Tutorial](./02-model-behaviour-level-simulation/clm-bitflip-lora-finetune.md) |
+
+- **4th Oct, 2025 Milestone**: Fine-tuning/pretraining of alternative compute paradigms on CLMs.
| Item | Description | | ---- | ----------- | From b459158871c0681750c22046fb18b70ee3b20d1e Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Mon, 2 Mar 2026 13:33:17 +0000 Subject: [PATCH 7/7] fix imports --- .../lora_finetune/plot_train_loss.py | 17 +++++++++---- .../lora_finetune/run_clm_no_trainer.py | 17 +++++++++---- experiments/llm-bitflip/pretrain/run.py | 24 +++++++++++++++---- experiments/llm-bitflip/transform/minimal.py | 9 +++++-- .../run_clm_no_trainer.py | 13 +++++++--- .../continual_pretraining/run_clm.py | 24 ++++++++++++------- .../lora_finetuning/run_clm_no_trainer.py | 13 +++++++--- .../llm-optical-transformer/pretrain/run.py | 16 +++++++++---- .../roberta-optical-transformer/run_glue.py | 3 +++ 9 files changed, 101 insertions(+), 35 deletions(-) diff --git a/experiments/llm-bitflip/lora_finetune/plot_train_loss.py b/experiments/llm-bitflip/lora_finetune/plot_train_loss.py index 8cacaaf..2378baf 100644 --- a/experiments/llm-bitflip/lora_finetune/plot_train_loss.py +++ b/experiments/llm-bitflip/lora_finetune/plot_train_loss.py @@ -20,9 +20,12 @@ def plot_train_loss(csv_path: Path, output_path: Optional[Path] = None) -> None: raise ValueError(f"Missing column: {loss_col}") fig, ax = plt.subplots(figsize=(8, 4.5)) - ax.plot(df[step_col], df[loss_col], label="bitflip r32", color="tab:blue", linewidth=1.8) - ax.axhline(baseline_loss, color="tab:red", linestyle="--", linewidth=1.2, - label="original") + ax.plot( + df[step_col], df[loss_col], label="bitflip r32", color="tab:blue", linewidth=1.8 + ) + ax.axhline( + baseline_loss, color="tab:red", linestyle="--", linewidth=1.2, label="original" + ) ax.set_xlabel("Train step") ax.set_ylabel("Train loss") @@ -40,9 +43,13 @@ def plot_train_loss(csv_path: Path, output_path: Optional[Path] = None) -> None: if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Plot train loss vs step from a W&B CSV export.") + parser = argparse.ArgumentParser( + description="Plot train loss vs step from a W&B CSV export." + ) parser.add_argument("csv", type=Path, help="Path to the W&B CSV export") - parser.add_argument("--output", "-o", type=Path, default=None, help="Path to save the plot (PNG)") + parser.add_argument( + "--output", "-o", type=Path, default=None, help="Path to save the plot (PNG)" + ) args = parser.parse_args() plot_train_loss(args.csv, args.output) diff --git a/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py index 60e7929..d3d8312 100644 --- a/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py +++ b/experiments/llm-bitflip/lora_finetune/run_clm_no_trainer.py @@ -34,6 +34,7 @@ Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ + # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
 import argparse
@@ -42,9 +43,12 @@
 import math
 import os
 import random
+import sys
 from itertools import chain
 from pathlib import Path
 
+sys.path.append(Path(__file__).resolve().parents[3].joinpath("src").as_posix())
+
 import datasets
 import tomllib
 import torch
@@ -393,6 +397,7 @@ def set_trainable(model: torch.nn.Module):
         logger.info(
             f"Number of trainable parameters: {n_params:,} ({100 * n_params / total_params:.2f}%)\nTrainable parameters: {trainable}"
         )
+
 else:
 
     def set_trainable(model: torch.nn.Module):
@@ -716,7 +721,9 @@ def evaluate_split(split_name: str, dataloader: DataLoader):
                     break
 
         if len(losses) == 0:
-            logger.warning(f"No batches processed for {split_name} split during evaluation.")
+            logger.warning(
+                f"No batches processed for {split_name} split during evaluation."
+            )
             return None
 
         losses = torch.cat(losses)
@@ -785,9 +792,11 @@ def evaluate_split(split_name: str, dataloader: DataLoader):
         name=args.lr_scheduler_type,
         optimizer=optimizer,
         num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,
-        num_training_steps=args.max_train_steps
-        if overrode_max_train_steps
-        else args.max_train_steps * accelerator.num_processes,
+        num_training_steps=(
+            args.max_train_steps
+            if overrode_max_train_steps
+            else args.max_train_steps * accelerator.num_processes
+        ),
     )
 
     # Prepare everything with our `accelerator`.
diff --git a/experiments/llm-bitflip/pretrain/run.py b/experiments/llm-bitflip/pretrain/run.py
index e299d52..7d2aff0 100644
--- a/experiments/llm-bitflip/pretrain/run.py
+++ b/experiments/llm-bitflip/pretrain/run.py
@@ -96,7 +96,9 @@ def generate_pretrain_cfg(
         silent=True,
     )
     num_tokens = token_num_scale * num_params
-    effective_batch_size = batch_size * data_parallel_replicate_degree * abs(data_parallel_shard_degree)
+    effective_batch_size = (
+        batch_size * data_parallel_replicate_degree * abs(data_parallel_shard_degree)
+    )
     num_steps = math.ceil(num_tokens / (effective_batch_size * seq_len))
 
     print(
@@ -105,7 +107,9 @@ def generate_pretrain_cfg(
     print(f"Effective batch size: {effective_batch_size}")
     print(f"Estimated number of steps: {num_steps}")
 
-    assert transform_config.exists(), f"Transform config file {transform_config} does not exist"
+    assert (
+        transform_config.exists()
+    ), f"Transform config file {transform_config} does not exist"
 
     with open(transform_config, "r") as f:
         transform_config = yaml.safe_load(f)
@@ -116,7 +120,9 @@ def generate_pretrain_cfg(
         ),
         profiling=ArgProfiling(),
         metrics=ArgMetrics(enable_tensorboard=False, enable_wandb=True),
-        model=ArgModel(name=model_arch, flavor=model_flavor, tokenizer_path=tokenizer_path),
+        model=ArgModel(
+            name=model_arch, flavor=model_flavor, tokenizer_path=tokenizer_path
+        ),
         optimizer=ArgOptimizer(lr=learning_rate),
         training=ArgTraining(
             dataset="fineweb-edu",
@@ -168,9 +174,17 @@ def pt_eval_ppl(
     seq_len: int = 2048,
 ):
     from pprint import pformat
-    from torchtitan.models import model_name_to_cls, model_name_to_tokenizer, models_config
+    from torchtitan.models import (
+        model_name_to_cls,
+        model_name_to_tokenizer,
+        models_config,
+    )
     from aixsim_models.llm.tokenizer import build_tokenizer
-    from aixsim_models.bitflip.transform import transform_model, TransformConfigManager, make_transform_histogram
+    from aixsim_models.bitflip.pretrain.transform import (
+        transform_model,
+        TransformConfigManager,
+        make_transform_histogram,
+    )
 
     transform_config_manager = TransformConfigManager(
         layer_name_to_config=transform_config.layer_name_to_config,
diff --git a/experiments/llm-bitflip/transform/minimal.py
b/experiments/llm-bitflip/transform/minimal.py index aeac101..5a7bee2 100644 --- a/experiments/llm-bitflip/transform/minimal.py +++ b/experiments/llm-bitflip/transform/minimal.py @@ -13,7 +13,10 @@ from jsonargparse import CLI from aixsim_models.llm.evaluator import hf_lm_eval, hf_generate -from aixsim_models.bitflip.pretrain.transform import transform_model, TransformConfigManager +from aixsim_models.bitflip.pretrain.transform import ( + transform_model, + TransformConfigManager, +) DEFAULT_DTYPE = "float16" DEFAULT_TASKS = ["wikitext"] @@ -31,7 +34,9 @@ def eval_ori( ): """Evaluate a pretrained model as baseline.""" device = torch.device("cuda") - model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=getattr(torch, dtype)).eval() + model = AutoModelForCausalLM.from_pretrained( + model_name, torch_dtype=getattr(torch, dtype) + ).eval() model.to(device) tokenizer = AutoTokenizer.from_pretrained(model_name) diff --git a/experiments/llm-optical-transformer/continual_finetuning/run_clm_no_trainer.py b/experiments/llm-optical-transformer/continual_finetuning/run_clm_no_trainer.py index 488731c..726419a 100644 --- a/experiments/llm-optical-transformer/continual_finetuning/run_clm_no_trainer.py +++ b/experiments/llm-optical-transformer/continual_finetuning/run_clm_no_trainer.py @@ -34,6 +34,7 @@ Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ + # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import argparse @@ -42,9 +43,12 @@ import math import os import random +import sys from itertools import chain from pathlib import Path +sys.path.append(Path(__file__).resolve().parents[3].joinpath("src").as_posix()) + import datasets import tomllib import torch @@ -395,6 +399,7 @@ def set_trainable(model: torch.nn.Module): logger.info( f"Number of trainable parameters: {n_params:,} ({100 * n_params / total_params:.2f}%)\nTrainable parameters: {trainable}" ) + else: def set_trainable(model: torch.nn.Module): @@ -709,9 +714,11 @@ def group_texts(examples): name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps - if overrode_max_train_steps - else args.max_train_steps * accelerator.num_processes, + num_training_steps=( + args.max_train_steps + if overrode_max_train_steps + else args.max_train_steps * accelerator.num_processes + ), ) # Prepare everything with our `accelerator`. diff --git a/experiments/llm-optical-transformer/continual_pretraining/run_clm.py b/experiments/llm-optical-transformer/continual_pretraining/run_clm.py index a38b425..6b03ec8 100644 --- a/experiments/llm-optical-transformer/continual_pretraining/run_clm.py +++ b/experiments/llm-optical-transformer/continual_pretraining/run_clm.py @@ -283,14 +283,18 @@ def __post_init__(self): else: if self.train_file is not None: extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], ( - "`train_file` should be a csv, a json or a txt file." - ) + assert extension in [ + "csv", + "json", + "txt", + ], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], ( - "`validation_file` should be a csv, a json or a txt file." 
- ) + assert extension in [ + "csv", + "json", + "txt", + ], "`validation_file` should be a csv, a json or a txt file." def main(): @@ -680,9 +684,11 @@ def compute_metrics(eval_preds): processing_class=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. data_collator=default_data_collator, - compute_metrics=compute_metrics - if training_args.do_eval and not is_torch_xla_available() - else None, + compute_metrics=( + compute_metrics + if training_args.do_eval and not is_torch_xla_available() + else None + ), preprocess_logits_for_metrics=( preprocess_logits_for_metrics if training_args.do_eval and not is_torch_xla_available() diff --git a/experiments/llm-optical-transformer/lora_finetuning/run_clm_no_trainer.py b/experiments/llm-optical-transformer/lora_finetuning/run_clm_no_trainer.py index 1ebfede..4a5bb8d 100644 --- a/experiments/llm-optical-transformer/lora_finetuning/run_clm_no_trainer.py +++ b/experiments/llm-optical-transformer/lora_finetuning/run_clm_no_trainer.py @@ -34,6 +34,7 @@ Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ + # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import argparse @@ -42,9 +43,12 @@ import math import os import random +import sys from itertools import chain from pathlib import Path +sys.path.append(Path(__file__).resolve().parents[3].joinpath("src").as_posix()) + import datasets import tomllib import torch @@ -395,6 +399,7 @@ def set_trainable(model: torch.nn.Module): logger.info( f"Number of trainable parameters: {n_params:,} ({100 * n_params / total_params:.2f}%)\nTrainable parameters: {trainable}" ) + else: def set_trainable(model: torch.nn.Module): @@ -709,9 +714,11 @@ def group_texts(examples): name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, - num_training_steps=args.max_train_steps - if overrode_max_train_steps - else args.max_train_steps * accelerator.num_processes, + num_training_steps=( + args.max_train_steps + if overrode_max_train_steps + else args.max_train_steps * accelerator.num_processes + ), ) # Prepare everything with our `accelerator`. 
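
Note on the sys.path bootstrap added to the experiment scripts above: each script prepends the repository's src/ directory to sys.path so that aixsim_models can be imported without installing the package, and the index passed to parents[...] simply reflects how deep the script sits below the repository root (parents[3] for experiments/<family>/<task>/script.py, parents[2] for experiments/<family>/script.py). A minimal, self-contained sketch of the pattern, assuming that layout (illustrative only, not part of the patch):

import sys
from pathlib import Path

# Resolve the repository root relative to this file; the index passed to
# parents[...] depends on how deeply the script is nested under the repo root.
REPO_ROOT = Path(__file__).resolve().parents[3]
sys.path.append(REPO_ROOT.joinpath("src").as_posix())

# Once src/ is on sys.path, the in-repo package resolves without pip install.
import aixsim_models  # noqa: E402
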
diff --git a/experiments/llm-optical-transformer/pretrain/run.py b/experiments/llm-optical-transformer/pretrain/run.py index ad53b27..9a3eca5 100644 --- a/experiments/llm-optical-transformer/pretrain/run.py +++ b/experiments/llm-optical-transformer/pretrain/run.py @@ -15,7 +15,9 @@ from aixsim_models.llm.evaluator import pt_evaluate_ppl, hf_check_ppl, hf_lm_eval from aixsim_models.llm.utils import convert_torch_to_hf, convert_hf_to_torch -from aixsim_models.optical_compute.optical_transformer.pretrain.pretrainer import pretrain +from aixsim_models.optical_compute.optical_transformer.pretrain.pretrainer import ( + pretrain, +) from aixsim_models.optical_compute.optical_transformer.pretrain.arg_manager import ( ArgJob, ArgProfiling, @@ -64,7 +66,9 @@ def generate_pretrain_cfg( silent=True, ) num_tokens = token_num_scale * num_params - effective_batch_size = batch_size * data_parallel_replicate_degree * abs(data_parallel_shard_degree) + effective_batch_size = ( + batch_size * data_parallel_replicate_degree * abs(data_parallel_shard_degree) + ) num_steps = math.ceil(num_tokens / (effective_batch_size * seq_len)) print( @@ -73,7 +77,9 @@ def generate_pretrain_cfg( print(f"Effective batch size: {effective_batch_size}") print(f"Estimated number of steps: {num_steps}") - assert transform_config.exists(), f"Transform config file {transform_config} does not exist" + assert ( + transform_config.exists() + ), f"Transform config file {transform_config} does not exist" with open(transform_config, "r") as f: transform_config = yaml.safe_load(f) @@ -84,7 +90,9 @@ def generate_pretrain_cfg( ), profiling=ArgProfiling(), metrics=ArgMetrics(enable_tensorboard=False, enable_wandb=True), - model=ArgModel(name=model_arch, flavor=model_flavor, tokenizer_path=tokenizer_path), + model=ArgModel( + name=model_arch, flavor=model_flavor, tokenizer_path=tokenizer_path + ), optimizer=ArgOptimizer(lr=learning_rate), training=ArgTraining( dataset="fineweb-edu", diff --git a/experiments/roberta-optical-transformer/run_glue.py b/experiments/roberta-optical-transformer/run_glue.py index f679da2..3f1edf5 100644 --- a/experiments/roberta-optical-transformer/run_glue.py +++ b/experiments/roberta-optical-transformer/run_glue.py @@ -21,8 +21,11 @@ import random import sys from dataclasses import dataclass, field +from pathlib import Path from typing import Optional +sys.path.append(Path(__file__).resolve().parents[2].joinpath("src").as_posix()) + import datasets import evaluate import numpy as np
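
For context on the perplexity figures quoted in the docs update (1008.95 -> 11.01 against a clean 7.91): the evaluation loops in these scripts report perplexity as the exponential of the mean evaluation loss, falling back to infinity when the loss is too large for math.exp. A self-contained sketch of that computation with made-up loss values, purely for illustration:

import math

import torch

# Illustrative per-batch eval losses; the real scripts gather these with
# accelerator.gather_for_metrics(...) before concatenation.
losses = torch.tensor([2.05, 2.10, 1.98, 2.07])

eval_loss = torch.mean(losses)
try:
    perplexity = math.exp(eval_loss)  # perplexity = exp(mean negative log-likelihood)
except OverflowError:
    perplexity = float("inf")  # very large losses overflow exp()

print(f"loss={eval_loss.item():.4f} perplexity={perplexity:.2f}")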