From 1b1db8b7a0838f0c851243e5f84dc729d7b1f113 Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 02:48:00 -0800 Subject: [PATCH 1/9] Delete assets directory --- assets/fcpe/.gitkeep | 0 assets/hubert/.gitkeep | 0 assets/rmvpe/.gitkeep | 0 3 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 assets/fcpe/.gitkeep delete mode 100644 assets/hubert/.gitkeep delete mode 100644 assets/rmvpe/.gitkeep diff --git a/assets/fcpe/.gitkeep b/assets/fcpe/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/assets/hubert/.gitkeep b/assets/hubert/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/assets/rmvpe/.gitkeep b/assets/rmvpe/.gitkeep deleted file mode 100644 index e69de29b..00000000 From 1ad1ce8e5dc1d5ae4c8060211b439525deabc085 Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 02:55:04 -0800 Subject: [PATCH 2/9] Delete lib directory --- lib/infer.py | 221 ----- lib/infer_libs/audio.py | 87 -- lib/infer_libs/fcpe.py | 873 ----------------- lib/infer_libs/infer_pack/attentions.py | 414 -------- lib/infer_libs/infer_pack/commons.py | 164 ---- lib/infer_libs/infer_pack/models.py | 1174 ----------------------- lib/infer_libs/infer_pack/modules.py | 517 ---------- lib/infer_libs/infer_pack/transforms.py | 207 ---- lib/infer_libs/rmvpe.py | 705 -------------- lib/modules.py | 559 ----------- lib/pipeline.py | 773 --------------- lib/split_audio.py | 91 -- 12 files changed, 5785 deletions(-) delete mode 100644 lib/infer.py delete mode 100644 lib/infer_libs/audio.py delete mode 100644 lib/infer_libs/fcpe.py delete mode 100644 lib/infer_libs/infer_pack/attentions.py delete mode 100644 lib/infer_libs/infer_pack/commons.py delete mode 100644 lib/infer_libs/infer_pack/models.py delete mode 100644 lib/infer_libs/infer_pack/modules.py delete mode 100644 lib/infer_libs/infer_pack/transforms.py delete mode 100644 lib/infer_libs/rmvpe.py delete mode 100644 lib/modules.py delete mode 100644 lib/pipeline.py delete mode 100644 lib/split_audio.py diff --git a/lib/infer.py b/lib/infer.py deleted file mode 100644 index 1f8e0fb2..00000000 --- a/lib/infer.py +++ /dev/null @@ -1,221 +0,0 @@ -import os -import shutil -import gc -import torch -from multiprocessing import cpu_count -from lib.modules import VC -from lib.split_audio import split_silence_nonsilent, adjust_audio_lengths, combine_silence_nonsilent - -class Configs: - def __init__(self, device, is_half): - self.device = device - self.is_half = is_half - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - #if ( -# ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) -# or "P40" in self.gpu_name.upper() -# or "1060" in self.gpu_name -# or "1070" in self.gpu_name -# or "1080" in self.gpu_name -# ): -# print("16 series/10 series P40 forced single precision") -# self.is_half = False -# for config_file in ["32k.json", "40k.json", "48k.json"]: -# with open(BASE_DIR / "src" / "configs" / config_file, "r") as f: -# strr = f.read().replace("true", "false") -# with open(BASE_DIR / "src" / "configs" / config_file, "w") as f: -# f.write(strr) -# with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f: -# strr = f.read().replace("3.7", "3.0") -# with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", 
"w") as f: -# f.write(strr) -# else: -# self.gpu_name = None -# self.gpu_mem = int( -# torch.cuda.get_device_properties(i_device).total_memory -# / 1024 -# / 1024 -# / 1024 -# + 0.4 -# ) -# if self.gpu_mem <= 4: -# with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f: -# strr = f.read().replace("3.7", "3.0") -# with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f: -# f.write(strr) - elif torch.backends.mps.is_available(): - print("No supported N-card found, use MPS for inference") - self.device = "mps" - else: - print("No supported N-card found, use CPU for inference") - self.device = "cpu" - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G memory config - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G memory config - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max - -def get_model(voice_model): - model_dir = os.path.join(os.getcwd(), "models", voice_model) - model_filename, index_filename = None, None - for file in os.listdir(model_dir): - ext = os.path.splitext(file)[1] - if ext == '.pth': - model_filename = file - if ext == '.index': - index_filename = file - - if model_filename is None: - print(f'No model file exists in {models_dir}.') - return None, None - - return os.path.join(model_dir, model_filename), os.path.join(model_dir, index_filename) if index_filename else '' - -def infer_audio( - model_name, - audio_path, - f0_change=0, - f0_method="rmvpe+", - min_pitch="50", - max_pitch="1100", - crepe_hop_length=128, - index_rate=0.75, - filter_radius=3, - rms_mix_rate=0.25, - protect=0.33, - split_infer=False, - min_silence=500, - silence_threshold=-50, - seek_step=1, - keep_silence=100, - do_formant=False, - quefrency=0, - timbre=1, - f0_autotune=False, - audio_format="wav", - resample_sr=0, - hubert_model_path="assets/hubert/hubert_base.pt", - rmvpe_model_path="assets/rmvpe/rmvpe.pt", - fcpe_model_path="assets/fcpe/fcpe.pt" - ): - os.environ["rmvpe_model_path"] = rmvpe_model_path - os.environ["fcpe_model_path"] = fcpe_model_path - configs = Configs('cuda:0', True) - vc = VC(configs) - pth_path, index_path = get_model(model_name) - vc_data = vc.get_vc(pth_path, protect, 0.5) - - if split_infer: - inferred_files = [] - temp_dir = os.path.join(os.getcwd(), "seperate", "temp") - os.makedirs(temp_dir, exist_ok=True) - print("Splitting audio to silence and nonsilent segments.") - silence_files, nonsilent_files = split_silence_nonsilent(audio_path, min_silence, silence_threshold, seek_step, keep_silence) - print(f"Total silence segments: {len(silence_files)}.\nTotal nonsilent segments: {len(nonsilent_files)}.") - for i, nonsilent_file in enumerate(nonsilent_files): - print(f"Inferring nonsilent audio {i+1}") - inference_info, audio_data, output_path = vc.vc_single( - 0, - nonsilent_file, - f0_change, - f0_method, - index_path, - index_path, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - audio_format, - crepe_hop_length, - do_formant, - quefrency, - timbre, - min_pitch, - max_pitch, - f0_autotune, - hubert_model_path - ) - if inference_info[0] == "Success.": - print("Inference ran successfully.") - print(inference_info[1]) - print("Times:\nnpy: %.2fs f0: %.2fs infer: %.2fs\nTotal time: %.2fs" % (*inference_info[2],)) - else: - print(f"An error occurred while processing.\n{inference_info[0]}") - return None - 
inferred_files.append(output_path) - print("Adjusting inferred audio lengths.") - adjusted_inferred_files = adjust_audio_lengths(nonsilent_files, inferred_files) - print("Combining silence and inferred audios.") - output_count = 1 - while True: - output_path = os.path.join(os.getcwd(), "output", f"{os.path.splitext(os.path.basename(audio_path))[0]}{model_name}{f0_method.capitalize()}_{output_count}.{audio_format}") - if not os.path.exists(output_path): - break - output_count += 1 - output_path = combine_silence_nonsilent(silence_files, adjusted_inferred_files, keep_silence, output_path) - [shutil.move(inferred_file, temp_dir) for inferred_file in inferred_files] - shutil.rmtree(temp_dir) - else: - inference_info, audio_data, output_path = vc.vc_single( - 0, - audio_path, - f0_change, - f0_method, - index_path, - index_path, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - audio_format, - crepe_hop_length, - do_formant, - quefrency, - timbre, - min_pitch, - max_pitch, - f0_autotune, - hubert_model_path - ) - if inference_info[0] == "Success.": - print("Inference ran successfully.") - print(inference_info[1]) - print("Times:\nnpy: %.2fs f0: %.2fs infer: %.2fs\nTotal time: %.2fs" % (*inference_info[2],)) - else: - print(f"An error occurred while processing.\n{inference_info[0]}") - del configs, vc - gc.collect() - return inference_info[0] - - del configs, vc - gc.collect() - return output_path \ No newline at end of file diff --git a/lib/infer_libs/audio.py b/lib/infer_libs/audio.py deleted file mode 100644 index 5c831766..00000000 --- a/lib/infer_libs/audio.py +++ /dev/null @@ -1,87 +0,0 @@ -import numpy as np -import av -import ffmpeg -import os -import traceback -import sys -import subprocess - -platform_stft_mapping = { - 'linux': os.path.join(os.getcwd(), 'stftpitchshift'), - 'darwin': os.path.join(os.getcwd(), 'stftpitchshift'), - 'win32': os.path.join(os.getcwd(), 'stftpitchshift.exe'), -} - -stft = platform_stft_mapping.get(sys.platform) - -def wav2(i, o, format): - inp = av.open(i, 'rb') - if format == "m4a": format = "mp4" - out = av.open(o, 'wb', format=format) - if format == "ogg": format = "libvorbis" - if format == "mp4": format = "aac" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): out.mux(p) - - for p in ostream.encode(None): out.mux(p) - - out.close() - inp.close() - -def load_audio(file, sr, DoFormant=False, Quefrency=1.0, Timbre=1.0): - formanted = False - file = file.strip(' \n"') - if not os.path.exists(file): - raise RuntimeError( - "Wrong audio path, that does not exist." - ) - - try: - if DoFormant: - print("Starting formant shift. Please wait as this process takes a while.") - formanted_file = f"{os.path.splitext(os.path.basename(file))[0]}_formanted{os.path.splitext(os.path.basename(file))[1]}" - command = ( - f'{stft} -i "{file}" -q "{Quefrency}" ' - f'-t "{Timbre}" -o "{formanted_file}"' - ) - subprocess.run(command, shell=True) - file = formanted_file - print(f"Formanted {file}\n") - - # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
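-    # The f32le/pcm_f32le/ac=1/ar=sr options below decode to raw 32-bit float
-    # mono PCM at the requested rate, so np.frombuffer can reinterpret stdout
-    # directly; e.g. load_audio("input.wav", 16000) (hypothetical path) yields
-    # a 1-D float32 array at 16 kHz.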
- file = ( - file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # Prevent small white copy path head and tail with spaces and " and return - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True) - ) - - return np.frombuffer(out, np.float32).flatten() - - except Exception as e: - raise RuntimeError(f"Failed to load audio: {e}") - -def check_audio_duration(file): - try: - file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - probe = ffmpeg.probe(file) - - duration = float(probe['streams'][0]['duration']) - - if duration < 0.76: - print( - f"Audio file, {file.split('/')[-1]}, under ~0.76s detected - file is too short. Target at least 1-2s for best results." - ) - return False - - return True - except Exception as e: - raise RuntimeError(f"Failed to check audio duration: {e}") \ No newline at end of file diff --git a/lib/infer_libs/fcpe.py b/lib/infer_libs/fcpe.py deleted file mode 100644 index ddffd33e..00000000 --- a/lib/infer_libs/fcpe.py +++ /dev/null @@ -1,873 +0,0 @@ -from typing import Union - -import torch.nn.functional as F -import numpy as np -import torch -import torch.nn as nn -from torch.nn.utils import weight_norm -from torchaudio.transforms import Resample -import os -import librosa -import soundfile as sf -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -import math -from functools import partial - -from einops import rearrange, repeat -from local_attention import LocalAttention -from torch import nn - -os.environ["LRU_CACHE_CAPACITY"] = "3" - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 48000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. 
return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 48000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, keyshift=0, speed=1, center=False, train=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(n_fft * factor)) - win_size_new = int(np.round(win_size * factor)) - hop_length_new = int(np.round(hop_length * speed)) - if not train: - mel_basis = self.mel_basis - hann_window = self.hann_window - else: - mel_basis = {} - hann_window = {} - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - mel_basis_key = str(fmax)+'_'+str(y.device) - if mel_basis_key not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device) - - keyshift_key = str(keyshift)+'_'+str(y.device) - if keyshift_key not in hann_window: - hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device) - - pad_left = (win_size_new - hop_length_new) //2 - pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left) - if pad_right < y.size(-1): - mode = 'reflect' - else: - mode = 'constant' - y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode) - y = y.squeeze(1) - - spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=hann_window[keyshift_key], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True) - spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9)) - if keyshift != 0: - size = n_fft // 2 + 1 - resize = spec.size(1) - if resize < size: - spec = F.pad(spec, (0, 0, 0, size-resize)) - spec = spec[:, :size, :] * win_size / win_size_new - spec = torch.matmul(mel_basis[mel_basis_key], spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() - -#import fast_transformers.causal_product.causal_product_cuda - -def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None): - b, h, *_ = data.shape - # (batch size, head, length, 
model_dim) - - # normalize model dim - data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1. - - # what is ration?, projection_matrix.shape[0] --> 266 - - ratio = (projection_matrix.shape[0] ** -0.5) - - projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h) - projection = projection.type_as(data) - - #data_dash = w^T x - data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection) - - - # diag_data = D**2 - diag_data = data ** 2 - diag_data = torch.sum(diag_data, dim=-1) - diag_data = (diag_data / 2.0) * (data_normalizer ** 2) - diag_data = diag_data.unsqueeze(dim=-1) - - #print () - if is_query: - data_dash = ratio * ( - torch.exp(data_dash - diag_data - - torch.max(data_dash, dim=-1, keepdim=True).values) + eps) - else: - data_dash = ratio * ( - torch.exp(data_dash - diag_data + eps))#- torch.max(data_dash)) + eps) - - return data_dash.type_as(data) - -def orthogonal_matrix_chunk(cols, qr_uniform_q = False, device = None): - unstructured_block = torch.randn((cols, cols), device = device) - q, r = torch.linalg.qr(unstructured_block.cpu(), mode='reduced') - q, r = map(lambda t: t.to(device), (q, r)) - - # proposed by @Parskatt - # to make sure Q is uniform https://arxiv.org/pdf/math-ph/0609050.pdf - if qr_uniform_q: - d = torch.diag(r, 0) - q *= d.sign() - return q.t() -def exists(val): - return val is not None - -def empty(tensor): - return tensor.numel() == 0 - -def default(val, d): - return val if exists(val) else d - -def cast_tuple(val): - return (val,) if not isinstance(val, tuple) else val - -class PCmer(nn.Module): - """The encoder that is used in the Transformer model.""" - - def __init__(self, - num_layers, - num_heads, - dim_model, - dim_keys, - dim_values, - residual_dropout, - attention_dropout): - super().__init__() - self.num_layers = num_layers - self.num_heads = num_heads - self.dim_model = dim_model - self.dim_values = dim_values - self.dim_keys = dim_keys - self.residual_dropout = residual_dropout - self.attention_dropout = attention_dropout - - self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)]) - - # METHODS ######################################################################################################## - - def forward(self, phone, mask=None): - - # apply all layers to the input - for (i, layer) in enumerate(self._layers): - phone = layer(phone, mask) - # provide the final sequence - return phone - - -# ==================================================================================================================== # -# CLASS _ E N C O D E R L A Y E R # -# ==================================================================================================================== # - - -class _EncoderLayer(nn.Module): - """One layer of the encoder. - - Attributes: - attn: (:class:`mha.MultiHeadAttention`): The attention mechanism that is used to read the input sequence. - feed_forward (:class:`ffl.FeedForwardLayer`): The feed-forward layer on top of the attention mechanism. - """ - - def __init__(self, parent: PCmer): - """Creates a new instance of ``_EncoderLayer``. - - Args: - parent (Encoder): The encoder that the layers is created for. - """ - super().__init__() - - - self.conformer = ConformerConvModule(parent.dim_model) - self.norm = nn.LayerNorm(parent.dim_model) - self.dropout = nn.Dropout(parent.residual_dropout) - - # selfatt -> fastatt: performer! 
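-        # SelfAttention below approximates softmax attention with the random
-        # feature maps built in softmax_kernel above (Performer-style), so the
-        # cost grows linearly with sequence length rather than quadratically.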
- self.attn = SelfAttention(dim = parent.dim_model, - heads = parent.num_heads, - causal = False) - - # METHODS ######################################################################################################## - - def forward(self, phone, mask=None): - - # compute attention sub-layer - phone = phone + (self.attn(self.norm(phone), mask=mask)) - - phone = phone + (self.conformer(phone)) - - return phone - -def calc_same_padding(kernel_size): - pad = kernel_size // 2 - return (pad, pad - (kernel_size + 1) % 2) - -# helper classes - -class Swish(nn.Module): - def forward(self, x): - return x * x.sigmoid() - -class Transpose(nn.Module): - def __init__(self, dims): - super().__init__() - assert len(dims) == 2, 'dims must be a tuple of two dimensions' - self.dims = dims - - def forward(self, x): - return x.transpose(*self.dims) - -class GLU(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - - def forward(self, x): - out, gate = x.chunk(2, dim=self.dim) - return out * gate.sigmoid() - -class DepthWiseConv1d(nn.Module): - def __init__(self, chan_in, chan_out, kernel_size, padding): - super().__init__() - self.padding = padding - self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups = chan_in) - - def forward(self, x): - x = F.pad(x, self.padding) - return self.conv(x) - -class ConformerConvModule(nn.Module): - def __init__( - self, - dim, - causal = False, - expansion_factor = 2, - kernel_size = 31, - dropout = 0.): - super().__init__() - - inner_dim = dim * expansion_factor - padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0) - - self.net = nn.Sequential( - nn.LayerNorm(dim), - Transpose((1, 2)), - nn.Conv1d(dim, inner_dim * 2, 1), - GLU(dim=1), - DepthWiseConv1d(inner_dim, inner_dim, kernel_size = kernel_size, padding = padding), - #nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(), - Swish(), - nn.Conv1d(inner_dim, dim, 1), - Transpose((1, 2)), - nn.Dropout(dropout) - ) - - def forward(self, x): - return self.net(x) - -def linear_attention(q, k, v): - if v is None: - #print (k.size(), q.size()) - out = torch.einsum('...ed,...nd->...ne', k, q) - return out - - else: - k_cumsum = k.sum(dim = -2) - #k_cumsum = k.sum(dim = -2) - D_inv = 1. / (torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) + 1e-8) - - context = torch.einsum('...nd,...ne->...de', k, v) - #print ("TRUEEE: ", context.size(), q.size(), D_inv.size()) - out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv) - return out - -def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, qr_uniform_q = False, device = None): - nb_full_blocks = int(nb_rows / nb_columns) - #print (nb_full_blocks) - block_list = [] - - for _ in range(nb_full_blocks): - q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device) - block_list.append(q) - # block_list[n] is a orthogonal matrix ... 
(model_dim * model_dim) - #print (block_list[0].size(), torch.einsum('...nd,...nd->...n', block_list[0], torch.roll(block_list[0],1,1))) - #print (nb_rows, nb_full_blocks, nb_columns) - remaining_rows = nb_rows - nb_full_blocks * nb_columns - #print (remaining_rows) - if remaining_rows > 0: - q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device) - #print (q[:remaining_rows].size()) - block_list.append(q[:remaining_rows]) - - final_matrix = torch.cat(block_list) - - if scaling == 0: - multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1) - elif scaling == 1: - multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device) - else: - raise ValueError(f'Invalid scaling {scaling}') - - return torch.diag(multiplier) @ final_matrix - -class FastAttention(nn.Module): - def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), qr_uniform_q = False, no_projection = False): - super().__init__() - nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))) - - self.dim_heads = dim_heads - self.nb_features = nb_features - self.ortho_scaling = ortho_scaling - - self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling, qr_uniform_q = qr_uniform_q) - projection_matrix = self.create_projection() - self.register_buffer('projection_matrix', projection_matrix) - - self.generalized_attention = generalized_attention - self.kernel_fn = kernel_fn - - # if this is turned on, no projection will be used - # queries and keys will be softmax-ed as in the original efficient attention paper - self.no_projection = no_projection - - self.causal = causal - - @torch.no_grad() - def redraw_projection_matrix(self): - projections = self.create_projection() - self.projection_matrix.copy_(projections) - del projections - - def forward(self, q, k, v): - device = q.device - - if self.no_projection: - q = q.softmax(dim = -1) - k = torch.exp(k) if self.causal else k.softmax(dim = -2) - else: - create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device) - - q = create_kernel(q, is_query = True) - k = create_kernel(k, is_query = False) - - attn_fn = linear_attention if not self.causal else self.causal_linear_fn - if v is None: - out = attn_fn(q, k, None) - return out - else: - out = attn_fn(q, k, v) - return out -class SelfAttention(nn.Module): - def __init__(self, dim, causal = False, heads = 8, dim_head = 64, local_heads = 0, local_window_size = 256, nb_features = None, feature_redraw_interval = 1000, generalized_attention = False, kernel_fn = nn.ReLU(), qr_uniform_q = False, dropout = 0., no_projection = False): - super().__init__() - assert dim % heads == 0, 'dimension must be divisible by number of heads' - dim_head = default(dim_head, dim // heads) - inner_dim = dim_head * heads - self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, qr_uniform_q = qr_uniform_q, no_projection = no_projection) - - self.heads = heads - self.global_heads = heads - local_heads - self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None - - #print (heads, nb_features, dim_head) - #name_embedding = 
torch.zeros(110, heads, dim_head, dim_head) - #self.name_embedding = nn.Parameter(name_embedding, requires_grad=True) - - - self.to_q = nn.Linear(dim, inner_dim) - self.to_k = nn.Linear(dim, inner_dim) - self.to_v = nn.Linear(dim, inner_dim) - self.to_out = nn.Linear(inner_dim, dim) - self.dropout = nn.Dropout(dropout) - - @torch.no_grad() - def redraw_projection_matrix(self): - self.fast_attention.redraw_projection_matrix() - #torch.nn.init.zeros_(self.name_embedding) - #print (torch.sum(self.name_embedding)) - def forward(self, x, context = None, mask = None, context_mask = None, name=None, inference=False, **kwargs): - _, _, _, h, gh = *x.shape, self.heads, self.global_heads - - cross_attend = exists(context) - - context = default(context, x) - context_mask = default(context_mask, mask) if not cross_attend else context_mask - #print (torch.sum(self.name_embedding)) - q, k, v = self.to_q(x), self.to_k(context), self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v)) - (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v)) - - attn_outs = [] - #print (name) - #print (self.name_embedding[name].size()) - if not empty(q): - if exists(context_mask): - global_mask = context_mask[:, None, :, None] - v.masked_fill_(~global_mask, 0.) - if cross_attend: - pass - #print (torch.sum(self.name_embedding)) - #out = self.fast_attention(q,self.name_embedding[name],None) - #print (torch.sum(self.name_embedding[...,-1:])) - else: - out = self.fast_attention(q, k, v) - attn_outs.append(out) - - if not empty(lq): - assert not cross_attend, 'local attention is not compatible with cross attention' - out = self.local_attn(lq, lk, lv, input_mask = mask) - attn_outs.append(out) - - out = torch.cat(attn_outs, dim = 1) - out = rearrange(out, 'b h n d -> b n (h d)') - out = self.to_out(out) - return self.dropout(out) - -def l2_regularization(model, l2_alpha): - l2_loss = [] - for module in model.modules(): - if type(module) is nn.Conv2d: - l2_loss.append((module.weight ** 2).sum() / 2.0) - return l2_alpha * sum(l2_loss) - - -class FCPEModel(nn.Module): - def __init__( - self, - input_channel=128, - out_dims=360, - n_layers=12, - n_chans=512, - use_siren=False, - use_full=False, - loss_mse_scale=10, - loss_l2_regularization=False, - loss_l2_regularization_scale=1, - loss_grad1_mse=False, - loss_grad1_mse_scale=1, - f0_max=1975.5, - f0_min=32.70, - confidence=False, - threshold=0.05, - use_input_conv=True - ): - super().__init__() - if use_siren is True: - raise ValueError("Siren is not supported yet.") - if use_full is True: - raise ValueError("Full model is not supported yet.") - - self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10 - self.loss_l2_regularization = loss_l2_regularization if (loss_l2_regularization is not None) else False - self.loss_l2_regularization_scale = loss_l2_regularization_scale if (loss_l2_regularization_scale - is not None) else 1 - self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False - self.loss_grad1_mse_scale = loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1 - self.f0_max = f0_max if (f0_max is not None) else 1975.5 - self.f0_min = f0_min if (f0_min is not None) else 32.70 - self.confidence = confidence if (confidence is not None) else False - self.threshold = threshold if (threshold is not None) else 0.05 - self.use_input_conv = use_input_conv if (use_input_conv is not None) else True - - self.cent_table_b = torch.Tensor( - 
np.linspace(self.f0_to_cent(torch.Tensor([f0_min]))[0], self.f0_to_cent(torch.Tensor([f0_max]))[0], - out_dims)) - self.register_buffer("cent_table", self.cent_table_b) - - # conv in stack - _leaky = nn.LeakyReLU() - self.stack = nn.Sequential( - nn.Conv1d(input_channel, n_chans, 3, 1, 1), - nn.GroupNorm(4, n_chans), - _leaky, - nn.Conv1d(n_chans, n_chans, 3, 1, 1)) - - # transformer - self.decoder = PCmer( - num_layers=n_layers, - num_heads=8, - dim_model=n_chans, - dim_keys=n_chans, - dim_values=n_chans, - residual_dropout=0.1, - attention_dropout=0.1) - self.norm = nn.LayerNorm(n_chans) - - # out - self.n_out = out_dims - self.dense_out = weight_norm( - nn.Linear(n_chans, self.n_out)) - - def forward(self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder = "local_argmax"): - """ - input: - B x n_frames x n_unit - return: - dict of B x n_frames x feat - """ - if cdecoder == "argmax": - self.cdecoder = self.cents_decoder - elif cdecoder == "local_argmax": - self.cdecoder = self.cents_local_decoder - if self.use_input_conv: - x = self.stack(mel.transpose(1, 2)).transpose(1, 2) - else: - x = mel - x = self.decoder(x) - x = self.norm(x) - x = self.dense_out(x) # [B,N,D] - x = torch.sigmoid(x) - if not infer: - gt_cent_f0 = self.f0_to_cent(gt_f0) # mel f0 #[B,N,1] - gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0) # #[B,N,out_dim] - loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0) # bce loss - # l2 regularization - if self.loss_l2_regularization: - loss_all = loss_all + l2_regularization(model=self, l2_alpha=self.loss_l2_regularization_scale) - x = loss_all - if infer: - x = self.cdecoder(x) - x = self.cent_to_f0(x) - if not return_hz_f0: - x = (1 + x / 700).log() - return x - - def cents_decoder(self, y, mask=True): - B, N, _ = y.size() - ci = self.cent_table[None, None, :].expand(B, N, -1) - rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(y, dim=-1, keepdim=True) # cents: [B,N,1] - if mask: - confident = torch.max(y, dim=-1, keepdim=True)[0] - confident_mask = torch.ones_like(confident) - confident_mask[confident <= self.threshold] = float("-INF") - rtn = rtn * confident_mask - if self.confidence: - return rtn, confident - else: - return rtn - - def cents_local_decoder(self, y, mask=True): - B, N, _ = y.size() - ci = self.cent_table[None, None, :].expand(B, N, -1) - confident, max_index = torch.max(y, dim=-1, keepdim=True) - local_argmax_index = torch.arange(0,9).to(max_index.device) + (max_index - 4) - local_argmax_index[local_argmax_index<0] = 0 - local_argmax_index[local_argmax_index>=self.n_out] = self.n_out - 1 - ci_l = torch.gather(ci,-1,local_argmax_index) - y_l = torch.gather(y,-1,local_argmax_index) - rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(y_l, dim=-1, keepdim=True) # cents: [B,N,1] - if mask: - confident_mask = torch.ones_like(confident) - confident_mask[confident <= self.threshold] = float("-INF") - rtn = rtn * confident_mask - if self.confidence: - return rtn, confident - else: - return rtn - - def cent_to_f0(self, cent): - return 10. * 2 ** (cent / 1200.) - - def f0_to_cent(self, f0): - return 1200. * torch.log2(f0 / 10.) - - def gaussian_blurred_cent(self, cents): # cents: [B,N,1] - mask = (cents > 0.1) & (cents < (1200. 
* np.log2(self.f0_max / 10.))) - B, N, _ = cents.size() - ci = self.cent_table[None, None, :].expand(B, N, -1) - return torch.exp(-torch.square(ci - cents) / 1250) * mask.float() - - -class FCPEInfer: - def __init__(self, model_path, device=None, dtype=torch.float32): - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - ckpt = torch.load(model_path, map_location=torch.device(self.device)) - self.args = DotDict(ckpt["config"]) - self.dtype = dtype - model = FCPEModel( - input_channel=self.args.model.input_channel, - out_dims=self.args.model.out_dims, - n_layers=self.args.model.n_layers, - n_chans=self.args.model.n_chans, - use_siren=self.args.model.use_siren, - use_full=self.args.model.use_full, - loss_mse_scale=self.args.loss.loss_mse_scale, - loss_l2_regularization=self.args.loss.loss_l2_regularization, - loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale, - loss_grad1_mse=self.args.loss.loss_grad1_mse, - loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale, - f0_max=self.args.model.f0_max, - f0_min=self.args.model.f0_min, - confidence=self.args.model.confidence, - ) - model.to(self.device).to(self.dtype) - model.load_state_dict(ckpt['model']) - model.eval() - self.model = model - self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device) - - @torch.no_grad() - def __call__(self, audio, sr, threshold=0.05): - self.model.threshold = threshold - audio = audio[None,:] - mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype) - f0 = self.model(mel=mel, infer=True, return_hz_f0=True) - return f0 - - -class Wav2Mel: - - def __init__(self, args, device=None, dtype=torch.float32): - # self.args = args - self.sampling_rate = args.mel.sampling_rate - self.hop_size = args.mel.hop_size - if device is None: - device = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = device - self.dtype = dtype - self.stft = STFT( - args.mel.sampling_rate, - args.mel.num_mels, - args.mel.n_fft, - args.mel.win_size, - args.mel.hop_size, - args.mel.fmin, - args.mel.fmax - ) - self.resample_kernel = {} - - def extract_nvstft(self, audio, keyshift=0, train=False): - mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2) # B, n_frames, bins - return mel - - def extract_mel(self, audio, sample_rate, keyshift=0, train=False): - audio = audio.to(self.dtype).to(self.device) - # resample - if sample_rate == self.sampling_rate: - audio_res = audio - else: - key_str = str(sample_rate) - if key_str not in self.resample_kernel: - self.resample_kernel[key_str] = Resample(sample_rate, self.sampling_rate, lowpass_filter_width=128) - self.resample_kernel[key_str] = self.resample_kernel[key_str].to(self.dtype).to(self.device) - audio_res = self.resample_kernel[key_str](audio) - - # extract - mel = self.extract_nvstft(audio_res, keyshift=keyshift, train=train) # B, n_frames, bins - n_frames = int(audio.shape[1] // self.hop_size) + 1 - if n_frames > int(mel.shape[1]): - mel = torch.cat((mel, mel[:, -1:, :]), 1) - if n_frames < int(mel.shape[1]): - mel = mel[:, :n_frames, :] - return mel - - def __call__(self, audio, sample_rate, keyshift=0, train=False): - return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train) - - -class DotDict(dict): - def __getattr__(*args): - val = dict.get(*args) - return DotDict(val) if type(val) is dict else val - - __setattr__ = dict.__setitem__ - __delattr__ = dict.__delitem__ - -class F0Predictor(object): - def compute_f0(self,wav,p_len): - ''' - input: 
wav:[signal_length]
-            p_len:int
-        output: f0:[signal_length//hop_length]
-        '''
-        pass
-
-    def compute_f0_uv(self,wav,p_len):
-        '''
-        input: wav:[signal_length]
-            p_len:int
-        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
-        '''
-        pass
-
-class FCPE(F0Predictor):
-    def __init__(self, model_path, hop_length=512, f0_min=50, f0_max=1100, dtype=torch.float32, device=None, sampling_rate=44100,
-                 threshold=0.05):
-        self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
-        self.hop_length = hop_length
-        self.f0_min = f0_min
-        self.f0_max = f0_max
-        if device is None:
-            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        else:
-            self.device = device
-        self.threshold = threshold
-        self.sampling_rate = sampling_rate
-        self.dtype = dtype
-        self.name = "fcpe"
-
-    def repeat_expand(
-        self, content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
-    ):
-        ndim = content.ndim
-
-        if content.ndim == 1:
-            content = content[None, None]
-        elif content.ndim == 2:
-            content = content[None]
-
-        assert content.ndim == 3
-
-        is_np = isinstance(content, np.ndarray)
-        if is_np:
-            content = torch.from_numpy(content)
-
-        results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
-
-        if is_np:
-            results = results.numpy()
-
-        if ndim == 1:
-            return results[0, 0]
-        elif ndim == 2:
-            return results[0]
-
-    def post_process(self, x, sampling_rate, f0, pad_to):
-        if isinstance(f0, np.ndarray):
-            f0 = torch.from_numpy(f0).float().to(x.device)
-
-        if pad_to is None:
-            return f0
-
-        f0 = self.repeat_expand(f0, pad_to)
-
-        vuv_vector = torch.zeros_like(f0)
-        vuv_vector[f0 > 0.0] = 1.0
-        vuv_vector[f0 <= 0.0] = 0.0
-
-        # drop the zero-frequency (unvoiced) frames, then interpolate linearly
-        nzindex = torch.nonzero(f0).squeeze()
-        f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
-        time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
-        time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
-
-        vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]
-
-        if f0.shape[0] <= 0:
-            return torch.zeros(pad_to, dtype=torch.float, device=x.device).cpu().numpy(), vuv_vector.cpu().numpy()
-        if f0.shape[0] == 1:
-            return (torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0]).cpu().numpy(), vuv_vector.cpu().numpy()
-
-        # this could probably be rewritten with torch?
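-        # np.interp evaluates the voiced contour at every frame time, filling
-        # the unvoiced gaps linearly, e.g. f0 frames [100, 0, 0, 130] become
-        # [100, 110, 120, 130], holding f0[0]/f0[-1] at the edges.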
- f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1]) - # vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector,pad_to/len(vuv_vector),order = 0)) - - return f0, vuv_vector.cpu().numpy() - - def compute_f0(self, wav, p_len=None): - x = torch.FloatTensor(wav).to(self.dtype).to(self.device) - if p_len is None: - print("fcpe p_len is None") - p_len = x.shape[0] // self.hop_length - #else: -# assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0,:,0] - if torch.all(f0 == 0): - rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len) - return rtn, rtn - return self.post_process(x, self.sampling_rate, f0, p_len)[0] - - def compute_f0_uv(self, wav, p_len=None): - x = torch.FloatTensor(wav).to(self.dtype).to(self.device) - if p_len is None: - p_len = x.shape[0] // self.hop_length - #else: -# assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0,:,0] - if torch.all(f0 == 0): - rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len) - return rtn, rtn - return self.post_process(x, self.sampling_rate, f0, p_len) \ No newline at end of file diff --git a/lib/infer_libs/infer_pack/attentions.py b/lib/infer_libs/infer_pack/attentions.py deleted file mode 100644 index 94d61c89..00000000 --- a/lib/infer_libs/infer_pack/attentions.py +++ /dev/null @@ -1,414 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_libs.infer_pack import commons -from lib.infer_libs.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - 
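-        # proximal_init starts each self-attention layer with its key
-        # projection copied from its query projection (applied inside
-        # MultiHeadAttention below when proximal_init is set).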
self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, 
t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. 
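-        # Viewing the padded flat buffer as [b, h, l+1, 2*l-1] shears each row
-        # by one position, so entry [i, j] of the slice below holds the score
-        # for relative offset (j - i), giving the absolute [b, h, l, l] layout.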
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/lib/infer_libs/infer_pack/commons.py b/lib/infer_libs/infer_pack/commons.py deleted file mode 100644 index 2618e3ad..00000000 --- a/lib/infer_libs/infer_pack/commons.py +++ /dev/null @@ -1,164 +0,0 @@ -import math -import torch -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - 
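-    # Inverse-CDF sampling: if U ~ Uniform(0, 1), then -log(-log(U)) is
-    # Gumbel(0, 1); U is kept strictly inside (0, 1) above so neither log
-    # overflows.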
return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, 
norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/lib/infer_libs/infer_pack/models.py b/lib/infer_libs/infer_pack/models.py deleted file mode 100644 index 06f58a90..00000000 --- a/lib/infer_libs/infer_pack/models.py +++ /dev/null @@ -1,1174 +0,0 @@ -import math -import logging - -logger = logging.getLogger(__name__) - -import numpy as np -import torch -from torch import nn -from torch.nn import Conv1d, Conv2d, ConvTranspose1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm - -from lib.infer_libs.infer_pack import attentions, commons, modules -from lib.infer_libs.infer_pack.commons import get_padding, init_weights -has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available()) - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # 
[b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - 
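# --- Editorial annotation, not part of the original file ---
# This nested loop fills a flat ModuleList with num_upsamples * num_kernels
# residual blocks; forward() below reads block (i, j) back as
# resblocks[i * self.num_kernels + j] and averages the num_kernels parallel
# branches at each upsampling stage.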
self.resblocks.append(resblock(ch, k, d))
-
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
-        self.ups.apply(init_weights)
-
-        if gin_channels != 0:
-            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
-    def forward(self, x, g=None):
-        x = self.conv_pre(x)
-        if g is not None:
-            x = x + self.cond(g)
-
-        for i in range(self.num_upsamples):
-            x = F.leaky_relu(x, modules.LRELU_SLOPE)
-            x = self.ups[i](x)
-            xs = None
-            for j in range(self.num_kernels):
-                if xs is None:
-                    xs = self.resblocks[i * self.num_kernels + j](x)
-                else:
-                    xs += self.resblocks[i * self.num_kernels + j](x)
-            x = xs / self.num_kernels
-        x = F.leaky_relu(x)
-        x = self.conv_post(x)
-        x = torch.tanh(x)
-
-        return x
-
-    def remove_weight_norm(self):
-        for l in self.ups:
-            remove_weight_norm(l)
-        for l in self.resblocks:
-            l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
-    """Definition of sine generator
-    SineGen(samp_rate, harmonic_num = 0,
-            sine_amp = 0.1, noise_std = 0.003,
-            voiced_threshold = 0,
-            flag_for_pulse=False)
-    samp_rate: sampling rate in Hz
-    harmonic_num: number of harmonic overtones (default 0)
-    sine_amp: amplitude of sine waveform (default 0.1)
-    noise_std: std of Gaussian noise (default 0.003)
-    voiced_threshold: F0 threshold for U/V classification (default 0)
-    flag_for_pulse: this SineGen is used inside PulseGen (default False)
-    Note: when flag_for_pulse is True, the first time step of a voiced
-        segment is always sin(np.pi) or cos(0)
-    """
-
-    def __init__(
-        self,
-        samp_rate,
-        harmonic_num=0,
-        sine_amp=0.1,
-        noise_std=0.003,
-        voiced_threshold=0,
-        flag_for_pulse=False,
-    ):
-        super(SineGen, self).__init__()
-        self.sine_amp = sine_amp
-        self.noise_std = noise_std
-        self.harmonic_num = harmonic_num
-        self.dim = self.harmonic_num + 1
-        self.sampling_rate = samp_rate
-        self.voiced_threshold = voiced_threshold
-
-    def _f02uv(self, f0):
-        # generate uv signal
-        uv = torch.ones_like(f0)
-        uv = uv * (f0 > self.voiced_threshold)
-        if uv.device.type == "privateuseone":  # for DirectML
-            uv = uv.float()
-        return uv
-
-    def forward(self, f0, upp):
-        """sine_tensor, uv = forward(f0)
-        input F0: tensor(batchsize=1, length, dim=1)
-                  f0 for unvoiced steps should be 0
-        output sine_tensor: tensor(batchsize=1, length, dim)
-        output uv: tensor(batchsize=1, length, 1)
-        """
-        with torch.no_grad():
-            f0 = f0[:, None].transpose(1, 2)
-            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
-            # fundamental component
-            f0_buf[:, :, 0] = f0[:, :, 0]
-            for idx in np.arange(self.harmonic_num):
-                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
-                    idx + 2
-                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
-            rad_values = (f0_buf / self.sampling_rate) % 1  # taking % 1 here means the n_har products cannot be optimized away in post-processing
-            rand_ini = torch.rand(
-                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
-            )
-            rand_ini[:, 0] = 0
-            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
-            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # taking % 1 here would keep the cumsum below from being optimized further
-            tmp_over_one *= upp
-            tmp_over_one = F.interpolate(
-                tmp_over_one.transpose(2, 1),
-                scale_factor=upp,
-                mode="linear",
-                align_corners=True,
-            ).transpose(2, 1)
-            rad_values = F.interpolate(
-                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
-            ).transpose(
-                2, 1
-            )
-            tmp_over_one %= 1
-            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
-            cumsum_shift = torch.zeros_like(rad_values)
-            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
-            sine_waves = torch.sin(
-                torch.cumsum(rad_values + cumsum_shift, dim=1) *
2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - if hasattr(self, "ddtype") == False: - self.ddtype = self.l_linear.weight.dtype - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - # print(x.dtype,sine_wavs.dtype,self.l_linear.weight.dtype) - # if self.is_half: - # sine_wavs = sine_wavs.half() - # sine_merge = self.l_tanh(self.l_linear(sine_wavs.to(x))) - # print(sine_wavs.dtype,self.ddtype) - if sine_wavs.dtype != self.ddtype: - sine_wavs = sine_wavs.to(self.ddtype) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - 
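# --- Editorial annotation, not part of the original file ---
# Final stage: the features already run at the full audio rate, so the
# harmonic source needs no further downsampling and a kernel-size-1 conv
# suffices. Earlier stages used stride_f0 = prod(upsample_rates[i + 1:]) so
# the full-rate excitation is decimated to stage i's frame rate; e.g. with
# assumed rates [10, 10, 2, 2] the strides come out as 40, 4, 2, then 1 here.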
self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - logger.debug( - "gin_channels: " - + str(gin_channels) - + ", self.spk_embed_dim: " - + str(self.spk_embed_dim) - ) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, 
phone_lengths, pitch, pitchf, y, y_lengths, ds
-    ):  # here ds is the speaker id, [bs, 1]
-        # print(1,pitch.shape)#[bs,t]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the 1 is the t axis, broadcast
-        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-        z_p = self.flow(z, y_mask, g=g)
-        z_slice, ids_slice = commons.rand_slice_segments(
-            z, y_lengths, self.segment_size
-        )
-        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
-        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
-        # print(-2,pitchf.shape,z_slice.shape)
-        o = self.dec(z_slice, pitchf, g=g)
-        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-    def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
-        g = self.emb_g(sid).unsqueeze(-1)
-        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
-        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
-        if rate:
-            head = int(z_p.shape[2] * rate)
-            z_p = z_p[:, :, -head:]
-            x_mask = x_mask[:, :, -head:]
-            nsff0 = nsff0[:, -head:]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
-        o = self.dec(z * x_mask, nsff0, g=g)
-        return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid(nn.Module):
-    def __init__(
-        self,
-        spec_channels,
-        segment_size,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        spk_embed_dim,
-        gin_channels,
-        sr,
-        **kwargs
-    ):
-        super().__init__()
-        if type(sr) == type("strr"):
-            sr = sr2sr[sr]
-        self.spec_channels = spec_channels
-        self.inter_channels = inter_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.resblock = resblock
-        self.resblock_kernel_sizes = resblock_kernel_sizes
-        self.resblock_dilation_sizes = resblock_dilation_sizes
-        self.upsample_rates = upsample_rates
-        self.upsample_initial_channel = upsample_initial_channel
-        self.upsample_kernel_sizes = upsample_kernel_sizes
-        self.segment_size = segment_size
-        self.gin_channels = gin_channels
-        # self.hop_length = hop_length#
-        self.spk_embed_dim = spk_embed_dim
-        self.enc_p = TextEncoder768(
-            inter_channels,
-            hidden_channels,
-            filter_channels,
-            n_heads,
-            n_layers,
-            kernel_size,
-            p_dropout,
-        )
-        self.dec = GeneratorNSF(
-            inter_channels,
-            resblock,
-            resblock_kernel_sizes,
-            resblock_dilation_sizes,
-            upsample_rates,
-            upsample_initial_channel,
-            upsample_kernel_sizes,
-            gin_channels=gin_channels,
-            sr=sr,
-            is_half=kwargs["is_half"],
-        )
-        self.enc_q = PosteriorEncoder(
-            spec_channels,
-            inter_channels,
-            hidden_channels,
-            5,
-            1,
-            16,
-            gin_channels=gin_channels,
-        )
-        self.flow = ResidualCouplingBlock(
-            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
-        )
-        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
-        logger.debug(
-            "gin_channels: "
-            + str(gin_channels)
-            + ", self.spk_embed_dim: "
-            + str(self.spk_embed_dim)
-        )
-
-    def remove_weight_norm(self):
-        self.dec.remove_weight_norm()
-        self.flow.remove_weight_norm()
-        self.enc_q.remove_weight_norm()
-
-    def forward(
-        self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
-    ):  # here ds is the speaker id, [bs, 1]
-        # print(1,pitch.shape)#[bs,t]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the 1 is the t axis, broadcast
-        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-        z_p = self.flow(z, y_mask, g=g)
-        z_slice, ids_slice = commons.rand_slice_segments(
-            z, y_lengths, self.segment_size
-        )
-        # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
-        pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
-        # print(-2,pitchf.shape,z_slice.shape)
-        o = self.dec(z_slice, pitchf, g=g)
-        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-    def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
-        g = self.emb_g(sid).unsqueeze(-1)
-        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
-        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
-        if rate:
-            head = int(z_p.shape[2] * rate)
-            z_p = z_p[:, :, -head:]
-            x_mask = x_mask[:, :, -head:]
-            nsff0 = nsff0[:, -head:]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
-        o = self.dec(z * x_mask, nsff0, g=g)
-        return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
-    def __init__(
-        self,
-        spec_channels,
-        segment_size,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        spk_embed_dim,
-        gin_channels,
-        sr=None,
-        **kwargs
-    ):
-        super().__init__()
-        self.spec_channels = spec_channels
-        self.inter_channels = inter_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.resblock = resblock
-        self.resblock_kernel_sizes = resblock_kernel_sizes
-        self.resblock_dilation_sizes = resblock_dilation_sizes
-        self.upsample_rates = upsample_rates
-        self.upsample_initial_channel = upsample_initial_channel
-        self.upsample_kernel_sizes = upsample_kernel_sizes
-        self.segment_size = segment_size
-        self.gin_channels = gin_channels
-        # self.hop_length = hop_length#
-        self.spk_embed_dim = spk_embed_dim
-        self.enc_p = TextEncoder256(
-            inter_channels,
-            hidden_channels,
-            filter_channels,
-            n_heads,
-            n_layers,
-            kernel_size,
-            p_dropout,
-            f0=False,
-        )
-        self.dec = Generator(
-            inter_channels,
-            resblock,
-            resblock_kernel_sizes,
-            resblock_dilation_sizes,
-            upsample_rates,
-            upsample_initial_channel,
-            upsample_kernel_sizes,
-            gin_channels=gin_channels,
-        )
-        self.enc_q = PosteriorEncoder(
-            spec_channels,
-            inter_channels,
-            hidden_channels,
-            5,
-            1,
-            16,
-            gin_channels=gin_channels,
-        )
-        self.flow = ResidualCouplingBlock(
-            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
-        )
-        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
-        logger.debug(
-            "gin_channels: "
-            + str(gin_channels)
-            + ", self.spk_embed_dim: "
-            + str(self.spk_embed_dim)
-        )
-
-    def remove_weight_norm(self):
-        self.dec.remove_weight_norm()
-        self.flow.remove_weight_norm()
-        self.enc_q.remove_weight_norm()
-
-    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # here ds is the speaker id, [bs, 1]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the 1 is the t axis, broadcast
-        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-        z_p = self.flow(z, y_mask, g=g)
-        z_slice, ids_slice = commons.rand_slice_segments(
-            z, y_lengths, self.segment_size
-        )
-        o = self.dec(z_slice, g=g)
-        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-    def infer(self, phone, phone_lengths, sid, rate=None):
-        g = self.emb_g(sid).unsqueeze(-1)
-        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
-        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
-        if rate:
-            head = int(z_p.shape[2] * rate)
-            z_p = z_p[:, :, -head:]
-            x_mask = x_mask[:, :, -head:]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
-        o = self.dec(z * x_mask, g=g)
-        return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid_nono(nn.Module):
-    def __init__(
-        self,
-        spec_channels,
-        segment_size,
-        inter_channels,
-        hidden_channels,
-        filter_channels,
-        n_heads,
-        n_layers,
-        kernel_size,
-        p_dropout,
-        resblock,
-        resblock_kernel_sizes,
-        resblock_dilation_sizes,
-        upsample_rates,
-        upsample_initial_channel,
-        upsample_kernel_sizes,
-        spk_embed_dim,
-        gin_channels,
-        sr=None,
-        **kwargs
-    ):
-        super().__init__()
-        self.spec_channels = spec_channels
-        self.inter_channels = inter_channels
-        self.hidden_channels = hidden_channels
-        self.filter_channels = filter_channels
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.kernel_size = kernel_size
-        self.p_dropout = p_dropout
-        self.resblock = resblock
-        self.resblock_kernel_sizes = resblock_kernel_sizes
-        self.resblock_dilation_sizes = resblock_dilation_sizes
-        self.upsample_rates = upsample_rates
-        self.upsample_initial_channel = upsample_initial_channel
-        self.upsample_kernel_sizes = upsample_kernel_sizes
-        self.segment_size = segment_size
-        self.gin_channels = gin_channels
-        # self.hop_length = hop_length#
-        self.spk_embed_dim = spk_embed_dim
-        self.enc_p = TextEncoder768(
-            inter_channels,
-            hidden_channels,
-            filter_channels,
-            n_heads,
-            n_layers,
-            kernel_size,
-            p_dropout,
-            f0=False,
-        )
-        self.dec = Generator(
-            inter_channels,
-            resblock,
-            resblock_kernel_sizes,
-            resblock_dilation_sizes,
-            upsample_rates,
-            upsample_initial_channel,
-            upsample_kernel_sizes,
-            gin_channels=gin_channels,
-        )
-        self.enc_q = PosteriorEncoder(
-            spec_channels,
-            inter_channels,
-            hidden_channels,
-            5,
-            1,
-            16,
-            gin_channels=gin_channels,
-        )
-        self.flow = ResidualCouplingBlock(
-            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
-        )
-        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
-        logger.debug(
-            "gin_channels: "
-            + str(gin_channels)
-            + ", self.spk_embed_dim: "
-            + str(self.spk_embed_dim)
-        )
-
-    def remove_weight_norm(self):
-        self.dec.remove_weight_norm()
-        self.flow.remove_weight_norm()
-        self.enc_q.remove_weight_norm()
-
-    def forward(self, phone, phone_lengths, y, y_lengths, ds):  # here ds is the speaker id, [bs, 1]
-        g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the 1 is the t axis, broadcast
-        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
-        z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
-        z_p = self.flow(z, y_mask, g=g)
-        z_slice, ids_slice = commons.rand_slice_segments(
-            z, y_lengths, self.segment_size
-        )
-        o = self.dec(z_slice, g=g)
-        return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
-    def infer(self, phone, phone_lengths, sid, rate=None):
-        g = self.emb_g(sid).unsqueeze(-1)
-        m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
-        z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
-        if rate:
-            head = int(z_p.shape[2] * rate)
-            z_p = z_p[:, :, -head:]
-            x_mask = x_mask[:, :, -head:]
-        z = self.flow(z_p, x_mask, g=g, reverse=True)
-        o = self.dec(z * x_mask, g=g)
-        return o, x_mask, (z, z_p, m_p,
logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - 
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - if has_xpu and x.dtype == torch.bfloat16: - x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(dtype=torch.bfloat16) - else: - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/lib/infer_libs/infer_pack/modules.py b/lib/infer_libs/infer_pack/modules.py deleted file mode 100644 index caae38b0..00000000 --- a/lib/infer_libs/infer_pack/modules.py +++ /dev/null @@ -1,517 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import Conv1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm - -from lib.infer_libs.infer_pack import commons -from lib.infer_libs.infer_pack.commons import get_padding, init_weights -from lib.infer_libs.infer_pack.transforms import piecewise_rational_quadratic_transform - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
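# --- Editorial aside, not part of the original file: a standalone sketch of
# what the LayerNorm wrapper above computes (channel-axis normalization of a
# [b, c, t] tensor via transpose / F.layer_norm / transpose). Assumes only
# torch; the shapes are illustrative.
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 5)                      # [b, c, t]
gamma, beta = torch.ones(8), torch.zeros(8)   # fresh gamma/beta, as initialized above
y = F.layer_norm(x.transpose(1, -1), (8,), gamma, beta, 1e-5).transpose(1, -1)
ref = torch.nn.LayerNorm(8)(x.transpose(1, -1)).transpose(1, -1)
assert torch.allclose(y, ref, atol=1e-6)      # matches nn.LayerNorm over channels
# --- end editorial aside ---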
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
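# --- Editorial annotation, not part of the original file ---
# "output" accumulates the skip connections from every layer. In the loop
# below, each dilated conv produces 2 * hidden_channels activations that are
# gated as tanh(a) * sigmoid(b) (fused in commons.fused_add_tanh_sigmoid_multiply);
# the first hidden_channels of each layer's result feed the residual path and
# the rest are added to "output", except for the last layer, whose entire
# result goes to the skip path.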
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
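# --- Editorial annotation, not part of the original file ---
# After the permute, h has shape [b, c, t, 3 * num_bins - 1], matching the
# proj layer's half_channels * (num_bins * 3 - 1) outputs. The slices below
# split the last axis into num_bins unnormalized bin widths, num_bins
# unnormalized heights (both scaled by 1 / sqrt(filter_channels)), and
# num_bins - 1 unnormalized derivatives at the inner spline knots.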
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/lib/infer_libs/infer_pack/transforms.py b/lib/infer_libs/infer_pack/transforms.py deleted file mode 100644 index 6f30b717..00000000 --- a/lib/infer_libs/infer_pack/transforms.py +++ /dev/null @@ -1,207 +0,0 @@ -import numpy as np -import torch -from torch.nn import functional as F - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - 
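# --- Editorial aside, not part of the original file: a tiny standalone demo
# of the searchsorted helper defined above. For each input it returns the
# index of the knot interval the input falls into; the eps added to the last
# knot keeps inputs sitting exactly on the right edge inside the final bin.
import torch

def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1

knots = torch.tensor([0.0, 0.25, 0.5, 1.0])
x = torch.tensor([0.10, 0.30, 0.99, 1.00])
print(searchsorted(knots, x))  # tensor([0, 1, 2, 2])
# --- end editorial aside ---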
- -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = 
input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/lib/infer_libs/rmvpe.py b/lib/infer_libs/rmvpe.py deleted file mode 100644 index d0e591aa..00000000 --- a/lib/infer_libs/rmvpe.py +++ /dev/null @@ -1,705 +0,0 @@ -import os - -import numpy as np -import torch -try: - #Fix "Torch not compiled with CUDA enabled" - import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import - if torch.xpu.is_available(): - from lib.infer.modules.ipex import ipex_init - ipex_init() -except Exception: - pass -import torch.nn as nn -import torch.nn.functional as F -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window - -import logging - -logger = logging.getLogger(__name__) - - -###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py -def window_sumsquare( - window, - n_frames, - hop_length=200, - win_length=800, - n_fft=800, - dtype=np.float32, - norm=None, -): - """ - # from librosa 0.6 - Compute the sum-square envelope of a window function at a given hop length. - This is used to estimate modulation effects induced by windowing - observations in short-time fourier transforms. - Parameters - ---------- - window : string, tuple, number, callable, or list-like - Window specification, as in `get_window` - n_frames : int > 0 - The number of analysis frames - hop_length : int > 0 - The number of samples to advance between frames - win_length : [optional] - The length of the window function. By default, this matches `n_fft`. - n_fft : int > 0 - The length of each analysis frame. - dtype : np.dtype - The data type of the output - Returns - ------- - wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` - The sum-squared envelope of the window function - """ - if win_length is None: - win_length = n_fft - - n = n_fft + hop_length * (n_frames - 1) - x = np.zeros(n, dtype=dtype) - - # Compute the squared window at the desired length - win_sq = get_window(window, win_length, fftbins=True) - win_sq = normalize(win_sq, norm=norm) ** 2 - win_sq = pad_center(win_sq, n_fft) - - # Fill the envelope - for i in range(n_frames): - sample = i * hop_length - x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))] - return x - - -class STFT(torch.nn.Module): - def __init__( - self, filter_length=1024, hop_length=512, win_length=None, window="hann" - ): - """ - This module implements an STFT using 1D convolution and 1D transpose convolutions. - This is a bit tricky so there are some cases that probably won't work as working - out the same sizes before and after in all overlap add setups is tough. Right now, - this code should work with hop lengths that are half the filter length (50% overlap - between frames). - - Keyword Arguments: - filter_length {int} -- Length of filters used (default: {1024}) - hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512}) - win_length {[type]} -- Length of the window function applied to each frame (if not specified, it - equals the filter length). 
(default: {None}) - window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris) - (default: {'hann'}) - """ - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length if win_length else filter_length - self.window = window - self.forward_transform = None - self.pad_amount = int(self.filter_length / 2) - #scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - forward_basis = torch.FloatTensor(fourier_basis) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(fourier_basis) - ) - - assert filter_length >= self.win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, self.win_length, fftbins=True) - fft_window = pad_center(fft_window, size=filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis = (inverse_basis.T * fft_window).T - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - self.register_buffer("fft_window", fft_window.float()) - - def transform(self, input_data, return_phase=False): - """Take input data (audio) to STFT domain. - - Arguments: - input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples) - - Returns: - magnitude {tensor} -- Magnitude of STFT with shape (num_batch, - num_frequencies, num_frames) - phase {tensor} -- Phase of STFT with shape (num_batch, - num_frequencies, num_frames) - """ - # num_batches = input_data.shape[0] - # num_samples = input_data.shape[-1] - - # self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - # input_data = input_data.view(num_batches, 1, num_samples) - # print(1234,input_data.shape) - input_data = F.pad( - input_data, - (self.pad_amount, self.pad_amount), - mode="reflect", - ) - - forward_transform = input_data.unfold(1, self.filter_length, self.hop_length).permute(0, 2, 1) - forward_transform = torch.matmul(self.forward_basis, forward_transform) - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - - magnitude = torch.sqrt(real_part**2 + imag_part**2) - # phase = torch.atan2(imag_part.data, real_part.data) - - if return_phase: - phase = torch.atan2(imag_part.data, real_part.data) - return magnitude, phase - else: - return magnitude - - def inverse(self, magnitude, phase): - """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced - by the ```transform``` function. - - Arguments: - magnitude {tensor} -- Magnitude of STFT with shape (num_batch, - num_frequencies, num_frames) - phase {tensor} -- Phase of STFT with shape (num_batch, - num_frequencies, num_frames) - - Returns: - inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. 
Of - shape (num_batch, num_samples) - """ - cat = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - fold = torch.nn.Fold( - output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length), - kernel_size=(1, self.filter_length), - stride=(1, self.hop_length)) - inverse_transform = torch.matmul(self.inverse_basis, cat) - inverse_transform = fold(inverse_transform)[:, 0, 0, self.pad_amount : -self.pad_amount] - window_square_sum = self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0) - window_square_sum = fold(window_square_sum)[:, 0, 0, self.pad_amount : -self.pad_amount] - inverse_transform /= window_square_sum - - return inverse_transform - - def forward(self, input_data): - """Take input data (audio) to STFT domain and then back to audio. - - Arguments: - input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples) - - Returns: - reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of - shape (num_batch, num_samples) - """ - self.magnitude, self.phase = self.transform(input_data, return_phase=True) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction - - -from time import time as ttime - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, 
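- # Note: the first block maps in_channels -> out_channels; ConvBlockRes adds a
- # 1x1 shortcut conv whenever the channel count changes, so the residual sum stays valid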
out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in range(self.n_inters - 1): - self.layers.append( - ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - 
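- # 512 = 2 x 256 from the bidirectional GRU; 360 outputs, one per
- # 20-cent pitch bin (matching the cents_mapping table used by RMVPE.decode)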
nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * nn.N_MELS, nn.N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - # print(mel.shape) - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - # print(x.shape) - return x - - -from librosa.filters import mel - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - fmax=mel_fmax, - htk=True, - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - # "cpu"if(audio.device.type=="privateuseone") else audio.device - audio.device - ) - if "privateuseone" in str(audio.device): - if not hasattr(self, "stft"): - self.stft = STFT( - filter_length=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window="hann", - ).to(audio.device) - magnitude = self.stft.transform(audio) - else: - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - # if (audio.device.type == "privateuseone"): - # magnitude=magnitude.to(audio.device) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - # print(log_mel_spec.device.type) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - if "privateuseone" in str(device): - import onnxruntime as ort - - ort_session = ort.InferenceSession( - "%s/rmvpe.onnx" % os.environ["rmvpe_root"], - providers=["DmlExecutionProvider"], - ) - self.model = ort_session - else: - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.model = 
self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames - if n_pad > 0: - mel = F.pad( - mel, (0, n_pad), mode="constant" - ) - if "privateuseone" in str(self.device): - onnx_input_name = self.model.get_inputs()[0].name - onnx_outputs_names = self.model.get_outputs()[0].name - hidden = self.model.run( - [onnx_outputs_names], - input_feed={onnx_input_name: mel.cpu().numpy()}, - )[0] - else: - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - # torch.cuda.synchronize() - t0 = ttime() - mel = self.mel_extractor( - torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True - ) - # print(123123123,mel.device.type) - # torch.cuda.synchronize() - t1 = ttime() - hidden = self.mel2hidden(mel) - # torch.cuda.synchronize() - t2 = ttime() - # print(234234,hidden.device.type) - if "privateuseone" not in str(self.device): - hidden = hidden.squeeze(0).cpu().numpy() - else: - hidden = hidden[0] - if self.is_half == True: - hidden = hidden.astype("float32") - - f0 = self.decode(hidden, thred=thred) - # torch.cuda.synchronize() - t3 = ttime() - # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0)) - return f0 - - def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100): - t0 = ttime() - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - mel = self.mel_extractor(audio, center=True) - t1 = ttime() - hidden = self.mel2hidden(mel) - t2 = ttime() - if "privateuseone" not in str(self.device): - hidden = hidden.squeeze(0).cpu().numpy() - else: - hidden = hidden[0] - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - f0[(f0 < f0_min) | (f0 > f0_max)] = 0 - t3 = ttime() - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # 帧长#index - salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368 - # t1 = ttime() - center += 4 - todo_salience = [] - todo_cents_mapping = [] - starts = center - 4 - ends = center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # 帧长,9 - todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # 帧长 - devided = product_sum / weight_sum # 帧长 - # t3 = ttime() - maxx = np.max(salience, axis=1) # 帧长 - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided - - -if __name__ == "__main__": - import librosa - import soundfile as sf - - audio, sampling_rate = sf.read(r"C:\Users\liujing04\Desktop\Z\冬之花clip1.wav") - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - audio_bak = audio.copy() - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, 
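- # RMVPE consumes 16 kHz audio (its mel extractor is constructed with sr=16000)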
target_sr=16000) - model_path = r"D:\BaiduNetdiskDownload\RVC-beta-v2-0727AMD_realtime\rmvpe.pt" - thred = 0.03 # 0.01 - device = "cuda" if torch.cuda.is_available() else "cpu" - rmvpe = RMVPE(model_path, is_half=False, device=device) - t0 = ttime() - f0 = rmvpe.infer_from_audio(audio, thred=thred) - # f0 = rmvpe.infer_from_audio(audio, thred=thred) - # f0 = rmvpe.infer_from_audio(audio, thred=thred) - # f0 = rmvpe.infer_from_audio(audio, thred=thred) - # f0 = rmvpe.infer_from_audio(audio, thred=thred) - t1 = ttime() - logger.info("%s %.2f", f0.shape, t1 - t0) diff --git a/lib/modules.py b/lib/modules.py deleted file mode 100644 index 03911303..00000000 --- a/lib/modules.py +++ /dev/null @@ -1,559 +0,0 @@ -import os, sys -import traceback -import logging -now_dir = os.getcwd() -sys.path.append(now_dir) -logger = logging.getLogger(__name__) -import numpy as np -import soundfile as sf -import torch -from io import BytesIO -from lib.infer_libs.audio import load_audio -from lib.infer_libs.audio import wav2 -from lib.infer_libs.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from lib.pipeline import Pipeline -import time -import glob -from shutil import move - -sup_audioext = { - "wav", - "mp3", - "flac", - "ogg", - "opus", - "m4a", - "mp4", - "aac", - "alac", - "wma", - "aiff", - "webm", - "ac3", -} - -def note_to_hz(note_name): - try: - SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number - return frequency - except: - return None - -def load_hubert(hubert_model_path="assets/hubert/hubert_base.pt"): - from fairseq import checkpoint_utils - - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - [hubert_model_path], - suffix="", - ) - hubert_model = models[0] - #hubert_model = hubert_model.to(config.device) - hubert_model = hubert_model.float() - - hubert_models = hubert_model.eval() - return hubert_models - -class VC: - def __init__(self, config): - self.n_spk = None - self.tgt_sr = None - self.net_g = None - self.pipeline = None - self.cpt = None - self.version = None - self.if_f0 = None - self.version = None - self.hubert_model = None - - self.config = config - - def get_vc(self, sid, *to_return_protect): - logger.info("Get sid: " + sid) - - to_return_protect0 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[0] - if self.if_f0 != 0 and to_return_protect - else 0.5, - "__type__": "update", - } - to_return_protect1 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[1] - if self.if_f0 != 0 and to_return_protect - else 0.33, - "__type__": "update", - } - - if sid == "" or sid == []: - if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - logger.info("Clean model cache") - del ( - self.net_g, - self.n_spk, - self.vc, - self.hubert_model, - self.tgt_sr, - ) # ,cpt - self.hubert_model = ( - self.net_g - ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: 
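- # "nono" variant: v1 checkpoint trained without f0 (pitch) conditioning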
- self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) - del self.net_g, self.cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return ( - {"visible": False, "__type__": "update"}, - { - "visible": True, - "value": to_return_protect0, - "__type__": "update", - }, - { - "visible": True, - "value": to_return_protect1, - "__type__": "update", - }, - "", - "", - ) - #person = f'{os.getenv("weight_root")}/{sid}' - person = f'{sid}' - #logger.info(f"Loading: {person}") - logger.info(f"Loading...") - self.cpt = torch.load(person, map_location="cpu") - self.tgt_sr = self.cpt["config"][-1] - self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - - synthesizer_class = { - ("v1", 1): SynthesizerTrnMs256NSFsid, - ("v1", 0): SynthesizerTrnMs256NSFsid_nono, - ("v2", 1): SynthesizerTrnMs768NSFsid, - ("v2", 0): SynthesizerTrnMs768NSFsid_nono, - } - - self.net_g = synthesizer_class.get( - (self.version, self.if_f0), SynthesizerTrnMs256NSFsid - )(*self.cpt["config"], is_half=self.config.is_half) - - del self.net_g.enc_q - - self.net_g.load_state_dict(self.cpt["weight"], strict=False) - self.net_g.eval().to(self.config.device) - if self.config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - - self.pipeline = Pipeline(self.tgt_sr, self.config) - n_spk = self.cpt["config"][-3] - #index = {"value": get_index_path_from_model(sid), "__type__": "update"} - #logger.info("Select index: " + index["value"]) - - return ( - ( - {"visible": False, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1 - ) - if to_return_protect - else {"visible": False, "maximum": n_spk, "__type__": "update"} - ) - - def vc_single_dont_save( - self, - sid, - input_audio_path1, - f0_up_key, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - do_formant, - quefrency, - timbre, - f0_min, - f0_max, - f0_autotune, - hubert_model_path = "assets/hubert/hubert_base.pt" - ): - """ - Performs inference without saving - - Parameters: - - sid (int) - - input_audio_path1 (str) - - f0_up_key (int) - - f0_method (str) - - file_index (str) - - file_index2 (str) - - index_rate (float) - - filter_radius (int) - - resample_sr (int) - - rms_mix_rate (float) - - protect (float) - - crepe_hop_length (int) - - do_formant (bool) - - quefrency (float) - - timbre (float) - - f0_min (str) - - f0_max (str) - - f0_autotune (bool) - - hubert_model_path (str) - - Returns: - Tuple(Tuple(status, index_info, times), Tuple(sr, data)): - - Tuple(status, index_info, times): - - status (str): either "Success." or an error - - index_info (str): index path if used - - times (list): [npy_time, f0_time, infer_time, total_time] - - Tuple(sr, data): Audio data results. 
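- Note: on failure, status contains the traceback string and times is [None, None, None, None].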
- """ - global total_time - total_time = 0 - start_time = time.time() - - if not input_audio_path1: - return "You need to upload an audio", None - - if not os.path.exists(input_audio_path1): - return "Audio was not properly selected or doesn't exist", None - - f0_up_key = int(f0_up_key) - if not f0_min.isdigit(): - f0_min = note_to_hz(f0_min) - if f0_min: - print(f"Converted Min pitch: freq - {f0_min}") - else: - f0_min = 50 - print("Invalid minimum pitch note. Defaulting to 50hz.") - else: - f0_min = float(f0_min) - if not f0_max.isdigit(): - f0_max = note_to_hz(f0_max) - if f0_max: - print(f"Converted Max pitch: freq - {f0_max}") - else: - f0_max = 1100 - print("Invalid maximum pitch note. Defaulting to 1100hz.") - else: - f0_max = float(f0_max) - - try: - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=do_formant, - Quefrency=quefrency, - Timbre=timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(hubert_model_path, self.config) - - #try: - # self.if_f0 = self.cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - if file_index and not file_index == "" and isinstance(file_index, str): - file_index = file_index.strip(" ") \ - .strip('"') \ - .strip("\n") \ - .strip('"') \ - .strip(" ") \ - .replace("trained", "added") - elif file_index2: - file_index = file_index2 - else: - file_index = "" - - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - crepe_hop_length, - f0_autotune, - f0_min=f0_min, - f0_max=f0_max - ) - - if self.tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - else: - tgt_sr = self.tgt_sr - index_info = ( - "Index: %s." % file_index - if isinstance(file_index, str) and os.path.exists(file_index) - else "Index not used." - ) - end_time = time.time() - total_time = end_time - start_time - times.append(total_time) - return ( - ("Success.", index_info, times), - (tgt_sr, audio_opt), - ) - except: - info = traceback.format_exc() - logger.warn(info) - return ( - (info, None, [None, None, None, None]), - (None, None) - ) - - def vc_single( - self, - sid, - input_audio_path1, - f0_up_key, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - crepe_hop_length, - do_formant, - quefrency, - timbre, - f0_min, - f0_max, - f0_autotune, - hubert_model_path = "assets/hubert/hubert_base.pt" - ): - """ - Performs inference with saving - - Parameters: - - sid (int) - - input_audio_path1 (str) - - f0_up_key (int) - - f0_method (str) - - file_index (str) - - file_index2 (str) - - index_rate (float) - - filter_radius (int) - - resample_sr (int) - - rms_mix_rate (float) - - protect (float) - - format1 (str) - - crepe_hop_length (int) - - do_formant (bool) - - quefrency (float) - - timbre (float) - - f0_min (str) - - f0_max (str) - - f0_autotune (bool) - - hubert_model_path (str) - - Returns: - Tuple(Tuple(status, index_info, times), Tuple(sr, data), output_path): - - Tuple(status, index_info, times): - - status (str): either "Success." 
or an error - - index_info (str): index path if used - - times (list): [npy_time, f0_time, infer_time, total_time] - - Tuple(sr, data): Audio data results. - - output_path (str): Audio results path - """ - global total_time - total_time = 0 - start_time = time.time() - - if not input_audio_path1: - return "You need to upload an audio", None, None - - if not os.path.exists(input_audio_path1): - return "Audio was not properly selected or doesn't exist", None, None - - f0_up_key = int(f0_up_key) - if not f0_min.isdigit(): - f0_min = note_to_hz(f0_min) - if f0_min: - print(f"Converted Min pitch: freq - {f0_min}") - else: - f0_min = 50 - print("Invalid minimum pitch note. Defaulting to 50hz.") - else: - f0_min = float(f0_min) - if not f0_max.isdigit(): - f0_max = note_to_hz(f0_max) - if f0_max: - print(f"Converted Max pitch: freq - {f0_max}") - else: - f0_max = 1100 - print("Invalid maximum pitch note. Defaulting to 1100hz.") - else: - f0_max = float(f0_max) - - try: - print(f"Attempting to load {input_audio_path1}...") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=do_formant, - Quefrency=quefrency, - Timbre=timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(hubert_model_path) - - #try: - # self.if_f0 = self.cpt.get() #"f0" - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - if file_index and not file_index == "" and isinstance(file_index, str): - file_index = file_index.strip(" ") \ - .strip('"') \ - .strip("\n") \ - .strip('"') \ - .strip(" ") \ - .replace("trained", "added") - elif file_index2: - file_index = file_index2 - else: - file_index = "" - - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - crepe_hop_length, - f0_autotune, - f0_min=f0_min, - f0_max=f0_max - ) - - if self.tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - else: - tgt_sr = self.tgt_sr - index_info = ( - "Index: %s." % file_index - if isinstance(file_index, str) and os.path.exists(file_index) - else "Index not used." 
- ) - - opt_root = os.path.join(os.getcwd(), "output") - os.makedirs(opt_root, exist_ok=True) - output_count = 1 - - while True: - opt_filename = f"{os.path.splitext(os.path.basename(input_audio_path1))[0]}{os.path.basename(os.path.dirname(file_index))}{f0_method.capitalize()}_{output_count}.{format1}" - current_output_path = os.path.join(opt_root, opt_filename) - if not os.path.exists(current_output_path): - break - output_count += 1 - try: - if format1 in ["wav", "flac"]: - sf.write( - current_output_path, - audio_opt, - self.tgt_sr, - ) - else: - with BytesIO() as wavf: - sf.write( - wavf, - audio_opt, - self.tgt_sr, - format="wav" - ) - wavf.seek(0, 0) - with open(current_output_path, "wb") as outf: - wav2(wavf, outf, format1) - except: - info = traceback.format_exc() - end_time = time.time() - total_time = end_time - start_time - times.append(total_time) - return ( - ("Success.", index_info, times), - (tgt_sr, audio_opt), - current_output_path - ) - except: - info = traceback.format_exc() - logger.warn(info) - return ( - (info, None, [None, None, None, None]), - (None, None), - None - ) diff --git a/lib/pipeline.py b/lib/pipeline.py deleted file mode 100644 index 9a5df716..00000000 --- a/lib/pipeline.py +++ /dev/null @@ -1,773 +0,0 @@ -import os -import sys -import gc -import traceback -import logging - -logger = logging.getLogger(__name__) - -from functools import lru_cache -from time import time as ttime -from torch import Tensor -import faiss -import librosa -import numpy as np -import parselmouth -import pyworld -import torch.nn.functional as F -from scipy import signal -from tqdm import tqdm - -import random -now_dir = os.getcwd() -sys.path.append(now_dir) -import re -from functools import partial -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} -import torchcrepe # Fork Feature. 
Crepe algo for training and preprocess -from torchfcpe import spawn_bundled_infer_model -import torch -from lib.infer_libs.rmvpe import RMVPE -from lib.infer_libs.fcpe import FCPE - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class Pipeline(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - self.model_rmvpe = RMVPE(os.environ["rmvpe_model_path"], is_half=self.is_half, device=self.device) - - self.note_dict = [ - 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, - 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, - 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, - 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, - 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, - 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, - 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, - 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, - 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91, - 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53, - 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83, - 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07 - ] - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. 
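- # scale by the 99.9th-percentile magnitude so a single outlier sample
- # cannot dominate the normalization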
- x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - hop_length = kwargs.get('crepe_hop_length', 160) - model = kwargs.get('model', 'full') - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - *args, - **kwargs - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - model = kwargs.get('model', 'full') - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load(x, sr=self.sr, mono=True) - f0, _, _ = librosa.pyin(y, fmin=f0_min, fmax=f0_max, sr=self.sr) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - def get_rmvpe(self, x, *args, **kwargs): - if not hasattr(self, "model_rmvpe"): - from lib.infer.infer_libs.rmvpe import RMVPE - - logger.info( - f"Loading rmvpe model, {os.environ['rmvpe_model_path']}" - ) - self.model_rmvpe = RMVPE( - os.environ["rmvpe_model_path"], - is_half=self.is_half, - device=self.device, - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - if "privateuseone" in str(self.device): # clean ortruntime memory - del self.model_rmvpe.model - del self.model_rmvpe - logger.info("Cleaning ortruntime memory") - - return f0 - - - def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs): - if not hasattr(self, "model_rmvpe"): - from lib.infer.infer_libs.rmvpe import RMVPE - - logger.info( - f"Loading rmvpe model, {os.environ['rmvpe_model_path']}" - ) - self.model_rmvpe = RMVPE( - os.environ["rmvpe_model_path"], - is_half=self.is_half, - device=self.device, - ) - f0 = self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max) - if "privateuseone" in str(self.device): # clean ortruntime memory - del self.model_rmvpe.model - del self.model_rmvpe - logger.info("Cleaning ortruntime memory") - - return f0 - - def get_fcpe(self, x, f0_min, f0_max, p_len, *args, **kwargs): - self.model_fcpe = FCPE(os.environ["fcpe_model_path"], f0_min=f0_min, f0_max=f0_max, dtype=torch.float32, device=self.device, sampling_rate=self.sr, threshold=0.03) - f0 = self.model_fcpe.compute_f0(x, p_len=p_len) - del self.model_fcpe - gc.collect() - return f0 - - def get_torchfcpe(self, x, sr, f0_min, f0_max, p_len, *args, **kwargs): - self.model_torchfcpe = 
spawn_bundled_infer_model(device=self.device) - f0 = self.model_torchfcpe.infer( - torch.from_numpy(x).float().unsqueeze(0).unsqueeze(-1).to(self.device), - sr=sr, - decoder_mode="local_argmax", - threshold=0.006, - f0_min=f0_min, - f0_max=f0_max, - output_interp_target_length=p_len - ) - return f0.squeeze().cpu().numpy() - - def autotune_f0(self, f0): - autotuned_f0 = [] - for freq in f0: - closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)] - autotuned_f0.append(random.choice(closest_notes)) - return np.array(autotuned_f0, np.float64) - - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ): - # Get various f0 methods from input to use in the computation stack - methods_str = re.search('hybrid\[(.+)\]', methods_str) - if methods_str: # Ensure a match was found - methods = [method.strip() for method in methods_str.group(1).split('+')] - f0_computation_stack = [] - - print("Calculating f0 pitch estimations for methods: %s" % str(methods)) - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - for method in methods: - f0 = None - if method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif method == "crepe": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="full") - f0 = f0[1:] - elif method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="tiny") - f0 = f0[1:] # Get rid of extra first frame - elif method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length - ) - elif method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length, model="tiny" - ) - elif method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif method == "dio": - f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] - elif method == "rmvpe": - f0 = self.get_rmvpe(x) - f0 = f0[1:] - elif method == "fcpe_legacy": - f0 = self.get_fcpe(x, f0_min=f0_min, f0_max=f0_max, p_len=p_len) - elif method == "fcpe": - f0 = self.get_torchfcpe(x, self.sr, f0_min, f0_max, p_len) - elif method == "pyin": - f0 = self.get_f0_pyin_computation(input_audio_path, f0_min, f0_max) - # Push method to the stack - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print("Calculating hybrid median f0 from the stack of: %s" % str(methods)) - f0_median_hybrid = None - if len(f0_computation_stack) == 1: - f0_median_hybrid = f0_computation_stack[0] - else: - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - 
x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0=None, - f0_min=50, - f0_max=1100, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = f0_min - f0_max = f0_max - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = signal.medfilt(f0, 3) - elif f0_method == "dio": # Potentially Buggy? - f0, t = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - elif f0_method == "crepe": - model = "full" - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - elif f0_method == "crepe-tiny": - f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="tiny") - elif f0_method == "mangio-crepe": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length - ) - elif f0_method == "mangio-crepe-tiny": - f0 = self.get_f0_crepe_computation( - x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length, model="tiny" - ) - elif f0_method == "rmvpe": - if not hasattr(self, "model_rmvpe"): - from lib.infer.infer_libs.rmvpe import RMVPE - - logger.info( - f"Loading rmvpe model, {os.environ['rmvpe_model_path']}" - ) - self.model_rmvpe = RMVPE( - os.environ["rmvpe_model_path"], - is_half=self.is_half, - device=self.device, - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - if "privateuseone" in str(self.device): # clean ortruntime memory - del self.model_rmvpe.model - del self.model_rmvpe - logger.info("Cleaning ortruntime memory") - elif f0_method == "rmvpe+": - params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - f0 = self.get_pitch_dependant_rmvpe(**params) - elif f0_method == "pyin": - f0 = self.get_f0_pyin_computation(input_audio_path, f0_min, f0_max) - elif f0_method == "fcpe_legacy": - f0 = self.get_fcpe(x, f0_min=f0_min, f0_max=f0_max, p_len=p_len) - elif f0_method == "fcpe": - f0 = self.get_torchfcpe(x, self.sr, f0_min, f0_max, p_len) - elif "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - 
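- # Hybrid note: each method in "hybrid[a+b+...]" contributes one f0 track and
- # the final estimate is the element-wise nanmedian of the stack (see
- # get_f0_hybrid_computation above), which damps errors made by any single
- # estimator. Roughly (variable names illustrative only):
- #   stack = np.array([f0_pm, f0_rmvpe, f0_crepe])  # (n_methods, n_frames)
- #   f0 = np.nanmedian(stack, axis=0)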
#print("Autotune:", f0_autotune) - if f0_autotune == True: - print("Autotune:", f0_autotune) - f0 = self.autotune_f0(f0) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int32) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = feats.clone() - if ( - not isinstance(index, type(None)) - and not isinstance(big_npy, type(None)) - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch is not None and pitchf is not None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch is not None and pitchf is not None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - hasp = pitch is not None and pitchf is not None - arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid) - audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy() - del hasp, arg - del feats, p_len, padding_mask - if 
torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g): - t = t // window * window - if if_f0 == 1: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - pitch[:, s // window : (t + t_pad_tgt) // window], - pitchf[:, s // window : (t + t_pad_tgt) // window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - else: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_autotune, - f0_min=50, - f0_max=1100 - ): - if ( - file_index != "" - and isinstance(file_index, str) - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0, - f0_min, - f0_max - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if "mps" not in str(self.device) or "xpu" not in str(self.device): - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - - with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar: - for i, t in enumerate(opt_ts): - t = t // self.window * self.window - start = s - end = t + self.t_pad2 + self.window - audio_slice = audio_pad[start:end] - pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None - pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - s = t - pbar.update(1) - pbar.refresh() - - audio_slice = audio_pad[t:] - pitch_slice = pitch[:, 
t // self.window:] if if_f0 and t is not None else pitch - pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if tgt_sr != resample_sr >= 16000: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - print("Returning completed audio...") - return audio_opt \ No newline at end of file diff --git a/lib/split_audio.py b/lib/split_audio.py deleted file mode 100644 index 90e52c9a..00000000 --- a/lib/split_audio.py +++ /dev/null @@ -1,91 +0,0 @@ -import os -from pydub import AudioSegment -from pydub.silence import detect_silence, detect_nonsilent - -SEPERATE_DIR = os.path.join(os.getcwd(), "seperate") -TEMP_DIR = os.path.join(SEPERATE_DIR, "temp") -cache = {} - -os.makedirs(SEPERATE_DIR, exist_ok=True) -os.makedirs(TEMP_DIR, exist_ok=True) - -def cache_result(func): - def wrapper(*args, **kwargs): - key = (args, frozenset(kwargs.items())) - if key in cache: - return cache[key] - else: - result = func(*args, **kwargs) - cache[key] = result - return result - return wrapper - -def get_non_silent(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence): - """ - Function to get non-silent parts of the audio. - """ - nonsilent_ranges = detect_nonsilent(audio, min_silence_len=min_silence, silence_thresh=silence_thresh, seek_step=seek_step) - nonsilent_files = [] - for index, range in enumerate(nonsilent_ranges): - nonsilent_name = os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}", f"nonsilent{index}-{audio_name}.wav") - start, end = range[0] - keep_silence, range[1] + keep_silence - audio[start:end].export(nonsilent_name, format="wav") - nonsilent_files.append(nonsilent_name) - return nonsilent_files - -def get_silence(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence): - """ - Function to get silent parts of the audio. - """ - silence_ranges = detect_silence(audio, min_silence_len=min_silence, silence_thresh=silence_thresh, seek_step=seek_step) - silence_files = [] - for index, range in enumerate(silence_ranges): - silence_name = os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}", f"silence{index}-{audio_name}.wav") - start, end = range[0] + keep_silence, range[1] - keep_silence - audio[start:end].export(silence_name, format="wav") - silence_files.append(silence_name) - return silence_files - -@cache_result -def split_silence_nonsilent(input_path, min_silence=500, silence_thresh=-40, seek_step=1, keep_silence=100): - """ - Function to split the audio into silent and non-silent parts. 
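- One second of silence is padded to each end before detection; combine_silence_nonsilent trims it back off.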
- """ - audio_name = os.path.splitext(os.path.basename(input_path))[0] - os.makedirs(os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}"), exist_ok=True) - audio = AudioSegment.silent(duration=1000) + AudioSegment.from_file(input_path) + AudioSegment.silent(duration=1000) - silence_files = get_silence(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence) - nonsilent_files = get_non_silent(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence) - return silence_files, nonsilent_files - -def adjust_audio_lengths(original_audios, inferred_audios): - """ - Function to adjust the lengths of the inferred audio files list to match the original audio files length. - """ - adjusted_audios = [] - for original_audio, inferred_audio in zip(original_audios, inferred_audios): - audio_1 = AudioSegment.from_file(original_audio) - audio_2 = AudioSegment.from_file(inferred_audio) - - if len(audio_1) > len(audio_2): - audio_2 += AudioSegment.silent(duration=len(audio_1) - len(audio_2)) - else: - audio_2 = audio_2[:len(audio_1)] - - adjusted_file = os.path.join(TEMP_DIR, f"adjusted-{os.path.basename(inferred_audio)}") - audio_2.export(adjusted_file, format="wav") - adjusted_audios.append(adjusted_file) - - return adjusted_audios - -def combine_silence_nonsilent(silence_files, nonsilent_files, keep_silence, output): - """ - Function to combine the silent and non-silent parts of the audio. - """ - combined = AudioSegment.empty() - for silence, nonsilent in zip(silence_files, nonsilent_files): - combined += AudioSegment.from_wav(silence) + AudioSegment.from_wav(nonsilent) - combined += AudioSegment.from_wav(silence_files[-1]) - combined = AudioSegment.silent(duration=keep_silence) + combined[1000:-1000] + AudioSegment.silent(duration=keep_silence) - combined.export(output, format="wav") - return output \ No newline at end of file From 9a97c6a5b3fe639c60d83f1a0931ffc1cbf11fff Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 03:01:24 -0800 Subject: [PATCH 3/9] Update models.py --- models.py | 38 +++----------------------------------- 1 file changed, 3 insertions(+), 35 deletions(-) diff --git a/models.py b/models.py index 7401db5f..ebe76c7c 100644 --- a/models.py +++ b/models.py @@ -1,37 +1,5 @@ -import os -import requests -from pathlib import Path +from rvc.lib.tools.prerequisites_download import prerequisites_download_pipeline -# Function to download file -def download_file(url, dest_path): - try: - response = requests.get(url, stream=True) - response.raise_for_status() # Check if the request was successful - with open(dest_path, 'wb') as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - - print(f"Successfully downloaded {dest_path}") - - except requests.exceptions.RequestException as e: - print(f"Error downloading {url}: {e}") - -# Directory structure -base_dir = "assets" -directories = ["fcpe", "hubert", "rmvpe"] -urls = [ - "https://huggingface.co/datasets/ylzz1997/rmvpe_pretrain_model/resolve/main/fcpe.pt", - "https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/hubert_base.pt", - "https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/rmvpe.pt" -] - -# Ensure directories exist -for directory in directories: - os.makedirs(Path(base_dir) / directory, exist_ok=True) - -# Download the files -for url, directory in zip(urls, directories): - file_name = url.split("/")[-1] - dest_path = Path(base_dir) / directory / file_name - download_file(url, dest_path) 
+print("downloading models...") +prerequisites_download_pipeline(models=True, exe=True) From 7b33671ceab7f9014b14fcde127dce0144b6b3f4 Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 03:01:44 -0800 Subject: [PATCH 4/9] Delete stftpitchshift --- stftpitchshift | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 stftpitchshift diff --git a/stftpitchshift b/stftpitchshift deleted file mode 100644 index 4f62e315..00000000 --- a/stftpitchshift +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:eb2f50ea8e5ca1a11a587f11f25ba9182f9b24e2367ac480f430b3f04062782e -size 1822104 From 0b4e7ff6872dea69e9cf0902391c56301a5c27e3 Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 03:02:09 -0800 Subject: [PATCH 5/9] Delete audio_input directory --- audio_input/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 audio_input/.gitkeep diff --git a/audio_input/.gitkeep b/audio_input/.gitkeep deleted file mode 100644 index e69de29b..00000000 From d76c33bed668e3f19d942b2393349f524e49eeac Mon Sep 17 00:00:00 2001 From: kiurobox Date: Tue, 4 Feb 2025 03:04:05 -0800 Subject: [PATCH 6/9] RVC V2 --- logs/mute/f0/mute.wav.npy | Bin 0 -> 1332 bytes logs/mute/f0_voiced/mute.wav.npy | Bin 0 -> 2536 bytes logs/mute/sliced_audios/mute32000.wav | Bin 0 -> 192078 bytes logs/mute/sliced_audios/mute40000.wav | Bin 0 -> 240078 bytes logs/mute/sliced_audios/mute48000.wav | Bin 0 -> 288078 bytes logs/mute/sliced_audios_16k/mute.wav | Bin 0 -> 96078 bytes logs/mute/v1_extracted/mute.npy | Bin 0 -> 152704 bytes logs/mute/v2_extracted/mute.npy | Bin 0 -> 457856 bytes rvc/configs/config.py | 179 + rvc/configs/v1/32000.json | 47 + rvc/configs/v1/40000.json | 47 + rvc/configs/v1/48000.json | 47 + rvc/configs/v2/32000.json | 43 + rvc/configs/v2/40000.json | 43 + rvc/configs/v2/48000.json | 43 + rvc/infer/infer.py | 495 ++ rvc/infer/pipeline.py | 708 +++ rvc/lib/algorithm/__init__.py | 0 rvc/lib/algorithm/attentions.py | 243 + rvc/lib/algorithm/commons.py | 207 + rvc/lib/algorithm/discriminators.py | 160 + rvc/lib/algorithm/encoders.py | 218 + rvc/lib/algorithm/generators.py | 231 + rvc/lib/algorithm/modules.py | 124 + rvc/lib/algorithm/normalization.py | 31 + rvc/lib/algorithm/nsf.py | 196 + rvc/lib/algorithm/residuals.py | 250 + rvc/lib/algorithm/synthesizers.py | 237 + rvc/lib/predictors/F0Extractor.py | 100 + rvc/lib/predictors/FCPE.py | 920 ++++ rvc/lib/predictors/RMVPE.py | 560 +++ rvc/lib/tools/analyzer.py | 76 + rvc/lib/tools/gdown.py | 354 ++ rvc/lib/tools/launch_tensorboard.py | 21 + rvc/lib/tools/model_download.py | 385 ++ rvc/lib/tools/prerequisites_download.py | 104 + rvc/lib/tools/pretrained_selector.py | 63 + rvc/lib/tools/split_audio.py | 56 + rvc/lib/tools/tts.py | 29 + rvc/lib/tools/tts_voices.json | 5748 +++++++++++++++++++++++ rvc/lib/utils.py | 137 + rvc/lib/zluda.py | 43 + 42 files changed, 12145 insertions(+) create mode 100644 logs/mute/f0/mute.wav.npy create mode 100644 logs/mute/f0_voiced/mute.wav.npy create mode 100644 logs/mute/sliced_audios/mute32000.wav create mode 100644 logs/mute/sliced_audios/mute40000.wav create mode 100644 logs/mute/sliced_audios/mute48000.wav create mode 100644 logs/mute/sliced_audios_16k/mute.wav create mode 100644 logs/mute/v1_extracted/mute.npy create mode 100644 logs/mute/v2_extracted/mute.npy create mode 100644 rvc/configs/config.py create mode 100644 rvc/configs/v1/32000.json create mode 100644 rvc/configs/v1/40000.json create mode 100644 rvc/configs/v1/48000.json create mode 100644 
rvc/configs/v2/32000.json
 create mode 100644 rvc/configs/v2/40000.json
 create mode 100644 rvc/configs/v2/48000.json
 create mode 100644 rvc/infer/infer.py
 create mode 100644 rvc/infer/pipeline.py
 create mode 100644 rvc/lib/algorithm/__init__.py
 create mode 100644 rvc/lib/algorithm/attentions.py
 create mode 100644 rvc/lib/algorithm/commons.py
 create mode 100644 rvc/lib/algorithm/discriminators.py
 create mode 100644 rvc/lib/algorithm/encoders.py
 create mode 100644 rvc/lib/algorithm/generators.py
 create mode 100644 rvc/lib/algorithm/modules.py
 create mode 100644 rvc/lib/algorithm/normalization.py
 create mode 100644 rvc/lib/algorithm/nsf.py
 create mode 100644 rvc/lib/algorithm/residuals.py
 create mode 100644 rvc/lib/algorithm/synthesizers.py
 create mode 100644 rvc/lib/predictors/F0Extractor.py
 create mode 100644 rvc/lib/predictors/FCPE.py
 create mode 100644 rvc/lib/predictors/RMVPE.py
 create mode 100644 rvc/lib/tools/analyzer.py
 create mode 100644 rvc/lib/tools/gdown.py
 create mode 100644 rvc/lib/tools/launch_tensorboard.py
 create mode 100644 rvc/lib/tools/model_download.py
 create mode 100644 rvc/lib/tools/prerequisites_download.py
 create mode 100644 rvc/lib/tools/pretrained_selector.py
 create mode 100644 rvc/lib/tools/split_audio.py
 create mode 100644 rvc/lib/tools/tts.py
 create mode 100644 rvc/lib/tools/tts_voices.json
 create mode 100644 rvc/lib/utils.py
 create mode 100644 rvc/lib/zluda.py

diff --git a/logs/mute/f0/mute.wav.npy b/logs/mute/f0/mute.wav.npy
new file mode 100644
index 0000000000000000000000000000000000000000..a7ecfbf9295b11a58fa1316e03ac3d0e85fa3ad6
GIT binary patch
literal 1332
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/logs/mute/f0_voiced/mute.wav.npy b/logs/mute/f0_voiced/mute.wav.npy
new file mode 100644
index 0000000000000000000000000000000000000000..cf5c21bd4c9bfca9d8a39708454eee44757e608d
GIT binary patch
literal 2536
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/logs/mute/sliced_audios/mute32000.wav b/logs/mute/sliced_audios/mute32000.wav
new file mode 100644
index 0000000000000000000000000000000000000000..b4b5029205bf72dee5856bbe0c65c34337dc8dd4
GIT binary patch
literal 192078
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

[the hunk for logs/mute/sliced_audios/mute40000.wav (Bin 0 -> 240078 bytes) is missing from this copy]

diff --git a/logs/mute/sliced_audios/mute48000.wav b/logs/mute/sliced_audios/mute48000.wav
new file mode 100644
index 0000000000000000000000000000000000000000..72822a01251e77d7d2a4a7da9d94805426829083
GIT binary patch
literal 288078
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/logs/mute/sliced_audios_16k/mute.wav b/logs/mute/sliced_audios_16k/mute.wav
new file mode 100644
index 0000000000000000000000000000000000000000..27a7d638558539c521aacf8c0f34bd0d4816aa9d
GIT binary patch
literal 96078
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/logs/mute/v1_extracted/mute.npy b/logs/mute/v1_extracted/mute.npy
new file mode 100644
index 0000000000000000000000000000000000000000..ffe35e78401ba03731e2ebe48ed7a73d94198a8f
GIT binary patch
literal 152704
[base85-encoded binary data omitted; the patch is truncated here, and the hunk for logs/mute/v2_extracted/mute.npy (Bin 0 -> 457856 bytes) is not present]
zdbS~VGHm9AP(Q00GT3=@$GbPWM&gg%Yt_6}zPiY_I_PQ%!%vQl?Z`Sj{YYCExnulA z+uP27y(ix5$e6-@aE&MsF(Y@J*<{J$KZdHJ#F~#>IU^s+e|TqF~5uqc4vGY{>LEsk7M?R_tA>p@S5Kf zveVnywHo`b(J$`tpL}^3-^zt(E41o&aV=~g@$0K-uVJG+$h*P*DSflG!JM5I*1U9e zw5(%hFr5rFHG8op5B0Avb{pH|Z+aKLkL{0!(_9!` zz(0@Ko71Xw+b!t43sM-D(HMRG$U42Y{N2ZY$!}}5{2aR6_bB8}K8+mj9EEHD_!_j6 zVd^86*4B}F8`f9h{jG!z+2v%fNq@-avHa7D5B8{6-?uC`mS-cHs{gdlB6>fkYeReM z*l?{MuMBff^ku**r~54Tf4F82-!F`|t$W|cxY^V33b3@--j4p`=>6VUSVGQ!_`bd>!6UAHw`@E97sn1Kjr;OATUKulY6}n;XfRLasTfl#9_I zx_v`2{PsDzliV-iGDef`#9t=vcIi8rqi_>xtnU+lCRd# zz9pNUaxOQkx|Gs)5*bt2wJogbNpUxO|7VW)u(tgD#~bWJZa!)q^(uM%R6f|?`%3KU zP2T1(txrx3rqf{TB3@z|{x>8Fu^VZ-h0U|b_>CXcjZ#iTbND`t`8ND%jtcAL@^H4{ zO=rNm5uP7mnLz$W@?v*%&EQ;*Ef?YoGJdB2AKPAHvAMmx3u}qQcrV(bzYC^;wd(&( zs#;m6W0vu&rbLgBt9C@E!f~G(c#xcY7VQx7uOZ_}SdV0@+>Yc~aR5x8i^~Z+_&ssG zdS5kBPqH7_`86MX%Lcg_J&XTIv1v|z#$Rufe-giJBo`OJI81-2+YNM?8;V2i_vFu> z{IoOMm!f~jc^_DFat4z#i+#u0UoN*g!_X$kl&jnEdoyGDA{fca!?-h6iq?<2oW!*9YS`0wA;#u&G}1y6erVjCcXE=EEmG>V~TgtJ5E;;H@4=x zu}R%6C!=>yFRW3LIp`bM(~jLg8(Ra&{*B&CoPUh}P5ye(Mt{1twi>x=X*362-sHVz zaU4vBu^;{Om^Cpxt=QL`j8S~MAG{0sCDabJHPp!PEUvoV40uZH%+$VCl<#IgkfT>yr`Y!G-vHkR5y)>i8qh&24E1e!qnd zazhP{H;4Hk$2WSXMvRkAWZY0!oA`z;Wk3Epk&pHwXFnL!k?{Ppgfrd1x$)rR57mEc zR?jkjZSXs3{%<1lYPSAu%kaV-=Ds4x@2Ac_Q?Gt!kcYM36Z~{LJI~j?J=&M}W8xk z!IvALA4~sudX1~_TZ?RE{nl<}TXx-{t((m_j|RYWxb1i~*!T?db9m1&lQ+qlbncF> zKOXrSd{#UK!OBZD~Hw_n||4 zWB$%wl}GQh;dkqzL1IoQz9IL<#^v8zeGSG{wRKT%c43=Z z5UCeMlj|Qo4tW|szZ^p%XX2sSJabG}BKJ{pc5vRxR-n03-VUT|g8L3xlY6=5r{mxa z<66y1BU`ASVj6#fcb&R57~CJ~S9^-#Xn&CR_Q_kut9^JL5`GVpnM2}m&9XL{nEeCz zNn17$mTlSg7#Uv~i!JRRz`H(M!x)xd$s1_*7n4Ky^clXAhtV>8-JP$)SBL8F$c8R- zA1Kf6(`LRX_SC;7|H!BCTt6~yLv2k)=>Nt3PkJZH^Uv75uKlfW4tK5o5?`vnC0r$Y z!n!YSZEM5!jqu&Y4;z~|%rViX+WOjlM7NrK18hy|7W(o9a^7sLzQrBv$yVXpgw9>z zJ&X+VO}Ug`dgIx;v3dtIn#cFeO;%LzlKfoholevlqKY z*g`%Y#9ynT8E1n(Tu(&#Bjy&zkJPIFaZS{%WCHy=^NF#T4(0PD^k@t3<(DJTj-u-h z+r6+F^T`yviMsSO+RkX2*Uc5iN;iB1;QvOy+$wuH4|z{l_KM>!a>YE9*r$)PVIG++ z;rx#O4#am1zaN3No3+XmeRcXi5aXBmup?bl$-0{Cj(lBMTiwo{W@K$DN5-S)@1i5W zzE0*eG3pBIT6Jp%`NCK6-7vJ#Uawc*M@gE~o0GF0K6NGBa}M_pV?K|LX8#uS974yH z{IWj1LFTsdd=MMevBgnHR5BE>P{ec{w z>^WoV7oYb0x-(%~mxHq{Ez+s)5Jw~^fWKWUP#D?AY;4(PBpiADA zOZe<2y6LH&BdXQmx+D39AODB`4DyZXq=U8`y>&rYJ7r7B-I#qF!m%@5zsjE(_5<0n z5RKf8ezSd#w?Ew<+aHR@m`q3VkNG0}W-#glqiCxFS zu^5fHI9z*$>)!Gy{`*QU_J->=`^V4?rPqCY9q=zNb)RzWuj=1F|QhH%{&?z<&tJ3fo-ZZMzAmz&GUk@O6O^Q=YH|HULf z!up3f#~Lc?tS_p2k6k~#$)@;^65~7QIS;S3ad_Sxi+8xTEk~Otejxih_KfGx5X1G* zt;`2&(|@lqZ!9JY>eaoo@J;0SjmGL8f4P3G!VqFvTuJ_P?R;nAQ1|rxKsO&2YJV}A zAMfyf@4bAWW~Kx9ejPsDkKNDXU0ZBUV~1%{BPZOQ6E{!aMYu{q?* z^?a}thF$T6dOrhB^&q_y#(&A959#qcx#}8nM?Na>%f0elye(mDMrX+RUiNF~yO55} z_>V8cwS4hlqkE6mG-98sqv1NekQ>GKY`T)a)`0IT`ZmC0%%$eIEJxD`_Kq-K&yQP^ zI|a_0>1$;>Uc5(%X}DL$mh3LF_HC@*KaEG?wI23k_5c626xksc=E|vQ{CX+c>+p;g zJ2<0JwufQdO?zjwFhyb(o})(dVTj>6r5;7l+TkME#@e`_VI99N^7|u+8|2UnDQ&_P+XtvHf9v#%sDIY+dQu z3SF4PMtFACf!t@E%jry>rD|hVubU6;m#8x<@$YClI?nFM2EAbKR$@tH_pKL;ww&*r-y_K%*wOxwu2>SQHH_cw|7C)1B6%1@E z+ToQ0(L*(J7aMxwTa!-XuV80%0lwAQdVvhIE+Kk-?)LX+=F^8HOPy{3)M@cvM7p5s;O)%9)h zxbuIJbtDW6V35DzesT4i%kgCP<~M7pY$#b9z|sYMdos6mY|c#f#dlK8|E=Kbg?=tf zw|RHbm@PYy_a}U6XS9@^zrfv#9mZ+2CoF%vr}BjUugK(^yffb{s`<|o`-g2|EUasu zeX_CgU$hYn{pl6&=yf*mM>K*@Pv*xfv}(PXP%MZ(snZTr(<&Mr>C zZ;s49w4Fe=u~C>`(i71d|M9VCUf`2Mj3>6|9kt27=y>fb(z7;OV2$U&*vvWI`J0W^ z{{-ig#LxK8UH_)?zqk|aeGR@dX4?tA>tKEbb~Pi!Jllou{WB6zoImTGhT)fvrb20go@r0Vy4b465Gv;FBFWj5R zKcTN3{p;7O-!!LV_@Fhc?~u{k{P#wX1&3NzEGGLx`%m$=Cv!d-tK&V19`_NV!Dw$M zvz5)aPQ&kQiUZJwwOgojYF>II-`+vK_p`!W=tgVx-IqvhFKVz&(YFEmq~^Xl-_GF6 zzr@G0(d1%r6rZvY=C$bfp3c4SJwR?Nn>d#*kvRhI>H0g8x3L^xTXL&5`B8Kre;f>A 
zkaj0a9>>O67Uqv#(Y=TNdFMNm*TUFjb7Gwq;!}K8^S-gRtg-sdXx5hO4aIgA48~Mp z?U=XFw$%S-G$-p<*FASzgM2~W6XedOZx-I;_-neo8j<)8M*J?@POyGzfPW`Bw6^^= zHMcr)u(_kKba~`RwX0R@bpJ$hE#H?tt5P3)d{M{4QoY9)kY{{uoAo z#-18I@+zL|IE(`_i#dGR((!%hu7b<8XVHn=i8lJf{hM?Y8~dRNetkq9ZA|A5Y;5J& z*bCS4#eTNtF#cj}8WZtWe6@)jxSGA|+pC}D68-lZFW>z?op~eMmd)>ycOlI07*mIk zy9@rS(WnCj8F{1Q|FU@|8P_;A9zu@B*UG6a9bX39ApHlR&1?(t4J7+G@=88NU-Z(h z2A8YAVs5YAf9Olb1N=5m|BGy2nXR{o7hOq9KD*3zp!TVC&j+++Q@)AhFY@AZeAl}BML2Fz*1-W3+PRwUc~tT1P% zi^;xDELVnaYyP@{uh+(Ru;Ww7S`|Jutr!FI#dZI02M3zsKaW?xNvYO+f9q=j=U;3a zMn_lYGsKI$cp`u8z|Il1>N+gEQ(`X|A$4W13PZ_hui$!Ml; zNzJ`(>umOw&%iz%?p4q%Z1B4dzIukgkArPzvc;|Z51WENI-|Xr{0`b4!ZQ+%ukpyg zY?ihs$aQZoJew+}X#0qq)x{z7uf)%5!Thl{e96Ps$NzQzCya}j{SWb@v0TWF;uUq` zJ9$C6eEsCn9Cc)m2KNf^ zjo^3JRN+2U#%JLgCbMCHHYnEI)>7Jr@Fw8#kuspOxA~V z9?8FN;M-4Z=_>ldxIOwXr>{ZoA!MDWe;L~5XfBm!`?FzBK3<0m^HFgxA9kQ?nr$!~ z&%@dqzI>5yb@RdfWXP4QpRwM_@h|8v)W1^A|2*<}AAIe|>4ttdd=vF|q~kuZI1F7o zItRhhlP})WXPo9w(mg=`0{${DSJw|PMs4LB|0jdVQFqF-=sQ-u{)Io~ZdiYWwL{Xz z{vw!MXO#EzX-jg3TF;6{bP_oo^~QuH9{K3-SOnq#q3=<>Rv~ACL9mL+W}AT%t!MUAzUKggO+~4r)zU_r|Z2w>MmKYSnjkN@FKpi~o*dgKL%W+&YtI V#Ry|!x;FWh`@hds8;+Y=3{j%k$((GPBBem02^HWFJN!ddwk*pU^0}I=W%&Gtau>jLWy~ z8gE^9_U>DEinl)dlFP5Wy!XYYU2^%EXNC6tdtZ3PS=z5SxA&!Ix!-1&-S_Gg@3}{p zZ9By`#{XacnnlTG4N-Y^HA-%)kD{OIqjLMYDC<%m8%J*Y#ZlbH{nhZbsgH{5+3-8Mgim6ag^TU`YZOW+dYcs zqxk~g>g?H>UF?k~ut8tCR9|O)JYQRLI%4O=4N=-1=A+3N0$(Ti=2oM)H(5)oQT8sn zIec}8_Kq+NgZYAPQT*tlC?DGpW#pH4(szpM|J6mwoAB{p`XnE{z_!lpxRy<0>!b7n z?T=`Wx<}ce+FQYXHVmg$qx2^C8QEL8ew)m`i=wgt{axB_Wp59BH^I@2ue)gDqoj$p z!^l{~&rKIc`Cl;1qjLfJI(Dr`*6D26hp#)sf2_7uc<$oUhsj>zI??tu|BMlH81li3 zqU8HJej)!6Jn-eMocTHbmH%1+eY_vE^_DAy=dnU2vUUCm7|3f?j$=KY{(s3Kv!H#p$-a*dp^-;6{?V0$x{-1s8 zqInXgeYH>4em8lp<3r#$22MGb-pao5_~ll10NT77r31+tru`_`zT=x+&~?y%5}bq4 z8pH8Y7>tSZ4ZfYG-#Ck^{Js-iL$v=;U(3m|BMf4n{!RZG+V5t|ck<~3Im@R%u9?X_Sbgd1`9^}f!=$md)I*ct#(EiEZ6Zv!( zv44wC_Rv1S(UXE=etJ5NLbp?Ctr*0V0ntZ)4E6bFJzCA)5c9Q zy*|q2Q!37Bq+Nc;jbW85IlE&=T4>v#E-GU6;w11__IY(v_IN6$Q$i^ zyYscM-N()i=zkpTu6%bq`wye%Sa{md{{X&S)wVm)s4dCiWQ{=gw)Sn<73#`3dE3cZ zt;oXIxS4L_syNp@Uq_3(s?o;k1bQD?6lI?q-&5dy72a0tI8Oa)56`pYC+zD-HycBJ ziR##PmmGNQ|F9j(=ALSISN9Q|9bMm#W(&0PF)J%k`YF54lRK|eqWE-uFT&8Sdz7?e z!}~D5!sc1^+Q@o|9`VdSkT3mU4zZQz`Gzn|g4g)TzF8Q>gJGM>H~bcV!AAL)oWvIg zk|oaRH`+&#Wqv95A^RTKTJTXXxDGT{ALAeOBKfjvY|s;G_djgjiJs}s#$9guCp3pMN%IQC-8STZlg zYraZ?O^4M->BlfMVp}WcLHLg%>ty^7qI+6gPQq(!#e5uUP{^-%XLRx;E7)^9nQdKv zFP0PJ^279Qhi5Q+k1mYL4_up%LmcBRVO?%cAAqJKc`&4Wk-V#YOM0)=elu(%D^WbN z8kJAL^lP^$+k(%|sYDsOqTgWJlk7wIV!ms6nk8uOBWryzU{A22R2S1Z@PwS2E+_A% z|5p4XV7?daedev%bgghNAIqP}ytERfzo|{eeEDQu6t#!P_=#7?+uvC&&sy=>dvfSM zuFdmFQ+nh^GKxPJit$8#xSssE3!~^VIbh6X%d{Pf{vJNvf~;}%@|`?(2000TN1tm$ z6XvD-$AwY;m3}eLo+0~iI3Gt-@8}%#$mhS|9K^Qc$h#Zgm9YGX{!;Nc5S@8G>M4KP zSB$fTQMNTbKjRbY{4UoME46tcSMQ=W3!=if$gZMG-OB!gd4~2&$oqtzZP`18-rMkR z#HSnU+tOHbE~ksbE0w4?4ed>2KH4pc=2xOj{VLngbsm|W$t&D%2Fp%tnSx#n;zs;? 
zwDvEJk!9q_sd6Lq=hAhHwv~&b^a`?mhG|`V8`Aq?U6h}VcX7Sk#&ZlDxA55=+R&tr z;;V{72lV^94{L!@=oY%?+vsI}=EL$MSeo-gSFzd=Zuy=BJIo2u{_<#`bI2R{5|4+e zH%vRDACGpV^QPM6O@c0wM_|=#&pKG&rEeqs*3C%^z8J>-Zu~t=UmtmM0bGO0dl}umawlN9hrXFz zqf%TVV<3;KQFJky(XgH_USeB3M83LOzHfb*vd7vXU4l=pWY%S+^-rA9wL3eE*PMTo zEAc$2&G^ci$o);_#VYzfBCiqbIbRy5*)6cj{ajr0A?#mfobSiy!}b3qR&uI%hHpM` z|0}-9babS<7ysT;jq*pG&F^t{*T=zi6~BEZ{`#VB)}&)A=1^)a2Z?_2ZPP&WKs ziL!cp#$kRfd&jxAUaQrOmt1dRUO7b_YVP_gH0&;|m$D1#Igd{tc8E##3!V>&=(!d zSMod?LH;KEuz>Chte0Nli}&0YWZva$-iiMMqnOs#61T%Pl#lw0r+g^Sz`s75<#+L5 zLsTp`j^s;xn|9-+tfzA>TJu7-A06%Z?R2>p;>&O0*{ycz#GW+t=VPtLt=K=cKRik1ax<|eJ?rxGYOcG`bx4EtFf8-==OeOyMLVi${Uy$C z>L05O=Co&hR4gI$b$T~-oy%rTpt$CLjaoak zAKE*Tv86aIgZ~iiL&fqbHXfu+Z1Q8t;GZb3kMaxoBKZFYd3YZz7uHAVHT-od9{vgA zD6IdZlkp8lyPS^~(|Z%SyLXF{ar&BQH;3mt;~%Vj6`!4f#@taXXJ@Dp7iphNMo02J z6Gg7G!FZox<5aln(0}WCf}^86e-X_L?Z#rPznsRFkL91*6rRP(=4u1p@a&Pl?>G(K zx%4g4b_71To_^pyjAOD>KFo~$^mF>z9iHK$R{VCL{=djw+gVQKd!oHVjT-|;@L$NK zQ!D0ZKB>~NKO6e#yN&E;8=`n0_d7a|P`~&p?rPjl!V_xS#%Sb8dNb(0=9rN-kjdY+0>EU@-OQnKIf9T2^*}}b9)@=UG7(Nw4+}hM*lU&HbDP|`^OeV z*=h7yQy0cSG2C@TM^k+* zBBqzXJxpC}2IF|NIl0%;SLMgX_4b9(tGDTQ+WW$vv#}9*))whyd>hsc?a{6#uhjXl z)=15<$*J|8SM+T}*J;M?kNEjD{eiy+(77j`Gw{o?IN-jJo%iYATbnVKJxzu&5O1%o zO74YtT54aSUOu(J90h9^*aov>fcTbtq0Ywx@okH5c-JV|h}?@D@;n*OKOa}4{3G!X zK0lui)vT~y%HDvn-@GWZzY(ho(N*ZahjZ78bq|_}Wb6djzT`aYTqWZ(cowkP`Zs&C zTa-s|z?M$eKbA}}sm1bJco&nkf(`bJYI{+ATt|HKB|0^`Y)1Z*?pG(1FT%PnUC}Kn zKGoKjoKSb!mSlVpjw6ka{p7DXER3=2YW)4kX^giQ*=tpzvL75X`1Vz853qrpoR^kLjQ2ZQ-5tX-oI>Z07brx*eQz4_hvE|n{*(DPAkt~C}) z>$j|(zSjJ)Jw4s{dIcX(a(#Ox%65clCI0`2xjJ9lvos#^pJACz*4cEn(te}1DXuru z-X6a=X4@J=?O}PA4NLe1wp7hbHdlkyk5qnS>%g9pe|~+GEQV`m`Zgqw-QhVsvY(J2 zj^+h&3$~~^VLcdML)VeUNPBCu^Wc=%p;i_fpba@KKZ{UfccSMMaXx{c$LnjurxUb? zJ)pPwa1*%y%Xj_heO?Z%P3IH(`7r;2+>PPg1lk=%aK8Y)yWkpE zH4fO<6VGI_!nj{uyESiZepyCOJNoz0HWOW>M)FO(g6x<-!v6GpnAfb>KS8?#J?lFT zagY(ePToEEVmTru%fqQZjZ@C*TgYr_;$@lbkd8cQ!f0-G4<+9rYd=xz% zMQzDxVB2Qe&OviKOzyK&$PMdn@hy)Rvk`QE0skXppo!LNsO=}k8J`Tn(^)+2=@mWT zn?nC!G!L<5_wLq7uEVqap=jQ8eFa>nipTYMZ-qm?7Jn>?;spQI^s*y*j=lwSPbK?4 zId?xlpUW?2$u0Ve-(XflqZaTRE7^HuSdT@QlHG$3LY)3&Q$PJ;5Fe?p4g1uiB%ncLSv6LM%-B;1}hWi8d??u*K>=5^Kt~z&8x7z#?@pV34 z9KtilD`>)4*puxo`Rj4HW?swt@x7c$`#0Ffv;JwS?N4#rvl2z0(bpR0;b@Kbut#3J zNY6(4*j}D#ehvBj6L}Rf#32iF2cJhf!?+iIYm8(9dq%Nmd&kG@gg@UJ{bW8`p%&JY z`yd>X9ea?0Czb2vncBDFw*$4ENZ;=48ADeq*FVB_Hu_m)hxz12ev0JY=hgu7E${7K z{Vs2U;Y=9!6_>B8wY`G42~3~jPvO{9TmPzcw(|w>}P_3=qO0_TfM4RzgJVUqq&9|kipSFEr4&zE*r)oy|6}c6% z7Lg;rv$e&+Gf#0Tn%&tvn_r*T-)vBfeON9Ru)Pjb~7Olz1MA zzlLcRzB}NVga>_i4-#F5N4+Rs)<4MgX~zAXY{{)Rb9`c#hjHb3sd$mRuCQ4bM~B*9 zoQ6((((|kKqUd}{{}uE%hud|m4kYdAyocRe>kI3O1Kk zmQswrgY{OjdpV9*Lry1OPG#%64mrV3JF&Gb^?I|70H7zrth98Tc}OD8H{* zBeTc+l85@J?xr1`)#=#!KRZ&*cmR%@$-L3Ivw8M!dep>n2Fzj~jfG{nb*a3`#WF6m zjU(?>SXU>nv+GOf`&R!rz8&EhtX&L>GxV?0cNabTyMCGvhLO{e{maBLuuZKOpyaI=+C>b8LDM zznp}AINc}c@5R4&vp*qIjzrHnyjLvVbKXPlnQzP8$-bSvA14UC4e-`R_*qcZv)A1y`4$v;>@ypI3h#{WKB|AuQnaoL1?b*60Qd?f4xv<-3`6y%dPq!J}v$ohetkMzrr@Vr&X ztNc|ow>nzWJzQ)iuxT{iQ?$*+dju@2$(K;`PDQi1^~Scw{}$vwB@e1_SXU(%lRKhP zd-ssh9rJN=D!prvb0vM#;I8MtCFI;dj@-+{J09tJ3jS8k+v(eZ{I~cptXG5nEy%fv zZ`H*3Nw{`{`xCq|`Y+)PcI-^wFg(UVGLAm^6(8o@pFTYK4z9OHe=Yg1c#rU_d^#Cz zQ}>M=@+ay?)}gM$I5-B+3F@Oggj@|M*CqR9W2+aOyWm|@?B!E>D7{T8QFIFX#`D8A z;`3T1N)rcLvUMurNA9EKeTGk+56=?CiR=zBe;9U0n6sj(WG!(1!rt_k+E$@$>0D3$ zFmfJs9lrng*_!$lKG{zG*mFvZ(~wh1D`RR?bm9`7M0O{%U%Fn3uRr>i^?d=$L~R?< zYn;~h-Q35IyKh6r*ZRWR?p684XUPVYs5rP<`*tCHPORBkOsGa_$iwZ`8(7kDc;>ME zFml8#eW~6z68M%mUT*LXwc^>;F-U%2$8J7IZ&e?gv1>6uM(p|0+&-Di!}xeVdRM|a zfj(m*`+z?FDc03L8kRlr+^qj$v3L>YN32)zhB(Kok$)h1b)($T`FlQ@Mjjhdv5nQ& 
zY-{ykrFF<-WWPqicFjOHTmK05h*1%DkK&WrIhx+F>cM9CT9Y>n-E_8H z=e~4377jiszY>%CVf~uizT}gY7(2DSsge3-tD}41*<5=QWAH_KejzU-;~MtJ`9v+w zKT|(1WXqPuNd&WXd)gp(y`L#AsQ0|hzo{I)jUW23@iM;Oi@lrimHW7Z_5Wt-%I|z) zoTPK~y{5gt`ubsg6!+3@JcSyR@khEBIY(EbcnLc17N^Hse)ec1nmdsA2%oaJL8_rn@Hf0B-Q68Z-xFQ z?Ty4MjIE2v5})ucBHvaGI1gQGbcZ}^CN69CZj{pjUa#SQ-6-+qquU^iZ(ed%!y&!h!e@p>@!$NvxA=F7MX zJ64Huf!DPo3v@`VZ6YSvT^&svM=R ztq0$Ohz0Rk%ILo2*Xgc=RzY9Ay zG|q;(ejlc5o#k0JRct!baaDa39}edR;ygg#Jr4Cgznb2c9c!WuK715!sDbUYS!1Lj zkM^v1PYUl$*uG@*hUn&!*%<#-+WsPY7%Zt=z67>o_~L4~$V_HW3ImcX%=b%eEhqV7aN2W*)<4&R|AW62l*&zW${p~ILjo0BJY+3GMJUSE6P zk+mSpSSXB(=ooeK&}vk)!?yvx@ceuZ`nBmjl&>#C+lb!v#Q8pSYF+vh-DmUFr||P( zSaT%k(tVAY)nGWg;@M5x1br>ZX~mY1(?j@X8NZJ}`vAJGV%W5+T1ehQbiWSo3;K7~ z$JV?K_6$u`|32jW%=Jqiyw`33CmN$+;u?FrGfw>d^%I)L2H=)Bv^`8D2BO@lFOA|L;(dhbH^ zp=4aBO-%^3rzquq#r1p#+|l{wg^k&AxxUekx8Zw5oF?IwPw8}X*R$?BIM1h>uIN!T z`|Cf0+ynT&58a{GtflQEct&V%N&fq6{MPkgShuBP9{;RFyE_c$lD#8;Sd+x>;3?3L z;m=R`VjD8$X+BkKZ+&-mEEz}f{ifpgp89_MA~`|NAQ+E>_XYkR%f_7@ zCy~9Hdh;q=>$`sso$m+2bAQv}yj zYttWSXOU|z3GWr7@noJs&NB8t1piz5tUvNL#!^o>#5-QfFX~EmKP+S{^$wi|o3cc>SkI|wH8PjLwRAt%|GEP~fMFnkwWoZ`4tZ5>gm zz4ML_rr$V--(}Z#^6F)BH{iDcWXP}hApGl!@pSkd@sISsTF-Cf2OVfbEeYR!S5kp9_pU7>FX9jl93$cg3pcXjk-=g;tb*x*d(@o0PT z$x5_hl)PSz(p}&i%+@E!nh(#5`aAF;+iTD8lhLk$W~AJHUB5V{4PuNZwT39vvv>!` z6Yk%skCOG#^HsKnSowZ2KEeH)_Fv3Ph~aqXpg`aJ^(Nsw)gnX#0@N@p#FNkE{Iq&As_Ad?OLQ3l94diFoIK z%JKL4Oj~J8hwlUm^IBM!gf&H2OXkL6ZJzO56dfQA#%XQu|7|kvBI|zG)?#NJUU`^K z5|0+f*+6sCQ1;yyUNmxjCSP4n&b#~NjBgInezs$f>)R{ldGed0 zxsjdt;vCi{+6Iy(|I3Z!54`yZG^_aR=Jpf#JJ(F@SSDYZz(eTo>7Vr&(G=Ez})r) zyMml${5_serY!P4ovuanc0eOf@||FPH~7gr{WH-{hx1eRKL}^P`r7)T4Erbz^z2_6YFTawoc@(s`vdj|Fp#0nXFgbi+9u-t}W@?%l&HfP1W`SoN7x%UU55r4;Mo` z`SsQ?7ijaWQLL!iBc`XTabnDeXT+ofOwYr8ANy}0H~4HEc{v-J<2UaY#&bH0Z^r2R zSzix%gC>((vGq-QGv0!Xtt+*Coct1e>Q4Bsq?lpFbaV%bdJCykIVVi^6> z%{N!%-a`L;->%<87Cm8qH0gs+p2s2YTkx}YRasv${)<vQk(_xT* z`Ax=Z7=!X8HHM-!)#je=^<`t&Kc-?&ot&N2tQq1$ce$PZ_VTAOoN_#Un7{j?OXR|x zPvx(Wr;XUP0EQ5IIU2qPEyOSEZKfHW>OcWc^p~9ZOD$@v zeF=G6@a4PwbvRpgS6dGojakwWl=2-Wxz9qz7Uhp zKC8j+WgHbXZ#4e!-Ap(4y&R|GZ3rLFP;UB_B zi+pPXbEtL2r_N3B?CKap$J4M~AP4MqrPiSFv3w3=cs7fAqgChfP3oiQM91Z@{(}B3 zx<;^bX*EjK-=aTE@548UeV5>w41-+plFqX>yLyY)kZ_M~fWE5;c{Tg&bT@SL3jk7S@jnXv!|t2G_L%EC>Y`#jaoPpu3cK&n));1Oc~0*a zFnX@cKW&JLm)L&|Juj1a9G^TZ@7~k*AzS5E?Hi_($vFdm6UT1a3jARXfh~^pb>V~I z^oX-f{miT|Lm^XE*m)VQ>-zWvD=WBe5`pF>`E*ha!0nvo_>7%to`Wqs!>*f<9c#`A@eu$ z*}X6<7mq{W`j9VvfZ^o^--XkEHcYGBp9TLv5zMc`civ78|J?1+WJ3xns3*H@xF?+7o7HH z^559r;a{H@><;@tNvn_>Y}-(KH~c$kbB^|4>#MLgQcohj*h-%Kj^=DMYJdC(Oh=&o zsm|{+={QjPtMK-da}m1R<NIC+F$vzsTfW`HwC7J zZ2N{Rc^%d&g&dD|f#){9`H@a>DAdHTuT-9+?LB;-kRNh{|HJw}z1w*vTW=xH+@F|Z z6JsP6_w*M2+SM5DyO(#x^e@mK=FbXzA+`y5>PgO*N${O|Q`@`D_~Je7{FYtMAD8pl zD%X5l`({VIN+y%HXVrKYqc70@1Cto!{i?NfXn8!DEBN4DHTI5w?Fj4GxV1T8Js7Ul zKV5qc(^LE)KE-_JTlu>?U9ai8l-xHLd8bNFQ#OTl>N3X!{QOk(aj0dvdY70VlWq?3 zqOD+1_tU?vkH2w0TK~5E*_1C2R+rkbue%se;qTx1XgQhoapV7qh4E3UDG}RqYo$EI z%9xEqoF|FhOnhs~SG6#{j{ZO0Z;ozlvc4p1oId)~P3ZjI{m0}j6tiuDj~utdey{Pq z$zt^$Zy$Y^!DXDskHRk(i;}Ko*7-NXWUUjaJ7Eu}+*2McCevJ$)$@@Uq$U2`^@gyW z1^bc43e#$tH(?MGJdf44fnDH1n%nkKM~*8VjN_7vk2 z_A)2xGl%Al)Pvb%qb+Ww>l@dTVcA%oounR3aio0{3I~IPN0% zDSor3U&^Bl&g_4Sd@F_LSiTJTb%}m4Pi`lF8NQuhT$k)&axRe*Vf~M;9K}|AWe;-N z;c4Krw}Kz=O|C}ybLz&=|C^n7qMeZPz4 zlg^8bf#qb`6R*98Tp`YP!gp-7w!STP#Q&N5G5j=}-2U{u&hJla@5wjnb2NgU&)Lj} zMHhNn!Fwiu&R^hr2QgibPZzOwA-Y@iiElYi|3mbPZIBV>;ZVP`Gx=x(bT2rD=-U~# zc64mW&%K?W;j_tf{{dffe&VY%SMd+TFuaDyCLSC4YSMlGscywmb=Yk>PubY#v+K zn4C~w`^`|yzlHfZ>;oi+(UbD?_T+V{_q?ZlBZp_76t=hp8vY3LNpx+4Jwdhd9<^vg z7{nnt%zh}k9Di{l{1+HQE8+56kLU_*_CL~B;C~fO0@DXDJWbAY=bPDd2;c3;FF(_@ 
z1l<#`(qEhn?_^_e7#n7zfid3(eKT>BH|17na(Ud0FKG*lk=D_mJl~E7YFwz97EQ*gJ;ZyExRg zY`)`9y0(JtWwvzVe{)%QuFTmMdp3zZvm_b0&Ea}Uy;!GO`xYTS6Be;4hnYXZ_#2M* zesq7JIhWqY$@83&?n%D9NIL5OmF?f69-ZBEQKVN;Od z-DfhYIyPnUg?WVUvG&+TXDn~S-4*6M*h{rD4lkiMgA?8Vp?G(fmAOA-3uIMQa z5uG!fyQ1$&RtI|cI2Y6MLNYtEXD@xbh}$rE@Fr|8T6?@ew*KNM^u4v+hTgMa_>FjZ zel^Ock$H|BxB#B*$(;pvYjmRC@$kJ>hofy7nEk<&0iUPz6HwUYOx=z>?`W%YKFGC;{t7a;^p_qm`l9dD{jX#jIS=HlP@xBq7qN} zAbR^7;k)0I&f+_H0$21Cd*F!s8?QI;1OD;=vc6#7Quf1JpbL7!^I$OoE`BJEX4lnt z&!+z)@wXn$AEN(2epU0MrpCi%d}6#8&oA)zG+d{6-sI!AtheP-u?bm^(DMe&D_~y8 zcURG4Oo!hjMxVpJ0&ejN-}Q&@aKqm+DNZ7P3LoEM&&m3u_6^N#+F$1z^H$Wuu^jJh zWQ{WxeOH}qsh)cWU;aaXSpQFAXD`_0V9pMUeDAT1N4cNB4fF1N_W=9$CU*{@z|OHS2j8Da-lk&u6`Eh%|HM!8$r`AAb4Qrp zHmC1>@VA?1TEh zr{rw~K+v^`HhQ?0W7dG+EjzWJgTu^eoHhVU)JYazwJ)VKjq$Vs4vlK74HPtDyPGtFz!dNV;wRcV&5P#!~U^v48po5VQYLBJLa-;b$omo*0#kw?VeHc zev6E2vd3u)>xIYk_k?k_b@$iw?xU`bBlAwN`x3prVmo%s(l(afA7OsSxgDP@r1N`n zpRM>Ei8}r#8|8R@jk&ou?DlMnQ_-GHjxmt#px!=8-gLIJxA5DG^iNoysdzsQ>vz`f z-PQfA+4*MG?=R%#E%x&pp&zT(?2EQHSrI**_48$N624KcC(@?|rk?rBtI>bTrVz_> zf?oG0(s7WsY4{hj;ePg9SoL0C`xoTO(PBewA$I@pk2xpo^9I}Fi^ymU*TWUxC((Ba zy+7k~UEAaJTbuBmT24nckL?kj&GHn`b00f5sYdB+*V~ZW zhz#qRNX~^lqj(SJP`9_L_l*Ewya?YF?(5hLdw70`yM$}~#&oU@r4@c}Wh|~hJCdKp zDLj+J$D&W2+lcodIki39qhT9DM%eT5Y*BoUrlVuIwwY+(gP{`{r@=Icj_}@oTUhZF z3t{yf6=%lCNV?ADPxZW9v7q+%Pm*Wot!J;lM-*yUy0TLH-Y?$`-X8oTXOpA5*VX`` zExn1%CH6MuY2HBR1Z^??Juf$(AM0p4Fao!CTF6zf1l4|yERV9-^m+vT;sllIYWG+ z2ibR-He)`#lT1Eh*CFt$m*cybk6M%0Ps}3Nrs{u}%nRt!9(AUp zUca?Vyfb`shi8l2vs$#Nu@kU=X5KYclSlaVX#D?W)9!Tr#g7-$D}MQn#zmEX*%liI z+0)wOPgr9G|AqHnA^(z>$a~J%s+Uh!!?re>TaCM3=Hutw7Y+8k=-U+k|9tavE1rpD zw1llM%m?c4ZN6Q*TKj!`v^MP4oaI7zg8$`SaR}_Uvu!wrQ`Z07h4zWW<$zt1c> z=~JVV@#dQ+v@i9yg1(~nZ1Uu2VjM@4=o-jY^}Kvk`>(D~!QTbW{?1#H5qz|QEr;{V zTo?wh)oZH(FLi8dm;FQ4{wZbDW!yqDlP z!~HAdukIL&?@xA2s?>fX71mnCZ{%$c*)z$7^!?AAFjyb_iTKCOD*t}_SSJQ= zkT+g_o=N|C`WrQv@91|Q_ET!_iZ6rraqE;m`kKngr=3GQcWHTgg;Z@k!o9@U*D`DQVTHC+K@rQY>tiyXd89U-rKZ}Szhsw+0)*`RC{{{VKR@q2Ok)B$w;+mUw&Xi*jLIu(|c-Xt;dpf z?#;u=j_hgg{%tmobH7SFUgNuI^-(f|J$t&on2d>zJlIZVZ#b^zlSApg$N5^k=fZO? 
z+wS1=o_zQ-zn~BARKq$tyrU@Do(Txkco4vBsdZM;g>L|7c(El)*_ppB~J8t4jW1%>k zpX7KM;;|V2M#f{nKc7B!CZ9O(0OOW?`OG4}+h)@nl_;~HT25g10eID~;yt+jA!9n7 zkKpeJ!wGzB%!KFX%-TLXp6svKV?Hd;YN+iW#COB+6`HG@w?MN5#xK})f7Ke>-wz1; z;h&KuCdKpib33w+4e1K;IRGXYBYB$E$pgMm*^zakU*5%A!?lfT_Lg#|Y@uymxw{FQ z);C7n7YDIn5BHbC-xvQZ{!tsk+*xd-KjegG_h^p3itAh~JWprl;6y%$xjDRp2;cK( z_v(L>{Oj28554DCYJ2ZVe|}UWlWobolkFjg%(p52bT)a7*zvgd$ip-|b6x@4=XiXJ z5?!rrw)=mK!6V6hP20or?{T^-eDnnwDca`lx1+B$Kd3*oXTt}`wa(A&rRMvvZ*R25 zUbYU5ZTXF@+4E$orCFy%-l?+XKKSOKIf1{{U}IZt-C?lS%3jAmiLFPGbva#K=^yD_ z&wuKDnW5c)yms*114lD_x6}2Wc#Xz?9zAj;)YwqB<4~8^cD@qbPTJI;{2O+Lad9Fa z?(XQIJtbE?D()a>GC%IkkLH5#ydHc0DOPugQL+tr_t1YPez{tz$Hh9Zy-Kccp^9E` zhgjW(<~{VMcCW2f%Vu)+SooW&hacfxK(_H(48Z>lU#y_-A?^Ld>TSFiS3Hl)fdibo z!#6;G$orq1)v@sGnvoa2-7KGRZ5~Scz;;7JZC|CL_7m#i367L76eTi~cJQMl#M(Zm2qnTCfrbT|2MFyW1@;-W~VhzZL2czu| z!*$MLmGX1+i9A}xPIW9=)A)Ud%ophD)DXqf$T+^L{=sDJ7xqO`n9{b6IcniwFb=2h zo+=-r+nAn^SNl1__)^Q_v(UaO9v61Ef2O8Id~}1j^TXe6j?XvK^-@qSxqgh?Ph+kSq-eS7eg@3lZ z7}`+#`y1I|WIc~|4*g`82g<*@*{A*_H}maUcw3`6l}vy0GLge+OEQ-=)V{q8HkLzS z@cu6P2mQThA3>Ax!-it@1-TvhwH2G>Wn9wxZAFa}r`_b-h<|;RT|&lr;uxN(ALFCX zrr;S!>-X}Jfa57H3m0>(Yh*(nS848b*FP* zG$UbD57OHlGsHK-yRr6D={bljv5dMp=4rF$4D(|4B3;)t)PCC*_A&C6u*lESyb<$h z)T_Ji^2NovD%k+fp_SS@!c5+k#%=bKG4z4<_2m{72!RNyo+T8~1T@vG3(R_|VJCA=4nH*qvmoA+k>6n(?ouTRGuxYU*6 zPyX4{c?oRS^GUtye)#|3^SjCI74-7adaj4Da|4*B!t)(nyJ=sOtX>VZ_bPc<|MqcA z!QWXP{XmCtTpAOxJTK1GcLf`tbbTi~X0pLvL86A2O&qJU`y=}Hf#p&2&dR#l-ye#; z40iBCsCBoYxysShwSCU;cQcdTeDFGKH#=`6U*02w|FRIL)8XC?o;I+opzmn+#(Xpw z{~B_>v%ZJXzDoWVRdciRAoh!B9%SUIqUl;#C^;BE_F08YxEqch$73fapgJWQt z&wo?V?(f>T&zqqoGa{?}nE#EDu%3%va?fwsKJc+Q?ZF1?qoRpxK1%wyo+MTS(Vs)! zBgQg0XVUGn)8a=(T4iflALA#@(26#F?5b{yfVMGFB`u9igsn|!F+QyTsxAp zQofiQvfk*nVDnI8t3ms%#(NL-VO`hO|KUAEraot*=(`7o=Uq2v!*lh1*WKVdIR0!@ z^*c^u_z*heX?S;>chELQ?2kZe&n=q?=XUNNg>wV;e{LM?ig%rgzX?mWc_@|ZMSFD5 zkkLm?dw_nRswkPtnw%!{bWXsj1?Aw*@ zkq)||;Aiq{&+F$sUK__@e=&ZOjBmxbnf^P;vHnlzvZV!^r^{15jKZA2|0&#g$f@CQ zsl&NCn)PAF_H>V6pPb3ef9bJoZ$r*^=pH0+(xEPAKkAn=VQm_};kb~#F#q(oHXdEEUj%fqVo!$IJDH!N zeGyNvYddz+RrJtLN9vsJLD%{fzlCMvVDzuRW&bs-H_I(un}?(C$o~PYIui|pV>k2@ z*!rq=y2Jiw_+~n+?Xu2f{KKdFktbH=W7Y=sc=qu;8qwPmuHjsPhiMCJq4+TSXUzJcMuuC?c?axeW4;xpew+q2O)FJDySronTZ zzT?@z=I}QJ%BjYmJSu0QSpw@Uc|IRqwL1*>eVZ4vJFG90quFs)&`s~t?6^w|25W0W z-lwh~Cv&FQjDo2rUtEP>t`vKqnNG(ZuCLX8iTfdRkdySHb3oO8m>Ayey26e`{}Xk! 
z-%rFRka+>xOYJ3`=6VL6gVFXcr1!#Z?~aQ}n6YGjP;qu;!p?d%vWUyRSx*iM=?Sob&zvYtafOWTfJ{2w@EkQdES ziwE)D=2h!JI9ihXW2IIfvLo2ll)g6XT8gi&>weCMI}Vlm!58pHp3TDAIyZ-Ak30A( zHt!U#*4v+OSj*%q$bSv($?|Jt6)l{b;8_IIKt5P3mannZ8YMi-6^-fI7QRFAKj^#> zT=Fa)!4_k&Tpj+Nj+5y5j=r;8pNL1@h?<~}VXTP#NpyJrE0?h))RTQ+8A;YWI^U}J zzd5W04|LsI|L@vgQxjM59o^+$d^Wevc*l31>xp=$(0!cyXXqPG?+kG>K5{l>@-yGM z>ThwP8OVlf$a{(Gr|=DM{TaDO@%OF{a`OMm@eI8j3Tv_$O}Zhz`3=5Nk$X?$4dd}P zdV;UyVHukj`{E7$cpU8uj=pqnMb`a%rzS*o?D|k0zJT55@SoU3Kan%v80w(^Beajx z8`j;w*4Os@)1mY?buUNb@#dtaWb7fY{+Mk?<`u6 z{^EYmYVCW)U|ZRmO+Ulgf-b+03D5tSex*4`P8;)^Ewcd7HcR=zph^;Pg$Z$P{+0ITf3iuepl!1=?(Ecmwdj8SF6;%O{wjl%nN$m8z=dEGE;P08OtFL z=qh`%a~hxS&X$?je+o8SJ+REZliayaWI2zli1NjyRndm8o3@> zf9U6<@N8dPrcdq{b!zeY4YlvzOL0r?Bj?hpu?N@NYLgtVJu~kB*F!L_0oM%lUFhAJ zU9Dh$67HtjpK?A>TkiTMb?zm7);i%^s%TL+xeRw#*Xl<+%&~?%JOur{@I32mTqSMb zaV}@Wphkq}^71A;UEGUL@x5_*Z#60#^szVIO-+O~ewsb$*Ed?o(12ha81R#0bi_1*Kln+z|%NjY7lFAI+gx0+7BS-aryY2wZi(Y zPw8HJKFeC_Ta5POYVH5gNK&?#=fk&@r-D_gNJw7>H75l!8X3lGI)N| z-xl32c-qkYVb%In+jV5cY&zcg6Egc5C*E^Lz2P{+k?32UtYLh0i|c+c!4SrGGFrY~ zX)amHzOi)dr0rKSE_Qu6TaGnujzZ6l@)maZ9xbd(vd8&I9Mb1tY2n%$sr*%JZste1 zk?u*(YWmc*cnG=!;T_B;$H6d~J>AIrNMC#U?Hh&P*+j-z_-#Sq`;y{9beq9Enyy3i zuZQ0I(b{uEko_T<p_xI?J7jHxMhv3u`R^up_h9!G@OCEOS~qFpS?3)%LXK`M zZ;xd6J!C9!R+FR6<>grVj%CZX@G0%pr;O&6?9(e{#p0UjHw2`%gKD2 zzMt9iHT|9R-{qhq9@0=-+Y~+d(HviXBbIMz7vE$x#~3`uRyj{><#%L#6mO`#DcbMI z{1awlvbI-me=mEVo%~bY2LJo8K1u#i@;; zY_jLm%OBaA_(y7M?tUtts4L-paK_JJ9hQ#wJmNVve9IA@2je%{TJqySeYe5vU1Jn# z#3X#{!8?x3y*-k+hRN>#=Hq$z8C=@dRcbXivAQq;B zkLf?%^$58&6OQrKC^fc{SJ9)5uSK&T`ERrJK05E^*V6Tw=Dse*&Js4XhFLBqAt%K? zxrzKPc)E$*=GEHw_0jkAe1Sj2;7ZrtrRAXp8du?&H(<`f7`jYbH#+uL6Tep1_k%&r z%bJPBcjOxH;d{+ub+)0)-eI%-{df=hc7*G8I;|DM-zv!8!uu2X_FmF6@$yAtehT>> zYI3qMTI<1pJ>G;I-{r*nx^L_HB5iUetbHSEl<+-h*z+nbp+~Ni>(kYNZEeWi5RT*VUZneY zelf2V2a<0uCakk#`IUuO+{#zscOe7F83nW28TQX>zcDhugzpOCRq#!Rb);i$IoFEp zz4+<^?<4nxZxiG94Rn+FlHbDGBEG?KFnMZZ)` zTpcXlB}@L45uL#w^U!U|?-h33Ef<%Vw0AZkVm;%9k0ozjr4m)N&DbC!0{fL=E!IQS=-Rl zy#C+6Z#Gby*hcq|H3;_SU@~u}KWbB_YtIO4kQK(t4%+_4*Ti-6?&c6S4|6?Z+BuBo7K@Nmx<#Ghx583ctgLjyC*MXDF;syQYj%-rhzt6F9yV#B&jFT+jSL?E~ z#Wv(%$kEB<8nfYdROLLrh9}{t_;#4p|I{}nCBGMMk>UR#j*annCEZV;`5oU=WQ2OV zIX|AqzWeC=nGX(R|MOxtR^MrCfF7%B?{nu*k$Ir8wsXCGEj4Z`Y)|m# z#n#E!vP~SqS|xtWyO;{>yQ-sS!mnHI|6sroF0`1!54icA5%$p$}jk)mu5~qXt$+%4} zR=-YU=U8ngsO5v%dKdoo__r3PrQ$G698#DDlHVN9D}3;{_M6f6B~MPJhb&eH#L%}q z@h;vsv_u!=*YopqK0K5S1N4Ww6u!@q>(TMzKZ|TJ59^5d9eww_d@pF>f7%9@GL-ox^sx7xg>7N$EU)1cE!FYOvYldCp|B*?V><~e|6(Y z4i%o`ifL?JrTn zq-&rvZljiRu`POIG8$Jk2g7v>n&Vu<6VDXuO&g+gIQjRXJ6XM(MD9)I>sR%CB(JRh zi`Uq@WhE+4t^E6Uc9)^QS*;zyPc5tFA3l-CMH9LBmiss4d0W3fI7+^#G4VNKIY2yG z%h6GIMx)tDt?8&u49Yg*Ifi~d$a=B)Eq=*d&xK8YnDetU+=n`~8y_FZU+=K@1U`ID zpS4Fye>$68J!su`zGZt>usP((e#ZGe{PHPV z29rHNtgbPZ+LJp2?%}YW!R|KVw;cWb?0FXdF8cXDtjnYK$i0XRd6RsIe-&SlpB}Dm zURJF=cNeR5ujO%mBbr8ha3|j$Wez zEp|KCZpCf~lm;mk19ecb5m8hS&So$`#qM_Oyk7O%aecpQc>kEs%&hgq?}_!SHL+*I z*ItgyXWKXOI{2#253S@&6W_IQJPn47O!SGEMLW^)k@L1Nk9NEpOjnCr$md(g$5+*} zs9x3da!u{;Y3oJDRP>I+_m+xA^WQxAu@)V{FDIfiHZy(?_jA(A=--K-=kil5KFzhg z%w}=UKcUBbn%;$0l)sJ)eX=YRPUSjs2201NOVJw*^<`VXvC?@naiABbJEq%+vKmi`yX{=pTgBq)~WpuuL`i-TXJzo0duQc)a2>6Ea?SYA{ubG%)O^>FO*_EWQNDa_zaEVLA%nldvybE#vQMV- zOE#bH_$oZGMaE{bjD7Mana+0S*_mj54Q*&HuKRx@wuSoB7M8AR>y_rxt}xc*mb#az zJLUf3PF}k50N9z8}&WLk0EECrn3*Z#%y(s9c;QD?VIY*67rUa#ohcWN8*654O>>me+|EN!2gDN z_cxg#2d+kQBEDB1tG>^a&c_d1xKEL^hx5ZG>u9pgy`^Ug@mBhuV^?=^yr0bBY<-X1 z%i%HR;@jz44^0>!Bk-)__&5ICrCxo%A{j;IM0&5FQ|^X$TEabwWQd&H6wL-?Hsbxr z9)HrEt`Y`$6rH9vZ08zaYc@P6&f(bs{MnIof382&kALXuU-SJPd`q>>LN`?&g<5AG 
zEY_^K*9gNi+E;-$)J^rN7}ijIPpP^F4KZm=|IK3D5{)%MJVjnyDqn`W^^N%AF0uH8j1yr#-0=hSUBtdF zcrK#TxXs4c@5ZlcS^ktY>r6Jr{Hn&3e3Hyk&(6}{L;nPr&28lg&YzVpXWMU1=MQY- zo77ki|EF2{JxKW=*?;J7L5^A;u5F{W@sERTAKMM^%IWe-^W5HSlJl`ONAV3qvY?x7e{`;&XBI?$Wlzp^P@yI6a~FQW_V-2t$?%8zW#taHNr zUOYtS(c~XQ-)oK5*A1?%$(zEzZ=y~3wgcP;;b{r;sqo=V9wB$Q<1hwis7G7TDG&2C z>(%$YlO=e*Vbkew{0qk2^>0g$IkI>I-_6dK!WHfTyiK-eG{x(D^ozdd*w&exnYMHJ zZWXeglT$O$tM}2TuzqCQmcIV{a}}9W;r0Gcc<&|Qr~FL&&EOTgkgHiMa=RLb|8>3+ zndac+Ix_AyABQ?V8_j2qZ-pzP|7v^J3c=2d?PUwO@H@JL#O5${Xmxn{v5kHCN#x1Z zXkR(>1D!3{aVyl3wIh3D zy42iJ+xe)mkd47H;!4&&+GK~9TBaU7bB#4gx-4g5X$;Q_MN(Y__R{`mIf zm!4!_4{LvY>@1(xb~^vJG(Ns&=UL9bfXVtMeq8^x=r3ZYH9+wjA8f#`>)1G2dn3Ky zv2}acyOC#nl=H-NANm&P9|+?O?t6Vh_UmMwN_Iy!Y@*Ke(0>>D>99-`X--eSBpf1U!{uWH$YFlR6vOm3*5lWguilvBy> zLT4-Oe>YbDeUpq5gQx25my@xW+}`#(!}1_o-XXgI?In*@-;K=n(Dx|0>Qnx%>y7Qm zyOr#HYrfa%*m?0Uc_J1?%{B+!q4qnGvx^w0VbQ1b?#CCqi{*i}DE@%%MfK`E<#IG$ z*z?bp`#pUZ?Vschr*{!O`;m7W`Y`rK!OExkJ!CxUcqq&(+B!I&jMf~P`c6g457BJ$ z#4kC|aeK1XXG2#qp4E1b`FWhIK13r0` zbe0zvJ3bqB?dd@GevU@JHXO6Hz2v?UZ29f@zUAKy@mvPyNEq(W{~Db)!Mg`t>ud82 zD7>3k-oyud7QXYBKcxSZCf97__T`6tZE9Y2xwg;nEo9rj#lx72-sX$J_%^oPP2Lt{ zJq$Z}@#gf8Mt7)O4Y7QcyeaZ*E*tOB|2UjCz>okTB=4uFm&r0ZuGaP` zoiIeket2e9oJjBU+M796SBmz|PZ7UU(HXmm+LQ26Hq_o&4QtM#6`A4^S>NQar`PBg zw{RaT$S#K4S~i-~@odkpSCBnL3=f85izeUiwgx}QaoF&e;T^qcbz?}azL^i$5`Hh1 z_aWnIK0O1zr^$yYmp|D$`o$qVl&{*7{k)uh81^S^v-GPUX&cxZ(cYO_O-S3V23=6_tN)Hz(wwO^xfG%9bM3|z4NQ=H)qcQj&E*s zuY&C-HC4}~VtJj)*96Y&MY#Dp`GqeZa=ZvlYjTdlYy5@(GAd?UU#X|rQ{)ur9z=6j z6Zy{9M|8hw>$&SbC1lAPO=PoEH{ zN66gO_J*3U0o>=n>YeKHa{VvppQ3I4qgBlhb+_1)Uv}V^v5mgXL}!9-EFG7?=)Pl| z>2IYy%oFlAAESLDydCN8tnXAd7BCzy{*REqBCNOQ5A|(4Uz|subymdQY`9#!0iH(k zo@+8E$?G=s%|v%68-_I*1N_yGj$^E`>C3w4KN%i0#Wr+4#Ft;uv$nlwZpCg3{RWEu z)8y-|{PPw5VQkr#4+i0XfKEP7S0UqXGP;s670pQT_yhfW+q$0##}vnna`bHbjre~Q zJpYApg<2H72oHO*9pD?Kub)11Qg~)ttmpjPh1OYUcjJ%W*fNCPt@+m4Hff;q8T@zK zt1a0d{2||px5RS`J{kkpXJU0Mn(OrqWA8xwnBINhPHTQ=;=H|nC-VIe`2QtNL(ufKzt4F`a*o#b7`z{=6N$bB+T5Ru@8^pzwY$%rT_E@6 zOn5gS?#-@JoK}Z1)h3VQxn!(HXLr7xPyb4d?(yTTld;tHW|R2x!;|*y8mj-1E5?bF zxQ2CKG)A2EW8W6!yovrTI{NU%5pd3Pek(oK@rM|v8`C!y&K~Gn(7A*C#&k}E@ecZD z!G9edITd{a=OT92(M_l0Qn<$P*;E*g=ChI7tUr<)#3_tZv5A8nJE2np)2+#Vl%GF@ zGt{6?FzgM}BV=~dcRegK%xlj$UqfHO8s;>9PK}ph1WYsWucLh$8z$&mtL8uD%#EAV zF@uiv(OeXC@CQ4hFXZhWd~&l~K7k+Rv7;AXbanhXIUUJ1ZbFSq?q~Pf#``Gx)%J29 z@~($NO)AyAuzn~Gq_aOdxT486b*VT6O<&j_f`Pm+-^V@4?kq;%z}W>xIUd&i*<|O< zoO>TDpJ0Ee7%iZG3Rz$9pYandgYg`4Hlw#cTrnI2>4ZBxzlh(aXEc0!pq1z4_i|M} zCt^~J6ED7weIqfGr`bhno%d#<@Ab`S@=Y7D`dqH{<##!m@>zICwyNb%@zrVCx1fW~ z8QY6%^qoPr^+!CHo*(drvHlV)5e#>sdF=n~Y>if}%66nTaxA7{%@G-s@fBjizWj2& zKc76ljLpH}U6^o>ATkys@hNt<{f}?tT*1C{4?N*N&D!zt+OFTeQ4`EB@t5>x>PSy= z2GMa3`EB_4N7%0A<0~4gcOkNI^66GOPRDbyecRAy&sX7@NPHx{4cgWxV?3RFQSx7L zA)EH0Gt~b%YNYugYT%!4e1AsGHTD4W6y2}Ga2vfFH7>s&*Ma}km-I5SwPsFt@mmgw- zaTo20b{d?!z)6 zE5!%mwK4iZY)JK=!q)4_8%u6jrx;hoUd~TqQ$pqoWS#(vco*i8sxY;BzGD=r~%PT?Vcrv zS{a_j75oz&gZ~AX!x}(-6nC-VQ8?v68FFwFI&a~#mB}8bZ7|-0(2UdfC4ZkuZd-EZ zutU6(2(AywIYN#-${sew2io&>{-ga4+E+8Crjs>-js3_OgvL8k`Ah5yzi*1*JzIN1 z{%o>0z;iTzcIG#04+-y?4a_~s5Z|(setr%2HsW7xSEBuv4NLh#oWg%*<$Rev!G;!c z;bS>Ai`{B_c&?SqMRyIl{mC}Z6)V!Yk?S_=lN{dYTmC_^o+WrYTmP@k)`q%!G4QHu$*Jt&=WvfT zVSjQqpRH&uEk@<>P1U!D%RXfNAz#>^kyWhQs8+-H3R*dy^m7gO4c^7@4o7>P_!%>a zx)<(!mF!66f0EO|H{srMw3m6=J>y)?7d!BUF;M=8e+yW;lD{F{&$IIhdf1okNKSL| zCz9Jze-l~e<@8#zyR%m;u3jSyMw-K>6*gM(e_>SJq*t@emfb>Esn2*QSFKr zvDdf`&yvHnV}>s2Vb6wS9=-fqg!#?*NSC2Gi>yn{Eqk!_V0hPr#Tq@8Q^nKtHNt58 zWDhS`ejRr%{u$by;oGU=+uEEI^fVW5|C?3FmmsH{k4J9g2cW%$+;8|K*x1^(6mIie zF+=-i+C%-l3cf$|@nt4<)qBCG@`d>!9*yP}HqF)tXYnY1e~niRqe~o{|H6H;=oIVt 
z&)GjuELxE9jpw-w=^rIu9(MjJOxro++-6hG!0`wM9~g=Wss$8^-V)UVeq&k)=D}J)HiY1KcO0>o)!Uou7b* zZLxS1FjRSQJ|6|Yny0e+;MtqJpWG{3BtQP-`%7%%R^D3o?G?Wb_=N1O$ZPGqhW=M` z{9?!dG4J-){wy8*6NP#88qdKpzUxE>KSlZrb)>jWUypkAdx7*{WUx8hCym)0$)DVq zPsZW4kSgG_XbTAzF?9 zVlw%m97Jzev#!p!51^61Q5Yi|@Ij3o@+dL4#Pi8;pFK6lgy%KUBlZ`Oy)g`% zT#9ax^N8#(#O!XeZSaNq{U*Y^RvO3~js*c>D&A7_L z{r~;=My%qPe)T1P1BQWWRc|q0kMG%D+=%ZOa&Kds`=X(K#(R=CLA>r1FJmn{Lrdj# zbTMoz^L+=~baKxSpKfHlr2SPio7vaM{R-B$`fTBuRk$8Vy0cpiO`f++^!tQI_{sg7 zWLNq&!lPz{xg)H<%HDYTk|F-d)@<36FFrC3?$jnW;aV_j((c(yG!@pJ_-+QCeOZsPo6rvDyFHe-dAD3D4bP->-`{KvR(aCb{3K zd-w50N4n`P=GM(`)=ffKlfSF-$#62% z#cYUrqd((|@ZNp07eCFx*G${>aDL{u9HaHg)$BC>ZLl527P8A@8>-)?gnOV#Uo>Zt z`$1#1=80C&|6g(3njLuKnzhSouzhZvoMbc*v*_<2w`S|Vfq$MMXE=LKq|2DicZ8#l zc(145*pF|kdshL@Rjd~VnPVr@eVw*Zc;2lW$8d$()|#$e$kG?aL*7!{#4aNzzZ?I~ zw)f=Ez369=I|5|Z2YHhHu?Vz=W0M|OcqDt-I%Q1@V~)7 z{mI;lEV6+Wg85**~w~W%IFzh!lip+P}wP+Tgm%GVn#>6XVyI2>i!R4J~?k@+f zT7)T}=aW6U^XY*FUf9Sj8-A`uqkp14v99Kaed#&^&Dm^# zCAKEb4ux+fS(l>yRPORkx_7PmmO^m}+oz+y+4)z-MtE20eRQ4qW4d>9pXcK}_1{g` z#`=1&>tj5hHnGk8FbbBgwd#KYlP9zl{1)u`ir$tmtj3P3ZB zBVFK;Q_)}a|B}JF;u^{?;hI@PziC(OzS_)$zZr zH|R?4L3^0~ryG6SuqF?+ok-t?WS^>*yOzrS=3Db-Vl3r;{}k>UCe7&F)b^(Oawp7Z z)|daLwK;5ZH`|@>C-C`xWE{ZXJ@L?6$mwv+5Nc7pp*(rsIBiG&>el41YwK9Ao>7PA zLXm6XxG!AO$l|L|r_+td?2K--_6zuYv9;7>ZNrS4KVZI^j+fYcfc{XgoySL!xicFk zi&u?pYYklff1gYEhd;|J$=)2@Ty~h}!<>@u>G)%~bGBSyAL^d9QxW2Io3^>)dJMUP z*nd5n?_&Zm@Z%`>Hg!CapN~h=kBw8w3hT35(Em>?!n43D z$e6&M&DbZWi(6oP%r;2>QW*F?4%d%`-dN}==KmLdQ ze0bOsFom^I0z!_sf()L zOh&ghEq`XSNDi(__5}Q|q4|-nbIIIR+xm38gy#nG50^*$Q+{a9QwOT&)_*oF|NgA? zR(c>hzmLk_VapUgJA#aL(62~tSPL{67c=G7eD>dj-*rm3HZO02;}%$dZ>oOV8|KRB zJ-+>b{~r;HYuUK2x#9%!U*L-?(KdH{Tdn#(&Flg)r`4+8+@@c`W1SMaSDCkG_X{=O zD8erXOa6`2x!f3xo~7$~vee{IkIVIKY)%e!t_~LC#Pb4eli1ZnS38(DhVx=Jg*rR} z{;llKBXdlUh0i<7=|l9CF#PDfKK_Wec0S5x{aKvk{4O>BQnO4e`xV&l5-J&Ly<-A9G|u-3nxo$sk<1^Y)cns3 z+@r|dkuSEh$(eE&^u6gD4)1ex%d_el_(ihMGZ*aHP`%$*nBVfZ;k}(5|A@g+_$IRT zW4v4YmhUfeYdV?B=-O&wHIF1G!g!$V6MSlJJfr5FIC7qV??yJHbPRL60R2GytJ5Fy zVpVn?=lE5}{n@Z49evPW3fuPj)a}Uo+TmI_UO_$kn}7H}x<@YE&t7wWp>`DylF`U+ zH7lEfwkwR*59tc>s4g}G`S%sr)ZuKn>$MT+LYz0lb1B_dkbAOmz(?U-w3zJZ3iNH* zEf-2PDH5~zV03HSpDDlDmW@R#Ch>pC{RHmU<}c4p_mZNo;WGA9<1||rt=bsP$9ENd z=kx0WHL;WtgV1cQQTfiH+TI3-MZqo?YQki%Rt-x}RNh#aQi&#k+XL zel-}cCjU>z_%k*|aA&K!4*ErG4PHFpIb`fntKR7=->2&>cJ!6s zZ?p9=wP<59zk?}^gCE!|AB$bZ{A9d6$ulQs1K2m7zDxLKCc5|VpN=nlLveSQ_d$1) zt-rph0QRhZP+O%a`#!Y>&0#|`E&Id|A{=v`rF^d?oG6P z4bxHlb}LNUvW|4_$lvVBc5=K4->8w<5cFTFp>d<%PvU!ukME(Qxtvu;vWK+&#hy3B zvkm%}>5zlDno!Mwr#Rl5Z5xt3o~`%r^;G_uuI(o}nvp+;&5s(R`^xiO^s6(8yh}eI z<7Ylu39XtP->dzd?c8(2Bfr9Lj*G3;xpf<>-vH;$%`F)`=9oMYlQ0ME==kn>_083M zL+2~Q*ae>&5Nd5|u8EuDA4BgFeY5$qH@_H*+1=uDGMz2hu+%n^4L@qX)7;b`HrMg* zA^7BYJc+*D$W_CVDfEqVY^-Iy@uA7x3y+r26XqXl$8tmc59)tfY{e|(Yy5kYXZlUv z-6!``@i`RlMUHo-Ga_RfbwA9%XFA@Lo^JXtV1x0L4B|WfN)Le}B5Q^iF5~a4{wmr;tIa+jsA?Q-4(|728 z0o^vPhePboXV*CMMteRSKz3{Wa=WUj$KmOTe{-^rrN>qdvl`CZ#h^oN|e9UptbyF2Bj zVkh^D>y76yPUUi_Gbvve6U8aqTUd|W`|0=}o0^kWSPaI@vn1Gr_l%|Ms(woL|jHtFqy9zFdjEF=UHv8e%okrZz`)xLVTX zzGZe&&2N?XS4}OK!M+Hd1^7JgDE`ZiAGL>j2aEJQiZ?~y5&jPL3t?@6_66taNz#|i zZM1*OA1BduZ@v2dSR`-4J+DyL!#cj00c#FlcRY`fd$T;-%6R{X&7qEr zwc~%pNNp*Pg4djwZm#a`O3p_Ms&}f&;rN%4xdGcRWUrc+wn48g{Kh-lY*F?9aQV>2 z<=+CE5APRpa_>epTid>TyDL2MB+1x+9C_=|4_`ExZv$O3xYVQUcKsdb_HJf)<`gYv zmmDf@)F-atnk@W(U;G*$FT}U0IjkT0CUn=@H%Id*f4l?hqWOM9?Rbhq?uKxh?!9{2y2^byTZ%*T1gV$kXIiHgB$NSGq^TFwPojb!(y3U~q3Szl`s9 zm$$CL(xEVnq)S|j4e0Jh&X(j_d*<)pX%FvYbs^M`8=I`J$-YI*>4`nJOJ|}#9iGW> z8B_80bZ%*joU6A=A?PI_F)<*^TZC=~@>SK1nXMMm6s9o1FWv!E!gU?|^kAzx~RmTi2@p^eINMZ-M(l 
zA%{cln^t$P$y)CpapUvoHaNHAPd*M~HaVW2C$(P*;|*l|uKzXL{qFVRNq#~53tdg@ zonyZW+MW6N5q6KpdxDta3-3@C8J#;eRo{F|*N|hQ#r7s`o1i@nmJP+B6TL&6Z(1`y zuuH827O`edqq~VEB5|^A2CFfUg~&nm4n) zWZM_UYu-S{G0vZ*b0$B{w}o*=M}7uBl9!!qo!*T<&rsVupDB#vvM-xfw!KWpmvHe@ zF$peXuzFAP3s}Ol(GS@3q5SGh-gILjA^Qh0bd47e6yL+(Pv}^WjjPf<7T@N^bcn$+ zzB~*4H*DV<&3^Qb;QycDI^FSWboWvN z!CW22a9H=3E7IML4S&d=&z&E`r%U-k?nf*0TX?_Z5Ar`oZ+u2~z^z6_N3-!9eAjE2 zXGtgPt2@w}8;f-vhxPY0`sm9R)!bWwPb{*|e1Bqt=fiC947233cmbKm=@aMJyb<$9 zz8*O(o4l9q{8)ItW!DbQ<#n>3<44escWw*MoU%31Z_an&-H-P5>iZs9sL%g`Q>`g` zpgkJzTgLW-!3T~HC3kO_H#L9Hh4HsW-^+)4Bw2gd)ANxw^?wnY<~I7zAIh#!W8<)kh!+_pA63~WNfGXZ!&JJsg-P(E6KlMoCQmcwwboQ z)U*xBpd*^dH$Q1tM-%ZcyWqP|n`_DN94%Z|Mw9h#q28<@A6n!4o4psnXnxKc#oOsim$c3L%-{rBIMisw!7+Tn|QTFGf2K~fER{PAEKw&@Ew}T z+UN7%ZDS#+z%}DyCOL;)^Q*7o%J7XTiob9STXpv=6x7BAlK6w zcwV7HJqiDlP&R4n$iLg6fiJv=pDbnf?=TsUDIMuLWZl6A`|?_P_HXolr~bZmzZoGv zJcqoU{X4_?6dL0wy4~@eVuB}Sdv+ClyYTw~bhYM_d2miZJ6`{d^jSZI`~T@9-e237 z&TjLqMfhQ^e&eosXKNe`E!fY;lpcKeH8BJ#c!eSqwg$sG~wBi&6~h~EcrX=^L}M|CcQJ*@hWS-aUJ)w zcWoKDzZd4ZGNv2uN zU)CEYb7=Y~I(bzLfnyEw$D=VWlbzt(L)#?V2zHtWla_p(h~?qvhS#fap5|F&^^7Zi zneYB^|GyV`yT-w^C%N;`=5$=hm+D|CC$iw{;kK(`?22wqt-9}4tf>Du^53s7 zzs6k??T>7sFLymt-l6>kd3YL}#&^MNy6<<@Jx_t5JNpiVyA7Xo*RC!UFY?){usuib z7`XlZshrMl57S|s<=3DYL;e!A;w2c=k@ygJ2iUfkBirfQnw_tcehjMwd{0@G3n=IaW7|z0vJO=1Oc2@jI6-YiLsk^TBvFq(j{= zR~MUE^@adIwY=%~>6S2$CgmrpYdxbGy zer;W?4#bUQ_b^uEP_&G`S7E;trbpm+uOfU)DObnBJNn63v75ku-RYl#c91%8hd7_A z&s-Sd7q#KXo$=gEhde3S6V}1W|Hwbdcyk{z-4pH;*|S7`yjn8{t1UgrdmpBWi~JUk zemS3#8E=Ajit{_<@fvHT3{*z$7yUDWb>(eFj?zWk?_Lf&498Bk<6%to$?0-qxe>-z6J1~O%OZK#qOp2sq5QdK-MPs3R>Y(m><6LoTs+*X zNP3YuOw11?XMI>(k^3zh-jY+F;5$?NCi3Y`_^$I^hnet<(f0}&SJSUHl{G$rrCL8u zwLgO%&#I%veDfLGtpDR0|J;LTZ#tT4-bvB7KmGyo%XL{gLElTp;4$>5BgH%Ro1$xF ze+Zt9;oJ$&jqLvvhGpcPE?<_(^N=?S`BeSS-_idB{pw}7hEJ^};)tEkqcQho>TdZ0 zSv}Fwo!&3k9%S=3>dS`4<$>g0!GGGKhv6_@A~_lxugNRge2rLi zKBOzN`4Dk<0PXtx_8Z$bbbKaUawa{CocsCfY(6sfi(O&pS~Cxm(S;u}a?NE)U$W1H zsTEx(nQvBbyo`O5>b~8EW^MfQ(T8!n*zs<3-a($cO4iV3Em^jQeQU?x(0c(L=I8Lg zIJvlH7t5i;d?&a``VYj-wUG*#hqAWXJf`;_!ZEdZ5@{L)K3ES0%e&AqqdET=`(-;i)!cGBel>q2 z{mAG|zV&}H3;*F_S=6iF2UomYzKTiWO1J5~%sjl7n9AItfHo|DVHcmQ9Ghk2dOyeXIpf;)T{qo zPnv6Q;E!+STVom91H{YN&+ew@7ryBS?`-lH(X)f|581N2{ZRJ*M%MSv2cvxgwy}-X zH&Dvm@o%aBcN-hBb=W=+&ENR=Dv7m^b}ly2;kL81jf3Y@cAi%Eu8Fv;1rwS?y^NaM zZzk@K+TLq$f6^MjSPSo7hx@DHUC~0_33W8Sh;MyQCU0WbS#ZBWZfmk$bUXy@X>`nE z=e6W!_Wx;uY>kly&?M@n^iqx>f((aD8KYO#1_PFDFM%hxLE>O<}qrn|hKl46Y9J z_b2Bf?T?!)`|(#dbjD1oFaMp4uju&F-tX7ahqdun8tl4SPG86OtF!BHG^^1)#Rg;C z0^LXSh<|yoIw@Dwc)XyKKbx<)0&i2OMM0&$L z&RKFqK9=UC>_qX#Q}iPLCT)Fc)o&P+Sui}$hrQ*o`~TJbqx<+XC{pI?;)!tXzvveI>H-72;2d+mPt2_DQ z*GNx_sh5G5|>=Q7h4qmlNvo4;d(kjAHD-oJ^SeEI7i=FUq9dA zJXSuef#xoFHY5Ksd?V0|Z*pBIAGhZByJ6p!-gYovfp$yrm?=(s+P;P553;wU_l3r4 z9guy%ALF!@R-sOkF77+6rS-FC$qVSTF=LM#ty!#zELv_ zp4s$VhK{Uq1ndXGGn~8&^}!y#*HNCMZ#LQH^iqv3_Gyg5dq?3uYurU1@N22og|V6Z zT&sTb7uFKV9@fCe@I_C@@;hC~2J?LU5k14?wfQ}I&=&4h_JL!T{zY{@A?s6laEE#8 za5%2TKSA3ebob;R>!r*+@nRjvm%uR%&7NdFVmn!%SVafp_uMQWUAI<|lW^qPV&9Ug z*8K7?J)952SW$=bhxze#_t5XRKbW1r%comlQV*lHFs)iB|T%GOZbGR2)-ecaJ>R8T&YoFNsS+rv(AC;~b zie0rYh4DDJy$f9QC38N!Tbf^7W2Jr2?ks1*n*U_&m$GdXoSWin4_9ltZzjK=7@VYk zk$(3_vh`|FR+Ia~`N3GrpXHyM>eX-Mi#^DKt-5Z1L;F<6#!&o;e)y8^^sY?Kkz~9} zS7tAl(xds}YqY1!t0Ul@Uw5qqdyQ|cYx3)1@ti35Jw8T^gI`9&e>EL@^UufHj`s}h z71xV@H7%c0f3ts!EzRx4D%@kNuD#?-F-9(nL-jkBhhYxyinza%?W3<3n}hs$YC+mNvq%)t-cV15N}S9I3e8SH7uYhxkpp?^3( zSpS!w(P2H19>CsdWIsmN*7P6g+TCuww;u)){Gtjd2&Afus?c zwmhuS&!Km7GX9`%B|092JMcXQpW0CjmVe#l865fQXvWyDAzyYgH@f#;Jj8|}wpp-_ zYj97H4V%Cyj`0;_^|Sw-Pai-dZ^AR8Qk_ix@H{=#Qe(Er>1Ytwjp>|6{zd54gj=nQ 
zw}tm;GH2A>x(*&^8wlBuTp*{^T}L~t-Hy`6P@pb zcU62}^YJxolZ#Og?dn7FySN-Dr(P#>CcfV8_Z@97R@qQEI+-WglMR6HO7=WS*BtF3 zcgZPXuYB=6zpL%xdp#*X#wVDQ`_XYDJKNO#26v$KSIu`!$vsN{hh+6&m+_Lk4#z+2 zRac@?uFbTW!;(FmACBic`a@pzVBZ|Jv>@Yjde_0jr||@~p62{Y7#$b#DTXCKO07d1 zUxOxLUn{=c*BJQ@cK2+`=?mloxoTwb09_ehKF;TT@Q%>GvRd&K|D2BIXZ)f5$i?_n zcxJ%oxmBtj7M-;>BSTJMf%^AfVkqs3)3 z+@X%#ORlwi)*YsSXzbG=^uHk2N6;&`iyz3kgAIQ;9>PbrI$mtfd`|nD=+*jk6#u-$ z=E-d7%Wq%uRabg;)Yk#u?)av_yB>W%)~oNag=^CILffrybkvvIo^af)R((6ZY$-?9 zhJPIT)$o1Q=(iN)^LM(J{BEy3;5-h^y^cr0dnIfyqPb7|0rJ7U>V(d01u@eW$(`sg zF%IJi{;+1xj)Bb@G74>X@=HIpa|>9;(I@x9ypvBL?*jC2RKI=Szrp`xI$o>hooaYL z*RS4(=NH+2V$z#0HfA^9L_h2M$#H9Pe&_$b>fOWk0aHu;Ghn)!>8ey3Z^LcFRWq%kMUFdz8+)d?MBb`^$wV2Xdd+uuc!o7u1*R~?ZHCpy6z9(wd01M@)8uDSi`Y+39FFe=LISbEBIealJ zKl?V(RcM<#eoPJD6Rr;&pTHNNliA639eT3k4(JAwe{G}OG#=HxaE~Z9ZsK3XlHbZi z{rZ`G1K>YFeLe==+i>iKcNn=X@eUO;d7qwz*Z7ZD6+d|u-;4HcSdU_7uSL~=`jj6x zRnN~Nz9^^h&(8F#XXO(7+sds&wYm3R-B*#zVGhgqD;ojJi~Ql8kMeBt*VX3!Sv(K_ zvbtPwY%HX|h~@WeZf#sna(pLS?H)x5caEdM1-? zgmxEw_`|$k{$f+-vZZJz!5`+wbS=a<%{wJ-cd0KlmVR#h)9A%gyvm z=704!U){Kn5Ao{wVl-dVbqOAN!}u=fN?xbyWVGxm`xn)B$HRTYY`(T;#=07v%9pYq z`peP78Sd5OVeD=|-Uo251rIFo70##d*UIczmAv`2>O1}6{!{qvO1uGlPs_P&n_PpF zXPhP5;60x&=_@}#Ka@Ocz@jZ#tHANT{n*Cpn}g}=;;|mvtkd#U*l>5FXNYKHxw0GH ztH@kgt$EM7NZu7s!G03?UFhb!d@sk>vg;h@p;m}lcs^C!VN-(>ZPArvKMvpLehR( z8E2`Q6}~~3b%gOLx{tPb){wR4n@3^Y82-QLX>`1kygmso@hcoh6WO#m`R~es7wEW7 zySXdhU)xj8zf&LXX|$%WjpP4!?U#{%KN(-4If&nOMJF$-cQ59{GRkp@Ce-Wjyk)lr zzo|hhHxqq%3;BPEdGQmn!hPsX$(YNZ>Uw-QjOtLh4vAg2#9NWIpIG0e{bDvus#o80 zsOH8Y4b^wUidJ;Z#Je8(CAy8-xCxne*`B21Vg1(s#qH!CU$1`upWGo9U-0d5@~=nm zCp`}}MzJ|JxlSyW()Ez`UT}?5k2kV^ufe+^a%UVFE8;UxhIcmN8lB7ViFNqhY^XJ9 z$mf8kBfQPTX_rOTnKkz;LeMdVCYP*TS|QSs%kJKB-v5 z8?h_b--g`o|`alCF5`8C&u@IL^*oG7gS^BHK{i^B@?{US2@^WPUW_d(c^lJ_j$fp8_- zk9Izo&wisLUsyc{tbUWpUs)%z@4~;EoPUW-V?Q4+W*2B5$xkrl?lFh;PuWHPCECB^ zljpS0F#Ws)JQ=0f%AD!jbcn`IOyuOVeo5S`ma$*E{QT^K|C8_eNk&d~OR?J&MRuI_pjy;_s=M}8PtL)fHl zSJx!$D#SEfpZu}t|7`TmIGV|94D#FJ?~JCuxPQr(9mwdeZ@l}Cq1H}@7uN8bk?^ib z(wPtZ?l7HC{=qQx6=V6DsZZH3{QcPdU%7T2Ooi<*ven}BAO52!HjJ^RwxEOWQKOf0>WM zSiFbKPa3>4LDmJZ@oQnM7J)ZWQ~HxbqXj&)^t_dMqF z@NQ$=(!BIO`aas!yPR#=?J#x4Yi*o&ghjo{2ib36pYd%MzHLsYb#UIBJyW#xgCn)y zmX39_J%Cp1ibJ<^?e19oGx)OW=z4?=_qqrFu(jV}G)?HYr}M%lb3OjEU|buP_GFGh zdk$Z|r1l(xzZtvLy8>Tg4OhUEuB%ObjEt$mdLr7&UfnBpBKt0K+Tgp3oJ89Kahq&S zv6r#(oAvQp^o?NqVmW^ZAGwZ+w;^-5?F+K*Y+C;P!(noBA94n(*K6RVFZ)4EE@9hL zKAYnFriIp7^6_EVZYFDwntOiq+2_4s{H?LN&y|j^xffKnOXUMwDkOARh38mGHD zUPIeZa@xVVg*fqFQmb1N>t9*>4UUgTR~O4n+gLhYC7->SZ^(pv&)xrzzvruIa^QLA z@;ly4e=Jw#Ie#057WV8QsCq76K4b4%D|(y!O~hjpy4&+(3$!O_?{9s!+Vb^(Hky9V zf|4EGOF04kPq6HQt`$9O4bKR&IcUTwYtNpW$v(|?qjNNQH$IWO(UWT8`?c!XfBHL} zqr|0;zBX{3gho6eEfpQF2+UiB-Q-z4SWUIf!iw*NJ{ z=YxMgG)JIE7wzmG&vwRU4|wX@da!9B-Fwl|ANFIl-NJY0>VJof;X|9m_b;=e(xMtKf10v^6i?nB3SNndhrqI9)0BXf%rhNN<0J39 zXc|8Mk4Sa&0Hclt))A6={LDhJ^E z3dUbxS5snjsvKqfETNZ<%CgHd(t`hxQhyTlgB5+#DA*r#nZ#lQCp`#gV)WemcQ6fqgC6^Ag#2!?r%1 z@-caW%+uleFaFKsMI)IHk~tRdOgvY^(GJf+j<2#!mlNTgy^HCjBm7>Xyi$J`dVdfz z*NX8A;vkpG_ZzBjc}BrMSIYgd+CH-X);5g(gKFNB*4Ipa%Y$HJpo!PAwSnn=abrvT zE7^nT-3`xyWWPb5`kd{pp56hk>*BPN_J3*fj4^smOc#;6Gk?%seCm8E`^XLV^RtJv zZPQrI*Xc2A3hV#rXkBlF>;CLGbi0x>ls|frk1wUWy8r)=W8*uz9X@#-))~nGP0Qc) zvOZ78^V`L+>??;}skzU~=D~QkW5aQ_%jq@V(|zFkko^AIjv{9Px~cXJaO~oEb^Dnx zh+p;qe@)|?1Nj=(H0Tav-x!Hc7Q;Pk-_f~^JlhxDJ9zjaQYZ2u&IiF^tx;J2XXD8> z-jf~a?Sbc~=C=wSBVCx6?~@r3rdFkVYeb8^3?e{c5O=-Bg{ z;x^d3>bnAsJdD?Oe3y3boP_^$O}3-&QZZpi?s-DU`7+e(>CX8htm(qLEAiUsR;pF+ zu15E$k?T1AN4^yLj$qR^upf<$o>TXJA(|7&+Z_G3&iOQX(KZnN*?eN$hde3U$ccl|>`d=V`a0PU zr?(k?Hf58%S79tB`?_A*lV4W~zQs3%Onyr5hGRe2uaV0KS!Zpr$n#_LPm=$%*tNCh 
zmp7p<<`>K5wb^qe*+22~y=2L&#QRit9GXTmT9-)y%0;`{?Vt?JfcVl|TuH`CANM9kBY%oXidMthNY?`OVK z1F~iH>K*p*u2^*q;8}E;)Lg5eS;Ur&=#__Ij1~Axbw6%~Zyr7TUdXxh9{DnjEjKm# zmcR3twX1F6`&kM4WTI^c`q$TXJxn{||F_)#(D|A8jJdQc|4yo_$MAOGt60tLCoU7w zj%cdrk{$52Mf0S#jUCH_;wXH^YlOD21}p9-YjyfICWo#vCnG_75nC$Xqmje;SpB2X zUIz0!YUbr=*q4ZDITPO!^kSd#T@>cOwfJKIx%cr~TY0e?49Ce8ehJr^xphiTEdMzZ36{O};G#Gg;MtB(4HWD>Z5^ z+9&8~P4C^Xd`|9o`g+o*=2h?W&w%eSHqn)<4@npM@%rUQ#NO;Vo7~N};qRZc&qA{X z-nHqxl|O=C{vmUac&>%^12!zJd4}is3V!E0)xX zZLHn{%npQS*IMXrEh;1~HUuV<#KFsp9x~Dvjyj|h@i%+jKUKh~chpcsB z*_Dr*=-`+95Oiy*;qPc$*RiozcI1=S(IQBkN2KKdK>F?b98{S8$zPIT)OnM;w$1kr5 z`KspA%kb6MwjKRL(aVE;nwXrAub1{G*!$F?C|X3`2Kp}|>wWrX>3>4otMs+1dH&R} z{J+^Yg`1r44a=mTwdQ_!`oh|auik`{{&anEx4`qYSc^^BQhz&R?hMB-z;-+Qmy$D* z{Lkp?OWq!EG}qRvUcC!mp4a5rgYB*+^N7CZ`B(mwhrt%3-^pTs+J?EFZ)B zckys`e~oUmnC!`?J1p{eoxetFJIr2xcy~1kHV>h16BzbqpZLeeYm@KA7U*uH)4C`- z37>kByrO-g?N9X9e%bc$$?_CxSo1lJIK!|}+wxQ`s{5Br96 z$f4-hT9keY^PzNYFLo`-tHV3Lsd~;|&f~|I={*MjX?VV4|5C9w-{lwLyPEy0^KC2o zGV7Kdjd|lKv;NQJQM@VnVU8Ywe-CZM+|L;0ho-Fq+bIPu?z zOu3c4p>J*2+Td*mUt8m23fg1&eX;W+&<(^hL=LSR~SPNk#&Ri2KMcO zz8!k+S%i5YeF5DFbmpK~4XB>?t26Nd=F)xT5uD|J(1-i~JD~f@@dfqj8(YPW__c-W zmDqJrct|R3^%+zvw-h+yoazks+*(uIyM)6==gtREvQxB|4cTp-%c#T+^BYy zyU`VL^zmRbe;?(XJ&{;ua=tjvx!g#$B@b`W#E!FJ-<4dkOlNBU&0KCxmL26D|8}rl zM#qpk_2ihlsx@s_etVJsZgtML+1L6G7jJgP-NbWq$F4uij{G%=4mnnC=J?-qss}0k zaVs?HKrxd3AJ}jw-KpGWbMieJzKK)gryYOHv_|PqPII|)ANr~KS8(2pynV@)pYgWr zTUYLO(tdb7%H?4pM$vuhzUPUtwO$cyZ;+>V@NL(I>fUKAw@NiXTZgRcV0e&SJK=4i zeX%X#%XR6qX3B5CcO;tc#EFh#MOeDyo2*Z6N4@FJ8>|QU(43a2-?^NO$LKQ;#aEdp z9x*q3kAI9DUR~cPa&E0vzv+$XNj``DD(7D~-pIaLy}Eys-3vS0(o<~PqY=A|J>gzl z`Z`_jz&4W1S@7)1U%`e>@R{qAtI(-mVVz%e5!(|NdIqGiI_jl zK5Mu14fy0=+KElupdVpB6`mg2S0XF;AjnYnqL#4D#k)4Y-z={#RU>YoOTA0y)O^EA z`(U}SgTB#h87OD0AyV}`p2X&3@wVivN72Z;bVWR!$tNQ`<0w|6v#VTJ?=x}Dtmk62 zz6}0asb2kdC2eDGO`aXBeQW#!?A8A847H%MFpiQCQ!%UV$&ROQ7M@$^Jkxc<0yfL{ za9=)pUriW?zd7E`)!b_wzly&P-EG*i8~=v+p$%KU;dAq19>z;+vetuXf|yvRRQCyI ziH&FWNzC?J=%b@}wyC<86&(OWTWx$$d>!Ipe*k{@9DY+$G|BfL_49xJBR%8ws}14) zcQ(L0d=kCa>o?A;XU&J(?+4o@^vjDxorp);)0ws6zxL#gV8>0y{}LGH@h|M*K6UmV zJintqnr{c!D&J>MyH}wuR_}3tFIGjZde1f7n-160Ne436AKeYxLb({~z`onKCXnyF z_)Hy19>Md8%ZW6fbAjH zy~Js*{%}nz=0#ur>Q<|MSDidr^92AyJ_4CB{l*!Gb=ShN4grEAGMR7_phq^?QJ*?0%i)kQzq z@l*Q6AP?iXE4qi!eM+Z1PS+#Fxv$Xa`EYn9S9;GPjK#uz z@nQ_RA#@F9)9En%*;uXr%VXdgKz^{{1?wC3hWq|yO4hG@IF8(B>3JDX8-BTh4|YTo z>{r+F8(bG}!frAP<1hRkHU0_Tq53+&zAm2j=&fB+=lLu0^cb|CFRcDMA&)bTbZ!X0x*V>Jizmh9L3^^oJDnj1@<$g|xt8rSz<2W0gXiGwphkR-CvK|h zX--B&es+ZONqqUS|9!m7Ip4>tlfSO*R<=&5c^1zfFvWf8Z-s_W3h@v34fB`P;!Wta z-U#cH%$ScB>R*EmS*^;4VlE$!A}1m5F1Cg-zs!Cgv?rl?O6a{QZyLUYaxF)r{e@ zV1HNNXfb)(c-l!`?OCtBOCIiJrB~9wlI<#XS^r0JG&y5CYis92ZFIzg=$R-_MzYU& zst$+$k`Hrb`6t|$FRY$vSNDCpqniM04#yy|?TuzLw40i1m%%rTtX*L;PiHH`zB&FQ ztd|}ZgYRwp6W%$^SD?4F|B|27fM_}&g?V}pvVxCSKr1KnSNP*pep5rj`af=M-x81Y ze^@hx_y6~sINWD^kzNqwV;E&h+B`;Pyq`r`n8Bau*1BC0E^y;I`*aoam8 zmD0E=QrdfWk&r?n6uKy>q^ZUC>qdL;J#KsNy?*_k=lbJ5?!D){$Ll@L`_S~w0w){GoodzdoAACsd?Q^u)>izMtO+|eMk{{FCwvp~YeXOK^os2b{Lqr$r>n8M zux)Spf7HJXJGLk9SH~mKoDDyDQ6G3l(bWjuH4UC+vbBZ|p46Bs5^~?9KVbaVygnSy zFm!Kg>s?pAEJmT11L^1bzOAd?+bLQ*5A~{XeRaPn>HzB)`8C0q{X|UqlWAUy z+L%MSisOg;__yvS#ftBu>14~7sEs_^!}%9{awz(c3ou6JqGa<1 z-*a;O7(0%1zLRs;Ny+UE)wfBjdo9NsfBUfE5^*@Bq59rz9&%AF%6H?pKdo72J0C%| z{Life;*n&mBacq5cm1fp1N)}vlVg!MhHtu-XE`Ug_(BYiX{hekWc-oHh4>gUjzwb( zRPS032{l@6y^%lqu_HVqx&=?jw^sbw68?S3x>VoJ#`O0~s^4Z8J?Ra3xGC(nv3o7@ zTf%BRQ>^WHq`0hM9dI1G4kG6&eXZ?h@q_s`8Nw&O@n=z!6MVTLOgTSaiWg6Eu9`Ry zZ*#gspE@1Zm1S4BzmvmLEG7xr<@m0nKX0dJp#WV*8=qKW0M~#`Rn>_9Cl= z{YTEPu#ecXUaK{Vyr#*Ylwe61fZo2;A>x|xou&T@Xd_I4OFAm@@{w~#mu!alk 
z>2f_7J|;(9NXF5B62Hj#>bGgPqFKgH*AGcde#rBE$qu%S)nyS8!Ci>&Z& zNGCQeL;E$}&T{`D_8w2?mCjdX_jb1Z*zg8_Y)(eGQi;Cd zby2tu3^|s6!`}AVbN(F-w|orsG&?{W8;W{5#J4oQV!0CLu#&G5YxU?L=RNs%NBQKr zO@5o>+sM6;{Gn_U*R%&6H?Z+eG&j^${}mJd4<_7KFFM2bu3D;ol>gux#O^WZO8njE zUIWc8@a&22VL90ZT?=_}1S}K9ZhhyMvw0hNMsF5}&0Me_Ki*2tD0qG+OFhX4+D9-= zV%IzLZUx(Zu(g(V?lYyXdGoi)-M&`+<|krvEEmH1zxd7XqK{G|C&Sc?OnFdF5zBpQ z)o)*uE$r7KcQjh#I6MPNma5&`vOA3X_wASJZ>1L8Ot%ie)?R}} z4bH#9ce?&jY*-miwU)gpuzUE>l#zjXMJF#`Jc6y8Z951BfLw1J# zv1G68n?J$#hr+UjYk{0S`;$9^J?6gTYIf|ycSHE;S^C?e75jLYt%vpl$Q7S(?U|}A zksJx%gpA)o`yrV}>hEA9FF9UJSH^#%v8^U2`@*^wnuB0^4*w!_^rzO@koWl&6Ke{%x%$5Fmy)KPJMdNm_NxeJo)LTmZ1%z1gbhPreV(irf`0b6mlWO^3-vrYmdtJBo%uRF*YS9K?*Hd} zm4rE|3HpvT|8e2?JZ)_pw?`unlTYO1Jo4|+Hiuttr>`Bk{89cZ{&?bgHR<#E>K(B~djX}%8MX2_QFzuJ)Bgl-ZWPSyVa`8U%u zp1r%!e<%A6skskfzb)I()b}AfmK(blIKGX}HT7>`8(hZ*WA1i7EpSsSX0z2()5XxhsA<6wA+@2>2l?y4<&k$JH5Au#u{ z9|?D37>1+U7kz8{$?ktX!;Z_y9fM{+vgwRl!91Oh9_E*C>At?fH-!0hdv5=XdmtV8=R+%@a9!X;XSe;4?<^qpjPlN1|TzTq`oGw(x8Lm7#ND&2v9QiW zUnApm{TtQ%zQCGkS8eCPw5Il*(8~L00a>%@=~1i3ecnjS%y}t)hxcW|yN>B7eozC$ z^Zv{=aagBiJ^3=>(;vtVYau$azwul_UaC(mEX1Z52>*u8C$sMlx!Ie%ZM8Sy_ub)s z2Zljp{7(!saT$u=bCGcWKOTy%HN0c=C-}dizlpYM#Vyz{-uY|z#60U`oXCTGEUfB! zY>vp}K)eN68>uHBq4^WuJMh2DF7+mE1CMLs@^SQoVV~~23B04!B>3Y`VLgQIus%La zzud^gq*#w^{welusJ@Ai-6xiJ(p4vpt+a)EXU0hQ4MAAz#=D{YKwlr{!`Sn$`gBvB zwX}Bn)054DS(ywL*BuD^f4Q^o}2I3x+EIjP<Fc*20qz%1*Y&noe}>^eEn46pH-?h*XJZ(G+qGZS+)BOj~(ax!k# zZ|zh*$96X6U)aB6e@=tn727w)yDFN^Z13~&lkPdaz!#g~eYC+dVKi@(`&+HL{}OL$ z?>Z!WS322ywWOdrLVL9gY59HGVzv&xhCgKfaG|ht_#6A-1rkA(z#$ zu)Yh|B=J!&|3dC0HisC?t?GW*;r!6dn&WmpdYtaoYc($L(zGt+5Oz)=Xekb=1+uL;Y5BlI($$Fv# zd7a7bjz&%=1sl}j;u#qC(WXYG=9cJowqAtiZ8hX;ZMTr+eS_jUea&IqsG<73KzW8f zG0L_Rljq6c+j4Bpv#Fwb_bNJojE%kjGapS&AO7&1zIq?+05ayP%^$+iR^NR5$ME-- z@TFvITdUT^(W~%pj<2Wu`wZ5P$Q)+dLVu3-G%;O`T@R6E427DWs>#`w{P&9EJJ9h( zVx3)F4ySw5@y+P?DSH~Ov&n7Fj@RW*#P=Q9w;w%nJ6uO*tK;=7EgNKPoXUPNsOsNy zSw4Am(=;af;uD~ zWjFQIHAjL!y#E}Yi-r5H*<`W0Q2(K1J!-!|`x6b-`)g%WHha#KEy1_0Ic}gnv5Lou z$$jSDgN;di<*^Oy$1@zp|JC_c2)f^)mP?c}R>)vFK0kOzrY2H7=tI;M_y*TAL+1 z+vH*XA6+wHGhSlr*79y`Yl?@QFCL};FR^Q9%`(cln1tV9gx>_Ea-pb$CDcW}sh**= zY>3LE=>36=Yti0E_Dg7XgCX395%VhV`PONn>a)*$cOv9Twc)y>%0FJ{|%)QnL&BJ!HB;dy*= zEI$RWXFu_(>iKmrwviv5<;)grt1}MxJoWxhs4uDf%Epp)sCHv3p(E}|rnPxA1@CwA zq&J%F=z3es_mfldvQ$Ts=5$?BtG@A9c9ysM^2uN_A3`@4roL?Xk?b|KO%sPT9ABl) zwQjtV{Q+W|nv*=M$Umv`jx?DolkMI8@IA5gL$>}^@7s&?e*ycdbTwv&YrO0(^rx~{ zeDg5AZZU7PrsqSn{pofuEA4CFgp3XKZ-Dnj=WL6cvhNPM@1bi~`LvGxE!IAV^YQuU zjFYm?-aMJ{N3NcSZ*;{sIUhwvbFnjy%Wmv%sxPw*mcOAU-k{$#W+pDhXRsUxQ;<7f zj0XfgX#T9L-mi|&A>$14f8g8e$vTP6>#^^8{y2#*_M*q!Q)IBfpR^L2?eGsJ*SHPO z9nuz#L;lN?q?6;@wI5E#p8RsZoRk;oK)LZd{|sWg_~fa!-`Fw>&wl2cjLf&t)WdNp z99`M)CYm1Nc?#Lyl}`GjSVPHhEOH@P!iPFYt*ZyN8L!Xp2(33PTL z^GZ5*)kc4*-WQGV?bzVj+T7Y5KJ_l#r>OoH>jmfLzhoLZ^I|Gz;$3QLJY2?0ZXASl zQ96b`I^ucyJ{9w)$tbjcNG_c5X>^HI(u9oF@NH%P8XLV!9zGubdY*?GAiGv*PVeVK5 z=BveM0iWzf_t|(yH&p-a9PSquAujKrB{QE(#%=tyG8z1n97N}Z^5k>2g?Uj83pkR6 zXrHDtf$IQm#!`5eQI3UuHaj1H>pa-c)pnr%b?t9TGQW3UWiXT*gLxG!8f#~ju~6#$?*I(ZZEd4 zvzfl^V*IPX`=IvY<;GWVPttxj+Wuti!WU!NKiB`RJ#VZpa=t^Y`hSsZJlT8`awjuZ zibLfx-f#~mHa=5hDegtS_@`To(*}4a@Wt`k!dPh!v)UTg|B#N@nrjL<1k&KaQ%+F}|c8y})n7_c69gOq;*z&1$!1iQc zgYR{1&)|6s?lbDE^+_?3tq1VO8+3e$ZWyd1(0;})ed!hU7r^@jn_x*cXY20`u9x6> zME;)u6CB~2tijjC`u4_eqGpG>64pY+ll1QCcq(3b60Y^qA@sbXetd-YR({^u{fMdL zig&gZ8Zj#_BxgRX6WI5d7~n0&)zp&}_y5B+chS}Ff8|?=r{w#jrTI-Qi^MM4g{*IC z)xG6(j$A&JuQoK%OXQMUEb zwBhI1@z00fSWDe&%ZJgs%zMy*&sYxc?uB=e;x%Dz%)h}WtH9Bbp37|8vab;t?%hV0 z$*#=aoLEqbcI3F3?X$Le#6?*aIpk^__3#IEuHerkbdSD0U=?^)YwWY6~f 
zAn^_N9n+)fxLaI08EcoKJp>l(|D=`n0XFN*s0}~CSPa#-20gphdq+#6xM2$|>#9|Ct1LTeIl1}O6dX}t}*tRPFvp0i3 z>+XCAoL7@~5PRl`!8X?a=hl1=0LES5-GZD;$jG#9T<4v3I@aTdmbQ!V!CSmaP9Hj_ z;vdQv%khM96vq3>){p~WI!sNNiD#PQ-RZuQ9Y^c81`FdUCM*71Ec@Cnp=%*|{Flgq zaumG{2s<@~oAU>-h07bbE`zGRF(yGJebL(OUmUOT_+O{l0r1 z@PzNK#8;_9i~YCMaq>zm;*>w-PCT66KkBM^H|_)9zj9z>HTp@quF!V{x&8SjQ)dr> z{Z@RRvFRtgA&0)Jsc+)a&G{qF<#ebY#Z7W7JnQ|1uN(3EZ(_JEAB1`2NO%YH`C$Ir zMO!<5fTR49EHOyt^B?~ta;1p^GjyVjo*=;LNB=xB4q2IOBPhb|8^HSdP0-H-4CAEa`lXwy*r z4l|!(%(ryD2OXbkn@+dd9NvQu>-A6r(xkq6N3B$sBY9iAM*ga>xzC>sbIzW8x@{TX zs4cPee|!*M?dtddvF}I!%oXyg{8$@?pR^k%;eKPhHh=WgHp5;#!nlccf}>x9XP4r> z6&qf1{y8i|>|OgMA@`=~5AP%%Bo^KD?LyX9S=#C zC)4phzCLJ-$!G?={jC|^X2(%zyOVVRT_>?KJiAm^qvLAci$piZ{+Na+`^C0yt@^Lt zcy;pV%B)k;?a1J>Y<>Pdk1sx_XW!b2cU4`-h5P@-d;Af>auZD7qHPAlziL$@^b5rH zEV^^~AP36Q`t~dOKR|OR`+Bi&Z^!@OlTT^v*fZ~gJSqVw4B1Ddc7vi>i(pmRF;XR`S)F`R`LP2LGjQ<%Cptau)7 z{hy4+vx;`(ExbDsbtLx=?PK^RVb?42^<8!0VCU=GAIILa(W~p>{i$3oB@ZoL@%@yS z>pVk(eJoj>(eP#X-_-0hn6HrkeaV*>SqFWeX}_I)-C+-N_o+5xv&i}BZ+YmQnQ|5K z&k~PD`j*)W_p)IP?`@`Y=#kfnYmO|8r-9nLi0PLww-Sq2`ScESaD=*C9xX1{^W`)A zH3FVq=*%nWx^%p#-!*A606pH6Pr~{?v2H9@L2J!ZwqN4j(^B_(mzopMPDdlJGWQ*` zF2QED-NaXXoQ`Ib{0VE3@Efw?cYVEK^81f+GkEJ^S(#j8vwGiDY{T~mQe!FWO;(M6 zHs$Z_9IFfA`Y;+nHcZ9M=1N!F|S zc7w(GI2uRK-Nw}Ky}hF}-eD`ae=bGdY(2Zce-iUvsoOkhwK1J!{pse#)KM(O!F4|EJ{MU_DH4c^I8L zs)hAz&gGDMWyRsXxA-8O7xU|Ce5EfvImCwDlbqkk_7n8i;h*jJUh`fnG;`=?Pk3J^ z3Ui|Rm5*|s%M|DN8b z>9UTAp2zn^&AYv9kvHX+u2m+o|0wu&wC~H`#%Xm8Jb|8pYUy9}Ze;DLzNIgt+o-Pk z4pi~8`goZ0+4@~u=Fh;o7>>)dt!aNHdHrCyoS#2t(+RL$28a1J3VuG%-dN0*!~X=m zTiE}`Keymrw>*hv!r0I8lXC9^ z?F-1;PtH$+aaHl@>HJjf!|9z1%O%#G_rt-r(Rlisd*|mWG=0d}u&8Ql!nZLWC2O$d z0zNwumTU3+iau`eE(o87xiI9z8TjgK6Udt<&-&QQt8hOi{4OycL2u}vhi{~qwkFGc zfb?4Ohv4smZZ&*w(K)YHt%Zu!(H+jNhta_tg&cSS-(CE?2AU4y(Hh@_YLD0z2a><7 z_P@wkOO5i)$>L-_d;|Tu`j*#wrp0&s7vIZPVDaAK|*;1bp|yx(wd~@@Nx!kC6*a(e;oo zZ_@XaO`S`Q-KqMHSNf|M9S*}|YS9Mj_Xzf?bNR-6um)OnyZnp%W_Z@pE*25~@P1up zJSFOK7)SAa+D>8Pxp1wFh7Iv6bViOR`~9gp9_mPBtQFnh|Hytc+2@;+&F|$Y@@I~E z+=O4<(@5Xu%PnmDTAa`3F?iJSaL*#Y+WP-Y=kAG?`|{mZa9FR$XXEVz>wd6xgl82r zBgKC$?eD^~7dNG?OO zlD1E~)d3lx@q& z8-U-nU0N^p^R;)f_Bevx=h$_%IJmc*ok!-t2KW86jpc9SIQ^NdA!N7@9DNIido=N* z?7TwT$OiYiZG*I*%x4Q6!ybPJkD6G$*SwPNGIxRnhGHdjYIykGXZ*c;Rv)teJAOP7 z<~HPAfd;17`ahcjqw!k(W^x`JAM4*7wu{BEF&TVcu5W)a*?*cRrit|)^iM)7m%?>n zX-t&rPca9U?r2-`gV-i}S^I?e_YqsN@*f16vaE-*$E{7S zfJrR#J7Ji{52NU^KFQ^5h5tTz)`E=Qd||9-IsckNqt{>*kK7s~?g7&z7&g=246Qje z`$`?K&M%Lpmrt`P@V=()bGX`zjdw!w9qD?A&JFF2&wLj&>Rqym%^JL@#n-1q2vCo<x5Wc z$i=ki1DCu@x4~;34EL6k>$I!?@$TB`j(Z!g-r)-WXHp&o%g$_8pYvfb`~=S)Xpckt zqw8@#4DUQ8f1vG5=45?qq8pFyHgVmOO`F5oRxLS|FFw-02VS}p_o2e~phL|G>yC6L zykEfHiq19Eh5zgq)m6{b%IWOtg2!{&P3uw!6ThZ6acug~81LLd8PiNjQDiWz)-CA-4C>Sl7`2|Vh5_E*czez5YNW2J%HzQZMT@Stu4dvRm1bR{2DrqpLBn;kC5|;{So51wQ*9S zj~cvRv?4Y!oAZp0qv@Iso0w%I(ElOUzmc^r8{fpck2U?VXjXzx9_Lrl!6zv^@z&16 z_XW?C?;G)ryvpYCm+@SxnIX>UXfhw5Gj=?jJ~Yt2fuD;jdT29{*ZEv%+qvPXkbDXb6 zFARBtZXsWsY3>Vi_eSEgjNY5zd6EDB)Zd6N_`f`zFI;03e3jfO$Jm_yix+<>ACr3L z%itMG$6b7J16l9G-5k9+Fn$nSOTK(v9Cy{;Ml25(!(p(&9PWqa-L!vW|Aw5|iBE4L z%X&22hcEsne|LJ$WM32XYFx1^-hW}e*?u9rtPj$G`1mAkhwdeFl6s!sLGE_sUW;!V zavIy47qf@ZhB_bgbec1I(ajL|6z-F zXD7eLyPDgMey%^3wusd=c}-_{1r<*@9|p3(T!_`EJ)lB++$_bHiU^zA{WUTPVeJl(Sp91?vC0HP)~Asv>`oP zJO3Te>f+pwFODPUW%TXoewYoT^dE)p=7wr*SzN0>)VR&bAI$$7kS`y~ndV*o&F-#w zjxLt?dxk;A?8^sZDWzTOYd6h5X5Tvtzz(W7tn)gLPSQ2uyrfd}(ePMt>Xj?1xW$iU^PR zXKVAzNA}0_$G!B=6SrmfjQ{FAgVgh?J?VK7-W$OMi)i2vK7ok#vVuzyeXQe$EgZ2yW&FKzcZ7OV6TzB`u= zHkXg_({I2w!;)6%<@l|pu z-a%w-!YVU1HZx`*Jvc;*&At-XiWz+#h)g&ui@aRZRYaX#~2% z;XO{jdJ@Kbm>Y|!+TP=9F^E=GcjQ!9d*m_Q7dt)?rZIduK>rN1aF(s@FC)Jd47bp| 
[GIT binary patch data (base85-encoded "z" lines) omitted — not human-readable]
zx|@GiQ~gGxx?lDqo6NmokD**F&Q5b2j<1$4n!>+pSKo`E-;TaHaGV?L;`cqs+>h*H z|LXi2-Nz2~JwF$oZH)aII2Or|2I7BHxpKF@>tQ&B9Uaje*JbT~^f*|brT=8I-lwAt zKjq@^aK}nA@ML57YPH(GIgIaVKZPCr@%4t~PX7K?UbcaEfVRfyZ-@7HG_Y6qO5|&Z z{V3$Q9ICEg`{3J#FCNl96wj;twE?@wYWo1jN7Z!C6{WEc-%&+V9qL}ONE{3`UJJzy z+N`0v)|M~cgMAu&pTgIRoz_OtX6Rpl<7GC*npWuhIm_GfUE{OL{h!X}=j1zlYEaY= zul0YrDZS^xwXe8pr|ngIO~L`Eyb0fp#1s6TiI7_lYHQ@aRjv8J*sW8ycE9{iZBLPX z41MP2=m+@aWG0v5^U<|s!;NGQ;``Nj59FhTzb6x|?Z zGVQ(Us_!(y{(l&&cu(VUIk}DSwxwgH{yq5kMz%aIuGG-HAU{XH8@(Yf<#g6WjJ^%q zkB*3L`>Wx5*f4iy-cRM$YS9eWP4TSe1MwOM-);zl`W^N*8kC6#pbY@>U+YR-RVd)mymZMJsJKFD&Ds{ z*5`xzY`O*CHt2`b&7S=Gn(E&TDQ4+AtWI6(7)6_qJDRLntHSqV>?+LB@7473}(2uJL8k5$36UXbfX9ki5vZo!Epg{ao8! z{4Tzu+4NZxERxHw`Ekz2c3Jy(0@~m`kzX!gm$4}(YBztxbIE3Bd_4N2 zUH<_?0<-mh){y)$^fXYX__+F>=^C|n8JoMZ;b(lk>2)8A+vuX&CA&^urf%cej0~|*?&)movUbM1 zPw)d>-C;I{#hrE4eX#6TG3q7JV^VB|CgP6nA3YLiwfhB^{-vK|9=|3Gud!xUG+Cq>Ery{ zlb*-W^niNyW8d#~17WtuD&XaNMG=9*l{;&c=S2`-Pq9cWo?_efT+ydp+{I zIuD^^BCL;)JBW_Y*dQ0W6O3xwMH zmi0~-eAdu~F-RKYdzw!U#5=I2T37SsbnM8Uc8=@#fvwRnGUP{cviacy`mAxYCya~z z=XhuM`_Xrp@w)@AkMMk06Q#?@JBR$K__x-MzX92v~QGRa*m)}ffN1_pb zaR>Y(9coI5hj;_NV?(j2@%{|o95%G%tGCEiKl8!(UxN8t^1ikfUm!*8lNr zK6n|Hjh3$coz78YjnRK<#cz1T_}_HMuk;Cg;hL@GHkIoC5mSACQ?Pkg{%O?3Z^-Do zQk>k*h6(r|hHW$%kI8jo9GyqDedu&Je{?42D)U$PhU_>zKN(l~nC^panm%nMPJl*Ico-Exls7?2qmW_B{_%Bfk3@9Zcn;u&j^9x-RMG{#5#Qfx{T({F%u0 zw597Fa34tDVziUcZ6Hq~z6iXhkhP*q^>-(+?*;N6WHp6Btd!S>|AY=a`ik`vKnambNl&j_0S#{O_<0mfY|Lt+liLi+Om~X@0 zSJH{iYFRcOrn|-a-s~`sh4-@ISuuWATx_=$aq?Tiby21IH;qc;6nshIhfo5R~Idq~l;_0|{N31PZIz0_@VmdH zDIX0aOWuVwXiRSS+fTVUD1B7UM{>;fk!jAJYmLqEu(#%~R&;NJe*(Er!F`Ip57ET< zFQDUe$4dHop&tp?3gfhc7+DIZaVXT#uumH1?Sjp@aVAL{m2ISAj*I%Fir?A|}^$4<7!*vJW&zIwU^)+)$D(`w^Cos|JMIu&nY&R(RHxSL*I|xFS=jD?^pA4 zH}{*P*&mh+z5B4Q7==0hF>QPn_UDpUmR8^GB(2FEg#RG&-lOYP{a-teW}E!a&mqqk zN43VGf$Ov3I@H{_i?KS;IMqjg61_)|Bah6o$vN zo#*=dT7JW`72mYiCJ*Ah@C|ad|6jbM?a7G9JP|o7}zWHRmNC@s00) zlK1p|%dUp(?_~a%Wz1mAw^LtsLpzBLk>f))1*{wMqj@PAFZRxI7US7;dafkf+*f{M z{c#f+x5Kg;w!g{Rt5W^DGWp%EFLdmR=L!8Y(aDGCOY`=_{CSn@d&n9_*7a=SFMziA5l zhS7h?S;FpaVqg=;2Yj32d7F*bv3(1^Z-i$6`H!35`jB%Zdv|7w`J(!KyE%K?k?}aX zU)Xbz^X_yWgwEP4?16>ve`5I;?V&B?+lY#JQ2X_4kf-5!ChVQ2awodXnSX*UNl(Wt zdcNYTY4F9aC(se%Mervfd=;HnQh9F2VmWI_rc|PL*x-n>W)Z==hp{Cd0P9 zd|N}_Dt+_ZKTz=vADZ)F`jYHV@hza|8vWy7TBzNepNoaCkC64TXZRpJd{MTli<0x5 z&(*gNnuUDV49#`w(3{?o-HHEtHiqxkLj5`lE@NMw;`#;l^+zxMikWmA;aILuUgz79 zU*W$V&M&Y}-lx6j*x5Wj$MvIleil2{|LMN;pU59w)!1%q*#)Nd`sIIi6rb4B%I>f6 zUX5&Pw&XH2r@Gz-T^}-jptqI&iLRSjkBjlJCy|+ZlUlKK3>$ue`v&pyynB9)&tqez z-I^e7%a&d796{f0env7#1O~e}{2TzgvQ*{DR*% zbU%vx=U}-KPgA_|D7*{E+K_j-+Wiq+pBuZv(GuRX#K!;F=$oYSBJ2Ns(2gVTeZ2fz z9dGs(C5(^jyNsqJj2o5{Pr*T9r%KOlRoaBFt^LY z$ofAGW7dQXe;bQ~oJZ7I)6ns#^WYlm8M24q-GL8>YAa!y$Txk_Y{)-*;Ja7cRp@%1 zKTda_jEmX|ZxA-WFu7g6VR+*T5mhvK9FE*Dh{@AHz3^(eSlL zdV;zjUXyLq&V~H$xwE*D{q0~m7R^|^@-b`1mM|8^J{_-3OvG|KUM?rX{?>7H{bTNY zkWHQW`spRrzq=CiRk{iOkmvW4CmzC>l?@%%J;{4yZtf5p)jj{vKDpK&xbgUh9Ba<3 z89ucmy^DVv`3CzTGzFf|ed{2{lLO!w#r6=d_7{_XVf_pJ5`6W=)NN!1{MIq)po%e; zThAG%>E`}T$hw-pMlZ8o!LJU)C-UXX^os9bWAZB-FXNA{{6TXj+=D~13{|qDZTs~2+ zLcNHeMSqqtsiVW1yt<~JEiUe%>t=J{PWaS{Vko`6DxN*r_q+K2n(R)FEAg8%(w_K@ zWA%6K9q5!3#ZKgoAnP`|E zU+_nHy)n8p;KR2O`77(J0pV^TkNc2)Aij=8R4#|@(2C~*-vVuF4n0@jB=R0bznS`a zB42E!zZYzy(GG;k^J#Q1-%ZuF2R!n%_|{n6jIRlQY#|={SE~CVMOS_1!_43RDQBR0 z1is7RJen*ykxV5YhVn}~>hrJnTH)Pmav|S5fe%ku|Cb}^6bI35VssKazLhWc(P1tL zHpaclIWO2{%;bFB7?u!UM~aa-72m4C>vNcE%H1Z zS^t-GhyC*CZ1;oNzaM+Mk$FOpYy9fzpKJa9Cz^(E-Ad0D^qv7vE7-gGeL~PPk=!qg z?QxarS|)Fb{~R%QGwko+L!Zoqvjv*nwT*{S4i;i9m+R>i{HLSai+={Vj>wtDhURqa 
zCdV#g`%$oTrt3lN^U1pi|HayNab8rZu4&Wn`L90SkUML%<>KX6b)`4F8w?JK+g&ZLABio6((%&ty70Gu7s+;cssIPoh`-iXJuB1+4e;K|{1~rVrrj zqR(8Knt!6^V)QL~aVNF4gSK(#<$LlD{MHGndKBJ~q)+k5G5Fe( zIiphj+w&ow)46KOG&p9m;}`ArVxs01e_K`CWHtxvVmtdtT%AeYvE-|3`Qn=DyMbsf z8^vuH`>Ye(H}J{FX1U8=twFV*1^reZhp za} zbnVyRH7?b)`L6g5)BZBs#cyof(l@PX9>P~g_wU;6pA|Le?ZcKoSodzr9`eK5tL)46 ze)xJBTXiFu2dlah^~Nja%7f6|Z9Lv{?ymhd_`*Kco7z5*-|HBc#dM}*eoo%W#_An> zllZ1NACVoN<+5Xpd!lVubaFV<{OB}x$i;F?b{?&rU16>)c80ZL?KqUat>_xJ%zJk9 zC&@#$#s`rX@QbPNj9s>?^$bNO8Oc%FgM7J>)f1n8JI0gMPM`I9xwH09*y-E1@VntO z#7RWYSeV&VUP1OIaNSStl`yp@Z#I8^=KKSk#=W{vo3nolvW_+{T_$%=Mn8(L-__Qs zvi5HPWoXxEH%~;yw0O=nJIaRe{zVpjX~?nu=%Zz8e^b~(9a^oOeW`acY3}<<_GDr; zyNCP^Xz`|R)_C_o{y_33v(3207igPIuG}cPU?5wcyzsB-# zdbUFM0okL)!QS|1$m0R}YK`wqz6|lArsdZfYx8Cn)&wu(3FDA;u~rht){s~pRHgLMQfU176kD?Q`J^1TduoZI1% zAIW_%-e)Y`D0VrK`ExL~kyB!yasSR*z@lpC$g+z_|mOX6$>0-_?`i z5%-PB+qcHMaQY90<#s%G@rhV0_%M1L-Whn!p~)-ows1en{l+kKrt4U|awpoCyf@&x zkezHy_$m6h;`s+Y-~WU?l_dBveEansf9>kGAhY!!2J096&S%xkqJu@_Mv;DKLM8gegE?ojHm1O{eRKjyPEUinFxEp zFo}G%F;l0C-sF#{^Ia8RkA`cZu^XptQ~DpI=M%mX+xeDk_bxNNLH`gw+J#@ACc_s6 znei-*U#T9Z9oYFRJd?|*kg9rdA0$Js0Cqy~Kf)3lCJe!9NP z_o@Yj>Gw3C|WTUz8@&x<);yHww9g-=zoRrL;3TF+SOa% z#*Vx2hq-Nm>v!ot8@4g(kTqah1Ir4290&LBXhx&4-%xJ3r27A$q_62~i$Bo*L`Qo% z;R^nauc&zUUhy86y}j^v=J(C$KUSOlg|LSZ)|AP^>{+T!U5|!ovqxNxL)%qbKeEL^ ze2rrT`xd|^pR;f1+Sj0kqcwZZ)4#-Zz&u_5Dn7nk+uuv8?*OxbWT)yGzvbS2Mk`=# zi0)qg33d5t@-}m9z;`Rz^GT)pO?&)%sPpuzFX1;Wr5KDted-V6Q|LmD)gxyU_WeQT z;m!w?N9OB<3k zkB^6ubuC$)D%HKyFz1%J_Q_<6&!{DTvnR}1nHb4;fkRA%eeMK~)L2D9|3Z9&$tua} zgHBu(hu~SC+*Ww>XTOQRb@}!qIoNkC?kqJfgT!4=*gDpE_w{)7|G$d6k=F&^mTb5j z2H%_JTf47AHx%y)bnl2}cR1do>lnKCAnRVbThjS0AKVUq7@srMhll8>$9L*P6mloT z<{gfI*b;Jk3AvxpzXe~daj1>ihuW_ZlOxd_K+nQ}gG|@)b@WZ4i_FY8XS+E+#~1IB zJrh)_0h=b>=FFav^`T({eMBSuibA2%K&=*L*{Mzo^pQy+@0{uC39Cf^#D81 zCG&mpJOK^dY?Ue-cjlRc=DAI-U9+D(lX!uG)9yua$V&8c%d= zASXV>!;Z2EUtb6B0Wh3SM*(kWrz^RZp0Fl=g^qWv55wN{&m%DLp21M=h0tOiL$DmFs94jKHHo}gWfWCQRG*KgfWnqQ;(d~hgya;dsU^%9?sFlU^qozKF2 z6>=`Nt|@LNvoT$vt{m)oI~d3?*3pOW#Y6f!-lJU0 zn zcbDYvM;1HsW&C^yT^I1#R9NTZwKk8g!aI_Vx*B5$dvooF;gyfka&q{o`pw}{cpK;! z|3x#$A~FY&vojwxLH8)%_^vuyWlVR%8*IOT4R5JIXS3@${PH}yk)HFd8&4yH&m-++ zSpTbo;d{aG?2&}{yBr2{cS&z~4$O0mnc9>lm9>ArjIYv*jX{uIPoLkn6(`f}St-8{ z-`U#E7rR&Ef0M2nzEGd@9M2wb)Ti56#o;}@91g$fD0b9tZC1A6k5&BgHJZ2B5x(bW zincEr&)}K$fBG0q)~Dg`_NG_Ca5&n__-Z>cp7A~Vx^ia?zf8oZUMJ!u>_24Q8|QL6 ztj(hz;j6HB79V~_ZYC}={PH6ijQ2nE%FpWek53urJ>c)6?`tw+mjg zijnj}a;+o6Iy>Eg?+zlXmc4h<(Mk?KL*}-4+!yuWPWbtFbf>dTe1>n0Lfy$r*pI=t z0uDM8d%$5&EL%jsdXPTLw@2{b{bmJD7Lt;8iMASO7-s{me#(-Uwr&8+Ci?ZaZ-Q(r}zrbxo}zkXXkr2dXf7v^gqm&2d$Bh zhVKae8A$$MZ6CAQm}cTW9R+Wg_vK9Wo!wM6tmD4jGV>X|Ltx#Ut#n6!$_;jxYHvnn z(HiZ)#&Q&y?;4XY$(tzt!rbr(c_HUtb?k_Lg8p&-##Uo8L(O}WA8vqQGQVtqeysB; za9@E={f*8hQygSHwBN!9OUTaI_=)zmWXi3)DL=L3qmr*ict?dlUq>C-oo@&8sXCR) z$5g(RFOb<&yKydG5!?UZsg+-Q*We>3ceK&}9DKX-Nk_3Qr%HCleaU#s7=8)Q!E}6p zrwzZJ!~Pf5<^#xNYvNfu*%18<^FTBFyRrLKN8d{I@7-HeL{ZoMtKZe+cNml1@O9KD z?&6doTsR8k2Mf_=joR zpIq|eE6IADo{SyFA)Cyf3-EatS9oSGN792Y{|nuZ>b)3BU=Hh`;vRmlCHpb_>QTBM zosWw#&&bgpkt+@m4 z_W0JJ<3i`F)t5f5Lwqf%^LOFkYJ_$Werx7owXgMZ3UacvyZ%p>Z8312J5Qici4ZVBOxc$P4ix4tim&I{A_J? 
z&$-wgu0QcT$)I%JE1cq4MAYTpkY*wSV&TmR>L zk$pn0@hPmKil(*IcO=EW@@z*~XYoM=?F=@w#Q!~CZ0y=vCJ+8NysrBHLnKF_+kkAo zDgH;s*Dy7;PPG@4b%%8Y-|$hkPhFIL&z|+*T^G-V+V*4n4)C=_vxHprs=8lRV|;s| zmuKlzI6Y%$-SnR*kLj*{8+;2)yXZIO>2mbPpdVACc9Jn4zLD%&qwONLY|74^=-r%* zeb9>Q(q3NP2foi?9mFq-Ysq)5rbj2@ZA*T;rPaT0o*YI8zPJ(DL)q}EHN#G1JkE~8 z-5;lY8#HXmLVO-c?>6%00sVX6`;lL!I}d?j(K7E;;H-zfn|t-DIESuDaDK_oI(_rB zzvZycTZH;^g5zO*_Pg_Y#lg{drW>37?A)20KKRF@Z&I z=GEt0d0)0*=hpDgqW1~$;QeU2oL{HXvlaQqzr0gFd$JGk-;VzfGU2X%5B?4Qmd2z% zeY4oyT>DM%g?eHw9`;?*3(0FBf8=g*o*ey7ogT(MwiRJ**9-Q0@g2-}zZlCm*xVQ& zAH*RBFQ8MsEgvEG40Y`)*PAa}i?ab6hnXXm(mwzXou#!^64qIbVg8x!8?{e%EQ9rf zrQSbRqH-y|z0n*-<~!Ohu;;ob`_3hYzU(vD_h&;_@_cI)jis**ESJf{gYl?Qd1rKc zvm=K6K6BnEwwytBM}E7W|3;E!&n)pT{ahX#kMAl+Zw>8yyB@f^2 z=6wGvogdR>y_a63=BQ`cLi*~#ul{7g&I9ophinlalbvpAUGk~9Kj8R?Pvm{M&;MhZ z1M}f9ba&i^U(7|{o2QLWs(zG{>F=j+YZ&+D$F1m|u5F<)z6(yiu6~Oh=Ck_f<#Pf@ z9`f^YG_M=G%gBQ-4>|KCo;|gDZ&{vfg>vPH=AK-ugeh ztE>JN<1V#5ukg1(%ds$g2mefK81Y?brPCA z4+==#2vGTtl zvtmDtUtbTh$oZOY=FwLlzBTBFxF3qAA-a`(Q-^L#{&{Sn{dPY2i|pJOf2#hR#IBF= zY|F=^@K~dS^>y*OJsx?L>`F#&I>R&Phy2=Ej{l_pQ~Vz}7Qo$_-e>vDb561=+tiWx zS{Uvm>m}oNFWf!r{NMI)48yzYvg+Tkidt)%$bNM;3%S}IPg?O?amNaHui~$-U=o+5 z94vmUjS73&H4fidrbEaOSJ8NJ@+JFrFphJ&u3ZzV zCE+{f((_t6!np^$$2*>eXB4d8I*&jPh(iCw<@VZ{$lg))u0huEw3< zd^sOj|L22Y3-R|dTu)W}PDQM!rC}|a-vj?F=jHNdKR%#08pbYhoxRF0zoY#Go~GK5 za=#hhsF6{auiv6$H9HoXd&9e7Iho4m9ACOQ{^5LdvG*CRDxM$3;z~9?V-9|p4c5xB zxXgQ?kt69~IBs-pJmZ7tpW^cag$&5XrIbP}vj80W#p_EPPei{)(t92JQG8fq9f0;dw%*}BtQpk4d^>tyawa29^^K=Fy*9`4nN@Lf=5o=Vvtaw2(-{CC)}ip)&g(fl(XmecUwLf(Ja`y`orxAWndGatj2loimu-i@=e>NWGpW2Q zj)tcb`4^&jn(n*UIg8H^*H%wVU83z_ymBsXsc&6)o`tJBzFHVNIM1n6YjXZ48DZYr z8IL+$HlQQHv%q;Tfn?Aox72{9{I4u>Q#6LpBDPd#oLHa4ydVq zV^iGbH!FM=?azk^x?b?zp?@T-Z;~|&-|PBo@ZDBL zne@HK_7(U&vqrbkz0mbvHEaKV!Up)Pt&&=}Uoj@Hz&nxt3&q)mXl83~3CquP{zv;1 z_nA01-qC05YO3ul*Wx%i$Nl^82b<0j_vX+f>>H|E-^u*cwrBJ3o(4V7JK%NM$@;k z@g2Nu?YE`_&?fM7rc+#nzc-#A&)$pZ=|aW?*S9-+t`2|yv*>TmKaAcrFt?!h0Y2D_ z&hap*vEh4>@XQ_7U+GO`E@AK8WG&KX&66MI{!98B;^nvUHhfK7t5fA^YSGOw4iz6E z#$MJwykd=QT!*mFK3qDUZgwZDVcms&5&cuh_=aqG5T4D7@ce(2z7Ez9o?+wN@LHEd z!`RjY?@N5qLTvAjwwXRQ7Du2rM!C37ALWl3d_NWMWaANRQe%s);Lgx)0mI$=GLxM1 z=EGPCpG(4V#r;rWH!#^m11m*S~d z%%7{}zx|!^68D#iYvUeYFV9!l?+o#|oZPF)Y|Fl{VBQ@5G43DW(>?L;%C-w(?ppCT zW9d2r|Bo>3>bOh$TX35jvUzxxqHiTn=2=UPWsA6v`k~*O+#~3xJDo>H2jja&Uwb^G z*=BA^52xo#F?<%ET*uFcve9}edK$+6Em`}_A%3Hr&b#V`yqPEDPUWP^I1J%IKJ@z7=2+HXxq(Xk!;;yC*k zzL(+IQk&c;n`_&I-b?9v5nri&AN^rHAMpRr7-&yh)>i-iQuetyK89nNSPt{XL1n1x({Cq5AU9Q;qicqjS4^Fe1a@1t{9Hf+S7bLr_q)<8Vg+ers9 z`|$PpV)|`#?2XnD-I~;x1i}zB_At=T63NHP!v=^ldoWs>AJIvNjFx`J!2D z8lY}G4cmeE*Cl&LadwgWFU0I2fBrB|MQ<{Kt@0(}pV)XuE9iMl zoNnQIJO1at!ul;cME__uE$6#cd@xtrNHU*<6rTRB6@(s!R16?=qI);A+(an$L zBXEC1|6FJFx@^qXawfC>Pj<({rtn;!eaTm^Ipk{kit~4LybQ}W^p|w1&EfgCx?c5+ zQ^Hlshs@lQ>x=ess9D+7^z^X49F8{R)k-o}qZ?1AG0pZu{}A4j(cj1?9qE4_&Hea3 z;_n*AI>vpl{!RJdJ~7)3oqe8izHw|x<~n5mL)Uom*p{ruu7~j780Wji1I*E7Y`vv! 
z?f+*rf{v8WpCkJe{5|PYhr;(+p^k?bDeIBLAL0K$NE*7|6~B6z@pt)-cSQ0kGNxrC zvJNEw0a%8!N&e;clXDhc^If)${@H99$;UV2Rp;{O`TQo=-*+PNPuA8KPPquwVzqzaIMZOpfzip0BBkM0Q`8>?m zz;dW#7j0*tS)grZ#r_7};w*XBzSSA}=HNY-{!Q2*9`d?Mb$>CkhAhN(oV&l1oFCDh zhORwcF_4JAWGDO!*&Slk+O_=B*dNRGeh%XwucRyFX?OfLFJAjwgM%v7zd4(T*T{21 zw18cEk|TB#wJewKVcnmh%bU^NN&ByKKE>uMVf@WF&S%GUupF)4H$wA1++w6u)6yRJ z$HE=z%sbj=vt2C>-_V71Ojs+#_lg5Gr>te;G5m4}U*AK{#<2B4&)(`@8NVglXuF0T zZ_wQWUrNV*Y~5L(oC$TbybIQE`Qt%t%U}=wxsWY~xgLsdIsMHX?-(!sPVYy1KRwst ze?zSw>pZ;9_nz8+K%;hsZ_CnK=$S!3KW6*#t#x%a85VW^|46#)xUH!+4&Vbs0TsIi zCyI*LNj*2i4#WoADRzt9qmhzERB#9gDgp|k#Pcw)ySrw$zP7Kg@qW+vW1ns3T=Bc& zT<5IrJ8eYHa(He>|0Vt8M!VB-x?{16AJ=|?eswRs*YUq(^>=)y`sunin+I1-0to-dqFFh;~R9l$@K&V9mH4_IF`uT}p+l!ScgOTP6(?iwIJfbId> zj&1Os8;qUknrQFZJ0EWRt&Mi98lQol6wS~KztrrZt$%Bi@uH*9;#C*8^rpD$^ za&(}3WX4Un-<@pIP~HDecGtER8{VKptU^8|S3185-6MQ99v(SSe!<>$&c7pXkA7{7D=0etQ8-^Mqu(zPjDSFcsS zWmj&x%=<`q2e9RR`nKY4>yG3s^U)9T;c@)t<7%#NsqcBXgAdy|o=C<5_^$Qtig_rx zO}kjcBkkMJ;Tke)k7jY5@2qY7T@IwH6?`4^tF5W{6qDHA%h-`4$#ZOJ4CjCNhM#jZ zN$J|yx-j3x@dNmFL_fJ^Zm>QG$j zA@ROvxt?4KKAnl~TlS#O|Kit)7 zMeZ47bRhdz_H<_R6zy}^y9ZhHa&Ak8ffLO`P=yn*?`J%NrU#ef-%&sIKU;YO_ZOIo8XbZWp4f!3( z3-u%T$#dVhBOkySk7Vy=V)z=F;o5T+-c{LhzVnu1w7&k+>3JURkJ0xJ{nkLgh+)X_ zXJB5R+(BqB70`y*wtR78v{u009i8_kOeRvnbd^a0r!quqW`}}aY2Na&U#~}}!>wA-o z@6f%*&W9YEhm)6J*_@mo;DA4pgJG1eNFnW;%m>ibL*Zgyqh+j9b%d{f^|2xiACyqFPo13Hv22^s$oTtF;&0ovUnod zADd$v*k$~rtH{BDHFKGqa8Izz<;8>O+ry*wl|RvA{a?&v?@wgxsQq;L)C-0+U_3y) z_aJ8>p5_R3*N5!CpzB~&pP@udW@CwVLThit?(>1JX;@2=VUtg zDcpBWRzZKL`ua zWB4mSldR3~T6gBKr|t2aO#ffv{51Ri=J$wzHZ-yulO zN7syQ@rp*UE!5~w`H~+)j>IF-A7g(DpWMyvhxud?oo_q$+&MfiNnfWU_<#4Vp2gZ5 z&*7eDHi^uU=+DJ3chcWrCo9Z3;XB3TC3G(vJ6k*7Tc5m29%IXW&c#3Fi_9F5KMhky zGLDfidy#o1%x}OdAM)4fxfNeqzMIO9p=2)LgUM**eELD>>izOKV)Mb;*JsylbPS^x z-f+#AUEz3j@}Acp)=1a$^?T$D)OHeIZvoF_eAb{T+H5F&Cp%sZ&Bb(%=ZBxn$q%8E z!_^wZ+?buqA7O3`@im`?`}GOm6n)vYPkr?}0m;X7bY;^`=zG98*tT}V%5V651CsuS zp8c&=w$c{vCCb_SQO9%WYEI4(2#i)rmd;t4fu8@8#hq*2O59;QA}J`eV-V^SM21x1$_s@seZ=0^HU#uDg0_r zaS2(Q@aK>6YOtC#3|&h)H-vQ-3@!QRW3kwbjBC-0O>`&Qj)nCfF$na0T#9LU?il|- zp4^J*O%8Cr0!F9EWjef54(I>mbaI=Q?Snm1q^U0>DMuqKO^kY5K|d-_J=HwNPLOMK&Mt}W}U-@VM{vv0WA z;?2b-U@dO&e(r^GW>>uP*w(97{qAJhS6$eaoKfV2_cIPr&*Xc!ZjI~N{R+9A<*D;z z1k9amx#JG(>aXvLl~}{GxNt8aX~ze<^2ZA6j&tSA4769UIb0|9hj}q9=g6&v{9&w> zC=iOjiz{Z7We|8@31n5iU|6sVS!8_?)JXg>+9Nr7bI=|-nRG*v*wLDrE%?vVMAwx?ZrG?7nS4X@BlK8s(C*R z&!=SWM(=O-BjFsSZSTTwP{7uMZ0rAUEg5{9-=%FSy06)G5j|(>zd=l9v9pzaF^!(6 zRlh%;idnvl|E-h5vz4?xU+yla4R+}+d@f})&mkx7AGT4|pk8fV4@1~k@ zuTSkL`taMU{56ujC!;mjL__ia#@2(-g<8a~fw!Q)@cl)3Jvz1~yRvx<-3#q6$1g9! 
zHC+6R^R0~~b8l(x2+x}$>ze3yIXAfjzwm`|oZkui>HILu`HyhjYCY%OyD-;9d{K?X z=K5ZtU+vH4qZj9dy!=`AZsYuOSm96jA->P?GB&osbBn&Qj$6WaxV}xa^`!GO{u}B% z;n#=A_)z=RFpflTP6)qm7~e=&hIR)uaxb0k{9X0(JhuJL?qOtZPuEm&XpK*uFRgdN z+>i}m_Xljcg6=;0wyRa&U1ahy`qs7qUhAFo60$$%|1spXf?=GtK@G0q8Y*9f_cgLz z=r~#%o?PzcE%@*gwx?+8U|q_u)8y#2WWk?}*1kXdjnK}o^ZPh*jlW`Y!|##A>QEYN zcTX~*x0ougZ^hFeeS30_g?DJZd0kFCL4N}*2jlsGo|;&?=1tV1=m)xXhFh*h|Mh;u zbH-ZDxa^3293QM;*HU(VPuAUHv%X{N$#9P^I=-R$-SzNnpfJ{w5F!KeiY9 zdGc5dOyqGf9QOAeZ$Y*-S9pv=lAgzeoxyty2kLqwy-An z_NcA=mSZY;yYT1Z^gaOB;dB_Ead)-yTRdm;#p`T7k)D&uxrDs8^c$DyGI#0B1 zjrJtEwv|7jj`o0CoeS?_lne2%L*IRLPbH%}Z2e%l5Erl>+RES9dm5fCY{5sn(idX!6Me=; zaXa4c`2R+_K7$RG@SHh32hV!r`G}lH=vYnvNU}GSS1{&N$v#jX6*cQCeX-b~N!GEA zsCPZ)`qH?HH|Lk#;9XCQA7 zW3}9jj%#Yw|0!3`Hm#w`ANWzNOHS7}-Tn*y+Ln%9>gu_0okDlSH?Ps}xTxpb8P1!^ z-@$C#h3}fdatb+{i+f`@$SB@~r9Yi5*~ljWOEF*nzp$PthwimjKaT(Y3nTo691MAp zZwvcwbhV{pJoyj8b+dS!%}*C=Z^N#$^r>Ts8WiGRJj_Qs(bor!_@(M_xugBs^tQk= zkdJ0)n}O~g`sb>B?>g>MtNw2xx`AJfi|{Nmool-k{WY*yH>Z5?rt z>&5-(Hl;Vz@Y`XRuhF-T`7GX^kGHDx9d@1i${#!H-wD>Q^}X3peWw}j^_3gKP_oCf z`SK6*3z^}*TFUlPUgmrl{(Cvp$W%S8zCmqT^W7wQf7`P^%opK1%xo$A{psxw(}m9e zX7^_JE?ZW8cV2uz?^*D)CFfYjayWX>?@w*Q*PHXl`|NynnR^oU;XZGUcUAHqv+XSA z@+t3Bi<0~0^GkRa!S%W|`5gVi(r_$(HsLVZ6D{b82?WxA1%}>+XDQnEq~v@~6b4 zj*dz2O&9AU#9}oxC!^`2{~g%bdIRNX)6lcW|uEwp)!q@d)3c zWFy3TeKqKIG>6l3j7?7DTe9&_I>TC-&cy%!R?j<5AbTzE|Hm+faXX#wA4cDUzLT6A zm)Z4vI}>kD+m>{^i~kt$Xic{3yL1g0*k6i$5ynQ1owvZ*h^&|Oh3}K(aV|enZBlWQ`M&l1_)%vm=$#>ol3IErXY*k}7+^`q&F$p$b#0Qfd_hRc=c;=9? zh)KD`o8cTQL+tk)f+wW)^@n6WHj1Swx412g{uAZ64ct7Nm zo%w1b<8dwa8fGni$DffgksAlaTeb;gnTyhE`0)q0#su8< zljUOYkvz}()mQhC^5?~NCY>W}gUC2g4CbxGlr|y9%MuWzQ36H+6gin(;NiKTuyiM=o17thC!4m49rl zKV1JrHPXBquJxW z_yL|a{1@gH-#u2p|F}0Fz?)38$>-!*{Aye|l`Qed*&fN4xj9CEz`jgh(6@rW`tpOEDqFx{Z5K_X=K?yl=S%raTr;{ewIvO? ztj?6bm`C?_EidlFe5Lv(y*GSe9E5mxrt?yJV>`MMkM(~M{H(uR&bRU`{Z!oUusy)X?~5njM2p!o zj1Rs=ua?HYIz9kjXSTE?`#4zt!~grzy%k*i5j|;LVIT4)-CFJW49{3J-^d?@rF4{P_)=8$9ou!?wr7p%MS|ApahEH;1)1{>O~FiQ1R*cWdX{ zz%hiZcgZ(z6z-RY_y4mu#o=1}1>zTKJ0E1nsSV!qNNys(rM49|HfOK0DR;aY{R8zq z;JlGqaVdPin|nVXYXICo)~)<5tHmaHxbB*zw z#%A+jyadfGIySL?k(?d*dpC4H857%I2XkXJi<= z(XBOoeD3{-Ft0|>(BCQG447ayuM}!|Jk@p(yz)Kn5d6W_Q}p0@y%-1Oq&NBFuZMk{zk3)u4c^Dt^Dn@`DpLccMiR4)EsxMp8v<* z{|x`Tk@u(jId-a1VeM43L!aB<$zMwyi)B6%_DRNpd@8RNgE92}X8$dowqiAj{uuUK z`KE<2`&E7Q4nX!EyH}MDcawD;`nULJ6EfA8Xu3Qwj-#gR>%pHF^Y?vxavI%d!Wwj` zyQSPq?sWX1{W{C4?-jy*{c61gOJOWUPt$Wv%{^!Qv+3&3&X>(!`pnU}!zSp-po2IaRu{LXwcy&HIPE22fc?WSh z()JRXuVDSu{wjKn`)n7h@}KBpOXB~33s{pgkvtaJc{+{q$Tr;-KTa#fu*m)y# zpTKjV`P|%8$m8@!+ZeK^;&HtdwI|l7xjL}=ga+qA>mt6f%ilx=>J!aR}s{66oFgR`}cPjdA;PlQ+sMq0l z^pL@gMo>1`sEW47^lg{S)t`?KKwarGm6`nfhZOK{+Lk(RE zx-P;W?%A26^1~Xc=M~}i#3T3E^1JPav-3$bqiWTfEIx~kax9&KpWpJ^=>3T;A#ZoI zpQ&DmdvUn^X?)p~%*E_HSG>1_<2-h4Nbh5CY=>@B$EV?a!~C=lx$1dwH5<=j_YA(7 zr%ikQ6`H?c;^S~%s$@^r9bI0NN8;2?Y}~U+4&bL*jz^&x%BKCre;3&0N!h?BYDMvn zZ!Y@7HIO{AV`Dv;xU~9x*?58L+Q;GUf$m5c`p_%q;<;U_Zwk{T`1l})Gjk8BY+~IZ z-@@1~+rV=M8}AEzcy^W38^M1&`lj;jRhZktu~$HIlH#< zd+txgUJS}3ta0Q=wj4G-i4Ljxei6-Fn{iS&PAAFV&$Yvse?eYC_nBljLvPHNXtLYT z^J^|glaFCtsK)FF(@tXERR5iL-sYzl;FX_=m?s^`T*kI{wS6cr?zO!StMzZbt9@&j z+oC(grgrALsBd%l?J&9?6KgqN+zaDOevwDzn&@_OJ_x3vuuUgp6*>m+Ndw#k-es=U z)~AzA*^^@F-K?T7+;T6I!`Y6t>VImgYx@lSC*-Q>iTI@OWp9x!e=~DMxg5?l=y@OS zgWAtQQ__Vm{tLE4U~cSOjANL~f5`ifT;4%HJBrEb{WP+Tq3pkE-QW26IJ(VP`$68! zvuz9ONqjm&-w6KOgFip3xAul%d%S8vJQV%Ka$*rVX9WATJ<1oo(2x`E`Ia}6yN0|s z_QUl_=~^P(<4l&5y8$_mpjUHJ_o}jaFwT{G=a6#<{QL4D+2OYu!(1A_)v)rq>=N-= z2k#7ab|Jqb`iI4R5V~+relu7uVdE73m0vOW#Si4I%WuDrwR!M&^ctVDBZfb|!|_`DF&w=$Mmo$oBSL?TV>H7ur zCFIF1HP3qZvYqzB$mOF{9_Q!jGiK5spF!qIF;Az! 
z!-s{nU1Y3;|5%CmEI*z9mdep>=^ITx{N+IMCX&%z|DE`DGvDu4?->stsr|+Kc*co` z{79yt-JA_CYP*gdyRq$UHq;#NAl~8rV;Il!DBBBdGrnPG{;y+ef)Llx7T!xt_ar;q z|DVjJ4Ph~lmKhzrU^##3%KNhFZti`G#Q2ZIKl&E#+vys{w$t>@Ve4RV4sqR={2E)$ z>B(7Q+M8{1Cw>a=?P&YrKZLHk+1B2(I59|z#cHG+GJk~ooz?pdozNM3*~@BLk6QJg z9P!<7zKA}|?Gbt3it7p(-sj)ue6v40#Wz{RH}&Ygl(#R@8SdAuL*4~!qqq9jsANMM z<9HT7g!yN*{TbpD!FmM@?~&hx-Y4O_l07fO@|w5|K-bcK4!x_u9BRBZO8N+SaxOg^ z&rtoV($xWmTlw;F_HMnb(wnw%{s38Z_%F7fN>9j-Bk=kkE*#R!oQ=9g_-z`qx?PjcoyEz^{q?Sc=^zmKc>-l<0AL)jHmzb z!K!GFB!4NHL&$g*Mz$p*wCzy1#)IKRGS+u|KaAJ%pYfdC4adFmMxMsQ>FiJb5N&cV z+ueKK^rm(EX$_c~+tW~U?$;OQChPyQ5nBh-0e|rlnLo&zXX>l_ye0YNXc_8B?+KLA+SsFuJ?dS94~_kuX1({n&mdIbUf1gPbt#M&P@S z4QgBhXE{|2+tyXTC7T?HZzlb&3$h#3j*ZBD9{tgL`;u|IJsi!uR_~Lfjrb|_2V9TW z%$e+1K-W%W&eC?e+Bc`pI~pCTdsM|5#;h8XTepVygwu|2Pq2?*x!dt3bho4P6SiCb zmyOwTjJCMuo&(>hC#BpD&)IW*VQo>iVqa7HoSt)FxC^~8SaxGum>&*6JI}Tg9qLZF zrp~uDZf?-m1MOVfdye~CAKn`H$$5f4xe$wUdRl}3IAYUn+T?F$zDxg=Lt%V1)29vv z8?uAN)BG5o_f@~qb1=LMJNrHmj?4J&c|PqzzPwE2PS&8Fc3{Ir{Bb;HTit-MXtvwSdtG0DNMVo&)!=6t&QkU74-iedOxE!-E)2H7^Ie+Eqa6|XK&UWTc)n3EgqExM68nm&Fl z#WMO#`>k{}RyJ_lgpESs1h7`ZsPw$DNMR=JKklZN13<7S7(FlxxanL zx6|lYZk&Co|4G=U(9>7@7(jl1z9*c|ra#oRcN+Kro`bZ(8GM^} zCx2V*>$2k>YSAmN&R(M2?!4>;?Z{&QG%z^1FfMh4c;}>tVdpymRx0 zyic^9#lEA+c0W07&?dJcF^Nvn&Ue`+Y-^>@*h>egUEezI7seqRv-#pM81{y@Q>}V` zHn(1hFL2x&R{vpBc0uR)Wb^@_!W7AyNPH6WLAfURr#f!Nulw@R7`n|#;oGu!N9Rx5 z54K;X#%v>(TH$$4{}}Y=ufz^$@O(KIm)LXT=z9L# zK>wiqYFwwV4k(f-A+8^N$UAB=_X9C1{S!uJ)$=HyMFtGjW~i_f*E z)*9IfZ0+LsSUGt)UGSuB+0<2^m?dNN4Pq1jCwJiAg`S)AxyP7aiQc`lbc8)w#Y^y< zhweT!o#2~d>ja0Gg|%re9*O%1`DS%~FN6(0)7LE#;N`@u>Sm8<4XD`c23UbwSN8UgZ-s@pyXHW#0qN$LT-T@z&&@ zgU8xD{N`SM2K)Xi9>hM^o*LHw#g*od&-i)){sYiW=I5r~ z6EYrCwuJxxjb71atb}W`@V{Fbxy5Vr2U#buQGBEC`EQ=%^VPbiV23SyD-^Y`y<$u8 zKS$3l@Xdu~TXmtI{lKZtFG z7>t((7uz>e7y6oey1;N7yertUjdA<~y4AF)>*2X&aV(wpvf&Dtz2`7vvU{$#ehRt(o( z;k~J{J^RA-^LK1J3eElSokj-PMSolFxQ%!IjmIM7a9IC;$zRvP8e%>hy=$BJ2*)?$ z@56Tkf*!nw8-L%|j3aT9W4UpX58*TRCQs_yVVNZ|-Vv~B&)>xNow@(z`jz)OZ*Hi* zQ3&7Z6ipqQ)AG03eg@jG{_IH30@&ZTKhFB55e)99hqZaUJ-@X;dptdpVPJoBzw@r- zc(#}tOIfH_Z}QFF>{3gjNA(+fX%n`tsc$4a4~pR)#^nh2A}_>)Ki(bfndF^}ehvOV zSKjQ@u=01njxmPrqT>#<_4VGjf^{7J>CRV&%T~N@eYcGtea!aZvrov{Uf(Z#HJ;3H59oi6J8Hj@%n6QH*gpdE;pQ&$YI!Ta^o4~@ z$yDufzWRP;J9zjl9bT*6k12j8|9JUX@N+l+pFn4_1kGA_)ynb-7;`o!WFD<1m0r4{W$r&h4#}L+{Y$gU-Y0D zUB{2j<;XLby#+!^k^GCHFeA}M9lx?xGTCTpdy2ebd zCnMz54cfQD--AslogdNvru^##;|^@v7f&aByTkN1Ki{Di4j@x4iMo0IHJ^++@^9gn zHh6!pcmISftH8C5JvqrObo?lm=dtf>wzSpOjSO`ymV4Rv;>4~b#CA-5wI)nnpuZd0 zH}cPU*5GTho$u1mY=_XbT>IwaZUDQSF3h2E1O8dAeS$35bJsrk-f-{CXS>v*n7xI1 z5!Q6!nmm?osktEemR;+s7iw|Qh+KB2uw>uCdLUn{kEW;XFq<)w9K-Kj$dSwOcrq^~ zt0Ntk(JzMKo<-7>UgIM?y9(ES$$XfDJzMkVv1p&hdxkc0vOOET^P(+>+5MdSU$zX; zZvM}f@}b&TT_3$;E;*e3U*-37>!J~C-vI7C&|XbOxK=qDhCdwd1LHzIzKm__()$Hp zY>rNSOUzU0#&qFJ)!<@3?dHIEBeL$ne?BZ5u&aR$OB%$-xbK4I1-W&+^HK2dWy0R@ z+$tKykJmX~s;$Oni*3E>cucMTgp7K8=A^VQyi4tmVgp~MBW!XfSG&r!$iGJ&U7>$N z_BW%$xXG_1w;f;I#)qqEbedXm?bhGGPg&l{;rD<@!s%?9E9(4Sy7|O$NkFl6Qr`$^ZM1QR@ zPDVTh?n!L<&V0}ujhv6xAp1Fd>`BahaYwe83!=el*hv5B81(Ii?jdz&RWy&X-#Z2Q zX7=xBZ_8$ND%_h0*E{KX+WPSQ$?!faKVNjbIUJifU(#jeyH{jpt?56AKG%_HKYmuX z!@Zd7a{jtU`zG>6ZA!b-FK^3p$bE`lF)Cp%Z`XFBw#S_xzy`Q8b5nY`9C(p$C)r=b zhiXr_Uy$`6_rh8fog(IU(sw0YgUGmnPddYHEm(SAEsX1Mk1skLT?_4Nl6yR#KA^Vm z!v44A(P(nox=A2!Mq5r z(PWMwTl`D+f3i#Xa@`ue#*W;NPZN*Vto7tYbOKv$)!&kBo5HXj|8%6k89C<|Q<>wD zFiop-ZHzvQn-ZRT;opV)uVD%Nv-#y$>(w2!-7hxV(s7OB^LO#UeNdcg9MzE91ijp&yv zu{CenNgi*3??3#$C7plPsa;}!9b4!P&uincj*Y2!6knZ7&RFs%lCdTIE%kdBHkxC< z0H5_oxs0qia9u#})96Be^tY)MaXb89@(0<8`v}F=a_T8IKTucw$4XcaCvU+0Et~7D 
zf1a+ZetR|Dip=?9{vYd-k@_wmM@>qnz$lNRRmp5Fj#p`Ok0w03%IGf^)vEspNT=}S zIQW&*(sFn@#BD!{{zJ-(l}ac6DLnA@clN7@p6T6!TPlDwo=f`=p-V_YteP^u**|Z$H60WgOo2awRAC4g53Mb06IA=^rK@=yPKy zd~YAVkuC1mZ!S#lg>gM{jmLcG617on=?G7bW;)E)pkY0jej4Oy-&X$-_F`Jx2+N^# zeq8T=zu2#qCJ)fjfz7vy!E0<XOPyhRH z?27*9AXkj|BHW)zx1pmS3{ABkK<@)+V)EbSuVHj7sIkXACv$$<36{03|HVD56;e48 zKVolQ%~N)T+zsEljL+^llMQGL)zom}9 zb$lNmTtd!T^6**Mj=>k|LN|S_;4*geo9U9D;dhgYd+0Y$hIe+NuwH$gt{VSni-IpM zwYmQv-ouR7BX=MEI0)Y&c80mnn2ukzO_JA>?D;L-o^L<2?aof?g>0`nzllfY>GT{( z?#Xyhch2TyS)FInV2iE85Y^HAsJ9}$iqvqZOxy>8A2S(qT>Zbc) z^mT9WTlr+dQS3y%Jj`E2JBW<$ogd7OGaI}=OrD$$&+$vX$yaUg4xIbDPr!Pm7+qzI zt8v*nbiGC9?r1-ePyZFu6~^Ci`xD6dh<s*B)Xq zP~R)s541f4*US8Ad?ZKI+(TlwF%-vgVmrBUEPY+%btAOPVHqID`mv!6nmhEHbBhny zv!6Y?Bfc)hHtmM?2)?<_u{u|V9Pg?&Z0MK`Wk2%vVp9jWpCI#G@=im0Cfm=cc_*q? zeg7K|AoCh@k@G{yoK5$&j$fp6B%N!soo(UyV|ZU7?ozAn*Ozm(u{Y)4=yEnZg_fN7 z8@^tR&)_V!bG(AS?a>ayM`qk?se4t{Q@vrD4V$%1xt;#I=pLeP7yAv=2JtWUCVL25 z)*#n;{GJ%j(jR`e>21E>luZ7KkK>bFVKJVv20ABe|Crw&(f&|_-%#Rv^I!28TMlZd z)_d`lc*+?4Vl-%;PFTJ`;XxDEb9sIB zTRGXs_$P_a3Vioj$Gwkcy8an>zU1?}(79#`_k+^DXr6=Xa6ZtNjK#YSUwp@wFW|YC zUGwOX@5$L@{zlf1=vQbTC4cXPk_UD5Ie4@VPGqnxW_BQ*iA)=bG zC?}&+jE%))&9a8r6qbwW>Wr>Ap02g(zGisWBRPZqTN(16o<$Ukj%HOA5M$RQL&G)S1Vg6Gyf?t!RbT)Oo6P}j*eh*pyv#DFzQ2p!h>9**v zmk(+}c&9Ml%YNH>_w~{259|DT^C(|-mKVP9PCvx2M&3NzDdrpj`f_s+>;D*Ww_U#smbvMi>v!h)iuI9^iPI$XL3iP zJKg?zSmbWfom}HMeCHW&EyvcgZdpUD8tDslTYTeC7tVv_1U@(uO*K)rL zb*}TxDeKWA?(tJ%Jp_F{9bx@H3(w8+%-kD^Tlxsu_t9~hnD*!Uf7#^PpxjzpM{8v{}G<$^v=MSs0G&ZnY>NJD+&48 zNv#-z|9*Z7W9KL1^$#-lBxf=IsI}!J*!sc$kofaM`9J3;qkoM3JCX4`eCC358on)j z|Er!CYJ0w#YXQEGuVTj-+k@zLVDF`9$H8@4*J`bq--!PhzWyH18gii%JkPbg}af(KaW0xqdY`YK!M}+jw$T zZSemjYF111=E!_qcASNO1s>NR(L!y;RMwa+pBlHj@!4Rxbrhf6#Q)F1coW`#$bSgd z$>i=z@8i}y=klXig>_d6dv=?gnabbcKJaMUtLVZUuo_>C=98zyPTnVXYu~xf_gju< z)>Yp@r@!DcE~C}u;o4$ltVef?;W6ai1k;5uibwe}o35qrAG|-4^QSSgrSbeO-wj&k zoiO(GGlyKqu6tmg?VNAp7O>rH6QA%+Xwr{;;uII~8)rpteSeUdqHl+9ptdkS8l!O= ze&N@+1wX`aEKoDvsWW!?_Eb5-?&4qit34FN-_m-X!I4)2C^ zG?Ked;`5xKJkgx^ruug^9Y2$KJN$FlI}ra?@S1;8Vc&R;EguIu zd6UZLDCEd4;v01S1KWvgGB%>d_B)c_pG|XZo6*r3j)(d582se0Gi|<8k@)C_lRHi<~EH%Fbt#Jgyw13%}v4o;n{L| zH2P`su}Opbwsd!+V>_{US{|H*rUv6uzL5X%YWAD6qp>md1fTaKGsOOQ_|TO{!!(3E zYrv4d(E$9nSex-nxF4GTu5FpVx7i}rsoYPW#4qQ=Iw5IktasOTuo&8k3GgIctLvX) zo_6zbemPs0kkeIs_u!*@(27%b8TrS-)}Cx~h0pjbhv*)wBKZ|BT_ed#tXclhGeZx7;pPuTvY@yAC-|;9rBzQ)`}c+xR5BCy~g@ zayq_^$YV>m-(8H>z5*|Qm;1wai2e}Qiya@U{bcPAIz}7kY`M4IJ8atJO!*KU`>+9) z>>uygUgmtm`sy3e@@7~9c0P>z!azsfV84;AHU4wi{Qw))^W=3pGwr*RGoGK`)pkik zwSFv*)UN(Uu8*Vb;H5tshUQ)}MW>lv}BKSnkW8 zY)|*aYo81wYYUjCi0wQyViQfIV-bC~llde5NWM-XcL*NiJvssZ1M*}GK1LVTRmH*l zvk=a!Gmsprf!*3IXciR%a&n_ivGdNS@ zO1?%Xc~0{TnX8a5*U}sHd*3NMqY2jx>7#r&n%osj{k8_ZpOLq!_QtT7%c8BV8D4fg zn@@+5e^UL*?=!!&KiOQ^8?Fv)zli+BQy;N`)cgmm+UUV5Bk5dZBp+Zk^N9{okd<#vHVwmbNIqu)B6V>8p>b;`_%MEa;?HNCYTAA}r^^VrgA&+a-f4-&gM$O25)^BpHW6d!iO}#bk zdVFczh5Jsq`K>sY{6X;8M-9(kE_lQ??s-B|YKZLZrPgezQGcw2sey zg}oKs2bkL*mOE?1xW4|wVKe5UckO?L;Ty5|m!G@~SE^~@8+<934_n_lUa@M2Ax}kbcbq)BVWiYG7*Td9V zf3Vdv|2T5K344AvFFhoOjN#r%5Y`aMwO~${F?u5=)^v{L04d0!u?O*+360T7S_sOCI)xQ6*@ffu0 z*F0BuK8l>X$l$l^ewfTn@osRq{*Di=H^HQSOW9<*3 z=M8kbiNT)sViwk`d1EsFpnn_Vb{;>h%I>-LaxGl<7B`Uh2*2=gHURCp@Vp5Bk$n3x zIZukqReZcFI_tCYA#%)5)w9#ij;9+_Z@{uD8^(&e8jv4IZb$aCrGxCEz4PVnmmCF? 
z91PFHLcJ=ioeOK6d=>i}Uz)Tn^vW#A;GHhP+0Oy&I9jQk+Te zcku4v`a!ORXXdeZ#}m-bUZiHiH;uoBIM2|@)AV$F7m`1=uKEx0R85Tc<;Mo?8`rIT zUOtr{VtV9VZlCplZyP>QH^X;g5nJ+&_~i>a+rYgc9eiE=2Edgy_q_3&f0G^A{y4r~ z@czU881EGRYa%~7)EUq22OYM1FRTJ_J@z0~rc!~#;`4mjsYHMQqRokod$+PTem|-b8z$>3q=P7?AGx_FI zGQa1CIe7lUGX(8}c=|Z+LC*j-Uya`SJmUXwf3qA%PE&oyvJqXrgLv{y^-TMEHHOXM zzGt{T3)fTO{i>v${aWPCwrxhv|HQn%+}RucJ7HVVsd|?wz67rK(AL9pJegu2-oFd; zZ9Ws9dXQfNZ>T$Je6l`2c0(6pdN!=h;ps`n8vJoBe4g9Hfp@rC*$(Da^?$l@p3l3Q z|1Ks|f9kgcO8!kYrmvH>;bQR_`tzMPA+G@*>;ITd;krIOTAx}{Jk8c=#_gWsy9qh7 z8(dewH;Mm@jpBLpWH(ONPqbtOwfT<0?jb(FNeQ$c7!gWdUq`p(&T2AH|{`toFV0`1q z?uC8>Hmt$!#$@tQAy&l{xQwCX1va*W{~fY#!8@9al3(sbH;NCJunT|iZRUP;ei)v~ z=+{Dfm$n`8H^}8BHNQ(tt~}!i^?7OaThQr8av7#@FEY9W#{KYZpaxGQdpw?CX9ve( z8eeK&dV_EFA?Idtp7QRS*ra@((v`@W;vM`4*-u2j9o{G4k&Dq#GB)PhRq=LFi~FP7 z40iRsd{5i!YzX7X^-c7Z^FjLBk~=}$qm*2d1eE;RR1NO>{4gV zM9-ySz9l*Qm#Po(mgeCDjKAOA=bFsN{GK=A|1gHO(SI!2ZD0&*rvtQ;kv-&^!dfRi zlf6&L0c+OsIeOoMU;IN|O+9Z3v5tG<@9jAFaD)2ldy$Nd$zNoz(@_0iMtD}7e64Ly zF&oFW0rnx+jpeAPb(<~J!|**rVhlwmv2T6)%S1Noix)0+%7#y2x&_%CUw_x(JbUy<{OTKT40JdxgG^(|BD zhLgLfR{cIuR;Qi6!uwz85`25{2YKN+Yt#qcJ;>h2vHoJL;~V+#SiTvIrv~pZK5NN_ zHgs>vKjy|z)3di>H3rf*;araPBYXCw&Cq&hE}KEdyJ~n}eK%_FW_tjACpxaA_j&yC zwcMKA56SFJ|Ccr27?Hh&Sajf%NBLo-{sM+$8mjpy%wv)3!t5bDYD3({-Ze>nG`)P4 z?an9G;vv`4F~;}n?7aZZe7deC=M&pfx;D3;tgTtibB3BC)fd5j(zCcg8hfU`zo2%(&0IOWNlXPV?Nqg+yw6+{@g|W zY-B&UzWVkmJev(zi>J|lCZ_MyRlfsS9Kk;=yhFWE9KYm`+1gJ=^Sk5S_#$)sMg#oh zzpL*K@}DB>81_$MyRn{`TeIoxGp_RO=&aX28qRa%RmWfhEDN;_BC8XhzH5A+&ejy) z+hl&@e--Yfe>55De{rbTS$BmR8t%^}v*n%nBN_?!U*h!=48~`EzU>(@XNx(zs`c!- z{Ia$C|BdAF-sF4#CwdHTKmG8=x50cFo7>{s6V|i%*4$dc7Waquzhue(WH)j=A5Dy_ z1aCIo^^Uc3IUI&5?D-o8zR!QQZra1~Yv!8g$#n0bFh25i?5C3{PT^bpbUiW;)Zd1F z{>)~=BuAsS*#CH)--WWtk>ZI(zUhGBHu}XRJa5cCpywbmJ~VHOLpDr{#Pwc$x;u#HE zVOgW1b4WOn?-*I zdX2kcYsXXJpH^G>POSV&Vab0cZwdOTY`9#%{LdDd>n4-;C%MK(Sht0_GnOMc9O1rq zvgXq2x2%)>@EGfvHDc75U%KMk70s#a;j`!oHYDVC&^|{yT;W~o+!d}XdnpXSRa>#P6eN!7df zI&z2MJJj)d^jo$a4Mb6Cx|vmZ_8 z$1uQ`*W-Hu)*<@jOSzX#3_^V?&Lq3B_}ABqg*kr$o^|mbAddW4?qZBtH{@%ht>=#( zbe(Gpc(Vu{pnZn*65n?_c&!A#Vrx-)3j1C1>GhXYwH)c4qs)wYTj+Ii2*!nZ5lOUqWldm+p_+Miad?u&-sE6zjh4ZdkAKerc`LHe)7R|oS}+O0Lhx~F(R zOy|p^GaJl%hGPY~ zFbF8Dw`$qh;Oue(7iabY8 zpQUd;-8q}it6Ta0GhbzDYXL{`etq?QVY;>B_sm)Uvf&wYYFx?}MJF<^uT|H7$x-rW z9-D{LeV+cw_T)x?%a1+a_?aDh!4xpxMu+iTs(abad|{qUuGjWFe~smbuJXiMqiic) zL-4^8wR7J0Z*`hv_b^12Jzn<~2l>R;78VM)8**JRSLzbGa)QfQ4QJ4eEZ^%7MOxwWnFn{n@ z`DlIhd#d4nW%%AO+m)Vsw6)})lpk|6?dZG*-l=@&9oTR`JKI?PjIm!RANeM|ly2`K zEq6kDfy?>JdU$>Z`YdO{sm10uC8e9u}eB{%42 ze>nlJFph5I^C#u&nPO?~NZ#U)T<1Ula6K;+G@E*)SU)`3>kh z*VG@lLp)j=um2Ut`^0Um`DPuooweJ?@-p1>O2306`0ik~<@K%!tgX%`|5*04AuC5` z9OgaYKM}q9ns3Uc4Pd<#->>#__)m_OSJ|F~E4*VrUQO&m&k}sv^V8MzkYgX9P2pJ` zT^}(y*!g_e+#`x%jy$hON_q6Be7~aRzJ~jmTcMk7TZJuS_~$%&?l$kf2iHKdR@AwE zQ|Bg;|E2!Vtd|~ubz^j&*ZaLIcK6`7joEW6pU9nPALByqB)e+EleOdHg56u`AEV#B zzVNPfaS;ryY^&6&dvE1by6z_LgZk<>H}b9d&9zUmvGF)s{=gm2ta(mGH@-w|$bUl9 zNj!UyyP2HmP0un|3-nL3>)yKR8ZwvHvHXqu+rpT*ge}YPqpSY&Xr}$o{CzJuYG1rR zosY7+9~*mWZ-j5IoUg|>-quumdwTzM%(vN1Y+H}s-^Ak)_U}zc8|@MMk7D05bgipK zh-VgXpI`ItK@lZCI3~aP{`Viene#DxFbobkkz0Ed2a$2D@jZ=gCp1*w*QBeeIUCC- zb7x+_sqW=d^n2Gc{?>jrdq=}Pj*JF z3v2ptt)6U)?jSh+fyca3e6Bv-0_WIAyc;auh4LN9gO%mp^Hr%EtBR`H;;+;Tur%TS@E@hhh!K1-e^o&#u&$w`4QD`7C^UkaId( zc^rS@cusxw-fk59J_WA1?13*A*KnUPy%mOaVE;s%+}BOQTJ3dy>BDxIa_g(2mppCK z5Ji*NxrKW=jmhds*0*X}6Ep|Yf0+G!_H+1r4BW0QQ+9=SVzTM5su8KObGgF68utrJj!IFx+Pz9)#DmV090#KfT?_mUrQOjkp88FZl9s{jPPxHBxx@qTIc~ zw-aQ=#=tp_*M#wIGQ-?yt(4saoA_n#q1}_s>Q5rCvPrNm=~DgoS#@9CJ%t$V!aG9g zzx?a^IBw7GSL>^1D&@nESZmX>F}-SLzJ17#T6Nz!xgFm%u(r^rKbx%oI>#@F!ERlx 
zF_wAGEr-_?o0GJsFob8udpK?k>)p;VoBktV66dfsEl+bi&zk-#?dn_d8oG83ez&?-{nt^pAw6;qC`Uyy$nzcpnezm$!& z%inA-IuAB~PPZRPr#U5hSbxCizF76FWFs=S)z62eF&&L?{0kp;Ay@4#PIm5ocDmSn zDM!LLU&&7VdJP&fv!?3FHDbNLm<}d)C?5Jly@)o2^(l4gb?4UA;rH&MVPX;T{&&84 zn@+w6V=R+%vFpJ2P;nW$xcU|&;_ql9cHboDPSC!?0#Kb8$OZ4+!IOm}yx{#zlkHp(W_ zH<6t4`RFSAM|;oaTIcF}vIY!&VYtk80e`r6R!)#l$7mnHAE9O}wOvQQe9eB=enhQ$ zE}VrmAzQ-S8^4R^X*36mse9G&?)=@fE=ogft%qqEOpR<8vFk5=-Ri1u2okxOU0{;O;M{tpDRf#Wdt?9X1SXw>%isfIr_mb*c`ZUa) z9m$ll=~ZHK8^6B=bNF4aP!o1iKLW{Tl0Q3eEc4ND^I=G9e!6m-&6Y}{lC)r1AU*XEenjT&TyW{*Efmp6Zn?!iFq!( z|Cb#G#|XY@O`qBs55#Y5W#-fHPR##Vy6d>FsrP^2BNR{&6)ZrU2#Ot8AorUYD7Jzf zIJ>)hNSCC5f;bRcP{2Usz8MA*cDM6mw;w;o*Zw`v`2DfRwzDf7%1~o=mw{&Jf!T**@9vtNM9~f1Et*LZ6(C^wH>S zxQvlu{Ve2iG6ME4Xbxe=0j_7ZfqY?IDB8)jpV2+-T7-Xd@~#tOK1#dU8{eb0aNq3O zB)+=@<}rMC8X2K~?5N-GrR`w+efeNN*Tg0{(|!ir!^ksF2 zQTzII+=I7`xNl3IvAJY_>7Jw5ie9;yeaeQeVzm;sjm)v&N=|d_b=dcJeKj_%Yg^3z zL3EQ>wsGxovad(?AGM>cI>3Js4CyTL=IB54#e5vi)+WEIdC|sf%kjyDFjq@LU+U}n zO)&CbJ{-nS1D_PrLA44~T2I$P^5kXovuktN_5uuh(|Insnf2B0HIj{Cb-$lJhvrVl zp5;X!yY?%ebfEKMK9k4s^K?I}E~<0o77f+2+$`8hZn+hjsj!@3yO)w6@C|FQ<|Jog$0xad72eFTx|5u&Pd0PjQvREdgx|?z6ZL^7$nA!wWW$Md z#;%%q1|0QbEccSR?D-S@lVm@Q{zAT4ouBwRytkMhiwP93Cq8G`#hR=uf z^lVhVjF->W)xv#on6F0aadEpCY!BOIqPejU#rvZze9gY^|A1tQr411N+4eDueD?C?-y)h&C9-ms5 zc?TiPMZ&#)bO3twG*$0H?JMMK=$Fx-`jY;fw-bl;$ht?apiPF#*W1~|?}@ob{Hxf_ za-FZjGn#ZLf1S*x+3-K)cwh3)kQa;CFfG^&=PX#xr1wsFvpL#OSLUGqQXBs#_rdWS z+58zlgue%D=fFJ#u8C-t!Y+>saW6+Yzn$KbSJ_@YH&jpJKWuN1aftE3`cop8ld1gF zoV*(OJF%Ib>>9G(<*)PHV{Hn98k+TFOR#HjgBpRR1NiyYjz5O`XH)a1qzB;9$`nFNopB~Tca(R>d;F@u&xQo3_==l=w=JNWP`sx{A5$-_- z1v;|d7muB5exv5RG1_bKUFckGO5{^4F6kIHe#D>NXR5wy5Z?RHACfa*Y>8ffD0X1u zE9}3VEn}S@zRIV`K=#PH@H{BKnEdwWme-8Kjyu$%Sl_SSwFwv&q3Lf6{v7Jsn`GSL zeE&d8?$>0EXsG@JJqvo(wPYHb-r^_z$i=<*)!4K-AAhR8og=n)y1qzzd)M*i*QghV z!Tue2$7)-P%n4{Vb?&%$mOqYXuQ4;|jQi6u)$z&bHh12mm-}|tM`-_&kJu7Fzz-XG z|MC;O#@>YdatQ2dZE=9(#qg+u)!NGmXsr$8Vqd<(^4*_3lW1ne$)Bnoh4dU2-y;H{yq5$lT0k z{lB<_&Nb!K3VbcuRiJsm-nd(SPmUNAtD_a;_%-VWoAS?tjev-N@l_*ShC?*tWc zpcn&-x|DR$rUs<@H>~;=)VFYZhdMXThWU8JZ|Np=-VdqyzNh0~>HC+A7w~pQ^QmKY z6ql3#kZ0waJBA^=Q=h1H*|Y2(AkXlcThM!nw$sV|1#hg)cprXWkWM69?JL5ZXAAmI(>~7m zsoEcR9PH{vzOgAa=9hiQy^4>;t$2j4Bj7)ZoU?4k!Q?glN$!U{&U?5o+ui;YIknVw z6F*LKZM^#QJ70$PcbjValb_CG=SVmwh-(}8+SJVL$lH$&^&$ADH~`PXVmjCOelvU< z*sg-(AUY0L8*a2MWoKLMBO9vs$)cykW2STOuV#H})%tz@yM0I3)tva0x>dg|(~rYA z5}q-}f5J3Hd?u)mTf;+d@i~7#hsOApUBxFCIKCO4LfzO1zHR8ZjsF@PPgh_6LjNRQ z>s{6Se|s1&Qj=TZd$FhA){z5$u`L={a=xklH@k`BCVa6TIT0UxK$o14#4gjGHe$pdw}w2@#=<;?d3qeF#id^t%){e>)!O;AU03J)d^i|y1!PF*KjTu z%JFcGB!3j%oXkCJ`cY;qEW7iGxp!ELOnQ=k7+kxdGk=VZ!vBu^_%9!A2gjl8I8D2I z{$i!EeIor^^NV?88gQ(G&zvrwYEJMbO#jf)K_7aMouwEJah*L`*V?Le_kGFg_qZ65 z3xBkkJo+16^*?(W=0Wy1JKxo{C!NnU?z1O)8=fX~hPA5jPUpttHI}!F`CtHBhO2i2HqpF_Wur=JXl^^p@ zJ(|_{&$wIaALXNDkCQ*|ll=_3C-qx>Gk%M_LGa#AkF~@Z|CR|NM4ejOl%s)dd zFTaxW2ji`WV;eG!2kGb92J&Aox_+a}*in9o_6B)&p6wp;KXZL9oNsEcGe^2qOje*B zUh~a(cBnIV3iTF8Ds8^G;aK z=QH`3v@!o@ThUwl`riLpPn+75-3WvJAHI_mcR}BsZ0p+DfQIT_gJPieX6AceiO)3t z+X~Nm_0{u%v>!V@#M5DwuaecAzr>!8@IU3a3t6+sVQ=*w)#=)dQ)#H-Z@}D0ULV&` zJ>w}~gW(&oATt@n*YdX9js2Z$i?rX3$Cl354nw(vINwU1c$AaHO}|K6I)Bst2Ak(N zd8j@2x_+@f^`6|_Om05J&yDGtYu^{X>)5pp+WW*Hf#o9pZE1U=q592x#J6P+SRY_x zBiA=4TkS}K4SUJKPz&d}b_&0cor+`ByrJ@4m?Kr|^y*$%cMtcS@rLNuz>}E=pDG@5 zHnv_A-Nde2$$s0WKSY0+hsmR22K=|4Q5 zZZN)(C;EM;QR#}F)q6kLM18VUH-~F`jh`QN{Uo}K*-=Nw-e)Q&*Q|xZIvdW;bkAbv z1=?Ga`?Ff9hJ^1=C7brNjwMc4z`uoS4{Ph}c$}>@-J1ko^NToyJ{;yhky@8N58H1I z=36jtNtbzYJ_YvQ>#FCzsXCS)Pu5B_-Q?S@cpif1GkkxN*$B@mX!$FrCtcI^ePDW? 
zj!*F2&&PMH!c>lQjg9fzXw|m3FFNrKYpKZ;agEjEdtFZ)cMwZ`E%OW{ndtlr{?TXC zKh@nu<~8bK(hQ~+aKBS0$6`0DUlf8&9BSWmqPso3ZOtFlclHoon5{K}7CA<$BPvs-)`e7cPEv09k zJXr49Z1OgTajNIi`h0p1S^8f34gPnvA7u;o2+u8y($?Z4|I5|X-?QQQ3HD|5&K1`! zdae5ZOg>F-kUQ7X{XU=E&;M##a#F4O58Z_SOEQX(W5+sv4gVOjK6d^c+1>T2KKTED zaiHtyl4r@#2U2xA#I0CbXH5m|er$ocDA3K&9&+k*w*O1t(PGxj^@r#UcIt1j+8ynN zc4u^ATIe^?b#fqed<+a~OEjv%xQ=cF{rX5)D<}uRXlyN}kh?mZukrb3>X5lb^r-Xs z^mO36lN$Us2JP|qH_=bk#Y7&Z@*+IH2=l^_SMibJ(HEY7={OyRJJ@qD`Oeesa_Me5 zFV?;$dC%LgVD~qC->Fu8izq*ztWoHQe_P5+#zt3 za_Lz8>Oiy)%JKcl5917M>2Hfx{lAocnaSC_u{_)u&BgrN#m1JhHD7KPaPsj?erQkw zRkgNYQKJajvz_ALjp=SVfn^um6po(?1kU5xJBbf=z}H>Q zx5M`rIkU74*7g>`7*Y)O_}NZzG$A$i~PMHsWsf%il0tzdY|`KIXRuf#E)-GOYcjH?hzG!1p0~38RI+^e1v*&ZZ)tkRJfmhx~AGscCG92aQe4dHbFKl{_t}S5s(b)Tp+`fanq2zWa ze@*px968(d(ysP}XH&(scvand2A4%5&aiHF47KtF^}~$Qmq9*CcmAP2SUcHJ!oVXW{8x=lOxWm}hS;8SeAb zj(p!A_Aa&u$==X4{XAcjpI=fxo~LIb45JoR??A^lv%d%bC44l;wXyQ&d-UOW+8%0} zd|m*V1J3eFz;&5_wD)gO+Gu?J;zG&BKBVl)64wf zxo%?a8ZUyaHC{Es3$(^>SUi6&K16p_eT6yP=NB)s|2FrmSMb5M^7b_Traa^^JKi{iBm&vWE#3-b|T zr#%}0UrWBZ5Y~Bo@DdsLk|*H%k<8X~ZsuNpG9Bt=H9qvidnew{*t8ekIcR3$y%$zB zAj}=IwX8Xr=cZk1)}#@*V}%p ztA3lGEvIjKgL?^>-ddFx$$WA*5G&`&*Z8l5`D=KluM)IZg`t)Qe(Yve1TSzvTk)X-C#oWN(0eDSWfxHup?M zkuyX7L~MOYp6*Ege{9~dD*u72C)|6ZABL`vZ69*flIr>TxA^*y_ZXeOvR@61@x@m; zA0!@fw)laL8{k$qvPW%iqnUz_&S)yUW5~N2PBNoL^smqVAMnY1{yvePTnq0!X6vD! z<@%+zgV}L3nSZgfo&K>AdB%Xm+E$k1U#b1Bnl&kYza4Kxj`jaE?w5mclfPC{DZa~_;tP}he~5fm@mdRI)8~DjQi=vaLjgoFh3g8%KOo;XKy{I{9WwT z{d^!@d>4s(sB7^f_S>*c{fLbdvH5Y-RAJ#Ov;wzA+pt?Y`kmoIMHvCpUD@;v>vvc@20Zl6#GYE?(h29 zaBOdzgYHzkC%g6teQ)s{9pU|-WC1+q)VybaM?O{WKi@+aKa{u0gD1uOeLO$#zj)=H z(B2ALd-gVA+h%IbGPKvwv9|Uuzr=Z{H%r;^A2N?(>&5h)M^DI+cJghQYa^XsfOgMSeKqN(?TC86Qzd^QzR-6! z;@hLi9wqi_M5>mD`;>ABn@@n{RkA)No4l+Q-G9o})#1v>c#@rald-Suac%q+?jgee zz{mPl*0xrC|0mq@rZ2#*jwbENoMJzpj)q0mH$w7T8diN5<$nIVP+XhT`|UM+@}7J|HtNy#SV0`E$>41Z|r>^ z@3-)@5W0sOs_)~3xWuqW z)@G~ca!<2ge@~yJ{~aU{E~IZ~GGjI#1oJuC29vdb{6%oh zA!kNI^`G+DMfA(<XuX8L^8M;>eo9J$l+np5ZR zpqU1@zMJ$W;}FLS9P?Asi;QE<8$VxEy%QN9?%EpgvM*ZW3F8zwP4TQt$N$LoUP|0w zn;fj}b;#4)xVIkz7Y-OV{3p$5;@4gOZyA#Og#jhOSq|=yZO(xmJzc;rT+| zAJ3Nbp2*f|Y_R?xekYgGS3FDKXgKE3u@0Kt`AB&-Q(S}p*2HUk$WD?Q!`b>Vp7rT` z%XRr1pG=o|cCi(IhW8k@)tl@Bf$e!G_PGWFL&HKBqO=H`C`JpdajcV?( zn4HLV^;kl{i{*|tY^d$nut9%KI;lCM9X|oHF(|zerhoa&J#BFttPy&3FE&4jT8C?Re{p^$-uW(z5oJ}lOU`$;<9R=gr-Zls@IGC1>* zwy<6^nXm3A-&{9)294TMnA69-;gnAWeARf5wkT{bp*tRy`}NP;$?c0yAJ0#-EmlYG zg6&K^yTSGo>@CFbd%D6s%bw_a;@`@~Z~3Lpjf2^n`qw4!{zUH=bc$v8mT+E@=^e`y zf831BYuGub=01-d#)rJGacrFSLF7!pFK?r%Y_aYTp2g)y)m7i^%;i?n3GZIypGsHg z8?EK>Vf+ba-q`*^^`Q;FeFa-1KE6(#-0S#bya$pw!0{J!c?TdJgf3S*?}Q=r`v$TP zQA;*v?-1X$SqGjE(R8QF93YIT*=>$*b)Bu@9geWR8+4Sv+uWyxHIvY1i_`JFRp-0P zYDhoWzJ_Bf*VKeC=L&1!>36ONTbF8I7tQvzt@Yvm)80{gJLjQ)^@PhDA??8iF)J6L z|Htt+{J@61vtxargg-gXxje6)EsfDOPJiY9@N6Q#ySD0kpK4*2k=cX~^zmddpIilJ z9bGGJTflb(*`r}*OQeq$_rY^NIcxK=97$X7WsG+#^6!FI4i#^pTZq2D_8u^v=zM#0 z^Xko4>b*;83-xfKJ~xg34o9;&j4#9cGQNV&R&cGGRR5tE*0v+h1H+n3vR5riHXvuD zw#I1HfcQAa>QS;UyY_CVzA;)o+j~`e;B8B;Tu9f|e-46WBV#~cZO4k$(KhxZePBVC z+=rf?EXWEuGO^Axe7sNbL72ZDi7)v70JJZ=K1tqPfagfZU!hZrizawdxbD`TqVJ>+ zeCc|P+$-T*X3o-;jE!p5Z#%<#A7Q?f%_m>KjeoAWmnQ30_Kii;%{y%T6Q1i8^Z4ov z*Pi3IN7#~)cMPn5dpG?>G-`Ah_riT_d8&5(Fc-V@FZpx_o!{ddDQ*w2_i1(y7mJr` z-X(^43Lfix$;b89Th!7PkgYxu4sM|_i==@F;6D}Sgf;XX6(Pmewk^Iy@;*fzbP zdXF@$CFZZtd4u!W`agTZ_kFAPxx^uZJv)rfP4ts1#ex3tEHY`K77c{!c=Y|fPje-F z<}k(WWIYsWLCyQ0)=S3GHw?BP@SjZA75wodAF?NF9ehKVJj-Hsen-Zup^ZIs=P-u) zp3)cob0^h@vqgBl|C4uyL0t^@IdNll4Tn?ShcUER2*YSPOTN&bvb)JX2JL73w+EUx z#Oh%A)rt)F9_2>t!x#J$);7cQoI;F?S!BbR@8Vihv>{)n@zYx{bcf|~Ix;!EvG%v| 
zo=eVPnCG$m0zQ5i-Gc7bdkpE;?ALG7apcjLOoQXU_>Z(djVw8r*0bSJZEM>Wi0{|7 zK8vebTm9yy8~@9j_3qs@Yx?=PJVO6?k4*ZDi}?_5SQ9Aa zT{5gzeOE8pkNtPC+4X8H+T6YcnOD`K^eQrbmj@ft(+B@3{Zj2ne$*$&*Q#d~c}sX+ zgKeb!S!5na|6w+EhUY)Y_H0&D@?*s4BshYP{)3-ANgKYH47dD^y4%02y)_%!!Mt5f zPQdyJA3vcVO+tSN{MVo}jzznY%~#p!WDZ>HH_g4Q#neSPyt%n{{4^T}82=t8Pp;&@ z>!)y}H;W0K>G|-(lWYs?Alv@NgkAZR9qAZ6eHT^lN``MvhV`R#B3d;lynmNp@3=<) zboVvux#oS_Y$x-VE6^TH*9cfgz}f}=7j37pLEfZ|(7MMgr;6>{@`V2M#@ec9Xs?mk z4$nw9URhwhgN??#gv|T~HtJXDbg~A@qdOO=XZ$mo-oyBQe|~6;#(X3`)3~w3`rx}{ zcZXwlZSpq1#MOfmM?i?Nzjg#P|3S#$B3;}^5oHyehWUq8nyC&N2A z*<^HI@U3w;TAS=!JWtwz+z0sOQoQaFOLOvM2)?)Kt9zH^HGJ!kx3K1(POO%~YJMGs zzTSe4F8nx2j;&_P&`hjf^&1C$HEqu)VNXWhf$i2NlbhIeuj5_d{saGiU^@nl{15Y$ z;&#Vu%=SgIz}Cul&{xRM^YN(xh4CpJUR$+(zZ}N>V0(d$ztPLL$=Up{ruc-|4WqxS zI{Q7JicQf6ul^8u?p0g|>mxAGmH&iiHS@6d`9Y4wd>BoJ=LGWC(zZQJfAWKVSA9$W zCi?G#@g`e;ZR-`)w|b&Y$!uIyYbIe%lum_Loe1mh;aNd==27ei+bi;A2%B~$^9OkN zGpwf;^>U+b74FhI2*m{0wje*$Lp3IO#(9|kAK;qY$oMMWhTKi~U}HXh#QAgR^}Y0c zZN{nu_Tpvd=ZbSL`w!gv@Oc#Cs&-}l=`7Ix%5MAU05-{)WD0+7Qt$sA>sLKv8f5-K zc5*BmKc(k0*G8kAW`8c4g?yzhMCOx;c}}>8kH00)T1>P=&HF^#CFsRH*{i{CztOi~ z>k#oDq>eV=-5#zTgAe7OXH41GFnPw5?ahYu*uMc8!KNMYJ&9IK!t?m5PG9C)D{{NY z%~#+#na=;v6~Qrz%sKoL`o(B;w&&!$5$Xu7qF^=cU<1qifjK8g& zlyV~4BgDn|k?MqgP(BWi7^gq7`69NdRq?Jcn>!ZQlfOj#X27y7J;C-PoNuCD^yX7z zMI<+4zD(XH_fxrWqhm2D=8Absd~&WB!G?@Y&%njDP^)wIg^|7+))lKY%YW(X0{bTo zQNAU<^U*%u;5#U2u7^*b2{9_zAD$cK6Y0GI%|Lcu%AVcedL7+5us!5jjoyx8as-)Q zqE|PgpV{VJ>+%|L-$VP}WVMG)-4FACVjFaK(K8-hW4cH3fAH@h#}Dh{m&sZEC#FBV zuM+0i>Hq4hZ$p;av(M3wH}{<^XLjU=N&1)mobHaNFP;%F&2zpb+ArDvi0j?#FDGYh zdHw`h>%gF&Mfz>FcTa0nY@$DJZ?A?V9kt)Uzq`=?spFJv@hZr?`%4HI~McsACh5?oHnUd&kB;&(Vjx?(b_geyP;Zj z2>-6mfAiphF`WrpV|eyOEAHiRewIU-+7N!*nT&vEG&->lbIo)ZJ~0jXkew>#DP3df z-Awy!4SwrQo;h8)KujKh>0-P`qph(;%}Q^i2VKFIFb@plLDZL?cJe^IN*;8)hIlQ2 z^&)Xx22%rhxAVh2WWOh8kHhcTZGI2mKS*{nI?f;`!oNUY`i<;|==(o0KZwqgYt?^| zB%$wEM=9z;G zGF~KWGK?Rg;rpzgJ~oIAf5B$WJN}EW-w{tWDEb8dHE^zA#|z>*pIv-he$W4pp*e$% z^KD-_m$S*;+V+O`9CBaq?qO5-db=L__3di>T>M+ny)d-H*^(W*i2dg5y#-GrZCA5F zzGPd=laKMpc>^@d zoj*-)EBl_}-o)N}&BYM5Y|Nj(lJ_yX&+r^-`$FD-NdD_`a69J%`R5kc^o^MAa6eQ0 zY%kWKmgTSV&4qB9&!*xXU1-}Dj+e>3pZu^6Z2q4O#d8O}BglC~`=;!oKR>@#y-yM5 z?%5Hw>YgHP!Ip=`;&OFhiTt?&{Tnd+m#1>2A{r_oQQrJyLMvt8f;=;vWEQW zz`xtl`;)e_@l7S?2ixBDp5d@7*swY~*Jsl>`aaV?nvKS8E35 z7R7U9uE$^MW>^ad&(yP4`pT7bsJWrWW_(({!Ty{KwKcKUoSvcH_9JUe*Z;*M*UDGP z3Vk*v?|68xf^R;)mfG6dxx@f62mTYXn~9@#HBUz5(i?R&~q^PKo4W7iFI zji^=M0?U`->jY01zTXZ{Cw6X%X1v(wH|fW4*OR#qf40E?Xw7_IoZoO9d>{wHdxMGj zf4N2-edhL^`F)gQ*NdLy^F?ui_($-6BEMci`!t?Vi(X-yJk1xlwosfOtX1pr`K>U# zN&b3l>(8(Ep&4tQ(bBloM_X-?+GTDmw?cd}at+wVR@XnFYYBV<(4WGd<=W+K{2aPg zh~u%Q`sr;SV(^*^qrtz^uL%AED#F%p50fZ~ku%Q5-2>XOg!~Eh~>Y%>SeJ0B-zx8URJ5WDh3#~sc8oAS+iu&rD8#s}YyqkAk`IhwvjuD)6r+rxY9VI8A% zE&T6D@k4FZ{mNc=*_?HUuSbPbe_XuJ&J6?fqbFywCXP7k! 
z{z+G&H8)A+b#|zIdpH}=yvJ_!AfCpi*=)Oyu2$@^_MRCl(xHx@WlI~h-}2`#ba-Z5 zIxp^l@jN(Qv7co71x;r@Z3k;-Ilo+9HI{e2dyqUv&)W3dYyT(O@Lt|iYSlgR;xu^a z%ywXxnpifcXAm5nY|pc=4v*XkYiPN((;|{*i;T;1Ge1^b>+o#kSiFkW;QfIe85zL` zANH)~Q)MH3av|KS#-EUNyBwc_mJKC8=A-edlVt&~KAArXtGb?tKL5L1TuRpJaCc?X z&HOqXUNs@S2Nl-o(=bo##jl6gs&6vI`cc~2aUc9m*wNkCyA0Mx=~EM;KgH=-J~uBY zx7BtK`X|}1E`|33<154oe|f!j{!HJZuNz;_^_#?YaD7YW@#NRjzn^^qpZR|_z}OSB z(YPI1bB``%;|XXcyMA53OrAQO-wT7WvAl)sS@qTbWfTu-(Ta?V{WY|B%iri`OCm>NzE94yU&0>;+fQQqNcJ4-ct6-*q$jNDJxIqM+V!pQ z4oWK5(=}jf)eyz^iPH!er}Ft2dK+NWf3n@tw6VS8xU1OFAHF3L>!mmujvi; zW7!B}(vvNBFIe@z=SJzm2|8m)G|=@Ku1!`mc7m&;emI>EhV$7Td~&sX31bzz(?xXibvn4d>R-`N zIvdrD%kb?6!|+vgC!b2@MR?^xG~Ol$@|)nHoCNfhBV#}~JI~wOYS=lW0WHukIhW8G<7m?kH?g`G%qf6Z5SJ5qEr+byWDZ7`- zhjqyK$Tj&>d|Bta!*sQl|L2jhTz;=L(*KULduIyMor$LYTcp6}Qi>dOo= z=nHE=DpPY?QfG$X$LfO z#9{|_?9K=C=y{CX`Rv$M+s^3Ku~2u?&-g^#(n;`{W2Bvp`Frv8`HQRH)P^}ju`66{ z>DrZixWe~mvn?E}Be8jW5#~^X*}GU?&EU^(UAu#g;-9~)CSCxGnwiMA;vhCnr2AXX zC&W14mThz->%$^mNo;<$65mAj&#|v3=U96-<^A;Au>L=p{3F>N^6HeDbzbN4xtfP; zMaTK%zQPYf(Jat++*3p+8>?HOQ_rGb;cn63nG2iXkI7DNtFPAHs`oq8(z1&j~)3&^D9Y`S?FzhjB9(^Xl36_Ut&*@ecesgdJfFX${9$a{Cb1c7t^pEb3!; zCYOp=V*IJ@S4Q&RTznsh%i6W--TGu(c6Wg3DtZRuC9@nr|9xos!>fOmgNzeR<#Bs9 ze}VoL_B>c;&5#e9)~fGkW%_K{0*;~h@37s2R^G>AkX-;*S?{+c<`{CJoFT54!*rAD zZP}@R<>Tmj7>-+D=Fen!+D{lKmqX@Ytf1>f;Be)YL z`5JluC)angjxX zTJ;;I@GgD143wTJ>G7_*%YSLDzKlKFZ&Z@!y%w*^_OgUrwjv z(eA!Oie9XuzhJwb%oyhGWbTj8+$}kK9ckgKyX$ewSKI;;+8&oI|Ghf2ffe`KjOkhTjsFH5r4-z-|Zo5E&l!)&l>8?X6Pp1y+i)bKqD{9TWi&OBCs5O5rs@5*UJ!9lv zwwR}Hz7ErM=n8u48dkk~sBh(0p*L>`zt@a!k`G}H@oCMwx8l%&tv9 zT6&A~Z`pH~ZBNHRc279OFZ1jtsfXP+lbvdu6@fMwzECr(eI! zN7Pom2lTv{J5S!T4|9W!`0{$&T6C)E`B>O~a__ja^M3MY4|?wCsg5|7r|FP-&kUca z)@)-nw(KDuYH}oo;T^|l0u0uj%E!f}KOCW!jZ_DwvOD}gW2`MayE@W5@g;IMhPwmI z>TLWF8CTf%WTUYwJVOiX|4}Ehj)(0?v~_Ii*RbmS(50}x7VyE^oDHAz(Zl?AAzSCL zaV@&rkT;3$XKd)hvySv9d|hkR^V^~=+C%tf3>klGyPw}yp!p8{DA(jmv7__Ra9@LF zn)Bb(DYY>nd(t5Yi%j7H@06UA8K$9(f$fMX5qVzo>R&EO8W%jU# zIvn2r$U?s13u~v*FXsI_lYOIFtDdFqtCG#(zn%Vr=`y#;wuL&hmS|pdCo`Hd^Y~6{Y(7pFITgTY^a`fg?F-I`5gQmeMrZ~#`=5Z%2*iXLi{=z zA!hPEUg3V}5Bnpv@67kFiq$*%_TPNJ1`M%lx6-wbTCow!Je7XYG-|Bov#Qql8 zg8egWd{aKyz51`?09p0isfvQvtith zukCJbuCMJ0&jjs_w9AROHQ)3i?_ct+r=vL?VqWZQ%z2pH_Iz|VeADc&fq(A?YtQoY zEU}-5uOI)M&nEpRY0aN|K3qL)ud%Oas=Wb?*k<}|nz{b0{qyX2lbg9^}GS()?xSXz_i>$&}lE}?sExhujT$dcRF6!#q5$vDL zp1zL9>-*=p-Zx-Yd&68soWr_MzMQ^WY(w!GXA*d#1IZtwmTyV!Ft*=j&o4zr#?`jp zV6x5|)<>$jR}Z|`yM79p`bByVn#bVO$1`<1?twpoBa9p2S?WOc?M2thx>f&g)(3AF z*UWRmdqZJNOoQ*vgZq#A>KiP@0CLYFM?NKTAyb#Lh34Kbkb6#@Z}6y3%g7u`PKjm$ z`UUKG-ga+|&&7EdedK2|Z12jiu>KE2T;Oq!6z(Ns{P|w+&x7MB^Xj>PbKf7e}-S>ShTkDdUT=oxQ{D3!?GLM|KJbysY6A-?$tYX z)$bF;HJVJ`di*jM{gyBcCi@0<-Hz`;`S&Eg+ZL%I{PhXDpJVG|j$fnaOgR?T(yq}q z4By+ZT!p?PIhm~~ycg2_9=z)BAzgn75bPlQ$dv$FTEJ80*Uh z{%{=66N`c9ZX>f7d5^e$gII4)pZ*=z&e8~t_x$tMTtBmJ)mRbcEVmms_&(=@(tJI< zPmoX4|BT7;Gr!_>`-+aF>`42Qy?}{Y5^Y0^YtjD&Tz6JJs z%jv6K-w^#&$7`6=Hp3(C0e5EXOZ1oQ4%gmtE|1Ia*~-Rvv~g{e`>FrY&5o#*>&E2t zD71Ig{DxEg_$KJoU)OD@{y!udidKyaYo+CQG<&k?Lb_d#4tBn^WBn+)fZnU&KeAr^ zRJT47D|wf%gXXCQ|Fg?(I5Xp1WPVc4bl$`H*XZuUa}nEKgmpV|pC#uQGR3Ui$apDk zC7AHrlg=}qG^S|MBtNsIFJZukZpHAm+vL2`B zUAZOR`N`sT_Dxz^P2&4Ct=^YD&X_`l2e9XY=heDW;Bx|}bjJ;maBSg)`@ zsxAu8Wm9qsIgu?zyNIl7#h1+VJ92v2LS0)%_uJ^-r@Nl7k3uI8ve9VY!9SBtC!$*) z2K6QZ?5?kmi*npn&U_MG0fSw=mxQ;rE6pGe#cM!?7weV z_1npP$y95SP2pOXo(Xl;GqPk3zif==d^ps|@Efe64ZZuS4a4nkA#WwVmtCJhUOV4n zdxRY!r}xJv1dc|ovqRE`s({A;dx8c zUX8u3-v2B%$TRZW;duuCT=w*o_iu>X-T`-zMaDDm?27&!F}_0EcE<8U`JfQ%%jmuW zZz|8$s`>UQetA`N#eYtN_XXKg$KP@|eG1<3{A4^0_kmeoG`GRkge`BPYfZ;KbYJcG zG_h+#cDTp>3LpMlUq~KCJCM)l3h@ii4?`U&*K(|1gzp6uqv`k`pAJ=(j 
zg)IH0gsBwgWD!i(h|;lq)s&peU^oa+Pmp)2z4%3B#ZS_y=7#&$Y7Kcm{;DH?pjzA@R%h_z z4A}Nq6vh9r`BC?=J1Wj2`N&M=*CI~>nkepZ*GR`hpq{v3S2qiKO>zdejEY}7Y{{&)^uf8qOE zOy>Cyk!5?V`reKFN`Ke(n(OL#etEt5Cp*s4_L1Xe4XfUro$C5hn673MKW9&o^^|dG zSNeCLZ}Ot*yQbkC@kqYqn~2}m=r>?r54N`9lWXihVgH8c#i(4$|D$0%o4#MP%Y`(d z^DCH)_3>47+)%52FPrPf+3~Qb+vR9;gSYXRbLG9r>O`mSSf}g`^S>}(Ot+}RM@E<< zjwWM5jV#ysJ3ojX^20hps3*w_XnqoVeZCxkP9KP#MRy_0uduhV^POtuZ0x2#?P9!j zT%G~D{t$i-k@eQT5gzlP{C>Idp;+L{w=~~=lbkoq;f6ZDQm*bTXXSj^7EcrLk@MBN z>+QwyQ!)yi=Ff7nPp7;A{^6w4;$YS!(Q^@V>bUwwtOhYutolcXBb69 z#|kx|zx^HTcpo-3D7}F1I)}AwG&kcr&wOVam`0K#pR;f2Z(dvVJ9_sIVa+tG;pG{< z-|*?JFsKpPGB&Q3L=$%c5{0lh|uOpWG;FDkJa{B8X zf1}=9Zr=)@xTg9{@qcvnv;P3!2!1yP7QvriqL~BV$NbS<4UmJ`MQp{F{a)vH7wmb8 z-xib8gdM_zI-#&-? zbNg@bFVTh8Jd+)t35T{~g6C^;I~}I|3Vdp65`ObVXSoo*ZP>lJYrC-Z z&^l|@wr1M5hVO3nJYheTu9Pl)Dr@ODy!SPhtnTvoAv(y2ZehcqUe$j{W=-ihQrpL9 zpL2h|lfHTe9nI;Sjc+q}hw#4~DQk7=sq5>TL+prWBpgS$_L;nn7W+?ea`!;5=4X@8 zm;7`Xov=pQSFNSNd^BAfiorVeYIbR>Xs#vebN0VQPl(rYGQK14TJr8fdzkB){Sx>( z(KXU_bGKFVO7@2N?Bys|n#-EZGBW{Ix-iRNoHaYj)-Lq6_owEAgtonOYduhT~Hjs_*|4Q^@_D zjHPHhiOU4nrm}H0x?iQ|M?T-2j05~7 z^BSt>Nbx7+yv6<#8?2kac%nYmTwD&ouMbA&;%Nrk2lQ@W+gzJ^6u+)rZo~u0R*U0l zuJy!o4jY~pC$%h3T$={>31q)d&&}k#&3_~KvztD(u6{>$aU)rWYyX~%F3!Klvpt*d zb-s12dS5BLx0ikl^AmitMJ>w6Oxqa8;LJC(&9{ApzOgy`T=I6IOTQ0mh2c3`IZ)em zWCh<^vq&!I%g-E(VSYK@wOsGTcj|e58#|8UORBPDVeVF~-O1gh)aGU z{!wtx*%v^A0sCyJjMl);=GiuVBd z6#8R=b|M<~$MPU~7Y_G=;hAzAVsaf?c=HWc=}6>tSgVX*!8=uLyn=s6Ili3yVPxMd zAK#Z#v*4;@-(#?Lx9`G^@Aywl3x1B)YcP&Gf0o|Q?U%Ce1G4UO+!c;D^qpPEdy_p& z@a$>3k4)ozxTgxgWy#f^bO-H=;A{e83w9hw&tyDnlY6PU*O@)?BOy1t$@VsxVW{i(qdyRTf3m08+L3*jIsMVv?`m-0>$m3eAl+L|)yvmMTo;FE zrM&2Z|5!e`hP|)jxe)&Y{4s(}VjR8$6}M;Cfn@PTyc4`H)K~Aum8-L747p^4_49Nu zx=uBAo}uqftyRC>EA)j_?#E4y3r{#U56QMC>zz8!kKp-}|5~&8Wwy<*KL@T`*zTSw z|Hn0BZ|WW`4SnZA=ReW;tn2&}y{5hnvPT!5njcB&u1|M~ETYm_3qg2DPE_{ADzP0(_4?OqC znNf0JhT|LQ*vhs0=)MVkPd+(-zQJOzEnP_`|0I*hFi*+f!uPp0zDs8qd*wrRJim;t zRqvTr_pPJFv_?iF_I-nHeORs(n;qdxwacS?YkHdS_b_;OBV#`Q>_~=OD-Xfrc}qM5 z&O#htv+bhq;7xx<|2-Lp^HB`*YL0t8Sv?<4UJ{oZoKGNMEsa*tac!_?Q8kB+^rf`3 zzVoA)U8DUD`c6l0>?x0dbw2-066aIvs_(OexktHnoqNf})i>+HJh{9LZ(D8s`FtDK zr@-@o`0q`Q_wbSx;@=pqOUXWmEoxvS*UN=+vaj}~ct^tUH(Mt1iE*>|%O_xyhMsE8m@NG!nhedi{>S|#5bKJw(>JM#Qu4FPosUw`DXH~o_^nL z${G|azHBx5d?MfG;vsg? z0y6lxcn6+8=@G|#9-4Oa$f00&eiYg4j@!Um&j;H({uvf{s`se*H@b_=A#8YwU+UR> z8T=C*FSLJ~o-uNDI$ivnPUD*c=o{eJ{JC76Jol>UO7ixF;c{E3f9t_`oW3vz&EfE$ z#J&aeZsK12FMaL@^Vnr#^mKjo9za;v4QqR0JuQ6<&n{}lV0zXeM;znG_5eJCjW_Z< zzh0jRa!0biT~F^MsmE{?jo5Ra>rEYZb<9tBM|O8KmV2I-o&dLgSl!c|Nxpl6{6KPd zL;o!-Swq!ls=hFp%wysHh7ZnU%Mw@~;@6hwH)H3Md@@=rhdDmU@$-(midO?2|3mXh zeRW?`_M+z;+p}cB9?f%owAf_MpR_JHj?EqA$m1|{(!QMUr=$DK^$%QY-eBE9e2gvG z2-|XcCN=nNFP)9m|M%ov$>u%y;lc*@F!YLV`3&2;i08(zya?A5Xx^0{Y%Dfq(}mjg zhitUt|1PS&)f#?3oyBVWZS?MjuB$xu9%HC25!<66%NljmAJ)}i_#1|9^vjuYA?z(- zJ(P@P^zTl88-727-ZSWF>e@zP@ezH;;B8`4Hww=_;uFxA>lf$Jd5hSFoa+F?{^rgb zYI_0gbM^icoBl_%-$_nem^#6+CSJbI`|TnWOC@85wh)_# z7gfIt4fm(nDYlld_GE{cr}}R6H2i4Gm=Dy5a&6Gxperpcsi3Vc~J6Ex*>Jy?u*8Ps(Azyuq{(}9BdcO&R zX9S!lh}}1Qa0Y#w@Yztf#3h0)ynCAsqU&QAH*$Wi?MSj0lJ|&xBla1?6Ic?qhB-#? 
zZPL2VH~d_Gfv!KrVsHIa9V@!i_n_a39>))Z$yy!#qk=(oM$ItAaAuH9l&@6vVgeF)1BBBjrU8a>Tb>z%Dry-9^S7^j_fjIC$1V^MLIeahpQNZRqP=zX!&_ zWdBbrV)FF4_+fbV;wyIN`gb9>%dW8Z=hxQuZ_9N#oKCM`4Al_8rHsm$IXWwwdIFxyTFT zhrBq-wm-kXlN?IMh47aAZ=R7H!j1!s2WuK5Zs#lgH@Ts~e~XeaMl3gG$HC6YE&H=O z$Y>T=N5;3}Bp9E-FJF?y&hJ-~Lk+l2-?HYPk7w7LwiCr`f3;A|vgLT@^mO0j z`gZI)+qMCGBiYswHuWkRhIUe2^{gxv$7nuW6XAUu%@Fp%8js=E#^wko!7azaI%%GX z+Z!VC!2e$5X!j`BL5qolZ8Q<+b;uo^l z(pvUj-8=di&c>g8uB75;l(8pp|s+DO>-WFu7iC%7JQ^{=47guXzV?Kcl zPiGQqirH@P{vi+dS0@795HxC0csDeq zsC*uuHFdlVdv>(diNhy+_<#78lXD<>ztgiieS8{y9%4xU7UDx@HUO`8>kG9poy~@c zaCSv^zWoSzPuRWs-}h?m>uqw~(`GNpo4;MxSCiJkF1Y%!VO`_a3C`t167qLFKHnFn zFX;J|o-hs%tgq(vV)K*%HF!RURc@pc z*s%$_KP3C0I&*6B7vtF;Z9jCMh;ujmav|hJ^qTt674`@4=_}ch_D?tXjYltQsq7t$ z?>Kxr)znAZrOxGZ{uH^DPdU;T`%Zsgoghw-qKm(g{6VcbhndHf(*FOc&O9n;v`T6|WD`2y{?Yafnx zUp`gu;vLn?L3Q3SCcBQ#K`-jjN*df$I$&F`i0 zeMI)beC7FQ^$n^=>Q;@(eZ}D>?OWh!LC+`{d%(Jt?N+vohhb~_&bIH&uD$GMv!|)H z7x?@uW7RbLa78WY2zJY}khA5R_8nj{ZdUWjko(%=XUVzNzK`n*`Oik)i=MuBF0a>O zmu~^&YugVJ?_MxoiZbSX_=*K6?_aZXVxo}=dwj4>-nRI%{A$q^F zp9kN4d?4ov{XV(W*tv(;e}!%w*;nCt#_jwnrB^4|Y$(KYtaT(l{O+ z3#)vK$q(xo`9l7h;Jlgr3VOXWoTsjJp-(@~X5(RZLQeVz-KRQ!R1SRy_vU2pX4?m? zKAiWe6GJrHkb51vJ?lMt;wv%AwkLC-c>N3GJi6LC{}KHper`_37TOo%+e@9h9FBYG z8*5`v`<%s^320cm|*D;QFnOzlGcQRb0V8`_{}AtW$N+ z-ck*0to;vjRQVhJJ0qF}2W+L-#l6M)&pPi!J7-^UKbw}TF?<%@|0%YqS??9sPsx8? zKJ3OmbCY}%wZDOmkfY1cOd``9B5|)$Os-Y$n&qLc%ZaoQpF`k2L`;X{Uq%-k$&2K5 zgyCxE53%KSxsSdaroWGsN8j-k{mCHw|409Wbc~V%_4LX4d=lG!LjMan?a;mm`yhMo zn}>I5LO$0yUrQVwq^B#L&ycq!xo5hjZf4_b=ej0t zY3LjK!|{u@$#920JXfB{_2P5-!f$gH!SKfaW9hEruBf&?fR6%#2%?}^prc5r*dhkc zVFbIe+cB}PiH$?IG$Q4wq@p6Ii0~W-5f!^T#_o=**LuJE@cuEMnb~{AZ^d4FPnf9f)i(t24d=hdDxSBY>t_GF_TBKx z^P(x-*4V|Hct+UP=FUX zIXjT|0{&DS&1d$2!8ehw?&Rkm#Nbo@gCX)< zB(eUFBYu-l<*Rb;GQ8J#Z}BI5=FU74<2UJXe=a+nKh%Th-OB23L~KN+I$T~U7VblY z=WGSt$yB*9kqx8RXG}zG@mm*U-n$Fy|I&Sh^b>N}R}8JNg?~d1FXP)8#?eW}vKkbw z5p!`%X0b`Wm9S;TOKwai8*1N%6HKQHFl+T25H@=;$ z-&~qBB6EATtxqmoVf~c8WBd=H_XOKpWM9BXAHZnt%h!Ny8NP&Fdy`?@g=Yb&e9Jx( zzZ)vGdp^;7j(68@t)E^*&J)fjk|oEJFW`R*)_HW!L31~mayNgKPi`{~&LV3uA6`iI z?sP4}lW2RL{+%kd>)d2L`=9YO)qe(g)-aiC>S8Wip4E4i_Kvn^;F;xIZs$v3)}B3N z|AAcaOut;^{CK(|HiofryYsAC`-WN8)m+<}Z5N31HvG~DR(2Gv&={wgoR6Q>wh0{k z9j+fU*E?k=K4(u!X9`PNz}=Nhb-7?$On%X~zV_|FVn6YiXsq1Ty_PTOw)(oGxzqlC z;=hjl8Wry|m~*>nH*S(va_}|da0BgPmCMt_53vmxo)6!S&j2tBaCL?L8|ijHfuvS!zi-9Nkmwv(NfFA7?)kj^DMP3d>D& z8F$ebxO>spRqTzM;_gcAzCzkX+jRCe;HzD2v$bdB&2-$VF3NTzv!NWmna|!MvnAaJ z!pHYf3vEy6e;@Yd?0_#jui_mV_K+1{!?)rXFy*uL!LJi$8*3m!v76@+N0*S0dy7k-qAL?D%NC8_(`+Ah&xdg;jCsBJL;HWoovRkQ*HK)Jr)RzA31~}m!usTN6NjbfTJY0LWSbkx zPvu@WwD*%GM+##so{Yx)QM5tRsapGHXx7p32l(MkPUGXQXqw5jbJ1M@uel@d2U9(r z!MC5#`(>RvTB&_wC;Vqa@($Vt#_HE-8uS>>FMlcj7muEP84(G>Sk?xZ(` zV`F$rwy8(yx$rEeLw(EoR$QBCLtnOpUyi1I(Jz9pg}U~fzH$6ZO=h>(>N4Lk4 zH7SCBUa-lH;&HT|=@vujUBmu7vi3)R7z}%nxfNY}627~dvM>G~&AV(hZsO(a4LQip zJW($fYTE?;7&!k+|1DxXo&TnwV|Q+STzan}oo_pfU5)VZOMDI4SHXU@8XElHlHP?d z42EAWrhZo+Kgn0WI=_nC@L##ZVFa-OhSwd;Vf?Iv%E<(Y;@1+}gbJ8%@>L zgdHQ$o{MLTFox*f8s3g@40UYH7`_9Qe+BoUFs~4ob?A^Y`Qc(`3};Wkc`A&zYF|!A z6WfDg_ps~c*Wi1R9P!K5uA+hVb@6n^YrRl5!ZX!*?)(XTbJ4D4eYS?<+sXY5)+5dN zFeh!~NmsJvb$YtZcnPuL+#=GLbi=6wdRhqMVB&^0# zb_NVh@qMTUOe0sEvdy(^#~w8yH$J1qbjgS0XtF1Z`A6udv#}lB-|+|Aqx1RfF}z|{ z`+wA(jQI$@wsd}|?|#0h!ZDVdb=CHVt%E~-Z%0lT$6u+f2RiOd<}UO$t5{2t>-Sj6 z%%A53B<7*+?-f%9* zi}U&DI5Oo@Sa)R`i{WAH8Ki9q?6c4`#=lQp?HVmSx5~n`Kr8;+2Ik|*+JIdfqK#}5 z^oO|5F&_NJHGGdX@1$)qIZx10hjtoFz5`IsC1arNaJEdTxKD1Z4))CG7yfvbfA*pG zzhd!*4PUr6%^naZ@7QEp;4xoBH;^6HQPbIR8+vig)Pzi~h2QVQ=W3tm_%SpgPvmpR z-RKzicJ{4!uTnd^qxrD@0n46f)ZeT^w{acTg_-B^$z*yTW4ruLzan>l^KQmc 
[GIT binary patch payload omitted — base85-encoded binary delta data, not human-readable]
diff --git a/rvc/configs/config.py b/rvc/configs/config.py
new file mode 100644
index 00000000..e6490936
--- /dev/null
+++ b/rvc/configs/config.py
@@ -0,0 +1,179 @@
+import torch
+import json
+import os
+
+
+version_config_paths = [
+    os.path.join("v1", "32000.json"),
+    os.path.join("v1", "40000.json"),
+    os.path.join("v1", "48000.json"),
+    os.path.join("v2", "48000.json"),
+    os.path.join("v2", "40000.json"),
+    os.path.join("v2", "32000.json"),
+]
+
+
+def singleton(cls):
+    instances = {}
+
+    def get_instance(*args, **kwargs):
+        if cls not in instances:
+            instances[cls] = cls(*args, **kwargs)
+        return instances[cls]
+
+    return get_instance
+
+
+@singleton
+class Config:
+    def __init__(self):
+        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
+        self.is_half = self.device != "cpu"
+        self.gpu_name = (
+            torch.cuda.get_device_name(int(self.device.split(":")[-1]))
+            if self.device.startswith("cuda")
+            else None
+        )
+        self.json_config = self.load_config_json()
+        self.gpu_mem = None
+        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+    def load_config_json(self) -> dict:
+        configs = {}
+        for config_file in version_config_paths:
+            config_path = os.path.join("rvc", "configs", config_file)
+            with open(config_path, "r") as f:
+                configs[config_file] = json.load(f)
+        return configs
+
+    def has_mps(self) -> bool:
+        # Check if Metal Performance Shaders are available (macOS 12.3+).
+        return torch.backends.mps.is_available()
+
+    def has_xpu(self) -> bool:
+        # Check if an Intel XPU device is available.
+        return hasattr(torch, "xpu") and torch.xpu.is_available()
+
+    def set_precision(self, precision):
+        if precision not in ["fp32", "fp16"]:
+            raise ValueError("Invalid precision type. Must be 'fp32' or 'fp16'.")
+
+        fp16_run_value = precision == "fp16"
+        preprocess_target_version = "3.7" if precision == "fp16" else "3.0"  # slice-length constant patched inside preprocess.py
+        preprocess_path = os.path.join(
+            os.path.dirname(__file__),
+            os.pardir, os.pardir,  # from rvc/configs/ up to the repository root
+            "rvc",
+            "train",
+            "preprocess",
+            "preprocess.py",
+        )
+
+        for config_path in version_config_paths:
+            full_config_path = os.path.join("rvc", "configs", config_path)
+            try:
+                with open(full_config_path, "r") as f:
+                    config = json.load(f)
+                config["train"]["fp16_run"] = fp16_run_value
+                with open(full_config_path, "w") as f:
+                    json.dump(config, f, indent=4)
+            except FileNotFoundError:
+                print(f"File not found: {full_config_path}")
+
+        if os.path.exists(preprocess_path):
+            with open(preprocess_path, "r") as f:
+                preprocess_content = f.read()
+            preprocess_content = preprocess_content.replace(
+                "3.0" if precision == "fp16" else "3.7", preprocess_target_version
+            )
+            with open(preprocess_path, "w") as f:
+                f.write(preprocess_content)
+
+        return f"Overwrote preprocess.py and the version configs to use {precision}."
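+
+    # Note: because of the @singleton decorator above, constructing Config
+    # repeatedly returns the same shared instance, e.g.:
+    #     assert Config() is Config()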
+
+    def get_precision(self):
+        if not version_config_paths:
+            raise FileNotFoundError("No configuration paths provided.")
+
+        full_config_path = os.path.join("rvc", "configs", version_config_paths[0])
+        try:
+            with open(full_config_path, "r") as f:
+                config = json.load(f)
+            fp16_run_value = config["train"].get("fp16_run", False)
+            precision = "fp16" if fp16_run_value else "fp32"
+            return precision
+        except FileNotFoundError:
+            print(f"File not found: {full_config_path}")
+            return None
+
+    def device_config(self) -> tuple:
+        if self.device.startswith("cuda"):
+            self.set_cuda_config()
+        elif self.has_mps():
+            self.device = "mps"
+            self.is_half = False
+            self.set_precision("fp32")
+        else:
+            self.device = "cpu"
+            self.is_half = False
+            self.set_precision("fp32")
+
+        # fp16 preset targets roughly 6 GB GPUs; the fp32 preset roughly 5 GB
+        x_pad, x_query, x_center, x_max = (
+            (3, 10, 60, 65) if self.is_half else (1, 6, 38, 41)
+        )
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            # Tighter preset for GPUs with 4 GB of memory or less
+            x_pad, x_query, x_center, x_max = (1, 5, 30, 32)
+
+        return x_pad, x_query, x_center, x_max
+
+    def set_cuda_config(self):
+        i_device = int(self.device.split(":")[-1])
+        self.gpu_name = torch.cuda.get_device_name(i_device)
+        low_end_gpus = ["16", "P40", "P10", "1060", "1070", "1080"]
+        if (
+            any(gpu in self.gpu_name for gpu in low_end_gpus)
+            and "V100" not in self.gpu_name.upper()
+        ):
+            self.is_half = False
+            self.set_precision("fp32")
+
+        self.gpu_mem = torch.cuda.get_device_properties(i_device).total_memory // (
+            1024**3
+        )
+
+
+def max_vram_gpu(gpu):
+    if torch.cuda.is_available():
+        gpu_properties = torch.cuda.get_device_properties(gpu)
+        total_memory_gb = round(gpu_properties.total_memory / 1024 / 1024 / 1024)
+        return total_memory_gb
+    else:
+        return 8  # assume 8 GB when no CUDA device is available
+
+
+def get_gpu_info():
+    ngpu = torch.cuda.device_count()
+    gpu_infos = []
+    if torch.cuda.is_available() or ngpu != 0:
+        for i in range(ngpu):
+            gpu_name = torch.cuda.get_device_name(i)
+            mem = int(
+                torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024
+                + 0.4  # bias so near-integer sizes round up
+            )
+            gpu_infos.append(f"{i}: {gpu_name} ({mem} GB)")
+    if len(gpu_infos) > 0:
+        gpu_info = "\n".join(gpu_infos)
+    else:
+        gpu_info = "Unfortunately, there is no compatible GPU available to support your training."
+ return gpu_info + + +def get_number_of_gpus(): + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + return "-".join(map(str, range(num_gpus))) + else: + return "-" diff --git a/rvc/configs/v1/32000.json b/rvc/configs/v1/32000.json new file mode 100644 index 00000000..2f28f4f6 --- /dev/null +++ b/rvc/configs/v1/32000.json @@ -0,0 +1,47 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 32000, + "filter_length": 1024, + "hop_length": 320, + "win_length": 1024, + "n_mel_channels": 80, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 256, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,4,2,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/configs/v1/40000.json b/rvc/configs/v1/40000.json new file mode 100644 index 00000000..3961ddb6 --- /dev/null +++ b/rvc/configs/v1/40000.json @@ -0,0 +1,47 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 40000, + "filter_length": 2048, + "hop_length": 400, + "win_length": 2048, + "n_mel_channels": 125, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 256, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,10,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/configs/v1/48000.json b/rvc/configs/v1/48000.json new file mode 100644 index 00000000..41ea3b62 --- /dev/null +++ b/rvc/configs/v1/48000.json @@ -0,0 +1,47 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "epochs": 20000, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "batch_size": 4, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 11520, + "init_lr_ratio": 1, + "warmup_epochs": 0, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 256, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": 
[10,6,2,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/configs/v2/32000.json b/rvc/configs/v2/32000.json new file mode 100644 index 00000000..eabab7b5 --- /dev/null +++ b/rvc/configs/v2/32000.json @@ -0,0 +1,43 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 32000, + "filter_length": 1024, + "hop_length": 320, + "win_length": 1024, + "n_mel_channels": 80, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,8,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [20,16,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/configs/v2/40000.json b/rvc/configs/v2/40000.json new file mode 100644 index 00000000..e1ba44a9 --- /dev/null +++ b/rvc/configs/v2/40000.json @@ -0,0 +1,43 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 12800, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 40000, + "filter_length": 2048, + "hop_length": 400, + "win_length": 2048, + "n_mel_channels": 125, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [10,10,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [16,16,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/configs/v2/48000.json b/rvc/configs/v2/48000.json new file mode 100644 index 00000000..1a4da9f5 --- /dev/null +++ b/rvc/configs/v2/48000.json @@ -0,0 +1,43 @@ +{ + "train": { + "log_interval": 200, + "seed": 1234, + "learning_rate": 1e-4, + "betas": [0.8, 0.99], + "eps": 1e-9, + "fp16_run": true, + "lr_decay": 0.999875, + "segment_size": 17280, + "c_mel": 45, + "c_kl": 1.0 + }, + "data": { + "max_wav_value": 32768.0, + "sample_rate": 48000, + "filter_length": 2048, + "hop_length": 480, + "win_length": 2048, + "n_mel_channels": 128, + "mel_fmin": 0.0, + "mel_fmax": null + }, + "model": { + "inter_channels": 192, + "hidden_channels": 192, + "filter_channels": 768, + "text_enc_hidden_dim": 768, + "n_heads": 2, + "n_layers": 6, + "kernel_size": 3, + "p_dropout": 0, + "resblock": "1", + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + "upsample_rates": [12,10,2,2], + "upsample_initial_channel": 512, + "upsample_kernel_sizes": [24,20,4,4], + "use_spectral_norm": false, + "gin_channels": 256, + "spk_embed_dim": 109 + } +} diff --git a/rvc/infer/infer.py b/rvc/infer/infer.py new file mode 100644 index 00000000..ae78283d 
--- /dev/null +++ b/rvc/infer/infer.py @@ -0,0 +1,495 @@ +import os +import sys +import time +import torch +import librosa +import logging +import traceback +import numpy as np +import soundfile as sf +import noisereduce as nr +from pedalboard import ( + Pedalboard, + Chorus, + Distortion, + Reverb, + PitchShift, + Limiter, + Gain, + Bitcrush, + Clipping, + Compressor, + Delay, +) + +now_dir = os.getcwd() +sys.path.append(now_dir) + +from rvc.infer.pipeline import Pipeline as VC +from rvc.lib.utils import load_audio_infer, load_embedding +from rvc.lib.tools.split_audio import process_audio, merge_audio +from rvc.lib.algorithm.synthesizers import Synthesizer +from rvc.configs.config import Config + +logging.getLogger("httpx").setLevel(logging.WARNING) +logging.getLogger("httpcore").setLevel(logging.WARNING) +logging.getLogger("faiss").setLevel(logging.WARNING) +logging.getLogger("faiss.loader").setLevel(logging.WARNING) + + +class VoiceConverter: + """ + A class for performing voice conversion using the Retrieval-Based Voice Conversion (RVC) method. + """ + + def __init__(self): + """ + Initializes the VoiceConverter with default configuration, and sets up models and parameters. + """ + self.config = Config() # Load RVC configuration + self.hubert_model = ( + None # Initialize the Hubert model (for embedding extraction) + ) + self.last_embedder_model = None # Last used embedder model + self.tgt_sr = None # Target sampling rate for the output audio + self.net_g = None # Generator network for voice conversion + self.vc = None # Voice conversion pipeline instance + self.cpt = None # Checkpoint for loading model weights + self.version = None # Model version + self.n_spk = None # Number of speakers in the model + self.use_f0 = None # Whether the model uses F0 + self.loaded_model = None + + def load_hubert(self, embedder_model: str, embedder_model_custom: str = None): + """ + Loads the HuBERT model for speaker embedding extraction. + + Args: + embedder_model (str): Path to the pre-trained HuBERT model. + embedder_model_custom (str): Path to the custom HuBERT model. + """ + self.hubert_model = load_embedding(embedder_model, embedder_model_custom) + self.hubert_model.to(self.config.device) + self.hubert_model = ( + self.hubert_model.half() + if self.config.is_half + else self.hubert_model.float() + ) + self.hubert_model.eval() + + @staticmethod + def remove_audio_noise(data, sr, reduction_strength=0.7): + """ + Removes noise from an audio file using the NoiseReduce library. + + Args: + data (numpy.ndarray): The audio data as a NumPy array. + sr (int): The sample rate of the audio data. + reduction_strength (float): Strength of the noise reduction. Default is 0.7. + """ + try: + reduced_noise = nr.reduce_noise( + y=data, sr=sr, prop_decrease=reduction_strength + ) + return reduced_noise + except Exception as error: + print(f"An error occurred removing audio noise: {error}") + return None + + @staticmethod + def convert_audio_format(input_path, output_path, output_format): + """ + Converts an audio file to a specified output format. + + Args: + input_path (str): Path to the input audio file. + output_path (str): Path to the output audio file. + output_format (str): Desired audio format (e.g., "WAV", "MP3"). 
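+
+        Note: for non-WAV formats the audio is first resampled to the
+        nearest common sample rate (8 kHz to 48 kHz), so the encoder
+        receives a standard rate.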
+ """ + try: + if output_format != "WAV": + print(f"Saving audio as {output_format}...") + audio, sample_rate = librosa.load(input_path, sr=None) + common_sample_rates = [ + 8000, + 11025, + 12000, + 16000, + 22050, + 24000, + 32000, + 44100, + 48000, + ] + target_sr = min(common_sample_rates, key=lambda x: abs(x - sample_rate)) + audio = librosa.resample( + audio, orig_sr=sample_rate, target_sr=target_sr + ) + sf.write(output_path, audio, target_sr, format=output_format.lower()) + return output_path + except Exception as error: + print(f"An error occurred converting the audio format: {error}") + + @staticmethod + def post_process_audio( + audio_input, + sample_rate, + **kwargs, + ): + board = Pedalboard() + if kwargs.get("reverb", False): + reverb = Reverb( + room_size=kwargs.get("reverb_room_size", 0.5), + damping=kwargs.get("reverb_damping", 0.5), + wet_level=kwargs.get("reverb_wet_level", 0.33), + dry_level=kwargs.get("reverb_dry_level", 0.4), + width=kwargs.get("reverb_width", 1.0), + freeze_mode=kwargs.get("reverb_freeze_mode", 0), + ) + board.append(reverb) + if kwargs.get("pitch_shift", False): + pitch_shift = PitchShift(semitones=kwargs.get("pitch_shift_semitones", 0)) + board.append(pitch_shift) + if kwargs.get("limiter", False): + limiter = Limiter( + threshold_db=kwargs.get("limiter_threshold", -6), + release_ms=kwargs.get("limiter_release", 0.05), + ) + board.append(limiter) + if kwargs.get("gain", False): + gain = Gain(gain_db=kwargs.get("gain_db", 0)) + board.append(gain) + if kwargs.get("distortion", False): + distortion = Distortion(drive_db=kwargs.get("distortion_gain", 25)) + board.append(distortion) + if kwargs.get("chorus", False): + chorus = Chorus( + rate_hz=kwargs.get("chorus_rate", 1.0), + depth=kwargs.get("chorus_depth", 0.25), + centre_delay_ms=kwargs.get("chorus_delay", 7), + feedback=kwargs.get("chorus_feedback", 0.0), + mix=kwargs.get("chorus_mix", 0.5), + ) + board.append(chorus) + if kwargs.get("bitcrush", False): + bitcrush = Bitcrush(bit_depth=kwargs.get("bitcrush_bit_depth", 8)) + board.append(bitcrush) + if kwargs.get("clipping", False): + clipping = Clipping(threshold_db=kwargs.get("clipping_threshold", 0)) + board.append(clipping) + if kwargs.get("compressor", False): + compressor = Compressor( + threshold_db=kwargs.get("compressor_threshold", 0), + ratio=kwargs.get("compressor_ratio", 1), + attack_ms=kwargs.get("compressor_attack", 1.0), + release_ms=kwargs.get("compressor_release", 100), + ) + board.append(compressor) + if kwargs.get("delay", False): + delay = Delay( + delay_seconds=kwargs.get("delay_seconds", 0.5), + feedback=kwargs.get("delay_feedback", 0.0), + mix=kwargs.get("delay_mix", 0.5), + ) + board.append(delay) + return board(audio_input, sample_rate) + + def convert_audio( + self, + audio_input_path: str, + audio_output_path: str, + model_path: str, + index_path: str, + pitch: int = 0, + f0_file: str = None, + f0_method: str = "rmvpe", + index_rate: float = 0.75, + volume_envelope: float = 1, + protect: float = 0.5, + hop_length: int = 128, + split_audio: bool = False, + f0_autotune: bool = False, + f0_autotune_strength: float = 1, + filter_radius: int = 3, + embedder_model: str = "contentvec", + embedder_model_custom: str = None, + clean_audio: bool = False, + clean_strength: float = 0.5, + export_format: str = "WAV", + upscale_audio: bool = False, + post_process: bool = False, + resample_sr: int = 0, + sid: int = 0, + **kwargs, + ): + """ + Performs voice conversion on the input audio. + + Args: + pitch (int): Key for F0 up-sampling. 
+            filter_radius (int): Median-filter radius applied to the extracted F0 contour.
+            index_rate (float): Rate for index matching.
+            volume_envelope (float): Blend rate for matching the output RMS envelope to the input.
+            protect (float): Protection rate for certain audio segments.
+            hop_length (int): Hop length for audio processing.
+            f0_method (str): Method for F0 extraction.
+            audio_input_path (str): Path to the input audio file.
+            audio_output_path (str): Path to the output audio file.
+            model_path (str): Path to the voice conversion model.
+            index_path (str): Path to the index file.
+            split_audio (bool): Whether to split the audio for processing.
+            f0_autotune (bool): Whether to use F0 autotune.
+            f0_autotune_strength (float): Blend strength of the autotune effect.
+            clean_audio (bool): Whether to clean the audio.
+            clean_strength (float): Strength of the audio cleaning.
+            export_format (str): Format for exporting the audio.
+            upscale_audio (bool): Whether to upscale the audio.
+            post_process (bool): Whether to apply post-production effects.
+            f0_file (str): Path to the F0 file.
+            embedder_model (str): Embedder model used for feature extraction (default "contentvec").
+            embedder_model_custom (str): Path to the custom embedder model.
+            resample_sr (int, optional): Resample sampling rate. Default is 0.
+            sid (int, optional): Speaker ID. Default is 0.
+            **kwargs: Additional keyword arguments.
+        """
+        self.get_vc(model_path, sid)
+        try:
+            start_time = time.time()
+            print(f"Converting audio '{audio_input_path}'...")
+
+            audio = load_audio_infer(
+                audio_input_path,
+                16000,
+                **kwargs,
+            )
+            audio_max = np.abs(audio).max() / 0.95
+
+            if audio_max > 1:
+                audio /= audio_max
+
+            if not self.hubert_model or embedder_model != self.last_embedder_model:
+                self.load_hubert(embedder_model, embedder_model_custom)
+                self.last_embedder_model = embedder_model
+
+            file_index = (
+                index_path.strip()
+                .strip('"')
+                .strip("\n")
+                .strip('"')
+                .strip()
+                .replace("trained", "added")
+            )
+
+            # Override the target rate only when a valid, different resample rate is requested
+            if resample_sr >= 16000 and self.tgt_sr != resample_sr:
+                self.tgt_sr = resample_sr
+
+            if split_audio:
+                chunks, intervals = process_audio(audio, 16000)
+                print(f"Audio split into {len(chunks)} chunks for processing.")
+            else:
+                chunks = []
+                chunks.append(audio)
+
+            converted_chunks = []
+            for c in chunks:
+                audio_opt = self.vc.pipeline(
+                    model=self.hubert_model,
+                    net_g=self.net_g,
+                    sid=sid,
+                    audio=c,
+                    pitch=pitch,
+                    f0_method=f0_method,
+                    file_index=file_index,
+                    index_rate=index_rate,
+                    pitch_guidance=self.use_f0,
+                    filter_radius=filter_radius,
+                    volume_envelope=volume_envelope,
+                    version=self.version,
+                    protect=protect,
+                    hop_length=hop_length,
+                    f0_autotune=f0_autotune,
+                    f0_autotune_strength=f0_autotune_strength,
+                    f0_file=f0_file,
+                )
+                converted_chunks.append(audio_opt)
+                if split_audio:
+                    print(f"Converted audio chunk {len(converted_chunks)}")
+
+            if split_audio:
+                audio_opt = merge_audio(converted_chunks, intervals, 16000, self.tgt_sr)
+            else:
+                audio_opt = converted_chunks[0]
+
+            if clean_audio:
+                cleaned_audio = self.remove_audio_noise(
+                    audio_opt, self.tgt_sr, clean_strength
+                )
+                if cleaned_audio is not None:
+                    audio_opt = cleaned_audio
+
+            if post_process:
+                audio_opt = self.post_process_audio(
+                    audio_input=audio_opt,
+                    sample_rate=self.tgt_sr,
+                    **kwargs,
+                )
+
+            sf.write(audio_output_path, audio_opt, self.tgt_sr, format="WAV")
+            output_path_format = audio_output_path.replace(
+                ".wav", f".{export_format.lower()}"
+            )
+            audio_output_path = self.convert_audio_format(
+                audio_output_path, output_path_format, export_format
+            )
+
+            elapsed_time = time.time() - start_time
+            print(
+                f"Conversion completed at '{audio_output_path}' in {elapsed_time:.2f} seconds."
+            )
+        except Exception as error:
+            print(f"An error occurred during audio conversion: {error}")
+            print(traceback.format_exc())
+
+    def convert_audio_batch(
+        self,
+        audio_input_paths: str,
+        audio_output_path: str,
+        **kwargs,
+    ):
+        """
+        Performs voice conversion on a batch of input audio files.
+
+        Args:
+            audio_input_paths (str): Path to the directory containing the input audio files.
+            audio_output_path (str): Path to the directory for the output audio files.
+            resample_sr (int, optional): Resample sampling rate. Default is 0.
+            sid (int, optional): Speaker ID. Default is 0.
+            **kwargs: Additional keyword arguments.
+        """
+        pid = os.getpid()
+        try:
+            with open(
+                os.path.join(now_dir, "assets", "infer_pid.txt"), "w"
+            ) as pid_file:
+                pid_file.write(str(pid))
+            start_time = time.time()
+            print(f"Converting audio batch '{audio_input_paths}'...")
+            audio_files = [
+                f
+                for f in os.listdir(audio_input_paths)
+                if f.endswith(
+                    (
+                        "wav",
+                        "mp3",
+                        "flac",
+                        "ogg",
+                        "opus",
+                        "m4a",
+                        "mp4",
+                        "aac",
+                        "alac",
+                        "wma",
+                        "aiff",
+                        "webm",
+                        "ac3",
+                    )
+                )
+            ]
+            print(f"Detected {len(audio_files)} audio files for inference.")
+            for a in audio_files:
+                new_input = os.path.join(audio_input_paths, a)
+                new_output = os.path.splitext(a)[0] + "_output.wav"
+                new_output = os.path.join(audio_output_path, new_output)
+                if os.path.exists(new_output):
+                    continue
+                self.convert_audio(
+                    audio_input_path=new_input,
+                    audio_output_path=new_output,
+                    **kwargs,
+                )
+            print(f"Conversion completed at '{audio_output_path}'.")
+            elapsed_time = time.time() - start_time
+            print(f"Batch conversion completed in {elapsed_time:.2f} seconds.")
+        except Exception as error:
+            print(f"An error occurred during audio batch conversion: {error}")
+            print(traceback.format_exc())
+        finally:
+            # The PID file may not exist if opening it failed above.
+            pid_path = os.path.join(now_dir, "assets", "infer_pid.txt")
+            if os.path.exists(pid_path):
+                os.remove(pid_path)
+
+    def get_vc(self, weight_root, sid):
+        """
+        Loads the voice conversion model and sets up the pipeline.
+
+        Args:
+            weight_root (str): Path to the model weights.
+            sid (int): Speaker ID.
+        """
+        if sid == "" or sid == []:
+            self.cleanup_model()
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+        if not self.loaded_model or self.loaded_model != weight_root:
+            self.load_model(weight_root)
+            if self.cpt is not None:
+                self.setup_network()
+                self.setup_vc_instance()
+            self.loaded_model = weight_root
+
+    def cleanup_model(self):
+        """
+        Cleans up the model and releases resources.
+        """
+        if self.hubert_model is not None:
+            del self.net_g, self.n_spk, self.vc, self.hubert_model, self.tgt_sr
+            self.hubert_model = self.net_g = self.n_spk = self.vc = self.tgt_sr = None
+        # Drop the synthesizer and checkpoint references without deleting
+        # attributes that may never have been set.
+        self.net_g = None
+        self.cpt = None
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+    def load_model(self, weight_root):
+        """
+        Loads the model weights from the specified path.
+
+        Args:
+            weight_root (str): Path to the model weights.
+        """
+        self.cpt = (
+            torch.load(weight_root, map_location="cpu")
+            if os.path.isfile(weight_root)
+            else None
+        )
+
+    def setup_network(self):
+        """
+        Sets up the network configuration based on the loaded checkpoint.
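+
+        Reads the target sampling rate, speaker count, F0 flag, and version from
+        the checkpoint, then builds and loads the synthesizer accordingly.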
+ """ + if self.cpt is not None: + self.tgt_sr = self.cpt["config"][-1] + self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] + self.use_f0 = self.cpt.get("f0", 1) + + self.version = self.cpt.get("version", "v1") + self.text_enc_hidden_dim = 768 if self.version == "v2" else 256 + self.net_g = Synthesizer( + *self.cpt["config"], + use_f0=self.use_f0, + text_enc_hidden_dim=self.text_enc_hidden_dim, + is_half=self.config.is_half, + ) + del self.net_g.enc_q + self.net_g.load_state_dict(self.cpt["weight"], strict=False) + self.net_g.eval().to(self.config.device) + self.net_g = ( + self.net_g.half() if self.config.is_half else self.net_g.float() + ) + + def setup_vc_instance(self): + """ + Sets up the voice conversion pipeline instance based on the target sampling rate and configuration. + """ + if self.cpt is not None: + self.vc = VC(self.tgt_sr, self.config) + self.n_spk = self.cpt["config"][-3] diff --git a/rvc/infer/pipeline.py b/rvc/infer/pipeline.py new file mode 100644 index 00000000..6f9e554e --- /dev/null +++ b/rvc/infer/pipeline.py @@ -0,0 +1,708 @@ +import os +import gc +import re +import sys +import torch +import torch.nn.functional as F +import torchcrepe +import faiss +import librosa +import numpy as np +from scipy import signal +from torch import Tensor + +now_dir = os.getcwd() +sys.path.append(now_dir) + +from rvc.lib.predictors.RMVPE import RMVPE0Predictor +from rvc.lib.predictors.FCPE import FCPEF0Predictor + +import logging + +logging.getLogger("faiss").setLevel(logging.WARNING) + +# Constants for high-pass filter +FILTER_ORDER = 5 +CUTOFF_FREQUENCY = 48 # Hz +SAMPLE_RATE = 16000 # Hz +bh, ah = signal.butter( + N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE +) + +input_audio_path2wav = {} + + +class AudioProcessor: + """ + A class for processing audio signals, specifically for adjusting RMS levels. + """ + + def change_rms( + source_audio: np.ndarray, + source_rate: int, + target_audio: np.ndarray, + target_rate: int, + rate: float, + ) -> np.ndarray: + """ + Adjust the RMS level of target_audio to match the RMS of source_audio, with a given blending rate. + + Args: + source_audio: The source audio signal as a NumPy array. + source_rate: The sampling rate of the source audio. + target_audio: The target audio signal to adjust. + target_rate: The sampling rate of the target audio. + rate: The blending rate between the source and target RMS levels. + """ + # Calculate RMS of both audio data + rms1 = librosa.feature.rms( + y=source_audio, + frame_length=source_rate // 2 * 2, + hop_length=source_rate // 2, + ) + rms2 = librosa.feature.rms( + y=target_audio, + frame_length=target_rate // 2 * 2, + hop_length=target_rate // 2, + ) + + # Interpolate RMS to match target audio length + rms1 = F.interpolate( + torch.from_numpy(rms1).float().unsqueeze(0), + size=target_audio.shape[0], + mode="linear", + ).squeeze() + rms2 = F.interpolate( + torch.from_numpy(rms2).float().unsqueeze(0), + size=target_audio.shape[0], + mode="linear", + ).squeeze() + rms2 = torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6) + + # Adjust target audio RMS based on the source audio RMS + adjusted_audio = ( + target_audio + * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy() + ) + return adjusted_audio + + +class Autotune: + """ + A class for applying autotune to a given fundamental frequency (F0) contour. + """ + + def __init__(self, ref_freqs): + """ + Initializes the Autotune class with a set of reference frequencies. 
+
+        Args:
+            ref_freqs: A list of reference frequencies representing musical notes.
+        """
+        self.ref_freqs = ref_freqs
+        self.note_dict = self.ref_freqs  # No interpolation needed
+
+    def autotune_f0(self, f0, f0_autotune_strength):
+        """
+        Autotunes a given F0 contour by snapping each frequency to the closest reference frequency.
+
+        Args:
+            f0: The input F0 contour as a NumPy array.
+            f0_autotune_strength: Blending factor between the original and snapped
+                frequencies (1 = fully snapped to the nearest note).
+        """
+        autotuned_f0 = np.zeros_like(f0)
+        for i, freq in enumerate(f0):
+            closest_note = min(self.note_dict, key=lambda x: abs(x - freq))
+            autotuned_f0[i] = freq + (closest_note - freq) * f0_autotune_strength
+        return autotuned_f0
+
+
+class Pipeline:
+    """
+    The main pipeline class for performing voice conversion, including preprocessing, F0 estimation,
+    voice conversion using a model, and post-processing.
+    """
+
+    def __init__(self, tgt_sr, config):
+        """
+        Initializes the Pipeline class with target sampling rate and configuration parameters.
+
+        Args:
+            tgt_sr: The target sampling rate for the output audio.
+            config: A configuration object containing various parameters for the pipeline.
+        """
+        self.x_pad = config.x_pad
+        self.x_query = config.x_query
+        self.x_center = config.x_center
+        self.x_max = config.x_max
+        self.is_half = config.is_half
+        self.sample_rate = 16000
+        self.window = 160
+        self.t_pad = self.sample_rate * self.x_pad
+        self.t_pad_tgt = tgt_sr * self.x_pad
+        self.t_pad2 = self.t_pad * 2
+        self.t_query = self.sample_rate * self.x_query
+        self.t_center = self.sample_rate * self.x_center
+        self.t_max = self.sample_rate * self.x_max
+        self.time_step = self.window / self.sample_rate * 1000
+        self.f0_min = 50
+        self.f0_max = 1100
+        self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+        self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+        self.device = config.device
+        self.ref_freqs = [
+            49.00,  # G1
+            51.91,  # G#1 / Ab1
+            55.00,  # A1
+            58.27,  # A#1 / Bb1
+            61.74,  # B1
+            65.41,  # C2
+            69.30,  # C#2 / Db2
+            73.42,  # D2
+            77.78,  # D#2 / Eb2
+            82.41,  # E2
+            87.31,  # F2
+            92.50,  # F#2 / Gb2
+            98.00,  # G2
+            103.83,  # G#2 / Ab2
+            110.00,  # A2
+            116.54,  # A#2 / Bb2
+            123.47,  # B2
+            130.81,  # C3
+            138.59,  # C#3 / Db3
+            146.83,  # D3
+            155.56,  # D#3 / Eb3
+            164.81,  # E3
+            174.61,  # F3
+            185.00,  # F#3 / Gb3
+            196.00,  # G3
+            207.65,  # G#3 / Ab3
+            220.00,  # A3
+            233.08,  # A#3 / Bb3
+            246.94,  # B3
+            261.63,  # C4
+            277.18,  # C#4 / Db4
+            293.66,  # D4
+            311.13,  # D#4 / Eb4
+            329.63,  # E4
+            349.23,  # F4
+            369.99,  # F#4 / Gb4
+            392.00,  # G4
+            415.30,  # G#4 / Ab4
+            440.00,  # A4
+            466.16,  # A#4 / Bb4
+            493.88,  # B4
+            523.25,  # C5
+            554.37,  # C#5 / Db5
+            587.33,  # D5
+            622.25,  # D#5 / Eb5
+            659.25,  # E5
+            698.46,  # F5
+            739.99,  # F#5 / Gb5
+            783.99,  # G5
+            830.61,  # G#5 / Ab5
+            880.00,  # A5
+            932.33,  # A#5 / Bb5
+            987.77,  # B5
+            1046.50,  # C6
+        ]
+        self.autotune = Autotune(self.ref_freqs)
+        self.note_dict = self.autotune.note_dict
+        self.model_rmvpe = RMVPE0Predictor(
+            os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
+            is_half=self.is_half,
+            device=self.device,
+        )
+
+    def get_f0_crepe(
+        self,
+        x,
+        f0_min,
+        f0_max,
+        p_len,
+        hop_length,
+        model="full",
+    ):
+        """
+        Estimates the fundamental frequency (F0) of a given audio signal using the Crepe model.
+
+        Args:
+            x: The input audio signal as a NumPy array.
+            f0_min: Minimum F0 value to consider.
+            f0_max: Maximum F0 value to consider.
+            p_len: Desired length of the F0 output.
+            hop_length: Hop length for the Crepe model.
+            model: Crepe model size to use ("full" or "tiny").
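+
+        Returns:
+            The F0 contour interpolated to p_len frames, with unvoiced frames set to 0.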
+ """ + x = x.astype(np.float32) + x /= np.quantile(np.abs(x), 0.999) + audio = torch.from_numpy(x).to(self.device, copy=True) + audio = torch.unsqueeze(audio, dim=0) + if audio.ndim == 2 and audio.shape[0] > 1: + audio = torch.mean(audio, dim=0, keepdim=True).detach() + audio = audio.detach() + pitch: Tensor = torchcrepe.predict( + audio, + self.sample_rate, + hop_length, + f0_min, + f0_max, + model, + batch_size=hop_length * 2, + device=self.device, + pad=True, + ) + p_len = p_len or x.shape[0] // hop_length + source = np.array(pitch.squeeze(0).cpu().float().numpy()) + source[source < 0.001] = np.nan + target = np.interp( + np.arange(0, len(source) * p_len, len(source)) / p_len, + np.arange(0, len(source)), + source, + ) + f0 = np.nan_to_num(target) + return f0 + + def get_f0_hybrid( + self, + methods_str, + x, + f0_min, + f0_max, + p_len, + hop_length, + ): + """ + Estimates the fundamental frequency (F0) using a hybrid approach combining multiple methods. + + Args: + methods_str: A string specifying the methods to combine (e.g., "hybrid[crepe+rmvpe]"). + x: The input audio signal as a NumPy array. + f0_min: Minimum F0 value to consider. + f0_max: Maximum F0 value to consider. + p_len: Desired length of the F0 output. + hop_length: Hop length for F0 estimation methods. + """ + methods_str = re.search("hybrid\[(.+)\]", methods_str) + if methods_str: + methods = [method.strip() for method in methods_str.group(1).split("+")] + f0_computation_stack = [] + print(f"Calculating f0 pitch estimations for methods: {', '.join(methods)}") + x = x.astype(np.float32) + x /= np.quantile(np.abs(x), 0.999) + for method in methods: + f0 = None + if method == "crepe": + f0 = self.get_f0_crepe_computation( + x, f0_min, f0_max, p_len, int(hop_length) + ) + elif method == "rmvpe": + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + f0 = f0[1:] + elif method == "fcpe": + self.model_fcpe = FCPEF0Predictor( + os.path.join("rvc", "models", "predictors", "fcpe.pt"), + f0_min=int(f0_min), + f0_max=int(f0_max), + dtype=torch.float32, + device=self.device, + sample_rate=self.sample_rate, + threshold=0.03, + ) + f0 = self.model_fcpe.compute_f0(x, p_len=p_len) + del self.model_fcpe + gc.collect() + f0_computation_stack.append(f0) + + f0_computation_stack = [fc for fc in f0_computation_stack if fc is not None] + f0_median_hybrid = None + if len(f0_computation_stack) == 1: + f0_median_hybrid = f0_computation_stack[0] + else: + f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) + return f0_median_hybrid + + def get_f0( + self, + input_audio_path, + x, + p_len, + pitch, + f0_method, + filter_radius, + hop_length, + f0_autotune, + f0_autotune_strength, + inp_f0=None, + ): + """ + Estimates the fundamental frequency (F0) of a given audio signal using various methods. + + Args: + input_audio_path: Path to the input audio file. + x: The input audio signal as a NumPy array. + p_len: Desired length of the F0 output. + pitch: Key to adjust the pitch of the F0 contour. + f0_method: Method to use for F0 estimation (e.g., "crepe"). + filter_radius: Radius for median filtering the F0 contour. + hop_length: Hop length for F0 estimation methods. + f0_autotune: Whether to apply autotune to the F0 contour. + inp_f0: Optional input F0 contour to use instead of estimating. 
+ """ + global input_audio_path2wav + if f0_method == "crepe": + f0 = self.get_f0_crepe(x, self.f0_min, self.f0_max, p_len, int(hop_length)) + elif f0_method == "crepe-tiny": + f0 = self.get_f0_crepe( + x, self.f0_min, self.f0_max, p_len, int(hop_length), "tiny" + ) + elif f0_method == "rmvpe": + f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) + elif f0_method == "fcpe": + self.model_fcpe = FCPEF0Predictor( + os.path.join("rvc", "models", "predictors", "fcpe.pt"), + f0_min=int(self.f0_min), + f0_max=int(self.f0_max), + dtype=torch.float32, + device=self.device, + sample_rate=self.sample_rate, + threshold=0.03, + ) + f0 = self.model_fcpe.compute_f0(x, p_len=p_len) + del self.model_fcpe + gc.collect() + elif "hybrid" in f0_method: + input_audio_path2wav[input_audio_path] = x.astype(np.double) + f0 = self.get_f0_hybrid( + f0_method, + x, + self.f0_min, + self.f0_max, + p_len, + hop_length, + ) + + if f0_autotune is True: + f0 = Autotune.autotune_f0(self, f0, f0_autotune_strength) + + f0 *= pow(2, pitch / 12) + tf0 = self.sample_rate // self.window + if inp_f0 is not None: + delta_t = np.round( + (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 + ).astype("int16") + replace_f0 = np.interp( + list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] + ) + shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] + f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ + :shape + ] + f0bak = f0.copy() + f0_mel = 1127 * np.log(1 + f0 / 700) + f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / ( + self.f0_mel_max - self.f0_mel_min + ) + 1 + f0_mel[f0_mel <= 1] = 1 + f0_mel[f0_mel > 255] = 255 + f0_coarse = np.rint(f0_mel).astype(int) + + return f0_coarse, f0bak + + def voice_conversion( + self, + model, + net_g, + sid, + audio0, + pitch, + pitchf, + index, + big_npy, + index_rate, + version, + protect, + ): + """ + Performs voice conversion on a given audio segment. + + Args: + model: The feature extractor model. + net_g: The generative model for synthesizing speech. + sid: Speaker ID for the target voice. + audio0: The input audio segment. + pitch: Quantized F0 contour for pitch guidance. + pitchf: Original F0 contour for pitch guidance. + index: FAISS index for speaker embedding retrieval. + big_npy: Speaker embeddings stored in a NumPy array. + index_rate: Blending rate for speaker embedding retrieval. + version: Model version ("v1" or "v2"). + protect: Protection level for preserving the original pitch. 
+ """ + with torch.no_grad(): + pitch_guidance = pitch != None and pitchf != None + # prepare source audio + feats = ( + torch.from_numpy(audio0).half() + if self.is_half + else torch.from_numpy(audio0).float() + ) + feats = feats.mean(-1) if feats.dim() == 2 else feats + assert feats.dim() == 1, feats.dim() + feats = feats.view(1, -1).to(self.device) + # extract features + feats = model(feats)["last_hidden_state"] + feats = ( + model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats + ) + # make a copy for pitch guidance and protection + feats0 = feats.clone() if pitch_guidance else None + if ( + index + ): # set by parent function, only true if index is available, loaded, and index rate > 0 + feats = self._retrieve_speaker_embeddings( + feats, index, big_npy, index_rate + ) + # feature upsampling + feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute( + 0, 2, 1 + ) + # adjust the length if the audio is short + p_len = min(audio0.shape[0] // self.window, feats.shape[1]) + if pitch_guidance: + feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( + 0, 2, 1 + ) + pitch, pitchf = pitch[:, :p_len], pitchf[:, :p_len] + # Pitch protection blending + if protect < 0.5: + pitchff = pitchf.clone() + pitchff[pitchf > 0] = 1 + pitchff[pitchf < 1] = protect + feats = feats * pitchff.unsqueeze(-1) + feats0 * ( + 1 - pitchff.unsqueeze(-1) + ) + feats = feats.to(feats0.dtype) + else: + pitch, pitchf = None, None + p_len = torch.tensor([p_len], device=self.device).long() + audio1 = ( + (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) + .data.cpu() + .float() + .numpy() + ) + # clean up + del feats, feats0, p_len + if torch.cuda.is_available(): + torch.cuda.empty_cache() + return audio1 + + def _retrieve_speaker_embeddings(self, feats, index, big_npy, index_rate): + npy = feats[0].cpu().numpy() + npy = npy.astype("float32") if self.is_half else npy + score, ix = index.search(npy, k=8) + weight = np.square(1 / score) + weight /= weight.sum(axis=1, keepdims=True) + npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) + npy = npy.astype("float16") if self.is_half else npy + feats = ( + torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate + + (1 - index_rate) * feats + ) + return feats + + def pipeline( + self, + model, + net_g, + sid, + audio, + pitch, + f0_method, + file_index, + index_rate, + pitch_guidance, + filter_radius, + volume_envelope, + version, + protect, + hop_length, + f0_autotune, + f0_autotune_strength, + f0_file, + ): + """ + The main pipeline function for performing voice conversion. + + Args: + model: The feature extractor model. + net_g: The generative model for synthesizing speech. + sid: Speaker ID for the target voice. + audio: The input audio signal. + input_audio_path: Path to the input audio file. + pitch: Key to adjust the pitch of the F0 contour. + f0_method: Method to use for F0 estimation. + file_index: Path to the FAISS index file for speaker embedding retrieval. + index_rate: Blending rate for speaker embedding retrieval. + pitch_guidance: Whether to use pitch guidance during voice conversion. + filter_radius: Radius for median filtering the F0 contour. + tgt_sr: Target sampling rate for the output audio. + resample_sr: Resampling rate for the output audio. + volume_envelope: Blending rate for adjusting the RMS level of the output audio. + version: Model version. + protect: Protection level for preserving the original pitch. + hop_length: Hop length for F0 estimation methods. 
+ f0_autotune: Whether to apply autotune to the F0 contour. + f0_file: Path to a file containing an F0 contour to use. + """ + if file_index != "" and os.path.exists(file_index) and index_rate > 0: + try: + index = faiss.read_index(file_index) + big_npy = index.reconstruct_n(0, index.ntotal) + except Exception as error: + print(f"An error occurred reading the FAISS index: {error}") + index = big_npy = None + else: + index = big_npy = None + audio = signal.filtfilt(bh, ah, audio) + audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") + opt_ts = [] + if audio_pad.shape[0] > self.t_max: + audio_sum = np.zeros_like(audio) + for i in range(self.window): + audio_sum += audio_pad[i : i - self.window] + for t in range(self.t_center, audio.shape[0], self.t_center): + opt_ts.append( + t + - self.t_query + + np.where( + np.abs(audio_sum[t - self.t_query : t + self.t_query]) + == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() + )[0][0] + ) + s = 0 + audio_opt = [] + t = None + audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") + p_len = audio_pad.shape[0] // self.window + inp_f0 = None + if hasattr(f0_file, "name"): + try: + with open(f0_file.name, "r") as f: + lines = f.read().strip("\n").split("\n") + inp_f0 = [] + for line in lines: + inp_f0.append([float(i) for i in line.split(",")]) + inp_f0 = np.array(inp_f0, dtype="float32") + except Exception as error: + print(f"An error occurred reading the F0 file: {error}") + sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() + if pitch_guidance: + pitch, pitchf = self.get_f0( + "input_audio_path", # questionable purpose of making a key for an array + audio_pad, + p_len, + pitch, + f0_method, + filter_radius, + hop_length, + f0_autotune, + f0_autotune_strength, + inp_f0, + ) + pitch = pitch[:p_len] + pitchf = pitchf[:p_len] + if self.device == "mps": + pitchf = pitchf.astype(np.float32) + pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() + pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() + for t in opt_ts: + t = t // self.window * self.window + if pitch_guidance: + audio_opt.append( + self.voice_conversion( + model, + net_g, + sid, + audio_pad[s : t + self.t_pad2 + self.window], + pitch[:, s // self.window : (t + self.t_pad2) // self.window], + pitchf[:, s // self.window : (t + self.t_pad2) // self.window], + index, + big_npy, + index_rate, + version, + protect, + )[self.t_pad_tgt : -self.t_pad_tgt] + ) + else: + audio_opt.append( + self.voice_conversion( + model, + net_g, + sid, + audio_pad[s : t + self.t_pad2 + self.window], + None, + None, + index, + big_npy, + index_rate, + version, + protect, + )[self.t_pad_tgt : -self.t_pad_tgt] + ) + s = t + if pitch_guidance: + audio_opt.append( + self.voice_conversion( + model, + net_g, + sid, + audio_pad[t:], + pitch[:, t // self.window :] if t is not None else pitch, + pitchf[:, t // self.window :] if t is not None else pitchf, + index, + big_npy, + index_rate, + version, + protect, + )[self.t_pad_tgt : -self.t_pad_tgt] + ) + else: + audio_opt.append( + self.voice_conversion( + model, + net_g, + sid, + audio_pad[t:], + None, + None, + index, + big_npy, + index_rate, + version, + protect, + )[self.t_pad_tgt : -self.t_pad_tgt] + ) + audio_opt = np.concatenate(audio_opt) + if volume_envelope != 1: + audio_opt = AudioProcessor.change_rms( + audio, self.sample_rate, audio_opt, self.sample_rate, volume_envelope + ) + # if resample_sr >= self.sample_rate and tgt_sr != resample_sr: + # audio_opt = 
librosa.resample( + # audio_opt, orig_sr=tgt_sr, target_sr=resample_sr + # ) + # audio_max = np.abs(audio_opt).max() / 0.99 + # max_int16 = 32768 + # if audio_max > 1: + # max_int16 /= audio_max + # audio_opt = (audio_opt * 32768).astype(np.int16) + audio_max = np.abs(audio_opt).max() / 0.99 + if audio_max > 1: + audio_opt /= audio_max + if pitch_guidance: + del pitch, pitchf + del sid + if torch.cuda.is_available(): + torch.cuda.empty_cache() + return audio_opt diff --git a/rvc/lib/algorithm/__init__.py b/rvc/lib/algorithm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/rvc/lib/algorithm/attentions.py b/rvc/lib/algorithm/attentions.py new file mode 100644 index 00000000..37367ada --- /dev/null +++ b/rvc/lib/algorithm/attentions.py @@ -0,0 +1,243 @@ +import math +import torch +from rvc.lib.algorithm.commons import convert_pad_shape + + +class MultiHeadAttention(torch.nn.Module): + """ + Multi-head attention module with optional relative positional encoding and proximal bias. + + Args: + channels (int): Number of input channels. + out_channels (int): Number of output channels. + n_heads (int): Number of attention heads. + p_dropout (float, optional): Dropout probability. Defaults to 0.0. + window_size (int, optional): Window size for relative positional encoding. Defaults to None. + heads_share (bool, optional): Whether to share relative positional embeddings across heads. Defaults to True. + block_length (int, optional): Block length for local attention. Defaults to None. + proximal_bias (bool, optional): Whether to use proximal bias in self-attention. Defaults to False. + proximal_init (bool, optional): Whether to initialize the key projection weights the same as query projection weights. Defaults to False. + """ + + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert ( + channels % n_heads == 0 + ), "Channels must be divisible by the number of heads." 
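+
+        # Each head attends over k_channels = channels // n_heads features; when
+        # window_size is set, the relative position embeddings below cover
+        # offsets in [-window_size, window_size].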
+ + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.k_channels = channels // n_heads + self.window_size = window_size + self.block_length = block_length + self.proximal_bias = proximal_bias + + # Define projections + self.conv_q = torch.nn.Conv1d(channels, channels, 1) + self.conv_k = torch.nn.Conv1d(channels, channels, 1) + self.conv_v = torch.nn.Conv1d(channels, channels, 1) + self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) + + self.drop = torch.nn.Dropout(p_dropout) + + # Relative positional encodings + if window_size: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = torch.nn.Parameter( + torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels) + * rel_stddev + ) + self.emb_rel_v = torch.nn.Parameter( + torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels) + * rel_stddev + ) + + # Initialize weights + torch.nn.init.xavier_uniform_(self.conv_q.weight) + torch.nn.init.xavier_uniform_(self.conv_k.weight) + torch.nn.init.xavier_uniform_(self.conv_v.weight) + torch.nn.init.xavier_uniform_(self.conv_o.weight) + + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + # Compute query, key, value projections + q, k, v = self.conv_q(x), self.conv_k(c), self.conv_v(c) + + # Compute attention + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + # Final output projection + return self.conv_o(x) + + def attention(self, query, key, value, mask=None): + # Reshape and compute scaled dot-product attention + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + + if self.window_size: + assert t_s == t_t, "Relative attention only supports self-attention." + scores += self._compute_relative_scores(query, t_s) + + if self.proximal_bias: + assert t_s == t_t, "Proximal bias only supports self-attention." 
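+            # Bias decays as -log(1 + |i - j|), favoring nearby positions.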
+ scores += self._attention_bias_proximal(t_s).to(scores.device, scores.dtype) + + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length: + block_mask = ( + torch.ones_like(scores) + .triu(-self.block_length) + .tril(self.block_length) + ) + scores = scores.masked_fill(block_mask == 0, -1e4) + + # Apply softmax and dropout + p_attn = self.drop(torch.nn.functional.softmax(scores, dim=-1)) + + # Compute attention output + output = torch.matmul(p_attn, value) + + if self.window_size: + output += self._apply_relative_values(p_attn, t_s) + + return output.transpose(2, 3).contiguous().view(b, d, t_t), p_attn + + def _compute_relative_scores(self, query, length): + rel_emb = self._get_relative_embeddings(self.emb_rel_k, length) + rel_logits = self._matmul_with_relative_keys( + query / math.sqrt(self.k_channels), rel_emb + ) + return self._relative_position_to_absolute_position(rel_logits) + + def _apply_relative_values(self, p_attn, length): + rel_weights = self._absolute_position_to_relative_position(p_attn) + rel_emb = self._get_relative_embeddings(self.emb_rel_v, length) + return self._matmul_with_relative_values(rel_weights, rel_emb) + + # Helper methods + def _matmul_with_relative_values(self, x, y): + return torch.matmul(x, y.unsqueeze(0)) + + def _matmul_with_relative_keys(self, x, y): + return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + + def _get_relative_embeddings(self, embeddings, length): + pad_length = max(length - (self.window_size + 1), 0) + start = max((self.window_size + 1) - length, 0) + end = start + 2 * length - 1 + + if pad_length > 0: + embeddings = torch.nn.functional.pad( + embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), + ) + return embeddings[:, start:end] + + def _relative_position_to_absolute_position(self, x): + batch, heads, length, _ = x.size() + x = torch.nn.functional.pad( + x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]) + ) + x_flat = x.view(batch, heads, length * 2 * length) + x_flat = torch.nn.functional.pad( + x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) + ) + return x_flat.view(batch, heads, length + 1, 2 * length - 1)[ + :, :, :length, length - 1 : + ] + + def _absolute_position_to_relative_position(self, x): + batch, heads, length, _ = x.size() + x = torch.nn.functional.pad( + x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) + ) + x_flat = x.view(batch, heads, length**2 + length * (length - 1)) + x_flat = torch.nn.functional.pad( + x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]) + ) + return x_flat.view(batch, heads, length, 2 * length)[:, :, :, 1:] + + def _attention_bias_proximal(self, length): + r = torch.arange(length, dtype=torch.float32) + diff = r.unsqueeze(0) - r.unsqueeze(1) + return -torch.log1p(torch.abs(diff)).unsqueeze(0).unsqueeze(0) + + +class FFN(torch.nn.Module): + """ + Feed-forward network module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + filter_channels (int): Number of filter channels in the convolution layers. + kernel_size (int): Kernel size of the convolution layers. + p_dropout (float, optional): Dropout probability. Defaults to 0.0. + activation (str, optional): Activation function to use. Defaults to None. + causal (bool, optional): Whether to use causal padding in the convolution layers. Defaults to False. 
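+
+    Note:
+        "gelu" is applied as the sigmoid approximation x * sigmoid(1.702 * x);
+        any other value falls back to ReLU.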
+ """ + + def __init__( + self, + in_channels, + out_channels, + filter_channels, + kernel_size, + p_dropout=0.0, + activation=None, + causal=False, + ): + super().__init__() + self.padding_fn = self._causal_padding if causal else self._same_padding + + self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = torch.nn.Dropout(p_dropout) + + self.activation = activation + + def forward(self, x, x_mask): + x = self.conv_1(self.padding_fn(x * x_mask)) + x = self._apply_activation(x) + x = self.drop(x) + x = self.conv_2(self.padding_fn(x * x_mask)) + return x * x_mask + + def _apply_activation(self, x): + if self.activation == "gelu": + return x * torch.sigmoid(1.702 * x) + return torch.relu(x) + + def _causal_padding(self, x): + pad_l, pad_r = self.conv_1.kernel_size[0] - 1, 0 + return torch.nn.functional.pad( + x, convert_pad_shape([[0, 0], [0, 0], [pad_l, pad_r]]) + ) + + def _same_padding(self, x): + pad = (self.conv_1.kernel_size[0] - 1) // 2 + return torch.nn.functional.pad( + x, convert_pad_shape([[0, 0], [0, 0], [pad, pad]]) + ) diff --git a/rvc/lib/algorithm/commons.py b/rvc/lib/algorithm/commons.py new file mode 100644 index 00000000..2524abc4 --- /dev/null +++ b/rvc/lib/algorithm/commons.py @@ -0,0 +1,207 @@ +import math +import torch +from typing import List, Optional + + +def init_weights(m, mean=0.0, std=0.01): + """ + Initialize the weights of a module. + + Args: + m: The module to initialize. + mean: The mean of the normal distribution. + std: The standard deviation of the normal distribution. + """ + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + """ + Calculate the padding needed for a convolution. + + Args: + kernel_size: The size of the kernel. + dilation: The dilation of the convolution. + """ + return int((kernel_size * dilation - dilation) / 2) + + +def convert_pad_shape(pad_shape): + """ + Convert the pad shape to a list of integers. + + Args: + pad_shape: The pad shape.. + """ + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """ + Calculate the KL divergence between two distributions. + + Args: + m_p: The mean of the first distribution. + logs_p: The log of the standard deviation of the first distribution. + m_q: The mean of the second distribution. + logs_q: The log of the standard deviation of the second distribution. + """ + kl = (logs_q - logs_p) - 0.5 + kl += ( + 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) + ) + return kl + + +def slice_segments( + x: torch.Tensor, ids_str: torch.Tensor, segment_size: int = 4, dim: int = 2 +): + """ + Slice segments from a tensor, handling tensors with different numbers of dimensions. + + Args: + x (torch.Tensor): The tensor to slice. + ids_str (torch.Tensor): The starting indices of the segments. + segment_size (int, optional): The size of each segment. Defaults to 4. + dim (int, optional): The dimension to slice across (2D or 3D tensors). Defaults to 2. 
+ """ + if dim == 2: + ret = torch.zeros_like(x[:, :segment_size]) + elif dim == 3: + ret = torch.zeros_like(x[:, :, :segment_size]) + + for i in range(x.size(0)): + idx_str = ids_str[i].item() + idx_end = idx_str + segment_size + if dim == 2: + ret[i] = x[i, idx_str:idx_end] + else: + ret[i] = x[i, :, idx_str:idx_end] + + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + """ + Randomly slice segments from a tensor. + + Args: + x: The tensor to slice. + x_lengths: The lengths of the sequences. + segment_size: The size of each segment. + """ + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size, dim=3) + return ret, ids_str + + +def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): + """ + Generate a 1D timing signal. + + Args: + length: The length of the signal. + channels: The number of channels of the signal. + min_timescale: The minimum timescale. + max_timescale: The maximum timescale. + """ + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( + num_timescales - 1 + ) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment + ) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = torch.nn.functional.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def subsequent_mask(length): + """ + Generate a subsequent mask. + + Args: + length: The length of the sequence. + """ + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + """ + Fused add tanh sigmoid multiply operation. + + Args: + input_a: The first input tensor. + input_b: The second input tensor. + n_channels: The number of channels. + """ + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]: + """ + Convert the pad shape to a list of integers. + + Args: + pad_shape: The pad shape. + """ + return torch.tensor(pad_shape).flip(0).reshape(-1).int().tolist() + + +def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None): + """ + Generate a sequence mask. + + Args: + length: The lengths of the sequences. + max_length: The maximum length of the sequences. + """ + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def clip_grad_value(parameters, clip_value, norm_type=2): + """ + Clip the gradients of a list of parameters. + + Args: + parameters: The list of parameters to clip. + clip_value: The maximum value of the gradients. + norm_type: The type of norm to use for clipping. 
+ """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1.0 / norm_type) + return total_norm diff --git a/rvc/lib/algorithm/discriminators.py b/rvc/lib/algorithm/discriminators.py new file mode 100644 index 00000000..99251ad6 --- /dev/null +++ b/rvc/lib/algorithm/discriminators.py @@ -0,0 +1,160 @@ +import torch +from torch.nn.utils.parametrizations import spectral_norm, weight_norm + +from rvc.lib.algorithm.commons import get_padding +from rvc.lib.algorithm.residuals import LRELU_SLOPE + + +class MultiPeriodDiscriminator(torch.nn.Module): + """ + Multi-period discriminator. + + This class implements a multi-period discriminator, which is used to + discriminate between real and fake audio signals. The discriminator + is composed of a series of convolutional layers that are applied to + the input signal at different periods. + + Args: + periods (str): Periods of the discriminator. V1 = [2, 3, 5, 7, 11, 17], V2 = [2, 3, 5, 7, 11, 17, 23, 37]. + use_spectral_norm (bool): Whether to use spectral normalization. + Defaults to False. + """ + + def __init__(self, version, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = ( + [2, 3, 5, 7, 11, 17] if version == "v1" else [2, 3, 5, 7, 11, 17, 23, 37] + ) + self.discriminators = torch.nn.ModuleList( + [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in periods] + ) + + def forward(self, y, y_hat): + """ + Forward pass of the multi-period discriminator. + + Args: + y (torch.Tensor): Real audio signal. + y_hat (torch.Tensor): Fake audio signal. + """ + y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], [] + for d in self.discriminators: + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + """ + Discriminator for the short-term component. + + This class implements a discriminator for the short-term component + of the audio signal. The discriminator is composed of a series of + convolutional layers that are applied to the input signal. + """ + + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = spectral_norm if use_spectral_norm else weight_norm + self.convs = torch.nn.ModuleList( + [ + norm_f(torch.nn.Conv1d(1, 16, 15, 1, padding=7)), + norm_f(torch.nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(torch.nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(torch.nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(torch.nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(torch.nn.Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(torch.nn.Conv1d(1024, 1, 3, 1, padding=1)) + self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE) + + def forward(self, x): + """ + Forward pass of the discriminator. + + Args: + x (torch.Tensor): Input audio signal. 
+ """ + fmap = [] + for conv in self.convs: + x = self.lrelu(conv(x)) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + return x, fmap + + +class DiscriminatorP(torch.nn.Module): + """ + Discriminator for the long-term component. + + This class implements a discriminator for the long-term component + of the audio signal. The discriminator is composed of a series of + convolutional layers that are applied to the input signal at a given + period. + + Args: + period (int): Period of the discriminator. + kernel_size (int): Kernel size of the convolutional layers. + Defaults to 5. + stride (int): Stride of the convolutional layers. Defaults to 3. + use_spectral_norm (bool): Whether to use spectral normalization. + Defaults to False. + """ + + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + norm_f = spectral_norm if use_spectral_norm else weight_norm + + in_channels = [1, 32, 128, 512, 1024] + out_channels = [32, 128, 512, 1024, 1024] + + self.convs = torch.nn.ModuleList( + [ + norm_f( + torch.nn.Conv2d( + in_ch, + out_ch, + (kernel_size, 1), + (stride, 1), + padding=(get_padding(kernel_size, 1), 0), + ) + ) + for in_ch, out_ch in zip(in_channels, out_channels) + ] + ) + + self.conv_post = norm_f(torch.nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE) + + def forward(self, x): + """ + Forward pass of the discriminator. + + Args: + x (torch.Tensor): Input audio signal. + """ + fmap = [] + b, c, t = x.shape + if t % self.period != 0: + n_pad = self.period - (t % self.period) + x = torch.nn.functional.pad(x, (0, n_pad), "reflect") + x = x.view(b, c, -1, self.period) + + for conv in self.convs: + x = self.lrelu(conv(x)) + fmap.append(x) + + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + return x, fmap diff --git a/rvc/lib/algorithm/encoders.py b/rvc/lib/algorithm/encoders.py new file mode 100644 index 00000000..e52f9e7d --- /dev/null +++ b/rvc/lib/algorithm/encoders.py @@ -0,0 +1,218 @@ +import math +import torch +from typing import Optional + +from rvc.lib.algorithm.commons import sequence_mask +from rvc.lib.algorithm.modules import WaveNet +from rvc.lib.algorithm.normalization import LayerNorm +from rvc.lib.algorithm.attentions import FFN, MultiHeadAttention + + +class Encoder(torch.nn.Module): + """ + Encoder module for the Transformer model. + + Args: + hidden_channels (int): Number of hidden channels in the encoder. + filter_channels (int): Number of filter channels in the feed-forward network. + n_heads (int): Number of attention heads. + n_layers (int): Number of encoder layers. + kernel_size (int, optional): Kernel size of the convolution layers in the feed-forward network. Defaults to 1. + p_dropout (float, optional): Dropout probability. Defaults to 0.0. + window_size (int, optional): Window size for relative positional encoding. Defaults to 10. 
+ """ + + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=10, + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = torch.nn.Dropout(p_dropout) + self.attn_layers = torch.nn.ModuleList() + self.norm_layers_1 = torch.nn.ModuleList() + self.ffn_layers = torch.nn.ModuleList() + self.norm_layers_2 = torch.nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + window_size=window_size, + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class TextEncoder(torch.nn.Module): + """Text Encoder with configurable embedding dimension. + + Args: + out_channels (int): Output channels of the encoder. + hidden_channels (int): Hidden channels of the encoder. + filter_channels (int): Filter channels of the encoder. + n_heads (int): Number of attention heads. + n_layers (int): Number of encoder layers. + kernel_size (int): Kernel size of the convolutional layers. + p_dropout (float): Dropout probability. + embedding_dim (int): Embedding dimension for phone embeddings (v1 = 256, v2 = 768). + f0 (bool, optional): Whether to use F0 embedding. Defaults to True. + """ + + def __init__( + self, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + embedding_dim, + f0=True, + ): + super(TextEncoder, self).__init__() + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = float(p_dropout) + self.emb_phone = torch.nn.Linear(embedding_dim, hidden_channels) + self.lrelu = torch.nn.LeakyReLU(0.1, inplace=True) + if f0: + self.emb_pitch = torch.nn.Embedding(256, hidden_channels) + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), + ) + self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward( + self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor + ): + if pitch is None: + x = self.emb_phone(phone) + else: + x = self.emb_phone(phone) + self.emb_pitch(pitch) + x = x * math.sqrt(self.hidden_channels) # [b, t, h] + x = self.lrelu(x) + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(lengths, x.size(2)), 1).to(x.dtype) + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return m, logs, x_mask + + +class PosteriorEncoder(torch.nn.Module): + """Posterior Encoder for inferring latent representation. 
+ + Args: + in_channels (int): Number of channels in the input. + out_channels (int): Number of channels in the output. + hidden_channels (int): Number of hidden channels in the encoder. + kernel_size (int): Kernel size of the convolutional layers. + dilation_rate (int): Dilation rate of the convolutional layers. + n_layers (int): Number of layers in the encoder. + gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0. + """ + + def __init__( + self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + ): + super(PosteriorEncoder, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = torch.nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = WaveNet( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + ) + self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward( + self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None + ): + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + def remove_weight_norm(self): + """Removes weight normalization from the encoder.""" + self.enc.remove_weight_norm() + + def __prepare_scriptable__(self): + """Prepares the module for scripting.""" + for hook in self.enc._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc) + return self diff --git a/rvc/lib/algorithm/generators.py b/rvc/lib/algorithm/generators.py new file mode 100644 index 00000000..ccc2358d --- /dev/null +++ b/rvc/lib/algorithm/generators.py @@ -0,0 +1,231 @@ +import torch +import numpy as np +from torch.nn.utils import remove_weight_norm +from torch.nn.utils.parametrizations import weight_norm +from typing import Optional + +from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock1, ResBlock2 +from rvc.lib.algorithm.commons import init_weights + + +class Generator(torch.nn.Module): + """Generator for synthesizing audio. + + Args: + initial_channel (int): Number of channels in the initial convolutional layer. + resblock (str): Type of residual block to use (1 or 2). + resblock_kernel_sizes (list): Kernel sizes of the residual blocks. + resblock_dilation_sizes (list): Dilation rates of the residual blocks. + upsample_rates (list): Upsampling rates. + upsample_initial_channel (int): Number of channels in the initial upsampling layer. + upsample_kernel_sizes (list): Kernel sizes of the upsampling layers. + gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0. 
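+
+    The overall upsampling factor is the product of upsample_rates; each stage is
+    followed by num_kernels residual blocks whose outputs are averaged.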
+ """ + + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = torch.nn.Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock = ResBlock1 if resblock == "1" else ResBlock2 + + self.ups = torch.nn.ModuleList() + self.resblocks = torch.nn.ModuleList() + + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + torch.nn.ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate( + zip(resblock_kernel_sizes, resblock_dilation_sizes) + ): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = torch.nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs == None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + x = torch.nn.functional.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def __prepare_scriptable__(self): + """Prepares the module for scripting.""" + for l in self.ups_and_resblocks: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(l) + return self + + def remove_weight_norm(self): + """Removes weight normalization from the upsampling and residual blocks.""" + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class SineGenerator(torch.nn.Module): + """ + A sine wave generator that synthesizes waveforms with optional harmonic overtones and noise. + + Args: + sampling_rate (int): The sampling rate in Hz. + num_harmonics (int, optional): The number of harmonic overtones to include. Defaults to 0. + sine_amplitude (float, optional): The amplitude of the sine waveform. Defaults to 0.1. + noise_stddev (float, optional): The standard deviation of Gaussian noise. Defaults to 0.003. + voiced_threshold (float, optional): F0 threshold for distinguishing voiced/unvoiced frames. Defaults to 0. + """ + + def __init__( + self, + sampling_rate: int, + num_harmonics: int = 0, + sine_amplitude: float = 0.1, + noise_stddev: float = 0.003, + voiced_threshold: float = 0.0, + ): + super(SineGenerator, self).__init__() + self.sampling_rate = sampling_rate + self.num_harmonics = num_harmonics + self.sine_amplitude = sine_amplitude + self.noise_stddev = noise_stddev + self.voiced_threshold = voiced_threshold + self.waveform_dim = self.num_harmonics + 1 # fundamental + harmonics + + def _compute_voiced_unvoiced(self, f0: torch.Tensor) -> torch.Tensor: + """ + Generate a binary mask to indicate voiced/unvoiced frames. 
+ + Args: + f0 (torch.Tensor): Fundamental frequency tensor (batch_size, length). + """ + uv_mask = (f0 > self.voiced_threshold).float() + return uv_mask + + def _generate_sine_wave( + self, f0: torch.Tensor, upsampling_factor: int + ) -> torch.Tensor: + """ + Generate sine waves for the fundamental frequency and its harmonics. + + Args: + f0 (torch.Tensor): Fundamental frequency tensor (batch_size, length, 1). + upsampling_factor (int): Upsampling factor. + """ + batch_size, length, _ = f0.shape + + # Create an upsampling grid + upsampling_grid = torch.arange( + 1, upsampling_factor + 1, dtype=f0.dtype, device=f0.device + ) + + # Calculate phase increments + phase_increments = (f0 / self.sampling_rate) * upsampling_grid + phase_remainder = torch.fmod(phase_increments[:, :-1, -1:] + 0.5, 1.0) - 0.5 + cumulative_phase = phase_remainder.cumsum(dim=1).fmod(1.0).to(f0.dtype) + phase_increments += torch.nn.functional.pad( + cumulative_phase, (0, 0, 1, 0), mode="constant" + ) + + # Reshape to match the sine wave shape + phase_increments = phase_increments.reshape(batch_size, -1, 1) + + # Scale for harmonics + harmonic_scale = torch.arange( + 1, self.waveform_dim + 1, dtype=f0.dtype, device=f0.device + ).reshape(1, 1, -1) + phase_increments *= harmonic_scale + + # Add random phase offset (except for the fundamental) + random_phase = torch.rand(1, 1, self.waveform_dim, device=f0.device) + random_phase[..., 0] = 0 # Fundamental frequency has no random offset + phase_increments += random_phase + + # Generate sine waves + sine_waves = torch.sin(2 * np.pi * phase_increments) + return sine_waves + + def forward(self, f0: torch.Tensor, upsampling_factor: int): + """ + Forward pass to generate sine waveforms with noise and voiced/unvoiced masking. + + Args: + f0 (torch.Tensor): Fundamental frequency tensor (batch_size, length, 1). + upsampling_factor (int): Upsampling factor. + """ + with torch.no_grad(): + # Expand `f0` to include waveform dimensions + f0 = f0.unsqueeze(-1) + + # Generate sine waves + sine_waves = ( + self._generate_sine_wave(f0, upsampling_factor) * self.sine_amplitude + ) + + # Compute voiced/unvoiced mask + voiced_mask = self._compute_voiced_unvoiced(f0) + + # Upsample voiced/unvoiced mask + voiced_mask = torch.nn.functional.interpolate( + voiced_mask.transpose(2, 1), + scale_factor=float(upsampling_factor), + mode="nearest", + ).transpose(2, 1) + + # Compute noise amplitude + noise_amplitude = voiced_mask * self.noise_stddev + (1 - voiced_mask) * ( + self.sine_amplitude / 3 + ) + + # Add Gaussian noise + noise = noise_amplitude * torch.randn_like(sine_waves) + + # Combine sine waves and noise + sine_waveforms = sine_waves * voiced_mask + noise + + return sine_waveforms, voiced_mask, noise diff --git a/rvc/lib/algorithm/modules.py b/rvc/lib/algorithm/modules.py new file mode 100644 index 00000000..8a2dad1a --- /dev/null +++ b/rvc/lib/algorithm/modules.py @@ -0,0 +1,124 @@ +import torch +from rvc.lib.algorithm.commons import fused_add_tanh_sigmoid_multiply + + +class WaveNet(torch.nn.Module): + """WaveNet residual blocks as used in WaveGlow. + + Args: + hidden_channels (int): Number of hidden channels. + kernel_size (int): Size of the convolutional kernel. + dilation_rate (int): Dilation rate of the convolution. + n_layers (int): Number of convolutional layers. + gin_channels (int, optional): Number of conditioning channels. Defaults to 0. + p_dropout (float, optional): Dropout probability. Defaults to 0. 
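+
+    Each layer applies a dilated convolution with the gated tanh-sigmoid activation;
+    skip contributions are summed into the output and the residual path feeds the
+    next layer.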
+ """ + + def __init__( + self, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0, + p_dropout=0, + ): + super().__init__() + assert kernel_size % 2 == 1, "Kernel size must be odd for proper padding." + + self.hidden_channels = hidden_channels + self.kernel_size = (kernel_size,) + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + self.n_channels_tensor = torch.IntTensor([hidden_channels]) # Static tensor + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = torch.nn.Dropout(p_dropout) + + # Conditional layer for global conditioning + if gin_channels: + self.cond_layer = torch.nn.utils.parametrizations.weight_norm( + torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1), + name="weight", + ) + + # Precompute dilations and paddings + dilations = [dilation_rate**i for i in range(n_layers)] + paddings = [(kernel_size * d - d) // 2 for d in dilations] + + # Initialize layers + for i in range(n_layers): + self.in_layers.append( + torch.nn.utils.parametrizations.weight_norm( + torch.nn.Conv1d( + hidden_channels, + 2 * hidden_channels, + kernel_size, + dilation=dilations[i], + padding=paddings[i], + ), + name="weight", + ) + ) + + res_skip_channels = ( + hidden_channels if i == n_layers - 1 else 2 * hidden_channels + ) + self.res_skip_layers.append( + torch.nn.utils.parametrizations.weight_norm( + torch.nn.Conv1d(hidden_channels, res_skip_channels, 1), + name="weight", + ) + ) + + def forward(self, x, x_mask, g=None): + """Forward pass. + + Args: + x (torch.Tensor): Input tensor (batch_size, hidden_channels, time_steps). + x_mask (torch.Tensor): Mask tensor (batch_size, 1, time_steps). + g (torch.Tensor, optional): Conditioning tensor (batch_size, gin_channels, time_steps). + """ + output = x.clone().zero_() + + # Apply conditional layer if global conditioning is provided + g = self.cond_layer(g) if g is not None else None + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + g_l = ( + g[ + :, + i * 2 * self.hidden_channels : (i + 1) * 2 * self.hidden_channels, + :, + ] + if g is not None + else 0 + ) + + # Activation with fused Tanh-Sigmoid + acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, self.n_channels_tensor) + acts = self.drop(acts) + + # Residual and skip connections + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, : self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels :, :] + else: + output = output + res_skip_acts + + return output * x_mask + + def remove_weight_norm(self): + """Remove weight normalization from the module.""" + if self.gin_channels: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for layer in self.in_layers: + torch.nn.utils.remove_weight_norm(layer) + for layer in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(layer) diff --git a/rvc/lib/algorithm/normalization.py b/rvc/lib/algorithm/normalization.py new file mode 100644 index 00000000..878ec09d --- /dev/null +++ b/rvc/lib/algorithm/normalization.py @@ -0,0 +1,31 @@ +import torch + + +class LayerNorm(torch.nn.Module): + """Layer normalization module. + + Args: + channels (int): Number of channels. + eps (float, optional): Epsilon value for numerical stability. Defaults to 1e-5. 
+ """ + + def __init__(self, channels, eps=1e-5): + super().__init__() + self.eps = eps + self.gamma = torch.nn.Parameter(torch.ones(channels)) + self.beta = torch.nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + """Forward pass. + + Args: + x (torch.Tensor): Input tensor of shape (batch_size, channels, time_steps). + + """ + # Transpose to (batch_size, time_steps, channels) for layer_norm + x = x.transpose(1, -1) + x = torch.nn.functional.layer_norm( + x, (x.size(-1),), self.gamma, self.beta, self.eps + ) + # Transpose back to (batch_size, channels, time_steps) + return x.transpose(1, -1) diff --git a/rvc/lib/algorithm/nsf.py b/rvc/lib/algorithm/nsf.py new file mode 100644 index 00000000..5476adab --- /dev/null +++ b/rvc/lib/algorithm/nsf.py @@ -0,0 +1,196 @@ +import math +import torch +from torch.nn.utils import remove_weight_norm +from torch.nn.utils.parametrizations import weight_norm +from typing import Optional + +from rvc.lib.algorithm.generators import SineGenerator +from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock1, ResBlock2 +from rvc.lib.algorithm.commons import init_weights + + +class SourceModuleHnNSF(torch.nn.Module): + """ + Source Module for harmonic-plus-noise excitation. + + Args: + sample_rate (int): Sampling rate in Hz. + harmonic_num (int, optional): Number of harmonics above F0. Defaults to 0. + sine_amp (float, optional): Amplitude of sine source signal. Defaults to 0.1. + add_noise_std (float, optional): Standard deviation of additive Gaussian noise. Defaults to 0.003. + voiced_threshod (float, optional): Threshold to set voiced/unvoiced given F0. Defaults to 0. + is_half (bool, optional): Whether to use half precision. Defaults to True. + """ + + def __init__( + self, + sample_rate, + harmonic_num=0, + sine_amp=0.1, + add_noise_std=0.003, + voiced_threshod=0, + is_half=True, + ): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + self.is_half = is_half + + self.l_sin_gen = SineGenerator( + sample_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod + ) + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x: torch.Tensor, upsample_factor: int = 1): + sine_wavs, uv, _ = self.l_sin_gen(x, upsample_factor) + sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + return sine_merge, None, None + + +class GeneratorNSF(torch.nn.Module): + """ + Generator for synthesizing audio using the NSF (Neural Source Filter) approach. + + Args: + initial_channel (int): Number of channels in the initial convolutional layer. + resblock (str): Type of residual block to use (1 or 2). + resblock_kernel_sizes (list): Kernel sizes of the residual blocks. + resblock_dilation_sizes (list): Dilation rates of the residual blocks. + upsample_rates (list): Upsampling rates. + upsample_initial_channel (int): Number of channels in the initial upsampling layer. + upsample_kernel_sizes (list): Kernel sizes of the upsampling layers. + gin_channels (int): Number of channels for the global conditioning input. + sr (int): Sampling rate. + is_half (bool, optional): Whether to use half precision. Defaults to False. 
+ """ + + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels, + sr, + is_half=False, + ): + super(GeneratorNSF, self).__init__() + + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates)) + self.m_source = SourceModuleHnNSF( + sample_rate=sr, harmonic_num=0, is_half=is_half + ) + + self.conv_pre = torch.nn.Conv1d( + initial_channel, upsample_initial_channel, 7, 1, padding=3 + ) + resblock_cls = ResBlock1 if resblock == "1" else ResBlock2 + + self.ups = torch.nn.ModuleList() + self.noise_convs = torch.nn.ModuleList() + + channels = [ + upsample_initial_channel // (2 ** (i + 1)) + for i in range(len(upsample_rates)) + ] + stride_f0s = [ + math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1 + for i in range(len(upsample_rates)) + ] + + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + torch.nn.ConvTranspose1d( + upsample_initial_channel // (2**i), + channels[i], + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.noise_convs.append( + torch.nn.Conv1d( + 1, + channels[i], + kernel_size=(stride_f0s[i] * 2 if stride_f0s[i] > 1 else 1), + stride=stride_f0s[i], + padding=(stride_f0s[i] // 2 if stride_f0s[i] > 1 else 0), + ) + ) + + self.resblocks = torch.nn.ModuleList( + [ + resblock_cls(channels[i], k, d) + for i in range(len(self.ups)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes) + ] + ) + + self.conv_post = torch.nn.Conv1d(channels[-1], 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + self.upp = math.prod(upsample_rates) + self.lrelu_slope = LRELU_SLOPE + + def forward(self, x, f0, g: Optional[torch.Tensor] = None): + har_source, _, _ = self.m_source(f0, self.upp) + har_source = har_source.transpose(1, 2) + x = self.conv_pre(x) + + if g is not None: + x = x + self.cond(g) + + for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)): + x = torch.nn.functional.leaky_relu(x, self.lrelu_slope) + x = ups(x) + x = x + noise_convs(har_source) + + xs = sum( + [ + resblock(x) + for j, resblock in enumerate(self.resblocks) + if j in range(i * self.num_kernels, (i + 1) * self.num_kernels) + ] + ) + x = xs / self.num_kernels + + x = torch.nn.functional.leaky_relu(x) + x = torch.tanh(self.conv_post(x)) + return x + + def remove_weight_norm(self): + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + def __prepare_scriptable__(self): + for l in self.ups: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + remove_weight_norm(l) + for l in self.resblocks: + for hook in l._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + remove_weight_norm(l) + return self diff --git a/rvc/lib/algorithm/residuals.py b/rvc/lib/algorithm/residuals.py new file mode 100644 index 00000000..87805f72 --- /dev/null +++ b/rvc/lib/algorithm/residuals.py @@ -0,0 +1,250 @@ +from typing import Optional +import torch +from torch.nn.utils import remove_weight_norm +from 
torch.nn.utils.parametrizations import weight_norm + +from rvc.lib.algorithm.modules import WaveNet +from rvc.lib.algorithm.commons import get_padding, init_weights + +LRELU_SLOPE = 0.1 + + +def create_conv1d_layer(channels, kernel_size, dilation): + return weight_norm( + torch.nn.Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation, + padding=get_padding(kernel_size, dilation), + ) + ) + + +def apply_mask(tensor, mask): + return tensor * mask if mask is not None else tensor + + +class ResBlockBase(torch.nn.Module): + def __init__(self, channels, kernel_size, dilations): + super(ResBlockBase, self).__init__() + self.convs1 = torch.nn.ModuleList( + [create_conv1d_layer(channels, kernel_size, d) for d in dilations] + ) + self.convs1.apply(init_weights) + + self.convs2 = torch.nn.ModuleList( + [create_conv1d_layer(channels, kernel_size, 1) for _ in dilations] + ) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = torch.nn.functional.leaky_relu(x, LRELU_SLOPE) + xt = apply_mask(xt, x_mask) + xt = torch.nn.functional.leaky_relu(c1(xt), LRELU_SLOPE) + xt = apply_mask(xt, x_mask) + xt = c2(xt) + x = xt + x + return apply_mask(x, x_mask) + + def remove_weight_norm(self): + for conv in self.convs1 + self.convs2: + remove_weight_norm(conv) + + +class ResBlock1(ResBlockBase): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__(channels, kernel_size, dilation) + + +class ResBlock2(ResBlockBase): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__(channels, kernel_size, dilation) + + +class Flip(torch.nn.Module): + """Flip module for flow-based models. + + This module flips the input along the time dimension. + """ + + def forward(self, x, *args, reverse=False, **kwargs): + """Forward pass. + + Args: + x (torch.Tensor): Input tensor. + reverse (bool, optional): Whether to reverse the operation. Defaults to False. + """ + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ResidualCouplingBlock(torch.nn.Module): + """Residual Coupling Block for normalizing flow. + + Args: + channels (int): Number of channels in the input. + hidden_channels (int): Number of hidden channels in the coupling layer. + kernel_size (int): Kernel size of the convolutional layers. + dilation_rate (int): Dilation rate of the convolutional layers. + n_layers (int): Number of layers in the coupling layer. + n_flows (int, optional): Number of coupling layers in the block. Defaults to 4. + gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0. 
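+
+    Example (a minimal sketch; sizes are illustrative):
+        >>> block = ResidualCouplingBlock(192, 192, 5, 1, 3)
+        >>> len(block.flows)  # each of the 4 coupling layers is followed by a Flip
+        8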
+ """ + + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0, + ): + super(ResidualCouplingBlock, self).__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = torch.nn.ModuleList() + for i in range(n_flows): + self.flows.append( + ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) + self.flows.append(Flip()) + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + g: Optional[torch.Tensor] = None, + reverse: bool = False, + ): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow.forward(x, x_mask, g=g, reverse=reverse) + return x + + def remove_weight_norm(self): + """Removes weight normalization from the coupling layers.""" + for i in range(self.n_flows): + self.flows[i * 2].remove_weight_norm() + + def __prepare_scriptable__(self): + """Prepares the module for scripting.""" + for i in range(self.n_flows): + for hook in self.flows[i * 2]._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flows[i * 2]) + + return self + + +class ResidualCouplingLayer(torch.nn.Module): + """Residual coupling layer for flow-based models. + + Args: + channels (int): Number of channels. + hidden_channels (int): Number of hidden channels. + kernel_size (int): Size of the convolutional kernel. + dilation_rate (int): Dilation rate of the convolution. + n_layers (int): Number of convolutional layers. + p_dropout (float, optional): Dropout probability. Defaults to 0. + gin_channels (int, optional): Number of conditioning channels. Defaults to 0. + mean_only (bool, optional): Whether to use mean-only coupling. Defaults to False. + """ + + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False, + ): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = torch.nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WaveNet( + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=p_dropout, + gin_channels=gin_channels, + ) + self.post = torch.nn.Conv1d( + hidden_channels, self.half_channels * (2 - mean_only), 1 + ) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + """Forward pass. + + Args: + x (torch.Tensor): Input tensor of shape (batch_size, channels, time_steps). + x_mask (torch.Tensor): Mask tensor of shape (batch_size, 1, time_steps). + g (torch.Tensor, optional): Conditioning tensor of shape (batch_size, gin_channels, time_steps). + Defaults to None. + reverse (bool, optional): Whether to reverse the operation. Defaults to False. 
+ """ + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + def remove_weight_norm(self): + """Remove weight normalization from the module.""" + self.enc.remove_weight_norm() diff --git a/rvc/lib/algorithm/synthesizers.py b/rvc/lib/algorithm/synthesizers.py new file mode 100644 index 00000000..2a1aa236 --- /dev/null +++ b/rvc/lib/algorithm/synthesizers.py @@ -0,0 +1,237 @@ +import torch +from typing import Optional + +from rvc.lib.algorithm.nsf import GeneratorNSF +from rvc.lib.algorithm.generators import Generator +from rvc.lib.algorithm.commons import slice_segments, rand_slice_segments +from rvc.lib.algorithm.residuals import ResidualCouplingBlock +from rvc.lib.algorithm.encoders import TextEncoder, PosteriorEncoder + + +class Synthesizer(torch.nn.Module): + """ + Base Synthesizer model. + + Args: + spec_channels (int): Number of channels in the spectrogram. + segment_size (int): Size of the audio segment. + inter_channels (int): Number of channels in the intermediate layers. + hidden_channels (int): Number of channels in the hidden layers. + filter_channels (int): Number of channels in the filter layers. + n_heads (int): Number of attention heads. + n_layers (int): Number of layers in the encoder. + kernel_size (int): Size of the convolution kernel. + p_dropout (float): Dropout probability. + resblock (str): Type of residual block. + resblock_kernel_sizes (list): Kernel sizes for the residual blocks. + resblock_dilation_sizes (list): Dilation sizes for the residual blocks. + upsample_rates (list): Upsampling rates for the decoder. + upsample_initial_channel (int): Number of channels in the initial upsampling layer. + upsample_kernel_sizes (list): Kernel sizes for the upsampling layers. + spk_embed_dim (int): Dimension of the speaker embedding. + gin_channels (int): Number of channels in the global conditioning vector. + sr (int): Sampling rate of the audio. + use_f0 (bool): Whether to use F0 information. + text_enc_hidden_dim (int): Hidden dimension for the text encoder. + kwargs: Additional keyword arguments. 
+ """ + + def __init__( + self, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + spk_embed_dim, + gin_channels, + sr, + use_f0, + text_enc_hidden_dim=768, + **kwargs + ): + super(Synthesizer, self).__init__() + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = float(p_dropout) + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.gin_channels = gin_channels + self.spk_embed_dim = spk_embed_dim + self.use_f0 = use_f0 + + self.enc_p = TextEncoder( + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + float(p_dropout), + text_enc_hidden_dim, + f0=use_f0, + ) + + if use_f0: + self.dec = GeneratorNSF( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + sr=sr, + is_half=kwargs["is_half"], + ) + else: + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + + self.enc_q = PosteriorEncoder( + spec_channels, + inter_channels, + hidden_channels, + 5, + 1, + 16, + gin_channels=gin_channels, + ) + self.flow = ResidualCouplingBlock( + inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels + ) + self.emb_g = torch.nn.Embedding(self.spk_embed_dim, gin_channels) + + def remove_weight_norm(self): + """Removes weight normalization from the model.""" + self.dec.remove_weight_norm() + self.flow.remove_weight_norm() + self.enc_q.remove_weight_norm() + + def __prepare_scriptable__(self): + for hook in self.dec._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.dec) + for hook in self.flow._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.flow) + if hasattr(self, "enc_q"): + for hook in self.enc_q._forward_pre_hooks.values(): + if ( + hook.__module__ == "torch.nn.utils.parametrizations.weight_norm" + and hook.__class__.__name__ == "WeightNorm" + ): + torch.nn.utils.remove_weight_norm(self.enc_q) + return self + + @torch.jit.ignore + def forward( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + pitch: Optional[torch.Tensor] = None, + pitchf: Optional[torch.Tensor] = None, + y: torch.Tensor = None, + y_lengths: torch.Tensor = None, + ds: Optional[torch.Tensor] = None, + ): + """ + Forward pass of the model. + + Args: + phone (torch.Tensor): Phoneme sequence. + phone_lengths (torch.Tensor): Lengths of the phoneme sequences. + pitch (torch.Tensor, optional): Pitch sequence. 
+ pitchf (torch.Tensor, optional): Fine-grained pitch sequence. + y (torch.Tensor, optional): Target spectrogram. + y_lengths (torch.Tensor, optional): Lengths of the target spectrograms. + ds (torch.Tensor, optional): Speaker embedding. Defaults to None. + """ + g = self.emb_g(ds).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + if y is not None: + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) + if self.use_f0: + pitchf = slice_segments(pitchf, ids_slice, self.segment_size, 2) + o = self.dec(z_slice, pitchf, g=g) + else: + o = self.dec(z_slice, g=g) + return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + else: + return None, None, x_mask, None, (None, None, m_p, logs_p, None, None) + + @torch.jit.export + def infer( + self, + phone: torch.Tensor, + phone_lengths: torch.Tensor, + pitch: Optional[torch.Tensor] = None, + nsff0: Optional[torch.Tensor] = None, + sid: torch.Tensor = None, + rate: Optional[torch.Tensor] = None, + ): + """ + Inference of the model. + + Args: + phone (torch.Tensor): Phoneme sequence. + phone_lengths (torch.Tensor): Lengths of the phoneme sequences. + pitch (torch.Tensor, optional): Pitch sequence. + nsff0 (torch.Tensor, optional): Fine-grained pitch sequence. + sid (torch.Tensor): Speaker embedding. + rate (torch.Tensor, optional): Rate for time-stretching. Defaults to None. + """ + g = self.emb_g(sid).unsqueeze(-1) + m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) + z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask + if rate is not None: + assert isinstance(rate, torch.Tensor) + head = int(z_p.shape[2] * (1.0 - rate.item())) + z_p = z_p[:, :, head:] + x_mask = x_mask[:, :, head:] + if self.use_f0: + nsff0 = nsff0[:, head:] + if self.use_f0: + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, nsff0, g=g) + else: + z = self.flow(z_p, x_mask, g=g, reverse=True) + o = self.dec(z * x_mask, g=g) + return o, x_mask, (z, z_p, m_p, logs_p) diff --git a/rvc/lib/predictors/F0Extractor.py b/rvc/lib/predictors/F0Extractor.py new file mode 100644 index 00000000..bc3b61f3 --- /dev/null +++ b/rvc/lib/predictors/F0Extractor.py @@ -0,0 +1,100 @@ +import dataclasses +import pathlib +import libf0 +import librosa +import numpy as np +import resampy +import torch +import torchcrepe +import torchfcpe +import os + +# from tools.anyf0.rmvpe import RMVPE +from rvc.lib.predictors.RMVPE import RMVPE0Predictor +from rvc.configs.config import Config + +config = Config() + + +@dataclasses.dataclass +class F0Extractor: + wav_path: pathlib.Path + sample_rate: int = 44100 + hop_length: int = 512 + f0_min: int = 50 + f0_max: int = 1600 + method: str = "rmvpe" + x: np.ndarray = dataclasses.field(init=False) + + def __post_init__(self): + self.x, self.sample_rate = librosa.load(self.wav_path, sr=self.sample_rate) + + @property + def hop_size(self) -> float: + return self.hop_length / self.sample_rate + + @property + def wav16k(self) -> np.ndarray: + return resampy.resample(self.x, self.sample_rate, 16000) + + def extract_f0(self) -> np.ndarray: + f0 = None + method = self.method + if method == "crepe": + wav16k_torch = torch.FloatTensor(self.wav16k).unsqueeze(0).to(config.device) + f0 = torchcrepe.predict( + wav16k_torch, + sample_rate=16000, + hop_length=160, + batch_size=512, + fmin=self.f0_min, + fmax=self.f0_max, + device=config.device, + ) + f0 = f0[0].cpu().numpy() 
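+        # FCPE infers F0 from the original-rate mono signal; interpolating to
+        # f0_target_length keeps the track aligned with this extractor's hop grid.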
+        elif method == "fcpe":
+            audio = librosa.to_mono(self.x)
+            audio_length = len(audio)
+            f0_target_length = (audio_length // self.hop_length) + 1
+            audio = (
+                torch.from_numpy(audio)
+                .float()
+                .unsqueeze(0)
+                .unsqueeze(-1)
+                .to(config.device)
+            )
+            model = torchfcpe.spawn_bundled_infer_model(device=config.device)
+
+            f0 = model.infer(
+                audio,
+                sr=self.sample_rate,
+                decoder_mode="local_argmax",
+                threshold=0.006,
+                f0_min=self.f0_min,
+                f0_max=self.f0_max,
+                interp_uv=False,
+                output_interp_target_length=f0_target_length,
+            )
+            f0 = f0.squeeze().cpu().numpy()
+        elif method == "rmvpe":
+            model_rmvpe = RMVPE0Predictor(
+                os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
+                is_half=config.is_half,
+                device=config.device,
+                # hop_length=80
+            )
+            f0 = model_rmvpe.infer_from_audio(self.wav16k, thred=0.03)
+
+        else:
+            raise ValueError(f"Unknown method: {self.method}")
+        return libf0.hz_to_cents(f0, librosa.midi_to_hz(0))
+
+    def plot_f0(self, f0):
+        from matplotlib import pyplot as plt
+
+        plt.figure(figsize=(10, 4))
+        plt.plot(f0)
+        plt.title(self.method)
+        plt.xlabel("Time (frames)")
+        plt.ylabel("F0 (cents)")
+        plt.show()
diff --git a/rvc/lib/predictors/FCPE.py b/rvc/lib/predictors/FCPE.py
new file mode 100644
index 00000000..12f6c346
--- /dev/null
+++ b/rvc/lib/predictors/FCPE.py
@@ -0,0 +1,920 @@
+from typing import Union
+
+import torch.nn.functional as F
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.nn.utils.parametrizations import weight_norm
+from torchaudio.transforms import Resample
+import os
+import librosa
+import soundfile as sf
+import torch.utils.data
+from librosa.filters import mel as librosa_mel_fn
+import math
+from functools import partial
+
+from einops import rearrange, repeat
+from local_attention import LocalAttention
+
+os.environ["LRU_CACHE_CAPACITY"] = "3"
+
+
+def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
+    """Loads wav file to torch tensor."""
+    try:
+        data, sample_rate = sf.read(full_path, always_2d=True)
+    except Exception as error:
+        print(f"An error occurred loading {full_path}: {error}")
+        if return_empty_on_exception:
+            # sf.read failed, so no sample_rate is available at this point
+            return [], target_sr or 48000
+        else:
+            raise
+
+    data = data[:, 0] if len(data.shape) > 1 else data
+    assert len(data) > 2
+
+    # Normalize data
+    max_mag = (
+        -np.iinfo(data.dtype).min
+        if np.issubdtype(data.dtype, np.integer)
+        else max(np.amax(data), -np.amin(data))
+    )
+    max_mag = (
+        (2**31) + 1 if max_mag > (2**15) else ((2**15) + 1 if max_mag > 1.01 else 1.0)
+    )
+    data = torch.FloatTensor(data.astype(np.float32)) / max_mag
+
+    # Handle exceptions and resample
+    if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:
+        return [], sample_rate or target_sr or 48000
+    if target_sr is not None and sample_rate != target_sr:
+        data = torch.from_numpy(
+            librosa.core.resample(
+                data.numpy(), orig_sr=sample_rate, target_sr=target_sr
+            )
+        )
+        sample_rate = target_sr
+
+    return data, sample_rate
+
+
+def dynamic_range_compression(x, C=1, clip_val=1e-5):
+    return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
+
+
+def dynamic_range_decompression(x, C=1):
+    return np.exp(x) / C
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+    return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+    return torch.exp(x) / C
+
+
+class STFT:
+    def __init__(
+        self,
+        sr=22050,
+        n_mels=80,
+        n_fft=1024,
+        win_size=1024,
+        hop_length=256,
+        fmin=20,
+        fmax=11025,
+        
clip_val=1e-5, + ): + self.target_sr = sr + self.n_mels = n_mels + self.n_fft = n_fft + self.win_size = win_size + self.hop_length = hop_length + self.fmin = fmin + self.fmax = fmax + self.clip_val = clip_val + self.mel_basis = {} + self.hann_window = {} + + def get_mel(self, y, keyshift=0, speed=1, center=False, train=False): + sample_rate = self.target_sr + n_mels = self.n_mels + n_fft = self.n_fft + win_size = self.win_size + hop_length = self.hop_length + fmin = self.fmin + fmax = self.fmax + clip_val = self.clip_val + + factor = 2 ** (keyshift / 12) + n_fft_new = int(np.round(n_fft * factor)) + win_size_new = int(np.round(win_size * factor)) + hop_length_new = int(np.round(hop_length * speed)) + + # Optimize mel_basis and hann_window caching + mel_basis = self.mel_basis if not train else {} + hann_window = self.hann_window if not train else {} + + mel_basis_key = str(fmax) + "_" + str(y.device) + if mel_basis_key not in mel_basis: + mel = librosa_mel_fn( + sr=sample_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax + ) + mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device) + + keyshift_key = str(keyshift) + "_" + str(y.device) + if keyshift_key not in hann_window: + hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device) + + # Padding and STFT + pad_left = (win_size_new - hop_length_new) // 2 + pad_right = max( + (win_size_new - hop_length_new + 1) // 2, + win_size_new - y.size(-1) - pad_left, + ) + mode = "reflect" if pad_right < y.size(-1) else "constant" + y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode=mode) + y = y.squeeze(1) + + spec = torch.stft( + y, + n_fft_new, + hop_length=hop_length_new, + win_length=win_size_new, + window=hann_window[keyshift_key], + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9)) + + # Handle keyshift and mel conversion + if keyshift != 0: + size = n_fft // 2 + 1 + resize = spec.size(1) + spec = ( + F.pad(spec, (0, 0, 0, size - resize)) + if resize < size + else spec[:, :size, :] + ) + spec = spec * win_size / win_size_new + spec = torch.matmul(mel_basis[mel_basis_key], spec) + spec = dynamic_range_compression_torch(spec, clip_val=clip_val) + return spec + + def __call__(self, audiopath): + audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) + spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) + return spect + + +stft = STFT() + + +def softmax_kernel( + data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None +): + b, h, *_ = data.shape + + # Normalize data + data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.0 + + # Project data + ratio = projection_matrix.shape[0] ** -0.5 + projection = repeat(projection_matrix, "j d -> b h j d", b=b, h=h) + projection = projection.type_as(data) + data_dash = torch.einsum("...id,...jd->...ij", (data_normalizer * data), projection) + + # Calculate diagonal data + diag_data = data**2 + diag_data = torch.sum(diag_data, dim=-1) + diag_data = (diag_data / 2.0) * (data_normalizer**2) + diag_data = diag_data.unsqueeze(dim=-1) + + # Apply softmax + if is_query: + data_dash = ratio * ( + torch.exp( + data_dash + - diag_data + - torch.max(data_dash, dim=-1, keepdim=True).values + ) + + eps + ) + else: + data_dash = ratio * (torch.exp(data_dash - diag_data + eps)) + + return data_dash.type_as(data) + + +def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None): + unstructured_block = 
torch.randn((cols, cols), device=device) + q, r = torch.linalg.qr(unstructured_block.cpu(), mode="reduced") + q, r = map(lambda t: t.to(device), (q, r)) + + if qr_uniform_q: + d = torch.diag(r, 0) + q *= d.sign() + return q.t() + + +def exists(val): + return val is not None + + +def empty(tensor): + return tensor.numel() == 0 + + +def default(val, d): + return val if exists(val) else d + + +def cast_tuple(val): + return (val,) if not isinstance(val, tuple) else val + + +class PCmer(nn.Module): + def __init__( + self, + num_layers, + num_heads, + dim_model, + dim_keys, + dim_values, + residual_dropout, + attention_dropout, + ): + super().__init__() + self.num_layers = num_layers + self.num_heads = num_heads + self.dim_model = dim_model + self.dim_values = dim_values + self.dim_keys = dim_keys + self.residual_dropout = residual_dropout + self.attention_dropout = attention_dropout + + self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)]) + + def forward(self, phone, mask=None): + for layer in self._layers: + phone = layer(phone, mask) + return phone + + +class _EncoderLayer(nn.Module): + def __init__(self, parent: PCmer): + super().__init__() + self.conformer = ConformerConvModule(parent.dim_model) + self.norm = nn.LayerNorm(parent.dim_model) + self.dropout = nn.Dropout(parent.residual_dropout) + self.attn = SelfAttention( + dim=parent.dim_model, heads=parent.num_heads, causal=False + ) + + def forward(self, phone, mask=None): + phone = phone + (self.attn(self.norm(phone), mask=mask)) + phone = phone + (self.conformer(phone)) + return phone + + +def calc_same_padding(kernel_size): + pad = kernel_size // 2 + return (pad, pad - (kernel_size + 1) % 2) + + +class Swish(nn.Module): + def forward(self, x): + return x * x.sigmoid() + + +class Transpose(nn.Module): + def __init__(self, dims): + super().__init__() + assert len(dims) == 2, "dims must be a tuple of two dimensions" + self.dims = dims + + def forward(self, x): + return x.transpose(*self.dims) + + +class GLU(nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + + def forward(self, x): + out, gate = x.chunk(2, dim=self.dim) + return out * gate.sigmoid() + + +class DepthWiseConv1d(nn.Module): + def __init__(self, chan_in, chan_out, kernel_size, padding): + super().__init__() + self.padding = padding + self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in) + + def forward(self, x): + x = F.pad(x, self.padding) + return self.conv(x) + + +class ConformerConvModule(nn.Module): + def __init__( + self, dim, causal=False, expansion_factor=2, kernel_size=31, dropout=0.0 + ): + super().__init__() + + inner_dim = dim * expansion_factor + padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0) + + self.net = nn.Sequential( + nn.LayerNorm(dim), + Transpose((1, 2)), + nn.Conv1d(dim, inner_dim * 2, 1), + GLU(dim=1), + DepthWiseConv1d( + inner_dim, inner_dim, kernel_size=kernel_size, padding=padding + ), + Swish(), + nn.Conv1d(inner_dim, dim, 1), + Transpose((1, 2)), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +def linear_attention(q, k, v): + if v is None: + out = torch.einsum("...ed,...nd->...ne", k, q) + return out + else: + k_cumsum = k.sum(dim=-2) + D_inv = 1.0 / (torch.einsum("...nd,...d->...n", q, k_cumsum.type_as(q)) + 1e-8) + context = torch.einsum("...nd,...ne->...de", k, v) + out = torch.einsum("...de,...nd,...n->...ne", context, q, D_inv) + return out + + +def gaussian_orthogonal_random_matrix( + nb_rows, nb_columns, 
scaling=0, qr_uniform_q=False, device=None +): + nb_full_blocks = int(nb_rows / nb_columns) + block_list = [] + + for _ in range(nb_full_blocks): + q = orthogonal_matrix_chunk( + nb_columns, qr_uniform_q=qr_uniform_q, device=device + ) + block_list.append(q) + + remaining_rows = nb_rows - nb_full_blocks * nb_columns + if remaining_rows > 0: + q = orthogonal_matrix_chunk( + nb_columns, qr_uniform_q=qr_uniform_q, device=device + ) + block_list.append(q[:remaining_rows]) + + final_matrix = torch.cat(block_list) + + if scaling == 0: + multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1) + elif scaling == 1: + multiplier = math.sqrt((float(nb_columns))) * torch.ones( + (nb_rows,), device=device + ) + else: + raise ValueError(f"Invalid scaling {scaling}") + + return torch.diag(multiplier) @ final_matrix + + +class FastAttention(nn.Module): + def __init__( + self, + dim_heads, + nb_features=None, + ortho_scaling=0, + causal=False, + generalized_attention=False, + kernel_fn=nn.ReLU(), + qr_uniform_q=False, + no_projection=False, + ): + super().__init__() + nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))) + + self.dim_heads = dim_heads + self.nb_features = nb_features + self.ortho_scaling = ortho_scaling + + self.create_projection = partial( + gaussian_orthogonal_random_matrix, + nb_rows=self.nb_features, + nb_columns=dim_heads, + scaling=ortho_scaling, + qr_uniform_q=qr_uniform_q, + ) + projection_matrix = self.create_projection() + self.register_buffer("projection_matrix", projection_matrix) + + self.generalized_attention = generalized_attention + self.kernel_fn = kernel_fn + self.no_projection = no_projection + self.causal = causal + + @torch.no_grad() + def redraw_projection_matrix(self): + projections = self.create_projection() + self.projection_matrix.copy_(projections) + del projections + + def forward(self, q, k, v): + device = q.device + + if self.no_projection: + q = q.softmax(dim=-1) + k = torch.exp(k) if self.causal else k.softmax(dim=-2) + else: + create_kernel = partial( + softmax_kernel, projection_matrix=self.projection_matrix, device=device + ) + q = create_kernel(q, is_query=True) + k = create_kernel(k, is_query=False) + + attn_fn = linear_attention if not self.causal else self.causal_linear_fn + + if v is None: + out = attn_fn(q, k, None) + return out + else: + out = attn_fn(q, k, v) + return out + + +class SelfAttention(nn.Module): + def __init__( + self, + dim, + causal=False, + heads=8, + dim_head=64, + local_heads=0, + local_window_size=256, + nb_features=None, + feature_redraw_interval=1000, + generalized_attention=False, + kernel_fn=nn.ReLU(), + qr_uniform_q=False, + dropout=0.0, + no_projection=False, + ): + super().__init__() + assert dim % heads == 0, "dimension must be divisible by number of heads" + dim_head = default(dim_head, dim // heads) + inner_dim = dim_head * heads + self.fast_attention = FastAttention( + dim_head, + nb_features, + causal=causal, + generalized_attention=generalized_attention, + kernel_fn=kernel_fn, + qr_uniform_q=qr_uniform_q, + no_projection=no_projection, + ) + + self.heads = heads + self.global_heads = heads - local_heads + self.local_attn = ( + LocalAttention( + window_size=local_window_size, + causal=causal, + autopad=True, + dropout=dropout, + look_forward=int(not causal), + rel_pos_emb_config=(dim_head, local_heads), + ) + if local_heads > 0 + else None + ) + + self.to_q = nn.Linear(dim, inner_dim) + self.to_k = nn.Linear(dim, inner_dim) + self.to_v = nn.Linear(dim, inner_dim) + self.to_out = 
nn.Linear(inner_dim, dim) + self.dropout = nn.Dropout(dropout) + + @torch.no_grad() + def redraw_projection_matrix(self): + self.fast_attention.redraw_projection_matrix() + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + name=None, + inference=False, + **kwargs, + ): + _, _, _, h, gh = *x.shape, self.heads, self.global_heads + + cross_attend = exists(context) + context = default(context, x) + context_mask = default(context_mask, mask) if not cross_attend else context_mask + q, k, v = self.to_q(x), self.to_k(context), self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v)) + (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v)) + + attn_outs = [] + if not empty(q): + if exists(context_mask): + global_mask = context_mask[:, None, :, None] + v.masked_fill_(~global_mask, 0.0) + if cross_attend: + pass # TODO: Implement cross-attention + else: + out = self.fast_attention(q, k, v) + attn_outs.append(out) + + if not empty(lq): + assert ( + not cross_attend + ), "local attention is not compatible with cross attention" + out = self.local_attn(lq, lk, lv, input_mask=mask) + attn_outs.append(out) + + out = torch.cat(attn_outs, dim=1) + out = rearrange(out, "b h n d -> b n (h d)") + out = self.to_out(out) + return self.dropout(out) + + +def l2_regularization(model, l2_alpha): + l2_loss = [] + for module in model.modules(): + if type(module) is nn.Conv2d: + l2_loss.append((module.weight**2).sum() / 2.0) + return l2_alpha * sum(l2_loss) + + +class FCPE(nn.Module): + def __init__( + self, + input_channel=128, + out_dims=360, + n_layers=12, + n_chans=512, + use_siren=False, + use_full=False, + loss_mse_scale=10, + loss_l2_regularization=False, + loss_l2_regularization_scale=1, + loss_grad1_mse=False, + loss_grad1_mse_scale=1, + f0_max=1975.5, + f0_min=32.70, + confidence=False, + threshold=0.05, + use_input_conv=True, + ): + super().__init__() + if use_siren is True: + raise ValueError("Siren is not supported yet.") + if use_full is True: + raise ValueError("Full model is not supported yet.") + + self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10 + self.loss_l2_regularization = ( + loss_l2_regularization if (loss_l2_regularization is not None) else False + ) + self.loss_l2_regularization_scale = ( + loss_l2_regularization_scale + if (loss_l2_regularization_scale is not None) + else 1 + ) + self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False + self.loss_grad1_mse_scale = ( + loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1 + ) + self.f0_max = f0_max if (f0_max is not None) else 1975.5 + self.f0_min = f0_min if (f0_min is not None) else 32.70 + self.confidence = confidence if (confidence is not None) else False + self.threshold = threshold if (threshold is not None) else 0.05 + self.use_input_conv = use_input_conv if (use_input_conv is not None) else True + + self.cent_table_b = torch.Tensor( + np.linspace( + self.f0_to_cent(torch.Tensor([f0_min]))[0], + self.f0_to_cent(torch.Tensor([f0_max]))[0], + out_dims, + ) + ) + self.register_buffer("cent_table", self.cent_table_b) + + # conv in stack + _leaky = nn.LeakyReLU() + self.stack = nn.Sequential( + nn.Conv1d(input_channel, n_chans, 3, 1, 1), + nn.GroupNorm(4, n_chans), + _leaky, + nn.Conv1d(n_chans, n_chans, 3, 1, 1), + ) + + # transformer + self.decoder = PCmer( + num_layers=n_layers, + num_heads=8, + dim_model=n_chans, + dim_keys=n_chans, + dim_values=n_chans, + residual_dropout=0.1, + 
attention_dropout=0.1, + ) + self.norm = nn.LayerNorm(n_chans) + + # out + self.n_out = out_dims + self.dense_out = weight_norm(nn.Linear(n_chans, self.n_out)) + + def forward( + self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder="local_argmax" + ): + if cdecoder == "argmax": + self.cdecoder = self.cents_decoder + elif cdecoder == "local_argmax": + self.cdecoder = self.cents_local_decoder + + x = ( + self.stack(mel.transpose(1, 2)).transpose(1, 2) + if self.use_input_conv + else mel + ) + x = self.decoder(x) + x = self.norm(x) + x = self.dense_out(x) + x = torch.sigmoid(x) + + if not infer: + gt_cent_f0 = self.f0_to_cent(gt_f0) + gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0) + loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0) + if self.loss_l2_regularization: + loss_all = loss_all + l2_regularization( + model=self, l2_alpha=self.loss_l2_regularization_scale + ) + x = loss_all + if infer: + x = self.cdecoder(x) + x = self.cent_to_f0(x) + x = (1 + x / 700).log() if not return_hz_f0 else x + + return x + + def cents_decoder(self, y, mask=True): + B, N, _ = y.size() + ci = self.cent_table[None, None, :].expand(B, N, -1) + rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum( + y, dim=-1, keepdim=True + ) + if mask: + confident = torch.max(y, dim=-1, keepdim=True)[0] + confident_mask = torch.ones_like(confident) + confident_mask[confident <= self.threshold] = float("-INF") + rtn = rtn * confident_mask + return (rtn, confident) if self.confidence else rtn + + def cents_local_decoder(self, y, mask=True): + B, N, _ = y.size() + ci = self.cent_table[None, None, :].expand(B, N, -1) + confident, max_index = torch.max(y, dim=-1, keepdim=True) + local_argmax_index = torch.arange(0, 9).to(max_index.device) + (max_index - 4) + local_argmax_index = torch.clamp(local_argmax_index, 0, self.n_out - 1) + ci_l = torch.gather(ci, -1, local_argmax_index) + y_l = torch.gather(y, -1, local_argmax_index) + rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum( + y_l, dim=-1, keepdim=True + ) + if mask: + confident_mask = torch.ones_like(confident) + confident_mask[confident <= self.threshold] = float("-INF") + rtn = rtn * confident_mask + return (rtn, confident) if self.confidence else rtn + + def cent_to_f0(self, cent): + return 10.0 * 2 ** (cent / 1200.0) + + def f0_to_cent(self, f0): + return 1200.0 * torch.log2(f0 / 10.0) + + def gaussian_blurred_cent(self, cents): + mask = (cents > 0.1) & (cents < (1200.0 * np.log2(self.f0_max / 10.0))) + B, N, _ = cents.size() + ci = self.cent_table[None, None, :].expand(B, N, -1) + return torch.exp(-torch.square(ci - cents) / 1250) * mask.float() + + +class FCPEInfer: + def __init__(self, model_path, device=None, dtype=torch.float32): + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + self.device = device + ckpt = torch.load(model_path, map_location=torch.device(self.device)) + self.args = DotDict(ckpt["config"]) + self.dtype = dtype + model = FCPE( + input_channel=self.args.model.input_channel, + out_dims=self.args.model.out_dims, + n_layers=self.args.model.n_layers, + n_chans=self.args.model.n_chans, + use_siren=self.args.model.use_siren, + use_full=self.args.model.use_full, + loss_mse_scale=self.args.loss.loss_mse_scale, + loss_l2_regularization=self.args.loss.loss_l2_regularization, + loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale, + loss_grad1_mse=self.args.loss.loss_grad1_mse, + loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale, + 
f0_max=self.args.model.f0_max, + f0_min=self.args.model.f0_min, + confidence=self.args.model.confidence, + ) + model.to(self.device).to(self.dtype) + model.load_state_dict(ckpt["model"]) + model.eval() + self.model = model + self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device) + + @torch.no_grad() + def __call__(self, audio, sr, threshold=0.05): + self.model.threshold = threshold + audio = audio[None, :] + mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype) + f0 = self.model(mel=mel, infer=True, return_hz_f0=True) + return f0 + + +class Wav2Mel: + def __init__(self, args, device=None, dtype=torch.float32): + self.sample_rate = args.mel.sampling_rate + self.hop_size = args.mel.hop_size + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + self.device = device + self.dtype = dtype + self.stft = STFT( + args.mel.sampling_rate, + args.mel.num_mels, + args.mel.n_fft, + args.mel.win_size, + args.mel.hop_size, + args.mel.fmin, + args.mel.fmax, + ) + self.resample_kernel = {} + + def extract_nvstft(self, audio, keyshift=0, train=False): + mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2) + return mel + + def extract_mel(self, audio, sample_rate, keyshift=0, train=False): + audio = audio.to(self.dtype).to(self.device) + if sample_rate == self.sample_rate: + audio_res = audio + else: + key_str = str(sample_rate) + if key_str not in self.resample_kernel: + self.resample_kernel[key_str] = Resample( + sample_rate, self.sample_rate, lowpass_filter_width=128 + ) + self.resample_kernel[key_str] = ( + self.resample_kernel[key_str].to(self.dtype).to(self.device) + ) + audio_res = self.resample_kernel[key_str](audio) + + mel = self.extract_nvstft( + audio_res, keyshift=keyshift, train=train + ) # B, n_frames, bins + n_frames = int(audio.shape[1] // self.hop_size) + 1 + mel = ( + torch.cat((mel, mel[:, -1:, :]), 1) if n_frames > int(mel.shape[1]) else mel + ) + mel = mel[:, :n_frames, :] if n_frames < int(mel.shape[1]) else mel + return mel + + def __call__(self, audio, sample_rate, keyshift=0, train=False): + return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train) + + +class DotDict(dict): + def __getattr__(*args): + val = dict.get(*args) + return DotDict(val) if type(val) is dict else val + + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + +class F0Predictor(object): + def compute_f0(self, wav, p_len): + pass + + def compute_f0_uv(self, wav, p_len): + pass + + +class FCPEF0Predictor(F0Predictor): + def __init__( + self, + model_path, + hop_length=512, + f0_min=50, + f0_max=1100, + dtype=torch.float32, + device=None, + sample_rate=44100, + threshold=0.05, + ): + self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype) + self.hop_length = hop_length + self.f0_min = f0_min + self.f0_max = f0_max + self.device = device or ("cuda" if torch.cuda.is_available() else "cpu") + self.threshold = threshold + self.sample_rate = sample_rate + self.dtype = dtype + self.name = "fcpe" + + def repeat_expand( + self, + content: Union[torch.Tensor, np.ndarray], + target_len: int, + mode: str = "nearest", + ): + ndim = content.ndim + content = ( + content[None, None] + if ndim == 1 + else content[None] if ndim == 2 else content + ) + assert content.ndim == 3 + is_np = isinstance(content, np.ndarray) + content = torch.from_numpy(content) if is_np else content + results = torch.nn.functional.interpolate(content, size=target_len, mode=mode) + results = results.numpy() if is_np else results + return 
results[0, 0] if ndim == 1 else results[0] if ndim == 2 else results
+
+    def post_process(self, x, sample_rate, f0, pad_to):
+        f0 = (
+            torch.from_numpy(f0).float().to(x.device)
+            if isinstance(f0, np.ndarray)
+            else f0
+        )
+        f0 = self.repeat_expand(f0, pad_to) if pad_to is not None else f0
+
+        vuv_vector = torch.zeros_like(f0)
+        vuv_vector[f0 > 0.0] = 1.0
+        vuv_vector[f0 <= 0.0] = 0.0
+
+        nzindex = torch.nonzero(f0).squeeze()
+        f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
+        time_org = self.hop_length / sample_rate * nzindex.cpu().numpy()
+        time_frame = np.arange(pad_to) * self.hop_length / sample_rate
+
+        vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]
+
+        if f0.shape[0] <= 0:
+            return np.zeros(pad_to), vuv_vector.cpu().numpy()
+        if f0.shape[0] == 1:
+            return np.ones(pad_to) * f0[0], vuv_vector.cpu().numpy()
+
+        f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
+        return f0, vuv_vector.cpu().numpy()
+
+    def compute_f0(self, wav, p_len=None):
+        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
+        p_len = x.shape[0] // self.hop_length if p_len is None else p_len
+        f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
+        if torch.all(f0 == 0):
+            # fully unvoiced: return a zero F0 track of the requested length
+            return np.zeros(p_len)
+        return self.post_process(x, self.sample_rate, f0, p_len)[0]
+
+    def compute_f0_uv(self, wav, p_len=None):
+        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
+        p_len = x.shape[0] // self.hop_length if p_len is None else p_len
+        f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
+        if torch.all(f0 == 0):
+            # fully unvoiced: zero F0 and an all-unvoiced flag of the same length
+            return np.zeros(p_len), np.zeros(p_len)
+        return self.post_process(x, self.sample_rate, f0, p_len)
diff --git a/rvc/lib/predictors/RMVPE.py b/rvc/lib/predictors/RMVPE.py
new file mode 100644
index 00000000..970c5e58
--- /dev/null
+++ b/rvc/lib/predictors/RMVPE.py
@@ -0,0 +1,560 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+
+from librosa.filters import mel
+from typing import List
+
+# Constants for readability
+N_MELS = 128
+N_CLASS = 360
+
+
+# Define a helper class for creating convolutional blocks
+class ConvBlockRes(nn.Module):
+    """
+    A convolutional block with a residual connection.
+
+    Args:
+        in_channels (int): Number of input channels.
+        out_channels (int): Number of output channels.
+        momentum (float): Momentum for batch normalization.
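+
+    Example (a minimal sketch; sizes are illustrative):
+        >>> block = ConvBlockRes(1, 16)
+        >>> block(torch.randn(1, 1, 128, 32)).shape
+        torch.Size([1, 16, 128, 32])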
+ """ + + def __init__(self, in_channels, out_channels, momentum=0.01): + super(ConvBlockRes, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=(3, 3), + stride=(1, 1), + padding=(1, 1), + bias=False, + ), + nn.BatchNorm2d(out_channels, momentum=momentum), + nn.ReLU(), + nn.Conv2d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=(3, 3), + stride=(1, 1), + padding=(1, 1), + bias=False, + ), + nn.BatchNorm2d(out_channels, momentum=momentum), + nn.ReLU(), + ) + if in_channels != out_channels: + self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) + self.is_shortcut = True + else: + self.is_shortcut = False + + def forward(self, x): + if self.is_shortcut: + return self.conv(x) + self.shortcut(x) + else: + return self.conv(x) + x + + +# Define a class for residual encoder blocks +class ResEncoderBlock(nn.Module): + """ + A residual encoder block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (tuple): Size of the average pooling kernel. + n_blocks (int): Number of convolutional blocks in the block. + momentum (float): Momentum for batch normalization. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 + ): + super(ResEncoderBlock, self).__init__() + self.n_blocks = n_blocks + self.conv = nn.ModuleList() + self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) + for _ in range(n_blocks - 1): + self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) + self.kernel_size = kernel_size + if self.kernel_size is not None: + self.pool = nn.AvgPool2d(kernel_size=kernel_size) + + def forward(self, x): + for i in range(self.n_blocks): + x = self.conv[i](x) + if self.kernel_size is not None: + return x, self.pool(x) + else: + return x + + +# Define a class for the encoder +class Encoder(nn.Module): + """ + The encoder part of the DeepUnet. + + Args: + in_channels (int): Number of input channels. + in_size (int): Size of the input tensor. + n_encoders (int): Number of encoder blocks. + kernel_size (tuple): Size of the average pooling kernel. + n_blocks (int): Number of convolutional blocks in each encoder block. + out_channels (int): Number of output channels for the first encoder block. + momentum (float): Momentum for batch normalization. + """ + + def __init__( + self, + in_channels, + in_size, + n_encoders, + kernel_size, + n_blocks, + out_channels=16, + momentum=0.01, + ): + super(Encoder, self).__init__() + self.n_encoders = n_encoders + self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) + self.layers = nn.ModuleList() + self.latent_channels = [] + for i in range(self.n_encoders): + self.layers.append( + ResEncoderBlock( + in_channels, out_channels, kernel_size, n_blocks, momentum=momentum + ) + ) + self.latent_channels.append([out_channels, in_size]) + in_channels = out_channels + out_channels *= 2 + in_size //= 2 + self.out_size = in_size + self.out_channel = out_channels + + def forward(self, x: torch.Tensor): + concat_tensors: List[torch.Tensor] = [] + x = self.bn(x) + for i in range(self.n_encoders): + t, x = self.layers[i](x) + concat_tensors.append(t) + return x, concat_tensors + + +# Define a class for the intermediate layer +class Intermediate(nn.Module): + """ + The intermediate layer of the DeepUnet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. 
+ n_inters (int): Number of convolutional blocks in the intermediate layer. + n_blocks (int): Number of convolutional blocks in each intermediate block. + momentum (float): Momentum for batch normalization. + """ + + def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): + super(Intermediate, self).__init__() + self.n_inters = n_inters + self.layers = nn.ModuleList() + self.layers.append( + ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) + ) + for _ in range(self.n_inters - 1): + self.layers.append( + ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) + ) + + def forward(self, x): + for i in range(self.n_inters): + x = self.layers[i](x) + return x + + +# Define a class for residual decoder blocks +class ResDecoderBlock(nn.Module): + """ + A residual decoder block. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (tuple): Stride for transposed convolution. + n_blocks (int): Number of convolutional blocks in the block. + momentum (float): Momentum for batch normalization. + """ + + def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): + super(ResDecoderBlock, self).__init__() + out_padding = (0, 1) if stride == (1, 2) else (1, 1) + self.n_blocks = n_blocks + self.conv1 = nn.Sequential( + nn.ConvTranspose2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=(3, 3), + stride=stride, + padding=(1, 1), + output_padding=out_padding, + bias=False, + ), + nn.BatchNorm2d(out_channels, momentum=momentum), + nn.ReLU(), + ) + self.conv2 = nn.ModuleList() + self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) + for _ in range(n_blocks - 1): + self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) + + def forward(self, x, concat_tensor): + x = self.conv1(x) + x = torch.cat((x, concat_tensor), dim=1) + for i in range(self.n_blocks): + x = self.conv2[i](x) + return x + + +# Define a class for the decoder +class Decoder(nn.Module): + """ + The decoder part of the DeepUnet. + + Args: + in_channels (int): Number of input channels. + n_decoders (int): Number of decoder blocks. + stride (tuple): Stride for transposed convolution. + n_blocks (int): Number of convolutional blocks in each decoder block. + momentum (float): Momentum for batch normalization. + """ + + def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): + super(Decoder, self).__init__() + self.layers = nn.ModuleList() + self.n_decoders = n_decoders + for _ in range(self.n_decoders): + out_channels = in_channels // 2 + self.layers.append( + ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) + ) + in_channels = out_channels + + def forward(self, x, concat_tensors): + for i in range(self.n_decoders): + x = self.layers[i](x, concat_tensors[-1 - i]) + return x + + +# Define a class for the DeepUnet architecture +class DeepUnet(nn.Module): + """ + The DeepUnet architecture. + + Args: + kernel_size (tuple): Size of the average pooling kernel. + n_blocks (int): Number of convolutional blocks in each encoder/decoder block. + en_de_layers (int): Number of encoder/decoder layers. + inter_layers (int): Number of convolutional blocks in the intermediate layer. + in_channels (int): Number of input channels. + en_out_channels (int): Number of output channels for the first encoder block. 
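+
+    Example (a minimal sketch; the mel axis is fixed at 128 bins and both
+    spatial axes must be divisible by 2**en_de_layers so pooling and
+    upsampling line up):
+        >>> net = DeepUnet((2, 2), 4)
+        >>> x = torch.randn(1, 1, 32, 128)  # (batch, channel, frames, mel bins)
+        >>> net(x).shape
+        torch.Size([1, 16, 32, 128])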
+ """ + + def __init__( + self, + kernel_size, + n_blocks, + en_de_layers=5, + inter_layers=4, + in_channels=1, + en_out_channels=16, + ): + super(DeepUnet, self).__init__() + self.encoder = Encoder( + in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels + ) + self.intermediate = Intermediate( + self.encoder.out_channel // 2, + self.encoder.out_channel, + inter_layers, + n_blocks, + ) + self.decoder = Decoder( + self.encoder.out_channel, en_de_layers, kernel_size, n_blocks + ) + + def forward(self, x): + x, concat_tensors = self.encoder(x) + x = self.intermediate(x) + x = self.decoder(x, concat_tensors) + return x + + +# Define a class for the end-to-end model +class E2E(nn.Module): + """ + The end-to-end model. + + Args: + n_blocks (int): Number of convolutional blocks in each encoder/decoder block. + n_gru (int): Number of GRU layers. + kernel_size (tuple): Size of the average pooling kernel. + en_de_layers (int): Number of encoder/decoder layers. + inter_layers (int): Number of convolutional blocks in the intermediate layer. + in_channels (int): Number of input channels. + en_out_channels (int): Number of output channels for the first encoder block. + """ + + def __init__( + self, + n_blocks, + n_gru, + kernel_size, + en_de_layers=5, + inter_layers=4, + in_channels=1, + en_out_channels=16, + ): + super(E2E, self).__init__() + self.unet = DeepUnet( + kernel_size, + n_blocks, + en_de_layers, + inter_layers, + in_channels, + en_out_channels, + ) + self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) + if n_gru: + self.fc = nn.Sequential( + BiGRU(3 * 128, 256, n_gru), + nn.Linear(512, N_CLASS), + nn.Dropout(0.25), + nn.Sigmoid(), + ) + else: + self.fc = nn.Sequential( + nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid() + ) + + def forward(self, mel): + mel = mel.transpose(-1, -2).unsqueeze(1) + x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) + x = self.fc(x) + return x + + +# Define a class for the MelSpectrogram extractor +class MelSpectrogram(torch.nn.Module): + """ + Extracts Mel-spectrogram features from audio. + + Args: + is_half (bool): Whether to use half-precision floating-point numbers. + n_mel_channels (int): Number of Mel-frequency bands. + sample_rate (int): Sampling rate of the audio. + win_length (int): Length of the window function in samples. + hop_length (int): Hop size between frames in samples. + n_fft (int, optional): Length of the FFT window. Defaults to None, which uses win_length. + mel_fmin (int, optional): Minimum frequency for the Mel filter bank. Defaults to 0. + mel_fmax (int, optional): Maximum frequency for the Mel filter bank. Defaults to None. + clamp (float, optional): Minimum value for clamping the Mel-spectrogram. Defaults to 1e-5. 
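+
+    Example (illustrative only, using the same 16 kHz settings the predictor
+    below passes in):
+        >>> extractor = MelSpectrogram(False, 128, 16000, 1024, 160)
+        >>> audio = torch.randn(1, 16000)   # one second of mono audio
+        >>> extractor(audio).shape          # (batch, n_mel_channels, frames)
+        torch.Size([1, 128, 101])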
+ """ + + def __init__( + self, + is_half, + n_mel_channels, + sample_rate, + win_length, + hop_length, + n_fft=None, + mel_fmin=0, + mel_fmax=None, + clamp=1e-5, + ): + super().__init__() + n_fft = win_length if n_fft is None else n_fft + self.hann_window = {} + mel_basis = mel( + sr=sample_rate, + n_fft=n_fft, + n_mels=n_mel_channels, + fmin=mel_fmin, + fmax=mel_fmax, + htk=True, + ) + mel_basis = torch.from_numpy(mel_basis).float() + self.register_buffer("mel_basis", mel_basis) + self.n_fft = win_length if n_fft is None else n_fft + self.hop_length = hop_length + self.win_length = win_length + self.sample_rate = sample_rate + self.n_mel_channels = n_mel_channels + self.clamp = clamp + self.is_half = is_half + + def forward(self, audio, keyshift=0, speed=1, center=True): + factor = 2 ** (keyshift / 12) + n_fft_new = int(np.round(self.n_fft * factor)) + win_length_new = int(np.round(self.win_length * factor)) + hop_length_new = int(np.round(self.hop_length * speed)) + keyshift_key = str(keyshift) + "_" + str(audio.device) + if keyshift_key not in self.hann_window: + self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( + audio.device + ) + fft = torch.stft( + audio, + n_fft=n_fft_new, + hop_length=hop_length_new, + win_length=win_length_new, + window=self.hann_window[keyshift_key], + center=center, + return_complex=True, + ) + + magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) + if keyshift != 0: + size = self.n_fft // 2 + 1 + resize = magnitude.size(1) + if resize < size: + magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) + magnitude = magnitude[:, :size, :] * self.win_length / win_length_new + mel_output = torch.matmul(self.mel_basis, magnitude) + if self.is_half: + mel_output = mel_output.half() + log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) + return log_mel_spec + + +# Define a class for the RMVPE0 predictor +class RMVPE0Predictor: + """ + A predictor for fundamental frequency (F0) based on the RMVPE0 model. + + Args: + model_path (str): Path to the RMVPE0 model file. + is_half (bool): Whether to use half-precision floating-point numbers. + device (str, optional): Device to use for computation. Defaults to None, which uses CUDA if available. + """ + + def __init__(self, model_path, is_half, device=None): + self.resample_kernel = {} + model = E2E(4, 1, (2, 2)) + ckpt = torch.load(model_path, map_location="cpu") + model.load_state_dict(ckpt) + model.eval() + if is_half: + model = model.half() + self.model = model + self.resample_kernel = {} + self.is_half = is_half + self.device = device + self.mel_extractor = MelSpectrogram( + is_half, N_MELS, 16000, 1024, 160, None, 30, 8000 + ).to(device) + self.model = self.model.to(device) + cents_mapping = 20 * np.arange(N_CLASS) + 1997.3794084376191 + self.cents_mapping = np.pad(cents_mapping, (4, 4)) + + def mel2hidden(self, mel): + """ + Converts Mel-spectrogram features to hidden representation. + + Args: + mel (torch.Tensor): Mel-spectrogram features. + """ + with torch.no_grad(): + n_frames = mel.shape[-1] + mel = F.pad( + mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" + ) + hidden = self.model(mel) + return hidden[:, :n_frames] + + def decode(self, hidden, thred=0.03): + """ + Decodes hidden representation to F0. + + Args: + hidden (np.ndarray): Hidden representation. + thred (float, optional): Threshold for salience. Defaults to 0.03. 
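+
+        Note: pitch is recovered as f0 = 10 * 2 ** (cents / 1200), so a
+        decoded value of 5700 cents maps to roughly 269 Hz, and frames that
+        decode to exactly 10 Hz (0 cents) are treated as unvoiced and zeroed.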
+        """
+        cents_pred = self.to_local_average_cents(hidden, thred=thred)
+        f0 = 10 * (2 ** (cents_pred / 1200))
+        f0[f0 == 10] = 0
+        return f0
+
+    def infer_from_audio(self, audio, thred=0.03):
+        """
+        Infers F0 from audio.
+
+        Args:
+            audio (np.ndarray): Audio signal.
+            thred (float, optional): Threshold for salience. Defaults to 0.03.
+        """
+        audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
+        mel = self.mel_extractor(audio, center=True)
+        hidden = self.mel2hidden(mel)
+        hidden = hidden.squeeze(0).cpu().numpy()
+        if self.is_half:
+            hidden = hidden.astype("float32")
+        f0 = self.decode(hidden, thred=thred)
+        return f0
+
+    def to_local_average_cents(self, salience, thred=0.05):
+        """
+        Converts salience to local average cents.
+
+        Args:
+            salience (np.ndarray): Salience values.
+            thred (float, optional): Threshold for salience. Defaults to 0.05.
+        """
+        center = np.argmax(salience, axis=1)
+        salience = np.pad(salience, ((0, 0), (4, 4)))
+        center += 4
+        todo_salience = []
+        todo_cents_mapping = []
+        starts = center - 4
+        ends = center + 5
+        for idx in range(salience.shape[0]):
+            todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
+            todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
+        todo_salience = np.array(todo_salience)
+        todo_cents_mapping = np.array(todo_cents_mapping)
+        product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
+        weight_sum = np.sum(todo_salience, 1)
+        divided = product_sum / weight_sum
+        maxx = np.max(salience, axis=1)
+        divided[maxx <= thred] = 0
+        return divided
+
+
+# Define a class for BiGRU (bidirectional GRU)
+class BiGRU(nn.Module):
+    """
+    A bidirectional GRU layer.
+
+    Args:
+        input_features (int): Number of input features.
+        hidden_features (int): Number of hidden features.
+        num_layers (int): Number of GRU layers.
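+
+    Example (illustrative only):
+        >>> gru = BiGRU(384, 256, 1)      # E2E uses 3 * 128 input features
+        >>> x = torch.randn(1, 32, 384)   # (batch, time, features)
+        >>> gru(x).shape                  # doubled by bidirectionality
+        torch.Size([1, 32, 512])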
+    """
+
+    def __init__(self, input_features, hidden_features, num_layers):
+        super(BiGRU, self).__init__()
+        self.gru = nn.GRU(
+            input_features,
+            hidden_features,
+            num_layers=num_layers,
+            batch_first=True,
+            bidirectional=True,
+        )
+
+    def forward(self, x):
+        return self.gru(x)[0]
diff --git a/rvc/lib/tools/analyzer.py b/rvc/lib/tools/analyzer.py
new file mode 100644
index 00000000..f4b79434
--- /dev/null
+++ b/rvc/lib/tools/analyzer.py
@@ -0,0 +1,76 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import librosa.display
+import librosa
+
+
+def calculate_features(y, sr):
+    stft = np.abs(librosa.stft(y))
+    duration = librosa.get_duration(y=y, sr=sr)
+    cent = librosa.feature.spectral_centroid(S=stft, sr=sr)[0]
+    bw = librosa.feature.spectral_bandwidth(S=stft, sr=sr)[0]
+    rolloff = librosa.feature.spectral_rolloff(S=stft, sr=sr)[0]
+    return stft, duration, cent, bw, rolloff
+
+
+def plot_title(title):
+    plt.suptitle(title, fontsize=16, fontweight="bold")
+
+
+def plot_spectrogram(y, sr, stft, duration, cmap="inferno"):
+    plt.subplot(3, 1, 1)
+    plt.imshow(
+        librosa.amplitude_to_db(stft, ref=np.max),
+        origin="lower",
+        extent=[0, duration, 0, sr / 1000],
+        aspect="auto",
+        cmap=cmap,  # Change the colormap here
+    )
+    plt.colorbar(format="%+2.0f dB")
+    plt.xlabel("Time (s)")
+    plt.ylabel("Frequency (kHz)")
+    plt.title("Spectrogram")
+
+
+def plot_waveform(y, sr, duration):
+    plt.subplot(3, 1, 2)
+    librosa.display.waveshow(y, sr=sr)
+    plt.xlabel("Time (s)")
+    plt.ylabel("Amplitude")
+    plt.title("Waveform")
+
+
+def plot_features(times, cent, bw, rolloff, duration):
+    plt.subplot(3, 1, 3)
+    plt.plot(times, cent, label="Spectral Centroid (kHz)", color="b")
+    plt.plot(times, bw, label="Spectral Bandwidth (kHz)", color="g")
+    plt.plot(times, rolloff, label="Spectral Rolloff (kHz)", color="r")
+    plt.xlabel("Time (s)")
+    plt.title("Spectral Features")
+    plt.legend()
+
+
+def analyze_audio(audio_file, save_plot_path="logs/audio_analysis.png"):
+    y, sr = librosa.load(audio_file)
+    stft, duration, cent, bw, rolloff = calculate_features(y, sr)
+
+    plt.figure(figsize=(12, 10))
+
+    plot_title("Audio Analysis" + " - " + audio_file.split("/")[-1])
+    plot_spectrogram(y, sr, stft, duration)
+    plot_waveform(y, sr, duration)
+    plot_features(librosa.times_like(cent), cent, bw, rolloff, duration)
+
+    plt.tight_layout()
+
+    if save_plot_path:
+        plt.savefig(save_plot_path, bbox_inches="tight", dpi=300)
+        plt.close()
+
+    audio_info = f"""Sample Rate: {sr}\nDuration: {(
+        str(round(duration, 2)) + " seconds"
+        if duration < 60
+        else str(round(duration / 60, 2)) + " minutes"
+    )}\nNumber of Samples: {len(y)}\nNative Sample Rate: {librosa.get_samplerate(audio_file)}\nChannels: {"Mono (1)" if y.ndim == 1 else "Stereo (2)"}"""
+
+    return audio_info, save_plot_path
diff --git a/rvc/lib/tools/gdown.py b/rvc/lib/tools/gdown.py
new file mode 100644
index 00000000..eb5ca071
--- /dev/null
+++ b/rvc/lib/tools/gdown.py
@@ -0,0 +1,354 @@
+import os
+import re
+import six
+import sys
+import json
+import tqdm
+import time
+import shutil
+import warnings
+import tempfile
+import textwrap
+import requests
+from six.moves import urllib_parse
+
+
+def indent(text, prefix):
+    """Indent each non-empty line of text with the given prefix."""
+    return "".join(
+        (prefix + line if line.strip() else line) for line in text.splitlines(True)
+    )
+
+
+class FileURLRetrievalError(Exception):
+    pass
+
+
+class FolderContentsMaximumLimitError(Exception):
+    pass
+
+
+def parse_url(url, warning=True):
+    """Parse URLs especially for Google Drive links.
+
+    Args:
+        url: URL to parse.
+        warning: Whether to warn if the URL is not a download link.
+
+    Returns:
+        A tuple (file_id, is_download_link), where file_id is the ID of the
+        file on Google Drive, and is_download_link is a flag indicating
+        whether the URL is a download link.
+    """
+    parsed = urllib_parse.urlparse(url)
+    query = urllib_parse.parse_qs(parsed.query)
+    is_gdrive = parsed.hostname in ("drive.google.com", "docs.google.com")
+    is_download_link = parsed.path.endswith("/uc")
+
+    if not is_gdrive:
+        return None, is_download_link
+
+    file_id = query.get("id", [None])[0]
+    if file_id is None:
+        for pattern in (
+            r"^/file/d/(.*?)/(edit|view)$",
+            r"^/file/u/[0-9]+/d/(.*?)/(edit|view)$",
+            r"^/document/d/(.*?)/(edit|htmlview|view)$",
+            r"^/document/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+            r"^/presentation/d/(.*?)/(edit|htmlview|view)$",
+            r"^/presentation/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+            r"^/spreadsheets/d/(.*?)/(edit|htmlview|view)$",
+            r"^/spreadsheets/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$",
+        ):
+            match = re.match(pattern, parsed.path)
+            if match:
+                file_id = match.group(1)
+                break
+
+    if warning and not is_download_link:
+        warnings.warn(
+            "You specified a Google Drive link that is not the correct link "
+            "to download a file. You might want to try `--fuzzy` option "
+            f"or the following url: https://drive.google.com/uc?id={file_id}"
+        )
+
+    return file_id, is_download_link
+
+
+CHUNK_SIZE = 512 * 1024  # 512KB
+HOME = os.path.expanduser("~")
+
+
+def get_url_from_gdrive_confirmation(contents):
+    """Extract the download URL from a Google Drive confirmation page."""
+    for pattern in (
+        r'href="(\/uc\?export=download[^"]+)',
+        r'href="/open\?id=([^"]+)"',
+        r'"downloadUrl":"([^"]+)',
+    ):
+        match = re.search(pattern, contents)
+        if match:
+            url = match.group(1)
+            if pattern == r'href="/open\?id=([^"]+)"':
+                uuid = re.search(
+                    r'<input\s+type="hidden"\s+name="uuid"\s+value="([^"]+)"',
+                    contents,
+                ).group(1)
+                url = (
+                    "https://drive.usercontent.google.com/download?id="
+                    + url
+                    + "&confirm=t&uuid="
+                    + uuid
+                )
+            elif pattern == r'"downloadUrl":"([^"]+)':
+                url = url.replace("\\u003d", "=").replace("\\u0026", "&")
+            else:
+                url = "https://docs.google.com" + url.replace("&amp;", "&")
+            return url
+
+    match = re.search(r'<p class="uc-error-subcaption">(.*)</p>', contents)
+    if match:
+        error = match.group(1)
+        raise FileURLRetrievalError(error)
+
+    raise FileURLRetrievalError(
+        "Cannot retrieve the public link of the file. "
+        "You may need to change the permission to "
+        "'Anyone with the link', or have had many accesses."
+    )
+
+
+def _get_session(proxy, use_cookies, return_cookies_file=False):
+    """Create a requests session with optional proxy and cookie handling."""
+    sess = requests.session()
+    sess.headers.update(
+        {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
+    )
+
+    if proxy is not None:
+        sess.proxies = {"http": proxy, "https": proxy}
+        print("Using proxy:", proxy, file=sys.stderr)
+
+    cookies_file = os.path.join(HOME, ".cache/gdown/cookies.json")
+    if os.path.exists(cookies_file) and use_cookies:
+        with open(cookies_file) as f:
+            cookies = json.load(f)
+        for k, v in cookies:
+            sess.cookies[k] = v
+
+    return (sess, cookies_file) if return_cookies_file else sess
+
+
+def download(
+    url=None,
+    output=None,
+    quiet=False,
+    proxy=None,
+    speed=None,
+    use_cookies=True,
+    verify=True,
+    id=None,
+    fuzzy=True,
+    resume=False,
+    format=None,
+):
+    """Download file from URL.
+
+    Parameters
+    ----------
+    url: str
+        URL. Google Drive URL is also supported.
+    output: str
+        Output filename. Default is basename of URL.
+    quiet: bool
+        Suppress terminal output. Default is False.
+    proxy: str
+        Proxy.
+    speed: float
+        Download byte size per second (e.g., 256KB/s = 256 * 1024).
+    use_cookies: bool
+        Flag to use cookies. Default is True.
+    verify: bool or string
+        Either a bool, in which case it controls whether the server's TLS
+        certificate is verified, or a string, in which case it must be a path
+        to a CA bundle to use. Default is True.
+    id: str
+        Google Drive's file ID.
+    fuzzy: bool
+        Fuzzy extraction of Google Drive's file ID. Default is True.
+    resume: bool
+        Resume the download from existing tmp file if possible.
+        Default is False.
+    format: str, optional
+        Format of Google Docs, Spreadsheets and Slides. Default is:
+        - Google Docs: 'docx'
+        - Google Spreadsheet: 'xlsx'
+        - Google Slides: 'pptx'
+
+    Returns
+    -------
+    output: str
+        Output filename.
+    """
+    if not (id is None) ^ (url is None):
+        raise ValueError("Either url or id has to be specified")
+    if id is not None:
+        url = f"https://drive.google.com/uc?id={id}"
+
+    url_origin = url
+
+    sess, cookies_file = _get_session(
+        proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
+    )
+
+    gdrive_file_id, is_gdrive_download_link = parse_url(url, warning=not fuzzy)
+
+    if fuzzy and gdrive_file_id:
+        # overwrite the url with fuzzy match of a file id
+        url = f"https://drive.google.com/uc?id={gdrive_file_id}"
+        url_origin = url
+        is_gdrive_download_link = True
+
+    while True:
+        res = sess.get(url, stream=True, verify=verify)
+
+        if url == url_origin and res.status_code == 500:
+            # The file could be Google Docs or Spreadsheets.
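+            # Retrying against the "open" URL makes the next pass land in
+            # the text/html branch below, which rewrites the URL to the
+            # matching export endpoint (docx / xlsx / pptx).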
+ url = f"https://drive.google.com/open?id={gdrive_file_id}" + continue + + if res.headers["Content-Type"].startswith("text/html"): + title = re.search("(.+)", res.text) + if title: + title = title.group(1) + if title.endswith(" - Google Docs"): + url = f"https://docs.google.com/document/d/{gdrive_file_id}/export?format={'docx' if format is None else format}" + continue + if title.endswith(" - Google Sheets"): + url = f"https://docs.google.com/spreadsheets/d/{gdrive_file_id}/export?format={'xlsx' if format is None else format}" + continue + if title.endswith(" - Google Slides"): + url = f"https://docs.google.com/presentation/d/{gdrive_file_id}/export?format={'pptx' if format is None else format}" + continue + elif ( + "Content-Disposition" in res.headers + and res.headers["Content-Disposition"].endswith("pptx") + and format not in (None, "pptx") + ): + url = f"https://docs.google.com/presentation/d/{gdrive_file_id}/export?format={'pptx' if format is None else format}" + continue + + if use_cookies: + os.makedirs(os.path.dirname(cookies_file), exist_ok=True) + with open(cookies_file, "w") as f: + cookies = [ + (k, v) + for k, v in sess.cookies.items() + if not k.startswith("download_warning_") + ] + json.dump(cookies, f, indent=2) + + if "Content-Disposition" in res.headers: + # This is the file + break + if not (gdrive_file_id and is_gdrive_download_link): + break + + # Need to redirect with confirmation + try: + url = get_url_from_gdrive_confirmation(res.text) + except FileURLRetrievalError as e: + message = ( + "Failed to retrieve file url:\n\n" + "{}\n\n" + "You may still be able to access the file from the browser:" + f"\n\n\t{url_origin}\n\n" + "but Gdown can't. Please check connections and permissions." + ).format(indent("\n".join(textwrap.wrap(str(e))), prefix="\t")) + raise FileURLRetrievalError(message) + + if gdrive_file_id and is_gdrive_download_link: + content_disposition = urllib_parse.unquote(res.headers["Content-Disposition"]) + filename_from_url = ( + re.search(r"filename\*=UTF-8''(.*)", content_disposition) + or re.search(r'filename=["\']?(.*?)["\']?$', content_disposition) + ).group(1) + filename_from_url = filename_from_url.replace(os.path.sep, "_") + else: + filename_from_url = os.path.basename(url) + + output = output or filename_from_url + + output_is_path = isinstance(output, six.string_types) + if output_is_path and output.endswith(os.path.sep): + os.makedirs(output, exist_ok=True) + output = os.path.join(output, filename_from_url) + + if output_is_path: + temp_dir = os.path.dirname(output) or "." 
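+        # A fresh download writes to a randomly named tmp file prefixed with
+        # the final basename; resume=True scans temp_dir for such leftovers.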
+ prefix = os.path.basename(output) + existing_tmp_files = [ + os.path.join(temp_dir, file) + for file in os.listdir(temp_dir) + if file.startswith(prefix) + ] + if resume and existing_tmp_files: + if len(existing_tmp_files) > 1: + print( + "There are multiple temporary files to resume:", + file=sys.stderr, + ) + for file in existing_tmp_files: + print(f"\t{file}", file=sys.stderr) + print( + "Please remove them except one to resume downloading.", + file=sys.stderr, + ) + return + tmp_file = existing_tmp_files[0] + else: + resume = False + tmp_file = tempfile.mktemp( + suffix=tempfile.template, prefix=prefix, dir=temp_dir + ) + f = open(tmp_file, "ab") + else: + tmp_file = None + f = output + + if tmp_file is not None and f.tell() != 0: + headers = {"Range": f"bytes={f.tell()}-"} + res = sess.get(url, headers=headers, stream=True, verify=verify) + + if not quiet: + if resume: + print("Resume:", tmp_file, file=sys.stderr) + print( + "To:", + os.path.abspath(output) if output_is_path else output, + file=sys.stderr, + ) + + try: + total = int(res.headers.get("Content-Length", 0)) + if not quiet: + pbar = tqdm.tqdm(total=total, unit="B", unit_scale=True) + t_start = time.time() + for chunk in res.iter_content(chunk_size=CHUNK_SIZE): + f.write(chunk) + if not quiet: + pbar.update(len(chunk)) + if speed is not None: + elapsed_time_expected = 1.0 * pbar.n / speed + elapsed_time = time.time() - t_start + if elapsed_time < elapsed_time_expected: + time.sleep(elapsed_time_expected - elapsed_time) + if not quiet: + pbar.close() + if tmp_file: + f.close() + shutil.move(tmp_file, output) + finally: + sess.close() + + return output diff --git a/rvc/lib/tools/launch_tensorboard.py b/rvc/lib/tools/launch_tensorboard.py new file mode 100644 index 00000000..7f74e316 --- /dev/null +++ b/rvc/lib/tools/launch_tensorboard.py @@ -0,0 +1,21 @@ +import time +import logging +from tensorboard import program + +log_path = "logs" + + +def launch_tensorboard_pipeline(): + logging.getLogger("root").setLevel(logging.WARNING) + logging.getLogger("tensorboard").setLevel(logging.WARNING) + + tb = program.TensorBoard() + tb.configure(argv=[None, "--logdir", log_path]) + url = tb.launch() + + print( + f"Access the tensorboard using the following link:\n{url}?pinnedCards=%5B%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fd%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fkl%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fmel%22%7D%5D" + ) + + while True: + time.sleep(600) diff --git a/rvc/lib/tools/model_download.py b/rvc/lib/tools/model_download.py new file mode 100644 index 00000000..ab1b136e --- /dev/null +++ b/rvc/lib/tools/model_download.py @@ -0,0 +1,385 @@ +import os +import re +import six +import sys +import wget +import shutil +import zipfile +import requests +from bs4 import BeautifulSoup +from urllib.parse import unquote, urlencode, parse_qs, urlparse + +now_dir = os.getcwd() +sys.path.append(now_dir) + +from rvc.lib.utils import format_title +from rvc.lib.tools import gdown + + +def find_folder_parent(search_dir, folder_name): + for dirpath, dirnames, _ in os.walk(search_dir): + if folder_name in dirnames: + return os.path.abspath(dirpath) + return None + + +file_path = find_folder_parent(now_dir, "logs") +zips_path = os.path.join(file_path, "zips") + + +def search_pth_index(folder): + pth_paths = [ + os.path.join(folder, file) + for file in os.listdir(folder) + if 
os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth") + ] + index_paths = [ + os.path.join(folder, file) + for file in os.listdir(folder) + if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index") + ] + + return pth_paths, index_paths + + +def get_mediafire_download_link(url): + response = requests.get(url) + response.raise_for_status() + soup = BeautifulSoup(response.text, "html.parser") + download_button = soup.find( + "a", {"class": "input popsok", "aria-label": "Download file"} + ) + if download_button: + download_link = download_button.get("href") + return download_link + else: + return None + + +def download_from_url(url): + os.makedirs(zips_path, exist_ok=True) + if url != "": + if "drive.google.com" in url: + if "file/d/" in url: + file_id = url.split("file/d/")[1].split("/")[0] + elif "id=" in url: + file_id = url.split("id=")[1].split("&")[0] + else: + return None + + if file_id: + os.chdir(zips_path) + try: + gdown.download( + f"https://drive.google.com/uc?id={file_id}", + quiet=True, + fuzzy=True, + ) + except Exception as error: + error_message = str( + f"An error occurred downloading the file: {error}" + ) + if ( + "Too many users have viewed or downloaded this file recently" + in error_message + ): + os.chdir(now_dir) + return "too much use" + elif ( + "Cannot retrieve the public link of the file." in error_message + ): + os.chdir(now_dir) + return "private link" + else: + print(error_message) + os.chdir(now_dir) + return None + elif "disk.yandex.ru" in url: + base_url = "https://cloud-api.yandex.net/v1/disk/public/resources/download?" + public_key = url + final_url = base_url + urlencode(dict(public_key=public_key)) + response = requests.get(final_url) + download_url = response.json()["href"] + download_response = requests.get(download_url) + + if download_response.status_code == 200: + filename = parse_qs(urlparse(unquote(download_url)).query).get( + "filename", [""] + )[0] + if filename: + os.chdir(zips_path) + with open(filename, "wb") as f: + f.write(download_response.content) + else: + print("Failed to get filename from URL.") + return None + + elif "pixeldrain.com" in url: + try: + file_id = url.split("pixeldrain.com/u/")[1] + os.chdir(zips_path) + print(file_id) + response = requests.get(f"https://pixeldrain.com/api/file/{file_id}") + if response.status_code == 200: + file_name = ( + response.headers.get("Content-Disposition") + .split("filename=")[-1] + .strip('";') + ) + os.makedirs(zips_path, exist_ok=True) + with open(os.path.join(zips_path, file_name), "wb") as newfile: + newfile.write(response.content) + os.chdir(file_path) + return "downloaded" + else: + os.chdir(file_path) + return None + except Exception as error: + print(f"An error occurred downloading the file: {error}") + os.chdir(file_path) + return None + + elif "cdn.discordapp.com" in url: + file = requests.get(url) + os.chdir(zips_path) + if file.status_code == 200: + name = url.split("/") + with open(os.path.join(name[-1]), "wb") as newfile: + newfile.write(file.content) + else: + return None + elif "/blob/" in url or "/resolve/" in url: + os.chdir(zips_path) + if "/blob/" in url: + url = url.replace("/blob/", "/resolve/") + + response = requests.get(url, stream=True) + if response.status_code == 200: + content_disposition = six.moves.urllib_parse.unquote( + response.headers["Content-Disposition"] + ) + m = re.search(r'filename="([^"]+)"', content_disposition) + file_name = m.groups()[0] + file_name = file_name.replace(os.path.sep, "_") + total_size_in_bytes = 
int(response.headers.get("content-length", 0)) + block_size = 1024 + progress_bar_length = 50 + progress = 0 + + with open(os.path.join(zips_path, file_name), "wb") as file: + for data in response.iter_content(block_size): + file.write(data) + progress += len(data) + progress_percent = int((progress / total_size_in_bytes) * 100) + num_dots = int( + (progress / total_size_in_bytes) * progress_bar_length + ) + progress_bar = ( + "[" + + "." * num_dots + + " " * (progress_bar_length - num_dots) + + "]" + ) + print( + f"{progress_percent}% {progress_bar} {progress}/{total_size_in_bytes} ", + end="\r", + ) + if progress_percent == 100: + print("\n") + + else: + os.chdir(now_dir) + return None + elif "/tree/main" in url: + os.chdir(zips_path) + response = requests.get(url) + soup = BeautifulSoup(response.content, "html.parser") + temp_url = "" + for link in soup.find_all("a", href=True): + if link["href"].endswith(".zip"): + temp_url = link["href"] + break + if temp_url: + url = temp_url + url = url.replace("blob", "resolve") + if "huggingface.co" not in url: + url = "https://huggingface.co" + url + + wget.download(url) + else: + os.chdir(now_dir) + return None + elif "applio.org" in url: + parts = url.split("/") + id_with_query = parts[-1] + id_parts = id_with_query.split("?") + id_number = id_parts[0] + + url = "https://cjtfqzjfdimgpvpwhzlv.supabase.co/rest/v1/models" + headers = { + "apikey": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImNqdGZxempmZGltZ3B2cHdoemx2Iiwicm9sZSI6ImFub24iLCJpYXQiOjE2OTUxNjczODgsImV4cCI6MjAxMDc0MzM4OH0.7z5WMIbjR99c2Ooc0ma7B_FyGq10G8X-alkCYTkKR10" + } + + params = {"id": f"eq.{id_number}"} + response = requests.get(url, headers=headers, params=params) + if response.status_code == 200: + json_response = response.json() + print(json_response) + if json_response: + link = json_response[0]["link"] + verify = download_from_url(link) + if verify == "downloaded": + return "downloaded" + else: + return None + else: + return None + else: + try: + os.chdir(zips_path) + wget.download(url) + except Exception as error: + os.chdir(now_dir) + print(f"An error occurred downloading the file: {error}") + return None + + for currentPath, _, zipFiles in os.walk(zips_path): + for Files in zipFiles: + filePart = Files.split(".") + extensionFile = filePart[len(filePart) - 1] + filePart.pop() + nameFile = "_".join(filePart) + realPath = os.path.join(currentPath, Files) + os.rename(realPath, nameFile + "." 
+ extensionFile) + + os.chdir(now_dir) + return "downloaded" + + os.chdir(now_dir) + return None + + +def extract_and_show_progress(zipfile_path, unzips_path): + try: + with zipfile.ZipFile(zipfile_path, "r") as zip_ref: + for file_info in zip_ref.infolist(): + zip_ref.extract(file_info, unzips_path) + os.remove(zipfile_path) + return True + except Exception as error: + print(f"An error occurred extracting the zip file: {error}") + return False + + +def unzip_file(zip_path, zip_file_name): + zip_file_path = os.path.join(zip_path, zip_file_name + ".zip") + extract_path = os.path.join(file_path, zip_file_name) + with zipfile.ZipFile(zip_file_path, "r") as zip_ref: + zip_ref.extractall(extract_path) + os.remove(zip_file_path) + + +def model_download_pipeline(url: str): + try: + verify = download_from_url(url) + if verify == "downloaded": + extract_folder_path = "" + for filename in os.listdir(zips_path): + if filename.endswith(".zip"): + zipfile_path = os.path.join(zips_path, filename) + print("Proceeding with the extraction...") + + model_zip = os.path.basename(zipfile_path) + model_name = format_title(model_zip.split(".zip")[0]) + extract_folder_path = os.path.join( + "logs", + os.path.normpath(model_name), + ) + success = extract_and_show_progress( + zipfile_path, extract_folder_path + ) + + macosx_path = os.path.join(extract_folder_path, "__MACOSX") + if os.path.exists(macosx_path): + shutil.rmtree(macosx_path) + + subfolders = [ + f + for f in os.listdir(extract_folder_path) + if os.path.isdir(os.path.join(extract_folder_path, f)) + ] + if len(subfolders) == 1: + subfolder_path = os.path.join( + extract_folder_path, subfolders[0] + ) + for item in os.listdir(subfolder_path): + s = os.path.join(subfolder_path, item) + d = os.path.join(extract_folder_path, item) + shutil.move(s, d) + os.rmdir(subfolder_path) + + for item in os.listdir(extract_folder_path): + if ".pth" in item: + file_name = item.split(".pth")[0] + if file_name != model_name: + os.rename( + os.path.join(extract_folder_path, item), + os.path.join( + extract_folder_path, model_name + ".pth" + ), + ) + else: + if "v2" not in item: + if "_nprobe_1_" in item and "_v1" in item: + file_name = item.split("_nprobe_1_")[1].split( + "_v1" + )[0] + if file_name != model_name: + new_file_name = ( + item.split("_nprobe_1_")[0] + + "_nprobe_1_" + + model_name + + "_v1" + ) + os.rename( + os.path.join(extract_folder_path, item), + os.path.join( + extract_folder_path, + new_file_name + ".index", + ), + ) + else: + if "_nprobe_1_" in item and "_v2" in item: + file_name = item.split("_nprobe_1_")[1].split( + "_v2" + )[0] + if file_name != model_name: + new_file_name = ( + item.split("_nprobe_1_")[0] + + "_nprobe_1_" + + model_name + + "_v2" + ) + os.rename( + os.path.join(extract_folder_path, item), + os.path.join( + extract_folder_path, + new_file_name + ".index", + ), + ) + + if success: + print(f"Model {model_name} downloaded!") + else: + print(f"Error downloading {model_name}") + return "Error" + if extract_folder_path == "": + print("Zip file was not found.") + return "Error" + result = search_pth_index(extract_folder_path) + return result + else: + return "Error" + except Exception as error: + print(f"An unexpected error occurred: {error}") + return "Error" diff --git a/rvc/lib/tools/prerequisites_download.py b/rvc/lib/tools/prerequisites_download.py new file mode 100644 index 00000000..6eb24ea6 --- /dev/null +++ b/rvc/lib/tools/prerequisites_download.py @@ -0,0 +1,104 @@ +import os +from concurrent.futures import ThreadPoolExecutor 
+from tqdm import tqdm +import requests + +url_base = "https://huggingface.co/IAHispano/Applio/resolve/main/Resources" + +# Define the file lists +models_list = [("predictors/", ["rmvpe.pt", "fcpe.pt"])] +embedders_list = [("embedders/contentvec/", ["pytorch_model.bin", "config.json"])] +executables_list = [ + ("", ["ffmpeg.exe", "ffprobe.exe"]), +] + +folder_mapping_list = { + "embedders/contentvec/": "rvc/models/embedders/contentvec/", + "predictors/": "rvc/models/predictors/", + "formant/": "rvc/models/formant/", +} + + +def get_file_size_all(file_list): + """ + Calculate the total size of files to be downloaded, regardless of local existence. + """ + total_size = 0 + for remote_folder, files in file_list: + # Use the mapping if available; otherwise, use an empty local folder + local_folder = folder_mapping_list.get(remote_folder, "") + for file in files: + url = f"{url_base}/{remote_folder}{file}" + response = requests.head(url) + total_size += int(response.headers.get("content-length", 0)) + return total_size + + +def download_file(url, destination_path, global_bar): + """ + Download a file from the given URL to the specified destination path, + updating the global progress bar as data is downloaded. + """ + dir_name = os.path.dirname(destination_path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + response = requests.get(url, stream=True) + block_size = 1024 + with open(destination_path, "wb") as file: + for data in response.iter_content(block_size): + file.write(data) + global_bar.update(len(data)) + + +def download_mapping_files(file_mapping_list, global_bar): + """ + Download all files in the provided file mapping list using a thread pool executor, + and update the global progress bar as downloads progress. + This version downloads all files regardless of whether they already exist. + """ + with ThreadPoolExecutor() as executor: + futures = [] + for remote_folder, file_list in file_mapping_list: + local_folder = folder_mapping_list.get(remote_folder, "") + for file in file_list: + destination_path = os.path.join(local_folder, file) + url = f"{url_base}/{remote_folder}{file}" + futures.append( + executor.submit(download_file, url, destination_path, global_bar) + ) + for future in futures: + future.result() + + +def calculate_total_size(models, exe): + """ + Calculate the total size of all files to be downloaded based on selected categories. + """ + total_size = 0 + if models: + total_size += get_file_size_all(models_list) + total_size += get_file_size_all(embedders_list) + if exe and os.name == "nt": + total_size += get_file_size_all(executables_list) + return total_size + + +def prerequisites_download_pipeline(models, exe): + """ + Manage the download pipeline for different categories of files. 
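+
+    Example (illustrative only): ``prerequisites_download_pipeline(models=True,
+    exe=False)`` fetches rmvpe.pt / fcpe.pt and the contentvec embedder into
+    rvc/models/, while skipping the Windows-only ffmpeg executables.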
+ """ + total_size = calculate_total_size(models, exe) + if total_size > 0: + with tqdm( + total=total_size, unit="iB", unit_scale=True, desc="Downloading all files" + ) as global_bar: + if models: + download_mapping_files(models_list, global_bar) + download_mapping_files(embedders_list, global_bar) + if exe: + if os.name == "nt": + download_mapping_files(executables_list, global_bar) + else: + print("No executables needed for non-Windows systems.") + else: + print("No files to download.") diff --git a/rvc/lib/tools/pretrained_selector.py b/rvc/lib/tools/pretrained_selector.py new file mode 100644 index 00000000..e982fac5 --- /dev/null +++ b/rvc/lib/tools/pretrained_selector.py @@ -0,0 +1,63 @@ +def pretrained_selector(pitch_guidance): + if pitch_guidance == True: + return { + "v1": { + 32000: ( + "rvc/models/pretraineds/pretrained_v1/f0G32k.pth", + "rvc/models/pretraineds/pretrained_v1/f0D32k.pth", + ), + 40000: ( + "rvc/models/pretraineds/pretrained_v1/f0G40k.pth", + "rvc/models/pretraineds/pretrained_v1/f0D40k.pth", + ), + 48000: ( + "rvc/models/pretraineds/pretrained_v1/f0G48k.pth", + "rvc/models/pretraineds/pretrained_v1/f0D48k.pth", + ), + }, + "v2": { + 32000: ( + "rvc/models/pretraineds/pretrained_v2/f0G32k.pth", + "rvc/models/pretraineds/pretrained_v2/f0D32k.pth", + ), + 40000: ( + "rvc/models/pretraineds/pretrained_v2/f0G40k.pth", + "rvc/models/pretraineds/pretrained_v2/f0D40k.pth", + ), + 48000: ( + "rvc/models/pretraineds/pretrained_v2/f0G48k.pth", + "rvc/models/pretraineds/pretrained_v2/f0D48k.pth", + ), + }, + } + elif pitch_guidance == False: + return { + "v1": { + 32000: ( + "rvc/models/pretraineds/pretrained_v1/G32k.pth", + "rvc/models/pretraineds/pretrained_v1/D32k.pth", + ), + 40000: ( + "rvc/models/pretraineds/pretrained_v1/G40k.pth", + "rvc/models/pretraineds/pretrained_v1/D40k.pth", + ), + 48000: ( + "rvc/models/pretraineds/pretrained_v1/G48k.pth", + "rvc/models/pretraineds/pretrained_v1/D48k.pth", + ), + }, + "v2": { + 32000: ( + "rvc/models/pretraineds/pretrained_v2/G32k.pth", + "rvc/models/pretraineds/pretrained_v2/D32k.pth", + ), + 40000: ( + "rvc/models/pretraineds/pretrained_v2/G40k.pth", + "rvc/models/pretraineds/pretrained_v2/D40k.pth", + ), + 48000: ( + "rvc/models/pretraineds/pretrained_v2/G48k.pth", + "rvc/models/pretraineds/pretrained_v2/D48k.pth", + ), + }, + } diff --git a/rvc/lib/tools/split_audio.py b/rvc/lib/tools/split_audio.py new file mode 100644 index 00000000..65e7ba94 --- /dev/null +++ b/rvc/lib/tools/split_audio.py @@ -0,0 +1,56 @@ +import numpy as np +import librosa + + +def process_audio(audio, sr=16000, silence_thresh=-60, min_silence_len=250): + """ + Splits an audio signal into segments using a fixed frame size and hop size. + + Parameters: + - audio (np.ndarray): The audio signal to split. + - sr (int): The sample rate of the input audio (default is 16000). + - silence_thresh (int): Silence threshold (default =-60dB) + - min_silence_len (int): Minimum silence duration (default 250ms). + + Returns: + - list of np.ndarray: A list of audio segments. + - np.ndarray: The intervals where the audio was split. 
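+
+    Example (illustrative only, with ``audio`` a 1-D float array):
+        >>> segments, intervals = process_audio(audio, sr=16000)
+        >>> restored = merge_audio(segments, intervals, sr_orig=16000, sr_new=16000)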
+ """ + frame_length = int(min_silence_len / 1000 * sr) + hop_length = frame_length // 2 + intervals = librosa.effects.split( + audio, top_db=-silence_thresh, frame_length=frame_length, hop_length=hop_length + ) + audio_segments = [audio[start:end] for start, end in intervals] + + return audio_segments, intervals + + +def merge_audio(audio_segments, intervals, sr_orig, sr_new): + """ + Merges audio segments back into a single audio signal, filling gaps with silence. + + Parameters: + - audio_segments (list of np.ndarray): The non-silent audio segments. + - intervals (np.ndarray): The intervals used for splitting the original audio. + - sr_orig (int): The sample rate of the original audio + - sr_new (int): The sample rate of the model + + Returns: + - np.ndarray: The merged audio signal with silent gaps restored. + """ + sr_ratio = sr_new / sr_orig if sr_new > sr_orig else 1.0 + + merged_audio = np.zeros( + int(intervals[0][0] * sr_ratio if intervals[0][0] > 0 else 0), + dtype=audio_segments[0].dtype, + ) + + merged_audio = np.concatenate((merged_audio, audio_segments[0])) + + for i in range(1, len(intervals)): + silence_duration = int((intervals[i][0] - intervals[i - 1][1]) * sr_ratio) + silence = np.zeros(silence_duration, dtype=audio_segments[0].dtype) + merged_audio = np.concatenate((merged_audio, silence, audio_segments[i])) + + return merged_audio diff --git a/rvc/lib/tools/tts.py b/rvc/lib/tools/tts.py new file mode 100644 index 00000000..9b30c6e1 --- /dev/null +++ b/rvc/lib/tools/tts.py @@ -0,0 +1,29 @@ +import sys +import asyncio +import edge_tts +import os + + +async def main(): + # Parse command line arguments + tts_file = str(sys.argv[1]) + text = str(sys.argv[2]) + voice = str(sys.argv[3]) + rate = int(sys.argv[4]) + output_file = str(sys.argv[5]) + + rates = f"+{rate}%" if rate >= 0 else f"{rate}%" + if tts_file and os.path.exists(tts_file): + text = "" + try: + with open(tts_file, "r", encoding="utf-8") as file: + text = file.read() + except UnicodeDecodeError: + with open(tts_file, "r") as file: + text = file.read() + await edge_tts.Communicate(text, voice, rate=rates).save(output_file) + print(f"TTS with {voice} completed. 
Output TTS file: '{output_file}'") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/rvc/lib/tools/tts_voices.json b/rvc/lib/tools/tts_voices.json new file mode 100644 index 00000000..b76cf447 --- /dev/null +++ b/rvc/lib/tools/tts_voices.json @@ -0,0 +1,5748 @@ +[ + { + "Name": "Microsoft Server Speech Text to Speech Voice (af-ZA, AdriNeural)", + "ShortName": "af-ZA-AdriNeural", + "Gender": "Female", + "Locale": "af-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Adri Online (Natural) - Afrikaans (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (af-ZA, WillemNeural)", + "ShortName": "af-ZA-WillemNeural", + "Gender": "Male", + "Locale": "af-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Willem Online (Natural) - Afrikaans (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sq-AL, AnilaNeural)", + "ShortName": "sq-AL-AnilaNeural", + "Gender": "Female", + "Locale": "sq-AL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Anila Online (Natural) - Albanian (Albania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sq-AL, IlirNeural)", + "ShortName": "sq-AL-IlirNeural", + "Gender": "Male", + "Locale": "sq-AL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ilir Online (Natural) - Albanian (Albania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (am-ET, AmehaNeural)", + "ShortName": "am-ET-AmehaNeural", + "Gender": "Male", + "Locale": "am-ET", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ameha Online (Natural) - Amharic (Ethiopia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (am-ET, MekdesNeural)", + "ShortName": "am-ET-MekdesNeural", + "Gender": "Female", + "Locale": "am-ET", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mekdes Online (Natural) - Amharic (Ethiopia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-DZ, AminaNeural)", + "ShortName": "ar-DZ-AminaNeural", + "Gender": "Female", + "Locale": "ar-DZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Amina Online (Natural) - Arabic (Algeria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-DZ, IsmaelNeural)", + "ShortName": "ar-DZ-IsmaelNeural", + "Gender": "Male", + "Locale": "ar-DZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + 
"FriendlyName": "Microsoft Ismael Online (Natural) - Arabic (Algeria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-BH, AliNeural)", + "ShortName": "ar-BH-AliNeural", + "Gender": "Male", + "Locale": "ar-BH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ali Online (Natural) - Arabic (Bahrain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-BH, LailaNeural)", + "ShortName": "ar-BH-LailaNeural", + "Gender": "Female", + "Locale": "ar-BH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Laila Online (Natural) - Arabic (Bahrain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-EG, SalmaNeural)", + "ShortName": "ar-EG-SalmaNeural", + "Gender": "Female", + "Locale": "ar-EG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Salma Online (Natural) - Arabic (Egypt)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-EG, ShakirNeural)", + "ShortName": "ar-EG-ShakirNeural", + "Gender": "Male", + "Locale": "ar-EG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Shakir Online (Natural) - Arabic (Egypt)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-IQ, BasselNeural)", + "ShortName": "ar-IQ-BasselNeural", + "Gender": "Male", + "Locale": "ar-IQ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Bassel Online (Natural) - Arabic (Iraq)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-IQ, RanaNeural)", + "ShortName": "ar-IQ-RanaNeural", + "Gender": "Female", + "Locale": "ar-IQ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rana Online (Natural) - Arabic (Iraq)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-JO, SanaNeural)", + "ShortName": "ar-JO-SanaNeural", + "Gender": "Female", + "Locale": "ar-JO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sana Online (Natural) - Arabic (Jordan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-JO, TaimNeural)", + "ShortName": "ar-JO-TaimNeural", + "Gender": "Male", + "Locale": "ar-JO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Taim Online (Natural) - Arabic (Jordan)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-KW, FahedNeural)", + "ShortName": "ar-KW-FahedNeural", + "Gender": "Male", + "Locale": "ar-KW", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Fahed Online (Natural) - Arabic (Kuwait)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-KW, NouraNeural)", + "ShortName": "ar-KW-NouraNeural", + "Gender": "Female", + "Locale": "ar-KW", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Noura Online (Natural) - Arabic (Kuwait)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-LB, LaylaNeural)", + "ShortName": "ar-LB-LaylaNeural", + "Gender": "Female", + "Locale": "ar-LB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Layla Online (Natural) - Arabic (Lebanon)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-LB, RamiNeural)", + "ShortName": "ar-LB-RamiNeural", + "Gender": "Male", + "Locale": "ar-LB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rami Online (Natural) - Arabic (Lebanon)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-LY, ImanNeural)", + "ShortName": "ar-LY-ImanNeural", + "Gender": "Female", + "Locale": "ar-LY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Iman Online (Natural) - Arabic (Libya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-LY, OmarNeural)", + "ShortName": "ar-LY-OmarNeural", + "Gender": "Male", + "Locale": "ar-LY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Omar Online (Natural) - Arabic (Libya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-MA, JamalNeural)", + "ShortName": "ar-MA-JamalNeural", + "Gender": "Male", + "Locale": "ar-MA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jamal Online (Natural) - Arabic (Morocco)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-MA, MounaNeural)", + "ShortName": "ar-MA-MounaNeural", + "Gender": "Female", + "Locale": "ar-MA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mouna Online (Natural) - Arabic (Morocco)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": 
"Microsoft Server Speech Text to Speech Voice (ar-OM, AbdullahNeural)", + "ShortName": "ar-OM-AbdullahNeural", + "Gender": "Male", + "Locale": "ar-OM", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Abdullah Online (Natural) - Arabic (Oman)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-OM, AyshaNeural)", + "ShortName": "ar-OM-AyshaNeural", + "Gender": "Female", + "Locale": "ar-OM", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aysha Online (Natural) - Arabic (Oman)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-QA, AmalNeural)", + "ShortName": "ar-QA-AmalNeural", + "Gender": "Female", + "Locale": "ar-QA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Amal Online (Natural) - Arabic (Qatar)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-QA, MoazNeural)", + "ShortName": "ar-QA-MoazNeural", + "Gender": "Male", + "Locale": "ar-QA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Moaz Online (Natural) - Arabic (Qatar)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-SA, HamedNeural)", + "ShortName": "ar-SA-HamedNeural", + "Gender": "Male", + "Locale": "ar-SA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hamed Online (Natural) - Arabic (Saudi Arabia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-SA, ZariyahNeural)", + "ShortName": "ar-SA-ZariyahNeural", + "Gender": "Female", + "Locale": "ar-SA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Zariyah Online (Natural) - Arabic (Saudi Arabia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-SY, AmanyNeural)", + "ShortName": "ar-SY-AmanyNeural", + "Gender": "Female", + "Locale": "ar-SY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Amany Online (Natural) - Arabic (Syria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-SY, LaithNeural)", + "ShortName": "ar-SY-LaithNeural", + "Gender": "Male", + "Locale": "ar-SY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Laith Online (Natural) - Arabic (Syria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-TN, HediNeural)", + "ShortName": 
"ar-TN-HediNeural", + "Gender": "Male", + "Locale": "ar-TN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hedi Online (Natural) - Arabic (Tunisia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-TN, ReemNeural)", + "ShortName": "ar-TN-ReemNeural", + "Gender": "Female", + "Locale": "ar-TN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Reem Online (Natural) - Arabic (Tunisia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-AE, FatimaNeural)", + "ShortName": "ar-AE-FatimaNeural", + "Gender": "Female", + "Locale": "ar-AE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Fatima Online (Natural) - Arabic (United Arab Emirates)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-AE, HamdanNeural)", + "ShortName": "ar-AE-HamdanNeural", + "Gender": "Male", + "Locale": "ar-AE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hamdan Online (Natural) - Arabic (United Arab Emirates)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-YE, MaryamNeural)", + "ShortName": "ar-YE-MaryamNeural", + "Gender": "Female", + "Locale": "ar-YE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Maryam Online (Natural) - Arabic (Yemen)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ar-YE, SalehNeural)", + "ShortName": "ar-YE-SalehNeural", + "Gender": "Male", + "Locale": "ar-YE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Saleh Online (Natural) - Arabic (Yemen)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (az-AZ, BabekNeural)", + "ShortName": "az-AZ-BabekNeural", + "Gender": "Male", + "Locale": "az-AZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Babek Online (Natural) - Azerbaijani (Azerbaijan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (az-AZ, BanuNeural)", + "ShortName": "az-AZ-BanuNeural", + "Gender": "Female", + "Locale": "az-AZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Banu Online (Natural) - Azerbaijani (Azerbaijan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bn-BD, NabanitaNeural)", + "ShortName": "bn-BD-NabanitaNeural", + "Gender": "Female", + "Locale": 
"bn-BD", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nabanita Online (Natural) - Bangla (Bangladesh)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bn-BD, PradeepNeural)", + "ShortName": "bn-BD-PradeepNeural", + "Gender": "Male", + "Locale": "bn-BD", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Pradeep Online (Natural) - Bangla (Bangladesh)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bn-IN, BashkarNeural)", + "ShortName": "bn-IN-BashkarNeural", + "Gender": "Male", + "Locale": "bn-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Bashkar Online (Natural) - Bangla (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bn-IN, TanishaaNeural)", + "ShortName": "bn-IN-TanishaaNeural", + "Gender": "Female", + "Locale": "bn-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Tanishaa Online (Natural) - Bengali (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bs-BA, GoranNeural)", + "ShortName": "bs-BA-GoranNeural", + "Gender": "Male", + "Locale": "bs-BA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Goran Online (Natural) - Bosnian (Bosnia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bs-BA, VesnaNeural)", + "ShortName": "bs-BA-VesnaNeural", + "Gender": "Female", + "Locale": "bs-BA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Vesna Online (Natural) - Bosnian (Bosnia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bg-BG, BorislavNeural)", + "ShortName": "bg-BG-BorislavNeural", + "Gender": "Male", + "Locale": "bg-BG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Borislav Online (Natural) - Bulgarian (Bulgaria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (bg-BG, KalinaNeural)", + "ShortName": "bg-BG-KalinaNeural", + "Gender": "Female", + "Locale": "bg-BG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Kalina Online (Natural) - Bulgarian (Bulgaria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (my-MM, NilarNeural)", + "ShortName": "my-MM-NilarNeural", + "Gender": "Female", + "Locale": "my-MM", + "SuggestedCodec": 
"audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nilar Online (Natural) - Burmese (Myanmar)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (my-MM, ThihaNeural)", + "ShortName": "my-MM-ThihaNeural", + "Gender": "Male", + "Locale": "my-MM", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thiha Online (Natural) - Burmese (Myanmar)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ca-ES, EnricNeural)", + "ShortName": "ca-ES-EnricNeural", + "Gender": "Male", + "Locale": "ca-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Enric Online (Natural) - Catalan (Spain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ca-ES, JoanaNeural)", + "ShortName": "ca-ES-JoanaNeural", + "Gender": "Female", + "Locale": "ca-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Joana Online (Natural) - Catalan (Spain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-HK, HiuGaaiNeural)", + "ShortName": "zh-HK-HiuGaaiNeural", + "Gender": "Female", + "Locale": "zh-HK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft HiuGaai Online (Natural) - Chinese (Cantonese Traditional)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-HK, HiuMaanNeural)", + "ShortName": "zh-HK-HiuMaanNeural", + "Gender": "Female", + "Locale": "zh-HK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft HiuMaan Online (Natural) - Chinese (Hong Kong)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-HK, WanLungNeural)", + "ShortName": "zh-HK-WanLungNeural", + "Gender": "Male", + "Locale": "zh-HK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft WanLung Online (Natural) - Chinese (Hong Kong)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, XiaoxiaoNeural)", + "ShortName": "zh-CN-XiaoxiaoNeural", + "Gender": "Female", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Xiaoxiao Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Warm" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, XiaoyiNeural)", + "ShortName": "zh-CN-XiaoyiNeural", + "Gender": "Female", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": 
"Microsoft Xiaoyi Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Cartoon", + "Novel" + ], + "VoicePersonalities": [ + "Lively" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, YunjianNeural)", + "ShortName": "zh-CN-YunjianNeural", + "Gender": "Male", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yunjian Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Sports", + " Novel" + ], + "VoicePersonalities": [ + "Passion" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, YunxiNeural)", + "ShortName": "zh-CN-YunxiNeural", + "Gender": "Male", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yunxi Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Novel" + ], + "VoicePersonalities": [ + "Lively", + "Sunshine" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, YunxiaNeural)", + "ShortName": "zh-CN-YunxiaNeural", + "Gender": "Male", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yunxia Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Cartoon", + "Novel" + ], + "VoicePersonalities": [ + "Cute" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN, YunyangNeural)", + "ShortName": "zh-CN-YunyangNeural", + "Gender": "Male", + "Locale": "zh-CN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yunyang Online (Natural) - Chinese (Mainland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News" + ], + "VoicePersonalities": [ + "Professional", + "Reliable" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN-liaoning, XiaobeiNeural)", + "ShortName": "zh-CN-liaoning-XiaobeiNeural", + "Gender": "Female", + "Locale": "zh-CN-liaoning", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Xiaobei Online (Natural) - Chinese (Northeastern Mandarin)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Dialect" + ], + "VoicePersonalities": [ + "Humorous" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-TW, HsiaoChenNeural)", + "ShortName": "zh-TW-HsiaoChenNeural", + "Gender": "Female", + "Locale": "zh-TW", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft HsiaoChen Online (Natural) - Chinese (Taiwan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-TW, YunJheNeural)", + "ShortName": "zh-TW-YunJheNeural", + "Gender": "Male", + "Locale": "zh-TW", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft YunJhe Online (Natural) - Chinese (Taiwan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-TW, HsiaoYuNeural)", + "ShortName": "zh-TW-HsiaoYuNeural", + "Gender": "Female", + "Locale": "zh-TW", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft HsiaoYu Online (Natural) - Chinese 
(Taiwanese Mandarin)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zh-CN-shaanxi, XiaoniNeural)", + "ShortName": "zh-CN-shaanxi-XiaoniNeural", + "Gender": "Female", + "Locale": "zh-CN-shaanxi", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Xiaoni Online (Natural) - Chinese (Zhongyuan Mandarin Shaanxi)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Dialect" + ], + "VoicePersonalities": [ + "Bright" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hr-HR, GabrijelaNeural)", + "ShortName": "hr-HR-GabrijelaNeural", + "Gender": "Female", + "Locale": "hr-HR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gabrijela Online (Natural) - Croatian (Croatia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hr-HR, SreckoNeural)", + "ShortName": "hr-HR-SreckoNeural", + "Gender": "Male", + "Locale": "hr-HR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Srecko Online (Natural) - Croatian (Croatia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (cs-CZ, AntoninNeural)", + "ShortName": "cs-CZ-AntoninNeural", + "Gender": "Male", + "Locale": "cs-CZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Antonin Online (Natural) - Czech (Czech)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (cs-CZ, VlastaNeural)", + "ShortName": "cs-CZ-VlastaNeural", + "Gender": "Female", + "Locale": "cs-CZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Vlasta Online (Natural) - Czech (Czech)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (da-DK, ChristelNeural)", + "ShortName": "da-DK-ChristelNeural", + "Gender": "Female", + "Locale": "da-DK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Christel Online (Natural) - Danish (Denmark)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (da-DK, JeppeNeural)", + "ShortName": "da-DK-JeppeNeural", + "Gender": "Male", + "Locale": "da-DK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jeppe Online (Natural) - Danish (Denmark)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nl-BE, ArnaudNeural)", + "ShortName": "nl-BE-ArnaudNeural", + "Gender": "Male", + "Locale": "nl-BE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Arnaud Online (Natural) - Dutch (Belgium)", + "Status": "GA", + 
"VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nl-BE, DenaNeural)", + "ShortName": "nl-BE-DenaNeural", + "Gender": "Female", + "Locale": "nl-BE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dena Online (Natural) - Dutch (Belgium)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nl-NL, ColetteNeural)", + "ShortName": "nl-NL-ColetteNeural", + "Gender": "Female", + "Locale": "nl-NL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Colette Online (Natural) - Dutch (Netherlands)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nl-NL, FennaNeural)", + "ShortName": "nl-NL-FennaNeural", + "Gender": "Female", + "Locale": "nl-NL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Fenna Online (Natural) - Dutch (Netherlands)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nl-NL, MaartenNeural)", + "ShortName": "nl-NL-MaartenNeural", + "Gender": "Male", + "Locale": "nl-NL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Maarten Online (Natural) - Dutch (Netherlands)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-AU, NatashaNeural)", + "ShortName": "en-AU-NatashaNeural", + "Gender": "Female", + "Locale": "en-AU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Natasha Online (Natural) - English (Australia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-AU, WilliamNeural)", + "ShortName": "en-AU-WilliamNeural", + "Gender": "Male", + "Locale": "en-AU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft William Online (Natural) - English (Australia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-CA, ClaraNeural)", + "ShortName": "en-CA-ClaraNeural", + "Gender": "Female", + "Locale": "en-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Clara Online (Natural) - English (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-CA, LiamNeural)", + "ShortName": "en-CA-LiamNeural", + "Gender": "Male", + "Locale": "en-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Liam Online (Natural) - English (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + 
"VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-HK, SamNeural)", + "ShortName": "en-HK-SamNeural", + "Gender": "Male", + "Locale": "en-HK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sam Online (Natural) - English (Hongkong)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-HK, YanNeural)", + "ShortName": "en-HK-YanNeural", + "Gender": "Female", + "Locale": "en-HK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yan Online (Natural) - English (Hongkong)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-IN, NeerjaExpressiveNeural)", + "ShortName": "en-IN-NeerjaExpressiveNeural", + "Gender": "Female", + "Locale": "en-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Neerja Online (Natural) - English (India) (Preview)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-IN, NeerjaNeural)", + "ShortName": "en-IN-NeerjaNeural", + "Gender": "Female", + "Locale": "en-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Neerja Online (Natural) - English (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-IN, PrabhatNeural)", + "ShortName": "en-IN-PrabhatNeural", + "Gender": "Male", + "Locale": "en-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Prabhat Online (Natural) - English (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-IE, ConnorNeural)", + "ShortName": "en-IE-ConnorNeural", + "Gender": "Male", + "Locale": "en-IE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Connor Online (Natural) - English (Ireland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-IE, EmilyNeural)", + "ShortName": "en-IE-EmilyNeural", + "Gender": "Female", + "Locale": "en-IE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Emily Online (Natural) - English (Ireland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-KE, AsiliaNeural)", + "ShortName": "en-KE-AsiliaNeural", + "Gender": "Female", + "Locale": "en-KE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Asilia Online (Natural) - English (Kenya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + 
{ + "Name": "Microsoft Server Speech Text to Speech Voice (en-KE, ChilembaNeural)", + "ShortName": "en-KE-ChilembaNeural", + "Gender": "Male", + "Locale": "en-KE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Chilemba Online (Natural) - English (Kenya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-NZ, MitchellNeural)", + "ShortName": "en-NZ-MitchellNeural", + "Gender": "Male", + "Locale": "en-NZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mitchell Online (Natural) - English (New Zealand)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-NZ, MollyNeural)", + "ShortName": "en-NZ-MollyNeural", + "Gender": "Female", + "Locale": "en-NZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Molly Online (Natural) - English (New Zealand)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-NG, AbeoNeural)", + "ShortName": "en-NG-AbeoNeural", + "Gender": "Male", + "Locale": "en-NG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Abeo Online (Natural) - English (Nigeria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-NG, EzinneNeural)", + "ShortName": "en-NG-EzinneNeural", + "Gender": "Female", + "Locale": "en-NG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ezinne Online (Natural) - English (Nigeria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-PH, JamesNeural)", + "ShortName": "en-PH-JamesNeural", + "Gender": "Male", + "Locale": "en-PH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft James Online (Natural) - English (Philippines)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-PH, RosaNeural)", + "ShortName": "en-PH-RosaNeural", + "Gender": "Female", + "Locale": "en-PH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rosa Online (Natural) - English (Philippines)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-SG, LunaNeural)", + "ShortName": "en-SG-LunaNeural", + "Gender": "Female", + "Locale": "en-SG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Luna Online (Natural) - English (Singapore)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-SG, 
WayneNeural)", + "ShortName": "en-SG-WayneNeural", + "Gender": "Male", + "Locale": "en-SG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Wayne Online (Natural) - English (Singapore)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-ZA, LeahNeural)", + "ShortName": "en-ZA-LeahNeural", + "Gender": "Female", + "Locale": "en-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Leah Online (Natural) - English (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-ZA, LukeNeural)", + "ShortName": "en-ZA-LukeNeural", + "Gender": "Male", + "Locale": "en-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Luke Online (Natural) - English (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-TZ, ElimuNeural)", + "ShortName": "en-TZ-ElimuNeural", + "Gender": "Male", + "Locale": "en-TZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Elimu Online (Natural) - English (Tanzania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-TZ, ImaniNeural)", + "ShortName": "en-TZ-ImaniNeural", + "Gender": "Female", + "Locale": "en-TZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Imani Online (Natural) - English (Tanzania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-GB, LibbyNeural)", + "ShortName": "en-GB-LibbyNeural", + "Gender": "Female", + "Locale": "en-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Libby Online (Natural) - English (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-GB, MaisieNeural)", + "ShortName": "en-GB-MaisieNeural", + "Gender": "Female", + "Locale": "en-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Maisie Online (Natural) - English (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-GB, RyanNeural)", + "ShortName": "en-GB-RyanNeural", + "Gender": "Male", + "Locale": "en-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ryan Online (Natural) - English (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-GB, SoniaNeural)", + "ShortName": "en-GB-SoniaNeural", + "Gender": 
"Female", + "Locale": "en-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sonia Online (Natural) - English (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-GB, ThomasNeural)", + "ShortName": "en-GB-ThomasNeural", + "Gender": "Male", + "Locale": "en-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thomas Online (Natural) - English (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AvaMultilingualNeural)", + "ShortName": "en-US-AvaMultilingualNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft AvaMultilingual Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Expressive", + "Caring", + "Pleasant", + "Friendly" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AndrewMultilingualNeural)", + "ShortName": "en-US-AndrewMultilingualNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft AndrewMultilingual Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Warm", + "Confident", + "Authentic", + "Honest" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, EmmaMultilingualNeural)", + "ShortName": "en-US-EmmaMultilingualNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft EmmaMultilingual Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Cheerful", + "Clear", + "Conversational" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, BrianMultilingualNeural)", + "ShortName": "en-US-BrianMultilingualNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft BrianMultilingual Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Approachable", + "Casual", + "Sincere" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AvaNeural)", + "ShortName": "en-US-AvaNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ava Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Expressive", + "Caring", + "Pleasant", + "Friendly" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AndrewNeural)", + "ShortName": "en-US-AndrewNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Andrew Online (Natural) - English 
(United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Warm", + "Confident", + "Authentic", + "Honest" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, EmmaNeural)", + "ShortName": "en-US-EmmaNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Emma Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Cheerful", + "Clear", + "Conversational" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, BrianNeural)", + "ShortName": "en-US-BrianNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Brian Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Conversation", + "Copilot" + ], + "VoicePersonalities": [ + "Approachable", + "Casual", + "Sincere" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AnaNeural)", + "ShortName": "en-US-AnaNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ana Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "Cartoon", + "Conversation" + ], + "VoicePersonalities": [ + "Cute" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, AriaNeural)", + "ShortName": "en-US-AriaNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aria Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Positive", + "Confident" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, ChristopherNeural)", + "ShortName": "en-US-ChristopherNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Christopher Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Reliable", + "Authority" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, EricNeural)", + "ShortName": "en-US-EricNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Eric Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Rational" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, GuyNeural)", + "ShortName": "en-US-GuyNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Guy Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Passion" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, JennyNeural)", + "ShortName": "en-US-JennyNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", 
+ "FriendlyName": "Microsoft Jenny Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Considerate", + "Comfort" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, MichelleNeural)", + "ShortName": "en-US-MichelleNeural", + "Gender": "Female", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Michelle Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Friendly", + "Pleasant" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, RogerNeural)", + "ShortName": "en-US-RogerNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Roger Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Lively" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (en-US, SteffanNeural)", + "ShortName": "en-US-SteffanNeural", + "Gender": "Male", + "Locale": "en-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Steffan Online (Natural) - English (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "News", + "Novel" + ], + "VoicePersonalities": [ + "Rational" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (et-EE, AnuNeural)", + "ShortName": "et-EE-AnuNeural", + "Gender": "Female", + "Locale": "et-EE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Anu Online (Natural) - Estonian (Estonia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (et-EE, KertNeural)", + "ShortName": "et-EE-KertNeural", + "Gender": "Male", + "Locale": "et-EE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Kert Online (Natural) - Estonian (Estonia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fil-PH, AngeloNeural)", + "ShortName": "fil-PH-AngeloNeural", + "Gender": "Male", + "Locale": "fil-PH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Angelo Online (Natural) - Filipino (Philippines)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fil-PH, BlessicaNeural)", + "ShortName": "fil-PH-BlessicaNeural", + "Gender": "Female", + "Locale": "fil-PH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Blessica Online (Natural) - Filipino (Philippines)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fi-FI, HarriNeural)", + "ShortName": "fi-FI-HarriNeural", + "Gender": "Male", + "Locale": "fi-FI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft 
Harri Online (Natural) - Finnish (Finland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fi-FI, NooraNeural)", + "ShortName": "fi-FI-NooraNeural", + "Gender": "Female", + "Locale": "fi-FI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Noora Online (Natural) - Finnish (Finland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-BE, CharlineNeural)", + "ShortName": "fr-BE-CharlineNeural", + "Gender": "Female", + "Locale": "fr-BE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Charline Online (Natural) - French (Belgium)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-BE, GerardNeural)", + "ShortName": "fr-BE-GerardNeural", + "Gender": "Male", + "Locale": "fr-BE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gerard Online (Natural) - French (Belgium)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CA, ThierryNeural)", + "ShortName": "fr-CA-ThierryNeural", + "Gender": "Male", + "Locale": "fr-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thierry Online (Natural) - French (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CA, AntoineNeural)", + "ShortName": "fr-CA-AntoineNeural", + "Gender": "Male", + "Locale": "fr-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Antoine Online (Natural) - French (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CA, JeanNeural)", + "ShortName": "fr-CA-JeanNeural", + "Gender": "Male", + "Locale": "fr-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jean Online (Natural) - French (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CA, SylvieNeural)", + "ShortName": "fr-CA-SylvieNeural", + "Gender": "Female", + "Locale": "fr-CA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sylvie Online (Natural) - French (Canada)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-FR, VivienneMultilingualNeural)", + "ShortName": "fr-FR-VivienneMultilingualNeural", + "Gender": "Female", + "Locale": "fr-FR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft VivienneMultilingual Online (Natural) - French 
(France)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-FR, RemyMultilingualNeural)", + "ShortName": "fr-FR-RemyMultilingualNeural", + "Gender": "Male", + "Locale": "fr-FR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft RemyMultilingual Online (Natural) - French (France)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-FR, DeniseNeural)", + "ShortName": "fr-FR-DeniseNeural", + "Gender": "Female", + "Locale": "fr-FR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Denise Online (Natural) - French (France)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-FR, EloiseNeural)", + "ShortName": "fr-FR-EloiseNeural", + "Gender": "Female", + "Locale": "fr-FR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Eloise Online (Natural) - French (France)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-FR, HenriNeural)", + "ShortName": "fr-FR-HenriNeural", + "Gender": "Male", + "Locale": "fr-FR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Henri Online (Natural) - French (France)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CH, ArianeNeural)", + "ShortName": "fr-CH-ArianeNeural", + "Gender": "Female", + "Locale": "fr-CH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ariane Online (Natural) - French (Switzerland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fr-CH, FabriceNeural)", + "ShortName": "fr-CH-FabriceNeural", + "Gender": "Male", + "Locale": "fr-CH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Fabrice Online (Natural) - French (Switzerland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (gl-ES, RoiNeural)", + "ShortName": "gl-ES-RoiNeural", + "Gender": "Male", + "Locale": "gl-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Roi Online (Natural) - Galician (Spain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (gl-ES, SabelaNeural)", + "ShortName": "gl-ES-SabelaNeural", + "Gender": "Female", + "Locale": "gl-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sabela Online (Natural) - Galician (Spain)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ka-GE, EkaNeural)", + "ShortName": "ka-GE-EkaNeural", + "Gender": "Female", + "Locale": "ka-GE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Eka Online (Natural) - Georgian (Georgia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ka-GE, GiorgiNeural)", + "ShortName": "ka-GE-GiorgiNeural", + "Gender": "Male", + "Locale": "ka-GE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Giorgi Online (Natural) - Georgian (Georgia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-AT, IngridNeural)", + "ShortName": "de-AT-IngridNeural", + "Gender": "Female", + "Locale": "de-AT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ingrid Online (Natural) - German (Austria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-AT, JonasNeural)", + "ShortName": "de-AT-JonasNeural", + "Gender": "Male", + "Locale": "de-AT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jonas Online (Natural) - German (Austria)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, SeraphinaMultilingualNeural)", + "ShortName": "de-DE-SeraphinaMultilingualNeural", + "Gender": "Female", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft SeraphinaMultilingual Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, FlorianMultilingualNeural)", + "ShortName": "de-DE-FlorianMultilingualNeural", + "Gender": "Male", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft FlorianMultilingual Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, AmalaNeural)", + "ShortName": "de-DE-AmalaNeural", + "Gender": "Female", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Amala Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, ConradNeural)", + "ShortName": "de-DE-ConradNeural", + "Gender": "Male", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Conrad Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, KatjaNeural)", + "ShortName": "de-DE-KatjaNeural", + "Gender": "Female", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Katja Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-DE, KillianNeural)", + "ShortName": "de-DE-KillianNeural", + "Gender": "Male", + "Locale": "de-DE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Killian Online (Natural) - German (Germany)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-CH, JanNeural)", + "ShortName": "de-CH-JanNeural", + "Gender": "Male", + "Locale": "de-CH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jan Online (Natural) - German (Switzerland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (de-CH, LeniNeural)", + "ShortName": "de-CH-LeniNeural", + "Gender": "Female", + "Locale": "de-CH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Leni Online (Natural) - German (Switzerland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (el-GR, AthinaNeural)", + "ShortName": "el-GR-AthinaNeural", + "Gender": "Female", + "Locale": "el-GR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Athina Online (Natural) - Greek (Greece)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (el-GR, NestorasNeural)", + "ShortName": "el-GR-NestorasNeural", + "Gender": "Male", + "Locale": "el-GR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nestoras Online (Natural) - Greek (Greece)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (gu-IN, DhwaniNeural)", + "ShortName": "gu-IN-DhwaniNeural", + "Gender": "Female", + "Locale": "gu-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dhwani Online (Natural) - Gujarati (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (gu-IN, NiranjanNeural)", + "ShortName": "gu-IN-NiranjanNeural", + "Gender": "Male", + "Locale": "gu-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Niranjan Online (Natural) - Gujarati (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + 
"Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (he-IL, AvriNeural)", + "ShortName": "he-IL-AvriNeural", + "Gender": "Male", + "Locale": "he-IL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Avri Online (Natural) - Hebrew (Israel)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (he-IL, HilaNeural)", + "ShortName": "he-IL-HilaNeural", + "Gender": "Female", + "Locale": "he-IL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hila Online (Natural) - Hebrew (Israel)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hi-IN, MadhurNeural)", + "ShortName": "hi-IN-MadhurNeural", + "Gender": "Male", + "Locale": "hi-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Madhur Online (Natural) - Hindi (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hi-IN, SwaraNeural)", + "ShortName": "hi-IN-SwaraNeural", + "Gender": "Female", + "Locale": "hi-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Swara Online (Natural) - Hindi (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hu-HU, NoemiNeural)", + "ShortName": "hu-HU-NoemiNeural", + "Gender": "Female", + "Locale": "hu-HU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Noemi Online (Natural) - Hungarian (Hungary)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (hu-HU, TamasNeural)", + "ShortName": "hu-HU-TamasNeural", + "Gender": "Male", + "Locale": "hu-HU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Tamas Online (Natural) - Hungarian (Hungary)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (is-IS, GudrunNeural)", + "ShortName": "is-IS-GudrunNeural", + "Gender": "Female", + "Locale": "is-IS", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gudrun Online (Natural) - Icelandic (Iceland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (is-IS, GunnarNeural)", + "ShortName": "is-IS-GunnarNeural", + "Gender": "Male", + "Locale": "is-IS", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gunnar Online (Natural) - Icelandic (Iceland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (id-ID, 
ArdiNeural)", + "ShortName": "id-ID-ArdiNeural", + "Gender": "Male", + "Locale": "id-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ardi Online (Natural) - Indonesian (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (id-ID, GadisNeural)", + "ShortName": "id-ID-GadisNeural", + "Gender": "Female", + "Locale": "id-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gadis Online (Natural) - Indonesian (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ga-IE, ColmNeural)", + "ShortName": "ga-IE-ColmNeural", + "Gender": "Male", + "Locale": "ga-IE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Colm Online (Natural) - Irish (Ireland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ga-IE, OrlaNeural)", + "ShortName": "ga-IE-OrlaNeural", + "Gender": "Female", + "Locale": "ga-IE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Orla Online (Natural) - Irish (Ireland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (it-IT, GiuseppeNeural)", + "ShortName": "it-IT-GiuseppeNeural", + "Gender": "Male", + "Locale": "it-IT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Giuseppe Online (Natural) - Italian (Italy)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (it-IT, DiegoNeural)", + "ShortName": "it-IT-DiegoNeural", + "Gender": "Male", + "Locale": "it-IT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Diego Online (Natural) - Italian (Italy)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (it-IT, ElsaNeural)", + "ShortName": "it-IT-ElsaNeural", + "Gender": "Female", + "Locale": "it-IT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Elsa Online (Natural) - Italian (Italy)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (it-IT, IsabellaNeural)", + "ShortName": "it-IT-IsabellaNeural", + "Gender": "Female", + "Locale": "it-IT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Isabella Online (Natural) - Italian (Italy)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ja-JP, KeitaNeural)", + "ShortName": "ja-JP-KeitaNeural", + "Gender": "Male", + "Locale": "ja-JP", + 
"SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Keita Online (Natural) - Japanese (Japan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ja-JP, NanamiNeural)", + "ShortName": "ja-JP-NanamiNeural", + "Gender": "Female", + "Locale": "ja-JP", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nanami Online (Natural) - Japanese (Japan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (jv-ID, DimasNeural)", + "ShortName": "jv-ID-DimasNeural", + "Gender": "Male", + "Locale": "jv-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dimas Online (Natural) - Javanese (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (jv-ID, SitiNeural)", + "ShortName": "jv-ID-SitiNeural", + "Gender": "Female", + "Locale": "jv-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Siti Online (Natural) - Javanese (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (kn-IN, GaganNeural)", + "ShortName": "kn-IN-GaganNeural", + "Gender": "Male", + "Locale": "kn-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gagan Online (Natural) - Kannada (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (kn-IN, SapnaNeural)", + "ShortName": "kn-IN-SapnaNeural", + "Gender": "Female", + "Locale": "kn-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sapna Online (Natural) - Kannada (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (kk-KZ, AigulNeural)", + "ShortName": "kk-KZ-AigulNeural", + "Gender": "Female", + "Locale": "kk-KZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aigul Online (Natural) - Kazakh (Kazakhstan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (kk-KZ, DauletNeural)", + "ShortName": "kk-KZ-DauletNeural", + "Gender": "Male", + "Locale": "kk-KZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Daulet Online (Natural) - Kazakh (Kazakhstan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (km-KH, PisethNeural)", + "ShortName": "km-KH-PisethNeural", + "Gender": "Male", + "Locale": "km-KH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Piseth 
Online (Natural) - Khmer (Cambodia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (km-KH, SreymomNeural)", + "ShortName": "km-KH-SreymomNeural", + "Gender": "Female", + "Locale": "km-KH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sreymom Online (Natural) - Khmer (Cambodia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ko-KR, HyunsuNeural)", + "ShortName": "ko-KR-HyunsuNeural", + "Gender": "Male", + "Locale": "ko-KR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hyunsu Online (Natural) - Korean (Korea)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ko-KR, InJoonNeural)", + "ShortName": "ko-KR-InJoonNeural", + "Gender": "Male", + "Locale": "ko-KR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft InJoon Online (Natural) - Korean (Korea)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ko-KR, SunHiNeural)", + "ShortName": "ko-KR-SunHiNeural", + "Gender": "Female", + "Locale": "ko-KR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft SunHi Online (Natural) - Korean (Korea)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lo-LA, ChanthavongNeural)", + "ShortName": "lo-LA-ChanthavongNeural", + "Gender": "Male", + "Locale": "lo-LA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Chanthavong Online (Natural) - Lao (Laos)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lo-LA, KeomanyNeural)", + "ShortName": "lo-LA-KeomanyNeural", + "Gender": "Female", + "Locale": "lo-LA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Keomany Online (Natural) - Lao (Laos)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lv-LV, EveritaNeural)", + "ShortName": "lv-LV-EveritaNeural", + "Gender": "Female", + "Locale": "lv-LV", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Everita Online (Natural) - Latvian (Latvia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lv-LV, NilsNeural)", + "ShortName": "lv-LV-NilsNeural", + "Gender": "Male", + "Locale": "lv-LV", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nils Online (Natural) - Latvian (Latvia)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lt-LT, LeonasNeural)", + "ShortName": "lt-LT-LeonasNeural", + "Gender": "Male", + "Locale": "lt-LT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Leonas Online (Natural) - Lithuanian (Lithuania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (lt-LT, OnaNeural)", + "ShortName": "lt-LT-OnaNeural", + "Gender": "Female", + "Locale": "lt-LT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ona Online (Natural) - Lithuanian (Lithuania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mk-MK, AleksandarNeural)", + "ShortName": "mk-MK-AleksandarNeural", + "Gender": "Male", + "Locale": "mk-MK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aleksandar Online (Natural) - Macedonian (Republic of North Macedonia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mk-MK, MarijaNeural)", + "ShortName": "mk-MK-MarijaNeural", + "Gender": "Female", + "Locale": "mk-MK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Marija Online (Natural) - Macedonian (Republic of North Macedonia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ms-MY, OsmanNeural)", + "ShortName": "ms-MY-OsmanNeural", + "Gender": "Male", + "Locale": "ms-MY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Osman Online (Natural) - Malay (Malaysia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ms-MY, YasminNeural)", + "ShortName": "ms-MY-YasminNeural", + "Gender": "Female", + "Locale": "ms-MY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yasmin Online (Natural) - Malay (Malaysia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ml-IN, MidhunNeural)", + "ShortName": "ml-IN-MidhunNeural", + "Gender": "Male", + "Locale": "ml-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Midhun Online (Natural) - Malayalam (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ml-IN, SobhanaNeural)", + "ShortName": "ml-IN-SobhanaNeural", + "Gender": "Female", + "Locale": "ml-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sobhana Online (Natural) - Malayalam (India)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mt-MT, GraceNeural)", + "ShortName": "mt-MT-GraceNeural", + "Gender": "Female", + "Locale": "mt-MT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Grace Online (Natural) - Maltese (Malta)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mt-MT, JosephNeural)", + "ShortName": "mt-MT-JosephNeural", + "Gender": "Male", + "Locale": "mt-MT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Joseph Online (Natural) - Maltese (Malta)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mr-IN, AarohiNeural)", + "ShortName": "mr-IN-AarohiNeural", + "Gender": "Female", + "Locale": "mr-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aarohi Online (Natural) - Marathi (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mr-IN, ManoharNeural)", + "ShortName": "mr-IN-ManoharNeural", + "Gender": "Male", + "Locale": "mr-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Manohar Online (Natural) - Marathi (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mn-MN, BataaNeural)", + "ShortName": "mn-MN-BataaNeural", + "Gender": "Male", + "Locale": "mn-MN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Bataa Online (Natural) - Mongolian (Mongolia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (mn-MN, YesuiNeural)", + "ShortName": "mn-MN-YesuiNeural", + "Gender": "Female", + "Locale": "mn-MN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yesui Online (Natural) - Mongolian (Mongolia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ne-NP, HemkalaNeural)", + "ShortName": "ne-NP-HemkalaNeural", + "Gender": "Female", + "Locale": "ne-NP", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Hemkala Online (Natural) - Nepali (Nepal)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ne-NP, SagarNeural)", + "ShortName": "ne-NP-SagarNeural", + "Gender": "Male", + "Locale": "ne-NP", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sagar Online (Natural) - Nepali (Nepal)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + 
"Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nb-NO, FinnNeural)", + "ShortName": "nb-NO-FinnNeural", + "Gender": "Male", + "Locale": "nb-NO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Finn Online (Natural) - Norwegian (Bokmål Norway)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (nb-NO, PernilleNeural)", + "ShortName": "nb-NO-PernilleNeural", + "Gender": "Female", + "Locale": "nb-NO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Pernille Online (Natural) - Norwegian (Bokmål, Norway)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ps-AF, GulNawazNeural)", + "ShortName": "ps-AF-GulNawazNeural", + "Gender": "Male", + "Locale": "ps-AF", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft GulNawaz Online (Natural) - Pashto (Afghanistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ps-AF, LatifaNeural)", + "ShortName": "ps-AF-LatifaNeural", + "Gender": "Female", + "Locale": "ps-AF", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Latifa Online (Natural) - Pashto (Afghanistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fa-IR, DilaraNeural)", + "ShortName": "fa-IR-DilaraNeural", + "Gender": "Female", + "Locale": "fa-IR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dilara Online (Natural) - Persian (Iran)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (fa-IR, FaridNeural)", + "ShortName": "fa-IR-FaridNeural", + "Gender": "Male", + "Locale": "fa-IR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Farid Online (Natural) - Persian (Iran)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pl-PL, MarekNeural)", + "ShortName": "pl-PL-MarekNeural", + "Gender": "Male", + "Locale": "pl-PL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Marek Online (Natural) - Polish (Poland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pl-PL, ZofiaNeural)", + "ShortName": "pl-PL-ZofiaNeural", + "Gender": "Female", + "Locale": "pl-PL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Zofia Online (Natural) - Polish (Poland)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech 
Text to Speech Voice (pt-BR, ThalitaNeural)", + "ShortName": "pt-BR-ThalitaNeural", + "Gender": "Female", + "Locale": "pt-BR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thalita Online (Natural) - Portuguese (Brazil)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pt-BR, AntonioNeural)", + "ShortName": "pt-BR-AntonioNeural", + "Gender": "Male", + "Locale": "pt-BR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Antonio Online (Natural) - Portuguese (Brazil)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pt-BR, FranciscaNeural)", + "ShortName": "pt-BR-FranciscaNeural", + "Gender": "Female", + "Locale": "pt-BR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Francisca Online (Natural) - Portuguese (Brazil)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pt-PT, DuarteNeural)", + "ShortName": "pt-PT-DuarteNeural", + "Gender": "Male", + "Locale": "pt-PT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Duarte Online (Natural) - Portuguese (Portugal)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (pt-PT, RaquelNeural)", + "ShortName": "pt-PT-RaquelNeural", + "Gender": "Female", + "Locale": "pt-PT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Raquel Online (Natural) - Portuguese (Portugal)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ro-RO, AlinaNeural)", + "ShortName": "ro-RO-AlinaNeural", + "Gender": "Female", + "Locale": "ro-RO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Alina Online (Natural) - Romanian (Romania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ro-RO, EmilNeural)", + "ShortName": "ro-RO-EmilNeural", + "Gender": "Male", + "Locale": "ro-RO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Emil Online (Natural) - Romanian (Romania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ru-RU, DmitryNeural)", + "ShortName": "ru-RU-DmitryNeural", + "Gender": "Male", + "Locale": "ru-RU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dmitry Online (Natural) - Russian (Russia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ru-RU, SvetlanaNeural)", + 
"ShortName": "ru-RU-SvetlanaNeural", + "Gender": "Female", + "Locale": "ru-RU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Svetlana Online (Natural) - Russian (Russia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sr-RS, NicholasNeural)", + "ShortName": "sr-RS-NicholasNeural", + "Gender": "Male", + "Locale": "sr-RS", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nicholas Online (Natural) - Serbian (Serbia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sr-RS, SophieNeural)", + "ShortName": "sr-RS-SophieNeural", + "Gender": "Female", + "Locale": "sr-RS", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sophie Online (Natural) - Serbian (Serbia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (si-LK, SameeraNeural)", + "ShortName": "si-LK-SameeraNeural", + "Gender": "Male", + "Locale": "si-LK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sameera Online (Natural) - Sinhala (Sri Lanka)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (si-LK, ThiliniNeural)", + "ShortName": "si-LK-ThiliniNeural", + "Gender": "Female", + "Locale": "si-LK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thilini Online (Natural) - Sinhala (Sri Lanka)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sk-SK, LukasNeural)", + "ShortName": "sk-SK-LukasNeural", + "Gender": "Male", + "Locale": "sk-SK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Lukas Online (Natural) - Slovak (Slovakia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sk-SK, ViktoriaNeural)", + "ShortName": "sk-SK-ViktoriaNeural", + "Gender": "Female", + "Locale": "sk-SK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Viktoria Online (Natural) - Slovak (Slovakia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sl-SI, PetraNeural)", + "ShortName": "sl-SI-PetraNeural", + "Gender": "Female", + "Locale": "sl-SI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Petra Online (Natural) - Slovenian (Slovenia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sl-SI, RokNeural)", + "ShortName": "sl-SI-RokNeural", + "Gender": "Male", + 
"Locale": "sl-SI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rok Online (Natural) - Slovenian (Slovenia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (so-SO, MuuseNeural)", + "ShortName": "so-SO-MuuseNeural", + "Gender": "Male", + "Locale": "so-SO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Muuse Online (Natural) - Somali (Somalia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (so-SO, UbaxNeural)", + "ShortName": "so-SO-UbaxNeural", + "Gender": "Female", + "Locale": "so-SO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ubax Online (Natural) - Somali (Somalia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-AR, ElenaNeural)", + "ShortName": "es-AR-ElenaNeural", + "Gender": "Female", + "Locale": "es-AR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Elena Online (Natural) - Spanish (Argentina)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-AR, TomasNeural)", + "ShortName": "es-AR-TomasNeural", + "Gender": "Male", + "Locale": "es-AR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Tomas Online (Natural) - Spanish (Argentina)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-BO, MarceloNeural)", + "ShortName": "es-BO-MarceloNeural", + "Gender": "Male", + "Locale": "es-BO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Marcelo Online (Natural) - Spanish (Bolivia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-BO, SofiaNeural)", + "ShortName": "es-BO-SofiaNeural", + "Gender": "Female", + "Locale": "es-BO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sofia Online (Natural) - Spanish (Bolivia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CL, CatalinaNeural)", + "ShortName": "es-CL-CatalinaNeural", + "Gender": "Female", + "Locale": "es-CL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Catalina Online (Natural) - Spanish (Chile)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CL, LorenzoNeural)", + "ShortName": "es-CL-LorenzoNeural", + "Gender": "Male", + "Locale": "es-CL", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + 
"FriendlyName": "Microsoft Lorenzo Online (Natural) - Spanish (Chile)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-ES, XimenaNeural)", + "ShortName": "es-ES-XimenaNeural", + "Gender": "Female", + "Locale": "es-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ximena Online (Natural) - Spanish (Colombia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CO, GonzaloNeural)", + "ShortName": "es-CO-GonzaloNeural", + "Gender": "Male", + "Locale": "es-CO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gonzalo Online (Natural) - Spanish (Colombia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CO, SalomeNeural)", + "ShortName": "es-CO-SalomeNeural", + "Gender": "Female", + "Locale": "es-CO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Salome Online (Natural) - Spanish (Colombia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CR, JuanNeural)", + "ShortName": "es-CR-JuanNeural", + "Gender": "Male", + "Locale": "es-CR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Juan Online (Natural) - Spanish (Costa Rica)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CR, MariaNeural)", + "ShortName": "es-CR-MariaNeural", + "Gender": "Female", + "Locale": "es-CR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Maria Online (Natural) - Spanish (Costa Rica)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CU, BelkysNeural)", + "ShortName": "es-CU-BelkysNeural", + "Gender": "Female", + "Locale": "es-CU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Belkys Online (Natural) - Spanish (Cuba)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-CU, ManuelNeural)", + "ShortName": "es-CU-ManuelNeural", + "Gender": "Male", + "Locale": "es-CU", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Manuel Online (Natural) - Spanish (Cuba)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-DO, EmilioNeural)", + "ShortName": "es-DO-EmilioNeural", + "Gender": "Male", + "Locale": "es-DO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Emilio Online (Natural) - Spanish (Dominican 
Republic)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-DO, RamonaNeural)", + "ShortName": "es-DO-RamonaNeural", + "Gender": "Female", + "Locale": "es-DO", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ramona Online (Natural) - Spanish (Dominican Republic)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-EC, AndreaNeural)", + "ShortName": "es-EC-AndreaNeural", + "Gender": "Female", + "Locale": "es-EC", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Andrea Online (Natural) - Spanish (Ecuador)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-EC, LuisNeural)", + "ShortName": "es-EC-LuisNeural", + "Gender": "Male", + "Locale": "es-EC", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Luis Online (Natural) - Spanish (Ecuador)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-SV, LorenaNeural)", + "ShortName": "es-SV-LorenaNeural", + "Gender": "Female", + "Locale": "es-SV", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Lorena Online (Natural) - Spanish (El Salvador)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-SV, RodrigoNeural)", + "ShortName": "es-SV-RodrigoNeural", + "Gender": "Male", + "Locale": "es-SV", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rodrigo Online (Natural) - Spanish (El Salvador)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-GQ, JavierNeural)", + "ShortName": "es-GQ-JavierNeural", + "Gender": "Male", + "Locale": "es-GQ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Javier Online (Natural) - Spanish (Equatorial Guinea)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-GQ, TeresaNeural)", + "ShortName": "es-GQ-TeresaNeural", + "Gender": "Female", + "Locale": "es-GQ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Teresa Online (Natural) - Spanish (Equatorial Guinea)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-GT, AndresNeural)", + "ShortName": "es-GT-AndresNeural", + "Gender": "Male", + "Locale": "es-GT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Andres Online (Natural) - Spanish (Guatemala)", + "Status": "GA", + 
"VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-GT, MartaNeural)", + "ShortName": "es-GT-MartaNeural", + "Gender": "Female", + "Locale": "es-GT", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Marta Online (Natural) - Spanish (Guatemala)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-HN, CarlosNeural)", + "ShortName": "es-HN-CarlosNeural", + "Gender": "Male", + "Locale": "es-HN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Carlos Online (Natural) - Spanish (Honduras)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-HN, KarlaNeural)", + "ShortName": "es-HN-KarlaNeural", + "Gender": "Female", + "Locale": "es-HN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Karla Online (Natural) - Spanish (Honduras)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-MX, DaliaNeural)", + "ShortName": "es-MX-DaliaNeural", + "Gender": "Female", + "Locale": "es-MX", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Dalia Online (Natural) - Spanish (Mexico)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-MX, JorgeNeural)", + "ShortName": "es-MX-JorgeNeural", + "Gender": "Male", + "Locale": "es-MX", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jorge Online (Natural) - Spanish (Mexico)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-NI, FedericoNeural)", + "ShortName": "es-NI-FedericoNeural", + "Gender": "Male", + "Locale": "es-NI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Federico Online (Natural) - Spanish (Nicaragua)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-NI, YolandaNeural)", + "ShortName": "es-NI-YolandaNeural", + "Gender": "Female", + "Locale": "es-NI", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Yolanda Online (Natural) - Spanish (Nicaragua)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PA, MargaritaNeural)", + "ShortName": "es-PA-MargaritaNeural", + "Gender": "Female", + "Locale": "es-PA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Margarita Online (Natural) - Spanish (Panama)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + 
"VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PA, RobertoNeural)", + "ShortName": "es-PA-RobertoNeural", + "Gender": "Male", + "Locale": "es-PA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Roberto Online (Natural) - Spanish (Panama)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PY, MarioNeural)", + "ShortName": "es-PY-MarioNeural", + "Gender": "Male", + "Locale": "es-PY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mario Online (Natural) - Spanish (Paraguay)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PY, TaniaNeural)", + "ShortName": "es-PY-TaniaNeural", + "Gender": "Female", + "Locale": "es-PY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Tania Online (Natural) - Spanish (Paraguay)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PE, AlexNeural)", + "ShortName": "es-PE-AlexNeural", + "Gender": "Male", + "Locale": "es-PE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Alex Online (Natural) - Spanish (Peru)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PE, CamilaNeural)", + "ShortName": "es-PE-CamilaNeural", + "Gender": "Female", + "Locale": "es-PE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Camila Online (Natural) - Spanish (Peru)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PR, KarinaNeural)", + "ShortName": "es-PR-KarinaNeural", + "Gender": "Female", + "Locale": "es-PR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Karina Online (Natural) - Spanish (Puerto Rico)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-PR, VictorNeural)", + "ShortName": "es-PR-VictorNeural", + "Gender": "Male", + "Locale": "es-PR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Victor Online (Natural) - Spanish (Puerto Rico)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-ES, AlvaroNeural)", + "ShortName": "es-ES-AlvaroNeural", + "Gender": "Male", + "Locale": "es-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Alvaro Online (Natural) - Spanish (Spain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": 
"Microsoft Server Speech Text to Speech Voice (es-ES, ElviraNeural)", + "ShortName": "es-ES-ElviraNeural", + "Gender": "Female", + "Locale": "es-ES", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Elvira Online (Natural) - Spanish (Spain)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-US, AlonsoNeural)", + "ShortName": "es-US-AlonsoNeural", + "Gender": "Male", + "Locale": "es-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Alonso Online (Natural) - Spanish (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-US, PalomaNeural)", + "ShortName": "es-US-PalomaNeural", + "Gender": "Female", + "Locale": "es-US", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Paloma Online (Natural) - Spanish (United States)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-UY, MateoNeural)", + "ShortName": "es-UY-MateoNeural", + "Gender": "Male", + "Locale": "es-UY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mateo Online (Natural) - Spanish (Uruguay)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-UY, ValentinaNeural)", + "ShortName": "es-UY-ValentinaNeural", + "Gender": "Female", + "Locale": "es-UY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Valentina Online (Natural) - Spanish (Uruguay)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-VE, PaolaNeural)", + "ShortName": "es-VE-PaolaNeural", + "Gender": "Female", + "Locale": "es-VE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Paola Online (Natural) - Spanish (Venezuela)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (es-VE, SebastianNeural)", + "ShortName": "es-VE-SebastianNeural", + "Gender": "Male", + "Locale": "es-VE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sebastian Online (Natural) - Spanish (Venezuela)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (su-ID, JajangNeural)", + "ShortName": "su-ID-JajangNeural", + "Gender": "Male", + "Locale": "su-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Jajang Online (Natural) - Sundanese (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech 
Voice (su-ID, TutiNeural)", + "ShortName": "su-ID-TutiNeural", + "Gender": "Female", + "Locale": "su-ID", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Tuti Online (Natural) - Sundanese (Indonesia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sw-KE, RafikiNeural)", + "ShortName": "sw-KE-RafikiNeural", + "Gender": "Male", + "Locale": "sw-KE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rafiki Online (Natural) - Swahili (Kenya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sw-KE, ZuriNeural)", + "ShortName": "sw-KE-ZuriNeural", + "Gender": "Female", + "Locale": "sw-KE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Zuri Online (Natural) - Swahili (Kenya)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sw-TZ, DaudiNeural)", + "ShortName": "sw-TZ-DaudiNeural", + "Gender": "Male", + "Locale": "sw-TZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Daudi Online (Natural) - Swahili (Tanzania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sw-TZ, RehemaNeural)", + "ShortName": "sw-TZ-RehemaNeural", + "Gender": "Female", + "Locale": "sw-TZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Rehema Online (Natural) - Swahili (Tanzania)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sv-SE, MattiasNeural)", + "ShortName": "sv-SE-MattiasNeural", + "Gender": "Male", + "Locale": "sv-SE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mattias Online (Natural) - Swedish (Sweden)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (sv-SE, SofieNeural)", + "ShortName": "sv-SE-SofieNeural", + "Gender": "Female", + "Locale": "sv-SE", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sofie Online (Natural) - Swedish (Sweden)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-IN, PallaviNeural)", + "ShortName": "ta-IN-PallaviNeural", + "Gender": "Female", + "Locale": "ta-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Pallavi Online (Natural) - Tamil (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-IN, ValluvarNeural)", + "ShortName": "ta-IN-ValluvarNeural", + "Gender": 
"Male", + "Locale": "ta-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Valluvar Online (Natural) - Tamil (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-MY, KaniNeural)", + "ShortName": "ta-MY-KaniNeural", + "Gender": "Female", + "Locale": "ta-MY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Kani Online (Natural) - Tamil (Malaysia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-MY, SuryaNeural)", + "ShortName": "ta-MY-SuryaNeural", + "Gender": "Male", + "Locale": "ta-MY", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Surya Online (Natural) - Tamil (Malaysia)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-SG, AnbuNeural)", + "ShortName": "ta-SG-AnbuNeural", + "Gender": "Male", + "Locale": "ta-SG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Anbu Online (Natural) - Tamil (Singapore)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-SG, VenbaNeural)", + "ShortName": "ta-SG-VenbaNeural", + "Gender": "Female", + "Locale": "ta-SG", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Venba Online (Natural) - Tamil (Singapore)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-LK, KumarNeural)", + "ShortName": "ta-LK-KumarNeural", + "Gender": "Male", + "Locale": "ta-LK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Kumar Online (Natural) - Tamil (Sri Lanka)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ta-LK, SaranyaNeural)", + "ShortName": "ta-LK-SaranyaNeural", + "Gender": "Female", + "Locale": "ta-LK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Saranya Online (Natural) - Tamil (Sri Lanka)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (te-IN, MohanNeural)", + "ShortName": "te-IN-MohanNeural", + "Gender": "Male", + "Locale": "te-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Mohan Online (Natural) - Telugu (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (te-IN, ShrutiNeural)", + "ShortName": "te-IN-ShrutiNeural", + "Gender": "Female", + "Locale": "te-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": 
"Microsoft Shruti Online (Natural) - Telugu (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (th-TH, NiwatNeural)", + "ShortName": "th-TH-NiwatNeural", + "Gender": "Male", + "Locale": "th-TH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Niwat Online (Natural) - Thai (Thailand)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (th-TH, PremwadeeNeural)", + "ShortName": "th-TH-PremwadeeNeural", + "Gender": "Female", + "Locale": "th-TH", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Premwadee Online (Natural) - Thai (Thailand)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (tr-TR, AhmetNeural)", + "ShortName": "tr-TR-AhmetNeural", + "Gender": "Male", + "Locale": "tr-TR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ahmet Online (Natural) - Turkish (Turkey)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (tr-TR, EmelNeural)", + "ShortName": "tr-TR-EmelNeural", + "Gender": "Female", + "Locale": "tr-TR", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Emel Online (Natural) - Turkish (Turkey)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (uk-UA, OstapNeural)", + "ShortName": "uk-UA-OstapNeural", + "Gender": "Male", + "Locale": "uk-UA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Ostap Online (Natural) - Ukrainian (Ukraine)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (uk-UA, PolinaNeural)", + "ShortName": "uk-UA-PolinaNeural", + "Gender": "Female", + "Locale": "uk-UA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Polina Online (Natural) - Ukrainian (Ukraine)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ur-IN, GulNeural)", + "ShortName": "ur-IN-GulNeural", + "Gender": "Female", + "Locale": "ur-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Gul Online (Natural) - Urdu (India)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ur-IN, SalmanNeural)", + "ShortName": "ur-IN-SalmanNeural", + "Gender": "Male", + "Locale": "ur-IN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Salman Online (Natural) - Urdu (India)", + "Status": "GA", + "VoiceTag": { + 
"ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ur-PK, AsadNeural)", + "ShortName": "ur-PK-AsadNeural", + "Gender": "Male", + "Locale": "ur-PK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Asad Online (Natural) - Urdu (Pakistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (ur-PK, UzmaNeural)", + "ShortName": "ur-PK-UzmaNeural", + "Gender": "Female", + "Locale": "ur-PK", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Uzma Online (Natural) - Urdu (Pakistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (uz-UZ, MadinaNeural)", + "ShortName": "uz-UZ-MadinaNeural", + "Gender": "Female", + "Locale": "uz-UZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Madina Online (Natural) - Uzbek (Uzbekistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (uz-UZ, SardorNeural)", + "ShortName": "uz-UZ-SardorNeural", + "Gender": "Male", + "Locale": "uz-UZ", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Sardor Online (Natural) - Uzbek (Uzbekistan)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (vi-VN, HoaiMyNeural)", + "ShortName": "vi-VN-HoaiMyNeural", + "Gender": "Female", + "Locale": "vi-VN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft HoaiMy Online (Natural) - Vietnamese (Vietnam)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (vi-VN, NamMinhNeural)", + "ShortName": "vi-VN-NamMinhNeural", + "Gender": "Male", + "Locale": "vi-VN", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft NamMinh Online (Natural) - Vietnamese (Vietnam)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (cy-GB, AledNeural)", + "ShortName": "cy-GB-AledNeural", + "Gender": "Male", + "Locale": "cy-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Aled Online (Natural) - Welsh (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (cy-GB, NiaNeural)", + "ShortName": "cy-GB-NiaNeural", + "Gender": "Female", + "Locale": "cy-GB", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Nia Online (Natural) - Welsh (United Kingdom)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + 
"Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zu-ZA, ThandoNeural)", + "ShortName": "zu-ZA-ThandoNeural", + "Gender": "Female", + "Locale": "zu-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Thando Online (Natural) - Zulu (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + }, + { + "Name": "Microsoft Server Speech Text to Speech Voice (zu-ZA, ThembaNeural)", + "ShortName": "zu-ZA-ThembaNeural", + "Gender": "Male", + "Locale": "zu-ZA", + "SuggestedCodec": "audio-24khz-48kbitrate-mono-mp3", + "FriendlyName": "Microsoft Themba Online (Natural) - Zulu (South Africa)", + "Status": "GA", + "VoiceTag": { + "ContentCategories": [ + "General" + ], + "VoicePersonalities": [ + "Friendly", + "Positive" + ] + } + } +] \ No newline at end of file diff --git a/rvc/lib/utils.py b/rvc/lib/utils.py new file mode 100644 index 00000000..c514e8bb --- /dev/null +++ b/rvc/lib/utils.py @@ -0,0 +1,137 @@ +import os, sys +import librosa +import soundfile as sf +import numpy as np +import re +import unicodedata +import wget +from pydub import AudioSegment +from torch import nn + +import logging +from transformers import HubertModel +import warnings + +# Remove this to see warnings about transformers models +warnings.filterwarnings("ignore") + +logging.getLogger("fairseq").setLevel(logging.ERROR) +logging.getLogger("faiss.loader").setLevel(logging.ERROR) +logging.getLogger("transformers").setLevel(logging.ERROR) +logging.getLogger("torch").setLevel(logging.ERROR) + +now_dir = os.getcwd() +sys.path.append(now_dir) + +base_path = os.path.join(now_dir, "rvc", "models", "formant", "stftpitchshift") +stft = base_path + ".exe" if sys.platform == "win32" else base_path + + +class HubertModelWithFinalProj(HubertModel): + def __init__(self, config): + super().__init__(config) + self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size) + + +def load_audio(file, sample_rate): + try: + file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + audio, sr = sf.read(file) + if len(audio.shape) > 1: + audio = librosa.to_mono(audio.T) + if sr != sample_rate: + audio = librosa.resample(audio, orig_sr=sr, target_sr=sample_rate) + except Exception as error: + raise RuntimeError(f"An error occurred loading the audio: {error}") + + return audio.flatten() + + +def load_audio_infer( + file, + sample_rate, + **kwargs, +): + formant_shifting = kwargs.get("formant_shifting", False) + try: + file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") + if not os.path.isfile(file): + raise FileNotFoundError(f"File not found: {file}") + audio, sr = sf.read(file) + if len(audio.shape) > 1: + audio = librosa.to_mono(audio.T) + if sr != sample_rate: + audio = librosa.resample(audio, orig_sr=sr, target_sr=sample_rate) + if formant_shifting: + formant_qfrency = kwargs.get("formant_qfrency", 0.8) + formant_timbre = kwargs.get("formant_timbre", 0.8) + + from stftpitchshift import StftPitchShift + + pitchshifter = StftPitchShift(1024, 32, sample_rate) + audio = pitchshifter.shiftpitch( + audio, + factors=1, + quefrency=formant_qfrency * 1e-3, + distortion=formant_timbre, + ) + except Exception as error: + raise RuntimeError(f"An error occurred loading the audio: {error}") + return np.array(audio).flatten() + + +def format_title(title): + formatted_title = ( + unicodedata.normalize("NFKD", title).encode("ascii", 
"ignore").decode("utf-8") + ) + formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title) + formatted_title = re.sub(r"[^\w\s.-]", "", formatted_title) + formatted_title = re.sub(r"\s+", "_", formatted_title) + return formatted_title + + +def load_embedding(embedder_model, custom_embedder=None): + embedder_root = os.path.join(now_dir, "rvc", "models", "embedders") + embedding_list = { + "contentvec": os.path.join(embedder_root, "contentvec"), + "chinese-hubert-base": os.path.join(embedder_root, "chinese_hubert_base"), + "japanese-hubert-base": os.path.join(embedder_root, "japanese_hubert_base"), + "korean-hubert-base": os.path.join(embedder_root, "korean_hubert_base"), + } + + online_embedders = { + "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/pytorch_model.bin", + "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/pytorch_model.bin", + "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/pytorch_model.bin", + "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/pytorch_model.bin", + } + + config_files = { + "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/config.json", + "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/config.json", + "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/config.json", + "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/config.json", + } + + if embedder_model == "custom": + if os.path.exists(custom_embedder): + model_path = custom_embedder + else: + print(f"Custom embedder not found: {custom_embedder}, using contentvec") + model_path = embedding_list["contentvec"] + else: + model_path = embedding_list[embedder_model] + bin_file = os.path.join(model_path, "pytorch_model.bin") + json_file = os.path.join(model_path, "config.json") + os.makedirs(model_path, exist_ok=True) + if not os.path.exists(bin_file): + url = online_embedders[embedder_model] + print(f"Downloading {url} to {model_path}...") + wget.download(url, out=bin_file) + if not os.path.exists(json_file): + url = config_files[embedder_model] + print(f"Downloading {url} to {model_path}...") + wget.download(url, out=json_file) + + models = HubertModelWithFinalProj.from_pretrained(model_path) + return models diff --git a/rvc/lib/zluda.py b/rvc/lib/zluda.py new file mode 100644 index 00000000..482009cc --- /dev/null +++ b/rvc/lib/zluda.py @@ -0,0 +1,43 @@ +import torch + +if torch.cuda.is_available() and torch.cuda.get_device_name().endswith("[ZLUDA]"): + _torch_stft = torch.stft + + def z_stft( + audio: torch.Tensor, + n_fft: int, + hop_length: int = None, + win_length: int = None, + window: torch.Tensor = None, + center: bool = True, + pad_mode: str = "reflect", + normalized: bool = False, + onesided: bool = None, + return_complex: bool = None, + ): + sd = audio.device + return _torch_stft( + audio.to("cpu"), + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window.to("cpu"), + center=center, + pad_mode=pad_mode, + normalized=normalized, + onesided=onesided, + return_complex=return_complex, + ).to(sd) + + def z_jit(f, *_, **__): + f.graph = torch._C.Graph() + return f + + # hijacks + 
+    torch.stft = z_stft
+    torch.jit.script = z_jit
+    # disabling unsupported cudnn
+    torch.backends.cudnn.enabled = False
+    torch.backends.cuda.enable_flash_sdp(False)
+    torch.backends.cuda.enable_math_sdp(True)
+    torch.backends.cuda.enable_mem_efficient_sdp(False)

From b7dc4b1ad1a4d7b62047dcd6c8fc8c02723789dd Mon Sep 17 00:00:00 2001
From: kiurobox
Date: Tue, 4 Feb 2025 03:05:02 -0800
Subject: [PATCH 7/9] IMPORT

---
 download_audio.py |   68 ++
 install.bat       |   87 +++
 requirements.txt  |   43 +-
 rvc_cli.py        | 1897 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 2082 insertions(+), 13 deletions(-)
 create mode 100644 download_audio.py
 create mode 100644 install.bat
 create mode 100644 rvc_cli.py

diff --git a/download_audio.py b/download_audio.py
new file mode 100644
index 00000000..baffc000
--- /dev/null
+++ b/download_audio.py
@@ -0,0 +1,68 @@
+import os
+import argparse
+import yt_dlp
+
+
+class MyLogger(object):
+    def debug(self, msg):
+        print("[DEBUG]", msg)
+
+    def warning(self, msg):
+        print("[WARNING]", msg)
+
+    def error(self, msg):
+        print("[ERROR]", msg)
+
+
+def progress_hook(info):
+    status = info.get("status")
+    if status == "downloading":
+        downloaded = info.get("downloaded_bytes", 0)
+        total = info.get("total_bytes", info.get("total_bytes_estimate", 0))
+        if total:
+            percent = downloaded / total * 100
+            print(f"[DEBUG] Downloading: {percent:.2f}%")
+    elif status == "finished":
+        print("[DEBUG] Download finished, now converting to WAV...")
+
+
+def download_youtube_audio(url, output_path):
+    os.makedirs(output_path, exist_ok=True)
+
+    outtmpl = os.path.join(output_path, "%(title)s.%(ext)s")
+
+    ydl_opts = {
+        "format": "bestaudio/best",
+        "outtmpl": outtmpl,
+        "logger": MyLogger(),
+        "progress_hooks": [progress_hook],
+        "postprocessors": [
+            {
+                "key": "FFmpegExtractAudio",
+                "preferredcodec": "wav",
+                "preferredquality": "192",
+            }
+        ],
+        "verbose": True,
+    }
+
+    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+        ydl.download([url])
+
+
+# Command-line interface for local usage.
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Download a YouTube video's audio as WAV using yt-dlp with debugging output."
+    )
+    parser.add_argument("url", help="The URL of the YouTube video to download.")
+    parser.add_argument(
+        "--output",
+        default="downloads",
+        help="Custom output directory (default: 'downloads').",
+    )
+    args = parser.parse_args()
+    download_youtube_audio(args.url, args.output)
+
+
+# made by NeoDev
diff --git a/install.bat b/install.bat
new file mode 100644
index 00000000..ccec91bb
--- /dev/null
+++ b/install.bat
@@ -0,0 +1,87 @@
+@echo off
+setlocal enabledelayedexpansion
+title RVC CLI Installer
+
+echo Welcome to the RVC CLI Installer!
+echo.
+
+set "INSTALL_DIR=%cd%"
+set "MINICONDA_DIR=%UserProfile%\Miniconda3"
+set "ENV_DIR=%INSTALL_DIR%\env"
+set "MINICONDA_URL=https://repo.anaconda.com/miniconda/Miniconda3-py39_23.9.0-0-Windows-x86_64.exe"
+set "CONDA_EXE=%MINICONDA_DIR%\Scripts\conda.exe"
+
+call :cleanup
+call :install_miniconda
+call :create_conda_env
+call :install_dependencies
+
+echo RVC CLI has been installed successfully!
+echo.
+pause
+exit /b 0
+
+:cleanup
+echo Cleaning up unnecessary files...
+for %%F in (Makefile Dockerfile docker-compose.yaml *.sh) do if exist "%%F" del "%%F"
+echo Cleanup complete.
+echo.
+exit /b 0
+
+:install_miniconda
+if exist "%CONDA_EXE%" (
+    echo Miniconda already installed. Skipping installation.
+    exit /b 0
+)
+
+echo Miniconda not found. Starting download and installation...
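+REM Fetch the pinned Miniconda build with PowerShell, then run its NSIS
+REM installer silently: /InstallationType=JustMe and /RegisterPython=0 keep the
+REM install user-local and unregistered, /S is silent mode, and /D= (which must
+REM come last) sets the target directory.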
+powershell -Command "& {Invoke-WebRequest -Uri '%MINICONDA_URL%' -OutFile 'miniconda.exe'}"
+if not exist "miniconda.exe" goto :download_error
+
+start /wait "" miniconda.exe /InstallationType=JustMe /RegisterPython=0 /S /D=%MINICONDA_DIR%
+if errorlevel 1 goto :install_error
+
+del miniconda.exe
+echo Miniconda installation complete.
+echo.
+exit /b 0
+
+:create_conda_env
+echo Creating Conda environment...
+call "%MINICONDA_DIR%\_conda.exe" create --no-shortcuts -y -k --prefix "%ENV_DIR%" python=3.9
+if errorlevel 1 goto :error
+echo Conda environment created successfully.
+echo.
+
+if exist "%ENV_DIR%\python.exe" (
+    echo Installing specific pip version...
+    "%ENV_DIR%\python.exe" -m pip install "pip<24.1"
+    if errorlevel 1 goto :error
+    echo Pip installation complete.
+    echo.
+)
+exit /b 0
+
+:install_dependencies
+echo Installing dependencies...
+call "%MINICONDA_DIR%\condabin\conda.bat" activate "%ENV_DIR%" || goto :error
+pip install --upgrade setuptools || goto :error
+pip install --no-cache-dir -r "%INSTALL_DIR%\requirements.txt" || goto :error
+pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --upgrade --index-url https://download.pytorch.org/whl/cu121 || goto :error
+call "%MINICONDA_DIR%\condabin\conda.bat" deactivate
+echo Dependencies installation complete.
+echo.
+exit /b 0
+
+:download_error
+echo Download failed. Please check your internet connection and try again.
+goto :error
+
+:install_error
+echo Miniconda installation failed.
+goto :error
+
+:error
+echo An error occurred during installation. Please check the output above for details.
+pause
+exit /b 1
diff --git a/requirements.txt b/requirements.txt
index ebfaa174..07150ff9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,16 +1,33 @@
-av
+pip>=23.3; sys_platform == 'darwin'
+wheel; sys_platform == 'darwin'
+PyYAML; sys_platform == 'darwin'
+tqdm
+wget
 ffmpeg-python>=0.2.0
-faiss_cpu==1.7.3
-praat-parselmouth==0.4.2
-pyworld==0.3.4
-resampy==0.4.2
-fairseq==0.12.2
-pydub==0.25.1
-einops
-local_attention
-torchcrepe==0.0.20
-torchfcpe
+faiss-cpu==1.7.3
+soundfile==0.12.1
+noisereduce
+pedalboard
+stftpitchshift
 yt-dlp
 audio-separator[gpu]
-edge-tts
-gradio==4.40.0
+omegaconf>=2.0.6; sys_platform == 'darwin'
+numba; sys_platform == 'linux'
+numba==0.57.0; sys_platform == 'darwin' or sys_platform == 'win32'
+torchaudio==2.3.1
+torchvision==0.18.1
+torchcrepe==0.0.23
+torchfcpe
+libf0
+transformers==4.44.2
+matplotlib==3.7.2
+tensorboard
+gradio==4.43.0
+certifi>=2023.07.22; sys_platform == 'darwin'
+antlr4-python3-runtime==4.8; sys_platform == 'darwin'
+tensorboardX
+edge-tts==6.1.9
+pypresence
+beautifulsoup4
+flask
+
diff --git a/rvc_cli.py b/rvc_cli.py
new file mode 100644
index 00000000..d0bd406a
--- /dev/null
+++ b/rvc_cli.py
@@ -0,0 +1,1897 @@
+import os
+import sys
+import json
+import argparse
+import subprocess
+from functools import lru_cache
+from distutils.util import strtobool
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+
+current_script_directory = os.path.dirname(os.path.realpath(__file__))
+logs_path = os.path.join(current_script_directory, "logs")
+
+from rvc.lib.tools.analyzer import analyze_audio
+from rvc.lib.tools.launch_tensorboard import launch_tensorboard_pipeline
+from rvc.lib.tools.model_download import model_download_pipeline
+
+python = sys.executable
+
+
+# Get TTS Voices -> https://speech.platform.bing.com/consumer/speech/synthesize/readaloud/voices/list?trustedclienttoken=6A5AA1D4EAFF4E9FB37E23D68491D6F4
+@lru_cache(maxsize=1)  # Cache only one result since
the file is static +def load_voices_data(): + with open( + os.path.join("rvc", "lib", "tools", "tts_voices.json"), "r", encoding="utf-8" + ) as file: + return json.load(file) + + +voices_data = load_voices_data() +locales = list({voice["ShortName"] for voice in voices_data}) + + +@lru_cache(maxsize=None) +def import_voice_converter(): + from rvc.infer.infer import VoiceConverter + + return VoiceConverter() + + +@lru_cache(maxsize=1) +def get_config(): + from rvc.configs.config import Config + + return Config() + + +# Infer +def run_infer_script( + pitch: int, + filter_radius: int, + index_rate: float, + volume_envelope: int, + protect: float, + hop_length: int, + f0_method: str, + input_path: str, + output_path: str, + pth_path: str, + index_path: str, + split_audio: bool, + f0_autotune: bool, + f0_autotune_strength: float, + clean_audio: bool, + clean_strength: float, + export_format: str, + f0_file: str, + embedder_model: str, + embedder_model_custom: str = None, + formant_shifting: bool = False, + formant_qfrency: float = 1.0, + formant_timbre: float = 1.0, + post_process: bool = False, + reverb: bool = False, + pitch_shift: bool = False, + limiter: bool = False, + gain: bool = False, + distortion: bool = False, + chorus: bool = False, + bitcrush: bool = False, + clipping: bool = False, + compressor: bool = False, + delay: bool = False, + reverb_room_size: float = 0.5, + reverb_damping: float = 0.5, + reverb_wet_gain: float = 0.5, + reverb_dry_gain: float = 0.5, + reverb_width: float = 0.5, + reverb_freeze_mode: float = 0.5, + pitch_shift_semitones: float = 0.0, + limiter_threshold: float = -6, + limiter_release_time: float = 0.01, + gain_db: float = 0.0, + distortion_gain: float = 25, + chorus_rate: float = 1.0, + chorus_depth: float = 0.25, + chorus_center_delay: float = 7, + chorus_feedback: float = 0.0, + chorus_mix: float = 0.5, + bitcrush_bit_depth: int = 8, + clipping_threshold: float = -6, + compressor_threshold: float = 0, + compressor_ratio: float = 1, + compressor_attack: float = 1.0, + compressor_release: float = 100, + delay_seconds: float = 0.5, + delay_feedback: float = 0.0, + delay_mix: float = 0.5, + sid: int = 0, +): + kwargs = { + "audio_input_path": input_path, + "audio_output_path": output_path, + "model_path": pth_path, + "index_path": index_path, + "pitch": pitch, + "filter_radius": filter_radius, + "index_rate": index_rate, + "volume_envelope": volume_envelope, + "protect": protect, + "hop_length": hop_length, + "f0_method": f0_method, + "pth_path": pth_path, + "index_path": index_path, + "split_audio": split_audio, + "f0_autotune": f0_autotune, + "f0_autotune_strength": f0_autotune_strength, + "clean_audio": clean_audio, + "clean_strength": clean_strength, + "export_format": export_format, + "f0_file": f0_file, + "embedder_model": embedder_model, + "embedder_model_custom": embedder_model_custom, + "post_process": post_process, + "formant_shifting": formant_shifting, + "formant_qfrency": formant_qfrency, + "formant_timbre": formant_timbre, + "reverb": reverb, + "pitch_shift": pitch_shift, + "limiter": limiter, + "gain": gain, + "distortion": distortion, + "chorus": chorus, + "bitcrush": bitcrush, + "clipping": clipping, + "compressor": compressor, + "delay": delay, + "reverb_room_size": reverb_room_size, + "reverb_damping": reverb_damping, + "reverb_wet_level": reverb_wet_gain, + "reverb_dry_level": reverb_dry_gain, + "reverb_width": reverb_width, + "reverb_freeze_mode": reverb_freeze_mode, + "pitch_shift_semitones": pitch_shift_semitones, + "limiter_threshold": 
limiter_threshold, + "limiter_release": limiter_release_time, + "gain_db": gain_db, + "distortion_gain": distortion_gain, + "chorus_rate": chorus_rate, + "chorus_depth": chorus_depth, + "chorus_delay": chorus_center_delay, + "chorus_feedback": chorus_feedback, + "chorus_mix": chorus_mix, + "bitcrush_bit_depth": bitcrush_bit_depth, + "clipping_threshold": clipping_threshold, + "compressor_threshold": compressor_threshold, + "compressor_ratio": compressor_ratio, + "compressor_attack": compressor_attack, + "compressor_release": compressor_release, + "delay_seconds": delay_seconds, + "delay_feedback": delay_feedback, + "delay_mix": delay_mix, + "sid": sid, + } + infer_pipeline = import_voice_converter() + infer_pipeline.convert_audio( + **kwargs, + ) + return f"File {input_path} inferred successfully.", output_path.replace( + ".wav", f".{export_format.lower()}" + ) + + +# Batch infer +def run_batch_infer_script( + pitch: int, + filter_radius: int, + index_rate: float, + volume_envelope: int, + protect: float, + hop_length: int, + f0_method: str, + input_folder: str, + output_folder: str, + pth_path: str, + index_path: str, + split_audio: bool, + f0_autotune: bool, + f0_autotune_strength: float, + clean_audio: bool, + clean_strength: float, + export_format: str, + f0_file: str, + embedder_model: str, + embedder_model_custom: str = None, + formant_shifting: bool = False, + formant_qfrency: float = 1.0, + formant_timbre: float = 1.0, + post_process: bool = False, + reverb: bool = False, + pitch_shift: bool = False, + limiter: bool = False, + gain: bool = False, + distortion: bool = False, + chorus: bool = False, + bitcrush: bool = False, + clipping: bool = False, + compressor: bool = False, + delay: bool = False, + reverb_room_size: float = 0.5, + reverb_damping: float = 0.5, + reverb_wet_gain: float = 0.5, + reverb_dry_gain: float = 0.5, + reverb_width: float = 0.5, + reverb_freeze_mode: float = 0.5, + pitch_shift_semitones: float = 0.0, + limiter_threshold: float = -6, + limiter_release_time: float = 0.01, + gain_db: float = 0.0, + distortion_gain: float = 25, + chorus_rate: float = 1.0, + chorus_depth: float = 0.25, + chorus_center_delay: float = 7, + chorus_feedback: float = 0.0, + chorus_mix: float = 0.5, + bitcrush_bit_depth: int = 8, + clipping_threshold: float = -6, + compressor_threshold: float = 0, + compressor_ratio: float = 1, + compressor_attack: float = 1.0, + compressor_release: float = 100, + delay_seconds: float = 0.5, + delay_feedback: float = 0.0, + delay_mix: float = 0.5, + sid: int = 0, +): + kwargs = { + "audio_input_paths": input_folder, + "audio_output_path": output_folder, + "model_path": pth_path, + "index_path": index_path, + "pitch": pitch, + "filter_radius": filter_radius, + "index_rate": index_rate, + "volume_envelope": volume_envelope, + "protect": protect, + "hop_length": hop_length, + "f0_method": f0_method, + "pth_path": pth_path, + "index_path": index_path, + "split_audio": split_audio, + "f0_autotune": f0_autotune, + "f0_autotune_strength": f0_autotune_strength, + "clean_audio": clean_audio, + "clean_strength": clean_strength, + "export_format": export_format, + "f0_file": f0_file, + "embedder_model": embedder_model, + "embedder_model_custom": embedder_model_custom, + "post_process": post_process, + "formant_shifting": formant_shifting, + "formant_qfrency": formant_qfrency, + "formant_timbre": formant_timbre, + "reverb": reverb, + "pitch_shift": pitch_shift, + "limiter": limiter, + "gain": gain, + "distortion": distortion, + "chorus": chorus, + "bitcrush": 
bitcrush,
+        "clipping": clipping,
+        "compressor": compressor,
+        "delay": delay,
+        "reverb_room_size": reverb_room_size,
+        "reverb_damping": reverb_damping,
+        "reverb_wet_level": reverb_wet_gain,
+        "reverb_dry_level": reverb_dry_gain,
+        "reverb_width": reverb_width,
+        "reverb_freeze_mode": reverb_freeze_mode,
+        "pitch_shift_semitones": pitch_shift_semitones,
+        "limiter_threshold": limiter_threshold,
+        "limiter_release": limiter_release_time,
+        "gain_db": gain_db,
+        "distortion_gain": distortion_gain,
+        "chorus_rate": chorus_rate,
+        "chorus_depth": chorus_depth,
+        "chorus_delay": chorus_center_delay,
+        "chorus_feedback": chorus_feedback,
+        "chorus_mix": chorus_mix,
+        "bitcrush_bit_depth": bitcrush_bit_depth,
+        "clipping_threshold": clipping_threshold,
+        "compressor_threshold": compressor_threshold,
+        "compressor_ratio": compressor_ratio,
+        "compressor_attack": compressor_attack,
+        "compressor_release": compressor_release,
+        "delay_seconds": delay_seconds,
+        "delay_feedback": delay_feedback,
+        "delay_mix": delay_mix,
+        "sid": sid,
+    }
+    infer_pipeline = import_voice_converter()
+    infer_pipeline.convert_audio_batch(
+        **kwargs,
+    )
+
+    return f"Files from {input_folder} inferred successfully."
+
+
+# TTS
+def run_tts_script(
+    tts_file: str,
+    tts_text: str,
+    tts_voice: str,
+    tts_rate: int,
+    pitch: int,
+    filter_radius: int,
+    index_rate: float,
+    volume_envelope: int,
+    protect: float,
+    hop_length: int,
+    f0_method: str,
+    output_tts_path: str,
+    output_rvc_path: str,
+    pth_path: str,
+    index_path: str,
+    split_audio: bool,
+    f0_autotune: bool,
+    f0_autotune_strength: float,
+    clean_audio: bool,
+    clean_strength: float,
+    export_format: str,
+    f0_file: str,
+    embedder_model: str,
+    embedder_model_custom: str = None,
+    sid: int = 0,
+):
+
+    tts_script_path = os.path.join("rvc", "lib", "tools", "tts.py")
+
+    if os.path.exists(output_tts_path):
+        os.remove(output_tts_path)
+
+    command_tts = [
+        *map(
+            str,
+            [
+                python,
+                tts_script_path,
+                tts_file,
+                tts_text,
+                tts_voice,
+                tts_rate,
+                output_tts_path,
+            ],
+        ),
+    ]
+    subprocess.run(command_tts)
+    infer_pipeline = import_voice_converter()
+    infer_pipeline.convert_audio(
+        pitch=pitch,
+        filter_radius=filter_radius,
+        index_rate=index_rate,
+        volume_envelope=volume_envelope,
+        protect=protect,
+        hop_length=hop_length,
+        f0_method=f0_method,
+        audio_input_path=output_tts_path,
+        audio_output_path=output_rvc_path,
+        model_path=pth_path,
+        index_path=index_path,
+        split_audio=split_audio,
+        f0_autotune=f0_autotune,
+        f0_autotune_strength=f0_autotune_strength,
+        clean_audio=clean_audio,
+        clean_strength=clean_strength,
+        export_format=export_format,
+        f0_file=f0_file,
+        embedder_model=embedder_model,
+        embedder_model_custom=embedder_model_custom,
+        sid=sid,
+        formant_shifting=None,
+        formant_qfrency=None,
+        formant_timbre=None,
+        post_process=None,
+        reverb=None,
+        pitch_shift=None,
+        limiter=None,
+        gain=None,
+        distortion=None,
+        chorus=None,
+        bitcrush=None,
+        clipping=None,
+        compressor=None,
+        delay=None,
+        sliders=None,
+    )
+
+    return f"Text {tts_text} synthesized successfully.", output_rvc_path.replace(
+        ".wav", f".{export_format.lower()}"
+    )
+
+
+# Model information
+def run_model_information_script(pth_path: str):
+    model_info = model_information(pth_path)
+    print(model_info)
+    return model_info
+
+
+# Model blender
+def run_model_blender_script(
+    model_name: str, pth_path_1: str, pth_path_2: str, ratio: float
+):
+    message, model_blended = model_blender(model_name, pth_path_1, pth_path_2, ratio)
+    return message, model_blended
+
+
+# Tensorboard
+def run_tensorboard_script():
+    launch_tensorboard_pipeline()
+
+
+# Download
+def run_download_script(model_link: str):
+    model_download_pipeline(model_link)
+    return "Model downloaded successfully."
+
+
+# Audio analyzer
+def run_audio_analyzer_script(
+    input_path: str, save_plot_path: str = "logs/audio_analysis.png"
+):
+    audio_info, plot_path = analyze_audio(input_path, save_plot_path)
+    print(
+        f"Audio info of {input_path}: {audio_info}",
+        f"Audio file {input_path} analyzed successfully. Plot saved at: {plot_path}",
+    )
+    return audio_info, plot_path
+
+
+# Parse arguments
+def parse_arguments():
+    parser = argparse.ArgumentParser(
+        description="Run rvc_cli.py with specific parameters."
+    )
+    subparsers = parser.add_subparsers(
+        title="subcommands", dest="mode", help="Choose a mode"
+    )
+
+    # Parser for 'infer' mode
+    infer_parser = subparsers.add_parser("infer", help="Run inference")
+    pitch_description = (
+        "Set the pitch of the audio. Higher values result in a higher pitch."
+    )
+    infer_parser.add_argument(
+        "--pitch",
+        type=int,
+        help=pitch_description,
+        choices=range(-24, 25),
+        default=0,
+    )
+    filter_radius_description = "Apply median filtering to the extracted pitch values if this value is greater than or equal to three. This can help reduce breathiness in the output audio."
+    infer_parser.add_argument(
+        "--filter_radius",
+        type=int,
+        help=filter_radius_description,
+        choices=range(11),
+        default=3,
+    )
+    index_rate_description = "Control the influence of the index file on the output. Higher values mean stronger influence. Lower values can help reduce artifacts but may result in less accurate voice cloning."
+    infer_parser.add_argument(
+        "--index_rate",
+        type=float,
+        help=index_rate_description,
+        choices=[i / 100.0 for i in range(0, 101)],
+        default=0.3,
+    )
+    volume_envelope_description = "Control the blending of the output's volume envelope. A value of 1 means the output envelope is fully used."
+    infer_parser.add_argument(
+        "--volume_envelope",
+        type=float,
+        help=volume_envelope_description,
+        choices=[i / 100.0 for i in range(0, 101)],
+        default=1,
+    )
+    protect_description = "Protect consonants and breathing sounds from artifacts. A value of 0.5 offers the strongest protection, while lower values may reduce the protection level but potentially mitigate the indexing effect."
+    infer_parser.add_argument(
+        "--protect",
+        type=float,
+        help=protect_description,
+        choices=[i / 1000.0 for i in range(0, 501)],
+        default=0.33,
+    )
+    hop_length_description = "Only applicable for the Crepe pitch extraction method. Determines the time it takes for the system to react to a significant pitch change. Smaller values require more processing time but can lead to better pitch accuracy."
+    infer_parser.add_argument(
+        "--hop_length",
+        type=int,
+        help=hop_length_description,
+        choices=range(1, 513),
+        default=128,
+    )
+    f0_method_description = "Choose the pitch extraction algorithm for the conversion. 'rmvpe' is the default and generally recommended."
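+    # Example invocation (hypothetical paths) using the flags defined above:
+    #   python rvc_cli.py infer --input_path in.wav --output_path out.wav \
+    #     --pth_path model.pth --index_path model.index --f0_method rmvpe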
+ infer_parser.add_argument( + "--f0_method", + type=str, + help=f0_method_description, + choices=[ + "crepe", + "crepe-tiny", + "rmvpe", + "fcpe", + "hybrid[crepe+rmvpe]", + "hybrid[crepe+fcpe]", + "hybrid[rmvpe+fcpe]", + "hybrid[crepe+rmvpe+fcpe]", + ], + default="rmvpe", + ) + infer_parser.add_argument( + "--input_path", + type=str, + help="Full path to the input audio file.", + required=True, + ) + infer_parser.add_argument( + "--output_path", + type=str, + help="Full path to the output audio file.", + required=True, + ) + pth_path_description = "Full path to the RVC model file (.pth)." + infer_parser.add_argument( + "--pth_path", type=str, help=pth_path_description, required=True + ) + index_path_description = "Full path to the index file (.index)." + infer_parser.add_argument( + "--index_path", type=str, help=index_path_description, required=True + ) + split_audio_description = "Split the audio into smaller segments before inference. This can improve the quality of the output for longer audio files." + infer_parser.add_argument( + "--split_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=split_audio_description, + default=False, + ) + f0_autotune_description = "Apply a light autotune to the inferred audio. Particularly useful for singing voice conversions." + infer_parser.add_argument( + "--f0_autotune", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=f0_autotune_description, + default=False, + ) + f0_autotune_strength_description = "Set the autotune strength - the more you increase it the more it will snap to the chromatic grid." + infer_parser.add_argument( + "--f0_autotune_strength", + type=float, + help=f0_autotune_strength_description, + choices=[(i / 10) for i in range(11)], + default=1.0, + ) + clean_audio_description = "Clean the output audio using noise reduction algorithms. Recommended for speech conversions." + infer_parser.add_argument( + "--clean_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=clean_audio_description, + default=False, + ) + clean_strength_description = "Adjust the intensity of the audio cleaning process. Higher values result in stronger cleaning, but may lead to a more compressed sound." + infer_parser.add_argument( + "--clean_strength", + type=float, + help=clean_strength_description, + choices=[(i / 10) for i in range(11)], + default=0.7, + ) + export_format_description = "Select the desired output audio format." + infer_parser.add_argument( + "--export_format", + type=str, + help=export_format_description, + choices=["WAV", "MP3", "FLAC", "OGG", "M4A"], + default="WAV", + ) + embedder_model_description = ( + "Choose the model used for generating speaker embeddings." + ) + infer_parser.add_argument( + "--embedder_model", + type=str, + help=embedder_model_description, + choices=[ + "contentvec", + "chinese-hubert-base", + "japanese-hubert-base", + "korean-hubert-base", + "custom", + ], + default="contentvec", + ) + embedder_model_custom_description = "Specify the path to a custom model for speaker embedding. Only applicable if 'embedder_model' is set to 'custom'." + infer_parser.add_argument( + "--embedder_model_custom", + type=str, + help=embedder_model_custom_description, + default=None, + ) + f0_file_description = "Full path to an external F0 file (.f0). This allows you to use pre-computed pitch values for the input audio." 
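+    # Example (hypothetical file): reuse pre-computed pitch values instead of
+    # re-running extraction:
+    #   python rvc_cli.py infer ... --f0_file vocals.f0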
+ infer_parser.add_argument( + "--f0_file", + type=str, + help=f0_file_description, + default=None, + ) + formant_shifting_description = "Apply formant shifting to the input audio. This can help adjust the timbre of the voice." + infer_parser.add_argument( + "--formant_shifting", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=formant_shifting_description, + default=False, + required=False, + ) + formant_qfrency_description = "Control the frequency of the formant shifting effect. Higher values result in a more pronounced effect." + infer_parser.add_argument( + "--formant_qfrency", + type=float, + help=formant_qfrency_description, + default=1.0, + required=False, + ) + formant_timbre_description = "Control the timbre of the formant shifting effect. Higher values result in a more pronounced effect." + infer_parser.add_argument( + "--formant_timbre", + type=float, + help=formant_timbre_description, + default=1.0, + required=False, + ) + sid_description = "Speaker ID for multi-speaker models." + infer_parser.add_argument( + "--sid", + type=int, + help=sid_description, + default=0, + required=False, + ) + post_process_description = "Apply post-processing effects to the output audio." + infer_parser.add_argument( + "--post_process", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=post_process_description, + default=False, + required=False, + ) + reverb_description = "Apply reverb effect to the output audio." + infer_parser.add_argument( + "--reverb", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=reverb_description, + default=False, + required=False, + ) + + pitch_shift_description = "Apply pitch shifting effect to the output audio." + infer_parser.add_argument( + "--pitch_shift", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=pitch_shift_description, + default=False, + required=False, + ) + + limiter_description = "Apply limiter effect to the output audio." + infer_parser.add_argument( + "--limiter", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=limiter_description, + default=False, + required=False, + ) + + gain_description = "Apply gain effect to the output audio." + infer_parser.add_argument( + "--gain", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=gain_description, + default=False, + required=False, + ) + + distortion_description = "Apply distortion effect to the output audio." + infer_parser.add_argument( + "--distortion", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=distortion_description, + default=False, + required=False, + ) + + chorus_description = "Apply chorus effect to the output audio." + infer_parser.add_argument( + "--chorus", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=chorus_description, + default=False, + required=False, + ) + + bitcrush_description = "Apply bitcrush effect to the output audio." + infer_parser.add_argument( + "--bitcrush", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=bitcrush_description, + default=False, + required=False, + ) + + clipping_description = "Apply clipping effect to the output audio." + infer_parser.add_argument( + "--clipping", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=clipping_description, + default=False, + required=False, + ) + + compressor_description = "Apply compressor effect to the output audio." 
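+    # Note: the effect toggles above presumably only take effect when
+    # --post_process is also set to True.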
+ infer_parser.add_argument( + "--compressor", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=compressor_description, + default=False, + required=False, + ) + + delay_description = "Apply delay effect to the output audio." + infer_parser.add_argument( + "--delay", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=delay_description, + default=False, + required=False, + ) + + reverb_room_size_description = "Control the room size of the reverb effect. Higher values result in a larger room size." + infer_parser.add_argument( + "--reverb_room_size", + type=float, + help=reverb_room_size_description, + default=0.5, + required=False, + ) + + reverb_damping_description = "Control the damping of the reverb effect. Higher values result in a more damped sound." + infer_parser.add_argument( + "--reverb_damping", + type=float, + help=reverb_damping_description, + default=0.5, + required=False, + ) + + reverb_wet_gain_description = "Control the wet gain of the reverb effect. Higher values result in a stronger reverb effect." + infer_parser.add_argument( + "--reverb_wet_gain", + type=float, + help=reverb_wet_gain_description, + default=0.5, + required=False, + ) + + reverb_dry_gain_description = "Control the dry gain of the reverb effect. Higher values result in a stronger dry signal." + infer_parser.add_argument( + "--reverb_dry_gain", + type=float, + help=reverb_dry_gain_description, + default=0.5, + required=False, + ) + + reverb_width_description = "Control the stereo width of the reverb effect. Higher values result in a wider stereo image." + infer_parser.add_argument( + "--reverb_width", + type=float, + help=reverb_width_description, + default=0.5, + required=False, + ) + + reverb_freeze_mode_description = "Control the freeze mode of the reverb effect. Higher values result in a stronger freeze effect." + infer_parser.add_argument( + "--reverb_freeze_mode", + type=float, + help=reverb_freeze_mode_description, + default=0.5, + required=False, + ) + + pitch_shift_semitones_description = "Control the pitch shift in semitones. Positive values increase the pitch, while negative values decrease it." + infer_parser.add_argument( + "--pitch_shift_semitones", + type=float, + help=pitch_shift_semitones_description, + default=0.0, + required=False, + ) + + limiter_threshold_description = "Control the threshold of the limiter effect. Higher values result in a stronger limiting effect." + infer_parser.add_argument( + "--limiter_threshold", + type=float, + help=limiter_threshold_description, + default=-6, + required=False, + ) + + limiter_release_time_description = "Control the release time of the limiter effect. Higher values result in a longer release time." + infer_parser.add_argument( + "--limiter_release_time", + type=float, + help=limiter_release_time_description, + default=0.01, + required=False, + ) + + gain_db_description = "Control the gain in decibels. Positive values increase the gain, while negative values decrease it." + infer_parser.add_argument( + "--gain_db", + type=float, + help=gain_db_description, + default=0.0, + required=False, + ) + + distortion_gain_description = "Control the gain of the distortion effect. Higher values result in a stronger distortion effect." + infer_parser.add_argument( + "--distortion_gain", + type=float, + help=distortion_gain_description, + default=25, + required=False, + ) + + chorus_rate_description = "Control the rate of the chorus effect. Higher values result in a faster chorus effect." 
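+    # Example (hypothetical values): a gentle chorus on the converted audio:
+    #   ... --post_process True --chorus True --chorus_rate 1.5 --chorus_depth 0.3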
+ infer_parser.add_argument( + "--chorus_rate", + type=float, + help=chorus_rate_description, + default=1.0, + required=False, + ) + + chorus_depth_description = "Control the depth of the chorus effect. Higher values result in a stronger chorus effect." + infer_parser.add_argument( + "--chorus_depth", + type=float, + help=chorus_depth_description, + default=0.25, + required=False, + ) + + chorus_center_delay_description = "Control the center delay of the chorus effect. Higher values result in a longer center delay." + infer_parser.add_argument( + "--chorus_center_delay", + type=float, + help=chorus_center_delay_description, + default=7, + required=False, + ) + + chorus_feedback_description = "Control the feedback of the chorus effect. Higher values result in a stronger feedback effect." + infer_parser.add_argument( + "--chorus_feedback", + type=float, + help=chorus_feedback_description, + default=0.0, + required=False, + ) + + chorus_mix_description = "Control the mix of the chorus effect. Higher values result in a stronger chorus effect." + infer_parser.add_argument( + "--chorus_mix", + type=float, + help=chorus_mix_description, + default=0.5, + required=False, + ) + + bitcrush_bit_depth_description = "Control the bit depth of the bitcrush effect. Higher values result in a stronger bitcrush effect." + infer_parser.add_argument( + "--bitcrush_bit_depth", + type=int, + help=bitcrush_bit_depth_description, + default=8, + required=False, + ) + + clipping_threshold_description = "Control the threshold of the clipping effect. Higher values result in a stronger clipping effect." + infer_parser.add_argument( + "--clipping_threshold", + type=float, + help=clipping_threshold_description, + default=-6, + required=False, + ) + + compressor_threshold_description = "Control the threshold of the compressor effect. Higher values result in a stronger compressor effect." + infer_parser.add_argument( + "--compressor_threshold", + type=float, + help=compressor_threshold_description, + default=0, + required=False, + ) + + compressor_ratio_description = "Control the ratio of the compressor effect. Higher values result in a stronger compressor effect." + infer_parser.add_argument( + "--compressor_ratio", + type=float, + help=compressor_ratio_description, + default=1, + required=False, + ) + + compressor_attack_description = "Control the attack of the compressor effect. Higher values result in a stronger compressor effect." + infer_parser.add_argument( + "--compressor_attack", + type=float, + help=compressor_attack_description, + default=1.0, + required=False, + ) + + compressor_release_description = "Control the release of the compressor effect. Higher values result in a stronger compressor effect." + infer_parser.add_argument( + "--compressor_release", + type=float, + help=compressor_release_description, + default=100, + required=False, + ) + + delay_seconds_description = "Control the delay time in seconds. Higher values result in a longer delay time." + infer_parser.add_argument( + "--delay_seconds", + type=float, + help=delay_seconds_description, + default=0.5, + required=False, + ) + delay_feedback_description = "Control the feedback of the delay effect. Higher values result in a stronger feedback effect." + infer_parser.add_argument( + "--delay_feedback", + type=float, + help=delay_feedback_description, + default=0.0, + required=False, + ) + delay_mix_description = "Control the mix of the delay effect. Higher values result in a stronger delay effect." 
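+    # Example (hypothetical values): a short delay blended at 30%:
+    #   ... --post_process True --delay True --delay_seconds 0.12 --delay_mix 0.3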
+ infer_parser.add_argument( + "--delay_mix", + type=float, + help=delay_mix_description, + default=0.5, + required=False, + ) + + # Parser for 'batch_infer' mode + batch_infer_parser = subparsers.add_parser( + "batch_infer", + help="Run batch inference", + ) + batch_infer_parser.add_argument( + "--pitch", + type=int, + help=pitch_description, + choices=range(-24, 25), + default=0, + ) + batch_infer_parser.add_argument( + "--filter_radius", + type=int, + help=filter_radius_description, + choices=range(11), + default=3, + ) + batch_infer_parser.add_argument( + "--index_rate", + type=float, + help=index_rate_description, + choices=[i / 100.0 for i in range(0, 101)], + default=0.3, + ) + batch_infer_parser.add_argument( + "--volume_envelope", + type=float, + help=volume_envelope_description, + choices=[i / 100.0 for i in range(0, 101)], + default=1, + ) + batch_infer_parser.add_argument( + "--protect", + type=float, + help=protect_description, + choices=[i / 1000.0 for i in range(0, 501)], + default=0.33, + ) + batch_infer_parser.add_argument( + "--hop_length", + type=int, + help=hop_length_description, + choices=range(1, 513), + default=128, + ) + batch_infer_parser.add_argument( + "--f0_method", + type=str, + help=f0_method_description, + choices=[ + "crepe", + "crepe-tiny", + "rmvpe", + "fcpe", + "hybrid[crepe+rmvpe]", + "hybrid[crepe+fcpe]", + "hybrid[rmvpe+fcpe]", + "hybrid[crepe+rmvpe+fcpe]", + ], + default="rmvpe", + ) + batch_infer_parser.add_argument( + "--input_folder", + type=str, + help="Path to the folder containing input audio files.", + required=True, + ) + batch_infer_parser.add_argument( + "--output_folder", + type=str, + help="Path to the folder for saving output audio files.", + required=True, + ) + batch_infer_parser.add_argument( + "--pth_path", type=str, help=pth_path_description, required=True + ) + batch_infer_parser.add_argument( + "--index_path", type=str, help=index_path_description, required=True + ) + batch_infer_parser.add_argument( + "--split_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=split_audio_description, + default=False, + ) + batch_infer_parser.add_argument( + "--f0_autotune", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=f0_autotune_description, + default=False, + ) + batch_infer_parser.add_argument( + "--f0_autotune_strength", + type=float, + help=clean_strength_description, + choices=[(i / 10) for i in range(11)], + default=1.0, + ) + batch_infer_parser.add_argument( + "--clean_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=clean_audio_description, + default=False, + ) + batch_infer_parser.add_argument( + "--clean_strength", + type=float, + help=clean_strength_description, + choices=[(i / 10) for i in range(11)], + default=0.7, + ) + batch_infer_parser.add_argument( + "--export_format", + type=str, + help=export_format_description, + choices=["WAV", "MP3", "FLAC", "OGG", "M4A"], + default="WAV", + ) + batch_infer_parser.add_argument( + "--embedder_model", + type=str, + help=embedder_model_description, + choices=[ + "contentvec", + "chinese-hubert-base", + "japanese-hubert-base", + "korean-hubert-base", + "custom", + ], + default="contentvec", + ) + batch_infer_parser.add_argument( + "--embedder_model_custom", + type=str, + help=embedder_model_custom_description, + default=None, + ) + batch_infer_parser.add_argument( + "--f0_file", + type=str, + help=f0_file_description, + default=None, + ) + batch_infer_parser.add_argument( + "--formant_shifting", + type=lambda x: 
bool(strtobool(x)), + choices=[True, False], + help=formant_shifting_description, + default=False, + required=False, + ) + batch_infer_parser.add_argument( + "--formant_qfrency", + type=float, + help=formant_qfrency_description, + default=1.0, + required=False, + ) + batch_infer_parser.add_argument( + "--formant_timbre", + type=float, + help=formant_timbre_description, + default=1.0, + required=False, + ) + batch_infer_parser.add_argument( + "--sid", + type=int, + help=sid_description, + default=0, + required=False, + ) + batch_infer_parser.add_argument( + "--post_process", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=post_process_description, + default=False, + required=False, + ) + batch_infer_parser.add_argument( + "--reverb", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=reverb_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--pitch_shift", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=pitch_shift_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--limiter", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=limiter_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--gain", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=gain_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--distortion", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=distortion_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--chorus", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=chorus_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--bitcrush", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=bitcrush_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--clipping", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=clipping_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--compressor", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=compressor_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--delay", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=delay_description, + default=False, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_room_size", + type=float, + help=reverb_room_size_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_damping", + type=float, + help=reverb_damping_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_wet_gain", + type=float, + help=reverb_wet_gain_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_dry_gain", + type=float, + help=reverb_dry_gain_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_width", + type=float, + help=reverb_width_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--reverb_freeze_mode", + type=float, + help=reverb_freeze_mode_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--pitch_shift_semitones", + type=float, + help=pitch_shift_semitones_description, + default=0.0, + 
required=False, + ) + + batch_infer_parser.add_argument( + "--limiter_threshold", + type=float, + help=limiter_threshold_description, + default=-6, + required=False, + ) + + batch_infer_parser.add_argument( + "--limiter_release_time", + type=float, + help=limiter_release_time_description, + default=0.01, + required=False, + ) + batch_infer_parser.add_argument( + "--gain_db", + type=float, + help=gain_db_description, + default=0.0, + required=False, + ) + + batch_infer_parser.add_argument( + "--distortion_gain", + type=float, + help=distortion_gain_description, + default=25, + required=False, + ) + + batch_infer_parser.add_argument( + "--chorus_rate", + type=float, + help=chorus_rate_description, + default=1.0, + required=False, + ) + + batch_infer_parser.add_argument( + "--chorus_depth", + type=float, + help=chorus_depth_description, + default=0.25, + required=False, + ) + batch_infer_parser.add_argument( + "--chorus_center_delay", + type=float, + help=chorus_center_delay_description, + default=7, + required=False, + ) + + batch_infer_parser.add_argument( + "--chorus_feedback", + type=float, + help=chorus_feedback_description, + default=0.0, + required=False, + ) + + batch_infer_parser.add_argument( + "--chorus_mix", + type=float, + help=chorus_mix_description, + default=0.5, + required=False, + ) + + batch_infer_parser.add_argument( + "--bitcrush_bit_depth", + type=int, + help=bitcrush_bit_depth_description, + default=8, + required=False, + ) + + batch_infer_parser.add_argument( + "--clipping_threshold", + type=float, + help=clipping_threshold_description, + default=-6, + required=False, + ) + + batch_infer_parser.add_argument( + "--compressor_threshold", + type=float, + help=compressor_threshold_description, + default=0, + required=False, + ) + + batch_infer_parser.add_argument( + "--compressor_ratio", + type=float, + help=compressor_ratio_description, + default=1, + required=False, + ) + + batch_infer_parser.add_argument( + "--compressor_attack", + type=float, + help=compressor_attack_description, + default=1.0, + required=False, + ) + + batch_infer_parser.add_argument( + "--compressor_release", + type=float, + help=compressor_release_description, + default=100, + required=False, + ) + batch_infer_parser.add_argument( + "--delay_seconds", + type=float, + help=delay_seconds_description, + default=0.5, + required=False, + ) + batch_infer_parser.add_argument( + "--delay_feedback", + type=float, + help=delay_feedback_description, + default=0.0, + required=False, + ) + batch_infer_parser.add_argument( + "--delay_mix", + type=float, + help=delay_mix_description, + default=0.5, + required=False, + ) + + # Parser for 'tts' mode + tts_parser = subparsers.add_parser("tts", help="Run TTS inference") + tts_parser.add_argument( + "--tts_file", type=str, help="File with a text to be synthesized", required=True + ) + tts_parser.add_argument( + "--tts_text", type=str, help="Text to be synthesized", required=True + ) + tts_parser.add_argument( + "--tts_voice", + type=str, + help="Voice to be used for TTS synthesis.", + choices=locales, + required=True, + ) + tts_parser.add_argument( + "--tts_rate", + type=int, + help="Control the speaking rate of the TTS. 
Values range from -100 (slower) to 100 (faster).", + choices=range(-100, 101), + default=0, + ) + tts_parser.add_argument( + "--pitch", + type=int, + help=pitch_description, + choices=range(-24, 25), + default=0, + ) + tts_parser.add_argument( + "--filter_radius", + type=int, + help=filter_radius_description, + choices=range(11), + default=3, + ) + tts_parser.add_argument( + "--index_rate", + type=float, + help=index_rate_description, + choices=[(i / 10) for i in range(11)], + default=0.3, + ) + tts_parser.add_argument( + "--volume_envelope", + type=float, + help=volume_envelope_description, + choices=[(i / 10) for i in range(11)], + default=1, + ) + tts_parser.add_argument( + "--protect", + type=float, + help=protect_description, + choices=[(i / 10) for i in range(6)], + default=0.33, + ) + tts_parser.add_argument( + "--hop_length", + type=int, + help=hop_length_description, + choices=range(1, 513), + default=128, + ) + tts_parser.add_argument( + "--f0_method", + type=str, + help=f0_method_description, + choices=[ + "crepe", + "crepe-tiny", + "rmvpe", + "fcpe", + "hybrid[crepe+rmvpe]", + "hybrid[crepe+fcpe]", + "hybrid[rmvpe+fcpe]", + "hybrid[crepe+rmvpe+fcpe]", + ], + default="rmvpe", + ) + tts_parser.add_argument( + "--output_tts_path", + type=str, + help="Full path to save the synthesized TTS audio.", + required=True, + ) + tts_parser.add_argument( + "--output_rvc_path", + type=str, + help="Full path to save the voice-converted audio using the synthesized TTS.", + required=True, + ) + tts_parser.add_argument( + "--pth_path", type=str, help=pth_path_description, required=True + ) + tts_parser.add_argument( + "--index_path", type=str, help=index_path_description, required=True + ) + tts_parser.add_argument( + "--split_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=split_audio_description, + default=False, + ) + tts_parser.add_argument( + "--f0_autotune", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=f0_autotune_description, + default=False, + ) + tts_parser.add_argument( + "--f0_autotune_strength", + type=float, + help=clean_strength_description, + choices=[(i / 10) for i in range(11)], + default=1.0, + ) + tts_parser.add_argument( + "--clean_audio", + type=lambda x: bool(strtobool(x)), + choices=[True, False], + help=clean_audio_description, + default=False, + ) + tts_parser.add_argument( + "--clean_strength", + type=float, + help=clean_strength_description, + choices=[(i / 10) for i in range(11)], + default=0.7, + ) + tts_parser.add_argument( + "--export_format", + type=str, + help=export_format_description, + choices=["WAV", "MP3", "FLAC", "OGG", "M4A"], + default="WAV", + ) + tts_parser.add_argument( + "--embedder_model", + type=str, + help=embedder_model_description, + choices=[ + "contentvec", + "chinese-hubert-base", + "japanese-hubert-base", + "korean-hubert-base", + "custom", + ], + default="contentvec", + ) + tts_parser.add_argument( + "--embedder_model_custom", + type=str, + help=embedder_model_custom_description, + default=None, + ) + tts_parser.add_argument( + "--f0_file", + type=str, + help=f0_file_description, + default=None, + ) + + # Parser for 'model_information' mode + model_information_parser = subparsers.add_parser( + "model_information", help="Display information about a trained model." 
+    )
+    model_information_parser.add_argument(
+        "--pth_path", type=str, help="Path to the .pth model file.", required=True
+    )
+
+    # Parser for 'model_blender' mode
+    model_blender_parser = subparsers.add_parser(
+        "model_blender", help="Fuse two RVC models together."
+    )
+    model_blender_parser.add_argument(
+        "--model_name", type=str, help="Name of the new fused model.", required=True
+    )
+    model_blender_parser.add_argument(
+        "--pth_path_1",
+        type=str,
+        help="Path to the first .pth model file.",
+        required=True,
+    )
+    model_blender_parser.add_argument(
+        "--pth_path_2",
+        type=str,
+        help="Path to the second .pth model file.",
+        required=True,
+    )
+    model_blender_parser.add_argument(
+        "--ratio",
+        type=float,
+        help="Ratio for blending the two models (0.0 to 1.0).",
+        choices=[(i / 10) for i in range(11)],
+        default=0.5,
+    )
+
+    # Parser for 'tensorboard' mode
+    subparsers.add_parser(
+        "tensorboard", help="Launch TensorBoard for monitoring training progress."
+    )
+
+    # Parser for 'download' mode
+    download_parser = subparsers.add_parser(
+        "download", help="Download a model from a provided link."
+    )
+    download_parser.add_argument(
+        "--model_link", type=str, help="Direct link to the model file.", required=True
+    )
+
+    # Parser for 'prerequisites' mode
+    prerequisites_parser = subparsers.add_parser(
+        "prerequisites", help="Install prerequisites for RVC."
+    )
+    prerequisites_parser.add_argument(
+        "--models",
+        type=lambda x: bool(strtobool(x)),
+        choices=[True, False],
+        default=True,
+        help="Download additional models.",
+    )
+    prerequisites_parser.add_argument(
+        "--exe",
+        type=lambda x: bool(strtobool(x)),
+        choices=[True, False],
+        default=True,
+        help="Download required executables.",
+    )
+
+    # Parser for 'audio_analyzer' mode
+    audio_analyzer = subparsers.add_parser(
+        "audio_analyzer", help="Analyze an audio file."
+    )
+    audio_analyzer.add_argument(
+        "--input_path", type=str, help="Path to the input audio file.", required=True
+    )
+
+    return parser.parse_args()
+
+
+def main():
+    if len(sys.argv) == 1:
+        print("Please run the script with '-h' for more information.")
+        sys.exit(1)
+
+    args = parse_arguments()
+
+    try:
+        if args.mode == "infer":
+            run_infer_script(
+                pitch=args.pitch,
+                filter_radius=args.filter_radius,
+                index_rate=args.index_rate,
+                volume_envelope=args.volume_envelope,
+                protect=args.protect,
+                hop_length=args.hop_length,
+                f0_method=args.f0_method,
+                input_path=args.input_path,
+                output_path=args.output_path,
+                pth_path=args.pth_path,
+                index_path=args.index_path,
+                split_audio=args.split_audio,
+                f0_autotune=args.f0_autotune,
+                f0_autotune_strength=args.f0_autotune_strength,
+                clean_audio=args.clean_audio,
+                clean_strength=args.clean_strength,
+                export_format=args.export_format,
+                embedder_model=args.embedder_model,
+                embedder_model_custom=args.embedder_model_custom,
+                f0_file=args.f0_file,
+                formant_shifting=args.formant_shifting,
+                formant_qfrency=args.formant_qfrency,
+                formant_timbre=args.formant_timbre,
+                sid=args.sid,
+                post_process=args.post_process,
+                reverb=args.reverb,
+                pitch_shift=args.pitch_shift,
+                limiter=args.limiter,
+                gain=args.gain,
+                distortion=args.distortion,
+                chorus=args.chorus,
+                bitcrush=args.bitcrush,
+                clipping=args.clipping,
+                compressor=args.compressor,
+                delay=args.delay,
+                reverb_room_size=args.reverb_room_size,
+                reverb_damping=args.reverb_damping,
+                reverb_wet_gain=args.reverb_wet_gain,
+                reverb_dry_gain=args.reverb_dry_gain,
+                reverb_width=args.reverb_width,
+                reverb_freeze_mode=args.reverb_freeze_mode,
+                pitch_shift_semitones=args.pitch_shift_semitones,
+                limiter_threshold=args.limiter_threshold,
+                limiter_release_time=args.limiter_release_time,
+                gain_db=args.gain_db,
+                distortion_gain=args.distortion_gain,
+                chorus_rate=args.chorus_rate,
+                chorus_depth=args.chorus_depth,
+                chorus_center_delay=args.chorus_center_delay,
+                chorus_feedback=args.chorus_feedback,
+                chorus_mix=args.chorus_mix,
+                bitcrush_bit_depth=args.bitcrush_bit_depth,
+                clipping_threshold=args.clipping_threshold,
+                compressor_threshold=args.compressor_threshold,
+                compressor_ratio=args.compressor_ratio,
+                compressor_attack=args.compressor_attack,
+                compressor_release=args.compressor_release,
+                delay_seconds=args.delay_seconds,
+                delay_feedback=args.delay_feedback,
+                delay_mix=args.delay_mix,
+            )
+        elif args.mode == "batch_infer":
+            run_batch_infer_script(
+                pitch=args.pitch,
+                filter_radius=args.filter_radius,
+                index_rate=args.index_rate,
+                volume_envelope=args.volume_envelope,
+                protect=args.protect,
+                hop_length=args.hop_length,
+                f0_method=args.f0_method,
+                input_folder=args.input_folder,
+                output_folder=args.output_folder,
+                pth_path=args.pth_path,
+                index_path=args.index_path,
+                split_audio=args.split_audio,
+                f0_autotune=args.f0_autotune,
+                f0_autotune_strength=args.f0_autotune_strength,
+                clean_audio=args.clean_audio,
+                clean_strength=args.clean_strength,
+                export_format=args.export_format,
+                embedder_model=args.embedder_model,
+                embedder_model_custom=args.embedder_model_custom,
+                f0_file=args.f0_file,
+                formant_shifting=args.formant_shifting,
+                formant_qfrency=args.formant_qfrency,
+                formant_timbre=args.formant_timbre,
+                sid=args.sid,
+                post_process=args.post_process,
+                reverb=args.reverb,
+                pitch_shift=args.pitch_shift,
+                limiter=args.limiter,
+                gain=args.gain,
+                distortion=args.distortion,
+                chorus=args.chorus,
+                bitcrush=args.bitcrush,
+                clipping=args.clipping,
+                compressor=args.compressor,
+                delay=args.delay,
+                reverb_room_size=args.reverb_room_size,
+                reverb_damping=args.reverb_damping,
+                reverb_wet_gain=args.reverb_wet_gain,
+                reverb_dry_gain=args.reverb_dry_gain,
+                reverb_width=args.reverb_width,
+                reverb_freeze_mode=args.reverb_freeze_mode,
+                pitch_shift_semitones=args.pitch_shift_semitones,
+                limiter_threshold=args.limiter_threshold,
+                limiter_release_time=args.limiter_release_time,
+                gain_db=args.gain_db,
+                distortion_gain=args.distortion_gain,
+                chorus_rate=args.chorus_rate,
+                chorus_depth=args.chorus_depth,
+                chorus_center_delay=args.chorus_center_delay,
+                chorus_feedback=args.chorus_feedback,
+                chorus_mix=args.chorus_mix,
+                bitcrush_bit_depth=args.bitcrush_bit_depth,
+                clipping_threshold=args.clipping_threshold,
+                compressor_threshold=args.compressor_threshold,
+                compressor_ratio=args.compressor_ratio,
+                compressor_attack=args.compressor_attack,
+                compressor_release=args.compressor_release,
+                delay_seconds=args.delay_seconds,
+                delay_feedback=args.delay_feedback,
+                delay_mix=args.delay_mix,
+            )
+        elif args.mode == "tts":
+            run_tts_script(
+                tts_file=args.tts_file,
+                tts_text=args.tts_text,
+                tts_voice=args.tts_voice,
+                tts_rate=args.tts_rate,
+                pitch=args.pitch,
+                filter_radius=args.filter_radius,
+                index_rate=args.index_rate,
+                volume_envelope=args.volume_envelope,
+                protect=args.protect,
+                hop_length=args.hop_length,
+                f0_method=args.f0_method,
+                output_tts_path=args.output_tts_path,
+                output_rvc_path=args.output_rvc_path,
+                pth_path=args.pth_path,
+                index_path=args.index_path,
+                split_audio=args.split_audio,
+                f0_autotune=args.f0_autotune,
+                f0_autotune_strength=args.f0_autotune_strength,
+                clean_audio=args.clean_audio,
+                clean_strength=args.clean_strength,
+                export_format=args.export_format,
+                embedder_model=args.embedder_model,
+                embedder_model_custom=args.embedder_model_custom,
+                f0_file=args.f0_file,
+            )
+        elif args.mode == "model_information":
+            run_model_information_script(
+                pth_path=args.pth_path,
+            )
+        elif args.mode == "model_blender":
+            run_model_blender_script(
+                model_name=args.model_name,
+                pth_path_1=args.pth_path_1,
+                pth_path_2=args.pth_path_2,
+                ratio=args.ratio,
+            )
+        elif args.mode == "tensorboard":
+            run_tensorboard_script()
+        elif args.mode == "download":
+            run_download_script(
+                model_link=args.model_link,
+            )
+        elif args.mode == "audio_analyzer":
+            run_audio_analyzer_script(
+                input_path=args.input_path,
+            )
+    except Exception as error:
+        print(f"An error occurred during execution: {error}")
+
+        import traceback
+
+        traceback.print_exc()
+
+
+if __name__ == "__main__":
+    main()

From e5270120a308aef32d968dd82443e5102f425b94 Mon Sep 17 00:00:00 2001
From: kiurobox
Date: Tue, 4 Feb 2025 03:05:31 -0800
Subject: [PATCH 8/9] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index efa7440e..ec30166d 100644
--- a/README.md
+++ b/README.md
@@ -16,11 +16,11 @@ Please support the original RVC. This inference won't be possible to make withou
 - Youtube Audio Downloader ✅
 - Audio-Separator (Voice Splitter) [Internet required for downloading model] ✅
 - Model Downloader ✅
+- TTS Support
 
 #### Currently Working
 - Settings 🛠
 - Microphone Support
-- TTS Support
 - Gradio WebUI
 
 ### Installation

From 8d89b6e16a1fe6ad0917fc86029569143010bc72 Mon Sep 17 00:00:00 2001
From: kiurobox
Date: Tue, 4 Feb 2025 03:06:48 -0800
Subject: [PATCH 9/9] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ec30166d..86041ed5 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@ ### Information
 
 Advanced RVC Inference presents itself as a state-of-the-art web UI crafted to streamline rapid and effortless inference. This comprehensive toolset encompasses a model downloader, a voice splitter, and the added efficiency of batch inference.
 
-Please support the original RVC. This inference won't be possible to make without it.
-[![Original RVC Repository](https://img.shields.io/badge/Github-Original%20RVC%20Repository-blue?style=for-the-badge&logo=github)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)
+Please support Applio. This inference would not be possible without it.
+[![Original Applio](https://img.shields.io/badge/Github-Original%20Applio%20Repository-blue?style=for-the-badge&logo=github)](https://github.com/IAHispano/Applio)
 
 #### Features
 - Support V1 & V2 Model ✅
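
Usage note: the sketch below shows one way the CLI added in PATCH 7/9 could be driven from Python. It is a minimal example under stated assumptions, not part of the patches: the script's filename (core.py here), the model paths, the input file, and the edge-tts voice name are all placeholders not taken from this series. Both --tts_file and --tts_text are passed because the tts subparser marks both as required.

    import subprocess

    # Invoke the 'tts' mode of the new CLI in a child process.
    # Assumptions (not confirmed by the patch): the CLI lives in core.py,
    # a trained model exists under models/voice/, and "en-US-AriaNeural"
    # is a valid entry in the `locales` list backing --tts_voice.
    subprocess.run(
        [
            "python", "core.py", "tts",
            "--tts_file", "script.txt",
            "--tts_text", "Hello from Advanced RVC Inference.",
            "--tts_voice", "en-US-AriaNeural",
            "--tts_rate", "0",
            "--pitch", "0",
            "--f0_method", "rmvpe",
            "--output_tts_path", "output_tts.wav",
            "--output_rvc_path", "output_rvc.wav",
            "--pth_path", "models/voice/voice.pth",
            "--index_path", "models/voice/voice.index",
        ],
        check=True,  # raise if the CLI exits with a non-zero status
    )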