diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 284d1fcaa4..9ed8ec27f7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,13 +13,18 @@ repos: hooks: # Run isort to check only (don't modify files) - id: isort - args: [ --check-only ] + args: [--check-only, --filter-files] - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. rev: v0.14.4 hooks: # Run the linter to check only (don't modify files) - id: ruff-check + - repo: https://github.com/PyCQA/flake8 + rev: 7.3.0 + hooks: + - id: flake8 + additional_dependencies: [flake8-pyproject] - repo: https://github.com/crate-ci/typos rev: v1.39.1 hooks: diff --git a/benchmarks/regression/benchmarks/arguments.py b/benchmarks/regression/benchmarks/arguments.py index 662ccec7eb..35028db8f9 100644 --- a/benchmarks/regression/benchmarks/arguments.py +++ b/benchmarks/regression/benchmarks/arguments.py @@ -10,9 +10,9 @@ class Processing: def setup(self): grid = Grid(shape=(5, 5, 5)) - funcs = [Function(name='f%d' % n, grid=grid) for n in range(30)] - tfuncs = [TimeFunction(name='u%d' % n, grid=grid) for n in range(30)] - stfuncs = [SparseTimeFunction(name='su%d' % n, grid=grid, npoint=1, nt=100) + funcs = [Function(name=f'f{n}', grid=grid) for n in range(30)] + tfuncs = [TimeFunction(name=f'u{n}', grid=grid) for n in range(30)] + stfuncs = [SparseTimeFunction(name=f'su{n}', grid=grid, npoint=1, nt=100) for n in range(30)] v = TimeFunction(name='v', grid=grid, space_order=2) diff --git a/benchmarks/user/advisor/advisor_logging.py b/benchmarks/user/advisor/advisor_logging.py index af49ee9258..04ef2ed740 100644 --- a/benchmarks/user/advisor/advisor_logging.py +++ b/benchmarks/user/advisor/advisor_logging.py @@ -9,18 +9,18 @@ def check(cond, msg): def err(msg): - print('\033[1;37;31m%s\033[0m' % msg) # print in RED + print(f'\033[1;37;31m{msg}\033[0m') # print in RED def log(msg): - print('\033[1;37;32m%s\033[0m' % msg) # print in GREEN + print(f'\033[1;37;32m{msg}\033[0m') # 
print in GREEN @contextmanager def progress(msg): - print('\033[1;37;32m%s ... \033[0m' % msg, end='', flush=True) # print in GREEN + print(f'\033[1;37;32m{msg} ... \033[0m', end='', flush=True) # print in GREEN yield - print('\033[1;37;32m%s\033[0m' % 'Done!') + print('\033[1;37;32m{}\033[0m'.format('Done!')) def log_process(process, logger): diff --git a/benchmarks/user/advisor/roofline.py b/benchmarks/user/advisor/roofline.py index 12578fda92..83f7526a22 100644 --- a/benchmarks/user/advisor/roofline.py +++ b/benchmarks/user/advisor/roofline.py @@ -205,7 +205,7 @@ def roofline(name, project, scale, precision, mode, th): log(f'\nFigure saved in {figpath}{name}.pdf.') # Save the JSON file - with open('%s.json' % name, 'w') as f: + with open(f'{name}.json', 'w') as f: f.write(json.dumps(roofline_data)) log(f'\nJSON file saved as {name}.json.') diff --git a/benchmarks/user/advisor/run_advisor.py b/benchmarks/user/advisor/run_advisor.py index b3cef8387a..8d20607da4 100644 --- a/benchmarks/user/advisor/run_advisor.py +++ b/benchmarks/user/advisor/run_advisor.py @@ -132,7 +132,7 @@ def run_with_advisor(path, output, name, exec_args): # Before collecting the `survey` and `tripcounts` a "pure" python run # to warmup the jit cache is preceded - log('Starting Intel Advisor\'s `roofline` analysis for `%s`' % name) + log(f'Starting Intel Advisor\'s `roofline` analysis for `{name}`') dt = datetime.datetime.now() # Set up a file logger that will track the output of the advisor profiling @@ -140,9 +140,8 @@ def run_with_advisor(path, output, name, exec_args): advixe_logger.setLevel(logging.INFO) advixe_formatter = logging.Formatter('%(asctime)s: %(message)s') - logger_datetime = '%d.%d.%d.%d.%d.%d' % (dt.year, dt.month, - dt.day, dt.hour, dt.minute, dt.second) - advixe_handler = logging.FileHandler('%s/%s_%s.log' % (output, name, logger_datetime)) + logger_datetime = f'{dt.year}.{dt.month}.{dt.day}.{dt.hour}.{dt.minute}.{dt.second}' + advixe_handler = 
logging.FileHandler(f'{output}/{name}_{logger_datetime}.log') advixe_handler.setFormatter(advixe_formatter) advixe_logger.addHandler(advixe_handler) diff --git a/benchmarks/user/benchmark.py b/benchmarks/user/benchmark.py index 4439ad6066..f2f29ffe6a 100644 --- a/benchmarks/user/benchmark.py +++ b/benchmarks/user/benchmark.py @@ -1,4 +1,5 @@ import os +from contextlib import suppress import click import numpy as np @@ -73,8 +74,10 @@ def run_op(solver, operator, **options): # Get the operator if exist try: op = getattr(solver, operator) - except AttributeError: - raise AttributeError("Operator %s not implemented for %s" % (operator, solver)) + except AttributeError as e: + raise AttributeError( + f"Operator {operator} not implemented for {solver}" + ) from e # This is a bit ugly but not sure how to make clean input creation for different op if operator == "forward": @@ -95,7 +98,7 @@ def run_op(solver, operator, **options): args = args[:-1] return op(*args, **options) else: - raise ValueError("Unrecognized operator %s" % operator) + raise ValueError(f"Unrecognized operator {operator}") @click.group() @@ -157,15 +160,17 @@ def from_opt(ctx, param, value): # E.g., `('advanced', {'par-tile': True})` value = eval(value) if not isinstance(value, tuple) and len(value) >= 1: - raise click.BadParameter("Invalid choice `%s` (`opt` must be " - "either str or tuple)" % str(value)) + raise click.BadParameter(f"Invalid choice `{str(value)}` (`opt` must be " + "either str or tuple)") opt = value[0] except NameError: # E.g. `'advanced'` opt = value if opt not in configuration._accepted['opt']: - raise click.BadParameter("Invalid choice `%s` (choose from %s)" - % (opt, str(configuration._accepted['opt']))) + raise click.BadParameter( + f'Invalid choice `{opt} ' + f'(choose from {str(configuration._accepted["opt"])})' + ) return value def config_blockshape(ctx, param, value): @@ -181,18 +186,18 @@ def config_blockshape(ctx, param, value): # 1. integers, not strings # 2. 
sanity check the (hierarchical) blocking shape normalized_value = [] - for i, block_shape in enumerate(value): + for block_shape in value: # If hierarchical blocking is activated, say with N levels, here in # `bs` we expect to see 3*N entries bs = [int(x) for x in block_shape.split()] levels = [bs[x:x+3] for x in range(0, len(bs), 3)] if any(len(level) != 3 for level in levels): raise ValueError("Expected 3 entries per block shape level, but got " - "one level with less than 3 entries (`%s`)" % levels) + f"one level with less than 3 entries (`{levels}`)") normalized_value.append(levels) if not all_equal(len(i) for i in normalized_value): raise ValueError("Found different block shapes with incompatible " - "number of levels (`%s`)" % normalized_value) + f"number of levels (`{normalized_value}`)") configuration['opt-options']['blocklevels'] = len(normalized_value[0]) else: normalized_value = [] @@ -205,8 +210,10 @@ def config_autotuning(ctx, param, value): elif value != 'off': # Sneak-peek at the `block-shape` -- if provided, keep auto-tuning off if ctx.params['block_shape']: - warning("Skipping autotuning (using explicit block-shape `%s`)" - % str(ctx.params['block_shape'])) + warning( + 'Skipping autotuning' + f'(using explicit block-shape `{str(ctx.params["block_shape"])}`)' + ) level = False else: # Make sure to always run in preemptive mode @@ -274,8 +281,8 @@ def run(problem, **kwargs): # Note: the following piece of code is horribly *hacky*, but it works for now for i, block_shape in enumerate(block_shapes): for n, level in enumerate(block_shape): - for d, s in zip(['x', 'y', 'z'], level): - options['%s%d_blk%d_size' % (d, i, n)] = s + for d, s in zip(['x', 'y', 'z'], level, strict=True): + options[f'{d}{i}_blk{n}_size'] = s solver = setup(space_order=space_order, time_order=time_order, **kwargs) if warmup: @@ -305,11 +312,11 @@ def run(problem, **kwargs): dumpfile = kwargs.pop('dump_norms') if dumpfile: - norms = ["'%s': %f" % (i.name, norm(i)) for i in 
retval[:-1] + norms = [f"'{i.name}': {norm(i):f}" for i in retval[:-1] if isinstance(i, DiscreteFunction)] if rank == 0: with open(dumpfile, 'w') as f: - f.write("{%s}" % ', '.join(norms)) + f.write("{{{}}}".format(', '.join(norms))) return retval @@ -343,13 +350,13 @@ def run_jit_backdoor(problem, **kwargs): op = solver.op_fwd() # Get the filename in the JIT cache - cfile = "%s.c" % str(op._compiler.get_jit_dir().joinpath(op._soname)) + cfile = f"{str(op._compiler.get_jit_dir().joinpath(op._soname))}.c" if not os.path.exists(cfile): # First time we run this problem, let's generate and jit-compile code - op.cfunction - info("You may now edit the generated code in `%s`. " - "Then save the file, and re-run this benchmark." % cfile) + _ = op.cfunction + info(f"You may now edit the generated code in `{cfile}`. " + "Then save the file, and re-run this benchmark.") return info("Running wave propagation Operator...") @@ -364,7 +371,7 @@ def _run_jit_backdoor(): if dumpnorms: for i in retval[:-1]: if isinstance(i, DiscreteFunction): - info("'%s': %f" % (i.name, norm(i))) + info(f"'{i.name}': {norm(i):f}") return retval @@ -405,9 +412,10 @@ def test(problem, **kwargs): set_log_level('DEBUG', comm=MPI.COMM_WORLD) if MPI.COMM_WORLD.size > 1 and not configuration['mpi']: - warning("It seems that you're running over MPI with %d processes, but " - "DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`..." - % MPI.COMM_WORLD.size) + warning( + f'It seems that you are running over MPI with {MPI.COMM_WORLD.size} ' + 'processes, but DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`...' 
+ ) configuration['mpi'] = 'basic' except (TypeError, ModuleNotFoundError): # MPI not available @@ -419,8 +427,6 @@ def test(problem, **kwargs): benchmark(standalone_mode=False) - try: + # In case MPI not available + with suppress(TypeError): MPI.Finalize() - except TypeError: - # MPI not available - pass diff --git a/conftest.py b/conftest.py index 195f3b96ba..65bfddb71c 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,6 @@ import os import sys +from contextlib import suppress from subprocess import check_call import pytest @@ -40,29 +41,29 @@ def skipif(items, whole_module=False): accepted.update({'nodevice', 'noomp'}) unknown = sorted(set(items) - accepted) if unknown: - raise ValueError("Illegal skipif argument(s) `%s`" % unknown) + raise ValueError(f"Illegal skipif argument(s) `{unknown}`") skipit = False for i in items: # Skip if won't run on GPUs if i == 'device' and isinstance(configuration['platform'], Device): - skipit = "device `%s` unsupported" % configuration['platform'].name + skipit = "device `{}` unsupported".format(configuration['platform'].name) break # Skip if won't run on a specific GPU backend langs = configuration._accepted['language'] - if any(i == 'device-%s' % l and configuration['language'] == l for l in langs)\ + if any(i == f'device-{l}' and configuration['language'] == l for l in langs)\ and isinstance(configuration['platform'], Device): - skipit = "language `%s` for device unsupported" % configuration['language'] + skipit = f'language `{configuration["language"]}` for device unsupported' break - if any(i == 'device-%s' % k and isinstance(configuration['compiler'], v) + if any(i == f'device-{k}' and isinstance(configuration['compiler'], v) for k, v in compiler_registry.items()) and\ isinstance(configuration['platform'], Device): - skipit = "compiler `%s` for device unsupported" % configuration['compiler'] + skipit = f'compiler `{configuration["compiler"]}` for device unsupported' break # Skip if must run on GPUs but not currently 
on a GPU if i in ('nodevice', 'nodevice-omp', 'nodevice-acc') and\ not isinstance(configuration['platform'], Device): - skipit = ("must run on device, but currently on `%s`" % - configuration['platform'].name) + skipit = 'must run on device, but currently on ' + skipit += f'`{configuration["platform"].name}`' break # Skip if it won't run with nvc on CPU backend if i == 'cpu64-nvc' and \ @@ -137,9 +138,9 @@ def EVAL(exprs, *args): def get_testname(item): if item.cls is not None: - return "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name) + return f"{item.fspath}::{item.cls.__name__}::{item.name}" else: - return "%s::%s" % (item.fspath, item.name) + return f"{item.fspath}::{item.name}" def set_run_reset(env_vars, call): @@ -179,7 +180,7 @@ def parallel(item, m): if len(m) == 2: nprocs, scheme = m else: - raise ValueError("Can't run test: unexpected mode `%s`" % m) + raise ValueError(f"Can't run test: unexpected mode `{m}`") env_vars = {'DEVITO_MPI': scheme} @@ -189,8 +190,10 @@ def parallel(item, m): # Run xfailing tests to ensure that errors are reported to calling process args = ["-n", "1", pyversion, "-m", "pytest", "-s", "--runxfail", "-qq", testname] if nprocs > 1: - args.extend([":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", - "-s", "--runxfail", "--tb=no", "-qq", "--no-summary", testname]) + args.extend([ + ":", "-n", str(nprocs - 1), pyversion, "-m", "pytest", + "-s", "--runxfail", "-v", "--no-summary", testname + ]) # OpenMPI requires an explicit flag for oversubscription. 
We need it as some # of the MPI tests will spawn lots of processes if mpi_distro == 'OpenMPI': @@ -247,10 +250,8 @@ def pytest_generate_tests(metafunc): @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_call(item): inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0) - try: + with suppress(ValueError): inside_pytest_marker = int(inside_pytest_marker) - except ValueError: - pass if inside_pytest_marker: outcome = yield @@ -281,15 +282,13 @@ def pytest_runtest_makereport(item, call): result = outcome.get_result() inside_pytest_marker = os.environ.get('DEVITO_PYTEST_FLAG', 0) - try: + with suppress(ValueError): inside_pytest_marker = int(inside_pytest_marker) - except ValueError: - pass if inside_pytest_marker: return if item.get_closest_marker("parallel") or \ - item.get_closest_marker("decoupler"): + item.get_closest_marker("decoupler"): # noqa: SIM102 if call.when == 'call' and result.outcome == 'skipped': result.outcome = 'passed' diff --git a/devito/arch/archinfo.py b/devito/arch/archinfo.py index 0273e88a22..020f66323d 100644 --- a/devito/arch/archinfo.py +++ b/devito/arch/archinfo.py @@ -18,7 +18,7 @@ from devito.logger import warning from devito.tools import all_equal, as_tuple, memoized_func -__all__ = [ +__all__ = [ # noqa: RUF022 'platform_registry', 'get_cpu_info', 'get_gpu_info', 'get_visible_devices', 'get_nvidia_cc', 'get_cuda_path', 'get_cuda_version', 'get_hip_path', 'check_cuda_runtime', 'get_m1_llvm_path', 'get_advisor_path', 'Platform', @@ -391,7 +391,7 @@ def cbk(deviceid=0): return None return cbk - gpu_info['mem.%s' % i] = make_cbk(i) + gpu_info[f'mem.{i}'] = make_cbk(i) gpu_info['architecture'] = 'unspecified' gpu_info['vendor'] = 'INTEL' @@ -780,7 +780,7 @@ def __str__(self): return self.name def __repr__(self): - return "TargetPlatform[%s]" % self.name + return f'TargetPlatform[{self.name}]' def _detect_isa(self): return 'unknown' @@ -1141,7 +1141,7 @@ def supports(self, query, language=None): elif query == 
'async-loads' and cc >= 80: # Asynchronous pipeline loads -- introduced in Ampere return True - elif query in ('tma', 'thread-block-cluster') and cc >= 90: + elif query in ('tma', 'thread-block-cluster') and cc >= 90: # noqa: SIM103 # Tensor Memory Accelerator -- introduced in Hopper return True else: @@ -1202,10 +1202,8 @@ def march(cls): try: p1 = Popen(['offload-arch'], stdout=PIPE, stderr=PIPE) except OSError: - try: + with suppress(OSError): p1 = Popen(['mygpu', '-d', fallback], stdout=PIPE, stderr=PIPE) - except OSError: - pass return fallback output, _ = p1.communicate() @@ -1248,7 +1246,7 @@ def node_max_mem_trans_nbytes(platform): elif isinstance(platform, Device): return max(Cpu64.max_mem_trans_nbytes, mmtb0) else: - assert False, f"Unknown platform type: {type(platform)}" + raise AssertionError(f"Unknown platform type: {type(platform)}") # CPUs diff --git a/devito/arch/compiler.py b/devito/arch/compiler.py index edfeec17db..a0b4ce8a75 100644 --- a/devito/arch/compiler.py +++ b/devito/arch/compiler.py @@ -1,6 +1,7 @@ import platform import time import warnings +from contextlib import suppress from functools import partial from hashlib import sha1 from itertools import filterfalse @@ -43,16 +44,20 @@ def sniff_compiler_version(cc, allow_fail=False): return Version("0") except UnicodeDecodeError: return Version("0") - except OSError: + except OSError as e: if allow_fail: return Version("0") else: - raise RuntimeError(f"The `{cc}` compiler isn't available on this system") + raise RuntimeError( + f"The `{cc}` compiler isn't available on this system" + ) from e ver = ver.strip() if ver.startswith("gcc"): compiler = "gcc" - elif ver.startswith("clang") or ver.startswith("Apple LLVM") or ver.startswith("Homebrew clang"): + elif ver.startswith("clang") \ + or ver.startswith("Apple LLVM") \ + or ver.startswith("Homebrew clang"): compiler = "clang" elif ver.startswith("Intel"): compiler = "icx" @@ -92,10 +97,8 @@ def sniff_compiler_version(cc, allow_fail=False): 
pass # Pure integer versions (e.g., ggc5, rather than gcc5.0) need special handling - try: + with suppress(TypeError): ver = Version(float(ver)) - except TypeError: - pass return ver @@ -335,21 +338,21 @@ def make(self, loc, args): logfile = path.join(self.get_jit_dir(), f"{hash_key}.log") errfile = path.join(self.get_jit_dir(), f"{hash_key}.err") - with change_directory(loc), open(logfile, "w") as lf: - with open(errfile, "w") as ef: - - command = ['make'] + args - lf.write("Compilation command:\n") - lf.write(" ".join(command)) - lf.write("\n\n") - try: - check_call(command, stderr=ef, stdout=lf) - except CalledProcessError as e: - raise CompilationError(f'Command "{e.cmd}" return error status' - f'{e.returncode}. ' - f'Unable to compile code.\n' - f'Compile log in {logfile}\n' - f'Compile errors in {errfile}\n') + with change_directory(loc), open(logfile, "w") as lf, open(errfile, "w") as ef: + command = ['make'] + args + lf.write("Compilation command:\n") + lf.write(" ".join(command)) + lf.write("\n\n") + try: + check_call(command, stderr=ef, stdout=lf) + except CalledProcessError as e: + raise CompilationError( + f'Command "{e.cmd}" return error status' + f'{e.returncode}. ' + f'Unable to compile code.\n' + f'Compile log in {logfile}\n' + f'Compile errors in {errfile}\n' + ) from e debug(f"Make <{' '.join(args)}>") def _cmdline(self, files, object=False): @@ -395,9 +398,11 @@ def jit_compile(self, soname, code): # ranks would end up creating different cache dirs cache_dir = cache_dir.joinpath('jit-backdoor') cache_dir.mkdir(parents=True, exist_ok=True) - except FileNotFoundError: - raise ValueError(f"Trying to use the JIT backdoor for `{src_file}`, but " - "the file isn't present") + except FileNotFoundError as e: + raise ValueError( + f"Trying to use the JIT backdoor for `{src_file}`, but " + "the file isn't present" + ) from e # Should the compilation command be emitted? 
debug = configuration['log-level'] == 'DEBUG' @@ -708,12 +713,10 @@ def __init_finalize__(self, **kwargs): # explicitly pass the flags that an `mpicc` would implicitly use compile_flags, link_flags = sniff_mpi_flags('mpicxx') - try: + with suppress(ValueError): # No idea why `-pthread` would pop up among the `compile_flags` + # Just in case they fix it, we wrap it up within a suppress compile_flags.remove('-pthread') - except ValueError: - # Just in case they fix it, we wrap it up within a try-except - pass self.cflags.extend(compile_flags) # Some arguments are for the host compiler @@ -1005,15 +1008,9 @@ def __new__(cls, *args, **kwargs): elif isinstance(platform, IntelDevice): _base = OneapiCompiler elif isinstance(platform, NvidiaDevice): - if language == 'cuda': - _base = CudaCompiler - else: - _base = NvidiaCompiler + _base = CudaCompiler if language == 'cuda' else NvidiaCompiler elif platform is AMDGPUX: - if language == 'hip': - _base = HipCompiler - else: - _base = AOMPCompiler + _base = HipCompiler if language == 'hip' else AOMPCompiler else: _base = GNUCompiler diff --git a/devito/builtins/arithmetic.py b/devito/builtins/arithmetic.py index 6c19f056be..64f1e0547b 100644 --- a/devito/builtins/arithmetic.py +++ b/devito/builtins/arithmetic.py @@ -33,7 +33,7 @@ def norm(f, order=2): op = dv.Operator([dv.Eq(s, 0.0)] + eqns + [dv.Inc(s, Pow(dv.Abs(p), order)), dv.Eq(n[0], s)], - name='norm%d' % order) + name=f'norm{order}') op.apply(**kwargs) v = np.power(n.data[0], 1/order) @@ -63,24 +63,24 @@ def sum(f, dims=None): new_dims = tuple(d for d in f.dimensions if d not in dims) shape = tuple(f._size_domain[d] for d in new_dims) if f.is_TimeFunction and f.time_dim not in dims: - out = f._rebuild(name="%ssum" % f.name, shape=shape, dimensions=new_dims, + out = f._rebuild(name=f'{f.name}sum', shape=shape, dimensions=new_dims, initializer=np.empty(0)) elif f.is_SparseTimeFunction: if f.time_dim in dims: # Sum over time -> SparseFunction new_coords = 
f.coordinates._rebuild( - name="%ssum_coords" % f.name, initializer=f.coordinates.initializer + name=f'{f.name}sum_coords', initializer=f.coordinates.initializer ) - out = dv.SparseFunction(name="%ssum" % f.name, grid=f.grid, + out = dv.SparseFunction(name=f'{f.name}sum', grid=f.grid, dimensions=new_dims, npoint=f.shape[1], coordinates=new_coords) else: # Sum over rec -> TimeFunction - out = dv.TimeFunction(name="%ssum" % f.name, grid=f.grid, shape=shape, + out = dv.TimeFunction(name=f'{f.name}sum', grid=f.grid, shape=shape, dimensions=new_dims, space_order=0, time_order=f.time_order) else: - out = dv.Function(name="%ssum" % f.name, grid=f.grid, + out = dv.Function(name=f'{f.name}sum', grid=f.grid, space_order=f.space_order, shape=shape, dimensions=new_dims) @@ -217,4 +217,4 @@ def _reduce_func(f, func, mfunc): else: return v.item() else: - raise ValueError("Expected Function, got `%s`" % type(f)) + raise ValueError(f'Expected Function, got `{type(f)}`') diff --git a/devito/builtins/initializers.py b/devito/builtins/initializers.py index eb44c0360e..d8c6055e20 100644 --- a/devito/builtins/initializers.py +++ b/devito/builtins/initializers.py @@ -59,18 +59,18 @@ def assign(f, rhs=0, options=None, name='assign', assign_halo=False, **kwargs): eqs = [] if options: - for i, j, k in zip(as_list(f), rhs, options): + for i, j, k in zip(as_list(f), rhs, options, strict=True): if k is not None: eqs.append(dv.Eq(i, j, **k)) else: eqs.append(dv.Eq(i, j)) else: - for i, j in zip(as_list(f), rhs): + for i, j in zip(as_list(f), rhs, strict=True): eqs.append(dv.Eq(i, j)) if assign_halo: subs = {} - for d, h in zip(f.dimensions, f._size_halo): + for d, h in zip(f.dimensions, f._size_halo, strict=True): if sum(h) == 0: continue subs[d] = dv.CustomDimension(name=d.name, parent=d, @@ -143,15 +143,15 @@ def __init__(self, lw): self.lw = lw def define(self, dimensions): - return {d: ('middle', l, l) for d, l in zip(dimensions, self.lw)} + return {d: ('middle', l, l) for d, l in 
zip(dimensions, self.lw, strict=True)} def create_gaussian_weights(sigma, lw): weights = [w/w.sum() for w in (np.exp(-0.5/s**2*(np.linspace(-l, l, 2*l+1))**2) - for s, l in zip(sigma, lw))] + for s, l in zip(sigma, lw, strict=True))] return as_tuple(np.array(w) for w in weights) def fset(f, g): - indices = [slice(l, -l, 1) for _, l in zip(g.dimensions, lw)] + indices = [slice(l, -l, 1) for _, l in zip(g.dimensions, lw, strict=True)] slices = (slice(None, None, 1), )*g.ndim if isinstance(f, np.ndarray): f[slices] = g.data[tuple(indices)] @@ -182,7 +182,7 @@ def fset(f, g): # Create the padded grid: objective_domain = ObjectiveDomain(lw) - shape_padded = tuple([np.array(s) + 2*l for s, l in zip(shape, lw)]) + shape_padded = tuple([np.array(s) + 2*l for s, l in zip(shape, lw, strict=True)]) extent_padded = tuple([s-1 for s in shape_padded]) grid = dv.Grid(shape=shape_padded, subdomains=objective_domain, extent=extent_padded) @@ -193,7 +193,7 @@ def fset(f, g): weights = create_gaussian_weights(sigma, lw) mapper = {} - for d, l, w in zip(f_c.dimensions, lw, weights): + for d, l, w in zip(f_c.dimensions, lw, weights, strict=True): lhs = [] rhs = [] options = [] @@ -238,13 +238,15 @@ def _initialize_function(function, data, nbl, mapper=None, mode='constant'): def buff(i, j): return [(i + k - 2*max(max(nbl))) for k in j] - b = [min(l) for l in (w for w in (buff(i, j) for i, j in zip(local_size, halo)))] + b = [min(l) for l in ( + w for w in (buff(i, j) for i, j in zip(local_size, halo, strict=True)) + )] if any(np.array(b) < 0): - raise ValueError("Function `%s` halo is not sufficiently thick." 
% function) + raise ValueError(f'Function `{function}` halo is not sufficiently thick.') - for d, (nl, nr) in zip(function.space_dimensions, as_tuple(nbl)): - dim_l = dv.SubDimension.left(name='abc_%s_l' % d.name, parent=d, thickness=nl) - dim_r = dv.SubDimension.right(name='abc_%s_r' % d.name, parent=d, thickness=nr) + for d, (nl, nr) in zip(function.space_dimensions, as_tuple(nbl), strict=True): + dim_l = dv.SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=nl) + dim_r = dv.SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=nr) if mode == 'constant': subsl = nl subsr = d.symbolic_max - nr @@ -259,7 +261,7 @@ def buff(i, j): rhs.append(function.subs({d: subsr})) options.extend([None, None]) - if mapper and d in mapper.keys(): + if mapper and d in mapper: exprs = mapper[d] lhs_extra = exprs['lhs'] rhs_extra = exprs['rhs'] @@ -353,8 +355,9 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', if not isinstance(data, (list, tuple)): raise TypeError("Expected a list of `data`") elif len(function) != len(data): - raise ValueError("Expected %d `data` items, got %d" % - (len(function), len(data))) + raise ValueError( + f'Expected {len(function)} `data` items, got {len(data)}' + ) if mapper is not None: raise NotImplementedError("Unsupported `mapper` with batching") @@ -374,14 +377,14 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', f._create_data() if nbl == 0: - for f, data in zip(functions, datas): + for f, data in zip(functions, datas, strict=True): if isinstance(data, dv.Function): f.data[:] = data.data[:] else: f.data[:] = data[:] else: lhss, rhss, optionss = [], [], [] - for f, data in zip(functions, datas): + for f, data in zip(functions, datas, strict=True): lhs, rhs, options = _initialize_function(f, data, nbl, mapper, mode) @@ -391,7 +394,7 @@ def initialize_function(function, data, nbl, mapper=None, mode='constant', assert len(lhss) == len(rhss) == len(optionss) - name = name or 
'initialize_%s' % '_'.join(f.name for f in functions) + name = name or f'initialize_{"_".join(f.name for f in functions)}' assign(lhss, rhss, options=optionss, name=name, **kwargs) if pad_halo: diff --git a/devito/core/autotuning.py b/devito/core/autotuning.py index 76812edf2e..fa833b84a1 100644 --- a/devito/core/autotuning.py +++ b/devito/core/autotuning.py @@ -38,8 +38,8 @@ def autotune(operator, args, level, mode): key = [level, mode] accepted = configuration._accepted['autotuning'] if key not in accepted: - raise ValueError("The accepted `(level, mode)` combinations are `%s`; " - "provided `%s` instead" % (accepted, key)) + raise ValueError(f"The accepted `(level, mode)` combinations are `{accepted}`; " + f"provided `{key}` instead") # We get passed all the arguments, but the cfunction only requires a subset at_args = OrderedDict([(p.name, args[p.name]) for p in operator.parameters]) @@ -84,7 +84,9 @@ def autotune(operator, args, level, mode): if timesteps is None: return args, {} else: - warning("cannot perform autotuning with %d time loops; skipping" % len(steppers)) + warning( + f'Cannot perform autotuning with {len(steppers)} time loops; skipping' + ) return args, {} # Use a fresh Timer for auto-tuning @@ -134,8 +136,11 @@ def autotune(operator, args, level, mode): # Record timing elapsed = timer.total timings.setdefault(nt, OrderedDict()).setdefault(n, {})[bs] = elapsed - log("run <%s> took %f (s) in %d timesteps" % - (','.join('%s=%s' % i for i in run), elapsed, timesteps)) + log( + f'run <{",".join(f"{i[0]}={i[1]}" for i in run)}> ' + f'took {elapsed} (s) ' + f'in {timesteps} timesteps' + ) # Prepare for the next autotuning run update_time_bounds(stepper, at_args, timesteps, mode) @@ -154,7 +159,7 @@ def autotune(operator, args, level, mode): best = min(mapper, key=mapper.get) best = OrderedDict(best + tuple(mapper[best].args)) best.pop(None, None) - log("selected <%s>" % (','.join('%s=%s' % i for i in best.items()))) + log("selected 
<{}>".format(','.join('{}={}'.format(*i) for i in best.items()))) except ValueError: warning("could not perform any runs") return args, {} @@ -210,10 +215,9 @@ def init_time_bounds(stepper, at_args, args): return False else: at_args[dim.max_name] = at_args[dim.min_name] + options['squeezer'] - if dim.size_name in args: - if not isinstance(args[dim.size_name], range): - # May need to shrink to avoid OOB accesses - at_args[dim.max_name] = min(at_args[dim.max_name], args[dim.max_name]) + if dim.size_name in args and not isinstance(args[dim.size_name], range): + # May need to shrink to avoid OOB accesses + at_args[dim.max_name] = min(at_args[dim.max_name], args[dim.max_name]) if at_args[dim.min_name] > at_args[dim.max_name]: warning("too few time iterations; skipping") return False @@ -271,9 +275,9 @@ def calculate_nblocks(tree, blockable): collapsed = tree[index:index + (ncollapsed or index+1)] blocked = [i.dim for i in collapsed if i.dim in blockable] remainders = [(d.root.symbolic_max-d.root.symbolic_min+1) % d.step for d in blocked] - niters = [d.root.symbolic_max - i for d, i in zip(blocked, remainders)] + niters = [d.root.symbolic_max - i for d, i in zip(blocked, remainders, strict=True)] nblocks = prod((i - d.root.symbolic_min + 1) / d.step - for d, i in zip(blocked, niters)) + for d, i in zip(blocked, niters, strict=True)) return nblocks @@ -299,7 +303,7 @@ def generate_block_shapes(blockable, args, level): if level in ['aggressive', 'max']: # Ramp up to larger block shapes handle = tuple((i, options['blocksize-l0'][-1]) for i, _ in ret[0]) - for i in range(3): + for _ in range(3): new_bs = tuple((b, v*2) for b, v in handle) ret.insert(ret.index(handle) + 1, new_bs) handle = new_bs @@ -324,7 +328,7 @@ def generate_block_shapes(blockable, args, level): level_1 = [d for d, v in mapper.items() if v == 1] if level_1: assert len(level_1) == len(level_0) - assert all(d1.parent is d0 for d0, d1 in zip(level_0, level_1)) + assert all(d1.parent is d0 for d0, d1 in 
zip(level_0, level_1, strict=True)) for bs in list(ret): handle = [] for v in options['blocksize-l1']: @@ -369,9 +373,9 @@ def generate_nthreads(nthreads, args, level): ret.extend([((name, nthread),) for nthread in cases]) if basic not in ret: - warning("skipping `%s`; perhaps you've set OMP_NUM_THREADS to a " + warning(f"skipping `{dict(basic)}`; perhaps you've set OMP_NUM_THREADS to a " "non-standard value while attempting autotuning in " - "`max` mode?" % dict(basic)) + "`max` mode?") return ret @@ -385,8 +389,8 @@ def generate_nthreads(nthreads, args, level): def log(msg): - perf("AutoTuner: %s" % msg) + perf(f"AutoTuner: {msg}") def warning(msg): - _warning("AutoTuner: %s" % msg) + _warning(f"AutoTuner: {msg}") diff --git a/devito/core/cpu.py b/devito/core/cpu.py index b090552838..5b5aa8448e 100644 --- a/devito/core/cpu.py +++ b/devito/core/cpu.py @@ -106,8 +106,9 @@ def _normalize_kwargs(cls, **kwargs): oo.pop('gpu-create', None) if oo: - raise InvalidOperator("Unrecognized optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unrecognized optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) diff --git a/devito/core/gpu.py b/devito/core/gpu.py index eaf68b0a5b..7f88d9aa0e 100644 --- a/devito/core/gpu.py +++ b/devito/core/gpu.py @@ -116,8 +116,9 @@ def _normalize_kwargs(cls, **kwargs): o['scalar-min-type'] = oo.pop('scalar-min-type', cls.SCALAR_MIN_TYPE) if oo: - raise InvalidOperator("Unsupported optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unsupported optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) diff --git a/devito/core/intel.py b/devito/core/intel.py index 6ab2f70b5f..429bbc3a02 100644 --- a/devito/core/intel.py +++ b/devito/core/intel.py @@ -4,14 +4,14 @@ ) __all__ = [ - 'Intel64AdvCOperator', - 'Intel64AdvCXXOmpOperator', - 'Intel64AdvOmpOperator', - 'Intel64CXXAdvCOperator', - 'Intel64FsgCOperator', - 'Intel64FsgCXXOmpOperator', - 
'Intel64FsgCXXOperator', - 'Intel64FsgOmpOperator', + 'Intel64AdvCOperator', + 'Intel64AdvCXXOmpOperator', + 'Intel64AdvOmpOperator', + 'Intel64CXXAdvCOperator', + 'Intel64FsgCOperator', + 'Intel64FsgCXXOmpOperator', + 'Intel64FsgCXXOperator', + 'Intel64FsgOmpOperator', ] diff --git a/devito/core/operator.py b/devito/core/operator.py index 1bf7ed41bc..35a34e5898 100644 --- a/devito/core/operator.py +++ b/devito/core/operator.py @@ -1,4 +1,5 @@ from collections.abc import Iterable +from contextlib import suppress from functools import cached_property import numpy as np @@ -182,8 +183,9 @@ def _normalize_kwargs(cls, **kwargs): o['parallel'] = False if oo: - raise InvalidOperator("Unrecognized optimization options: [%s]" - % ", ".join(list(oo))) + raise InvalidOperator( + f'Unrecognized optimization options: [{", ".join(list(oo))}]' + ) kwargs['options'].update(o) @@ -194,7 +196,7 @@ def _check_kwargs(cls, **kwargs): oo = kwargs['options'] if oo['mpi'] and oo['mpi'] not in cls.MPI_MODES: - raise InvalidOperator("Unsupported MPI mode `%s`" % oo['mpi']) + raise InvalidOperator("Unsupported MPI mode `{}`".format(oo['mpi'])) if oo['cse-algo'] not in ('basic', 'smartsort', 'advanced'): raise InvalidOperator("Illegal `cse-algo` value") @@ -224,8 +226,9 @@ def _autotune(self, args, setup): else: args, summary = autotune(self, args, level, mode) else: - raise ValueError("Expected bool, str, or 2-tuple, got `%s` instead" - % type(setup)) + raise ValueError( + f"Expected bool, str, or 2-tuple, got `{type(setup)}` instead" + ) # Record the tuned values self._state.setdefault('autotuning', []).append(summary) @@ -285,10 +288,10 @@ def _build(cls, expressions, **kwargs): for i in passes: if i not in cls._known_passes: if i in cls._known_passes_disabled: - warning("Got explicit pass `%s`, but it's unsupported on an " - "Operator of type `%s`" % (i, str(cls))) + warning(f"Got explicit pass `{i}`, but it's unsupported on an " + f"Operator of type `{str(cls)}`") else: - raise 
InvalidOperator("Unknown pass `%s`" % i) + raise InvalidOperator(f"Unknown pass `{i}`") return super()._build(expressions, **kwargs) @@ -302,10 +305,8 @@ def _specialize_dsl(cls, expressions, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): expressions = passes_mapper[i](expressions, **kwargs) - except KeyError: - pass return expressions @@ -319,10 +320,8 @@ def _specialize_exprs(cls, expressions, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): expressions = passes_mapper[i](expressions) - except KeyError: - pass return expressions @@ -336,10 +335,8 @@ def _specialize_clusters(cls, clusters, **kwargs): # Call passes for i in passes: - try: + with suppress(KeyError): clusters = passes_mapper[i](clusters) - except KeyError: - pass return clusters @@ -460,9 +457,9 @@ def __new__(cls, items, default=None, sparse=None, reduce=None): # E.g., ((32, 4, 8),) items = (ParTileArg(x),) else: - raise ValueError("Expected int or tuple, got %s instead" % type(x)) + raise ValueError(f"Expected int or tuple, got {type(x)} instead") else: - raise ValueError("Expected bool or iterable, got %s instead" % type(items)) + raise ValueError(f"Expected bool or iterable, got {type(items)} instead") obj = super().__new__(cls, *items) obj.default = as_tuple(default) diff --git a/devito/core/power.py b/devito/core/power.py index 0b0fe86533..8d475ab084 100644 --- a/devito/core/power.py +++ b/devito/core/power.py @@ -3,10 +3,10 @@ ) __all__ = [ - 'PowerAdvCOperator', - 'PowerAdvCXXOmpOperator', - 'PowerAdvOmpOperator', - 'PowerCXXAdvCOperator', + 'PowerAdvCOperator', + 'PowerAdvCXXOmpOperator', + 'PowerAdvOmpOperator', + 'PowerCXXAdvCOperator', ] PowerAdvCOperator = Cpu64AdvCOperator diff --git a/devito/data/data.py b/devito/data/data.py index f9280305f6..7da3b0eaf8 100644 --- a/devito/data/data.py +++ b/devito/data/data.py @@ -70,7 +70,7 @@ def __new__(cls, shape, dtype, decomposition=None, modulo=None, # Sanity check -- A Dimension can't be 
at the same time modulo-iterated # and MPI-distributed - assert all(i is None for i, j in zip(obj._decomposition, obj._modulo) + assert all(i is None for i, j in zip(obj._decomposition, obj._modulo, strict=True) if j is True) return obj @@ -118,10 +118,13 @@ def __array_finalize__(self, obj): # From `__getitem__` self._distributor = obj._distributor glb_idx = obj._normalize_index(obj._index_stash) - self._modulo = tuple(m for i, m in zip(glb_idx, obj._modulo) - if not is_integer(i)) + self._modulo = tuple( + m + for i, m in zip(glb_idx, obj._modulo, strict=False) + if not is_integer(i) + ) decomposition = [] - for i, dec in zip(glb_idx, obj._decomposition): + for i, dec in zip(glb_idx, obj._decomposition, strict=False): if is_integer(i): continue elif dec is None: @@ -240,7 +243,7 @@ def __getitem__(self, glb_idx, comm_type, gather_rank=None): shape = [r.stop-r.start for r in self._distributor.all_ranges[i]] idx = [slice(r.start - d.glb_min, r.stop - d.glb_min, r.step) for r, d in zip(self._distributor.all_ranges[i], - self._distributor.decomposition)] + self._distributor.decomposition, strict=True)] for j in range(len(self.shape) - len(self._distributor.glb_shape)): shape.insert(j, glb_shape[j]) idx.insert(j, slice(0, glb_shape[j]+1, 1)) @@ -309,7 +312,7 @@ def __getitem__(self, glb_idx, comm_type, gather_rank=None): # Check if dimensions of the view should now be reduced to # be consistent with those of an equivalent NumPy serial view if not is_gather: - newshape = tuple(s for s, i in zip(retval.shape, loc_idx) + newshape = tuple(s for s, i in zip(retval.shape, loc_idx, strict=True) if type(i) is not np.int64) else: newshape = () @@ -373,7 +376,7 @@ def __setitem__(self, glb_idx, val, comm_type): glb_idx = self._normalize_index(glb_idx) glb_idx, val = self._process_args(glb_idx, val) val_idx = [index_dist_to_repl(i, dec) for i, dec in - zip(glb_idx, self._decomposition)] + zip(glb_idx, self._decomposition, strict=True)] if NONLOCAL in val_idx: # no-op return @@ 
-388,7 +391,7 @@ def __setitem__(self, glb_idx, val, comm_type): val_idx = val_idx[len(val_idx)-val.ndim:] processed = [] # Handle step size > 1 - for i, j in zip(glb_idx, val_idx): + for i, j in zip(glb_idx, val_idx, strict=False): if isinstance(i, slice) and i.step is not None and i.step > 1 and \ j.stop > j.start: processed.append(slice(j.start, j.stop, 1)) @@ -435,12 +438,9 @@ def _process_args(self, idx, val): for i in as_tuple(idx)): processed = [] transform = [] - for j, k in zip(idx, self._distributor.glb_shape): + for j, k in zip(idx, self._distributor.glb_shape, strict=True): if isinstance(j, slice) and j.step is not None and j.step < 0: - if j.start is None: - stop = None - else: - stop = j.start + 1 + stop = None if j.start is None else j.start + 1 if j.stop is None and j.start is None: start = int(np.mod(k-1, -j.step)) elif j.stop is None: @@ -482,7 +482,9 @@ def _index_glb_to_loc(self, glb_idx): return glb_idx loc_idx = [] - for i, s, mod, dec in zip(glb_idx, self.shape, self._modulo, self._decomposition): + for i, s, mod, dec in zip( + glb_idx, self.shape, self._modulo, self._decomposition, strict=False + ): if mod is True: # Need to wrap index based on modulo v = index_apply_modulo(i, s) @@ -490,10 +492,11 @@ def _index_glb_to_loc(self, glb_idx): # Convert the user-provided global indices into local indices. 
try: v = convert_index(i, dec, mode='glb_to_loc') - except TypeError: + except TypeError as e: if self._is_decomposed: - raise NotImplementedError("Unsupported advanced indexing with " - "MPI-distributed Data") + raise NotImplementedError( + "Unsupported advanced indexing with MPI-distributed Data" + ) from e v = i else: v = i @@ -523,7 +526,7 @@ def _set_global_idx(self, val, idx, val_idx): # Convert integers to slices so that shape dims are preserved if is_integer(as_tuple(idx)[0]): data_glb_idx.append(slice(0, 1, 1)) - for i, j in zip(data_loc_idx, val._decomposition): + for i, j in zip(data_loc_idx, val._decomposition, strict=True): if not j.loc_empty: data_glb_idx.append(j.index_loc_to_glb(i)) else: @@ -536,17 +539,16 @@ def _set_global_idx(self, val, idx, val_idx): data_glb_idx.insert(index, value) # Based on `data_glb_idx` the indices to which the locally stored data # block correspond can now be computed: - for i, j, k in zip(data_glb_idx, as_tuple(idx), self._decomposition): + for i, j, k in zip( + data_glb_idx, as_tuple(idx), self._decomposition, strict=False + ): if is_integer(j): mapped_idx.append(j) continue elif isinstance(j, slice) and j.start is None: norm = 0 elif isinstance(j, slice) and j.start is not None: - if j.start >= 0: - norm = j.start - else: - norm = j.start+k.glb_max+1 + norm = j.start if j.start >= 0 else j.start+k.glb_max+1 else: norm = j if i is not None: @@ -580,7 +582,7 @@ def _gather(self, start=None, stop=None, step=1, rank=0): if isinstance(step, int) or step is None: step = [step for _ in self.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) if self._distributor.is_parallel and self._distributor.nprocs > 1: diff --git a/devito/data/decomposition.py b/devito/data/decomposition.py index 7faa0753c0..72c6de39c3 100644 --- a/devito/data/decomposition.py +++ b/devito/data/decomposition.py @@ -109,17 +109,17 @@ def __eq__(self, 
o): if not isinstance(o, Decomposition): return False return self.local == o.local and len(self) == len(o) and\ - all(np.all(i == j) for i, j in zip(self, o)) + all(np.all(i == j) for i, j in zip(self, o, strict=True)) def __repr__(self): ret = [] for i, v in enumerate(self): bounds = (min(v, default=None), max(v, default=None)) - item = '[]' if bounds == (None, None) else '[%d,%d]' % bounds + item = '[]' if bounds == (None, None) else f'[{bounds[0]},{bounds[1]}]' if self.local == i: - item = "<<%s>>" % item + item = f"<<{item}>>" ret.append(item) - return 'Decomposition(%s)' % ', '.join(ret) + return f'Decomposition({", ".join(ret)})' def __call__(self, *args, mode='glb_to_loc', rel=True): """ @@ -248,7 +248,7 @@ def index_glb_to_loc(self, *args, rel=True): # index_glb_to_loc(slice(...)) if isinstance(glb_idx, tuple): if len(glb_idx) != 2: - raise TypeError("Cannot convert index from `%s`" % type(glb_idx)) + raise TypeError(f"Cannot convert index from `{type(glb_idx)}`") if self.loc_empty: return (-1, -3) glb_idx_min, glb_idx_max = glb_idx @@ -275,7 +275,7 @@ def index_glb_to_loc(self, *args, rel=True): else glb_idx.start retfunc = lambda a, b: slice(b, a - 1, glb_idx.step) else: - raise TypeError("Cannot convert index from `%s`" % type(glb_idx)) + raise TypeError(f"Cannot convert index from `{type(glb_idx)}`") # -> Handle negative min/max if glb_idx_min is not None and glb_idx_min < 0: glb_idx_min = glb_max + glb_idx_min + 1 @@ -356,7 +356,7 @@ def index_glb_to_loc(self, *args, rel=True): else: return None else: - raise TypeError("Expected 1 or 2 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 or 2 arguments, found {len(args)}') def index_loc_to_glb(self, *args): """ @@ -415,7 +415,7 @@ def index_loc_to_glb(self, *args): # index_loc_to_glb((min, max)) if isinstance(loc_idx, tuple): if len(loc_idx) != 2: - raise TypeError("Cannot convert index from `%s`" % type(loc_idx)) + raise TypeError(f"Cannot convert index from `{type(loc_idx)}`") shifted = 
[slice(-1, -2, 1) if (i < 0 or i > rank_length) else i + self.loc_abs_min for i in loc_idx] return as_tuple(shifted) @@ -448,7 +448,7 @@ def index_loc_to_glb(self, *args): glb_stop = loc_idx.stop + self.loc_abs_min return slice(glb_start, glb_stop, loc_idx.step) else: - raise TypeError("Expected 1 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 arguments, found {len(args)}') def reshape(self, *args): """ @@ -520,7 +520,7 @@ def reshape(self, *args): elif len(args) == 2: nleft, nright = args else: - raise TypeError("Expected 1 or 2 arguments, found %d" % len(args)) + raise TypeError(f'Expected 1 or 2 arguments, found {len(args)}') items = list(self) diff --git a/devito/data/utils.py b/devito/data/utils.py index ab8db341e6..c2f044ddaf 100644 --- a/devito/data/utils.py +++ b/devito/data/utils.py @@ -54,7 +54,7 @@ def index_apply_modulo(idx, modulo): elif isinstance(idx, np.ndarray): return idx else: - raise ValueError("Cannot apply modulo to index of type `%s`" % type(idx)) + raise ValueError(f"Cannot apply modulo to index of type `{type(idx)}`") def index_dist_to_repl(idx, decomposition): @@ -64,16 +64,13 @@ def index_dist_to_repl(idx, decomposition): # Derive shift value if isinstance(idx, slice): - if idx.step is None or idx.step >= 0: - value = idx.start - else: - value = idx.stop + value = idx.start if idx.step is None or idx.step >= 0 else idx.stop else: value = idx if value is None: value = 0 elif not is_integer(value): - raise ValueError("Cannot derive shift value from type `%s`" % type(value)) + raise ValueError(f"Cannot derive shift value from type `{type(value)}`") if value < 0: value += decomposition.glb_max + 1 @@ -90,12 +87,11 @@ def index_dist_to_repl(idx, decomposition): elif isinstance(idx, np.ndarray): return idx - value elif isinstance(idx, slice): - if idx.step is not None and idx.step < 0: - if idx.stop is None: - return slice(idx.start - value, None, idx.step) + if idx.step is not None and idx.step < 0 and idx.stop is None: + 
return slice(idx.start - value, None, idx.step) return slice(idx.start - value, idx.stop - value, idx.step) else: - raise ValueError("Cannot apply shift to type `%s`" % type(idx)) + raise ValueError(f"Cannot apply shift to type `{type(idx)}`") def convert_index(idx, decomposition, mode='glb_to_loc'): @@ -107,7 +103,7 @@ def convert_index(idx, decomposition, mode='glb_to_loc'): elif isinstance(idx, np.ndarray): return np.vectorize(lambda i: decomposition(i, mode=mode))(idx).astype(idx.dtype) else: - raise ValueError("Cannot convert index of type `%s` " % type(idx)) + raise ValueError(f"Cannot convert index of type `{type(idx)}` ") def index_handle_oob(idx): @@ -341,7 +337,7 @@ def mpi_index_maps(loc_idx, shape, topology, coords, comm): owner = owners[index] my_slice = n_rank_slice[owner] rnorm_index = [] - for j, k in zip(my_slice, index): + for j, k in zip(my_slice, index, strict=True): rnorm_index.append(k-j.start) local_si[index] = as_tuple(rnorm_index) it.iternext() @@ -387,7 +383,7 @@ def flip_idx(idx, decomposition): (slice(8, 11, 1),) """ processed = [] - for i, j in zip(as_tuple(idx), decomposition): + for i, j in zip(as_tuple(idx), decomposition, strict=False): if isinstance(i, slice) and i.step is not None and i.step < 0: if i.start is None: stop = None @@ -407,7 +403,7 @@ def flip_idx(idx, decomposition): start = i.start + j.glb_max + 1 else: start = i.start - if i.stop is not None and i.stop < 0: + if i.stop is not None and i.stop < 0: # noqa: SIM108 stop = i.stop + j.glb_max + 1 else: stop = i.stop diff --git a/devito/finite_differences/coefficients.py b/devito/finite_differences/coefficients.py index 543bb5c9ba..8a9b66b826 100644 --- a/devito/finite_differences/coefficients.py +++ b/devito/finite_differences/coefficients.py @@ -5,7 +5,7 @@ class Coefficient: def __init__(self, deriv_order, function, dimension, weights): - deprecations.coeff_warn + _ = deprecations.coeff_warn self._weights = weights self._deriv_order = deriv_order self._function = 
function @@ -34,7 +34,7 @@ def weights(self): class Substitutions: def __init__(self, *args): - deprecations.coeff_warn + _ = deprecations.coeff_warn if any(not isinstance(arg, Coefficient) for arg in args): raise TypeError("Non Coefficient object within input") diff --git a/devito/finite_differences/derivative.py b/devito/finite_differences/derivative.py index 8d572e2a69..ecb1060631 100644 --- a/devito/finite_differences/derivative.py +++ b/devito/finite_differences/derivative.py @@ -1,5 +1,6 @@ from collections import defaultdict from collections.abc import Iterable +from contextlib import suppress from functools import cached_property from itertools import chain @@ -283,7 +284,7 @@ def _validate_fd_order(fd_order, expr, dims, dcounter): expr.time_order if getattr(d, 'is_Time', False) else expr.space_order - for d in dcounter.keys() + for d in dcounter ) return fd_order @@ -467,10 +468,7 @@ def T(self): This is really useful for more advanced FD definitions. For example the conventional Laplacian is `.dxl.T * .dxl` """ - if self._transpose == direct: - adjoint = transpose - else: - adjoint = direct + adjoint = transpose if self._transpose == direct else direct return self._rebuild(transpose=adjoint) @@ -493,7 +491,7 @@ def _eval_at(self, func): x0 = func.indices_ref.getters psubs = {} nx0 = x0.copy() - for d, d0 in x0.items(): + for d, _ in x0.items(): if d in self.dims: # d is a valid Derivative dimension continue @@ -570,10 +568,8 @@ def _eval_fd(self, expr, **kwargs): expr = interp_for_fd(expr, x0_interp, **kwargs) # Step 2: Evaluate derivatives within expression - try: + with suppress(AttributeError): expr = expr._evaluate(**kwargs) - except AttributeError: - pass # If True, the derivative will be fully expanded as a sum of products, # otherwise an IndexSum will returned diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py index f0d2b77d1f..63f5e9e5df 100644 --- a/devito/finite_differences/differentiable.py 
+++ b/devito/finite_differences/differentiable.py @@ -170,7 +170,9 @@ def _eval_at(self, func): if not func.is_Staggered: # Cartesian grid, do no waste time return self - return self.func(*[getattr(a, '_eval_at', lambda x: a)(func) for a in self.args]) + return self.func(*[ + getattr(a, '_eval_at', lambda x: a)(func) for a in self.args # noqa: B023 + ]) # false positive def _subs(self, old, new, **hints): if old == self: @@ -449,11 +451,12 @@ def has(self, *pattern): """ for p in pattern: # Following sympy convention, return True if any is found - if isinstance(p, type) and issubclass(p, sympy.Symbol): + if isinstance(p, type) \ + and issubclass(p, sympy.Symbol) \ + and any(isinstance(i, p) for i in self.free_symbols): # Symbols (and subclasses) are the leaves of an expression, and they # are promptly available via `free_symbols`. So this is super quick - if any(isinstance(i, p) for i in self.free_symbols): - return True + return True return super().has(*pattern) def has_free(self, *patterns): @@ -500,8 +503,10 @@ def __new__(cls, *args, **kwargs): return obj def subs(self, *args, **kwargs): - return self.func(*[getattr(a, 'subs', lambda x: a)(*args, **kwargs) - for a in self.args], evaluate=False) + return self.func( + *[getattr(a, 'subs', lambda x: a)(*args, **kwargs) # noqa: B023 + for a in self.args], evaluate=False + ) # false positive _subs = Differentiable._subs @@ -592,10 +597,7 @@ def __new__(cls, *args, **kwargs): return sympy.S.Zero # a*1 -> a - if scalar - 1 == 0: - args = others - else: - args = [scalar] + others + args = others if scalar - 1 == 0 else [scalar] + others # Reorder for homogeneity with pure SymPy types _mulsort(args) @@ -636,7 +638,9 @@ def _gather_for_diff(self): ref_inds = func_args.indices_ref.getters for f in self.args: - if f not in self._args_diff or f is func_args or isinstance(f, DifferentiableFunction): + if f not in self._args_diff \ + or f is func_args \ + or isinstance(f, DifferentiableFunction): new_args.append(f) else: 
ind_f = f.indices_ref.getters @@ -743,20 +747,20 @@ def __new__(cls, expr, dimensions, **kwargs): except AttributeError: pass raise ValueError("Expected Dimension with numeric size, " - "got `%s` instead" % d) + f"got `{d}` instead") # TODO: `has_free` only available with SymPy v>=1.10 # We should start using `not expr.has_free(*dimensions)` once we drop # support for SymPy 1.8<=v<1.0 if not all(d in expr.free_symbols for d in dimensions): - raise ValueError("All Dimensions `%s` must appear in `expr` " - "as free variables" % str(dimensions)) + raise ValueError(f"All Dimensions `{str(dimensions)}` must appear in `expr` " + "as free variables") for i in expr.find(IndexSum): for d in dimensions: if d in i.dimensions: - raise ValueError("Dimension `%s` already appears in a " - "nested tensor contraction" % d) + raise ValueError(f"Dimension `{d}` already appears in a " + "nested tensor contraction") obj = sympy.Expr.__new__(cls, expr) obj._expr = expr @@ -765,8 +769,11 @@ def __new__(cls, expr, dimensions, **kwargs): return obj def __repr__(self): - return "%s(%s, (%s))" % (self.__class__.__name__, self.expr, - ', '.join(d.name for d in self.dimensions)) + return "{}({}, ({}))".format( + self.__class__.__name__, + self.expr, + ', '.join(d.name for d in self.dimensions) + ) __str__ = __repr__ @@ -800,7 +807,7 @@ def _evaluate(self, **kwargs): values = product(*[list(d.range) for d in self.dimensions]) terms = [] for i in values: - mapper = dict(zip(self.dimensions, i)) + mapper = dict(zip(self.dimensions, i, strict=True)) terms.append(expr.xreplace(mapper)) return sum(terms) @@ -840,7 +847,7 @@ def __init_finalize__(self, *args, **kwargs): assert isinstance(weights, (list, tuple, np.ndarray)) # Normalize `weights` - from devito.symbolics import pow_to_mul # noqa, sigh + from devito.symbolics import pow_to_mul weights = tuple(pow_to_mul(sympy.sympify(i)) for i in weights) kwargs['scope'] = kwargs.get('scope', 'stack') @@ -879,7 +886,9 @@ def _xreplace(self, rule): return 
self, False else: try: - weights, flags = zip(*[i._xreplace(rule) for i in self.weights]) + weights, flags = zip( + *[i._xreplace(rule) for i in self.weights], strict=True + ) if any(flags): return self.func(initvalue=weights, function=None), True except AttributeError: @@ -925,7 +934,7 @@ def __new__(cls, expr, mapper, **kwargs): # Sanity check if not (expr.is_Mul and len(weightss) == 1): - raise ValueError("Expect `expr*weights`, got `%s` instead" % str(expr)) + raise ValueError(f"Expect `expr*weights`, got `{str(expr)}` instead") weights = weightss.pop() obj = super().__new__(cls, expr, dimensions) diff --git a/devito/finite_differences/finite_difference.py b/devito/finite_differences/finite_difference.py index 77545552a1..30199fb3d8 100644 --- a/devito/finite_differences/finite_difference.py +++ b/devito/finite_differences/finite_difference.py @@ -1,4 +1,5 @@ from collections.abc import Iterable +from contextlib import suppress from sympy import sympify @@ -92,7 +93,7 @@ def cross_derivative(expr, dims, fd_order, deriv_order, x0=None, side=None, **kw f(x + 2*h_x, y + 2*h_y)*g(x + 2*h_x, y + 2*h_y)/h_x)/h_y """ x0 = x0 or {} - for d, fd, dim in zip(deriv_order, fd_order, dims): + for d, fd, dim in zip(deriv_order, fd_order, dims, strict=True): expr = generic_derivative(expr, dim=dim, fd_order=fd, deriv_order=d, x0=x0, side=side, **kwargs) @@ -143,10 +144,7 @@ def generic_derivative(expr, dim, fd_order, deriv_order, matvec=direct, x0=None, return expr # Enforce stable time coefficients - if dim.is_Time: - coefficients = 'taylor' - else: - coefficients = expr.coefficients + coefficients = 'taylor' if dim.is_Time else expr.coefficients return make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coefficients, expand, weights) @@ -184,10 +182,7 @@ def make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coeffici weights = [weights._subs(wdim, i) for i in range(len(indices))] # Enforce fixed precision FD coefficients to avoid variations 
in results - if scale: - scale = dim.spacing**(-deriv_order) - else: - scale = 1 + scale = dim.spacing**(-deriv_order) if scale else 1 weights = [sympify(scale * w).evalf(_PRECISION) for w in weights] # Transpose the FD, if necessary @@ -210,26 +205,21 @@ def make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, coeffici expr = expr._subs(dim, indices.expr) # Re-evaluate any off-the-grid Functions potentially impacted by the FD - try: + # unless a pure number + with suppress(AttributeError): expr = expr._evaluate(expand=False) - except AttributeError: - # Pure number - pass deriv = DiffDerivative(expr*weights, {dim: indices.free_dim}) else: terms = [] - for i, c in zip(indices, weights): + for i, c in zip(indices, weights, strict=True): # The FD term term = expr._subs(dim, i) * c # Re-evaluate any off-the-grid Functions potentially impacted by the FD - try: + # unless a pure number + with suppress(AttributeError): term = term.evaluate - except AttributeError: - # Pure number - pass - terms.append(term) deriv = EvalDerivative(*terms, base=expr) diff --git a/devito/finite_differences/operators.py b/devito/finite_differences/operators.py index f3f159f4ad..156466324e 100644 --- a/devito/finite_differences/operators.py +++ b/devito/finite_differences/operators.py @@ -70,7 +70,9 @@ def grad(func, shift=None, order=None, method='FD', side=None, **kwargs): try: return func.grad(shift=shift, order=order, method=method, side=side, w=w) except AttributeError: - raise AttributeError("Gradient not supported for class %s" % func.__class__) + raise AttributeError( + f"Gradient not supported for class {func.__class__}" + ) from None def grad45(func, shift=None, order=None): @@ -116,7 +118,9 @@ def curl(func, shift=None, order=None, method='FD', side=None, **kwargs): try: return func.curl(shift=shift, order=order, method=method, side=side, w=w) except AttributeError: - raise AttributeError("Curl only supported for 3D VectorFunction") + raise AttributeError( + "Curl 
only supported for 3D VectorFunction" + ) from None def curl45(func, shift=None, order=None): diff --git a/devito/finite_differences/tools.py b/devito/finite_differences/tools.py index 91c8b43c85..3fe9b4f4ab 100644 --- a/devito/finite_differences/tools.py +++ b/devito/finite_differences/tools.py @@ -51,8 +51,9 @@ def wrapper(expr, *args, **kwargs): try: return S.Zero if expr.is_Number else func(expr, *args, **kwargs) except AttributeError: - raise ValueError("'%s' must be of type Differentiable, not %s" - % (expr, type(expr))) + raise ValueError( + f"'{expr}' must be of type Differentiable, not {type(expr)}" + ) from None return wrapper @@ -73,9 +74,9 @@ def dim_with_order(dims, orders): def deriv_name(dims, orders): name = [] - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): name_dim = 't' if d.is_Time else d.root.name - name.append('d%s%s' % (name_dim, o) if o > 1 else 'd%s' % name_dim) + name.append(f'd{name_dim}{o}' if o > 1 else f'd{name_dim}') return ''.join(name) @@ -102,41 +103,41 @@ def diff_f(expr, deriv_order, dims, fd_order, side=None, **kwargs): # All conventional FD shortcuts for o in all_combs: - fd_dims = tuple(d for d, o_d in zip(dims, o) if o_d > 0) - d_orders = tuple(o_d for d, o_d in zip(dims, o) if o_d > 0) + fd_dims = tuple(d for d, o_d in zip(dims, o, strict=True) if o_d > 0) + d_orders = tuple(o_d for d, o_d in zip(dims, o, strict=True) if o_d > 0) fd_orders = tuple(to if d.is_Time else so for d in fd_dims) deriv = partial(diff_f, deriv_order=d_orders, dims=fd_dims, fd_order=fd_orders) name_fd = deriv_name(fd_dims, d_orders) dname = (d.root.name for d in fd_dims) - description = 'derivative of order %s w.r.t dimension %s' % (d_orders, dname) + description = f'derivative of order {d_orders} w.r.t dimension {dname}' derivatives[name_fd] = (deriv, description) # Add non-conventional, non-centered first-order FDs - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): name = 't' if d.is_Time 
else d.root.name # Add centered first derivatives deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=centered) - name_fd = 'd%sc' % name - description = 'centered derivative staggered w.r.t dimension %s' % d.name + name_fd = f'd{name}c' + description = f'centered derivative staggered w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Left deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=left) - name_fd = 'd%sl' % name - description = 'left first order derivative w.r.t dimension %s' % d.name + name_fd = f'd{name}l' + description = f'left first order derivative w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Right deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, side=right) - name_fd = 'd%sr' % name - description = 'right first order derivative w.r.t dimension %s' % d.name + name_fd = f'd{name}r' + description = f'right first order derivative w.r.t dimension {d.name}' derivatives[name_fd] = (deriv, description) # Add RSFD for first order derivatives - for d, o in zip(dims, orders): + for d, o in zip(dims, orders, strict=True): if not d.is_Time: name = d.root.name deriv = partial(diff_f, deriv_order=1, dims=d, fd_order=o, method='RSFD') - name_fd = 'd%s45' % name - description = 'Derivative w.r.t %s with rotated 45 degree FD' % d.name + name_fd = f'd{name}45' + description = f'Derivative w.r.t {d.name} with rotated 45 degree FD' derivatives[name_fd] = (deriv, description) return derivatives @@ -171,7 +172,7 @@ def __new__(cls, dim, indices=None, expr=None, fd=None): return obj def __repr__(self): - return "IndexSet(%s)" % ", ".join(str(i) for i in self) + return "IndexSet({})".format(", ".join(str(i) for i in self)) @property def spacing(self): @@ -224,7 +225,7 @@ def make_stencil_dimension(expr, _min, _max): Create a StencilDimension for `expr` with unique name. 
""" n = len(expr.find(StencilDimension)) - return StencilDimension('i%d' % n, _min, _max) + return StencilDimension(f'i{n}', _min, _max) @cacheit @@ -322,7 +323,7 @@ def make_shift_x0(shift, ndim): else: raise ValueError("ndim length must be equal to 1 or 2") raise ValueError("shift parameter must be one of the following options: " - "None, float or tuple with shape equal to %s" % (ndim,)) + f"None, float or tuple with shape equal to {ndim}") def process_weights(weights, expr, dim): diff --git a/devito/ir/cgen/printer.py b/devito/ir/cgen/printer.py index fc5c23b904..120d49aea7 100644 --- a/devito/ir/cgen/printer.py +++ b/devito/ir/cgen/printer.py @@ -1,6 +1,8 @@ """ Utilities to turn SymPy objects into C strings. """ +from contextlib import suppress + import numpy as np import sympy from mpmath.libmp import prec_to_dps, to_str @@ -115,10 +117,8 @@ def _print_PyCPointerType(self, expr): return f'{ctype} *' def _print_type(self, expr): - try: + with suppress(TypeError): expr = dtype_to_ctype(expr) - except TypeError: - pass try: return self.type_mappings[expr] except KeyError: @@ -308,10 +308,7 @@ def _print_Float(self, expr): """Print a Float in C-like scientific notation.""" prec = expr._prec - if prec < 5: - dps = 0 - else: - dps = prec_to_dps(expr._prec) + dps = 0 if prec < 5 else prec_to_dps(expr._prec) if self._settings["full_prec"] is True: strip = False diff --git a/devito/ir/clusters/algorithms.py b/devito/ir/clusters/algorithms.py index c6c76567aa..083eb36354 100644 --- a/devito/ir/clusters/algorithms.py +++ b/devito/ir/clusters/algorithms.py @@ -197,7 +197,7 @@ def _break_for_parallelism(self, scope, candidates, i): if d.is_local or d.is_storage_related(candidates): # Would break a dependence on storage return False - if any(d.is_carried(i) for i in candidates): + if any(d.is_carried(i) for i in candidates): # noqa: SIM102 if (d.is_flow and d.is_lex_negative) or (d.is_anti and d.is_lex_positive): # Would break a data dependence return False @@ -229,10 
+229,7 @@ def guard(clusters): for cd in cds: # `BOTTOM` parent implies a guard that lives outside of # any iteration space, which corresponds to the placeholder None - if cd.parent is BOTTOM: - d = None - else: - d = cd.parent + d = None if cd.parent is BOTTOM else cd.parent # If `cd` uses, as condition, an arbitrary SymPy expression, then # we must ensure to nest it inside the last of the Dimensions @@ -326,7 +323,7 @@ def callback(self, clusters, prefix): # SymPy's index ordering (t, t-1, t+1) after modulo replacement so # that associativity errors are consistent. This corresponds to # sorting offsets {-1, 0, 1} as {0, -1, 1} assigning -inf to 0 - key = lambda i: -np.inf if i - si == 0 else (i - si) + key = lambda i: -np.inf if i - si == 0 else (i - si) # noqa: B023 siafs = sorted(iafs, key=key) for iaf in siafs: @@ -435,7 +432,7 @@ def callback(self, clusters, prefix, seen=None): # Construct the HaloTouch Cluster expr = Eq(self.B, HaloTouch(*points, halo_scheme=hs)) - key = lambda i: i in prefix[:-1] or i in hs.loc_indices + key = lambda i: i in prefix[:-1] or i in hs.loc_indices # noqa: B023 ispace = c.ispace.project(key) # HaloTouches are not parallel properties = c.properties.sequentialize() @@ -463,7 +460,7 @@ def _update(reductions): for c in clusters: # Schedule the global distributed reductions encountered before `c`, # if `c`'s IterationSpace is such that the reduction can be carried out - found, fifo = split(fifo, lambda dr: dr.ispace.is_subset(c.ispace)) + found, fifo = split(fifo, lambda dr: dr.ispace.is_subset(c.ispace)) # noqa: B023 _update(found) # Detect the global distributed reductions in `c` @@ -478,7 +475,7 @@ def _update(reductions): continue # Is Inc/Max/Min/... actually used for a reduction? 
- ispace = c.ispace.project(lambda d: d in var.free_symbols) + ispace = c.ispace.project(lambda d: d in var.free_symbols) # noqa: B023 if ispace.itdims == c.ispace.itdims: continue @@ -492,7 +489,7 @@ def _update(reductions): # The IterationSpace within which the global distributed reduction # must be carried out - ispace = c.ispace.prefix(lambda d: d in var.free_symbols) + ispace = c.ispace.prefix(lambda d: d in var.free_symbols) # noqa: B023 expr = [Eq(var, DistReduce(var, op=op, grid=grid, ispace=ispace))] fifo.append(c.rebuild(exprs=expr, ispace=ispace)) diff --git a/devito/ir/clusters/cluster.py b/devito/ir/clusters/cluster.py index a0283865a4..6c65f4b97a 100644 --- a/devito/ir/clusters/cluster.py +++ b/devito/ir/clusters/cluster.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import cached_property from itertools import chain @@ -60,7 +61,7 @@ def __init__(self, exprs, ispace=null_ispace, guards=None, properties=None, self._halo_scheme = halo_scheme def __repr__(self): - return "Cluster([%s])" % ('\n' + ' '*9).join('%s' % i for i in self.exprs) + return "Cluster([{}])".format(('\n' + ' '*9).join(f'{i}' for i in self.exprs)) @classmethod def from_clusters(cls, *clusters): @@ -96,9 +97,11 @@ def from_clusters(cls, *clusters): try: syncs = normalize_syncs(*[c.syncs for c in clusters]) - except ValueError: - raise ValueError("Cannot build a Cluster from Clusters with " - "non-compatible synchronization operations") + except ValueError as e: + raise ValueError( + "Cannot build a Cluster from Clusters with " + "non-compatible synchronization operations" + ) from e halo_scheme = HaloScheme.union([c.halo_scheme for c in clusters]) @@ -186,10 +189,8 @@ def dist_dimensions(self): """ ret = set() for f in self.functions: - try: + with suppress(AttributeError): ret.update(f._dist_dimensions) - except AttributeError: - pass return frozenset(ret) @cached_property @@ -396,10 +397,7 @@ def dspace(self): oobs = set() for f, v in parts.items(): for i in v: - 
if i.dim.is_Sub: - d = i.dim.parent - else: - d = i.dim + d = i.dim.parent if i.dim.is_Sub else i.dim try: if i.lower < 0 or \ i.upper > f._size_nodomain[d].left + f._size_halo[d].right: diff --git a/devito/ir/clusters/visitors.py b/devito/ir/clusters/visitors.py index 694de41142..98ffbad36d 100644 --- a/devito/ir/clusters/visitors.py +++ b/devito/ir/clusters/visitors.py @@ -141,7 +141,7 @@ def __new__(cls, *args, mode='dense'): elif len(args) == 2: func, mode = args else: - assert False + raise AssertionError('Too many args') obj = object.__new__(cls) obj.__init__(func, mode) return obj diff --git a/devito/ir/equations/algorithms.py b/devito/ir/equations/algorithms.py index 00228c7cbb..b3a78c0ebe 100644 --- a/devito/ir/equations/algorithms.py +++ b/devito/ir/equations/algorithms.py @@ -126,11 +126,11 @@ def _lower_exprs(expressions, subs): # Introduce shifting to align with the computational domain indices = [_lower_exprs(a, subs) + o for a, o in - zip(i.indices, f._size_nodomain.left)] + zip(i.indices, f._size_nodomain.left, strict=True)] # Substitute spacing (spacing only used in own dimension) indices = [i.xreplace({d.spacing: 1, -d.spacing: -1}) - for i, d in zip(indices, f.dimensions)] + for i, d in zip(indices, f.dimensions, strict=True)] # Apply substitutions, if necessary if dimension_map: @@ -140,7 +140,7 @@ def _lower_exprs(expressions, subs): if isinstance(f, Array) and f.initvalue is not None: initvalue = [_lower_exprs(i, subs) for i in f.initvalue] # TODO: fix rebuild to avoid new name - f = f._rebuild(name='%si' % f.name, initvalue=initvalue) + f = f._rebuild(name=f'{f.name}i', initvalue=initvalue) mapper[i] = f.indexed[indices] # Add dimensions map to the mapper in case dimensions are used @@ -319,8 +319,10 @@ def _(d, mapper, rebuilt, sregistry): # Warn the user if name has been changed, since this will affect overrides if fname != d.functions.name: fkwargs['name'] = fname - warning("%s <%s> renamed as '%s'. Consider assigning a unique name to %s." 
%
-                (str(d.functions), id(d.functions), fname, d.functions.name))
+        warning(
+            f"{str(d.functions)} <{id(d.functions)}> renamed as '{fname}'. "
+            f"Consider assigning a unique name to {d.functions.name}."
+        )
 
     fkwargs.update({'function': None,
                     'halo': None,
diff --git a/devito/ir/equations/equation.py b/devito/ir/equations/equation.py
index 8a8e821d94..29945903a9 100644
--- a/devito/ir/equations/equation.py
+++ b/devito/ir/equations/equation.py
@@ -92,9 +92,9 @@ def __repr__(self):
         if not self.is_Reduction:
             return super().__repr__()
         elif self.operation is OpInc:
-            return '%s += %s' % (self.lhs, self.rhs)
+            return f'{self.lhs} += {self.rhs}'
         else:
-            return '%s = %s(%s)' % (self.lhs, self.operation, self.rhs)
+            return f'{self.lhs} = {self.operation}({self.rhs})'
 
     # Pickling support
     __reduce_ex__ = Pickable.__reduce_ex__
@@ -174,7 +174,7 @@ def __new__(cls, *args, **kwargs):
             input_expr = args[0]
             expr = sympy.Eq.__new__(cls, *input_expr.args, evaluate=False)
             for i in cls.__rkwargs__:
-                setattr(expr, '_%s' % i, kwargs.get(i) or getattr(input_expr, i))
+                setattr(expr, f'_{i}', kwargs.get(i) or getattr(input_expr, i))
             return expr
         elif len(args) == 1 and isinstance(args[0], Eq):
             # origin: LoweredEq(devito.Eq)
@@ -182,11 +182,11 @@ def __new__(cls, *args, **kwargs):
         elif len(args) == 2:
             expr = sympy.Eq.__new__(cls, *args, evaluate=False)
             for i in cls.__rkwargs__:
-                setattr(expr, '_%s' % i, kwargs.pop(i))
+                setattr(expr, f'_{i}', kwargs.pop(i))
             return expr
         else:
-            raise ValueError("Cannot construct LoweredEq from args=%s "
-                             "and kwargs=%s" % (str(args), str(kwargs)))
+            raise ValueError(f"Cannot construct LoweredEq from args={str(args)} "
+                             f"and kwargs={str(kwargs)}")
 
         # Well-defined dimension ordering
         ordering = dimension_sort(expr)
@@ -294,7 +294,7 @@ def __new__(cls, *args, **kwargs):
                     v = kwargs[i]
                 except KeyError:
                     v = getattr(input_expr, i, None)
-                setattr(expr, '_%s' % i, v)
+                setattr(expr, f'_{i}', v)
         else:
             expr._ispace = kwargs['ispace']
             expr._conditionals =
kwargs.get('conditionals', frozendict())
@@ -304,10 +304,10 @@ def __new__(cls, *args, **kwargs):
             # origin: ClusterizedEq(lhs, rhs, **kwargs)
             expr = sympy.Eq.__new__(cls, *args, evaluate=False)
             for i in cls.__rkwargs__:
-                setattr(expr, '_%s' % i, kwargs.pop(i))
+                setattr(expr, f'_{i}', kwargs.pop(i))
         else:
-            raise ValueError("Cannot construct ClusterizedEq from args=%s "
-                             "and kwargs=%s" % (str(args), str(kwargs)))
+            raise ValueError(f"Cannot construct ClusterizedEq from args={str(args)} "
+                             f"and kwargs={str(kwargs)}")
         return expr
 
     func = IREq._rebuild
@@ -330,5 +330,5 @@ def __new__(cls, *args, **kwargs):
         elif len(args) == 2:
             obj = LoweredEq(Eq(*args, evaluate=False))
         else:
-            raise ValueError("Cannot construct DummyEq from args=%s" % str(args))
+            raise ValueError(f"Cannot construct DummyEq from args={str(args)}")
         return ClusterizedEq.__new__(cls, obj, ispace=obj.ispace)
diff --git a/devito/ir/iet/algorithms.py b/devito/ir/iet/algorithms.py
index 1715c9c2d1..e805c34f42 100644
--- a/devito/ir/iet/algorithms.py
+++ b/devito/ir/iet/algorithms.py
@@ -48,7 +48,7 @@ def iet_build(stree):
                                  uindices=i.sub_iterators)
 
         elif i.is_Section:
-            body = Section('section%d' % nsections, body=queues.pop(i))
+            body = Section(f'section{nsections}', body=queues.pop(i))
             nsections += 1
 
         elif i.is_Halo:
@@ -62,7 +62,7 @@ def iet_build(stree):
 
         queues.setdefault(i.parent, []).append(body)
 
-    assert False
+    raise AssertionError('This function did not return')
 
 
 def _unpack_switch_case(bundle):
diff --git a/devito/ir/iet/efunc.py b/devito/ir/iet/efunc.py
index c2207094c8..1a17202140 100644
--- a/devito/ir/iet/efunc.py
+++ b/devito/ir/iet/efunc.py
@@ -36,12 +36,14 @@ def __init__(self, name, arguments=None, mapper=None, dynamic_args_mapper=None,
 
             # Sanity check
             if k not in self._mapper:
-                raise ValueError("`k` is not a dynamic parameter" % k)
+                raise ValueError(f"`{k}` is not a dynamic parameter")
             if len(self._mapper[k]) != len(tv):
-                raise ValueError("Expected %d values for dynamic parameter `%s`,
given %d" - % (len(self._mapper[k]), k, len(tv))) + raise ValueError( + f'Expected {len(self._mapper[k])} values for dynamic parameter ' + f'`{k}`, given {len(tv)}' + ) # Create the argument list - for i, j in zip(self._mapper[k], tv): + for i, j in zip(self._mapper[k], tv, strict=True): arguments[i] = j if incr is False else (arguments[i] + j) super().__init__(name, arguments, retobj, is_indirect) @@ -216,8 +218,10 @@ def __init__(self, name, grid, block, shm=0, stream=None, self.stream = stream def __repr__(self): - return 'Launch[%s]<<<(%s)>>>' % (self.name, - ','.join(str(i.name) for i in self.writes)) + return 'Launch[{}]<<<({})>>>'.format( + self.name, + ','.join(str(i.name) for i in self.writes) + ) @cached_property def functions(self): diff --git a/devito/ir/iet/nodes.py b/devito/ir/iet/nodes.py index 1c8ff5fbff..a8a3a35b00 100644 --- a/devito/ir/iet/nodes.py +++ b/devito/ir/iet/nodes.py @@ -5,6 +5,7 @@ import inspect from collections import OrderedDict, namedtuple from collections.abc import Iterable +from contextlib import suppress from functools import cached_property import cgen as c @@ -103,11 +104,13 @@ def __new__(cls, *args, **kwargs): obj = super().__new__(cls) argnames, _, _, defaultvalues, _, _, _ = inspect.getfullargspec(cls.__init__) try: - defaults = dict(zip(argnames[-len(defaultvalues):], defaultvalues)) + defaults = dict( + zip(argnames[-len(defaultvalues):], defaultvalues, strict=True) + ) except TypeError: # No default kwarg values defaults = {} - obj._args = {k: v for k, v in zip(argnames[1:], args)} + obj._args = {k: v for k, v in zip(argnames[1:], args, strict=False)} obj._args.update(kwargs.items()) obj._args.update({k: defaults.get(k) for k in argnames[1:] if k not in obj._args}) return obj @@ -116,7 +119,7 @@ def _rebuild(self, *args, **kwargs): """Reconstruct ``self``.""" handle = self._args.copy() # Original constructor arguments argnames = [i for i in self._traversable if i not in kwargs] - handle.update(OrderedDict([(k, v) for 
k, v in zip(argnames, args)])) + handle.update(OrderedDict([(k, v) for k, v in zip(argnames, args, strict=False)])) handle.update(kwargs) return type(self)(**handle) @@ -251,8 +254,8 @@ def __init__(self, header=None, body=None, footer=None, inline=False): self.inline = inline def __repr__(self): - return "<%s (%d, %d, %d)>" % (self.__class__.__name__, len(self.header), - len(self.body), len(self.footer)) + return f'<{self.__class__.__name__} ({len(self.header)}, {len(self.body)}, ' + \ + f'{len(self.footer)})>' class EmptyList(List): @@ -318,8 +321,8 @@ def __init__(self, name, arguments=None, retobj=None, is_indirect=False, self.templates = as_tuple(templates) def __repr__(self): - ret = "" if self.retobj is None else "%s = " % self.retobj - return "%sCall::\n\t%s(...)" % (ret, self.name) + ret = "" if self.retobj is None else f"{self.retobj} = " + return f"{ret}Call::\n\t{self.name}(...)" def _rebuild(self, *args, **kwargs): if args: @@ -327,7 +330,7 @@ def _rebuild(self, *args, **kwargs): # have nested Calls/Lambdas among its `arguments`, and these might # change, and we are in such a case *if and only if* we have `args` assert len(args) == len(self.children) - mapper = dict(zip(self.children, args)) + mapper = dict(zip(self.children, args, strict=True)) kwargs['arguments'] = [mapper.get(i, i) for i in self.arguments] return super()._rebuild(**kwargs) @@ -375,10 +378,8 @@ def expr_symbols(self): elif isinstance(i, Call): retval.extend(i.expr_symbols) else: - try: + with suppress(AttributeError): retval.extend(i.free_symbols) - except AttributeError: - pass if self.base is not None: retval.append(self.base) @@ -428,9 +429,11 @@ def __init__(self, expr, pragmas=None, init=False, operation=None): self.operation = operation def __repr__(self): - return "<%s::%s=%s>" % (self.__class__.__name__, - type(self.write), - ','.join('%s' % type(f) for f in self.functions)) + return "<{}::{}={}>".format( + self.__class__.__name__, + type(self.write), + ','.join(f'{type(f)}' 
for f in self.functions) + ) @property def dtype(self): @@ -481,8 +484,10 @@ def is_initializable(self): """ True if it can be an initializing assignment, False otherwise. """ - return ((self.is_scalar and not self.is_reduction) or - (self.is_tensor and isinstance(self.expr.rhs, ListInitializer))) + return ( + (self.is_scalar and not self.is_reduction) or + (self.is_tensor and isinstance(self.expr.rhs, ListInitializer)) + ) @property def defines(self): @@ -575,7 +580,7 @@ def __init__(self, nodes, dimension, limits, direction=None, properties=None, # Generate loop limits if isinstance(limits, Iterable): - assert(len(limits) == 3) + assert len(limits) == 3 self.limits = tuple(limits) elif self.dim.is_Incr: self.limits = (self.dim.symbolic_min, limits, self.dim.step) @@ -594,11 +599,11 @@ def __repr__(self): properties = "" if self.properties: properties = [str(i) for i in self.properties] - properties = "WithProperties[%s]::" % ",".join(properties) + properties = "WithProperties[{}]::".format(",".join(properties)) index = self.index if self.uindices: - index += '[%s]' % ','.join(i.name for i in self.uindices) - return "<%sIteration %s; %s>" % (properties, index, self.limits) + index += '[{}]'.format(','.join(i.name for i in self.uindices)) + return f"<{properties}Iteration {index}; {self.limits}>" @property def is_Affine(self): @@ -715,10 +720,8 @@ def __init__(self, condition): def functions(self): ret = [] for i in self.condition.free_symbols: - try: + with suppress(AttributeError): ret.append(i.function) - except AttributeError: - pass return tuple(ret) @property @@ -748,7 +751,7 @@ def __init__(self, condition, body=None): self.body = as_tuple(body) def __repr__(self): - return "" % (self.condition, len(self.body)) + return f'' class Callable(Node): @@ -795,8 +798,11 @@ def __init__(self, name, body, retval, parameters=None, prefix=None, def __repr__(self): param_types = [ctypes_to_cstr(i._C_ctype) for i in self.parameters] - return "%s[%s]<%s; %s>" % 
(self.__class__.__name__, self.name, self.retval, - ",".join(param_types)) + return "{}[{}]<{}; {}>".format( + self.__class__.__name__, + self.name, self.retval, + ",".join(param_types) + ) @property def all_parameters(self): @@ -893,11 +899,10 @@ def __init__(self, body, init=(), standalones=(), unpacks=(), strides=(), self.retstmt = as_tuple(retstmt) def __repr__(self): - return (" >" % - (len(self.unpacks), len(self.allocs), len(self.casts), - len(self.maps), len(self.objs), len(self.unmaps), - len(self.frees))) + return ' ' + \ + f'>' class Conditional(DoIf): @@ -926,10 +931,10 @@ def __init__(self, condition, then_body, else_body=None): def __repr__(self): if self.else_body: - return "<[%s] ? [%s] : [%s]>" %\ - (ccode(self.condition), repr(self.then_body), repr(self.else_body)) + return f'<[{ccode(self.condition)}] ? [{repr(self.then_body)}] ' + \ + f': [{repr(self.else_body)}]>' else: - return "<[%s] ? [%s]" % (ccode(self.condition), repr(self.then_body)) + return f'<[{ccode(self.condition)}] ? 
[{repr(self.then_body)}]' class Switch(DoIf): @@ -970,7 +975,7 @@ def ncases(self): @property def as_mapper(self): - retval = dict(zip(self.cases, self.nodes)) + retval = dict(zip(self.cases, self.nodes, strict=True)) if self.default: retval['default'] = self.default return retval @@ -997,9 +1002,9 @@ def __init__(self, timer, lname, body): self._name = lname self._timer = timer - super().__init__(header=c.Line('START(%s)' % lname), + super().__init__(header=c.Line(f'START({lname})'), body=body, - footer=c.Line('STOP(%s,%s)' % (lname, timer.name))) + footer=c.Line(f'STOP({lname},{timer.name})')) @classmethod def _start_timer_header(cls): @@ -1037,7 +1042,7 @@ def __init__(self, function): self.function = function def __repr__(self): - return "" % self.function + return f"" @property def functions(self): @@ -1060,19 +1065,15 @@ def expr_symbols(self): f = self.function if f.is_LocalObject: ret = set(flatten(i.free_symbols for i in f.cargs)) - try: + with suppress(AttributeError): ret.update(f.initvalue.free_symbols) - except AttributeError: - pass return tuple(ret) elif f.is_Array and f.initvalue is not None: # These are just a handful of values so it's OK to iterate them over ret = set() for i in f.initvalue: - try: + with suppress(AttributeError): ret.update(i.free_symbols) - except AttributeError: - pass return tuple(ret) else: return () @@ -1094,7 +1095,7 @@ def __init__(self, function, obj=None, alignment=True, flat=None): self.flat = flat def __repr__(self): - return "" % self.function + return f"" @property def castshape(self): @@ -1148,7 +1149,7 @@ def __init__(self, pointee, pointer, flat=None, offset=None): self.offset = offset def __repr__(self): - return "" % (self.pointee, self.pointer) + return f"" @property def functions(self): @@ -1171,7 +1172,7 @@ def expr_symbols(self): ret.extend(flatten(i.free_symbols for i in self.pointee.symbolic_shape[1:])) else: - assert False, f"Unexpected pointer type {type(self.pointer)}" + raise 
AssertionError(f'Unexpected pointer type {type(self.pointer)}') if self.offset is not None: ret.append(self.offset) @@ -1223,7 +1224,7 @@ def __init__(self, body, captures=None, parameters=None, special=None, self.attributes = as_tuple(attributes) def __repr__(self): - return "Lambda[%s](%s)" % (self.captures, self.parameters) + return f"Lambda[{self.captures}]({self.parameters})" @property def functions(self): @@ -1259,7 +1260,7 @@ def __init__(self, name, body=None, is_subsection=False): self.is_subsection = is_subsection def __repr__(self): - return "
" % self.name + return f"
" @property def roots(self): @@ -1281,7 +1282,7 @@ def __init__(self, ispace, ops, traffic, body=None): self.traffic = traffic def __repr__(self): - return "" % len(self.exprs) + return f'' @property def exprs(self): @@ -1330,7 +1331,7 @@ def __init__(self, name): self.name = name def __repr__(self): - return "" % self.name + return f"" class UsingNamespace(Node): @@ -1343,7 +1344,7 @@ def __init__(self, namespace): self.namespace = namespace def __repr__(self): - return "" % self.namespace + return f"" class Pragma(Node): @@ -1356,7 +1357,7 @@ def __init__(self, pragma, arguments=None): super().__init__() if not isinstance(pragma, str): - raise TypeError("Pragma name must be a string, not %s" % type(pragma)) + raise TypeError(f"Pragma name must be a string, not {type(pragma)}") self.pragma = pragma self.arguments = as_tuple(arguments) @@ -1512,7 +1513,7 @@ def __init__(self, sync_ops, body=None): self.sync_ops = sync_ops def __repr__(self): - return "" % ",".join(str(i) for i in self.sync_ops) + return "".format(",".join(str(i) for i in self.sync_ops)) @property def is_async_op(self): @@ -1589,8 +1590,8 @@ def __init__(self, body, halo_scheme): self._halo_scheme = halo_scheme def __repr__(self): - functions = "(%s)" % ",".join(i.name for i in self.functions) - return "<%s%s>" % (self.__class__.__name__, functions) + functions = "({})".format(",".join(i.name for i in self.functions)) + return f"<{self.__class__.__name__}{functions}>" @property def halo_scheme(self): diff --git a/devito/ir/iet/utils.py b/devito/ir/iet/utils.py index 1f693a7299..86b23adbbf 100644 --- a/devito/ir/iet/utils.py +++ b/devito/ir/iet/utils.py @@ -32,7 +32,7 @@ def dimensions(self): return [i.dim for i in self] def __repr__(self): - return "IterationTree%s" % super().__repr__() + return f"IterationTree{super().__repr__()}" def __getitem__(self, key): ret = super().__getitem__(key) diff --git a/devito/ir/iet/visitors.py b/devito/ir/iet/visitors.py index 31b9356dce..aafbeec2fc 100644 --- 
a/devito/ir/iet/visitors.py +++ b/devito/ir/iet/visitors.py @@ -64,7 +64,7 @@ def maybe_rebuild(self, o, *args, **kwargs): """A visit method that rebuilds nodes if their children have changed.""" ops, okwargs = o.operands() new_ops = [self._visit(op, *args, **kwargs) for op in ops] - if all(a is b for a, b in zip(ops, new_ops)): + if all(a is b for a, b in zip(ops, new_ops, strict=True)): return o return o._rebuild(*new_ops, **okwargs) @@ -103,7 +103,7 @@ def lookup_method(self, instance) \ def _visit(self, o, *args, **kwargs) -> LazyVisit[YieldType, FlagType]: meth = self.lookup_method(o) flag = yield from meth(o, *args, **kwargs) - return flag + return flag # noqa: B901 def _post_visit(self, ret: LazyVisit[YieldType, FlagType]) -> ResultType: return list(ret) @@ -113,13 +113,13 @@ def visit_object(self, o: object, **kwargs) -> LazyVisit[YieldType, FlagType]: def visit_Node(self, o: Node, **kwargs) -> LazyVisit[YieldType, FlagType]: flag = yield from self._visit(o.children, **kwargs) - return flag + return flag # noqa: B901 def visit_tuple(self, o: Sequence[Any], **kwargs) -> LazyVisit[YieldType, FlagType]: flag: FlagType = None for i in o: flag = yield from self._visit(i, **kwargs) - return flag + return flag # noqa: B901 visit_list = visit_tuple @@ -296,7 +296,7 @@ def _gen_struct_decl(self, obj, masked=()): fields = (None,)*len(ctype._fields_) entries = [] - for i, (n, ct) in zip(fields, ctype._fields_): + for i, (n, ct) in zip(fields, ctype._fields_, strict=True): try: entries.append(self._gen_value(i, 0, masked=('const',))) except AttributeError: @@ -327,9 +327,10 @@ def _gen_value(self, obj, mode=1, masked=()): else: strtype = self.ccode(obj._C_ctype) strshape = '' - if isinstance(obj, (AbstractFunction, IndexedData)) and mode >= 1: - if not obj._mem_stack: - strtype = f'{strtype}{self._restrict_keyword}' + if isinstance(obj, (AbstractFunction, IndexedData)) \ + and mode >= 1 \ + and not obj._mem_stack: + strtype = f'{strtype}{self._restrict_keyword}' 
strtype = ' '.join(qualifiers + [strtype]) if obj.is_LocalObject and obj._C_modifier is not None and mode == 2: @@ -400,12 +401,9 @@ def _gen_signature(self, o, is_declaration=False): prefix = ' '.join(o.prefix + (self._gen_rettype(o.retval),)) - if o.attributes: - # NOTE: ugly, but I can't bother extending `c.FunctionDeclaration` - # for such a tiny thing - v = f"{' '.join(o.attributes)} {o.name}" - else: - v = o.name + # NOTE: ugly, but I can't bother extending `c.FunctionDeclaration` + # for such a tiny thing + v = f"{' '.join(o.attributes)} {o.name}" if o.attributes else o.name signature = c.FunctionDeclaration(c.Value(prefix, v), decls) @@ -481,7 +479,7 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, IndexedData): v = f._C_name else: - assert False + raise AssertionError('rvalue is not a recognised type') rvalue = f'({cstr}**) {v}' else: @@ -510,14 +508,11 @@ def visit_PointerCast(self, o): elif isinstance(o.obj, DeviceMap): v = f._C_field_dmap else: - assert False + raise AssertionError('rvalue is not a recognised type') rvalue = f'({cstr} {rshape}) {f._C_name}->{v}' else: - if isinstance(o.obj, Pointer): - v = o.obj.name - else: - v = f._C_name + v = o.obj.name if isinstance(o.obj, Pointer) else f._C_name rvalue = f'({cstr} {rshape}) {v}' @@ -526,10 +521,7 @@ def visit_PointerCast(self, o): def visit_Dereference(self, o): a0, a1 = o.functions - if o.offset: - ptr = f'({a1.name} + {o.offset})' - else: - ptr = a1.name + ptr = f'({a1.name} + {o.offset})' if o.offset else a1.name if a0.is_AbstractFunction: cstr = self.ccode(a0.indexed._C_typedata) @@ -549,10 +541,7 @@ def visit_Dereference(self, o): lvalue = c.Value(cstr, f'*{self._restrict_keyword} {a0.name}') else: - if a1.is_Symbol: - rvalue = f'*{ptr}' - else: - rvalue = f'{ptr}->{a0._C_name}' + rvalue = f'*{ptr}' if a1.is_Symbol else f'{ptr}->{a0._C_name}' lvalue = self._gen_value(a0, 0) return c.Initializer(lvalue, rvalue) @@ -563,10 +552,7 @@ def visit_Block(self, o): def visit_List(self, o): 
body = flatten(self._visit(i) for i in self._blankline_logic(o.children)) - if o.inline: - body = c.Line(' '.join(str(i) for i in body)) - else: - body = c.Collection(body) + body = c.Line(' '.join(str(i) for i in body)) if o.inline else c.Collection(body) return c.Module(o.header + (body,) + o.footer) def visit_Section(self, o): @@ -744,10 +730,7 @@ def visit_HaloSpot(self, o): return c.Collection(body) def visit_KernelLaunch(self, o): - if o.templates: - templates = f"<{','.join([str(i) for i in o.templates])}>" - else: - templates = '' + templates = f"<{','.join([str(i) for i in o.templates])}>" if o.templates else '' launch_args = [o.grid, o.block] if o.shm is not None: @@ -778,7 +761,7 @@ def _operator_includes(self, o): """ Generate cgen includes from an iterable of symbols and expressions. """ - return [c.Include(i, system=(False if i.endswith('.h') else True)) + return [c.Include(i, system=(not i.endswith('.h'))) for i in o.includes] + [blankline] def _operator_namespaces(self, o): @@ -839,10 +822,7 @@ def visit_Operator(self, o, mode='all'): signature = self._gen_signature(o) # Honor the `retstmt` flag if set - if o.body.retstmt: - retval = [] - else: - retval = [c.Line(), c.Statement("return 0")] + retval = [] if o.body.retstmt else [c.Line(), c.Statement("return 0")] kernel = c.FunctionBody(signature, c.Block(body + retval)) @@ -1206,14 +1186,14 @@ def __init__(self, match: type, start: Node, stop: Node | None = None) -> None: def visit_object(self, o: object, flag: bool = False) -> LazyVisit[Node, bool]: yield from () - return flag + return flag # noqa: B901 def visit_tuple(self, o: Sequence[Any], flag: bool = False) -> LazyVisit[Node, bool]: for el in o: # Yield results from visiting this element, and update the flag flag = yield from self._visit(el, flag=flag) - return flag + return flag # noqa: B901 visit_list = visit_tuple @@ -1236,7 +1216,7 @@ def visit_Node(self, o: Node, flag: bool = False) -> LazyVisit[Node, bool]: # Update the flag if we found 
a stop flag &= (o is not self.stop) - return flag + return flag # noqa: B901 ApplicationType = TypeVar('ApplicationType') @@ -1580,8 +1560,7 @@ def generate(self): lines = list(i.generate()) if len(lines) > 1: yield tip + ",".join(processed + [lines[0]]) - for line in lines[1:-1]: - yield line + yield from lines[1:-1] tip = "" processed = [lines[-1]] else: diff --git a/devito/ir/stree/algorithms.py b/devito/ir/stree/algorithms.py index 6fb229fdc3..f5e76ca209 100644 --- a/devito/ir/stree/algorithms.py +++ b/devito/ir/stree/algorithms.py @@ -46,7 +46,7 @@ def stree_build(clusters, profiler=None, **kwargs): maybe_reusable = [] index = 0 - for it0, it1 in zip(c.itintervals, maybe_reusable): + for it0, it1 in zip(c.itintervals, maybe_reusable, strict=False): if it0 != it1: break @@ -204,7 +204,9 @@ def preprocess(clusters, options=None, **kwargs): syncs = normalize_syncs(*[c1.syncs for c1 in found]) if syncs: - ispace = c.ispace.prefix(lambda d: d._defines.intersection(syncs)) + ispace = c.ispace.prefix( + lambda d: d._defines.intersection(syncs) # noqa: B023 + ) processed.append(c.rebuild(exprs=[], ispace=ispace, syncs=syncs)) if all(c1.ispace.is_subset(c.ispace) for c1 in found): @@ -228,9 +230,9 @@ def preprocess(clusters, options=None, **kwargs): # Sanity check! 
try: assert not queue - except AssertionError: + except AssertionError as e: if options['mpi']: - raise RuntimeError("Unsupported MPI for the given equations") + raise RuntimeError("Unsupported MPI for the given equations") from e return processed @@ -290,7 +292,7 @@ def reuse_section(candidate, section): # * Same set of iteration Dimensions key = lambda i: i.interval.promote(lambda d: d.is_Block).dim test00 = len(iters0) == len(iters1) - test01 = all(key(i) is key(j) for i, j in zip(iters0, iters1)) + test01 = all(key(i) is key(j) for i, j in zip(iters0, iters1, strict=False)) # * All subtrees use at least one local SubDimension (i.e., BCs) key = lambda iters: any(i.dim.is_Sub and i.dim.local for i in iters) diff --git a/devito/ir/stree/tree.py b/devito/ir/stree/tree.py index 6e4b48ec3e..e033c9fd15 100644 --- a/devito/ir/stree/tree.py +++ b/devito/ir/stree/tree.py @@ -29,8 +29,7 @@ def __repr__(self): return render(self) def visit(self): - for i in PostOrderIter(self): - yield i + yield from PostOrderIter(self) @property def last(self): @@ -79,7 +78,7 @@ def sub_iterators(self): @property def __repr_render__(self): - return "%s%s" % (self.dim, self.direction) + return f"{self.dim}{self.direction}" class NodeConditional(ScheduleTree): @@ -105,7 +104,7 @@ def __init__(self, sync_ops, parent=None): @property def __repr_render__(self): - return "Sync[%s]" % ",".join(i.__class__.__name__ for i in self.sync_ops) + return "Sync[{}]".format(",".join(i.__class__.__name__ for i in self.sync_ops)) @property def is_async(self): @@ -129,8 +128,8 @@ def __repr_render__(self): threshold = 2 n = len(self.exprs) ret = ",".join("Eq" for i in range(min(n, threshold))) - ret = ("%s,..." 
% ret) if n > threshold else ret - return "[%s]" % ret + ret = (f"{ret},...") if n > threshold else ret + return f"[{ret}]" class NodeHalo(ScheduleTree): diff --git a/devito/ir/support/basic.py b/devito/ir/support/basic.py index d3f7d00247..4405cb3b63 100644 --- a/devito/ir/support/basic.py +++ b/devito/ir/support/basic.py @@ -1,4 +1,5 @@ from collections.abc import Callable, Iterable +from contextlib import suppress from functools import cached_property from itertools import chain, product @@ -90,7 +91,7 @@ def __new__(cls, access): except AttributeError: # E.g., `access` is a FieldFromComposite rather than an Indexed indices = (S.Infinity,)*len(findices) - return super().__new__(cls, list(zip(findices, indices))) + return super().__new__(cls, list(zip(findices, indices, strict=False))) def __hash__(self): return super().__hash__() @@ -98,7 +99,7 @@ def __hash__(self): @cached_property def index_mode(self): retval = [] - for i, fi in zip(self, self.findices): + for i, fi in zip(self, self.findices, strict=True): dims = {j for j in i.free_symbols if isinstance(j, Dimension)} if len(dims) == 0 and q_constant(i): retval.append(AFFINE) @@ -108,11 +109,8 @@ def index_mode(self): # q_affine -- ultimately it should get quicker! sdims = {d for d in dims if d.is_Stencil} - if dims == sdims: - candidates = sdims - else: - # E.g. `x + i0 + i1` -> `candidates = {x}` - candidates = dims - sdims + # E.g. 
`x + i0 + i1` -> `candidates = {x}` + candidates = sdims if dims == sdims else dims - sdims if len(candidates) == 1: candidate = candidates.pop() @@ -130,7 +128,7 @@ def index_mode(self): @cached_property def aindices(self): retval = [] - for i, fi in zip(self, self.findices): + for i, _ in zip(self, self.findices, strict=True): dims = set(d.root if d.indirect else d for d in i.atoms(Dimension)) sdims = {d for d in dims if d.is_Stencil} candidates = dims - sdims @@ -149,12 +147,12 @@ def findices(self): @cached_property def index_map(self): - return dict(zip(self.aindices, self.findices)) + return dict(zip(self.aindices, self.findices, strict=True)) @cached_property def defined_findices_affine(self): ret = set() - for fi, im in zip(self.findices, self.index_mode): + for fi, im in zip(self.findices, self.index_mode, strict=True): if im is AFFINE: ret.update(fi._defines) return ret @@ -162,7 +160,7 @@ def defined_findices_affine(self): @cached_property def defined_findices_irregular(self): ret = set() - for fi, im in zip(self.findices, self.index_mode): + for fi, im in zip(self.findices, self.index_mode, strict=True): if im is IRREGULAR: ret.update(fi._defines) return ret @@ -338,7 +336,7 @@ def distance(self, other): return Vector(S.ImaginaryUnit) ret = [] - for sit, oit in zip(self.itintervals, other.itintervals): + for sit, oit in zip(self.itintervals, other.itintervals, strict=False): n = len(ret) try: @@ -385,9 +383,8 @@ def distance(self, other): # Case 3: `self` and `other` have some special form such that # it's provable that they never intersect - if sai and sit == oit: - if disjoint_test(self[n], other[n], sai, sit): - return Vector(S.ImaginaryUnit) + if sai and sit == oit and disjoint_test(self[n], other[n], sai, sit): + return Vector(S.ImaginaryUnit) # Compute the distance along the current IterationInterval if self.function._mem_shared: @@ -441,7 +438,7 @@ def distance(self, other): # It still could be an imaginary dependence, e.g. 
`a[3] -> a[4]` or, more # nasty, `a[i+1, 3] -> a[i, 4]` - for i, j in zip(self[n:], other[n:]): + for i, j in zip(self[n:], other[n:], strict=True): if i == j: ret.append(S.Zero) else: @@ -572,7 +569,7 @@ def _defined_findices(self): @cached_property def distance_mapper(self): retval = {} - for i, j in zip(self.findices, self.distance): + for i, j in zip(self.findices, self.distance, strict=False): for d in i._defines: retval[d] = j return retval @@ -646,7 +643,7 @@ def __repr__(self): @cached_property def cause(self): """Return the findex causing the dependence.""" - for i, j in zip(self.findices, self.distance): + for i, j in zip(self.findices, self.distance, strict=False): try: if j > 0: return i._defines @@ -778,7 +775,7 @@ def is_storage_related(self, dims=None): cause the access of the same memory location, False otherwise. """ for d in self.findices: - if d._defines & set(as_tuple(dims)): + if d._defines & set(as_tuple(dims)): # noqa: SIM102 if any(i.is_NonlinearDerived for i in d._defines) or \ self.is_const(d): return True @@ -863,16 +860,12 @@ def writes_gen(self): for i, e in enumerate(self.exprs): terminals = retrieve_accesses(e.lhs) if q_routine(e.rhs): - try: + with suppress(AttributeError): + # Everything except: foreign routines, such as `cos` or `sin` etc. 
terminals.update(e.rhs.writes) - except AttributeError: - # E.g., foreign routines, such as `cos` or `sin` - pass + for j in terminals: - if e.is_Reduction: - mode = 'WR' - else: - mode = 'W' + mode = 'WR' if e.is_Reduction else 'W' yield TimedAccess(j, mode, i, e.ispace) # Objects altering the control flow (e.g., synchronization barriers, @@ -910,15 +903,10 @@ def reads_explicit_gen(self): for i, e in enumerate(self.exprs): # Reads terminals = retrieve_accesses(e.rhs, deep=True) - try: + with suppress(AttributeError): terminals.update(retrieve_accesses(e.lhs.indices)) - except AttributeError: - pass for j in terminals: - if j.function is e.lhs.function and e.is_Reduction: - mode = 'RR' - else: - mode = 'R' + mode = 'RR' if j.function is e.lhs.function and e.is_Reduction else 'R' yield TimedAccess(j, mode, i, e.ispace) # If a reduction, we got one implicit read @@ -1066,7 +1054,7 @@ def __repr__(self): shifted = f"{chr(10) if shifted else ''}{shifted}" writes[i] = f'\033[1;37;31m{first + shifted}\033[0m' return "\n".join([out.format(i.name, w, '', r) - for i, r, w in zip(tracked, reads, writes)]) + for i, r, w in zip(tracked, reads, writes, strict=True)]) @cached_property def accesses(self): @@ -1254,7 +1242,7 @@ def __init__(self, expr, indexeds=None, bases=None, offsets=None): for ii in self.iinstances: base = [] offset = [] - for e, fi, ai in zip(ii, ii.findices, ii.aindices): + for e, fi, ai in zip(ii, ii.findices, ii.aindices, strict=True): if ai is None: base.append((fi, e)) else: @@ -1324,9 +1312,8 @@ def translated(self, other, dims=None): return {} v = distance.pop() - if not d._defines & dims: - if v != 0: - return {} + if not d._defines & dims and v != 0: + return {} distances[d] = v @@ -1351,7 +1338,7 @@ def dimensions(self): @cached_property def aindices(self): try: - return tuple(zip(*self.Toffsets))[0] + return tuple(zip(*self.Toffsets, strict=True))[0] except IndexError: return () @@ -1377,7 +1364,7 @@ def retrieve_accesses(exprs, **kwargs): if not 
compaccs: return retrieve_terminals(exprs, **kwargs) - subs = {i: Symbol('dummy%d' % n) for n, i in enumerate(compaccs)} + subs = {i: Symbol(f'dummy{n}') for n, i in enumerate(compaccs)} exprs1 = uxreplace(exprs, subs) return compaccs | retrieve_terminals(exprs1, **kwargs) - set(subs.values()) diff --git a/devito/ir/support/guards.py b/devito/ir/support/guards.py index 574fe490ec..deba5be148 100644 --- a/devito/ir/support/guards.py +++ b/devito/ir/support/guards.py @@ -454,7 +454,9 @@ def simplify_and(relation, v): covered = True try: - if type(a) in (Gt, Ge) and v.rhs > a.rhs or type(a) in (Lt, Le) and v.rhs < a.rhs: + if type(a) in (Gt, Ge) \ + and v.rhs > a.rhs or type(a) in (Lt, Le) \ + and v.rhs < a.rhs: new_args.append(v) else: new_args.append(a) diff --git a/devito/ir/support/space.py b/devito/ir/support/space.py index dda334d3d1..0fcaf8f423 100644 --- a/devito/ir/support/space.py +++ b/devito/ir/support/space.py @@ -97,7 +97,7 @@ class NullInterval(AbstractInterval): is_Null = True def __repr__(self): - return "%s[Null]%s" % (self.dim, self.stamp) + return f"{self.dim}[Null]{self.stamp}" def __hash__(self): return hash(self.dim) @@ -149,7 +149,7 @@ def __init__(self, dim, lower=0, upper=0, stamp=S0): self.upper = upper def __repr__(self): - return "%s[%s,%s]%s" % (self.dim, self.lower, self.upper, self.stamp) + return f"{self.dim}[{self.lower},{self.upper}]{self.stamp}" def __hash__(self): return hash((self.dim, self.offsets)) @@ -247,8 +247,9 @@ def union(self, o): ovl, ovu = Vector(o.lower, smart=True), Vector(o.upper, smart=True) return Interval(self.dim, vmin(svl, ovl)[0], vmax(svu, ovu)[0], self.stamp) else: - raise ValueError("Cannot compute union of non-compatible Intervals (%s, %s)" % - (self, o)) + raise ValueError( + f"Cannot compute union of non-compatible Intervals ({self}, {o})" + ) def add(self, o): if not self.is_compatible(o): @@ -312,8 +313,10 @@ class IntervalGroup(Ordering): @classmethod def reorder(cls, items, relations): if not 
all(isinstance(i, AbstractInterval) for i in items): - raise ValueError("Cannot create IntervalGroup from objs of type [%s]" % - ', '.join(str(type(i)) for i in items)) + raise ValueError( + 'Cannot create IntervalGroup from objs of type ' + f'[{", ".join(str(type(i)) for i in items)}]' + ) if len(relations) == 1: # Special case: avoid expensive topological sorting if possible @@ -332,7 +335,7 @@ def simplify_relations(cls, relations, items, mode): return super().simplify_relations(relations, items, mode) def __eq__(self, o): - return len(self) == len(o) and all(i == j for i, j in zip(self, o)) + return len(self) == len(o) and all(i == j for i, j in zip(self, o, strict=True)) def __contains__(self, d): return any(i.dim is d for i in self) @@ -341,7 +344,7 @@ def __hash__(self): return hash(tuple(self)) def __repr__(self): - return "IntervalGroup[%s]" % (', '.join([repr(i) for i in self])) + return "IntervalGroup[{}]".format(', '.join([repr(i) for i in self])) @cached_property def dimensions(self): @@ -426,7 +429,7 @@ def is_compatible(self, o): """ if set(self) != set(o): return False - if all(i == j for i, j in zip(self, o)): + if all(i == j for i, j in zip(self, o, strict=True)): # Same input ordering, definitely compatible return True try: @@ -550,7 +553,7 @@ def index(self, key): return super().index(key) elif isinstance(key, Dimension): return super().index(self[key]) - raise ValueError("Expected Interval or Dimension, got `%s`" % type(key)) + raise ValueError(f"Expected Interval or Dimension, got `{type(key)}`") def __getitem__(self, key): if is_integer(key): @@ -621,7 +624,7 @@ def __init__(self, interval, sub_iterators=(), direction=Forward): self.direction = direction def __repr__(self): - return "%s%s" % (super().__repr__(), self.direction) + return f"{super().__repr__()}{self.direction}" def __eq__(self, other): if not isinstance(other, IterationInterval): @@ -654,8 +657,10 @@ def __init__(self, intervals): self._intervals = 
IntervalGroup(as_tuple(intervals)) def __repr__(self): - return "%s[%s]" % (self.__class__.__name__, - ", ".join(repr(i) for i in self.intervals)) + return "{}[{}]".format( + self.__class__.__name__, + ", ".join(repr(i) for i in self.intervals) + ) def __eq__(self, other): return isinstance(other, Space) and self.intervals == other.intervals @@ -667,8 +672,7 @@ def __len__(self): return len(self.intervals) def __iter__(self): - for i in self.intervals: - yield i + yield from self.intervals @property def intervals(self): @@ -719,7 +723,7 @@ def __init__(self, intervals, parts=None): parts = {k: v.expand() for k, v in (parts or {}).items()} for k, v in list(parts.items()): dims = set().union(*[d._defines for d in k.dimensions]) - parts[k] = v.drop(lambda d: d not in dims) + parts[k] = v.drop(lambda d: d not in dims) # noqa: B023 self._parts = frozendict(parts) def __eq__(self, other): @@ -798,9 +802,9 @@ def __init__(self, intervals, sub_iterators=None, directions=None): self._directions = frozendict(directions) def __repr__(self): - ret = ', '.join(["%s%s" % (repr(i), repr(self.directions[i.dim])) + ret = ', '.join([f"{repr(i)}{repr(self.directions[i.dim])}" for i in self.intervals]) - return "IterationSpace[%s]" % ret + return f"IterationSpace[{ret}]" def __eq__(self, other): if self is other: @@ -853,8 +857,8 @@ def generate(self, op, *others, relations=None): directions[k] = v elif v is not Any: # Clash detected - raise ValueError("Cannot compute %s of `IterationSpace`s " - "with incompatible directions" % op) + raise ValueError(f"Cannot compute {op} of `IterationSpace`s " + "with incompatible directions") sub_iterators = {} for i in others: @@ -922,10 +926,7 @@ def project(self, cond, strict=True): * either `cond(d)` is true (`cond` is a callable), * or `d in cond` is true (`cond` is an iterable) """ - if callable(cond): - func = cond - else: - func = lambda i: i in cond + func = cond if callable(cond) else lambda i: i in cond dims = [i.dim for i in self if not 
func(i.dim)] intervals = self.intervals.drop(dims, strict=strict) diff --git a/devito/ir/support/symregistry.py b/devito/ir/support/symregistry.py index aca49757a6..d7fca20f7c 100644 --- a/devito/ir/support/symregistry.py +++ b/devito/ir/support/symregistry.py @@ -45,7 +45,7 @@ def make_name(self, prefix=None, increment_first=True): if not increment_first: return prefix - return "%s%d" % (prefix, counter()) + return f'{prefix}{counter()}' def make_npthreads(self, size): name = self.make_name(prefix='npthreads') diff --git a/devito/ir/support/syncs.py b/devito/ir/support/syncs.py index 753b8f28fb..67623e5fe0 100644 --- a/devito/ir/support/syncs.py +++ b/devito/ir/support/syncs.py @@ -57,7 +57,7 @@ def __hash__(self): self.function, self.findex, self.dim, self.size, self.origin)) def __repr__(self): - return "%s<%s>" % (self.__class__.__name__, self.handle.name) + return f"{self.__class__.__name__}<{self.handle.name}>" __str__ = __repr__ @@ -75,7 +75,7 @@ def lock(self): class SyncCopyOut(SyncOp): def __repr__(self): - return "%s<%s->%s>" % (self.__class__.__name__, self.target, self.function) + return f"{self.__class__.__name__}<{self.target}->{self.function}>" __str__ = __repr__ @@ -98,7 +98,7 @@ def imask(self): class SyncCopyIn(SyncOp): def __repr__(self): - return "%s<%s->%s>" % (self.__class__.__name__, self.function, self.target) + return f"{self.__class__.__name__}<{self.function}->{self.target}>" __str__ = __repr__ diff --git a/devito/ir/support/utils.py b/devito/ir/support/utils.py index 41805e0ba8..53f6a55bb0 100644 --- a/devito/ir/support/utils.py +++ b/devito/ir/support/utils.py @@ -1,4 +1,5 @@ from collections import defaultdict, namedtuple +from contextlib import suppress from itertools import product from devito.finite_differences import IndexDerivative @@ -139,7 +140,7 @@ def detect_accesses(exprs): for e in retrieve_indexed(exprs, deep=True): f = e.function - for a, d0 in zip(e.indices, f.dimensions): + for a, d0 in zip(e.indices, f.dimensions, 
strict=True): if isinstance(a, Indirection): a = a.mapped @@ -160,7 +161,7 @@ def detect_accesses(exprs): # accesses (e.g., a[b[x, y] + 1, y]) or 2) as a result of # skewing-based optimizations, such as time skewing (e.g., # `x - time + 1`) or CIRE rotation (e.g., `x + xx - 4`) - d, others = split(dims, lambda i: d0 in i._defines) + d, others = split(dims, lambda i: d0 in i._defines) # noqa: B023 if any(i.is_Indexed for i in a.args) or len(d) != 1: # Case 1) -- with indirect accesses there's not much we can infer @@ -197,11 +198,9 @@ def detect_accesses(exprs): other_dims = set() for e in as_tuple(exprs): other_dims.update(i for i in e.free_symbols if isinstance(i, Dimension)) - try: + with suppress(AttributeError): + # Unless not a types.Eq other_dims.update(e.implicit_dims or {}) - except AttributeError: - # Not a types.Eq - pass other_dims = filter_sorted(other_dims) mapper[None] = Stencil([(i, 0) for i in other_dims]) @@ -243,10 +242,8 @@ def detect_io(exprs, relax=False): terminals = flatten(retrieve_terminals(i, deep=True) for i in roots) for i in terminals: candidates = set(i.free_symbols) - try: + with suppress(AttributeError): candidates.update({i.function}) - except AttributeError: - pass for j in candidates: try: if rule(j): @@ -368,10 +365,8 @@ def minmax_index(expr, d): """ indices = set() for i in retrieve_indexed(expr): - try: + with suppress(KeyError): indices.add(i.indices[d]) - except KeyError: - pass return Extrema(min(minimum(i) for i in indices), max(maximum(i) for i in indices)) @@ -388,6 +383,6 @@ def erange(expr): sdims = [d for d in udims if d.is_Stencil] ranges = [i.range for i in sdims] - mappers = [dict(zip(sdims, i)) for i in product(*ranges)] + mappers = [dict(zip(sdims, i, strict=True)) for i in product(*ranges)] return tuple(expr.subs(m) for m in mappers) diff --git a/devito/ir/support/vector.py b/devito/ir/support/vector.py index 4aa74ba060..02e26e2a02 100644 --- a/devito/ir/support/vector.py +++ b/devito/ir/support/vector.py @@ 
-74,7 +74,9 @@ def __hash__(self): @_asvector() def __add__(self, other): - return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart) + return Vector( + *[i + j for i, j in zip(self, other, strict=True)], smart=self.smart + ) @_asvector() def __radd__(self, other): @@ -82,7 +84,9 @@ def __radd__(self, other): @_asvector() def __sub__(self, other): - return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart) + return Vector( + *[i - j for i, j in zip(self, other, strict=True)], smart=self.smart + ) @_asvector() def __rsub__(self, other): @@ -109,7 +113,7 @@ def __lt__(self, other): return True elif val > 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i < 0) == true: return True @@ -124,7 +128,7 @@ def __lt__(self, other): return True elif q_positive(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e return False @@ -145,7 +149,7 @@ def __gt__(self, other): return True elif val < 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i > 0) == true: return True @@ -160,7 +164,7 @@ def __gt__(self, other): return True elif q_negative(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e return False @@ -184,7 +188,7 @@ def __le__(self, other): return True elif val > 0: return False - except TypeError: + except TypeError as e: if self.smart: if (i < 0) == true: return True @@ -199,7 +203,7 @@ def __le__(self, other): return True elif q_positive(i): return False - raise TypeError("Non-comparable index functions") + raise TypeError("Non-comparable index functions") from e # Note: unlike `__lt__`, if we end up here, then *it is* <=. 
For example, # with `v0` and `v1` as above, we would get here @@ -214,7 +218,7 @@ def __getitem__(self, key): return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret def __repr__(self): - return "(%s)" % ','.join(str(i) for i in self) + return "({})".format(','.join(str(i) for i in self)) @property def rank(self): @@ -253,7 +257,9 @@ def distance(self, other): """ try: # Handle quickly the special (yet relevant) cases `other == 0` - if is_integer(other) and other == 0 or all(i == 0 for i in other) and self.rank == other.rank: + if is_integer(other) \ + and other == 0 or all(i == 0 for i in other) \ + and self.rank == other.rank: return self except TypeError: pass @@ -269,12 +275,14 @@ class LabeledVector(Vector): def __new__(cls, items=None): try: - labels, values = zip(*items) + labels, values = zip(*items, strict=True) except (ValueError, TypeError): labels, values = (), () if not all(isinstance(i, Dimension) for i in labels): - raise ValueError("All labels must be of type Dimension, got [%s]" - % ','.join(i.__class__.__name__ for i in labels)) + raise ValueError( + 'All labels must be of type Dimension, got ' + f'[{", ".join(i.__class__.__name__ for i in labels)}]' + ) obj = super().__new__(cls, *values) obj.labels = labels return obj @@ -287,16 +295,20 @@ def transpose(cls, *vectors): if len(vectors) == 0: return LabeledVector() if not all(isinstance(v, LabeledVector) for v in vectors): - raise ValueError("All items must be of type LabeledVector, got [%s]" - % ','.join(i.__class__.__name__ for i in vectors)) + raise ValueError( + 'All items must be of type LabeledVector, got ' + f'[{", ".join(i.__class__.__name__ for i in vectors)}]' + ) T = OrderedDict() for v in vectors: - for l, i in zip(v.labels, v): + for l, i in zip(v.labels, v, strict=True): T.setdefault(l, []).append(i) return tuple((l, Vector(*i)) for l, i in T.items()) def __repr__(self): - return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self)) + return 
"({})".format( + ','.join(f'{l}:{i}' for l, i in zip(self.labels, self, strict=True)) + ) def __hash__(self): return hash((tuple(self), self.labels)) @@ -333,14 +345,15 @@ def __getitem__(self, index): return super().__getitem__(i) return None else: - raise TypeError("Indices must be integers, slices, or Dimensions, not %s" - % type(index)) + raise TypeError( + f"Indices must be integers, slices, or Dimensions, not {type(index)}" + ) def fromlabel(self, label, v=None): return self[label] if label in self.labels else v def items(self): - return zip(self.labels, self) + return zip(self.labels, self, strict=True) @memoized_meth def distance(self, other): @@ -356,7 +369,7 @@ def distance(self, other): raise TypeError("Cannot compute distance from obj of type %s", type(other)) if self.labels != other.labels: raise TypeError("Cannot compute distance due to mismatching `labels`") - return LabeledVector(list(zip(self.labels, self - other))) + return LabeledVector(list(zip(self.labels, self - other, strict=True))) # Utility functions diff --git a/devito/mpatches/rationaltools.py b/devito/mpatches/rationaltools.py index 93b0d14ed6..7469cc34f4 100644 --- a/devito/mpatches/rationaltools.py +++ b/devito/mpatches/rationaltools.py @@ -74,10 +74,7 @@ def _together(expr): elif expr.is_Pow: base = _together(expr.base) - if deep: - exp = _together(expr.exp) - else: - exp = expr.exp + exp = _together(expr.exp) if deep else expr.exp return expr.func(base, exp) else: diff --git a/devito/mpi/distributed.py b/devito/mpi/distributed.py index 480609cd2f..01edaaaa69 100644 --- a/devito/mpi/distributed.py +++ b/devito/mpi/distributed.py @@ -80,8 +80,8 @@ def devito_mpi_init(): if not MPI.Is_initialized(): try: thread_level = mpi4py_thread_levels[mpi4py.rc.thread_level] - except KeyError: - assert False + except KeyError as e: + raise AssertionError('mpi4py thread levels not accessible') from e MPI.Init_thread(thread_level) @@ -117,7 +117,7 @@ def __init__(self, shape, dimensions): 
self._dimensions = as_tuple(dimensions) def __repr__(self): - return "%s(nprocs=%d)" % (self.__class__.__name__, self.nprocs) + return f'{self.__class__.__name__}(nprocs={self.nprocs})' @abstractmethod def comm(self): @@ -147,7 +147,7 @@ def is_parallel(self): def glb_numb(self): """The global indices owned by the calling MPI rank.""" assert len(self.mycoords) == len(self.decomposition) - glb_numb = [i[j] for i, j in zip(self.decomposition, self.mycoords)] + glb_numb = [i[j] for i, j in zip(self.decomposition, self.mycoords, strict=True)] return EnrichedTuple(*glb_numb, getters=self.dimensions) @cached_property @@ -157,7 +157,7 @@ def glb_slices(self): Dimensions to slices. """ return {d: slice(min(i), max(i) + 1) if len(i) > 0 else slice(0, -1) - for d, i in zip(self.dimensions, self.glb_numb)} + for d, i in zip(self.dimensions, self.glb_numb, strict=True)} @property def glb_shape(self): @@ -202,7 +202,7 @@ def glb_to_loc(self, dim, *args, strict=True): """ if dim not in self.dimensions: if strict: - raise ValueError("`%s` must be one of the Distributor dimensions" % dim) + raise ValueError(f"`{dim}` must be one of the Distributor dimensions") else: return args[0] return self.decomposition[dim].index_glb_to_loc(*args) @@ -284,7 +284,7 @@ def all_numb(self): """The global numbering of all MPI ranks.""" ret = [] for c in self.all_coords: - glb_numb = [i[j] for i, j in zip(self.decomposition, c)] + glb_numb = [i[j] for i, j in zip(self.decomposition, c, strict=True)] ret.append(EnrichedTuple(*glb_numb, getters=self.dimensions)) return tuple(ret) @@ -370,16 +370,20 @@ def __init__(self, shape, dimensions, input_comm=None, topology=None): self._topology = tuple(1 for _ in range(len(shape))) # The domain decomposition - self._decomposition = [Decomposition(np.array_split(range(i), j), c) - for i, j, c in zip(shape, self.topology, self.mycoords)] + self._decomposition = [ + Decomposition(np.array_split(range(i), j), c) + for i, j, c in zip(shape, self.topology, 
self.mycoords, strict=True) + ] @cached_property def is_boundary_rank(self): """ MPI rank interfaces with the boundary of the domain. """ - return any([i == 0 or i == j-1 for i, j in - zip(self.mycoords, self.topology)]) + return any([ + i == 0 or i == j-1 + for i, j in zip(self.mycoords, self.topology, strict=True) + ]) @cached_property def glb_pos_map(self): @@ -388,7 +392,7 @@ def glb_pos_map(self): MPI rank in the decomposed domain. """ ret = {} - for d, i, s in zip(self.dimensions, self.mycoords, self.topology): + for d, i, s in zip(self.dimensions, self.mycoords, self.topology, strict=True): v = [] if i == 0: v.append(LEFT) @@ -461,9 +465,9 @@ def neighborhood(self): # Set up diagonal neighbours for i in product([LEFT, CENTER, RIGHT], repeat=self.ndim): - neighbor = [c + s.val for c, s in zip(self.mycoords, i)] + neighbor = [c + s.val for c, s in zip(self.mycoords, i, strict=True)] - if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology)): + if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology, strict=True)): ret[i] = MPI.PROC_NULL else: ret[i] = self.comm.Get_cart_rank(neighbor) @@ -499,9 +503,11 @@ def __init__(self, subdomain): super().__init__(subdomain.shape, subdomain.dimensions) self._subdomain_name = subdomain.name - self._dimension_map = frozendict({pd: sd for pd, sd - in zip(subdomain.grid.dimensions, - subdomain.dimensions)}) + self._dimension_map = frozendict({ + pd: sd for pd, sd in zip( + subdomain.grid.dimensions, subdomain.dimensions, strict=True + ) + }) self._parent = subdomain.grid.distributor self._comm = self.parent.comm @@ -514,16 +520,21 @@ def __decomposition_setup__(self): Set up the decomposition, aligned with that of the parent Distributor. 
""" decompositions = [] - for dec, i in zip(self.parent._decomposition, self.subdomain_interval): + for dec, i in zip( + self.parent._decomposition, self.subdomain_interval, strict=True + ): if i is None: decompositions.append(dec) else: start, end = _interval_bounds(i) - decompositions.append([d[np.logical_and(d >= start, d <= end)] - for d in dec]) + decompositions.append( + [d[np.logical_and(d >= start, d <= end)] for d in dec] + ) - self._decomposition = [Decomposition(d, c) - for d, c in zip(decompositions, self.mycoords)] + self._decomposition = [ + Decomposition(d, c) + for d, c in zip(decompositions, self.mycoords, strict=True) + ] @property def parent(self): @@ -558,8 +569,10 @@ def subdomain_interval(self): """The interval spanned by the SubDomain.""" # Assumes no override of x_m and x_M supplied to operator bounds_map = {d.symbolic_min: 0 for d in self.p.dimensions} - bounds_map.update({d.symbolic_max: s-1 for d, s in zip(self.p.dimensions, - self.p.glb_shape)}) + bounds_map.update({ + d.symbolic_max: s - 1 + for d, s in zip(self.p.dimensions, self.p.glb_shape, strict=True) + }) sd_interval = [] # The Interval of SubDimension indices for d in self.dimensions: @@ -576,8 +589,10 @@ def subdomain_interval(self): @cached_property def intervals(self): """The interval spanned by the SubDomain in each dimension on this rank.""" - return tuple(d if s is None else d.intersect(s) - for d, s in zip(self.domain_interval, self.subdomain_interval)) + return tuple( + d if s is None else d.intersect(s) + for d, s in zip(self.domain_interval, self.subdomain_interval, strict=True) + ) @cached_property def crosses(self): @@ -608,18 +623,27 @@ def get_crosses(d, di, si): if di.issuperset(si) or di.isdisjoint(si): return {LEFT: False, RIGHT: False} elif d.local: - raise ValueError("SubDimension %s is local and cannot be" - " decomposed across MPI ranks" % d) + raise ValueError(f"SubDimension {d} is local and cannot be" + " decomposed across MPI ranks") return {LEFT: si.left 
< di.left, RIGHT: si.right > di.right} - crosses = {d: get_crosses(d, di, si) for d, di, si - in zip(self.dimensions, self.domain_interval, - self.subdomain_interval)} + crosses = { + d: get_crosses(d, di, si) + for d, di, si in zip( + self.dimensions, + self.domain_interval, + self.subdomain_interval, + strict=True + ) + } for i in product([LEFT, CENTER, RIGHT], repeat=len(self.dimensions)): - crosses[i] = all(crosses[d][s] for d, s in zip(self.dimensions, i) - if s in crosses[d]) # Skip over CENTER + crosses[i] = all( + crosses[d][s] + for d, s in zip(self.dimensions, i, strict=True) + if s in crosses[d] + ) # Skip over CENTER return frozendict(crosses) @@ -664,10 +688,11 @@ def neighborhood(self): # Set up diagonal neighbours for i in product([LEFT, CENTER, RIGHT], repeat=self.ndim): - neighbor = [c + s.val for c, s in zip(self.mycoords, i)] + neighbor = [c + s.val for c, s in zip(self.mycoords, i, strict=True)] - if any(c < 0 or c >= s for c, s in zip(neighbor, self.topology)) \ - or not self.crosses[i]: + if any( + c < 0 or c >= s for c, s in zip(neighbor, self.topology, strict=True) + ) or not self.crosses[i]: ret[i] = MPI.PROC_NULL else: ret[i] = self.comm.Get_cart_rank(neighbor) @@ -678,7 +703,7 @@ def neighborhood(self): def rank_populated(self): """Constant symbol for a switch indicating that data is allocated on this rank""" return Constant(name=f'rank_populated_{self._subdomain_name}', dtype=np.int8, - value=int(not(self.loc_empty))) + value=int(not self.loc_empty)) def _interval_bounds(interval): @@ -742,8 +767,10 @@ def decompose(cls, npoint, distributor): # The i-th entry in `npoint` tells how many sparse points the # i-th MPI rank has if len(npoint) != nprocs: - raise ValueError('The `npoint` tuple must have as many entries as ' - 'MPI ranks (got `%d`, need `%d`)' % (npoint, nprocs)) + raise ValueError( + 'The `npoint` tuple must have as many entries as ' + f'MPI ranks (got `{npoint}`, need `{nprocs}`)' + ) elif any(i < 0 for i in npoint): raise 
ValueError('All entries in `npoint` must be >= 0') glb_npoint = npoint @@ -850,13 +877,13 @@ def _C_typedecl(self): # # With this override, we generate the one on the right groups = [list(g) for k, g in groupby(self.pfields, key=lambda x: x[0][0])] - groups = [(j[0], i) for i, j in [zip(*g) for g in groups]] + groups = [(j[0], i) for i, j in [zip(*g, strict=True) for g in groups]] return Struct(self.pname, [Value(ctypes_to_cstr(i), ', '.join(j)) for i, j in groups]) def _arg_defaults(self): values = super()._arg_defaults() - for name, i in zip(self.fields, self.entries): + for name, i in zip(self.fields, self.entries, strict=True): setattr(values[self.name]._obj, name, self.neighborhood[i]) return values @@ -959,7 +986,7 @@ def __new__(cls, items, input_comm): star_vals = [int(np.prod(s)) for s in split] # Apply computed star values to the processed - for index, value in zip(star_pos, star_vals): + for index, value in zip(star_pos, star_vals, strict=True): processed[index] = value # Final check that topology matches the communicator size @@ -979,7 +1006,7 @@ def compute_dims(nprocs, ndim): if not v.is_integer(): # Since pow(64, 1/3) == 3.999..4 v = int(ceil(v)) - if not v**ndim == nprocs: + if v**ndim != nprocs: # Fallback return tuple(MPI.Compute_dims(nprocs, ndim)) else: diff --git a/devito/mpi/halo_scheme.py b/devito/mpi/halo_scheme.py index 4bc3c44b51..4d12299025 100644 --- a/devito/mpi/halo_scheme.py +++ b/devito/mpi/halo_scheme.py @@ -1,4 +1,5 @@ from collections import OrderedDict, defaultdict, namedtuple +from contextlib import suppress from functools import cached_property from itertools import product from operator import attrgetter @@ -38,7 +39,7 @@ def __new__(cls, loc_indices, loc_dirs, halos, dims, bundle=None, getters=None): getters = cls.__rargs__ + cls.__rkwargs__ items = [frozendict(loc_indices), frozendict(loc_dirs), frozenset(halos), frozenset(dims), bundle] - kwargs = dict(zip(getters, items)) + kwargs = dict(zip(getters, items, strict=True)) 
return super().__new__(cls, *items, getters=getters, **kwargs) def __hash__(self): @@ -151,7 +152,7 @@ def __init__(self, exprs, ispace): def __repr__(self): fnames = ",".join(i.name for i in set(self._mapper)) - return "HaloScheme<%s>" % fnames + return f"HaloScheme<{fnames}>" def __eq__(self, other): return (isinstance(other, HaloScheme) and @@ -401,7 +402,7 @@ def owned_size(self): mapper = {} for f, v in self.halos.items(): dimensions = filter_ordered(flatten(i.dim for i in v)) - for d, s in zip(f.dimensions, f._size_owned): + for d, s in zip(f.dimensions, f._size_owned, strict=True): if d in dimensions: maxl, maxr = mapper.get(d, (0, 0)) mapper[d] = (max(maxl, s.left), max(maxr, s.right)) @@ -532,8 +533,11 @@ def classify(exprs, ispace): # practically subjected to domain decomposition dist = f.grid.distributor try: - ignored = [d for i, d in zip(dist.topology_logical, dist.dimensions) - if i == 1] + ignored = [ + d + for i, d in zip(dist.topology_logical, dist.dimensions, strict=True) + if i == 1 + ] except TypeError: ignored = [] @@ -569,7 +573,10 @@ def classify(exprs, ispace): combs.remove((CENTER,)*len(f._dist_dimensions)) for c in combs: key = (f._dist_dimensions, c) - if all(v.get((d, s)) is STENCIL or s is CENTER for d, s in zip(*key)): + if all( + v.get((d, s)) is STENCIL or s is CENTER + for d, s in zip(*key, strict=True) + ): v[key] = STENCIL # Finally update the `halo_labels` @@ -597,16 +604,13 @@ def classify(exprs, ispace): # Separate halo-exchange Dimensions from `loc_indices` raw_loc_indices, halos = defaultdict(list), [] for (d, s), hl in halo_labels.items(): - try: + with suppress(KeyError): hl.remove(IDENTITY) - except KeyError: - pass if not hl: continue elif len(hl) > 1: raise HaloSchemeException("Inconsistency found while building a halo " - "scheme for `%s` along Dimension `%s`" - % (f, d)) + f"scheme for `{f}` along Dimension `{d}`") elif hl.pop() is STENCIL: halos.append(Halo(d, s)) else: @@ -683,7 +687,7 @@ def __new__(cls, *args, 
halo_scheme=None, **kwargs): return obj def __repr__(self): - return "HaloTouch(%s)" % ",".join(f.name for f in self.halo_scheme.fmapper) + return "HaloTouch({})".format(",".join(f.name for f in self.halo_scheme.fmapper)) __str__ = __repr__ diff --git a/devito/mpi/reduction_scheme.py b/devito/mpi/reduction_scheme.py index f3a412f07d..43dfe59a50 100644 --- a/devito/mpi/reduction_scheme.py +++ b/devito/mpi/reduction_scheme.py @@ -22,7 +22,7 @@ def __new__(cls, var, op=None, grid=None, ispace=None, **kwargs): return obj def __repr__(self): - return "DistReduce(%s,%s)" % (self.var, self.op) + return f"DistReduce({self.var},{self.op})" __str__ = __repr__ diff --git a/devito/mpi/routines.py b/devito/mpi/routines.py index 9fa639a7a1..74c788fa0e 100644 --- a/devito/mpi/routines.py +++ b/devito/mpi/routines.py @@ -119,7 +119,7 @@ def make(self, hs): # Callables haloupdates = [] halowaits = [] - for i, (f, hse) in enumerate(hs.fmapper.items()): + for f, hse in hs.fmapper.items(): msg = self._msgs[(f, hse)] haloupdate, halowait = mapper[(f, hse)] haloupdates.append(self._call_haloupdate(haloupdate.name, f, hse, msg)) @@ -315,12 +315,12 @@ def _make_bundles(self, hs): # We recast everything else as Bags for simplicity -- worst case # scenario all Bags only have one component. 
try: - name = "bag_%s" % "".join(f.name for f in components) + name = "bag_{}".format("".join(f.name for f in components)) bag = Bag(name=name, components=components) halo_scheme = halo_scheme.add(bag, hse) except ValueError: for i in components: - name = "bag_%s" % i.name + name = f"bag_{i.name}" bag = Bag(name=name, components=i) halo_scheme = halo_scheme.add(bag, hse) @@ -354,11 +354,13 @@ def _make_all(self, f, hse, msg): def _make_copy(self, f, hse, key, swap=False): dims = [d.root for d in f.dimensions if d not in hse.loc_indices] - ofs = [Symbol(name='o%s' % d.root, is_const=True) for d in f.dimensions] + ofs = [Symbol(name=f'o{d.root}', is_const=True) for d in f.dimensions] - bshape = [Symbol(name='b%s' % d.symbolic_size) for d in dims] - bdims = [CustomDimension(name=d.name, parent=d, symbolic_size=s) - for d, s in zip(dims, bshape)] + bshape = [Symbol(name=f'b{d.symbolic_size}') for d in dims] + bdims = [ + CustomDimension(name=d.name, parent=d, symbolic_size=s) + for d, s in zip(dims, bshape, strict=True) + ] eqns = [] eqns.extend([Eq(d.symbolic_min, 0) for d in bdims]) @@ -368,16 +370,18 @@ def _make_copy(self, f, hse, key, swap=False): buf = Array(name='buf', dimensions=[vd] + bdims, dtype=f.c0.dtype, padding=0) - mapper = dict(zip(dims, bdims)) - findices = [o - h + mapper.get(d.root, 0) - for d, o, h in zip(f.dimensions, ofs, f._size_nodomain.left)] + mapper = dict(zip(dims, bdims, strict=True)) + findices = [ + o - h + mapper.get(d.root, 0) + for d, o, h in zip(f.dimensions, ofs, f._size_nodomain.left, strict=True) + ] if swap is False: swap = lambda i, j: (i, j) - name = 'gather%s' % key + name = f'gather{key}' else: swap = lambda i, j: (j, i) - name = 'scatter%s' % key + name = f'scatter{key}' if isinstance(f, Bag): for i, c in enumerate(f.components): @@ -410,8 +414,8 @@ def _make_sendrecv(self, f, hse, key, **kwargs): bufs = Array(name='bufs', dimensions=bdims, dtype=f.c0.dtype, padding=0, liveness='eager') - ofsg = [Symbol(name='og%s' % d.root) 
for d in f.dimensions] - ofss = [Symbol(name='os%s' % d.root) for d in f.dimensions] + ofsg = [Symbol(name=f'og{d.root}') for d in f.dimensions] + ofss = [Symbol(name=f'os{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') torank = Symbol(name='torank') @@ -419,9 +423,9 @@ def _make_sendrecv(self, f, hse, key, **kwargs): shape = [d.symbolic_size for d in dims] arguments = [bufg] + shape + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) arguments = [bufs] + shape + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) # The `gather` is unnecessary if sending to MPI.PROC_NULL gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) @@ -445,7 +449,7 @@ def _make_sendrecv(self, f, hse, key, **kwargs): parameters = (list(f.handles) + shape + ofsg + ofss + [fromrank, torank, comm]) - return SendRecv('sendrecv%s' % key, iet, parameters, bufg, bufs) + return SendRecv(f'sendrecv{key}', iet, parameters, bufg, bufs) def _call_sendrecv(self, name, *args, **kwargs): args = list(args[0].handles) + flatten(args[1:]) @@ -456,7 +460,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): nb = distributor._obj_neighborhood comm = distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Build a mapper `(dim, side, region) -> (size, ofs)` for `f`. `size` and # `ofs` are symbolic objects. 
This mapper tells what data values should be @@ -504,7 +508,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): parameters = list(f.handles) + [comm, nb] + list(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) def _call_haloupdate(self, name, f, hse, *args): comm = f.grid.distributor._obj_comm @@ -566,7 +570,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): nb = distributor._obj_neighborhood comm = distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Only retain the halos required by the Diag scheme # Note: `sorted` is only for deterministic code generation @@ -574,7 +578,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): body = [] for dims, tosides in halos: - mapper = OrderedDict(zip(dims, tosides)) + mapper = OrderedDict(zip(dims, tosides, strict=True)) sizes = [f._C_get_field(OWNED, d, s).size for d, s in mapper.items()] @@ -582,7 +586,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): ofsg = [fixed.get(d, f._C_get_field(OWNED, d, mapper.get(d)).offset) for d in f.dimensions] - mapper = OrderedDict(zip(dims, [i.flip() for i in tosides])) + mapper = OrderedDict(zip(dims, [i.flip() for i in tosides], strict=True)) fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb) ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset) for d in f.dimensions] @@ -596,7 +600,7 @@ def _make_haloupdate(self, f, hse, key, sendrecv, **kwargs): parameters = list(f.handles) + [comm, nb] + list(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) class ComputeCall(ElementalCall): @@ -624,7 +628,7 @@ class OverlapHaloExchangeBuilder(DiagHaloExchangeBuilder): def _make_msg(self, f, hse, key): # Only retain the halos required by the 
Diag scheme halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple)) - return MPIMsg('msg%d' % key, f, halos) + return MPIMsg(f'msg{key}', f, halos) def _make_sendrecv(self, f, hse, key, msg=None): fcast = cast(f.c0.dtype, '*') @@ -633,16 +637,18 @@ def _make_sendrecv(self, f, hse, key, msg=None): bufg = FieldFromPointer(msg._C_field_bufg, msg) bufs = FieldFromPointer(msg._C_field_bufs, msg) - ofsg = [Symbol(name='og%s' % d.root) for d in f.dimensions] + ofsg = [Symbol(name=f'og{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') torank = Symbol(name='torank') - sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromPointer(f'{msg._C_field_sizes}[{i}]', msg) + for i in range(len(f._dist_dimensions)) + ] arguments = [fcast(bufg)] + sizes + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) # The `gather` is unnecessary if sending to MPI.PROC_NULL gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) @@ -658,7 +664,7 @@ def _make_sendrecv(self, f, hse, key, msg=None): parameters = list(f.handles) + ofsg + [fromrank, torank, comm, msg] - return SendRecv('sendrecv%s' % key, iet, parameters, bufg, bufs) + return SendRecv(f'sendrecv{key}', iet, parameters, bufg, bufs) def _call_sendrecv(self, name, *args, msg=None, haloid=None): # Drop `sizes` as this HaloExchangeBuilder conveys them through `msg` @@ -682,8 +688,12 @@ def _make_compute(self, hs, key, *args): if hs.body.is_Call: return None else: - return make_efunc('compute%d' % key, hs.body, hs.arguments, - efunc_type=ComputeFunction) + return make_efunc( + f'compute{key}', + hs.body, + hs.arguments, + efunc_type=ComputeFunction + ) def _call_compute(self, hs, compute, *args): if compute is None: @@ -697,14 +707,16 @@ def _make_wait(self, f, hse, key, msg=None): bufs = FieldFromPointer(msg._C_field_bufs, msg) - ofss = [Symbol(name='os%s' % 
d.root) for d in f.dimensions] + ofss = [Symbol(name=f'os{d.root}') for d in f.dimensions] fromrank = Symbol(name='fromrank') - sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromPointer(f'{msg._C_field_sizes}[{i}]', msg) + for i in range(len(f._dist_dimensions)) + ] arguments = [fcast(bufs)] + sizes + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) # The `scatter` must be guarded as we must not alter the halo values along # the domain boundary, where the sender is actually MPI.PROC_NULL @@ -719,12 +731,12 @@ def _make_wait(self, f, hse, key, msg=None): parameters = (list(f.handles) + ofss + [fromrank, msg]) - return Callable('wait_%s' % key, iet, 'void', parameters, ('static',)) + return Callable(f'wait_{key}', iet, 'void', parameters, ('static',)) def _make_halowait(self, f, hse, key, wait, msg=None): nb = f.grid.distributor._obj_neighborhood - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} # Only retain the halos required by the Diag scheme # Note: `sorted` is only for deterministic code generation @@ -732,7 +744,7 @@ def _make_halowait(self, f, hse, key, wait, msg=None): body = [] for dims, tosides in halos: - mapper = OrderedDict(zip(dims, [i.flip() for i in tosides])) + mapper = OrderedDict(zip(dims, [i.flip() for i in tosides], strict=True)) fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb) ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset) for d in f.dimensions] @@ -756,7 +768,7 @@ def _call_halowait(self, name, f, hse, msg): def _make_remainder(self, hs, key, callcompute, *args): assert callcompute.is_Call body = [callcompute._rebuild(dynamic_args_mapper=i) for _, i in hs.omapper.owned] - return Remainder.make('remainder%d' % key, body) + return 
Remainder.make(f'remainder{key}', body) def _call_remainder(self, remainder): efunc = remainder.make_call() @@ -797,7 +809,7 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): fcast = cast(f.c0.dtype, '*') comm = f.grid.distributor._obj_comm - fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} dim = Dimension(name='i') @@ -809,15 +821,19 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): fromrank = FieldFromComposite(msg._C_field_from, msgi) torank = FieldFromComposite(msg._C_field_to, msgi) - sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi) - for i in range(len(f._dist_dimensions))] - ofsg = [FieldFromComposite('%s[%d]' % (msg._C_field_ofsg, i), msgi) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromComposite(f'{msg._C_field_sizes}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] + ofsg = [ + FieldFromComposite(f'{msg._C_field_ofsg}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] ofsg = [fixed.get(d) or ofsg.pop(0) for d in f.dimensions] # The `gather` is unnecessary if sending to MPI.PROC_NULL arguments = [fcast(bufg)] + sizes + list(f.handles) + ofsg - gather = Gather('gather%s' % key, arguments) + gather = Gather(f'gather{key}', arguments) gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather) # Make Irecv/Isend @@ -833,7 +849,7 @@ def _make_haloupdate(self, f, hse, key, *args, msg=None): ncomms = Symbol(name='ncomms') iet = Iteration([recv, gather, send], dim, ncomms - 1) parameters = f.handles + (comm, msg, ncomms) + tuple(fixed.values()) - return HaloUpdate('haloupdate%s' % key, iet, parameters) + return HaloUpdate(f'haloupdate{key}', iet, parameters) def _call_haloupdate(self, name, f, hse, msg): comm = f.grid.distributor._obj_comm @@ -843,7 +859,7 @@ def _call_haloupdate(self, name, f, hse, msg): def _make_halowait(self, f, hse, key, *args, msg=None): fcast = cast(f.c0.dtype, '*') - 
fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices} + fixed = {d: Symbol(name=f"o{d.root}") for d in hse.loc_indices} dim = Dimension(name='i') @@ -853,16 +869,20 @@ def _make_halowait(self, f, hse, key, *args, msg=None): fromrank = FieldFromComposite(msg._C_field_from, msgi) - sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi) - for i in range(len(f._dist_dimensions))] - ofss = [FieldFromComposite('%s[%d]' % (msg._C_field_ofss, i), msgi) - for i in range(len(f._dist_dimensions))] + sizes = [ + FieldFromComposite(f'{msg._C_field_sizes}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] + ofss = [ + FieldFromComposite(f'{msg._C_field_ofss}[{i}]', msgi) + for i in range(len(f._dist_dimensions)) + ] ofss = [fixed.get(d) or ofss.pop(0) for d in f.dimensions] # The `scatter` must be guarded as we must not alter the halo values along # the domain boundary, where the sender is actually MPI.PROC_NULL arguments = [fcast(bufs)] + sizes + list(f.handles) + ofss - scatter = Scatter('scatter%s' % key, arguments) + scatter = Scatter(f'scatter{key}', arguments) scatter = Conditional(CondNe(fromrank, Macro('MPI_PROC_NULL')), scatter) rrecv = Byref(FieldFromComposite(msg._C_field_rrecv, msgi)) @@ -904,7 +924,7 @@ def _make_remainder(self, hs, key, callcompute, region): # The -1 below is because an Iteration, by default, generates <= iet = Iteration(iet, dim, region.nregions - 1) - return Remainder.make('remainder%d' % key, iet) + return Remainder.make(f'remainder{key}', iet) class Diag2HaloExchangeBuilder(Overlap2HaloExchangeBuilder): @@ -988,8 +1008,12 @@ def _make_compute(self, hs, key, msgs, callpoke): mapper = {i: List(body=[callpoke, i]) for i in FindNodes(ExpressionBundle).visit(hs.body)} iet = Transformer(mapper).visit(hs.body) - return make_efunc('compute%d' % key, iet, hs.arguments, - efunc_type=ComputeFunction) + return make_efunc( + f'compute{key}', + iet, + hs.arguments, + efunc_type=ComputeFunction + ) def _make_poke(self, hs, 
key, msgs): lflag = Symbol(name='lflag') @@ -1017,7 +1041,11 @@ def _make_poke(self, hs, key, msgs): body.append(Return(gflag)) - return make_efunc('pokempi%d' % key, List(body=body), retval='int') + return make_efunc( + f'pokempi{key}', + List(body=body), + retval='int' + ) def _call_poke(self, poke): return Prodder(poke.name, poke.parameters, single_thread=True, periodic=True) @@ -1219,7 +1247,7 @@ def _arg_defaults(self, alias, args=None): # Buffer shape for this peer shape = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: shape.append(getattr(f._size_owned[dim], side.name)) except AttributeError: @@ -1292,7 +1320,7 @@ def _arg_defaults(self, alias=None, args=None): # `torank` peer + gather offsets entry.torank = neighborhood[halo.side] ofsg = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: v = getattr(f._offset_owned[dim], side.name) ofsg.append(self._as_number(v, args)) @@ -1304,7 +1332,7 @@ def _arg_defaults(self, alias=None, args=None): # `fromrank` peer + scatter offsets entry.fromrank = neighborhood[tuple(i.flip() for i in halo.side)] ofss = [] - for dim, side in zip(*halo): + for dim, side in zip(*halo, strict=True): try: v = getattr(f._offset_halo[dim], side.flip().name) ofss.append(self._as_number(v, args)) @@ -1342,8 +1370,8 @@ def __init__(self, prefix, key, arguments, owned): # Sorting for deterministic codegen self._arguments = sorted(arguments, key=lambda i: i.name) - name = "%s%d" % (prefix, key) - pname = "region%d" % key + name = f'{prefix}{key}' + pname = f'region{key}' fields = [] for i in self.arguments: diff --git a/devito/operations/interpolators.py b/devito/operations/interpolators.py index 9104fb426c..1ac838c050 100644 --- a/devito/operations/interpolators.py +++ b/devito/operations/interpolators.py @@ -233,7 +233,7 @@ def _rdim(self, subdomain=None): rdims = [] pos = self.sfunction._position_map.values() - for (d, rd, p) in zip(gdims, self._cdim, pos): + for (d, rd, p) 
in zip(gdims, self._cdim, pos, strict=True): # Add conditional to avoid OOB lb = sympy.And(rd + p >= d.symbolic_min - self.r, evaluate=False) ub = sympy.And(rd + p <= d.symbolic_max + self.r, evaluate=False) @@ -301,21 +301,28 @@ def _interp_idx(self, variables, implicit_dims=None, pos_only=(), subdomain=None mapper = self._rdim(subdomain=subdomain).getters # Index substitution to make in variables - subs = {ki: c + p for ((k, c), p) - in zip(mapper.items(), pos) for ki in {k, k.root}} + subs = { + ki: c + p + for ((k, c), p) in zip(mapper.items(), pos, strict=True) + for ki in {k, k.root} + } idx_subs = {v: v.subs(subs) for v in variables} # Position only replacement, not radius dependent. # E.g src.inject(vp(x)*src) needs to use vp[posx] at all points # not vp[posx + rx] - idx_subs.update({v: v.subs({k: p for (k, p) in zip(mapper, pos)}) - for v in pos_only}) + idx_subs.update({ + v: v.subs({ + k: p + for (k, p) in zip(mapper, pos, strict=True) + }) for v in pos_only + }) return idx_subs, temps @check_radius - def interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): + def interpolate(self, expr, increment=False, self_subs=None, implicit_dims=None): """ Generate equations interpolating an arbitrary expression into ``self``. @@ -330,6 +337,8 @@ def interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): interpolation expression, but that should be honored when constructing the operator. """ + if self_subs is None: + self_subs = {} return Interpolation(expr, increment, implicit_dims, self_subs, self) @check_radius @@ -350,7 +359,7 @@ def inject(self, field, expr, implicit_dims=None): """ return Injection(field, expr, implicit_dims, self) - def _interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): + def _interpolate(self, expr, increment=False, self_subs=None, implicit_dims=None): """ Generate equations interpolating an arbitrary expression into ``self``. 
@@ -372,6 +381,9 @@ def _interpolate(self, expr, increment=False, self_subs={}, implicit_dims=None): # E.g., a generic SymPy expression or a number _expr = expr + if self_subs is None: + self_subs = {} + variables = list(retrieve_function_carriers(_expr)) subdomain = _extract_subdomain(variables) @@ -450,7 +462,7 @@ def _inject(self, field, expr, implicit_dims=None): eqns = [Inc(_field.xreplace(idx_subs), (self._weights(subdomain=subdomain) * _expr).xreplace(idx_subs), implicit_dims=implicit_dims) - for (_field, _expr) in zip(fields, _exprs)] + for (_field, _expr) in zip(fields, _exprs, strict=True)] return temps + eqns @@ -471,7 +483,7 @@ class LinearInterpolator(WeightedInterpolator): def _weights(self, subdomain=None): rdim = self._rdim(subdomain=subdomain) c = [(1 - p) * (1 - r) + p * r - for (p, d, r) in zip(self._point_symbols, self._gdims, rdim)] + for (p, d, r) in zip(self._point_symbols, self._gdims, rdim, strict=True)] return Mul(*c) @cached_property @@ -487,7 +499,7 @@ def _coeff_temps(self, implicit_dims): pmap = self.sfunction._position_map poseq = [Eq(self._point_symbols[d], pos - floor(pos), implicit_dims=implicit_dims) - for (d, pos) in zip(self._gdims, pmap.keys())] + for (d, pos) in zip(self._gdims, pmap.keys(), strict=True)] return poseq @@ -567,8 +579,10 @@ def interpolation_coeffs(self): @memoized_meth def _weights(self, subdomain=None): rdims = self._rdim(subdomain=subdomain) - return Mul(*[w._subs(rd, rd-rd.parent.symbolic_min) - for (rd, w) in zip(rdims, self.interpolation_coeffs)]) + return Mul(*[ + w._subs(rd, rd-rd.parent.symbolic_min) + for (rd, w) in zip(rdims, self.interpolation_coeffs, strict=True) + ]) def _arg_defaults(self, coords=None, sfunc=None): args = {} diff --git a/devito/operations/solve.py b/devito/operations/solve.py index 6545fc4ead..8e8541bf09 100644 --- a/devito/operations/solve.py +++ b/devito/operations/solve.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import singledispatch import sympy @@ 
-32,10 +33,8 @@ def solve(eq, target, **kwargs): Symbolic optimizations applied while rearranging the equation. For more information. refer to ``sympy.solve.__doc__``. """ - try: + with suppress(AttributeError): eq = eq.lhs - eq.rhs if eq.rhs != 0 else eq.lhs - except AttributeError: - pass eqs, targets = as_tuple(eq), as_tuple(target) if len(eqs) == 0: @@ -43,7 +42,7 @@ def solve(eq, target, **kwargs): return None sols = [] - for e, t in zip(eqs, targets): + for e, t in zip(eqs, targets, strict=True): # Try first linear solver try: sols.append(linsolve(eval_time_derivatives(e), t)) diff --git a/devito/operator/operator.py b/devito/operator/operator.py index a0f3d06410..779106d75a 100644 --- a/devito/operator/operator.py +++ b/devito/operator/operator.py @@ -1,6 +1,7 @@ import ctypes import shutil from collections import OrderedDict, namedtuple +from contextlib import suppress from functools import cached_property from math import ceil from operator import attrgetter @@ -400,10 +401,8 @@ def _lower_clusters(cls, expressions, profiler=None, **kwargs): # Operation count after specialization final_ops = sum(estimate_cost(c.exprs) for c in clusters if c.is_dense) - try: + with suppress(AttributeError): profiler.record_ops_variation(init_ops, final_ops) - except AttributeError: - pass # Generate implicit Clusters from higher level abstractions clusters = generate_implicit(clusters) @@ -468,10 +467,8 @@ def _lower_uiet(cls, stree, profiler=None, **kwargs): uiet = iet_build(stree) # Analyze the IET Sections for C-level profiling - try: + with suppress(AttributeError): profiler.analyze(uiet) - except AttributeError: - pass return uiet @@ -626,11 +623,11 @@ def _prepare_arguments(self, autotune=None, estimate_memory=False, **kwargs): args.update(p._arg_values(estimate_memory=estimate_memory, **kwargs)) try: args.reduce_inplace() - except ValueError: + except ValueError as e: v = [i for i in overrides if i.name in args] raise InvalidArgument( f"Override `{p}` is incompatible 
with overrides `{v}`" - ) + ) from e # Process data-carrier defaults for p in defaults: @@ -756,10 +753,8 @@ def _known_arguments(self): ret = set() for i in self.input: ret.update(i._arg_names) - try: + with suppress(AttributeError): ret.update(i.grid._arg_names) - except AttributeError: - pass for d in self.dimensions: ret.update(d._arg_names) ret.update(p.name for p in self.parameters) @@ -1009,11 +1004,10 @@ def apply(self, **kwargs): except ctypes.ArgumentError as e: if e.args[0].startswith("argument "): argnum = int(e.args[0][9:].split(':')[0]) - 1 - newmsg = "error in argument '%s' with value '%s': %s" % ( - self.parameters[argnum].name, - arg_values[argnum], - e.args[0]) - raise ctypes.ArgumentError(newmsg) from e + raise ctypes.ArgumentError( + f"error in argument '{self.parameters[argnum].name}' with value" + f" '{arg_values[argnum]}': {e.args[0]}" + ) from e else: raise @@ -1064,7 +1058,7 @@ def _emit_timings(timings, indent=''): _emit_timings(timings, ' * ') if self._profiler._ops: - ops = ['%d --> %d' % i for i in self._profiler._ops] + ops = [f'{i[0]} --> {i[1]}' for i in self._profiler._ops] perf(f"Flops reduction after symbolic optimization: [{' ; '.join(ops)}]") def _emit_apply_profiling(self, args): @@ -1410,12 +1404,12 @@ def _physical_deviceid(self): else: try: return visible_devices[logical_deviceid] - except IndexError: + except IndexError as e: errmsg = (f"A deviceid value of {logical_deviceid} is not valid " f"with {visible_device_var}={visible_devices}. 
Note that " "deviceid corresponds to the logical index within the " "visible devices, not the physical device index.") - raise ValueError(errmsg) + raise ValueError(errmsg) from e else: return None @@ -1444,10 +1438,9 @@ def nbytes_avail_mapper(self): mapper[host_layer] = int(ANYCPU.memavail() / nproc) for layer in (host_layer, device_layer): - try: + with suppress(KeyError): + # Since might not have this layer in the mapper mapper[layer] -= self.nbytes_consumed_operator.get(layer, 0) - except KeyError: # Might not have this layer in the mapper - pass mapper = {k: int(v) for k, v in mapper.items()} @@ -1510,10 +1503,7 @@ def nbytes_consumed_arrays(self): or not i.is_regular: continue - if i.is_regular: - nbytes = i.nbytes - else: - nbytes = i.nbytes_max + nbytes = i.nbytes if i.is_regular else i.nbytes_max v = subs_op_args(nbytes, self) if not is_integer(v): # E.g. the Arrays used to store the MPI halo exchanges diff --git a/devito/operator/profiling.py b/devito/operator/profiling.py index b00c5cf04b..6a82928277 100644 --- a/devito/operator/profiling.py +++ b/devito/operator/profiling.py @@ -196,7 +196,7 @@ def summary(self, args, dtype, reduce_over=None): comm = args.comm summary = PerformanceSummary() - for name, data in self._sections.items(): + for name in self._sections: # Time to run the section time = max(getattr(args[self.name]._obj, name), 10e-7) @@ -275,7 +275,7 @@ def _allgather_from_comm(self, comm, time, ops, points, traffic, sops, itershape sops = [sops]*comm.size itershapess = comm.allgather(itershapes) - return list(zip(times, opss, pointss, traffics, sops, itershapess)) + return list(zip(times, opss, pointss, traffics, sops, itershapess, strict=True)) # Override basic summary so that arguments other than runtime are computed. 
def summary(self, args, dtype, reduce_over=None): @@ -318,7 +318,7 @@ def summary(self, args, dtype, reduce_over=None): # Same as above but without setup overheads (e.g., host-device # data transfers) mapper = defaultdict(list) - for (name, rank), v in summary.items(): + for (name, _), v in summary.items(): mapper[name].append(v.time) reduce_over_nosetup = sum(max(i) for i in mapper.values()) if reduce_over_nosetup == 0: @@ -460,10 +460,7 @@ def add_glb_vanilla(self, key, time): gflops = float(ops)/10**9 gflopss = gflops/time - if np.isnan(traffic) or traffic == 0: - oi = None - else: - oi = float(ops/traffic) + oi = None if np.isnan(traffic) or traffic == 0 else float(ops / traffic) self.globals[key] = PerfEntry(time, gflopss, None, oi, None, None) diff --git a/devito/operator/registry.py b/devito/operator/registry.py index c8aac315b7..35e5646204 100644 --- a/devito/operator/registry.py +++ b/devito/operator/registry.py @@ -45,15 +45,16 @@ def fetch(self, platform=None, mode=None, language='C', **kwargs): mode = 'custom' if language not in OperatorRegistry._languages: - raise ValueError("Unknown language `%s`" % language) + raise ValueError(f"Unknown language `{language}`") for cls in platform._mro(): for (p, m, l), kls in self.items(): if issubclass(p, cls) and m == mode and l == language: return kls - raise InvalidOperator("Cannot compile an Operator for `%s`" - % str((platform, mode, language))) + raise InvalidOperator( + f'Cannot compile an Operator for `{str((platform, mode, language))}`' + ) operator_registry = OperatorRegistry() diff --git a/devito/parameters.py b/devito/parameters.py index f545139649..0412380533 100644 --- a/devito/parameters.py +++ b/devito/parameters.py @@ -197,7 +197,7 @@ def init_configuration(configuration=configuration, env_vars_mapper=env_vars_map try: items = v.split(';') # Env variable format: 'var=k1:v1;k2:v2:k3:v3:...' 
- keys, values = zip(*[i.split(':') for i in items]) + keys, values = zip(*[i.split(':') for i in items], strict=True) # Casting values = [eval(i) for i in values] except AttributeError: @@ -215,7 +215,7 @@ def init_configuration(configuration=configuration, env_vars_mapper=env_vars_map except (TypeError, ValueError): keys[i] = j if len(keys) == len(values): - configuration.update(k, dict(zip(keys, values))) + configuration.update(k, dict(zip(keys, values, strict=True))) elif len(keys) == 1: configuration.update(k, keys[0]) else: @@ -269,7 +269,7 @@ def __enter__(self): configuration[k] = v def __exit__(self, exc_type, exc_val, traceback): - for k, v in self.params.items(): + for k in self.params: try: configuration[k] = self.previous[k] except ValueError: diff --git a/devito/passes/__init__.py b/devito/passes/__init__.py index f4ac2783c3..c92c64481e 100644 --- a/devito/passes/__init__.py +++ b/devito/passes/__init__.py @@ -30,7 +30,7 @@ def is_on_device(obj, gpu_fit): if isinstance(f, TimeFunction) and is_integer(f.save)] if 'all-fallback' in gpu_fit and fsave: - warning("TimeFunction %s assumed to fit the GPU memory" % fsave) + warning(f"TimeFunction {fsave} assumed to fit the GPU memory") return True return all(f in gpu_fit for f in fsave) diff --git a/devito/passes/clusters/aliases.py b/devito/passes/clusters/aliases.py index 8c33913326..f0c25a74be 100644 --- a/devito/passes/clusters/aliases.py +++ b/devito/passes/clusters/aliases.py @@ -92,10 +92,7 @@ def cire(clusters, mode, sregistry, options, platform): # NOTE: Handle prematurely expanded derivatives -- current default on # several backends, but soon to become legacy if mode == 'sops': - if options['expand']: - mode = 'eval-derivs' - else: - mode = 'index-derivs' + mode = 'eval-derivs' if options['expand'] else 'index-derivs' for cls in modes[mode]: transformer = cls(sregistry, options, platform) @@ -207,7 +204,7 @@ def _do_generate(self, exprs, exclude, cbk_search, cbk_compose=None): Carry out the bulk of 
the work of ``_generate``. """ counter = generator() - make = lambda: Symbol(name='dummy%d' % counter(), dtype=np.float32) + make = lambda: Symbol(name=f'dummy{counter()}', dtype=np.float32) if cbk_compose is None: cbk_compose = lambda *args: None @@ -376,7 +373,9 @@ def _generate(self, cgroup, exclude): candidates = sorted(grank, reverse=True)[:2] for i in candidates: lower_pri_elems = flatten([grank[j] for j in candidates if j != i]) - cbk_search = lambda e: self._cbk_search2(e, grank[i] + lower_pri_elems) + cbk_search = lambda e: self._cbk_search2( + e, grank[i] + lower_pri_elems # noqa: B023 + ) yield self._do_generate(exprs, exclude, cbk_search, self._cbk_compose) def _lookup_key(self, c): @@ -386,11 +385,11 @@ def _select(self, variants): if isinstance(self.opt_schedule_strategy, int): try: return variants[self.opt_schedule_strategy] - except IndexError: + except IndexError as e: raise CompilationError( f"Illegal schedule {self.opt_schedule_strategy}; " f"generated {len(variants)} schedules in total" - ) + ) from e return pick_best(variants) @@ -525,10 +524,8 @@ def collect(extracted, ispace, minstorage): unseen.remove(u) group = Group(group, ispace=ispace) - if minstorage: - k = group.dimensions_translated - else: - k = group.dimensions + k = group.dimensions_translated if minstorage else group.dimensions + k = frozenset(d for d in k if not d.is_NonlinearDerived) mapper.setdefault(k, []).append(group) @@ -571,7 +568,7 @@ def collect(extracted, ispace, minstorage): # Heuristic: first try retaining the larger ones smallest = len(min(groups, key=len)) fallback = groups - groups, remainder = split(groups, lambda g: len(g) > smallest) + groups, remainder = split(groups, lambda g: len(g) > smallest) # noqa: B023 if groups: queue.append(remainder) elif len(remainder) > 1: @@ -590,7 +587,7 @@ def collect(extracted, ispace, minstorage): offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels]) for v in c.offsets] subs = {i: i.function[[l + v.fromlabel(l, 
0) for l in b]] - for i, b, v in zip(c.indexeds, c.bases, offsets)} + for i, b, v in zip(c.indexeds, c.bases, offsets, strict=True)} pivot = uxreplace(c.expr, subs) # Distance of each aliased expression from the basis alias @@ -599,7 +596,9 @@ def collect(extracted, ispace, minstorage): for i in g._items: aliaseds.append(extracted[i.expr]) - distance = [o.distance(v) for o, v in zip(i.offsets, offsets)] + distance = [ + o.distance(v) for o, v in zip(i.offsets, offsets, strict=True) + ] distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)] distances.append(LabeledVector([(d, v.pop()) for d, v in distance])) @@ -726,14 +725,14 @@ def lower_aliases(aliases, meta, maxpar): m = i.dim.symbolic_min - i.dim.parent.symbolic_min else: m = 0 - d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m, + d = dmapper[i.dim] = IncrDimension(f"{i.dim.name}s", i.dim, m, dd.symbolic_size, 1, dd.step) sub_iterators[i.dim] = d else: d = i.dim # Given the iteration `interval`, lower distances to indices - for distance, indices in zip(a.distances, indicess): + for distance, indices in zip(a.distances, indicess, strict=True): v = distance[interval.dim] or 0 try: indices.append(d - interval.lower + v) @@ -797,12 +796,12 @@ def optimize_schedule_rotations(schedule, sregistry): iis = candidate.lower iib = candidate.upper - name = sregistry.make_name(prefix='%sii' % d.root.name) + name = sregistry.make_name(prefix=f'{d.root.name}ii') ii = ModuloDimension(name, ds, iis, incr=iib) - cd = CustomDimension(name='%sc' % d.root.name, symbolic_min=ii, + cd = CustomDimension(name=f'{d.root.name}c', symbolic_min=ii, symbolic_max=iib, symbolic_size=n) - dsi = ModuloDimension('%si' % ds.root.name, cd, cd + ds - iis, n) + dsi = ModuloDimension(f'{ds.root.name}i', cd, cd + ds - iis, n) mapper = OrderedDict() for i in g: @@ -813,11 +812,13 @@ def optimize_schedule_rotations(schedule, sregistry): try: md = mapper[v] except KeyError: - name = sregistry.make_name(prefix='%sr' % 
d.root.name) + name = sregistry.make_name(prefix=f'{d.root.name}r') md = mapper.setdefault(v, ModuloDimension(name, ds, v, n)) mds.append(md) - indicess = [indices[:ridx] + [md] + indices[ridx + 1:] - for md, indices in zip(mds, i.indicess)] + indicess = [ + indices[:ridx] + [md] + indices[ridx + 1:] + for md, indices in zip(mds, i.indicess, strict=True) + ] # Update `writeto` by switching `d` to `dsi` intervals = k.intervals.switch(d, dsi).zero(dsi) @@ -851,11 +852,8 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, """ Turn a Schedule into a sequence of Clusters. """ - if opt_ftemps: - make = TempFunction - else: - # Typical case -- the user does *not* "see" the CIRE-created temporaries - make = TempArray + # Typical case -- the user does *not* "see" the CIRE-created temporaries + make = TempFunction if opt_ftemps else TempArray clusters = [] subs = {} @@ -887,10 +885,7 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, # Functions to minimize support variables such as strides etc min_halo = {i.dim: Size(abs(i.lower), abs(i.upper)) for i in writeto} - if opt_minmem: - functions = [] - else: - functions = retrieve_functions(pivot) + functions = [] if opt_minmem else retrieve_functions(pivot) halo = dict(min_halo) for f in functions: @@ -906,7 +901,7 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, # The indices used to write into the Array indices = [] - for i, s in zip(writeto, shift): + for i, s in zip(writeto, shift, strict=True): try: # E.g., `xs` sub_iterators = writeto.sub_iterators[i.dim] @@ -921,7 +916,9 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, shift=shift) expression = Eq(obj[indices], uxreplace(pivot, subs)) - callback = lambda idx: obj[[i + s for i, s in zip(idx, shift)]] + callback = lambda idx: obj[ # noqa: B023 + [i + s for i, s in zip(idx, shift, strict=True)] # noqa: B023 + ] else: # Degenerate case: scalar expression assert writeto.size 
== 0 @@ -930,11 +927,13 @@ def lower_schedule(schedule, meta, sregistry, opt_ftemps, opt_min_dtype, obj = Temp(name=name, dtype=dtype) expression = Eq(obj, uxreplace(pivot, subs)) - callback = lambda idx: obj + callback = lambda idx: obj # noqa: B023 # Create the substitution rules for the aliasing expressions - subs.update({aliased: callback(indices) - for aliased, indices in zip(aliaseds, indicess)}) + subs.update({ + aliased: callback(indices) + for aliased, indices in zip(aliaseds, indicess, strict=True) + }) properties = dict(meta.properties) @@ -1094,8 +1093,8 @@ def __new__(cls, items, ispace=None): processed.append(c) continue - f0 = lambda e: minimum(e, sdims) - f1 = lambda e: maximum(e, sdims) + f0 = lambda e: minimum(e, sdims) # noqa: B023 + f1 = lambda e: maximum(e, sdims) # noqa: B023 for f in (f0, f1): expr = f(c.expr) @@ -1112,7 +1111,7 @@ def __new__(cls, items, ispace=None): return obj def __repr__(self): - return "Group(%s)" % ", ".join([str(i) for i in self]) + return "Group({})".format(", ".join([str(i) for i in self])) def find_rotation_distance(self, d, interval): """ @@ -1137,7 +1136,10 @@ def find_rotation_distance(self, d, interval): @cached_property def Toffsets(self): - return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])] + return [ + LabeledVector.transpose(*i) + for i in zip(*[i.offsets for i in self], strict=True) + ] @cached_property def diameter(self): @@ -1152,18 +1154,18 @@ def diameter(self): continue try: distance = int(max(v) - min(v)) - except TypeError: + except TypeError as e: # An entry in `v` has symbolic components, e.g. 
`x_m + 2` if len(set(v)) == 1: continue else: - # Worst-case scenario, we raraly end up here + # Worst-case scenario, we rarely end up here # Resort to the fast vector-based comparison machinery # (rather than the slower sympy.simplify) items = [Vector(i) for i in v] distance, = vmax(*items) - vmin(*items) if not is_integer(distance): - raise ValueError + raise ValueError('Distance is not an integer') from e ret[d] = max(ret[d], distance) return ret @@ -1213,7 +1215,7 @@ def _pivot_legal_rotations(self): assert distance == mini - rotation.upper distances.append(distance) - ret[d] = list(zip(m, distances)) + ret[d] = list(zip(m, distances, strict=True)) return ret @@ -1227,7 +1229,7 @@ def _pivot_min_intervals(self): ret = defaultdict(lambda: [np.inf, -np.inf]) for i in self: - distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)] + distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets, strict=True)] distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)] for d, v in distance: @@ -1252,7 +1254,7 @@ def _pivot_legal_shifts(self): c = self.pivot ret = defaultdict(lambda: (-np.inf, np.inf)) - for i, ofs in zip(c.indexeds, c.offsets): + for i, ofs in zip(c.indexeds, c.offsets, strict=True): f = i.function for l in ofs.labels: @@ -1294,7 +1296,7 @@ def __init__(self, pivot, aliaseds, intervals, distances, score): self.score = score def __repr__(self): - return "Alias<<%s>>" % self.pivot + return f"Alias<<{self.pivot}>>" @property def free_symbols(self): @@ -1335,7 +1337,7 @@ def __init__(self, aliases=None): def __repr__(self): if self._list: - return "AliasList<\n %s\n>" % ",\n ".join(str(i) for i in self._list) + return "AliasList<\n {}\n>".format(",\n ".join(str(i) for i in self._list)) else: return "<>" @@ -1343,8 +1345,7 @@ def __len__(self): return self._list.__len__() def __iter__(self): - for i in self._list: - yield i + yield from self._list def add(self, pivot, aliaseds, intervals, distances, score): assert len(aliaseds) ==
len(distances) @@ -1396,7 +1397,7 @@ def cost(self): # Not just the sum for the individual items' cost! There might be # redundancies, which we factor out here... counter = generator() - make = lambda _: Symbol(name='dummy%d' % counter(), dtype=np.float32) + make = lambda _: Symbol(name=f'dummy{counter()}', dtype=np.float32) tot = 0 for v in as_mapper(self, lambda i: i.ispace).values(): @@ -1427,7 +1428,7 @@ def cit(ispace0, ispace1): The Common IterationIntervals of two IterationSpaces. """ found = [] - for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals): + for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals, strict=True): if it0 == it1: found.append(it0) else: diff --git a/devito/passes/clusters/asynchrony.py b/devito/passes/clusters/asynchrony.py index 8ee7792068..e32190ddef 100644 --- a/devito/passes/clusters/asynchrony.py +++ b/devito/passes/clusters/asynchrony.py @@ -249,10 +249,7 @@ def _actions_from_update_memcpy(c, d, clusters, actions, sregistry): else: assert tindex0.is_Modulo mapper = {(i.offset % i.modulo): i for i in c.sub_iterators[pd]} - if direction is Forward: - toffset = tindex0.offset + 1 - else: - toffset = tindex0.offset - 1 + toffset = tindex0.offset + 1 if direction is Forward else tindex0.offset - 1 try: tindex = mapper[toffset % tindex0.modulo] except KeyError: @@ -271,10 +268,7 @@ def _actions_from_update_memcpy(c, d, clusters, actions, sregistry): # Turn `c` into a prefetch Cluster `pc` expr = uxreplace(e, {tindex0: tindex, fetch: findex}) - if tindex is not tindex0: - ispace = c.ispace.augment({pd: tindex}) - else: - ispace = c.ispace + ispace = c.ispace.augment({pd: tindex}) if tindex is not tindex0 else c.ispace guard0 = c.guards.get(d, true)._subs(fetch, findex) guard1 = GuardBoundNext(function.indices[d], direction) diff --git a/devito/passes/clusters/blocking.py b/devito/passes/clusters/blocking.py index b279f5779c..d04962a52c 100644 --- a/devito/passes/clusters/blocking.py +++ 
b/devito/passes/clusters/blocking.py @@ -115,7 +115,7 @@ def _has_data_reuse(self, cluster): # If we are going to skew, then we might exploit reuse along an # otherwise SEQUENTIAL Dimension - if self.skewing: + if self.skewing: # noqa: SIM103 return True return False @@ -335,10 +335,7 @@ def __init__(self, sregistry, options): def process(self, clusters): # A tool to unroll the explicit integer block shapes, should there be any - if self.par_tile: - blk_size_gen = BlockSizeGenerator(self.par_tile) - else: - blk_size_gen = None + blk_size_gen = BlockSizeGenerator(self.par_tile) if self.par_tile else None return self._process_fdta(clusters, 1, blk_size_gen=blk_size_gen) @@ -558,12 +555,9 @@ def next(self, prefix, d, clusters): self.umt_small.iter() return self.umt_small.next() - if x: - item = self.umt.curitem() - else: - # We can't `self.umt.iter()` because we might still want to - # fallback to `self.umt_small` - item = self.umt.nextitem() + # We can't `self.umt.iter()` because we might still want to + # fallback to `self.umt_small` + item = self.umt.curitem() if x else self.umt.nextitem() # Handle user-provided rules # TODO: This is also rudimentary diff --git a/devito/passes/clusters/buffering.py b/devito/passes/clusters/buffering.py index 6d96de77bc..22f9004ee7 100644 --- a/devito/passes/clusters/buffering.py +++ b/devito/passes/clusters/buffering.py @@ -97,10 +97,7 @@ def key(f): assert callable(key) v1 = kwargs.get('opt_init_onwrite', False) - if callable(v1): - init_onwrite = v1 - else: - init_onwrite = lambda f: v1 + init_onwrite = v1 if callable(v1) else lambda f: v1 options = dict(options) options.update({ @@ -182,7 +179,7 @@ def callback(self, clusters, prefix): # If a buffer is read but never written, then we need to add # an Eq to step through the next slot # E.g., `ub[0, x] = usave[time+2, x]` - for b, v in descriptors.items(): + for _, v in descriptors.items(): if not v.is_readonly: continue if c is not v.firstread: @@ -225,7 +222,7 @@ def 
callback(self, clusters, prefix): # Append the copy-back if `c` is the last-write of some buffers # E.g., `usave[time+1, x] = ub[t1, x]` - for b, v in descriptors.items(): + for _, v in descriptors.items(): if v.is_readonly: continue if c is not v.lastwrite: @@ -278,7 +275,7 @@ def _optimize(self, clusters, descriptors): # "buffer-wise" splitting of the IterationSpaces (i.e., only # relevant if there are at least two read-only buffers) stamp = Stamp() - key0 = lambda: stamp + key0 = lambda: stamp # noqa: B023 else: continue @@ -288,7 +285,7 @@ def _optimize(self, clusters, descriptors): processed.append(c) continue - key1 = lambda d: not d._defines & v.dim._defines + key1 = lambda d: not d._defines & v.dim._defines # noqa: B023 dims = c.ispace.project(key1).itdims ispace = c.ispace.lift(dims, key0()) processed.append(c.rebuild(ispace=ispace)) @@ -384,10 +381,11 @@ def generate_buffers(clusters, key, sregistry, options, **kwargs): if async_degree is not None: if async_degree < size: - warning("Ignoring provided asynchronous degree as it'd be " - "too small for the required buffer (provided %d, " - "but need at least %d for `%s`)" - % (async_degree, size, f.name)) + warning( + 'Ignoring provided asynchronous degree as it would be ' + f'too small for the required buffer (provided {async_degree}, ' + f'but need at least {size} for `{f.name}`)' + ) else: size = async_degree @@ -405,7 +403,7 @@ def generate_buffers(clusters, key, sregistry, options, **kwargs): # Finally create the actual buffer cls = callback or Array - name = sregistry.make_name(prefix='%sb' % f.name) + name = sregistry.make_name(prefix=f'{f.name}b') # We specify the padding to match the input Function's one, so that # the array can be used in place of the Function with valid strides # Plain Array do not track mapped so we default to no padding @@ -445,7 +443,7 @@ def __init__(self, f, b, clusters): self.indices = extract_indices(f, self.dim, clusters) def __repr__(self): - return "Descriptor[%s -> %s]" 
% (self.f, self.b) + return f"Descriptor[{self.f} -> {self.b}]" @property def size(self): @@ -564,7 +562,7 @@ def write_to(self): # Analogous to the above, we need to include the halo region as well ihalo = IntervalGroup([ Interval(i.dim, -h.left, h.right, i.stamp) - for i, h in zip(ispace, self.b._size_halo) + for i, h in zip(ispace, self.b._size_halo, strict=True) ]) ispace = IterationSpace.union(ispace, IterationSpace(ihalo)) @@ -580,10 +578,7 @@ def step_to(self): # May be `db0` (e.g., for double buffering) or `time` dim = self.ispace[self.dim].dim - if self.is_forward_buffering: - direction = Forward - else: - direction = Backward + direction = Forward if self.is_forward_buffering else Backward return self.write_to.switch(self.xd, dim, direction) @@ -669,7 +664,7 @@ def make_mds(descriptors, prefix, sregistry): # follows SymPy's index ordering (time, time-1, time+1) after modulo # replacement, so that associativity errors are consistent. This very # same strategy is also applied in clusters/algorithms/Stepper - key = lambda i: -np.inf if i - p == 0 else (i - p) + key = lambda i: -np.inf if i - p == 0 else (i - p) # noqa: B023 indices = sorted(v.indices, key=key) for i in indices: @@ -804,8 +799,9 @@ def offset_from_centre(d, indices): if not ((p - v).is_Integer or (p - v).is_Symbol): raise ValueError except (IndexError, ValueError): - raise NotImplementedError("Cannot apply buffering with nonlinear " - "index functions (found `%s`)" % v) + raise NotImplementedError( + f'Cannot apply buffering with nonlinear index functions (found `{v}`)' + ) from None try: # Start assuming e.g. 
`indices = [time - 1, time + 2]` diff --git a/devito/passes/clusters/cse.py b/devito/passes/clusters/cse.py index 025acdfdee..d4d7f0a8b8 100644 --- a/devito/passes/clusters/cse.py +++ b/devito/passes/clusters/cse.py @@ -353,10 +353,7 @@ def catch(exprs, mode): candidates = [] for k, v in mapper.items(): - if mode in ('basic', 'smartsort'): - sources = [i for i in v if i == k.expr] - else: - sources = v + sources = [i for i in v if i == k.expr] if mode in ('basic', 'smartsort') else v if len(sources) > 1: candidates.append(Candidate(k.expr, k.conditionals, sources)) diff --git a/devito/passes/clusters/derivatives.py b/devito/passes/clusters/derivatives.py index cced692527..507493c091 100644 --- a/devito/passes/clusters/derivatives.py +++ b/devito/passes/clusters/derivatives.py @@ -48,10 +48,7 @@ def dump(exprs, c): for e in c.exprs: # Optimization 1: if the LHS is already a Symbol, then surely it's # usable as a temporary for one of the IndexDerivatives inside `e` - if e.lhs.is_Symbol and e.operation is None: - reusable = {e.lhs} - else: - reusable = set() + reusable = {e.lhs} if e.lhs.is_Symbol and e.operation is None else set() expr, v = _core(e, c, c.ispace, weights, reusable, mapper, **kwargs) @@ -110,7 +107,7 @@ def _(expr, c, ispace, weights, reusables, mapper, **kwargs): cbk0 = deriv_schedule_registry[options['deriv-schedule']] cbk1 = deriv_unroll_registry[options['deriv-unroll']] except KeyError: - raise ValueError("Unknown derivative lowering mode") + raise ValueError("Unknown derivative lowering mode") from None # Lower the IndexDerivative init, ideriv = cbk0(expr) diff --git a/devito/passes/clusters/factorization.py b/devito/passes/clusters/factorization.py index 812dc9c180..3d157048c3 100644 --- a/devito/passes/clusters/factorization.py +++ b/devito/passes/clusters/factorization.py @@ -56,7 +56,9 @@ def collect_special(expr, strategy): Factorize elemental functions, pows, and other special symbolic objects, prioritizing the most expensive entities. 
""" - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) candidates = ReducerMap.fromdicts(*candidates) funcs = candidates.getall('funcs', []) @@ -168,11 +170,9 @@ def collect_const(expr): # Back to the running example # -> (a + c) add = Add(*v) - if add == 0: - mul = S.Zero - else: - # -> 3.*(a + c) - mul = Mul(k, add, evaluate=False) + + # -> 3.*(a + c) + mul = S.Zero if add == 0 else Mul(k, add, evaluate=False) terms.append(mul) @@ -200,7 +200,9 @@ def _collect_nested(expr, strategy): return expr, {'coeffs': expr} elif q_routine(expr): # E.g., a DefFunction - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) return expr.func(*args, evaluate=False), {} elif expr.is_Function: return expr, {'funcs': expr} @@ -212,7 +214,9 @@ def _collect_nested(expr, strategy): elif expr.is_Add: return strategies[strategy](expr, strategy), {} elif expr.is_Mul: - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) expr = reuse_if_untouched(expr, args, evaluate=True) return expr, ReducerMap.fromdicts(*candidates) elif expr.is_Equality: @@ -220,7 +224,9 @@ def _collect_nested(expr, strategy): expr = reuse_if_untouched(expr, (expr.lhs, rhs)) return expr, {} else: - args, candidates = zip(*[_collect_nested(a, strategy) for a in expr.args]) + args, candidates = zip( + *[_collect_nested(a, strategy) for a in expr.args], strict=True + ) return expr.func(*args), ReducerMap.fromdicts(*candidates) diff --git a/devito/passes/clusters/implicit.py b/devito/passes/clusters/implicit.py index af19e1d3c5..36058f6393 100644 --- a/devito/passes/clusters/implicit.py +++ b/devito/passes/clusters/implicit.py @@ -225,8 +225,10 @@ def _lower_msd(dim, 
cluster): @_lower_msd.register(MultiSubDimension) def _(dim, cluster): i_dim = dim.implicit_dimension - mapper = {tkn: dim.functions[i_dim, mM] - for tkn, mM in zip(dim.tkns, dim.bounds_indices)} + mapper = { + tkn: dim.functions[i_dim, mM] + for tkn, mM in zip(dim.tkns, dim.bounds_indices, strict=True) + } return mapper, i_dim @@ -258,10 +260,7 @@ def reduce(m0, m1, edims, prefix): raise NotImplementedError d, = edims - if prefix[d].direction is Forward: - func = max - else: - func = min + func = max if prefix[d].direction is Forward else min def key(i): try: diff --git a/devito/passes/clusters/misc.py b/devito/passes/clusters/misc.py index d2b4a2c9f2..0324f03eae 100644 --- a/devito/passes/clusters/misc.py +++ b/devito/passes/clusters/misc.py @@ -100,10 +100,7 @@ def callback(self, clusters, prefix): # Lifted scalar clusters cannot be guarded # as they would not be in the scope of the guarded clusters # unless the guard is for an outer dimension - if c.is_scalar and not (prefix[:-1] and c.guards): - guards = {} - else: - guards = c.guards + guards = {} if c.is_scalar and not (prefix[:-1] and c.guards) else c.guards lifted.append(c.rebuild(ispace=ispace, properties=properties, guards=guards)) @@ -144,7 +141,7 @@ def callback(self, cgroups, prefix): # Fusion processed = [] - for k, group in groupby(clusters, key=self._key): + for _, group in groupby(clusters, key=self._key): g = list(group) for maybe_fusible in self._apply_heuristics(g): @@ -348,7 +345,7 @@ def is_cross(source, sink): # True if a cross-ClusterGroup dependence, False otherwise t0 = source.timestamp t1 = sink.timestamp - v = len(cg0.exprs) + v = len(cg0.exprs) # noqa: B023 return t0 < v <= t1 or t1 < v <= t0 for n1, cg1 in enumerate(cgroups[n+1:], start=n+1): @@ -369,10 +366,12 @@ def is_cross(source, sink): # Any anti- and iaw-dependences impose that `cg1` follows `cg0` # and forbid any sort of fusion. 
Fences have the same effect - elif (any(scope.d_anti_gen()) or - any(i.is_iaw for i in scope.d_output_gen()) or - any(c.is_fence for c in flatten(cgroups[n:n1+1]))) or any(not (i.cause and i.cause & prefix) - for i in scope.d_flow_gen()) or any(scope.d_output_gen()): + elif ( + any(scope.d_anti_gen()) or + any(i.is_iaw for i in scope.d_output_gen()) or + any(c.is_fence for c in flatten(cgroups[n:n1+1])) + ) or any(not (i.cause and i.cause & prefix) for i in scope.d_flow_gen()) \ + or any(scope.d_output_gen()): dag.add_edge(cg0, cg1) return dag @@ -397,7 +396,7 @@ def fuse(clusters, toposort=False, options=None): nxt = clusters while True: nxt = fuse(clusters, toposort='nofuse', options=options) - if all(c0 is c1 for c0, c1 in zip(clusters, nxt)): + if all(c0 is c1 for c0, c1 in zip(clusters, nxt, strict=True)): break clusters = nxt clusters = fuse(clusters, toposort=False, options=options) diff --git a/devito/passes/equations/linearity.py b/devito/passes/equations/linearity.py index 9c5bfc1d28..d2914769be 100644 --- a/devito/passes/equations/linearity.py +++ b/devito/passes/equations/linearity.py @@ -1,4 +1,5 @@ from collections import Counter +from contextlib import suppress from functools import singledispatch from itertools import product @@ -58,10 +59,8 @@ def inspect(expr): m = inspect(a) mapper.update(m) - try: + with suppress(KeyError): counter.update(m[a]) - except KeyError: - pass mapper[expr] = counter diff --git a/devito/passes/iet/asynchrony.py b/devito/passes/iet/asynchrony.py index 9d0c3387c8..aa205e818c 100644 --- a/devito/passes/iet/asynchrony.py +++ b/devito/passes/iet/asynchrony.py @@ -160,7 +160,7 @@ def _(iet, key=None, tracker=None, sregistry=None, **kwargs): wrap = While(CondNe(FieldFromPointer(sdata.symbolic_flag, sbase), 0), wrap) # pthread functions expect exactly one argument of type void* - tparameter = Pointer(name='_%s' % sdata.name) + tparameter = Pointer(name=f'_{sdata.name}') # Unpack `sdata` unpacks = [PointerCast(sdata, 
tparameter), BlankLine] @@ -184,7 +184,7 @@ def _(iet, key=None, tracker=None, sregistry=None, **kwargs): callback = lambda body: Iteration(list(body) + footer, d, threads.size - 1) # Create an efunc to initialize `sdata` and tear up the pthreads - name = 'init_%s' % sdata.name + name = f'init_{sdata.name}' body = [] for i in sdata.cfields: if i.is_AbstractFunction: @@ -230,7 +230,7 @@ def inject_async_tear_updown(iet, tracker=None, **kwargs): tearup = [] teardown = [] - for sdata, threads, init, shutdown in tracker.values(): + for _, threads, init, shutdown in tracker.values(): # Tear-up arguments = list(init.parameters) for n, a in enumerate(list(arguments)): diff --git a/devito/passes/iet/definitions.py b/devito/passes/iet/definitions.py index 0a5416d6a2..29cc8c9787 100644 --- a/devito/passes/iet/definitions.py +++ b/devito/passes/iet/definitions.py @@ -98,10 +98,7 @@ def _alloc_object_on_low_lat_mem(self, site, obj, storage): """ decl = Definition(obj) - if obj._C_init: - definition = (decl, obj._C_init) - else: - definition = (decl) + definition = (decl, obj._C_init) if obj._C_init else (decl) frees = obj._C_free @@ -130,7 +127,7 @@ def _alloc_array_on_global_mem(self, site, obj, storage): return # Create input array - name = '%s_init' % obj.name + name = f'{obj.name}_init' initvalue = np.array([unevaluate(pow_to_mul(i)) for i in obj.initvalue]) src = Array(name=name, dtype=obj.dtype, dimensions=obj.dimensions, space='host', scope='stack', initvalue=initvalue) @@ -693,7 +690,9 @@ def process(self, graph): def make_zero_init(obj, rcompile, sregistry): cdims = [] - for d, (h0, h1), s in zip(obj.dimensions, obj._size_halo, obj.symbolic_shape): + for d, (h0, h1), s in zip( + obj.dimensions, obj._size_halo, obj.symbolic_shape, strict=True + ): if d.is_NonlinearDerived: assert h0 == h1 == 0 m = 0 diff --git a/devito/passes/iet/engine.py b/devito/passes/iet/engine.py index d119fbb664..5a383332e3 100644 --- a/devito/passes/iet/engine.py +++ 
b/devito/passes/iet/engine.py @@ -1,4 +1,5 @@ from collections import defaultdict +from contextlib import suppress from functools import partial, singledispatch, wraps import numpy as np @@ -118,11 +119,9 @@ def sync_mapper(self): continue for j in dag.all_predecessors(i.name): - try: + with suppress(KeyError): + # In the case where `j` is a foreign Callable v.extend(FindNodes(Iteration).visit(self.efuncs[j])) - except KeyError: - # `j` is a foreign Callable - pass return found @@ -217,10 +216,7 @@ def iet_pass(func): @wraps(func) def wrapper(*args, **kwargs): - if timed_pass.is_enabled(): - maybe_timed = timed_pass - else: - maybe_timed = lambda func, name: func + maybe_timed = timed_pass if timed_pass.is_enabled() else lambda func, name: func try: # If the pass has been disabled, skip it if not kwargs['options'][func.__name__]: @@ -316,7 +312,7 @@ def reuse_compounds(efuncs, sregistry=None): mapper.update({i0: i1, b0: b1}) - for f0, f1 in zip(i0.fields, i1.fields): + for f0, f1 in zip(i0.fields, i1.fields, strict=True): for cls in (FieldFromComposite, FieldFromPointer): if f0.is_AbstractFunction: mapper[cls(f0._C_symbol, b0)] = cls(f1._C_symbol, b1) @@ -395,7 +391,7 @@ def abstract_component_accesses(efuncs): f_flatten = f.func(name='flat_data', components=f.c0) subs = {} - for ca, o in zip(compaccs, compoff_params): + for ca, o in zip(compaccs, compoff_params, strict=True): indices = [Mul(arity_param, i, evaluate=False) for i in ca.indices] indices[-1] += o subs[ca] = f_flatten.indexed[indices] @@ -626,7 +622,7 @@ def _(i, mapper, sregistry): name0 = pp.name base = sregistry.make_name(prefix=name0) - name1 = sregistry.make_name(prefix='%s_blk' % base) + name1 = sregistry.make_name(prefix=f'{base}_blk') bd = i.parent._rebuild(name1, pp) d = i._rebuild(name0, bd, i._min.subs(p, bd), i._max.subs(p, bd)) diff --git a/devito/passes/iet/langbase.py b/devito/passes/iet/langbase.py index da28115d91..d4d00f6e8a 100644 --- a/devito/passes/iet/langbase.py +++ 
b/devito/passes/iet/langbase.py @@ -1,4 +1,3 @@ -from abc import ABC from functools import singledispatch from itertools import takewhile @@ -30,7 +29,7 @@ class LangMeta(type): def __getitem__(self, k): if k not in self.mapper: - raise NotImplementedError("Missing required mapping for `%s`" % k) + raise NotImplementedError(f"Missing required mapping for `{k}`") return self.mapper[k] def get(self, k, v=None): @@ -149,7 +148,7 @@ def _map_delete(cls, f, imask=None, devicerm=None): raise NotImplementedError -class LangTransformer(ABC): +class LangTransformer: """ Abstract base class defining a series of methods capable of specializing @@ -473,13 +472,13 @@ def _(iet): if objcomm is not None: body = _make_setdevice_mpi(iet, objcomm, nodes=lang_init) - header = c.Comment('Beginning of %s+MPI setup' % self.langbb['name']) - footer = c.Comment('End of %s+MPI setup' % self.langbb['name']) + header = c.Comment(f'Beginning of {self.langbb["name"]}+MPI setup') + footer = c.Comment(f'End of {self.langbb["name"]}+MPI setup') else: body = _make_setdevice_seq(iet, nodes=lang_init) - header = c.Comment('Beginning of %s setup' % self.langbb['name']) - footer = c.Comment('End of %s setup' % self.langbb['name']) + header = c.Comment(f'Beginning of {self.langbb["name"]} setup') + footer = c.Comment(f'End of {self.langbb["name"]} setup') init = List(header=header, body=body, footer=footer) iet = iet._rebuild(body=iet.body._rebuild(init=init)) @@ -543,7 +542,7 @@ def make_sections_from_imask(f, imask=None): datashape = infer_transfer_datashape(f, imask) sections = [] - for i, j in zip(imask, datashape): + for i, j in zip(imask, datashape, strict=False): if i is FULL: start, size = 0, j else: diff --git a/devito/passes/iet/languages/CXX.py b/devito/passes/iet/languages/CXX.py index 5453ea58d6..c05ac37822 100644 --- a/devito/passes/iet/languages/CXX.py +++ b/devito/passes/iet/languages/CXX.py @@ -12,13 +12,11 @@ __all__ = ['CXXBB', 'CXXDataManager', 'CXXOrchestrator'] -def 
std_arith(prefix=None): +def std_arith(prefix=''): if prefix: # Method definition prefix, e.g. "__host__" # Make sure there is a space between the prefix and the method name - prefix = prefix if prefix.endswith(" ") else f"{prefix} " - else: - prefix = "" + prefix = prefix if prefix.endswith(' ') else f'{prefix} ' return f""" #include diff --git a/devito/passes/iet/languages/openacc.py b/devito/passes/iet/languages/openacc.py index cb924845de..0d860a6dc9 100644 --- a/devito/passes/iet/languages/openacc.py +++ b/devito/passes/iet/languages/openacc.py @@ -33,9 +33,9 @@ def _make_clauses(cls, ncollapsed=0, reduction=None, tile=None, **kwargs): if tile: stile = [str(tile[i]) for i in range(ncollapsed)] - clauses.append('tile(%s)' % ','.join(stile)) + clauses.append('tile({})'.format(','.join(stile))) elif ncollapsed > 1: - clauses.append('collapse(%d)' % ncollapsed) + clauses.append(f'collapse({ncollapsed})') if reduction: clauses.append(cls._make_clause_reduction_from_imask(reduction)) @@ -49,10 +49,10 @@ def _make_clauses(cls, ncollapsed=0, reduction=None, tile=None, **kwargs): # The NVC 20.7 and 20.9 compilers have a bug which triggers data movement for # indirectly indexed arrays (e.g., a[b[i]]) unless a present clause is used if presents: - clauses.append("present(%s)" % ",".join(presents)) + clauses.append("present({})".format(",".join(presents))) if deviceptrs: - clauses.append("deviceptr(%s)" % ",".join(deviceptrs)) + clauses.append("deviceptr({})".format(",".join(deviceptrs))) return clauses diff --git a/devito/passes/iet/languages/openmp.py b/devito/passes/iet/languages/openmp.py index e7027ac272..32ace9d473 100644 --- a/devito/passes/iet/languages/openmp.py +++ b/devito/passes/iet/languages/openmp.py @@ -45,8 +45,8 @@ class OmpRegion(ParallelBlock): @classmethod def _make_header(cls, nthreads, private=None): - private = ('private(%s)' % ','.join(private)) if private else '' - return c.Pragma('omp parallel num_threads(%s) %s' % (nthreads.name, private)) + 
private = ('private({})'.format(','.join(private))) if private else '' + return c.Pragma(f'omp parallel num_threads({nthreads.name}) {private}') class OmpIteration(PragmaIteration): @@ -64,14 +64,16 @@ def _make_clauses(cls, ncollapsed=0, chunk_size=None, nthreads=None, clauses = [] if ncollapsed > 1: - clauses.append('collapse(%d)' % ncollapsed) + clauses.append(f'collapse({ncollapsed})') if chunk_size is not False: - clauses.append('schedule(%s,%s)' % (schedule or 'dynamic', - chunk_size or 1)) + clauses.append('schedule({},{})'.format( + schedule or 'dynamic', + chunk_size or 1 + )) if nthreads: - clauses.append('num_threads(%s)' % nthreads) + clauses.append(f'num_threads({nthreads})') if reduction: clauses.append(cls._make_clause_reduction_from_imask(reduction)) @@ -93,7 +95,7 @@ def _make_clauses(cls, **kwargs): indexeds = FindSymbols('indexeds').visit(kwargs['nodes']) deviceptrs = filter_ordered(i.name for i in indexeds if i.function._mem_local) if deviceptrs: - clauses.append("is_device_ptr(%s)" % ",".join(deviceptrs)) + clauses.append("is_device_ptr({})".format(",".join(deviceptrs))) return clauses @@ -261,21 +263,18 @@ def _support_array_reduction(cls, compiler): if isinstance(compiler, GNUCompiler) and \ compiler.version < Version("6.0"): return False - elif isinstance(compiler, NvidiaCompiler): - # NVC++ does not support array reduction and leads to segfault - return False else: - return True + # NVC++ does not support array reduction and leads to segfault + return not isinstance(compiler, NvidiaCompiler) @classmethod def _support_complex_reduction(cls, compiler): # In case we have a CustomCompiler if isinstance(compiler, CustomCompiler): compiler = compiler._base() - if isinstance(compiler, GNUCompiler): + else: # Gcc doesn't supports complex reduction - return False - return True + return not isinstance(compiler, GNUCompiler) class Ompizer(AbstractOmpizer): diff --git a/devito/passes/iet/linearization.py b/devito/passes/iet/linearization.py index 
0d371ccb8e..aca2485444 100644 --- a/devito/passes/iet/linearization.py +++ b/devito/passes/iet/linearization.py @@ -34,10 +34,7 @@ def linearize(graph, **kwargs): else: key = lambda f: f.is_AbstractFunction and f.ndim > 1 and not f._mem_stack - if options['index-mode'] == 'int32': - dtype = np.int32 - else: - dtype = np.int64 + dtype = np.int32 if options['index-mode'] == 'int32' else np.int64 # NOTE: Even if `mode=False`, `key` may still want to enforce linearization # of some Functions, so it takes precedence and we then attempt to linearize @@ -157,7 +154,7 @@ def add(self, f): k = key1(f, d) if not k or k in self.sizes: continue - name = self.sregistry.make_name(prefix='%s_fsz' % d.name) + name = self.sregistry.make_name(prefix=f'{d.name}_fsz') self.sizes[k] = Size(name=name, dtype=dtype, is_const=True) # Update unique strides table @@ -168,7 +165,7 @@ def add(self, f): continue if k in self.strides: continue - name = self.sregistry.make_name(prefix='%s_stride' % d.name) + name = self.sregistry.make_name(prefix=f'{d.name}_stride') self.strides[k] = Stride(name=name, dtype=dtype, is_const=True) def update(self, functions): @@ -192,7 +189,7 @@ def map_strides(self, f): sizes = self.get_sizes(f) return {d: self.strides[sizes[n:]] for n, d in enumerate(dims)} elif f in self.strides_dynamic: - return {d: i for d, i in zip(dims, self.strides_dynamic[f])} + return {d: i for d, i in zip(dims, self.strides_dynamic[f], strict=True)} else: return {} @@ -270,9 +267,8 @@ def linearize_accesses(iet, key0, tracker=None): # 4) What `strides` can indeed be constructed? 
mapper = {} for sizes, stride in tracker.strides.items(): - if stride in candidates: - if set(sizes).issubset(instances): - mapper[stride] = sizes + if stride in candidates and set(sizes).issubset(instances): + mapper[stride] = sizes # 5) Construct what needs to *and* can be constructed stmts, stmts1 = [], [] @@ -316,7 +312,7 @@ def _(f, d): @singledispatch def _generate_linearization_basic(f, i, tracker): - assert False + raise AssertionError('This is not allowed') @_generate_linearization_basic.register(DiscreteFunction) @@ -397,7 +393,7 @@ def linearize_transfers(iet, sregistry=None, **kwargs): start, size = imask[0], 1 if start != 0: # Spare the ugly generated code if unnecessary (occurs often) - name = sregistry.make_name(prefix='%s_ofs' % n.function.name) + name = sregistry.make_name(prefix=f'{n.function.name}_ofs') wildcard = Wildcard(name=name, dtype=np.int32, is_const=True) symsect = n._rebuild(imask=imask).sections diff --git a/devito/passes/iet/misc.py b/devito/passes/iet/misc.py index dbfcbd8394..1dd2a7ad52 100644 --- a/devito/passes/iet/misc.py +++ b/devito/passes/iet/misc.py @@ -119,7 +119,7 @@ def relax_incr_dimensions(iet, options=None, **kwargs): roots_max = {i.dim.root: i.symbolic_max for i in outer} # Process inner iterations and adjust their bounds - for n, i in enumerate(inner): + for _, i in enumerate(inner): # If definitely in-bounds, as ensured by a prior compiler pass, then # we can skip this step if i.is_Inbound: @@ -193,7 +193,7 @@ def _generate_macros_findexeds(iet, sregistry=None, tracker=None, **kwargs): except KeyError: pass - pname = sregistry.make_name(prefix='%sL' % i.name) + pname = sregistry.make_name(prefix=f'{i.name}L') header, v = i.bind(pname) subs[i] = v @@ -284,7 +284,7 @@ def remove_redundant_moddims(iet): subs = {d: sympy.S.Zero for d in degenerates} redundants = as_mapper(others, key=lambda d: d.offset % d.modulo) - for k, v in redundants.items(): + for _, v in redundants.items(): chosen = v.pop(0) subs.update({d: chosen 
for d in v}) diff --git a/devito/passes/iet/mpi.py b/devito/passes/iet/mpi.py index 9dcad26008..3a3d354905 100644 --- a/devito/passes/iet/mpi.py +++ b/devito/passes/iet/mpi.py @@ -243,10 +243,7 @@ def _drop_if_unwritten(iet, options=None, **kwargs): which would call the generated library directly. """ drop_unwritten = options['dist-drop-unwritten'] - if not callable(drop_unwritten): - key = lambda f: drop_unwritten - else: - key = drop_unwritten + key = (lambda f: drop_unwritten) if not callable(drop_unwritten) else drop_unwritten # Analysis writes = {i.write for i in FindNodes(Expression).visit(iet)} @@ -525,7 +522,7 @@ def _semantical_eq_loc_indices(hsf0, hsf1): if hsf0.loc_indices != hsf1.loc_indices: return False - for v0, v1 in zip(hsf0.loc_values, hsf1.loc_values): + for v0, v1 in zip(hsf0.loc_values, hsf1.loc_values, strict=False): if v0 is v1: continue diff --git a/devito/passes/iet/orchestration.py b/devito/passes/iet/orchestration.py index b2bc5b0caf..3bea70fb0f 100644 --- a/devito/passes/iet/orchestration.py +++ b/devito/passes/iet/orchestration.py @@ -1,4 +1,5 @@ from collections import OrderedDict +from contextlib import suppress from functools import singledispatch from sympy import Or @@ -95,11 +96,11 @@ def _make_syncarray(self, iet, sync_ops, layer): qid = None body = list(iet.body) - try: - body.extend([self.langbb._map_update_device(s.target, s.imask, qid=qid) - for s in sync_ops]) - except NotImplementedError: - pass + with suppress(NotImplementedError): + body.extend([ + self.langbb._map_update_device(s.target, s.imask, qid=qid) + for s in sync_ops + ]) iet = List(body=body) return iet, [] @@ -212,11 +213,11 @@ def _(layer, iet, sync_ops, lang, sregistry): body.extend([DummyExpr(s.handle, 1) for s in sync_ops]) body.append(BlankLine) - name = 'copy_to_%s' % layer.suffix + name = f'copy_to_{layer.suffix}' except NotImplementedError: # A non-device backend body = [] - name = 'copy_from_%s' % layer.suffix + name = f'copy_from_{layer.suffix}' 
body.extend(list(iet.body)) @@ -243,10 +244,10 @@ def _(layer, iet, sync_ops, lang, sregistry): body.append(lang._map_wait(qid)) body.append(BlankLine) - name = 'prefetch_from_%s' % layer.suffix + name = f'prefetch_from_{layer.suffix}' except NotImplementedError: body = [] - name = 'prefetch_to_%s' % layer.suffix + name = f'prefetch_to_{layer.suffix}' body.extend([DummyExpr(s.handle, 2) for s in sync_ops]) diff --git a/devito/passes/iet/parpragma.py b/devito/passes/iet/parpragma.py index f03b8a3305..ec02a5e3cb 100644 --- a/devito/passes/iet/parpragma.py +++ b/devito/passes/iet/parpragma.py @@ -1,4 +1,5 @@ from collections import defaultdict +from contextlib import suppress from functools import cached_property import cgen as c @@ -190,21 +191,21 @@ def _make_clause_reduction_from_imask(cls, reductions): if i.is_Indexed: f = i.function bounds = [] - for k, d in zip(imask, f.dimensions): + for k, d in zip(imask, f.dimensions, strict=False): if is_integer(k): - bounds.append('[%s]' % k) + bounds.append(f'[{k}]') elif k is FULL: # Lower FULL Dimensions into a range spanning the entire # Dimension space, e.g. 
`reduction(+:f[0:f_vec->size[1]])` - bounds.append('[0:%s]' % f._C_get_field(FULL, d).size) + bounds.append(f'[0:{f._C_get_field(FULL, d).size}]') else: assert isinstance(k, tuple) and len(k) == 2 - bounds.append('[%s:%s]' % k) - mapper[r.name].append('%s%s' % (i.name, ''.join(bounds))) + bounds.append('[{}:{}]'.format(*k)) + mapper[r.name].append('{}{}'.format(i.name, ''.join(bounds))) else: mapper[r.name].append(str(i)) - args = ['reduction(%s:%s)' % (k, ','.join(v)) for k, v in mapper.items()] + args = ['reduction({}:{})'.format(k, ','.join(v)) for k, v in mapper.items()] return ' '.join(args) @@ -275,10 +276,7 @@ def _make_partree(self, candidates, nthreads=None): if all(i.is_Affine for i in candidates): bundles = FindNodes(ExpressionBundle).visit(root) sops = sum(i.ops for i in bundles) - if sops >= self.dynamic_work: - schedule = 'dynamic' - else: - schedule = 'static' + schedule = 'dynamic' if sops >= self.dynamic_work else 'static' if nthreads is None: # pragma ... for ... schedule(..., 1) nthreads = self.nthreads @@ -473,16 +471,14 @@ def functions(self): def expr_symbols(self): retval = [self.function.indexed] for i in self.arguments + tuple(flatten(self.sections)): - try: + with suppress(AttributeError): retval.extend(i.free_symbols) - except AttributeError: - pass return tuple(retval) @cached_property def _generate(self): # Stringify sections - sections = ''.join(['[%s:%s]' % (ccode(i), ccode(j)) + sections = ''.join([f'[{ccode(i)}:{ccode(j)}]' for i, j in self.sections]) arguments = [ccode(i) for i in self.arguments] return self.pragma % (self.function.name, sections, *arguments) @@ -503,7 +499,7 @@ def __init__(self, sregistry, options, platform, compiler): self.par_tile = options['par-tile'].reset() self.par_disabled = options['par-disabled'] - def _score_candidate(self, n0, root, collapsable=()): + def _score_candidate(self, n0, root, collapsible=()): # `ndptrs`, the number of device pointers, part of the score too to # ensure the outermost loop is 
offloaded ndptrs = len(self._device_pointers(root)) diff --git a/devito/symbolics/extended_dtypes.py b/devito/symbolics/extended_dtypes.py index 1bb5fbb91b..29bab821ca 100644 --- a/devito/symbolics/extended_dtypes.py +++ b/devito/symbolics/extended_dtypes.py @@ -97,6 +97,6 @@ class VOID(BaseCast): name = base_name.upper() globals()[name] = type(name, (BaseCast,), {'_dtype': dtype}) for i in ['2', '3', '4']: - v = '%s%s' % (base_name, i) + v = f'{base_name}{i}' globals()[v.upper()] = cast(v) globals()[f'{v.upper()}P'] = cast(v, '*') diff --git a/devito/symbolics/extended_sympy.py b/devito/symbolics/extended_sympy.py index d47a063bac..2c352d50d4 100644 --- a/devito/symbolics/extended_sympy.py +++ b/devito/symbolics/extended_sympy.py @@ -2,6 +2,7 @@ Extended SymPy hierarchy. """ import re +from contextlib import suppress import numpy as np import sympy @@ -214,8 +215,8 @@ def __new__(cls, call, pointer, params=None, **kwargs): else: try: _params.append(Number(p)) - except TypeError: - raise ValueError("`params` must be Expr, numbers or str") + except TypeError as e: + raise ValueError("`params` must be Expr, numbers or str") from e params = Tuple(*_params) obj = sympy.Expr.__new__(cls, call, pointer, params) @@ -330,8 +331,8 @@ def __new__(cls, params, dtype=None): for p in as_tuple(params): try: args.append(sympify(p)) - except sympy.SympifyError: - raise ValueError(f"Illegal param `{p}`") + except sympy.SympifyError as e: + raise ValueError(f"Illegal param `{p}`") from e obj = sympy.Expr.__new__(cls, *args) obj.params = tuple(args) @@ -367,11 +368,8 @@ def __new__(cls, base, **kwargs): # If an AbstractFunction, pull the underlying Symbol base = base.indexed.label except AttributeError: - if isinstance(base, str): - base = Symbol(base) - else: - # Fallback: go plain sympy - base = sympify(base) + # Fallback: go plain sympy + base = Symbol(base) if isinstance(base, str) else sympify(base) obj = sympy.Expr.__new__(cls, base) obj._base = base @@ -476,10 +474,8 @@ def 
reinterpret(self): @property def _C_ctype(self): ctype = ctypes_vector_mapper.get(self.dtype, self.dtype) - try: + with suppress(TypeError): ctype = dtype_to_ctype(ctype) - except TypeError: - pass return ctype @property @@ -507,7 +503,7 @@ def __new__(cls, base, index, **kwargs): base = base.indexed.label except AttributeError: if not isinstance(base, sympy.Basic): - raise ValueError("`base` must be of type sympy.Basic") + raise ValueError("`base` must be of type sympy.Basic") from None index = Tuple(*[sympify(i) for i in as_tuple(index)]) @@ -674,10 +670,7 @@ def template(self): return self._template def __str__(self): - if self.template: - template = f"<{','.join(str(i) for i in self.template)}>" - else: - template = '' + template = f"<{','.join(str(i) for i in self.template)}>" if self.template else '' arguments = ', '.join(str(i) for i in self.arguments) return f"{self.name}{template}({arguments})" diff --git a/devito/symbolics/inspection.py b/devito/symbolics/inspection.py index d9902e8364..3147118de1 100644 --- a/devito/symbolics/inspection.py +++ b/devito/symbolics/inspection.py @@ -1,3 +1,4 @@ +from contextlib import suppress from functools import singledispatch import numpy as np @@ -49,19 +50,18 @@ def compare_ops(e1, e2): """ if type(e1) is type(e2) and len(e1.args) == len(e2.args): if e1.is_Atom: - return True if e1 == e2 else False + return e1 == e2 elif isinstance(e1, IndexDerivative) and isinstance(e2, IndexDerivative): if e1.mapper == e2.mapper: return compare_ops(e1.expr, e2.expr) else: return False elif e1.is_Indexed and e2.is_Indexed: - return True if e1.base == e2.base else False + return e1.base == e2.base else: - for a1, a2 in zip(e1.args, e2.args): - if not compare_ops(a1, a2): - return False - return True + return all( + compare_ops(a1, a2) for a1, a2 in zip(e1.args, e2.args, strict=True) + ) else: return False @@ -110,7 +110,7 @@ def estimate_cost(exprs, estimate=False): return flops except: - warning("Cannot estimate cost of `%s`" % 
str(exprs)) + warning(f"Cannot estimate cost of `{str(exprs)}`") return 0 @@ -147,7 +147,9 @@ def _estimate_cost(expr, estimate, seen): # The flag tells whether it's an integer expression (implying flops==0) or not if not expr.args: return 0, False - flops, flags = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, flags = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) flops = sum(flops) if all(flags): # `expr` is an operation involving integer operands only @@ -162,7 +164,9 @@ def _estimate_cost(expr, estimate, seen): @_estimate_cost.register(CallFromPointer) def _(expr, estimate, seen): try: - flops, flags = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, flags = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) except ValueError: flops, flags = [], [] return sum(flops), all(flags) @@ -215,7 +219,9 @@ def _(expr, estimate, seen): @_estimate_cost.register(Application) def _(expr, estimate, seen): if q_routine(expr): - flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, _ = zip( + *[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True + ) flops = sum(flops) if isinstance(expr, DefFunction): # Bypass user-defined or language-specific functions @@ -235,7 +241,7 @@ def _(expr, estimate, seen): @_estimate_cost.register(Pow) def _(expr, estimate, seen): - flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args]) + flops, _ = zip(*[_estimate_cost(a, estimate, seen) for a in expr.args], strict=True) flops = sum(flops) if estimate: if expr.exp.is_Number: @@ -314,10 +320,8 @@ def sympy_dtype(expr, base=None, default=None, smin=None): dtypes = {base} - {None} for i in expr.free_symbols: - try: + with suppress(AttributeError): dtypes.add(i.dtype) - except AttributeError: - pass dtype = infer_dtype(dtypes) diff --git a/devito/symbolics/manipulation.py b/devito/symbolics/manipulation.py index 6574b9f096..d90c366bb2 
100644 --- a/devito/symbolics/manipulation.py +++ b/devito/symbolics/manipulation.py @@ -301,7 +301,7 @@ def xreplace_indices(exprs, mapper, key=None): handle = [i for i in handle if i.base.label in key] elif callable(key): handle = [i for i in handle if key(i)] - mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle])) + mapper = dict(zip(handle, [i.xreplace(mapper) for i in handle], strict=True)) replaced = [uxreplace(i, mapper) for i in as_tuple(exprs)] return replaced if isinstance(exprs, Iterable) else replaced[0] @@ -312,10 +312,7 @@ def _eval_numbers(expr, args): """ numbers, others = split(args, lambda i: i.is_Number) if len(numbers) > 1: - if isinstance(expr, UnevaluableMixin): - cls = expr.func.__base__ - else: - cls = expr.func + cls = expr.func.__base__ if isinstance(expr, UnevaluableMixin) else expr.func args[:] = [cls(*numbers)] + others @@ -427,7 +424,7 @@ def reuse_if_untouched(expr, args, evaluate=False): Reconstruct `expr` iff any of the provided `args` is different than the corresponding arg in `expr.args`. 
""" - if all(a is b for a, b in zip(expr.args, args)): + if all(a is b for a, b in zip(expr.args, args, strict=False)): return expr else: return expr.func(*args, evaluate=evaluate) diff --git a/devito/symbolics/queries.py b/devito/symbolics/queries.py index 76cb737294..c8624c3578 100644 --- a/devito/symbolics/queries.py +++ b/devito/symbolics/queries.py @@ -95,10 +95,7 @@ def q_terminalop(expr, depth=0): return True elif expr.is_Add or expr.is_Mul: for a in expr.args: - if a.is_Pow: - elems = a.args - else: - elems = [a] + elems = a.args if a.is_Pow else [a] if any(not q_leaf(i) for i in elems): return False return True @@ -306,14 +303,13 @@ def case1(*args): if x0 is not x1: return False - if not isinstance(p1, Constant): - # TODO: Same considerations above about Constant apply - return False # At this point we are in the form `X {+,-} X / p0 + p1`, where # `X`, `p0`, and `p1` are definitely positive; since `X > X / p0`, # definitely the answer is True - return True + # OR we are a constant and return False + # TODO: Same considerations above about Constant apply + return isinstance(p1, Constant) if len(expr.args) == 2: return case0(*expr.args) or case1(S.Zero, *expr.args) diff --git a/devito/symbolics/search.py b/devito/symbolics/search.py index 500ffd25ed..cfb9d504be 100644 --- a/devito/symbolics/search.py +++ b/devito/symbolics/search.py @@ -104,10 +104,7 @@ def search(exprs: Expression | Iterable[Expression], assert mode in ('all', 'unique'), "Unknown mode" - if isinstance(query, type): - Q = lambda obj: isinstance(obj, query) - else: - Q = query + Q = (lambda obj: isinstance(obj, query)) if isinstance(query, type) else query # Search doesn't actually use a BFS (rather, a preorder DFS), but the terminology # is retained in this function's parameters for backwards compatibility @@ -164,7 +161,7 @@ def retrieve_function_carriers(exprs, mode='all'): # Filter off Indexeds not carrying a DiscreteFunction for i in list(retval): try: - i.function + _ = i.function 
except AttributeError: retval.remove(i) return retval diff --git a/devito/tools/abc.py b/devito/tools/abc.py index f325778239..1b92533f5c 100644 --- a/devito/tools/abc.py +++ b/devito/tools/abc.py @@ -1,10 +1,9 @@ -import abc from hashlib import sha1 __all__ = ['Pickable', 'Reconstructable', 'Signer', 'Singleton', 'Stamp', 'Tag'] -class Tag(abc.ABC): +class Tag: """ An abstract class to define categories of object decorators. @@ -39,10 +38,7 @@ def __hash__(self): return hash((self.name, self.val)) def __str__(self): - if self.val is None: - ret = self.name - else: - ret = "%s[%s]" % (self.name, str(self.val)) + ret = self.name if self.val is None else f"{self.name}[{str(self.val)}]" return ret __repr__ = __str__ @@ -277,6 +273,6 @@ class Stamp: """ def __repr__(self): - return "<%s>" % str(id(self))[-3:] + return f"<{str(id(self))[-3:]}>" __str__ = __repr__ diff --git a/devito/tools/algorithms.py b/devito/tools/algorithms.py index 13d349149e..0543dd05bd 100644 --- a/devito/tools/algorithms.py +++ b/devito/tools/algorithms.py @@ -75,6 +75,6 @@ def toposort(data): if item not in ordered]) if len(processed) != len(set(flatten(data) + flatten(data.values()))): - raise ValueError("A cyclic dependency exists amongst %r" % data) + raise ValueError(f"A cyclic dependency exists amongst {data!r}") return processed diff --git a/devito/tools/data_structures.py b/devito/tools/data_structures.py index 48a7a342a7..4d313b00ac 100644 --- a/devito/tools/data_structures.py +++ b/devito/tools/data_structures.py @@ -1,6 +1,7 @@ import json from collections import OrderedDict, deque from collections.abc import Callable, Iterable, Mapping, MutableSet, Set +from contextlib import suppress from functools import cached_property, reduce import numpy as np @@ -41,11 +42,12 @@ def __init__(self, **kwargs): self.__dict__.update(kwargs) def __repr__(self): - return "Bunch(%s)" % ", ".join(["%s=%s" % i for i in self.__dict__.items()]) + return "Bunch({})".format( + ", ".join(["{}={}".format(*i) 
for i in self.__dict__.items()]) + ) def __iter__(self): - for i in self.__dict__.values(): - yield i + yield from self.__dict__.values() class EnrichedTuple(tuple, Pickable): @@ -61,7 +63,7 @@ def __new__(cls, *items, getters=None, **kwargs): obj = super().__new__(cls, items) obj.__dict__.update(kwargs) # Convert to list if we're getting an OrderedDict from rebuild - obj.getters = OrderedDict(zip(list(getters or []), items)) + obj.getters = OrderedDict(zip(list(getters or []), items, strict=False)) return obj def _rebuild(self, *args, **kwargs): @@ -118,7 +120,7 @@ def fromdicts(cls, *dicts): ret = ReducerMap() for i in dicts: if not isinstance(i, Mapping): - raise ValueError("Expected Mapping, got `%s`" % type(i)) + raise ValueError(f"Expected Mapping, got `{type(i)}`") ret.update(i) return ret @@ -179,8 +181,9 @@ def compare_to_first(v): return c return candidates[0] else: - raise ValueError("Unable to find unique value for key %s, candidates: %s" - % (key, candidates)) + raise ValueError( + f'Unable to find unique value for key {key}, candidates: {candidates}' + ) def reduce(self, key, op=None): """ @@ -240,10 +243,7 @@ def __missing__(self, key): return value def __reduce__(self): - if self.default_factory is None: - args = tuple() - else: - args = self.default_factory, + args = tuple() if self.default_factory is None else (self.default_factory,) return type(self), args, None, None, self() def copy(self): @@ -303,10 +303,10 @@ def __gt__(self, other): return self >= other and self != other def __repr__(self): - return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys()))) + return 'OrderedSet([{}])'.format(', '.join(map(repr, self.keys()))) def __str__(self): - return '{%s}' % (', '.join(map(repr, self.keys()))) + return '{{{}}}'.format(', '.join(map(repr, self.keys()))) difference = property(lambda self: self.__sub__) difference_update = property(lambda self: self.__isub__) @@ -448,15 +448,15 @@ def add_node(self, node_name, ignore_existing=False): if 
node_name in self.graph: if ignore_existing is True: return - raise KeyError('node %s already exists' % node_name) + raise KeyError(f'node {node_name} already exists') self.graph[node_name] = OrderedSet() def delete_node(self, node_name): """Delete a node and all edges referencing it.""" if node_name not in self.graph: - raise KeyError('node %s does not exist' % node_name) + raise KeyError(f'node {node_name} does not exist') self.graph.pop(node_name) - for node, edges in self.graph.items(): + for _, edges in self.graph.items(): if node_name in edges: edges.remove(node_name) @@ -476,10 +476,8 @@ def delete_edge(self, ind_node, dep_node): if dep_node not in self.graph.get(ind_node, []): raise KeyError('this edge does not exist in graph') self.graph[ind_node].remove(dep_node) - try: + with suppress(KeyError): del self.labels[ind_node][dep_node] - except KeyError: - pass def get_label(self, ind_node, dep_node, default=None): try: @@ -512,7 +510,7 @@ def _all_predecessors(n): def downstream(self, node): """Return a list of all nodes this node has edges towards.""" if node not in self.graph: - raise KeyError('node %s is not in graph' % node) + raise KeyError(f'node {node} is not in graph') return list(self.graph[node]) def all_downstreams(self, node): @@ -606,7 +604,7 @@ def connected_components(self, enumerated=False): def find_paths(self, node): if node not in self.graph: - raise KeyError('node %s is not in graph' % node) + raise KeyError(f'node {node} is not in graph') paths = [] @@ -660,7 +658,7 @@ def __len__(self): return len(self._dict) def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self._dict) + return f'<{self.__class__.__name__} {self._dict!r}>' def __hash__(self): if self._hash is None: @@ -782,7 +780,7 @@ def __len__(self): def __repr__(self): sitems = [s.__repr__() for s in self] - return "%s(%s)" % (self.__class__.__name__, ", ".join(sitems)) + return "{}({})".format(self.__class__.__name__, ", ".join(sitems)) def __getitem__(self, idx): 
if not self: diff --git a/devito/tools/dtypes_lowering.py b/devito/tools/dtypes_lowering.py index 7f0ba56911..bba63c6040 100644 --- a/devito/tools/dtypes_lowering.py +++ b/devito/tools/dtypes_lowering.py @@ -35,7 +35,7 @@ def build_dtypes_vector(field_names, counts, mapper=None): mapper = mapper or dtype_mapper for base_name, base_dtype in mapper.items(): for count in counts: - name = "%s%d" % (base_name, count) + name = f'{base_name}{count}' titles = field_names[:count] @@ -43,9 +43,9 @@ def build_dtypes_vector(field_names, counts, mapper=None): if count == 3: padded_count = 4 - names = ["s%d" % i for i in range(count)] + names = [f's{i}' for i in range(count)] while len(names) < padded_count: - names.append("padding%d" % (len(names) - count)) + names.append(f'padding{len(names) - count}') if len(titles) < len(names): titles.extend((len(names) - len(titles)) * [None]) @@ -82,7 +82,7 @@ def add_dtype(self, field_name, count): self.update(build_dtypes_vector([field_name], [count])) def get_base_dtype(self, v, default=None): - for (base_dtype, count), dtype in self.items(): + for (base_dtype, _), dtype in self.items(): if dtype is v: return base_dtype @@ -119,10 +119,12 @@ def __hash__(self): return hash((self.name, self.template, self.modifier)) def __repr__(self): - template = '<%s>' % ','.join([str(i) for i in self.template]) - return "%s%s%s" % (self.name, - template if self.template else '', - self.modifier) + template = '<{}>'.format(','.join([str(i) for i in self.template])) + return "{}{}{}".format( + self.name, + template if self.template else '', + self.modifier + ) __str__ = __repr__ @@ -241,7 +243,7 @@ class c_restrict_void_p(ctypes.c_void_p): for count in counts: dtype = dtypes_vector_mapper[(base_dtype, count)] - name = "%s%d" % (base_name, count) + name = f'{base_name}{count}' ctype = type(name, (ctypes.Structure,), {'_fields_': [(i, base_ctype) for i in field_names[:count]], '_base_dtype': True}) @@ -262,21 +264,21 @@ def ctypes_to_cstr(ctype, 
toarray=None): elif isinstance(ctype, CustomDtype): retval = str(ctype) elif issubclass(ctype, ctypes.Structure): - retval = 'struct %s' % ctype.__name__ + retval = f'struct {ctype.__name__}' elif issubclass(ctype, ctypes.Union): - retval = 'union %s' % ctype.__name__ + retval = f'union {ctype.__name__}' elif issubclass(ctype, ctypes._Pointer): if toarray: - retval = ctypes_to_cstr(ctype._type_, '(* %s)' % toarray) + retval = ctypes_to_cstr(ctype._type_, f'(* {toarray})') else: retval = ctypes_to_cstr(ctype._type_) if issubclass(ctype._type_, ctypes._Pointer): # Look-ahead to avoid extra ugly spaces - retval = '%s*' % retval + retval = f'{retval}*' else: - retval = '%s *' % retval + retval = f'{retval} *' elif issubclass(ctype, ctypes.Array): - retval = '%s[%d]' % (ctypes_to_cstr(ctype._type_, toarray), ctype._length_) + retval = f'{ctypes_to_cstr(ctype._type_, toarray)}[{ctype._length_}]' elif ctype.__name__.startswith('c_'): name = ctype.__name__[2:] # A primitive datatype @@ -304,9 +306,9 @@ def ctypes_to_cstr(ctype, toarray=None): retval = name if prefix: - retval = '%s %s' % (prefix, retval) + retval = f'{prefix} {retval}' if suffix: - retval = '%s %s' % (retval, suffix) + retval = f'{retval} {suffix}' else: # A custom datatype (e.g., a typedef-ed pointer to struct) retval = ctype.__name__ @@ -326,10 +328,7 @@ def is_external_ctype(ctype, includes): if issubclass(ctype, ctypes._SimpleCData): return False - if ctype in ctypes_vector_mapper.values(): - return True - - return False + return ctype in ctypes_vector_mapper.values() def is_numpy_dtype(dtype): diff --git a/devito/tools/os_helper.py b/devito/tools/os_helper.py index 8bc5a3eaf5..a7e45d442a 100644 --- a/devito/tools/os_helper.py +++ b/devito/tools/os_helper.py @@ -28,9 +28,9 @@ def make_tempdir(prefix=None): """Create a temporary directory having a deterministic name. 
The directory is created within the default OS temporary directory.""" if prefix is None: - name = 'devito-uid%s' % os.getuid() + name = f'devito-uid{os.getuid()}' else: - name = 'devito-%s-uid%s' % (str(prefix), os.getuid()) + name = f'devito-{str(prefix)}-uid{os.getuid()}' tmpdir = Path(gettempdir()).joinpath(name) tmpdir.mkdir(parents=True, exist_ok=True) return tmpdir diff --git a/devito/tools/timing.py b/devito/tools/timing.py index f3c562ac56..068197a786 100644 --- a/devito/tools/timing.py +++ b/devito/tools/timing.py @@ -1,4 +1,5 @@ from collections import OrderedDict, defaultdict +from contextlib import suppress from functools import partial from threading import get_ident from time import time @@ -36,7 +37,7 @@ def __new__(cls, *args, name=None): assert name is None func, name = args else: - assert False + raise AssertionError('Incorrect number of args') obj = object.__new__(cls) obj.__init__(func, name) return obj @@ -64,10 +65,7 @@ def __call__(self, *args, **kwargs): if not isinstance(timings, dict): raise ValueError("Attempting to use `timed_pass` outside a `timed_region`") - if self.name is not None: - frame = self.name - else: - frame = self.func.__name__ + frame = self.name if self.name is not None else self.func.__name__ stack = timed_pass.stack[tid] stack.append(frame) @@ -116,10 +114,8 @@ def __enter__(self): def __exit__(self, *args): self.timings[self.name] = time() - self.tic del timed_pass.timings[get_ident()] - try: + with suppress(KeyError): # Necessary clean up should one be constructing an Operator within # a try-except, with the Operator construction failing + # Typically we suppress del timed_pass.stack[get_ident()] - except KeyError: - # Typically we end up here - pass diff --git a/devito/tools/utils.py b/devito/tools/utils.py index 54c8a760cb..91b5bcdbf7 100644 --- a/devito/tools/utils.py +++ b/devito/tools/utils.py @@ -81,9 +81,9 @@ def as_tuple(item, type=None, length=None): t = (item,) * (length or 1) if length and not len(t) == 
length: - raise ValueError("Tuple needs to be of length %d" % length) + raise ValueError(f'Tuple needs to be of length {length}') if type and not all(isinstance(i, type) for i in t): - raise TypeError("Items need to be of type %s" % type) + raise TypeError(f'Items need to be of type {type}') return t @@ -213,7 +213,7 @@ def filter_ordered(elements, key=None): if key is None: return list(dict.fromkeys(elements)) else: - return list(dict(zip([key(i) for i in elements], elements)).values()) + return list(dict(zip([key(i) for i in elements], elements, strict=True)).values()) def filter_sorted(elements, key=None): @@ -245,7 +245,7 @@ def sweep(parameters, keys=None): sweep_values = [[v] if isinstance(v, str) or not isinstance(v, Iterable) else v for v in sweep_values] for vals in product(*sweep_values): - yield dict(zip(keys, vals)) + yield dict(zip(keys, vals, strict=True)) def indices_to_slices(inputlist): @@ -263,7 +263,7 @@ def indices_to_slices(inputlist): """ inputlist.sort() pointers = np.where(np.diff(inputlist) > 1)[0] - pointers = zip(np.r_[0, pointers+1], np.r_[pointers, len(inputlist)-1]) + pointers = zip(np.r_[0, pointers+1], np.r_[pointers, len(inputlist)-1], strict=True) slices = [(inputlist[i], inputlist[j]+1) for i, j in pointers] return slices @@ -310,7 +310,7 @@ def transitive_closure(R): {a:d, b:d, c:d} ''' ans = dict() - for k in R.keys(): + for k in R: visited = [] ans[k] = reachable_items(R, k, visited) return ans @@ -331,15 +331,15 @@ def humanbytes(B): TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: - return '%d %s' % (int(B), 'B') + return f'{int(B)} B' elif KB <= B < MB: - return '%d KB' % round(B / KB) + return f'{round(B / KB)} KB' elif MB <= B < GB: - return '%d MB' % round(B / MB) + return f'{round(B / MB)} MB' elif GB <= B < TB: - return '%.1f GB' % round(B / GB, 1) + return f'{round(B / GB, 1):.1f} GB' elif TB <= B: - return '%.2f TB' % round(B / TB, 1) + return f'{round(B / TB, 1):.2f} TB' def sorted_priority(items, priority): diff 
--git a/devito/types/args.py b/devito/types/args.py index 2110f58e84..9e57339b63 100644 --- a/devito/types/args.py +++ b/devito/types/args.py @@ -13,16 +13,18 @@ class ArgProvider: @property @abc.abstractmethod def _arg_names(self): - raise NotImplementedError('%s does not provide any default argument names' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide any default argument names' + ) @abc.abstractmethod def _arg_defaults(self): """ A map of default argument values defined by this type. """ - raise NotImplementedError('%s does not provide any default arguments' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide any default arguments' + ) @abc.abstractmethod def _arg_values(self, **kwargs): @@ -34,8 +36,9 @@ def _arg_values(self, **kwargs): **kwargs User-provided argument overrides. """ - raise NotImplementedError('%s does not provide argument value derivation' % - self.__class__) + raise NotImplementedError( + f'{self.__class__} does not provide argument value derivation' + ) def _arg_check(self, *args, **kwargs): """ diff --git a/devito/types/array.py b/devito/types/array.py index 6f2914756b..c48425e33d 100644 --- a/devito/types/array.py +++ b/devito/types/array.py @@ -41,10 +41,7 @@ def __init_finalize__(self, *args, **kwargs): def __indices_setup__(cls, *args, **kwargs): dimensions = kwargs['dimensions'] - if args: - indices = args - else: - indices = dimensions + indices = args or dimensions return as_tuple(dimensions), as_tuple(indices) @@ -174,7 +171,7 @@ def __padding_setup__(self, **kwargs): elif isinstance(padding, tuple) and len(padding) == self.ndim: padding = tuple((0, i) if is_integer(i) else i for i in padding) else: - raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim) + raise TypeError(f'`padding` must be int or {self.ndim}-tuple of ints') return DimensionTuple(*padding, getters=self.dimensions) @property @@ -222,7 +219,7 @@ def free_symbols(self): 
return super().free_symbols - {d for d in self.dimensions if d.is_Default} def _make_pointer(self, dim): - return PointerArray(name='p%s' % self.name, dimensions=dim, array=self) + return PointerArray(name=f'p{self.name}', dimensions=dim, array=self) class MappedArrayMixin: @@ -282,13 +279,13 @@ def __init_finalize__(self, *args, **kwargs): fields = tuple(kwargs.pop('fields', ())) self._fields = fields - self._pname = kwargs.pop('pname', 't%s' % name) + self._pname = kwargs.pop('pname', f't{name}') super().__init_finalize__(*args, **kwargs) @classmethod def __dtype_setup__(cls, **kwargs): - pname = kwargs.get('pname', 't%s' % kwargs['name']) + pname = kwargs.get('pname', 't{}'.format(kwargs['name'])) pfields = cls.__pfields_setup__(**kwargs) return CtypesFactory.generate(pname, pfields) @@ -536,8 +533,10 @@ def __getitem__(self, index): component_index, indices = index[0], index[1:] return ComponentAccess(self.indexed[indices], component_index) else: - raise ValueError("Expected %d or %d indices, got %d instead" - % (self.ndim, self.ndim + 1, len(index))) + raise ValueError( + f'Expected {self.ndim} or {self.ndim + 1} indices, ' + f'got {len(index)} instead' + ) @property def _C_ctype(self): @@ -613,7 +612,7 @@ def _hashable_content(self): return super()._hashable_content() + (self._index,) def __str__(self): - return "%s.%s" % (self.base, self.sindex) + return f"{self.base}.{self.sindex}" __repr__ = __str__ diff --git a/devito/types/basic.py b/devito/types/basic.py index 7f5702a7d2..75aed9d32f 100644 --- a/devito/types/basic.py +++ b/devito/types/basic.py @@ -1,5 +1,6 @@ import abc import inspect +from contextlib import suppress from ctypes import POINTER, Structure, _Pointer, c_char, c_char_p from functools import cached_property, reduce from operator import mul @@ -101,7 +102,7 @@ def _C_typedata(self): try: # We have internal types such as c_complex that are # Structure too but should be treated as plain c_type - _type._base_dtype + _ = _type._base_dtype except 
AttributeError: if issubclass(_type, Structure): _type = f'struct {_type.__name__}' @@ -852,8 +853,10 @@ def __init_finalize__(self, *args, **kwargs): # Averaging mode for off the grid evaluation self._avg_mode = kwargs.get('avg_mode', 'arithmetic') if self._avg_mode not in ['arithmetic', 'harmonic', 'safe_harmonic']: - raise ValueError("Invalid averaging mode_mode %s, accepted values are" - " arithmetic or harmonic" % self._avg_mode) + raise ValueError( + f"Invalid averaging mode_mode {self._avg_mode}, accepted values are" + " arithmetic or harmonic" + ) @classmethod def __args_setup__(cls, *args, **kwargs): @@ -956,10 +959,14 @@ def origin(self): f(x) : origin = 0 f(x + hx/2) : origin = hx/2 """ - return DimensionTuple(*(r - d + o for d, r, o - in zip(self.dimensions, self.indices_ref, - self._offset_subdomain)), - getters=self.dimensions) + return DimensionTuple(*( + r - d + o + for d, r, o in zip( + self.dimensions, + self.indices_ref, + self._offset_subdomain, strict=True + ) + ), getters=self.dimensions) @property def dimensions(self): @@ -998,7 +1005,7 @@ def _grid_map(self): """ mapper = {} subs = {} - for i, j, d in zip(self.indices, self.indices_ref, self.dimensions): + for i, j, d in zip(self.indices, self.indices_ref, self.dimensions, strict=True): # Two indices are aligned if they differ by an Integer*spacing. 
if not i.has(d): # Maybe a SubDimension @@ -1114,7 +1121,7 @@ def symbolic_shape(self): padding = [sympy.Add(*i, evaluate=False) for i in self._size_padding] domain = [i.symbolic_size for i in self.dimensions] ret = tuple(sympy.Add(i, j, k) - for i, j, k in zip(domain, halo, padding)) + for i, j, k in zip(domain, halo, padding, strict=True)) return DimensionTuple(*ret, getters=self.dimensions) @property @@ -1263,8 +1270,8 @@ def _size_domain(self): @cached_property def _size_halo(self): """Number of points in the halo region.""" - left = tuple(zip(*self._halo))[0] - right = tuple(zip(*self._halo))[1] + left = tuple(zip(*self._halo, strict=True))[0] + right = tuple(zip(*self._halo, strict=True))[1] sizes = tuple(Size(i, j) for i, j in self._halo) @@ -1283,8 +1290,8 @@ def _size_owned(self): @cached_property def _size_padding(self): """Number of points in the padding region.""" - left = tuple(zip(*self._padding))[0] - right = tuple(zip(*self._padding))[1] + left = tuple(zip(*self._padding, strict=True))[0] + right = tuple(zip(*self._padding, strict=True))[1] sizes = tuple(Size(i, j) for i, j in self._padding) @@ -1293,7 +1300,10 @@ def _size_padding(self): @cached_property def _size_nopad(self): """Number of points in the domain+halo region.""" - sizes = tuple(i+sum(j) for i, j in zip(self._size_domain, self._size_halo)) + sizes = tuple( + i+sum(j) + for i, j in zip(self._size_domain, self._size_halo, strict=True) + ) return DimensionTuple(*sizes, getters=self.dimensions) @cached_property @@ -1326,7 +1336,7 @@ def _offset_halo(self): left = tuple(self._size_padding.left) right = tuple(np.add(np.add(left, self._size_halo.left), self._size_domain)) - offsets = tuple(Offset(i, j) for i, j in zip(left, right)) + offsets = tuple(Offset(i, j) for i, j in zip(left, right, strict=True)) return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right) @@ -1336,7 +1346,7 @@ def _offset_owned(self): left = tuple(self._offset_domain) right = 
tuple(np.add(self._offset_halo.left, self._size_domain)) - offsets = tuple(Offset(i, j) for i, j in zip(left, right)) + offsets = tuple(Offset(i, j) for i, j in zip(left, right, strict=True)) return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right) @@ -1383,7 +1393,7 @@ def indexify(self, indices=None, subs=None): # Indices after substitutions indices = [] - for a, d, o, s in zip(self.args, self.dimensions, self.origin, subs): + for a, d, o, s in zip(self.args, self.dimensions, self.origin, subs, strict=True): if a.is_Function and len(a.args) == 1: # E.g. Abs(expr) arg = a.args[0] @@ -1503,16 +1513,14 @@ def _fromrep(cls, rep): """ newobj = super()._fromrep(rep) grid, dimensions = newobj._infer_dims() - try: - # This is needed when `_fromrep` is called directly in 1.9 - # for example with mul. - newobj.__init_finalize__(newobj.rows, newobj.cols, newobj.flat(), - grid=grid, dimensions=dimensions) - except TypeError: + with suppress(TypeError): # We can end up here when `_fromrep` is called through the default _new # when input `comps` don't have grid or dimensions. For example # `test_non_devito_tens` in `test_tensor.py`. - pass + # This is suppressed when `_fromrep` is called directly in 1.9 + # for example with mul. 
+ newobj.__init_finalize__(newobj.rows, newobj.cols, newobj.flat(), + grid=grid, dimensions=dimensions) return newobj @classmethod @@ -1659,7 +1667,10 @@ def _eval_matrix_mul(self, other): row, col = i // other.cols, i % other.cols row_indices = range(self_cols*row, self_cols*(row+1)) col_indices = range(col, other_len, other.cols) - vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)] + vec = [ + mat[a]*other_mat[b] + for a, b in zip(row_indices, col_indices, strict=True) + ] new_mat[i] = sum(vec) # Get new class and return product @@ -1739,10 +1750,8 @@ def dtype(self): def free_symbols(self): ret = {self} for i in self.indices: - try: + with suppress(AttributeError): ret.update(i.free_symbols) - except AttributeError: - pass return ret # Pickling support @@ -1865,7 +1874,7 @@ def compare(self, other): """ if (self.__class__ != other.__class__) or (self.function is not other.function): return super().compare(other) - for l, r in zip(self.indices, other.indices): + for l, r in zip(self.indices, other.indices, strict=True): try: c = int(sympy.sign(l - r)) except TypeError: diff --git a/devito/types/constant.py b/devito/types/constant.py index bea67674a2..dbd3781e7d 100644 --- a/devito/types/constant.py +++ b/devito/types/constant.py @@ -100,12 +100,14 @@ def _arg_check(self, args, intervals, **kwargs): Check that `args` contains legal runtime values bound to `self`. 
""" if self.name not in args: - raise InvalidArgument("No runtime value for %s" % self.name) + raise InvalidArgument(f"No runtime value for {self.name}") key = args[self.name] try: # Might be a plain number, w/o a dtype field if key.dtype != self.dtype: - warning("Data type %s of runtime value `%s` does not match the " - "Constant data type %s" % (key.dtype, self.name, self.dtype)) + warning( + f'Data type {key.dtype} of runtime value `{self.name}` ' + f'does not match the Constant data type {self.dtype}' + ) except AttributeError: pass diff --git a/devito/types/dense.py b/devito/types/dense.py index 8527cb089b..551dcad14f 100644 --- a/devito/types/dense.py +++ b/devito/types/dense.py @@ -2,6 +2,7 @@ from ctypes import POINTER, Structure, byref, c_int, c_ulong, c_void_p, cast from functools import cached_property, reduce, wraps from operator import mul +from textwrap import dedent, wrap import numpy as np import sympy @@ -110,10 +111,11 @@ def __init_finalize__(self, *args, function=None, **kwargs): # running with MPI and some processes get 0-size arrays after # domain decomposition. 
We touch the data anyway to avoid the # case `self._data is None` - self.data + _ = self.data else: - raise ValueError("`initializer` must be callable or buffer, not %s" - % type(initializer)) + raise ValueError( + f'`initializer` must be callable or buffer, not {type(initializer)}' + ) _subs = Differentiable._subs @@ -179,10 +181,12 @@ def __coefficients_setup__(self, **kwargs): coeffs = kwargs.get('coefficients', self._default_fd) if coeffs not in fd_weights_registry: if coeffs == 'symbolic': - deprecations.symbolic_warn + _ = deprecations.symbolic_warn else: - raise ValueError(f"coefficients must be one of {str(fd_weights_registry)}" - f" not {coeffs}") + raise ValueError( + f'coefficients must be one of {str(fd_weights_registry)}' + f' not {coeffs}' + ) return coeffs @cached_property @@ -248,7 +252,10 @@ def shape_with_halo(self): the outhalo of boundary ranks contains a number of elements depending on the rank position in the decomposed grid (corner, side, ...). """ - return tuple(j + i + k for i, (j, k) in zip(self.shape, self._size_outhalo)) + return tuple( + j + i + k + for i, (j, k) in zip(self.shape, self._size_outhalo, strict=True) + ) @cached_property def _shape_with_inhalo(self): @@ -263,7 +270,10 @@ def _shape_with_inhalo(self): Typically, this property won't be used in user code, but it may come in handy for testing or debugging """ - return tuple(j + i + k for i, (j, k) in zip(self.shape, self._halo)) + return tuple( + j + i + k + for i, (j, k) in zip(self.shape, self._halo, strict=True) + ) @cached_property def shape_allocated(self): @@ -275,9 +285,13 @@ def shape_allocated(self): ----- In an MPI context, this is the *local* with_halo region shape. 
""" - return DimensionTuple(*[j + i + k for i, (j, k) in zip(self._shape_with_inhalo, - self._padding)], - getters=self.dimensions) + return DimensionTuple( + *[ + j + i + k + for i, (j, k) in zip(self._shape_with_inhalo, self._padding, strict=True) + ], + getters=self.dimensions + ) @cached_property def shape_global(self): @@ -297,15 +311,19 @@ def shape_global(self): if self.grid is None: return self.shape retval = [] - for d, s in zip(self.dimensions, self.shape): + for d, s in zip(self.dimensions, self.shape, strict=True): size = self.grid.size_map.get(d) retval.append(size.glb if size is not None else s) return tuple(retval) @property def symbolic_shape(self): - return DimensionTuple(*[self._C_get_field(FULL, d).size for d in self.dimensions], - getters=self.dimensions) + return DimensionTuple( + *[ + self._C_get_field(FULL, d).size for d in self.dimensions + ], + getters=self.dimensions + ) @property def size_global(self): @@ -333,30 +351,49 @@ def _size_outhalo(self): # and inhalo correspond return self._size_inhalo - left = [abs(min(i.loc_abs_min-i.glb_min-j, 0)) if i and not i.loc_empty else 0 - for i, j in zip(self._decomposition, self._size_inhalo.left)] - right = [max(i.loc_abs_max+j-i.glb_max, 0) if i and not i.loc_empty else 0 - for i, j in zip(self._decomposition, self._size_inhalo.right)] + left = [ + abs(min(i.loc_abs_min-i.glb_min-j, 0)) + if i and not i.loc_empty else 0 + for i, j in zip(self._decomposition, self._size_inhalo.left, strict=True) + ] + right = [ + max(i.loc_abs_max+j-i.glb_max, 0) + if i and not i.loc_empty else 0 + for i, j in zip(self._decomposition, self._size_inhalo.right, strict=True) + ] - sizes = tuple(Size(i, j) for i, j in zip(left, right)) + sizes = tuple(Size(i, j) for i, j in zip(left, right, strict=True)) if self._distributor.is_parallel and (any(left) or any(right)): try: - warning_msg = f"""A space order of {self._space_order} and a halo size of {max(self._size_inhalo)} has been - set but the current rank 
({self._distributor.myrank}) has a domain size of - only {min(self.grid.shape_local)}""" + warning_msg = dedent(f""" + A space order of {self._space_order} and a halo size of + {max(self._size_inhalo)} has been set but the current rank + ({self._distributor.myrank}) has a domain size of only + {min(self.grid.shape_local)} + """)[1:] if not self._distributor.is_boundary_rank: - warning(warning_msg) + warning(' '.join(wrap(warning_msg))) else: - left_dist = [i for i, d in zip(left, self.dimensions) if d - in self._distributor.dimensions] - right_dist = [i for i, d in zip(right, self.dimensions) if d - in self._distributor.dimensions] - for i, j, k, l in zip(left_dist, right_dist, - self._distributor.mycoords, - self._distributor.topology): + left_dist = [ + i + for i, d in zip(left, self.dimensions, strict=True) + if d in self._distributor.dimensions + ] + right_dist = [ + i + for i, d in zip(right, self.dimensions, strict=True) + if d in self._distributor.dimensions + ] + for i, j, k, l in zip( + left_dist, + right_dist, + self._distributor.mycoords, + self._distributor.topology, + strict=False + ): if l > 1 and ((j > 0 and k == 0) or (i > 0 and k == l-1)): - warning(warning_msg) + warning(' '.join(wrap(warning_msg))) break except AttributeError: pass @@ -375,25 +412,31 @@ def size_allocated(self): @cached_property def _mask_modulo(self): """Boolean mask telling which Dimensions support modulo-indexing.""" - return tuple(True if i.is_Stepping else False for i in self.dimensions) + return tuple(bool(i.is_Stepping) for i in self.dimensions) @cached_property def _mask_domain(self): """Slice-based mask to access the domain region of the allocated data.""" - return tuple(slice(i, j) for i, j in - zip(self._offset_domain, self._offset_halo.right)) + return tuple( + slice(i, j) + for i, j in zip(self._offset_domain, self._offset_halo.right, strict=True) + ) @cached_property def _mask_inhalo(self): """Slice-based mask to access the domain+inhalo region of the allocated 
data.""" - return tuple(slice(i.left, i.right + j.right) for i, j in - zip(self._offset_inhalo, self._size_inhalo)) + return tuple( + slice(i.left, i.right + j.right) + for i, j in zip(self._offset_inhalo, self._size_inhalo, strict=True) + ) @cached_property def _mask_outhalo(self): """Slice-based mask to access the domain+outhalo region of the allocated data.""" - return tuple(slice(i.start - j.left, i.stop and i.stop + j.right or None) - for i, j in zip(self._mask_domain, self._size_outhalo)) + return tuple( + slice(i.start - j.left, i.stop and i.stop + j.right or None) + for i, j in zip(self._mask_domain, self._size_outhalo, strict=True) + ) @cached_property def _decomposition(self): @@ -414,8 +457,11 @@ def _decomposition_outhalo(self): """ if self._distributor is None: return (None,)*self.ndim - return tuple(v.reshape(*self._size_inhalo[d]) if v is not None else v - for d, v in zip(self.dimensions, self._decomposition)) + return tuple( + v.reshape(*self._size_inhalo[d]) + if v is not None else v + for d, v in zip(self.dimensions, self._decomposition, strict=True) + ) @property def data(self): @@ -579,7 +625,7 @@ def _data_in_region(self, region, dim, side): index_array = [ slice(offset, offset+size) if d is dim else slice(pl, s - pr) for d, s, (pl, pr) - in zip(self.dimensions, self.shape_allocated, self._padding) + in zip(self.dimensions, self.shape_allocated, self._padding, strict=True) ] return np.asarray(self._data[index_array]) @@ -645,8 +691,10 @@ def local_indices(self): if self._distributor is None: return tuple(slice(0, s) for s in self.shape) else: - return tuple(self._distributor.glb_slices.get(d, slice(0, s)) - for s, d in zip(self.shape, self.dimensions)) + return tuple( + self._distributor.glb_slices.get(d, slice(0, s)) + for s, d in zip(self.shape, self.dimensions, strict=True) + ) @property def initializer(self): @@ -691,8 +739,10 @@ def _C_make_dataobj(self, alias=None, **args): dataobj._obj.nbytes = data.nbytes # MPI-related fields - 
dataobj._obj.npsize = (c_ulong*self.ndim)(*[i - sum(j) for i, j in - zip(data.shape, self._size_padding)]) + dataobj._obj.npsize = (c_ulong*self.ndim)(*[ + i - sum(j) + for i, j in zip(data.shape, self._size_padding, strict=True) + ]) dataobj._obj.dsize = (c_ulong*self.ndim)(*self._size_domain) dataobj._obj.hsize = (c_int*(self.ndim*2))(*flatten(self._size_halo)) dataobj._obj.hofs = (c_int*(self.ndim*2))(*flatten(self._offset_halo)) @@ -823,7 +873,7 @@ def _arg_defaults(self, alias=None, metadata=None, estimate_memory=False): args = ReducerMap({key.name: self._data_buffer(metadata=metadata)}) # Collect default dimension arguments from all indices - for a, i, s in zip(key.dimensions, self.dimensions, self.shape): + for a, i, s in zip(key.dimensions, self.dimensions, self.shape, strict=True): args.update(i._arg_defaults(_min=0, size=s, alias=a)) return args @@ -851,7 +901,7 @@ def _arg_values(self, metadata=None, estimate_memory=False, **kwargs): # We've been provided a pure-data replacement (array) values = {self.name: new} # Add value overrides for all associated dimensions - for i, s in zip(self.dimensions, new.shape): + for i, s in zip(self.dimensions, new.shape, strict=True): size = s - sum(self._size_nodomain[i]) values.update(i._arg_defaults(size=size)) else: @@ -883,7 +933,7 @@ def _arg_check(self, args, intervals, **kwargs): f"does not match the Function data type {self.dtype}") # Check each Dimension for potential OOB accesses - for i, s in zip(self.dimensions, data.shape): + for i, s in zip(self.dimensions, data.shape, strict=True): i._arg_check(args, s, intervals[i]) if args.options['index-mode'] == 'int32' and \ @@ -1133,8 +1183,10 @@ def __indices_setup__(cls, *args, **kwargs): if not staggered: staggered_indices = dimensions else: - staggered_indices = (d + i * d.spacing / 2 - for d, i in zip(dimensions, staggered)) + staggered_indices = ( + d + i * d.spacing / 2 + for d, i in zip(dimensions, staggered, strict=True) + ) return tuple(dimensions), 
tuple(staggered_indices) @property @@ -1169,7 +1221,7 @@ def __shape_setup__(cls, **kwargs): raise ValueError("`shape` and `dimensions` must have the " "same number of entries") loc_shape = [] - for d, s in zip(dimensions, shape): + for d, s in zip(dimensions, shape, strict=True): if d in grid.dimensions: size = grid.size_map[d] if size.glb != s and s is not None: @@ -1690,7 +1742,7 @@ def shape(self): def shape_with_halo(self): domain = self.shape halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo] - ret = tuple(sum(i) for i in zip(domain, halo)) + ret = tuple(sum(i) for i in zip(domain, halo, strict=True)) return DimensionTuple(*ret, getters=self.dimensions) shape_allocated = AbstractFunction.symbolic_shape diff --git a/devito/types/dimension.py b/devito/types/dimension.py index 3482f8da2c..fa02ebb32d 100644 --- a/devito/types/dimension.py +++ b/devito/types/dimension.py @@ -1,5 +1,6 @@ import math from collections import namedtuple +from contextlib import suppress from functools import cached_property import numpy as np @@ -306,16 +307,12 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): defaults = self._arg_defaults() if glb_minv is None: loc_minv = args.get(self.min_name, defaults[self.min_name]) - try: + with suppress(AttributeError, TypeError): loc_minv -= min(interval.lower, 0) - except (AttributeError, TypeError): - pass if glb_maxv is None: loc_maxv = args.get(self.max_name, defaults[self.max_name]) - try: + with suppress(AttributeError, TypeError): loc_maxv -= max(interval.upper, 0) - except (AttributeError, TypeError): - pass # Some `args` may still be DerivedDimensions' defaults. These, in turn, # may represent sets of legal values. 
If that's the case, here we just @@ -323,17 +320,13 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): try: loc_minv = loc_minv.stop except AttributeError: - try: + with suppress(TypeError): loc_minv = sorted(loc_minv).pop(0) - except TypeError: - pass try: loc_maxv = loc_maxv.stop except AttributeError: - try: + with suppress(TypeError): loc_maxv = sorted(loc_maxv).pop(0) - except TypeError: - pass return {self.min_name: loc_minv, self.max_name: loc_maxv} @@ -366,9 +359,10 @@ def _arg_check(self, args, size, interval): # Allow the specific case of max=min-1, which disables the loop if args[self.max_name] < args[self.min_name]-1: - raise InvalidArgument("Illegal %s=%d < %s=%d" - % (self.max_name, args[self.max_name], - self.min_name, args[self.min_name])) + raise InvalidArgument( + f'Illegal {self.max_name}={args[self.max_name]} < ' + f'{self.min_name}={args[self.min_name]}' + ) elif args[self.max_name] == args[self.min_name]-1: debug("%s=%d and %s=%d might cause no iterations along Dimension %s", self.min_name, args[self.min_name], @@ -645,7 +639,7 @@ def _interval(self): def _symbolic_thickness(self, **kwargs): kwargs = {'dtype': np.int32, 'is_const': True, 'nonnegative': True} - names = ["%s_%stkn" % (self.parent.name, s) for s in ('l', 'r')] + names = [f"{self.parent.name}_{s}tkn" for s in ('l', 'r')] return SubDimensionThickness(*[Thickness(name=n, **kwargs) for n in names]) @cached_property @@ -757,10 +751,12 @@ def _symbolic_thickness(self, thickness=None): kwargs = {'dtype': np.int32, 'is_const': True, 'nonnegative': True, 'root': self.root, 'local': self.local} - names = ["%s_%stkn" % (self.parent.name, s) for s in ('l', 'r')] + names = [f"{self.parent.name}_{s}tkn" for s in ('l', 'r')] sides = [LEFT, RIGHT] - return SubDimensionThickness(*[Thickness(name=n, side=s, value=t, **kwargs) - for n, s, t in zip(names, sides, thickness)]) + return SubDimensionThickness(*[ + Thickness(name=n, side=s, value=t, **kwargs) + for n, s, t in zip(names, 
sides, thickness, strict=True) + ]) @cached_property def _interval(self): @@ -934,7 +930,7 @@ def __init_finalize__(self, name, parent=None, factor=None, condition=None, elif is_number(factor): self._factor = int(factor) elif factor.is_Constant: - deprecations.constant_factor_warn + _ = deprecations.constant_factor_warn self._factor = factor else: raise ValueError("factor must be an integer") @@ -987,10 +983,8 @@ def free_symbols(self): retval = set(super().free_symbols) if self.condition is not None: retval |= self.condition.free_symbols - try: + with suppress(AttributeError): retval |= self.factor.free_symbols - except AttributeError: - pass return retval def _arg_values(self, interval, grid=None, args=None, **kwargs): @@ -1005,15 +999,11 @@ def _arg_values(self, interval, grid=None, args=None, **kwargs): toint = lambda x: math.ceil(x / fact) vals = {} - try: + with suppress(KeyError, TypeError): vals[self.min_name] = toint(kwargs.get(self.parent.min_name)) - except (KeyError, TypeError): - pass - try: + with suppress(KeyError, TypeError): vals[self.max_name] = toint(kwargs.get(self.parent.max_name)) - except (KeyError, TypeError): - pass vals[self.symbolic_factor.name] = fact @@ -1147,10 +1137,7 @@ def symbolic_min(self): @cached_property def symbolic_incr(self): - if self._incr is not None: - incr = self._incr - else: - incr = self.offset + incr = self._incr if self._incr is not None else self.offset if self.modulo is not None: incr = incr % self.modulo # Make sure we return a symbolic object as this point `incr` may well @@ -1370,19 +1357,22 @@ def _arg_check(self, args, *_args): # sub-BlockDimensions must be perfect divisors of their parent parent_value = args[self.parent.step.name] if parent_value % value > 0: - raise InvalidArgument("Illegal block size `%s=%d`: sub-block sizes " - "must divide the parent block size evenly (`%s=%d`)" - % (name, value, self.parent.step.name, - parent_value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: 
sub-block sizes ' + 'must divide the parent block size evenly ' + f'(`{self.parent.step.name}={parent_value}`)' + ) else: if value < 0: - raise InvalidArgument("Illegal block size `%s=%d`: it should be > 0" - % (name, value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: it should be > 0' + ) if value > args[self.root.max_name] - args[self.root.min_name] + 1: # Avoid OOB - raise InvalidArgument("Illegal block size `%s=%d`: it's greater than the " - "iteration range and it will cause an OOB access" - % (name, value)) + raise InvalidArgument( + f'Illegal block size `{name}={value}`: it is greater than the ' + 'iteration range and it will cause an OOB access' + ) class CustomDimension(BasicDimension): @@ -1540,8 +1530,8 @@ class DynamicSubDimension(DynamicDimensionMixin, SubDimension): @classmethod def _symbolic_thickness(cls, name): - return (Scalar(name="%s_ltkn" % name, dtype=np.int32, nonnegative=True), - Scalar(name="%s_rtkn" % name, dtype=np.int32, nonnegative=True)) + return (Scalar(name=f"{name}_ltkn", dtype=np.int32, nonnegative=True), + Scalar(name=f"{name}_rtkn", dtype=np.int32, nonnegative=True)) class StencilDimension(BasicDimension): @@ -1571,13 +1561,13 @@ def __init_finalize__(self, name, _min, _max, spacing=1, step=1, self._spacing = sympy.sympify(spacing) if not is_integer(_min): - raise ValueError("Expected integer `min` (got %s)" % _min) + raise ValueError(f"Expected integer `min` (got {_min})") if not is_integer(_max): - raise ValueError("Expected integer `max` (got %s)" % _max) + raise ValueError(f"Expected integer `max` (got {_max})") if not is_integer(self._spacing): - raise ValueError("Expected integer `spacing` (got %s)" % self._spacing) + raise ValueError(f"Expected integer `spacing` (got {self._spacing})") if not is_integer(step): - raise ValueError("Expected integer `step` (got %s)" % step) + raise ValueError(f"Expected integer `step` (got {step})") self._min = int(_min) self._max = int(_max) @@ -1586,7 +1576,7 @@ def 
__init_finalize__(self, name, _min, _max, spacing=1, step=1, self._size = _max - _min + 1 if self._size < 1: - raise ValueError("Expected size greater than 0 (got %s)" % self._size) + raise ValueError(f"Expected size greater than 0 (got {self._size})") @property def step(self): @@ -1860,7 +1850,7 @@ def _separate_dims(cls, d0, d1, ofs_items): def dimensions(names, n=1): if n > 1: - return tuple(Dimension('%s%s' % (names, i)) for i in range(n)) + return tuple(Dimension(f'{names}{i}') for i in range(n)) else: assert type(names) is str return tuple(Dimension(i) for i in names.split()) diff --git a/devito/types/equation.py b/devito/types/equation.py index b1c918978d..3b6625c471 100644 --- a/devito/types/equation.py +++ b/devito/types/equation.py @@ -65,7 +65,7 @@ class Eq(sympy.Eq, Evaluable, Pickable): def __new__(cls, lhs, rhs=0, subdomain=None, coefficients=None, implicit_dims=None, **kwargs): if coefficients is not None: - deprecations.coeff_warn + _ = deprecations.coeff_warn kwargs['evaluate'] = False # Backward compatibility rhs = cls._apply_coeffs(rhs, coefficients) @@ -127,7 +127,7 @@ def _flatten(self): if self.lhs.is_Matrix: # Maps the Equations to retrieve the rhs from relevant lhs try: - eqs = dict(zip(self.lhs, self.rhs)) + eqs = dict(zip(self.lhs, self.rhs, strict=True)) except TypeError: # Same rhs for all lhs assert not self.rhs.is_Matrix @@ -183,7 +183,7 @@ def xreplace(self, rules): return self.func(self.lhs.xreplace(rules), self.rhs.xreplace(rules)) def __str__(self): - return "%s(%s, %s)" % (self.__class__.__name__, self.lhs, self.rhs) + return f"{self.__class__.__name__}({self.lhs}, {self.rhs})" __repr__ = __str__ @@ -198,7 +198,7 @@ class Reduction(Eq): is_Reduction = True def __str__(self): - return "%s(%s, %s)" % (self.__class__.__name__, self.lhs, self.rhs) + return f"{self.__class__.__name__}({self.lhs}, {self.rhs})" __repr__ = __str__ diff --git a/devito/types/grid.py b/devito/types/grid.py index f3e6c6b90b..e166dd1262 100644 --- 
a/devito/types/grid.py +++ b/devito/types/grid.py @@ -1,4 +1,3 @@ -from abc import ABC from collections import namedtuple from functools import cached_property from itertools import product @@ -27,7 +26,7 @@ GlobalLocal = namedtuple('GlobalLocal', 'glb loc') -class CartesianDiscretization(ABC): +class CartesianDiscretization: """ Abstract base class for objects representing discretizations of n-dimensional @@ -160,18 +159,22 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, ndim = len(shape) assert ndim <= 3 dim_names = self._default_dimensions[:ndim] - dim_spacing = tuple(Spacing(name='h_%s' % n, dtype=dtype, is_const=True) - for n in dim_names) - dimensions = tuple(SpaceDimension(name=n, spacing=s) - for n, s in zip(dim_names, dim_spacing)) + dim_spacing = tuple( + Spacing(name=f'h_{n}', dtype=dtype, is_const=True) + for n in dim_names + ) + dimensions = tuple( + SpaceDimension(name=n, spacing=s) + for n, s in zip(dim_names, dim_spacing, strict=True) + ) else: for d in dimensions: if not d.is_Space: - raise ValueError("Cannot create Grid with Dimension `%s` " - "since it's not a SpaceDimension" % d) + raise ValueError(f"Cannot create Grid with Dimension `{d}` " + "since it's not a SpaceDimension") if d.is_Derived and not d.is_Conditional: - raise ValueError("Cannot create Grid with derived Dimension `%s` " - "of type `%s`" % (d, type(d))) + raise ValueError(f"Cannot create Grid with derived Dimension `{d}` " + f"of type `{type(d)}`") dimensions = dimensions super().__init__(shape, dimensions, dtype) @@ -183,9 +186,8 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, if len(topology) == len(self.shape): self._topology = topology else: - warning("Ignoring the provided topology `%s` as it " - "is incompatible with the grid shape `%s`" % - (topology, self.shape)) + warning(f"Ignoring the provided topology `{topology}` as it " + f"is incompatible with the grid shape `{self.shape}`") self._topology = None else: self._topology 
= None @@ -198,7 +200,7 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, # The origin of the grid origin = as_tuple(origin or tuple(0. for _ in self.shape)) self._origin = tuple(dtype(o) for o in origin) - self._origin_symbols = tuple(Scalar(name='o_%s' % d.name, dtype=dtype, + self._origin_symbols = tuple(Scalar(name=f'o_{d.name}', dtype=dtype, is_const=True) for d in self.dimensions) @@ -212,22 +214,21 @@ def __init__(self, shape, extent=None, origin=None, dimensions=None, self._stepping_dim = SteppingDimension(name='t', parent=self.time_dim) elif isinstance(time_dimension, TimeDimension): self._time_dim = time_dimension - self._stepping_dim = SteppingDimension(name='%s_s' % self.time_dim.name, + self._stepping_dim = SteppingDimension(name=f'{self.time_dim.name}_s', parent=self.time_dim) else: raise ValueError("`time_dimension` must be None or of type TimeDimension") # Initialize SubDomains for legacy interface if subdomains is not None: - deprecations.subdomain_warn + _ = deprecations.subdomain_warn self._subdomains = tuple(i for i in (Domain(), Interior(), *as_tuple(subdomains))) for i in self._subdomains: i.__subdomain_finalize_legacy__(self) def __repr__(self): - return "Grid[extent=%s, shape=%s, dimensions=%s]" % ( - self.extent, self.shape, self.dimensions - ) + return 'Grid' + \ + f'[extent={self.extent}, shape={self.shape}, dimensions={self.dimensions}]' @property def extent(self): @@ -247,7 +248,7 @@ def origin_symbols(self): @property def origin_map(self): """Map between origin symbols and their values.""" - return dict(zip(self.origin_symbols, self.origin)) + return dict(zip(self.origin_symbols, self.origin, strict=True)) @property def origin_ioffset(self): @@ -259,8 +260,13 @@ def origin_ioffset(self): @property def origin_offset(self): """Physical offset of the local (per-process) origin from the domain origin.""" - return DimensionTuple(*[i*h for i, h in zip(self.origin_ioffset, self.spacing)], - getters=self.dimensions) + 
return DimensionTuple( + *[ + i*h + for i, h in zip(self.origin_ioffset, self.spacing, strict=True) + ], + getters=self.dimensions + ) @property def time_dim(self): @@ -302,7 +308,7 @@ def spacing_symbols(self): def spacing_map(self): """Map between spacing symbols and their values for each SpaceDimension.""" mapper = {} - for d, s in zip(self.dimensions, self.spacing): + for d, s in zip(self.dimensions, self.spacing, strict=True): if d.is_Conditional: # Special case subsampling: `Grid.dimensions` -> (xb, yb, zb)` # where `xb, yb, zb` are ConditionalDimensions whose parents @@ -313,7 +319,9 @@ def spacing_map(self): # the SpaceDimensions mapper[d.spacing] = s else: - assert False + raise AssertionError( + 'Cannot map between spacing symbol for SpaceDimension' + ) return mapper @@ -325,8 +333,10 @@ def shape_local(self): @property def size_map(self): """Map between SpaceDimensions and their global/local size.""" - return {d: GlobalLocal(g, l) - for d, g, l in zip(self.dimensions, self.shape, self.shape_local)} + return { + d: GlobalLocal(g, l) + for d, g, l in zip(self.dimensions, self.shape, self.shape_local, strict=True) + } @property def topology(self): @@ -463,7 +473,7 @@ def __hash__(self): return hash((self.name, self.dimensions, self.shape, self.dtype)) def __str__(self): - return "%s[%s%s]" % (self.__class__.__name__, self.name, self.dimensions) + return f"{self.__class__.__name__}[{self.name}{self.dimensions}]" __repr__ = __str__ @@ -528,15 +538,17 @@ def comm(self): """The MPI communicator inherited from the distributor.""" if self.grid: return self.grid.comm - raise ValueError("`SubDomain` %s has no `Grid` attached and thus no `comm`" - % self.name) + raise ValueError( + f'`SubDomain` {self.name} has no `Grid` attached and thus no `comm`' + ) def _arg_values(self, **kwargs): try: return self.grid._arg_values(**kwargs) - except AttributeError: - raise AttributeError("%s is not attached to a Grid and has no _arg_values" - % self) + except AttributeError as 
e: + raise AttributeError( + f'{self} is not attached to a Grid and has no _arg_values' + ) from e class SubDomain(AbstractSubDomain): @@ -604,8 +616,12 @@ def __subdomain_finalize_legacy__(self, grid): # Create the SubDomain's SubDimensions sub_dimensions = [] sdshape = [] - for k, v, s in zip(self.define(grid.dimensions).keys(), - self.define(grid.dimensions).values(), grid.shape): + for k, v, s in zip( + self.define(grid.dimensions).keys(), + self.define(grid.dimensions).values(), + grid.shape, + strict=True + ): if isinstance(v, Dimension): sub_dimensions.append(v) sdshape.append(s) @@ -624,11 +640,15 @@ def __subdomain_finalize_legacy__(self, grid): constructor = {'left': SubDimension.left, 'right': SubDimension.right}.get(side) if constructor is None: - raise ValueError(f"Expected sides 'left|right', not `{side}`") + raise ValueError( + f"Expected sides 'left|right', not `{side}`" + ) from None if s - thickness < 0: - raise ValueError(f"Maximum thickness of dimension {k.name} " - f"is {s}, not {thickness}") + raise ValueError( + f"Maximum thickness of dimension {k.name} " + f"is {s}, not {thickness}" + ) from None sub_dimensions.append(constructor(f'i{k.name}', k, thickness)) sdshape.append(thickness) @@ -643,8 +663,10 @@ def shape_local(self): @property def size_map(self): """Map between SpaceDimensions and their global/local size.""" - return {d: GlobalLocal(g, l) - for d, g, l in zip(self.dimensions, self.shape, self.shape_local)} + return { + d: GlobalLocal(g, l) + for d, g, l in zip(self.dimensions, self.shape, self.shape_local, strict=True) + } def define(self, dimensions): """ @@ -661,9 +683,10 @@ def define(self, dimensions): def _arg_names(self): try: ret = self.grid._arg_names - except AttributeError: - msg = f"{self} is not attached to a Grid and has no _arg_names" - raise AttributeError(msg) + except AttributeError as e: + raise AttributeError( + f'{self} is not attached to a Grid and has no _arg_names' + ) from e # Names for SubDomain thicknesses 
thickness_names = tuple([k.name for k in d._thickness_map] @@ -825,7 +848,7 @@ def __init__(self, **kwargs): super().__init__(**kwargs) try: - self.implicit_dimension + _ = self.implicit_dimension warning("`implicit_dimension` is deprecated. You may safely remove it " "from the class definition") except AttributeError: @@ -846,7 +869,7 @@ def __subdomain_finalize_core__(self, grid): shapes = [] for i in range(self._n_domains): dshape = [] - for s, m, M in zip(grid.shape, d_m, d_M): + for s, m, M in zip(grid.shape, d_m, d_M, strict=True): assert(m.size == M.size) dshape.append(s-m[i]-M[i]) shapes.append(as_tuple(dshape)) @@ -855,7 +878,7 @@ def __subdomain_finalize_core__(self, grid): if grid.distributor and grid.distributor.is_parallel: # Now create local bounds based on distributor processed = [] - for dec, m, M in zip(grid.distributor.decomposition, d_m, d_M): + for dec, m, M in zip(grid.distributor.decomposition, d_m, d_M, strict=True): processed.extend(self._bounds_glb_to_loc(dec, m, M)) self._local_bounds = as_tuple(processed) else: @@ -1086,7 +1109,7 @@ def _parse_border(border: BorderSpec, grid: Grid, raise ValueError(f"Length of {mode} specification should " "match number of dimensions") retval = [] - for b, d in zip(border, grid.dimensions): + for b, d in zip(border, grid.dimensions, strict=True): if isinstance(b, tuple): if not len(b) == 2: raise ValueError(f"{b}: more than two thicknesses supplied " @@ -1142,7 +1165,13 @@ def _build_domains_nooverlap(self, grid: Grid) -> tuple[int, tuple[np.ndarray]]: # Unpack the user-provided specification into a set of sides (on which # a cartesian product is taken) and a mapper from those sides to a set of # bounds for each dimension. 
- for d, s, b, i in zip(grid.dimensions, grid.shape, self.border, self.inset): + for d, s, b, i in zip( + grid.dimensions, + grid.shape, + self.border, + self.inset, + strict=True + ): if d in self.border_dims: side = self.border_dims[d] @@ -1173,12 +1202,14 @@ def _build_domains_nooverlap(self, grid: Grid) -> tuple[int, tuple[np.ndarray]]: maybe_domains = list(product(*domain_map.values())) domains = [] for d in maybe_domains: - if not all(i is CENTER for i in d): + if not all(i is CENTER for i in d): # noqa: SIM102 # Don't add any domains that are completely centered if self.corners != 'nocorners' or any(i is CENTER for i in d): # Don't add corners if 'no corners' option selected - domains.append([interval_map[dim][dom] for (dim, dom) - in zip(grid.dimensions, d)]) + domains.append([ + interval_map[dim][dom] + for (dim, dom) in zip(grid.dimensions, d, strict=True) + ]) domains = np.array(domains) @@ -1201,7 +1232,7 @@ class Domain(SubDomain): name = 'domain' def define(self, dimensions): - return dict(zip(dimensions, dimensions)) + return dict(zip(dimensions, dimensions, strict=True)) class Interior(SubDomain): diff --git a/devito/types/lazy.py b/devito/types/lazy.py index f4279327d9..3ac1b0540a 100644 --- a/devito/types/lazy.py +++ b/devito/types/lazy.py @@ -23,7 +23,10 @@ def _evaluate_maybe_nested(cls, maybe_evaluable, **kwargs): if maybe_evaluable.args: args = [Evaluable._evaluate_maybe_nested(i, **kwargs) for i in maybe_evaluable.args] - evaluate = not all(i is j for i, j in zip(args, maybe_evaluable.args)) + evaluate = not all( + i is j + for i, j in zip(args, maybe_evaluable.args, strict=True) + ) try: return maybe_evaluable.func(*args, evaluate=evaluate) except TypeError: @@ -52,7 +55,7 @@ def _evaluate(self, **kwargs): property `evaluate`. 
""" args = self._evaluate_args(**kwargs) - evaluate = not all(i is j for i, j in zip(args, self.args)) + evaluate = not all(i is j for i, j in zip(args, self.args, strict=True)) return self.func(*args, evaluate=evaluate) @cached_property diff --git a/devito/types/misc.py b/devito/types/misc.py index 0067d8b318..571197b717 100644 --- a/devito/types/misc.py +++ b/devito/types/misc.py @@ -109,7 +109,7 @@ def __new__(cls, base, *args, strides_map=None, accessor=None): return obj def __repr__(self): - return "%s(%s)" % (self.name, ", ".join(str(i) for i in self.indices)) + return "{}({})".format(self.name, ", ".join(str(i) for i in self.indices)) __str__ = __repr__ @@ -156,7 +156,10 @@ def bind(self, pname): macroargnames = [d.name for d in f.dimensions] macroargs = [MacroArgument(i) for i in macroargnames] - items = [m*strides_map[d] for m, d in zip(macroargs, f.dimensions[1:])] + items = [ + m*strides_map[d] + for m, d in zip(macroargs, f.dimensions[1:], strict=False) + ] items.append(MacroArgument(f.dimensions[-1].name)) define = DefFunction(pname, macroargnames) @@ -356,8 +359,10 @@ def __init__(self, opening, **kwargs): self.opening = opening def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, - 'OPEN' if self.opening else 'CLOSE') + return "{}({})".format( + self.__class__.__name__, + 'OPEN' if self.opening else 'CLOSE' + ) __str__ = __repr__ diff --git a/devito/types/object.py b/devito/types/object.py index 51790b35bc..4c30e6f8ff 100644 --- a/devito/types/object.py +++ b/devito/types/object.py @@ -1,3 +1,4 @@ +from contextlib import suppress from ctypes import byref import sympy @@ -207,11 +208,9 @@ def free_symbols(self): ret = set() ret.update(super().free_symbols) for i in self.cargs: - try: + with suppress(AttributeError): + # AttributeError with pure integers ret.update(i.free_symbols) - except AttributeError: - # E.g., pure integers - pass return ret @property diff --git a/devito/types/parallel.py b/devito/types/parallel.py index 
89ebaab520..891eaad176 100644 --- a/devito/types/parallel.py +++ b/devito/types/parallel.py @@ -72,7 +72,7 @@ def _arg_defaults(self, **kwargs): try: npthreads = kwargs['metadata']['npthreads'] except KeyError: - raise InvalidArgument("Cannot determine `npthreads`") + raise InvalidArgument("Cannot determine `npthreads`") from None # If a symbolic object, it must be resolved if isinstance(npthreads, NPThreads): @@ -124,8 +124,9 @@ def _arg_values(self, **kwargs): if v < self.size: return {self.name: v} else: - raise InvalidArgument("Illegal `%s=%d`. It must be `%s<%d`" - % (self.name, v, self.name, self.size)) + raise InvalidArgument( + f'Illegal `{self.name}={v}`. It must be `{self.name}<{self.size}`' + ) else: return self._arg_defaults() @@ -251,10 +252,12 @@ def __init_finalize__(self, *args, **kwargs): dimensions = as_tuple(kwargs.get('dimensions')) if len(dimensions) != 1: - raise ValueError("Expected exactly one Dimension, got `%d`" % len(dimensions)) + raise ValueError( + f'Expected exactly one Dimension, got `{len(dimensions)}`' + ) d, = dimensions if not is_integer(d.symbolic_size): - raise ValueError("`%s` must have fixed size" % d) + raise ValueError(f"`{d}` must have fixed size") kwargs.setdefault('initvalue', np.full(d.symbolic_size, 2, dtype=np.int32)) super().__init_finalize__(*args, **kwargs) diff --git a/devito/types/sparse.py b/devito/types/sparse.py index cea1f65664..c319395419 100644 --- a/devito/types/sparse.py +++ b/devito/types/sparse.py @@ -1,4 +1,5 @@ from collections import OrderedDict +from contextlib import suppress from functools import cached_property from itertools import product @@ -85,7 +86,7 @@ def __indices_setup__(cls, *args, **kwargs): except (KeyError, AttributeError): continue else: - sparse_dim = Dimension(name='p_%s' % kwargs["name"]) + sparse_dim = Dimension(name='p_{}'.format(kwargs["name"])) dimensions = as_tuple(kwargs.get('dimensions')) if not dimensions: @@ -119,7 +120,7 @@ def __shape_setup__(cls, **kwargs): else: 
loc_shape = [] assert len(dimensions) == len(shape) - for i, (d, s) in enumerate(zip(dimensions, shape)): + for i, (d, s) in enumerate(zip(dimensions, shape, strict=True)): if i == cls._sparse_position or \ (cls._sparse_position == -1 and i == len(dimensions)-1): loc_shape.append(glb_npoint[grid.distributor.myrank]) @@ -170,7 +171,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs): return None # Shape and dimensions from args - name = '%s_%s' % (self.name, suffix) + name = f'{self.name}_{suffix}' if key is not None and not isinstance(key, SubFunction): key = np.array(key) @@ -209,8 +210,10 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs): if shape != key.shape and \ key.shape != (shape[1],) and \ self._distributor.nprocs == 1: - raise ValueError("Incompatible shape for %s, `%s`; expected `%s`" % - (suffix, key.shape[:2], shape)) + raise ValueError( + f'Incompatible shape for {suffix}, `{key.shape[:2]}`;' + f'expected `{shape}`' + ) # Infer dtype if np.issubdtype(key.dtype.type, np.integer): @@ -221,10 +224,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs): # Whether to initialize the subfunction with the provided data # Useful when rebuilding with a placeholder array only used to # infer shape and dtype and set the actual data later - if kwargs.get('init_subfunc', True): - init = {'initializer': key} - else: - init = {} + init = {'initializer': key} if kwargs.get('init_subfunc', True) else {} # Complex coordinates are not valid, so fall back to corresponding # real floating point type if dtype is complex. @@ -240,7 +240,7 @@ def __subfunc_setup__(self, suffix, keys, dtype=None, inkwargs=False, **kwargs): # running with MPI and some processes get 0-size arrays after # domain decomposition. 
We "touch" the data anyway to avoid the # case ``self._data is None`` - sf.data + _ = sf.data return sf @@ -353,7 +353,7 @@ def coordinates_data(self): @cached_property def _pos_symbols(self): - return [Symbol(name='pos%s' % d, dtype=np.int32) + return [Symbol(name=f'pos{d}', dtype=np.int32) for d in self.grid.dimensions] @cached_property @@ -371,11 +371,16 @@ def _position_map(self): Symbols map for the physical position of the sparse points relative to the grid origin. """ - return OrderedDict([((c - o)/d.spacing, p) - for p, c, d, o in zip(self._pos_symbols, - self._coordinate_symbols, - self.grid.dimensions, - self.grid.origin_symbols)]) + return OrderedDict([ + ((c - o)/d.spacing, p) + for p, c, d, o in zip( + self._pos_symbols, + self._coordinate_symbols, + self.grid.dimensions, + self.grid.origin_symbols, + strict=True + ) + ]) @cached_property def dist_origin(self): @@ -415,7 +420,11 @@ def guard(self, expr=None): temps = self.interpolator._positions(self.dimensions) # Create positions and indices temporaries/indirections - for ((di, d), pos) in zip(enumerate(self.grid.dimensions), pmap.values()): + for d, pos in zip( + self.grid.dimensions, + pmap.values(), + strict=True + ): # Add conditional to avoid OOB lb = sympy.And(pos >= d.symbolic_min, evaluate=False) ub = sympy.And(pos <= d.symbolic_max, evaluate=False) @@ -482,7 +491,7 @@ def _dist_alltoall(self, dmap=None): # Per-rank shape of send/recv data sshape = [] rshape = [] - for s, r in zip(ssparse, rsparse): + for s, r in zip(ssparse, rsparse, strict=True): handle = list(self.shape) handle[self._sparse_position] = s sshape.append(tuple(handle)) @@ -586,7 +595,8 @@ def _dist_subfunc_scatter(self, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out + # via an Alltoallv sfuncd = subfunc.data._local[mask[self._sparse_position]] 
# Send out the sparse point SubFunction @@ -608,10 +618,8 @@ def _dist_data_gather(self, data): return # Compute dist map only once - try: + with suppress(AttributeError): data = self._C_as_ndarray(data) - except AttributeError: - pass dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) @@ -630,10 +638,8 @@ def _dist_data_gather(self, data): self._data[mask] = gathered[:] def _dist_subfunc_gather(self, sfuncd, subfunc): - try: + with suppress(AttributeError): sfuncd = subfunc._C_as_ndarray(sfuncd) - except AttributeError: - pass # If not using MPI, don't waste time if self._distributor.nprocs == 1 or self.is_local: return @@ -642,7 +648,8 @@ def _dist_subfunc_gather(self, sfuncd, subfunc): dmap = self._dist_datamap mask = self._dist_scatter_mask(dmap=dmap) - # Pack (reordered) SubFunction values so that they can be sent out via an Alltoallv + # Pack (reordered) SubFunction values so that they can be sent out + # via an Alltoallv if self.dist_origin[subfunc] is not None: sfuncd = sfuncd + np.array(self.dist_origin[subfunc], dtype=subfunc.dtype) @@ -695,7 +702,7 @@ def _arg_defaults(self, alias=None, estimate_memory=False): # self's local domain only for k, v in self._dist_scatter(alias=alias).items(): args[mapper[k].name] = v - for i, s in zip(mapper[k].indices, v.shape): + for i, s in zip(mapper[k].indices, v.shape, strict=True): args.update(i._arg_defaults(_min=0, size=s)) return args @@ -713,7 +720,7 @@ def _arg_values(self, estimate_memory=False, **kwargs): values = {} for k, v in self._dist_scatter(data=new).items(): values[k.name] = v - for i, s in zip(k.indices, v.shape): + for i, s in zip(k.indices, v.shape, strict=True): size = s - sum(k._size_nodomain[i]) values.update(i._arg_defaults(size=size)) else: @@ -729,7 +736,7 @@ def _arg_apply(self, dataobj, alias=None): key._dist_data_gather(dataobj) elif self._distributor.nprocs > 1: raise NotImplementedError("Don't know how to gather data from an " - "object of type `%s`" % type(key)) + 
f"object of type `{type(key)}`") class AbstractSparseTimeFunction(AbstractSparseFunction): @@ -1206,8 +1213,10 @@ def __init_finalize__(self, *args, **kwargs): if nr == r: r = r // 2 else: - raise ValueError("Interpolation coefficients shape %d do " - "not match specified radius %d" % (r, nr)) + raise ValueError( + f'Interpolation coefficients shape {r} do not match' + f'specified radius {nr}' + ) self._radius = r self._dist_origin.update({self._interpolation_coeffs: None}) @@ -1227,9 +1236,14 @@ def _coordinate_symbols(self): """Symbol representing the coordinate values in each Dimension.""" if self.gridpoints is not None: d_dim = self.gridpoints.dimensions[1] - return tuple([self.gridpoints._subs(d_dim, di) * d.spacing + o - for ((di, d), o) in zip(enumerate(self.grid.dimensions), - self.grid.origin)]) + return tuple([ + self.gridpoints._subs(d_dim, di) * d.spacing + o + for ((di, d), o) in zip( + enumerate(self.grid.dimensions), + self.grid.origin, + strict=True + ) + ]) else: d_dim = self.coordinates.dimensions[1] return tuple([self.coordinates._subs(d_dim, i) @@ -1252,9 +1266,14 @@ def _position_map(self): """ if self.gridpoints_data is not None: ddim = self.gridpoints.dimensions[-1] - return OrderedDict((self.gridpoints._subs(ddim, di), p) - for (di, p) in zip(range(self.grid.dim), - self._pos_symbols)) + return OrderedDict( + (self.gridpoints._subs(ddim, di), p) + for (di, p) in zip( + range(self.grid.dim), + self._pos_symbols, + strict=True + ) + ) else: return super()._position_map @@ -1453,11 +1472,11 @@ def __init_finalize__(self, *args, **kwargs): # Validate radius is set correctly for all grid Dimensions for d in self.grid.dimensions: if d not in r: - raise ValueError("dimension %s not specified in r mapping" % d) + raise ValueError(f"dimension {d} not specified in r mapping") if r[d] is None: continue if not is_integer(r[d]) or r[d] <= 0: - raise ValueError('invalid parameter value r[%s] = %s' % (d, r[d])) + raise ValueError(f'invalid parameter value 
r[{d}] = {r[d]}') # TODO is this going to cause some trouble with users of self.r? self._radius = r @@ -1476,10 +1495,10 @@ def __init_finalize__(self, *args, **kwargs): # Sources have their own Dimension # As do Locations - locdim = Dimension('loc_%s' % self.name) + locdim = Dimension(f'loc_{self.name}') self._gridpoints = SubFunction( - name="%s_gridpoints" % self.name, + name=f"{self.name}_gridpoints", dtype=np.int32, dimensions=(locdim, ddim), shape=(nloc, self.grid.dim), @@ -1494,7 +1513,7 @@ def __init_finalize__(self, *args, **kwargs): for d in self.grid.dimensions: if self._radius[d] is not None: rdim = DefaultDimension( - name='r%s_%s' % (d.name, self.name), + name=f'r{d.name}_{self.name}', default_value=self._radius[d] ) self.rdims.append(rdim) @@ -1505,7 +1524,7 @@ def __init_finalize__(self, *args, **kwargs): coeff_shape = self.grid.size_map[d].glb self.interpolation_coefficients[d] = SubFunction( - name="%s_coefficients_%s" % (self.name, d.name), + name=f"{self.name}_coefficients_{d.name}", dtype=self.dtype, dimensions=(locdim, coeff_dim), shape=(nloc, coeff_shape), @@ -1515,7 +1534,7 @@ def __init_finalize__(self, *args, **kwargs): # For the _sub_functions, these must be named attributes of # this SparseFunction object setattr( - self, "coefficients_%s" % d.name, + self, f"coefficients_{d.name}", self.interpolation_coefficients[d]) # We also need arrays to represent the sparse matrix map @@ -1523,7 +1542,7 @@ def __init_finalize__(self, *args, **kwargs): # constructing the expression, # - the mpi logic dynamically constructs arrays to feed to the # operator C code. 
- self.nnzdim = Dimension('nnz_%s' % self.name) + self.nnzdim = Dimension(f'nnz_{self.name}') # In the non-MPI case, at least, we should fill these in once if self._distributor.nprocs == 1: @@ -1533,7 +1552,7 @@ def __init_finalize__(self, *args, **kwargs): nnz_size = 1 self._mrow = DynamicSubFunction( - name='mrow_%s' % self.name, + name=f'mrow_{self.name}', dtype=np.int32, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1542,7 +1561,7 @@ def __init_finalize__(self, *args, **kwargs): allocator=self._allocator, ) self._mcol = DynamicSubFunction( - name='mcol_%s' % self.name, + name=f'mcol_{self.name}', dtype=np.int32, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1551,7 +1570,7 @@ def __init_finalize__(self, *args, **kwargs): allocator=self._allocator, ) self._mval = DynamicSubFunction( - name='mval_%s' % self.name, + name=f'mval_{self.name}', dtype=self.dtype, dimensions=(self.nnzdim,), shape=(nnz_size,), @@ -1564,12 +1583,12 @@ def __init_finalize__(self, *args, **kwargs): # coordinate of the parallelised injection Dimension # This takes the form of a list of nnz indices, and a start/end # position in that list for each index in the parallel dim - self.par_dim_to_nnz_dim = DynamicDimension('par_dim_to_nnz_%s' % self.name) + self.par_dim_to_nnz_dim = DynamicDimension(f'par_dim_to_nnz_{self.name}') # This map acts as an indirect sort of the sources according to their # position along the parallelisation dimension self._par_dim_to_nnz_map = DynamicSubFunction( - name='par_dim_to_nnz_map_%s' % self.name, + name=f'par_dim_to_nnz_map_{self.name}', dtype=np.int32, dimensions=(self.par_dim_to_nnz_dim,), # shape is unknown at this stage @@ -1578,7 +1597,7 @@ def __init_finalize__(self, *args, **kwargs): parent=self, ) self._par_dim_to_nnz_m = DynamicSubFunction( - name='par_dim_to_nnz_m_%s' % self.name, + name=f'par_dim_to_nnz_m_{self.name}', dtype=np.int32, dimensions=(self._par_dim,), # shape is unknown at this stage @@ -1587,7 +1606,7 @@ def 
__init_finalize__(self, *args, **kwargs): parent=self, ) self._par_dim_to_nnz_M = DynamicSubFunction( - name='par_dim_to_nnz_M_%s' % self.name, + name=f'par_dim_to_nnz_M_{self.name}', dtype=np.int32, dimensions=(self._par_dim,), # shape is unknown at this stage @@ -1662,7 +1681,7 @@ def par_dim_to_nnz_M(self): @property def _sub_functions(self): return ('gridpoints', - *['coefficients_%s' % d.name for d in self.grid.dimensions], + *[f'coefficients_{d.name}' for d in self.grid.dimensions], 'mrow', 'mcol', 'mval', 'par_dim_to_nnz_map', 'par_dim_to_nnz_m', 'par_dim_to_nnz_M') @@ -1836,7 +1855,7 @@ def __shape_setup__(cls, **kwargs): def _arg_names(self): """Return a tuple of argument names introduced by this function.""" return tuple([self.name, self.name + "_" + self.gridpoints.name] - + ['%s_%s' % (self.name, x.name) + + [f'{self.name}_{x.name}' for x in self.interpolation_coefficients.values()]) @property @@ -1937,9 +1956,10 @@ def _rank_to_points(self): # so we argsort inverse_argsort = np.argsort(inverse).astype(np.int32) cumulative_counts = np.cumsum(counts) - gp_map = {tuple(bi): inverse_argsort[cci-ci:cci] - for bi, cci, ci in zip(bins, cumulative_counts, counts) - } + gp_map = { + tuple(bi): inverse_argsort[cci-ci:cci] + for bi, cci, ci in zip(bins, cumulative_counts, counts, strict=True) + } # the result is now going to be a concatenation of these lists # for each of the output ranks @@ -1957,8 +1977,10 @@ def _rank_to_points(self): from itertools import product for bi in bins: # This is a list of sets for the Dimension-specific rank - dim_rank_sets = [dgdr[bii] - for dgdr, bii in zip(dim_group_dim_rank, bi)] + dim_rank_sets = [ + dgdr[bii] + for dgdr, bii in zip(dim_group_dim_rank, bi, strict=True) + ] # Convert these to an absolute rank # This is where we will throw a KeyError if there are points OOB @@ -1970,9 +1992,12 @@ def _rank_to_points(self): empty = np.array([], dtype=np.int32) - return [np.concatenate(( - empty, *[gp_map[bi] for bi in 
global_rank_to_bins.get(rank, [])])) - for rank in range(distributor.comm.Get_size())] + return [ + np.concatenate( + (empty, *[gp_map[bi] for bi in global_rank_to_bins.get(rank, [])]) + ) + for rank in range(distributor.comm.Get_size()) + ] def _build_par_dim_to_nnz(self, active_gp, active_mrow): # The case where we parallelise over a non-local index is suboptimal, but @@ -2084,7 +2109,7 @@ def manual_scatter(self, *, data_all_zero=False): # handle None radius r_tuple_no_none = tuple( ri if ri is not None else self.grid.size_map[d].glb - for ri, d in zip(r_tuple, self.grid.dimensions) + for ri, d in zip(r_tuple, self.grid.dimensions, strict=True) ) # now all ranks can allocate the buffers to receive into @@ -2134,7 +2159,7 @@ def manual_scatter(self, *, data_all_zero=False): # first, build a reduced matrix excluding any points outside our domain for idim, (dim, mycoord) in enumerate(zip( - self.grid.dimensions, distributor.mycoords)): + self.grid.dimensions, distributor.mycoords, strict=True)): _left = distributor.decomposition[idim][mycoord][0] _right = distributor.decomposition[idim][mycoord][-1] + 1 @@ -2158,7 +2183,7 @@ def manual_scatter(self, *, data_all_zero=False): # domain. Do this on all the gridpoints for now, since this is a hack # anyway for idim, (dim, mycoord) in enumerate(zip( - self.grid.dimensions, distributor.mycoords)): + self.grid.dimensions, distributor.mycoords, strict=True)): _left = distributor.decomposition[idim][mycoord][0] _right = distributor.decomposition[idim][mycoord][-1] + 1 @@ -2217,7 +2242,7 @@ def _arg_apply(self, dataobj, *subfuncs, alias=None): key._dist_gather(self._C_as_ndarray(dataobj)) elif self._distributor.nprocs > 1: raise NotImplementedError("Don't know how to gather data from an " - "object of type `%s`" % type(key)) + f"object of type `{type(key)}`") def manual_gather(self): # data, in this case, is set to whatever dist_scatter provided? 
diff --git a/devito/types/tensor.py b/devito/types/tensor.py index 7a3f41d3e0..432d8a4437 100644 --- a/devito/types/tensor.py +++ b/devito/types/tensor.py @@ -154,8 +154,10 @@ def __getattr__(self, name): return super().__getattr__(self, name) try: return self.applyfunc(lambda x: x if x == 0 else getattr(x, name)) - except: - raise AttributeError("%r object has no attribute %r" % (self.__class__, name)) + except Exception as e: + raise AttributeError( + f'{self.__class__!r} object has no attribute {name!r}' + ) from e def _eval_at(self, func): """ @@ -250,7 +252,7 @@ def div(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (ndim, ndim)) order = order or self.space_order for i in range(len(self.space_dimensions)): - comps.append(sum([getattr(self[j, i], 'd%s' % d.name) + comps.append(sum([getattr(self[j, i], f'd{d.name}') (x0=shift_x0(shift, d, i, j), fd_order=order, method=method, side=side, w=w) for j, d in enumerate(space_dims)])) @@ -296,7 +298,7 @@ def laplacian(self, shift=None, order=None, method='FD', side=None, **kwargs): ndim = len(self.space_dimensions) shift_x0 = make_shift_x0(shift, (ndim, ndim)) for j in range(ndim): - comps.append(sum([getattr(self[j, i], 'd%s2' % d.name) + comps.append(sum([getattr(self[j, i], f'd{d.name}2') (x0=shift_x0(shift, d, j, i), fd_order=order, method=method, side=side, w=w) for i, d in enumerate(space_dims)])) @@ -361,8 +363,8 @@ def __subfunc_setup__(cls, *args, **kwargs): # Custom repr and str def __str__(self): - st = ''.join([' %-2s,' % c for c in self])[1:-1] - return "Vector(%s)" % st + st = ''.join([' %-2s,' % c for c in self])[1:-1] # noqa: UP031 + return f"Vector({st})" __repr__ = __str__ @@ -390,10 +392,15 @@ def div(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),)) order = order or self.space_order space_dims = self.root_dimensions - return sum([getattr(self[i], 'd%s' % 
d.name)(x0=shift_x0(shift, d, None, i), - fd_order=order, method=method, - side=side, w=w) - for i, d in enumerate(space_dims)]) + return sum([ + getattr(self[i], f'd{d.name}')( + x0=shift_x0(shift, d, None, i), + fd_order=order, + method=method, + side=side, + w=w + ) for i, d in enumerate(space_dims) + ]) @property def laplace(self): @@ -427,11 +434,17 @@ def laplacian(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),)) order = order or self.space_order space_dims = self.root_dimensions - comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i), - fd_order=order, method=method, - side=side, w=w) - for i, d in enumerate(space_dims)]) - for s in self] + comps = [ + sum([ + getattr(s, f'd{d.name}2')( + x0=shift_x0(shift, d, None, i), + fd_order=order, + side=side, + w=w, + method=method + ) for i, d in enumerate(space_dims) + ]) for s in self + ] return func._new(comps) def curl(self, shift=None, order=None, method='FD', side=None, **kwargs): @@ -459,7 +472,7 @@ def curl(self, shift=None, order=None, method='FD', side=None, **kwargs): # The curl of a VectorFunction is a VectorFunction w = kwargs.get('weights', kwargs.get('w')) dims = self.root_dimensions - derivs = ['d%s' % d.name for d in dims] + derivs = [f'd{d.name}' for d in dims] shift_x0 = make_shift_x0(shift, (len(dims), len(dims))) order = order or self.space_order comp1 = (getattr(self[2], derivs[1])(x0=shift_x0(shift, dims[1], 2, 1), @@ -509,11 +522,17 @@ def grad(self, shift=None, order=None, method='FD', side=None, **kwargs): shift_x0 = make_shift_x0(shift, (ndim, ndim)) order = order or self.space_order space_dims = self.root_dimensions - comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j), - fd_order=order, method=method, - side=side, w=w) - for j, d in enumerate(space_dims)] - for i, f in enumerate(self)] + comps = [ + [ + getattr(f, f'd{d.name}')( + x0=shift_x0(shift, d, i, j), + side=side, + w=w, 
+ fd_order=order, + method=method + ) for j, d in enumerate(space_dims) + ] for i, f in enumerate(self) + ] return func._new(comps) def outer(self, other): diff --git a/devito/types/utils.py b/devito/types/utils.py index 25b697f2bd..fd0dfdb750 100644 --- a/devito/types/utils.py +++ b/devito/types/utils.py @@ -88,7 +88,7 @@ def __init__(self, suffix=''): self.suffix = suffix def __repr__(self): - return "Layer<%s>" % self.suffix + return f"Layer<{self.suffix}>" def __eq__(self, other): return (isinstance(other, HierarchyLayer) and diff --git a/examples/.ruff.toml b/examples/.ruff.toml new file mode 100644 index 0000000000..f517388941 --- /dev/null +++ b/examples/.ruff.toml @@ -0,0 +1,6 @@ +# Extend the `pyproject.toml` file in the parent directory +extend = "../pyproject.toml" + +# Use a different line length for examples only +# TODO: Shorten line lengths in examples +line-length = 120 diff --git a/examples/cfd/01_convection.ipynb b/examples/cfd/01_convection.ipynb index 7a7fe3b6c6..13ee3ac72e 100644 --- a/examples/cfd/01_convection.ipynb +++ b/examples/cfd/01_convection.ipynb @@ -52,7 +52,7 @@ "c = 1.\n", "dx = 2. / (nx - 1)\n", "dy = 2. / (ny - 1)\n", - "print(\"dx %s, dy %s\" % (dx, dy))\n", + "print(f\"dx {dx}, dy {dy}\")\n", "sigma = .2\n", "dt = sigma * dx" ] @@ -83,7 +83,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create field and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -109,7 +109,7 @@ "# Repeat initialisation, so we can re-run the cell\n", "init_hat(field=u, dx=dx, dy=dy, value=2.)\n", "\n", - "for n in range(nt + 1):\n", + "for _ in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", "\n", @@ -119,10 +119,11 @@ "\n", " # Apply boundary conditions.\n", " u[0, :] = 1. # left\n", - " u[-1, :] = 1. # right\n", + " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", - " u[:, -1] = 1. 
# top\n", - " # Note that in the above expressions the NumPy index -1 corresponds to the final point of the array along the indexed dimension,\n", + " u[:, -1] = 1. # top\n", + " # Note that in the above expressions the NumPy index -1 corresponds to the\n", + " # final point of the array along the indexed dimension,\n", " # i.e. here u[-1, :] is equivalent to u[80, :].\n" ] }, @@ -143,7 +144,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# A small sanity check for auto-testing\n", "assert (u[45:55, 45:55] > 1.8).all()\n", @@ -193,7 +194,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", @@ -302,7 +303,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our initial condition in both buffers.\n", @@ -364,7 +365,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset our data field and ICs in both buffers\n", "init_hat(field=u.data[0], dx=dx, dy=dy, value=2.)\n", diff --git a/examples/cfd/01_convection_revisited.ipynb b/examples/cfd/01_convection_revisited.ipynb index e7be39a448..ae762a6d16 100644 --- a/examples/cfd/01_convection_revisited.ipynb +++ b/examples/cfd/01_convection_revisited.ipynb @@ -77,7 +77,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create field and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -103,7 +103,7 @@ "# Repeat initialisation, so we can re-run the cell\n", "init_smooth(field=u, dx=dx, dy=dy)\n", "\n", - "for n in range(nt + 1):\n", + "for _ in range(nt + 1):\n", " # Copy previous result into a new buffer\n", " un = u.copy()\n", "\n", @@ -114,9 +114,9 @@ " # Apply boundary conditions.\n", " # Note: -1 here is the last index in the array, not the one at x=-1 or y=-1.\n", " u[0, :] = 1. 
# left\n", - " u[-1, :] = 1. # right\n", + " u[-1, :] = 1. # right\n", " u[:, 0] = 1. # bottom\n", - " u[:, -1] = 1. # top" + " u[:, -1] = 1. # top" ] }, { @@ -136,7 +136,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# A small sanity check for auto-testing\n", "assert (u[45:55, 45:55] > 1.8).all()\n", @@ -177,7 +177,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", @@ -277,7 +277,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our initial condition in both buffers.\n", @@ -339,7 +339,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset our data field and ICs in both buffers\n", "init_smooth(field=u.data[0], dx=dx, dy=dy)\n", diff --git a/examples/cfd/02_convection_nonlinear.ipynb b/examples/cfd/02_convection_nonlinear.ipynb index 55631f61af..9291b1444f 100644 --- a/examples/cfd/02_convection_nonlinear.ipynb +++ b/examples/cfd/02_convection_nonlinear.ipynb @@ -73,7 +73,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Allocate fields and assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -109,8 +109,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "for n in range(nt + 1): ##loop across number of time steps\n", + "# NBVAL_IGNORE_OUTPUT\n", + "for _ in range(nt + 1): # loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", " u[1:, 1:] = (un[1:, 1:] -\n", @@ -159,7 +159,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Grid, TimeFunction\n", "\n", "# First we need two time-dependent data fields, both initialized with the hat function\n", @@ -211,8 +211,8 @@ "update_u = Eq(u.forward, stencil_u, subdomain=grid.interior)\n", "update_v = Eq(v.forward, stencil_v, 
subdomain=grid.interior)\n", "\n", - "print(\"U update:\\n%s\\n\" % update_u)\n", - "print(\"V update:\\n%s\\n\" % update_v)" + "print(f\"U update:\\n{update_u}\\n\")\n", + "print(f\"V update:\\n{update_v}\\n\")" ] }, { @@ -271,7 +271,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "# Reset our data field and ICs\n", @@ -452,7 +452,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([update_U] + bc_u + bc_v)\n", "op(time=nt, dt=dt)\n", "\n", diff --git a/examples/cfd/03_diffusion.ipynb b/examples/cfd/03_diffusion.ipynb index 0fbd64c580..5406ba5861 100644 --- a/examples/cfd/03_diffusion.ipynb +++ b/examples/cfd/03_diffusion.ipynb @@ -57,11 +57,11 @@ "outputs": [], "source": [ "def diffuse(u, nt):\n", - " for n in range(nt + 1):\n", + " for _ in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", + " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " nu * dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", - " nu * dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", + " nu * dt / dx**2 * (un[2:, 1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", @@ -133,7 +133,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Initialise u with hat function\n", "u = np.empty((nx, ny))\n", @@ -141,20 +141,20 @@ "\n", "# Field initialization.\n", "# This will create 4 equally spaced 10x10 hat functions of various values.\n", - "u[ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u[ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u[ nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u[ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u[nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u[3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 3\n", + "u[nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u[3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", - 
"print (\"Initial state\")\n", + "print(\"Initial state\")\n", "plot_field(u, zmax=4.5)\n", "\n", "diffuse(u, nt)\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u, zmax=4.5)\n", "\n", "diffuse(u, nt)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u, zmax=4.5)" ] }, @@ -296,17 +296,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator, Constant, Eq, solve\n", "\n", "# Reset our data field and ICs\n", "init_hat(field=u.data[0], dx=dx, dy=dy, value=1.)\n", "\n", "# Field initialization\n", - "u.data[0][ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u.data[0][ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u.data[0][ nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u.data[0][ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u.data[0][nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u.data[0][3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 3\n", + "u.data[0][nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u.data[0][3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", "\n", "# Create an operator with second-order derivatives\n", @@ -326,11 +326,11 @@ "op = Operator([eq_stencil] + bc)\n", "op(time=nt, dt=dt, a=nu)\n", "\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=4.5)\n", "\n", "op(time=nt, dt=dt, a=nu)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=4.5)" ] }, @@ -396,23 +396,23 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u2 = TimeFunction(name='u2', grid=grid, space_order=2)\n", "init_hat(field=u2.data[0], dx=dx, dy=dy, value=1.)\n", "\n", "# Field initialization\n", - "u2.data[0][ nx//4:nx//4+10 , ny//4:ny//4+10 ] = 2\n", - "u2.data[0][ 3*nx//4:3*nx//4+10 , ny//4:ny//4+10 ] = 3\n", - "u2.data[0][ 
nx//4:nx//4+10 , 3*ny//4:3*ny//4+10 ] = 4\n", - "u2.data[0][ 3*ny//4:3*ny//4+10 , 3*ny//4:3*ny//4+10 ] = 5\n", + "u2.data[0][nx//4:nx//4+10, ny//4:ny//4+10] = 2\n", + "u2.data[0][3*nx//4:3*nx//4+10, ny//4:ny//4+10] = 3\n", + "u2.data[0][nx//4:nx//4+10, 3*ny//4:3*ny//4+10] = 4\n", + "u2.data[0][3*ny//4:3*ny//4+10, 3*ny//4:3*ny//4+10] = 5\n", "\n", "op(u=u2, time=2*nt, dt=dt, a=nu)\n", "\n", - "print (\"After\", 2*nt, \"timesteps\")\n", + "print(\"After\", 2*nt, \"timesteps\")\n", "plot_field(u2.data[0], zmax=4.5)\n", "\n", "op(u=u2, time=2*nt, dt=dt, a=nu)\n", - "print (\"After another\", 2*nt, \"timesteps\")\n", + "print(\"After another\", 2*nt, \"timesteps\")\n", "plot_field(u2.data[0], zmax=4.5)" ] } diff --git a/examples/cfd/03_diffusion_nonuniform.ipynb b/examples/cfd/03_diffusion_nonuniform.ipynb index e42a2c6a26..09121abc22 100644 --- a/examples/cfd/03_diffusion_nonuniform.ipynb +++ b/examples/cfd/03_diffusion_nonuniform.ipynb @@ -38,16 +38,16 @@ "ny = 100\n", "nt = 1000\n", "\n", - "nu = 0.15 #the value of base viscosity\n", + "nu = 0.15 # the value of base viscosity\n", "\n", - "offset = 1 # Used for field definition\n", + "offset = 1 # Used for field definition\n", "\n", - "visc = np.full((nx, ny), nu) # Initialize viscosity\n", - "visc[nx//4-offset:nx//4+offset, 1:-1] = 0.0001 # Adding a material with different viscosity\n", - "visc[1:-1,nx//4-offset:nx//4+offset ] = 0.0001\n", + "visc = np.full((nx, ny), nu) # Initialize viscosity\n", + "visc[nx//4-offset:nx//4+offset, 1:-1] = 0.0001 # Adding a material with different viscosity\n", + "visc[1:-1, nx//4-offset:nx//4+offset] = 0.0001\n", "visc[3*nx//4-offset:3*nx//4+offset, 1:-1] = 0.0001\n", "\n", - "visc_nb = visc[1:-1,1:-1]\n", + "visc_nb = visc[1:-1, 1:-1]\n", "\n", "dx = 2. / (nx - 1)\n", "dy = 2. 
/ (ny - 1)\n", @@ -63,7 +63,7 @@ "u_init[10:-10, 10:-10] = 1.5\n", "\n", "\n", - "zmax = 2.5 # zmax for plotting" + "zmax = 2.5 # zmax for plotting" ] }, { @@ -79,12 +79,12 @@ "metadata": {}, "outputs": [], "source": [ - "def diffuse(u, nt ,visc):\n", - " for n in range(nt + 1):\n", + "def diffuse(u, nt, visc):\n", + " for _ in range(nt + 1):\n", " un = u.copy()\n", - " u[1:-1, 1:-1] = (un[1:-1,1:-1] +\n", + " u[1:-1, 1:-1] = (un[1:-1, 1:-1] +\n", " visc*dt / dy**2 * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", - " visc*dt / dx**2 * (un[2:,1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", + " visc*dt / dx**2 * (un[2:, 1: -1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", " u[0, :] = 1\n", " u[-1, :] = 1\n", " u[:, 0] = 1\n", @@ -166,24 +166,24 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot material according to viscosity, uncomment to plot\n", "import matplotlib.pyplot as plt\n", - "plt.imshow(visc_nb, cmap='Greys', interpolation='nearest')\n", + "plt.imshow(visc_nb, cmap='Greys', interpolation='nearest')\n", "\n", "# Field initialization\n", "u = u_init\n", "\n", - "print (\"Initial state\")\n", + "print(\"Initial state\")\n", "plot_field(u, zmax=zmax)\n", "\n", - "diffuse(u, nt , visc_nb )\n", - "print (\"After\", nt, \"timesteps\")\n", + "diffuse(u, nt, visc_nb)\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)\n", "\n", "diffuse(u, nt, visc_nb)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)" ] }, @@ -218,14 +218,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Field initialization\n", "u = u_init\n", "\n", "\n", - "diffuse(u, nt , visc_nb)\n", - "print (\"After\", nt, \"timesteps\")\n", + "diffuse(u, nt, visc_nb)\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u, zmax=zmax)" ] }, @@ -257,15 +257,13 @@ ], "source": [ "from devito import Grid, 
TimeFunction, Eq, solve, Function\n", - "from sympy.abc import a\n", "\n", "# Initialize `u` for space order 2\n", "grid = Grid(shape=(nx, ny), extent=(2., 2.))\n", "\n", "# Create an operator with second-order derivatives\n", - "a = Function(name='a',grid = grid) # Define as Function\n", - "a.data[:]= visc # Pass the viscosity in order to be used in the operator.\n", - "\n", + "a = Function(name='a', grid=grid) # Define as Function\n", + "a.data[:] = visc # Pass the viscosity in order to be used in the operator.\n", "\n", "\n", "u = TimeFunction(name='u', grid=grid, space_order=2)\n", @@ -340,7 +338,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator, Eq, solve, Function\n", "\n", "\n", @@ -352,8 +350,8 @@ "\n", "\n", "# Create an operator with second-order derivatives\n", - "a = Function(name='a',grid = grid)\n", - "a.data[:]= visc\n", + "a = Function(name='a', grid=grid)\n", + "a.data[:] = visc\n", "\n", "eq = Eq(u.dt, a * u.laplace, subdomain=grid.interior)\n", "stencil = solve(eq, u.forward)\n", @@ -369,13 +367,13 @@ "\n", "\n", "op = Operator([eq_stencil] + bc)\n", - "op(time=nt, dt=dt, a = a)\n", + "op(time=nt, dt=dt, a=a)\n", "\n", - "print (\"After\", nt, \"timesteps\")\n", + "print(\"After\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=zmax)\n", "\n", - "op(time=nt, dt=dt, a = a)\n", - "print (\"After another\", nt, \"timesteps\")\n", + "op(time=nt, dt=dt, a=a)\n", + "print(\"After another\", nt, \"timesteps\")\n", "plot_field(u.data[0], zmax=zmax)" ] } diff --git a/examples/cfd/04_burgers.ipynb b/examples/cfd/04_burgers.ipynb index 1ad8811331..2e917238e0 100644 --- a/examples/cfd/04_burgers.ipynb +++ b/examples/cfd/04_burgers.ipynb @@ -42,10 +42,10 @@ "nx = 41 # Grid size on x axis\n", "ny = 41 # Grid size on y axis\n", "\n", - "batches = 5 # Batches of timesteps, increase number of batches to extend evolution in time\n", + "batches = 5 # Batches of timesteps, increase number of batches to 
extend evolution in time\n", "# A figure of the wave state will be produced for each batch.\n", - "batch_size = 640 # Number of timesteps for every batch\n", - "nt = batches*batch_size # Number of total timesteps\n", + "batch_size = 640 # Number of timesteps for every batch\n", + "nt = batches*batch_size # Number of total timesteps\n", "\n", "c = 1\n", "dx = 2. / (nx - 1)\n", @@ -72,7 +72,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Assign initial conditions\n", "u = np.empty((nx, ny))\n", @@ -193,9 +193,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", - "for n in range(nt + 1): ##loop across number of time steps\n", + "for n in range(nt + 1): # loop across number of time steps\n", " un = u.copy()\n", " vn = v.copy()\n", "\n", @@ -205,7 +205,7 @@ " dt / dx * vn[1:-1, 1:-1] *\n", " (un[1:-1, 1:-1] - un[0:-2, 1:-1]) +\n", " nu * dt / dy**2 *\n", - " (un[1:-1,2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", + " (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +\n", " nu * dt / dx**2 *\n", " (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1]))\n", "\n", @@ -229,10 +229,9 @@ " v[:, 0] = 1\n", " v[:, -1] = 1\n", "\n", - "\n", " # A figure of the wave state will be produced for each batch\n", - " if (n%batch_size) == 0:\n", - " print (\"Batch:\",n/(batch_size))\n", + " if (n % batch_size) == 0:\n", + " print(\"Batch:\", n/(batch_size))\n", " plot_field(u)" ] }, @@ -274,14 +273,14 @@ "t = grid.stepping_dim\n", "\n", "u1 = TimeFunction(name='u1', grid=grid, space_order=1)\n", - "print(\"Space order 1:\\n%s\\n\" % u1.dxl)\n", + "print(f\"Space order 1:\\n{u1.dxl}\\n\")\n", "\n", "u2 = TimeFunction(name='u2', grid=grid, space_order=2)\n", - "print(\"Space order 2:\\n%s\\n\" % u2.dxl)\n", + "print(f\"Space order 2:\\n{u2.dxl}\\n\")\n", "\n", "# We use u2 to create the explicit first-order derivative\n", "u1_dx = first_derivative(u2, dim=x, side=left, fd_order=1)\n", - "print(\"Explicit space 
order 1:\\n%s\\n\" % u1_dx)" + "print(f\"Explicit space order 1:\\n{u1_dx}\\n\")" ] }, { @@ -418,7 +417,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator, Constant, Eq, solve\n", "\n", "# Define our velocity fields and initialize with hat function\n", @@ -458,7 +457,7 @@ "# Execute the operator for a number of timesteps\n", "for batch_no in range(batches):\n", " op(time=batch_size, dt=dt, a=nu)\n", - " print (\"Batch:\",batch_no+1)\n", + " print(\"Batch:\", batch_no+1)\n", " plot_field(u.data[0])\n" ] }, @@ -701,12 +700,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([update_U] + bc_U)\n", "# Execute the operator for a number of timesteps\n", "for batch_no in range(batches):\n", " op(time=batch_size, dt=dt, a=nu)\n", - " print (\"Batch:\",batch_no+1)\n", + " print(\"Batch:\", batch_no+1)\n", " plot_field(U[0].data[0])" ] } diff --git a/examples/cfd/05_laplace.ipynb b/examples/cfd/05_laplace.ipynb index 74ee48608e..8ba567936d 100644 --- a/examples/cfd/05_laplace.ipynb +++ b/examples/cfd/05_laplace.ipynb @@ -93,7 +93,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Our initial condition is 0 everywhere, except at the boundary\n", "p = np.zeros((ny, nx))\n", @@ -125,7 +125,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "p = laplace2d(p, bc_right, dx, dy, 1e-4)\n", "plot_field(p, ymax=1.0, view=(30, 225))" @@ -181,7 +181,7 @@ "\n", "# In the resulting stencil `pn` is exclusively used on the RHS\n", "# and `p` on the LHS is the grid the kernel will update\n", - "print(\"Update stencil:\\n%s\\n\" % eq_stencil)" + "print(f\"Update stencil:\\n{eq_stencil}\\n\")" ] }, { @@ -272,7 +272,7 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Silence the runtime performance logging\n", "from devito import configuration\n", @@ -345,7 +345,7 @@ ], "source": [ 
"%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Initialise the two buffer fields\n", "p.data[:] = 0.\n", diff --git a/examples/cfd/06_poisson.ipynb b/examples/cfd/06_poisson.ipynb index 8fd2e7875e..eff42e3a5b 100644 --- a/examples/cfd/06_poisson.ipynb +++ b/examples/cfd/06_poisson.ipynb @@ -38,15 +38,14 @@ "# Some variable declarations\n", "nx = 50\n", "ny = 50\n", - "nt = 100\n", + "nt = 100\n", "xmin = 0.\n", "xmax = 2.\n", "ymin = 0.\n", "ymax = 1.\n", "\n", "dx = (xmax - xmin) / (nx - 1)\n", - "dy = (ymax - ymin) / (ny - 1)\n", - "\n" + "dy = (ymax - ymin) / (ny - 1)\n" ] }, { @@ -56,12 +55,12 @@ "outputs": [], "source": [ "# Initialization\n", - "p = np.zeros((nx, ny))\n", + "p = np.zeros((nx, ny))\n", "pd = np.zeros((nx, ny))\n", - "b = np.zeros((nx, ny))\n", + "b = np.zeros((nx, ny))\n", "\n", "# Source\n", - "b[int(nx / 4), int(ny / 4)] = 100\n", + "b[int(nx / 4), int(ny / 4)] = 100\n", "b[int(3 * nx / 4), int(3 * ny / 4)] = -100" ] }, @@ -81,10 +80,10 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", - "for it in range(nt):\n", + "# NBVAL_IGNORE_OUTPUT\n", + "for _ in range(nt):\n", " pd = p.copy()\n", - " p[1:-1,1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", + " p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n", " (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -\n", " b[1:-1, 1:-1] * dx**2 * dy**2) /\n", " (2 * (dx**2 + dy**2)))\n", @@ -92,8 +91,7 @@ " p[0, :] = 0\n", " p[nx-1, :] = 0\n", " p[:, 0] = 0\n", - " p[:, ny-1] = 0\n", - "\n" + " p[:, ny-1] = 0\n" ] }, { @@ -113,7 +111,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_field(p, xmax=xmax, ymax=ymax, view=(30, 225))" ] }, @@ -144,7 +142,7 @@ "# Initialise the source term `b`\n", "b = Function(name='b', grid=grid)\n", "b.data[:] = 0.\n", - "b.data[int(nx / 4), int(ny / 4)] = 100\n", + "b.data[int(nx / 4), int(ny / 4)] = 100\n", "b.data[int(3 * nx / 4), int(3 * ny / 4)] = -100\n", "\n", "# Create Laplace equation 
base on `pd`\n", @@ -182,7 +180,7 @@ ], "source": [ "%%time\n", - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Run the outer loop explicitly in Python\n", "for i in range(nt):\n", @@ -215,7 +213,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot result\n", "plot_field(p.data, xmax=xmax, ymax=ymax, view=(30, 225))" ] @@ -246,7 +244,7 @@ "# Initialise the source term `b`\n", "b = Function(name='b', grid=grid)\n", "b.data[:] = 0.\n", - "b.data[int(nx / 4), int(ny / 4)] = 100\n", + "b.data[int(nx / 4), int(ny / 4)] = 100\n", "b.data[int(3 * nx / 4), int(3 * ny / 4)] = -100\n", "\n", "# Create Laplace equation base on `p`\n", @@ -292,7 +290,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "configuration['log-level'] = 'ERROR'\n", "# Create and execute the operator for a number of timesteps\n", "op = Operator([eq_stencil] + bc)\n", @@ -316,7 +314,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_field(p.data[0], xmax=xmax, ymax=ymax, view=(30, 225))" ] } diff --git a/examples/cfd/07_cavity_flow.ipynb b/examples/cfd/07_cavity_flow.ipynb index f2c8506082..132305efaa 100644 --- a/examples/cfd/07_cavity_flow.ipynb +++ b/examples/cfd/07_cavity_flow.ipynb @@ -122,7 +122,7 @@ "dy = 1. 
/ (ny - 1)\n", "x = np.linspace(0, 1, nx)\n", "y = np.linspace(0, 1, ny)\n", - "Y, X = np.meshgrid(x, y)\n", + "Y, X = np.meshgrid(x, y)\n", "\n", "rho = 1\n", "nu = .1\n", @@ -176,15 +176,15 @@ " pn = np.empty_like(p)\n", " pn = p.copy()\n", "\n", - " for q in range(nit):\n", + " for _ in range(nit):\n", " pn = p.copy()\n", " p[1:-1, 1:-1] = (((pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dy**2 +\n", " (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dx**2) /\n", " (2 * (dx**2 + dy**2)) -\n", " dx**2 * dy**2 / (2 * (dx**2 + dy**2)) *\n", - " b[1:-1,1:-1])\n", + " b[1:-1, 1:-1])\n", "\n", - " p[-1, :] = p[-2, :] # dp/dx = 0 at x = 2\n", + " p[-1, :] = p[-2, :] # dp/dx = 0 at x = 2\n", " p[:, 0] = p[:, 1] # dp/dy = 0 at y = 0\n", " p[0, :] = p[1, :] # dp/dx = 0 at x = 0\n", " p[:, -1] = p[:, -2] # p = 0 at y = 2\n", @@ -211,7 +211,7 @@ " vn = np.empty_like(v)\n", " b = np.zeros((nx, ny))\n", "\n", - " for n in range(0,nt):\n", + " for _ in range(0, nt):\n", " un = u.copy()\n", " vn = v.copy()\n", "\n", @@ -230,7 +230,7 @@ " dt / dy**2 *\n", " (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2])))\n", "\n", - " v[1:-1,1:-1] = (vn[1:-1, 1:-1] -\n", + " v[1:-1, 1:-1] = (vn[1:-1, 1:-1] -\n", " un[1:-1, 1:-1] * dt / dx *\n", " (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -\n", " vn[1:-1, 1:-1] * dt / dy *\n", @@ -241,14 +241,14 @@ " dt / dy**2 *\n", " (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2])))\n", "\n", - " u[:, 0] = 0\n", - " u[0, :] = 0\n", + " u[:, 0] = 0\n", + " u[0, :] = 0\n", " u[-1, :] = 0\n", " u[:, -1] = 1 # Set velocity on cavity lid equal to 1\n", "\n", - " v[:, 0] = 0\n", + " v[:, 0] = 0\n", " v[:, -1] = 0\n", - " v[0, :] = 0\n", + " v[0, :] = 0\n", " v[-1, :] = 0\n", "\n", " return u, v, p, pn" @@ -271,7 +271,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u = np.zeros((nx, ny))\n", "v = np.zeros((nx, ny))\n", "p = np.zeros((nx, ny))\n", @@ -314,36 +314,36 @@ "source": [ "# Import u values at x=L/2 (table 6, column 2 rows 12-26) in Marchi et al.\n", 
"Marchi_Re10_u = np.array([[0.0625, -3.85425800e-2],\n", - " [0.125, -6.96238561e-2],\n", + " [0.125, -6.96238561e-2],\n", " [0.1875, -9.6983962e-2],\n", - " [0.25, -1.22721979e-1],\n", + " [0.25, -1.22721979e-1],\n", " [0.3125, -1.47636199e-1],\n", - " [0.375, -1.71260757e-1],\n", + " [0.375, -1.71260757e-1],\n", " [0.4375, -1.91677043e-1],\n", - " [0.5, -2.05164738e-1],\n", + " [0.5, -2.05164738e-1],\n", " [0.5625, -2.05770198e-1],\n", - " [0.625, -1.84928116e-1],\n", + " [0.625, -1.84928116e-1],\n", " [0.6875, -1.313892353e-1],\n", - " [0.75, -3.1879308e-2],\n", - " [0.8125, 1.26912095e-1],\n", - " [0.875, 3.54430364e-1],\n", - " [0.9375, 6.50529292e-1]])\n", + " [0.75, -3.1879308e-2],\n", + " [0.8125, 1.26912095e-1],\n", + " [0.875, 3.54430364e-1],\n", + " [0.9375, 6.50529292e-1]])\n", "# Import v values at y=L/2 (table 6, column 2 rows 27-41) in Marchi et al.\n", "Marchi_Re10_v = np.array([[0.0625, 9.2970121e-2],\n", - " [0.125, 1.52547843e-1],\n", + " [0.125, 1.52547843e-1],\n", " [0.1875, 1.78781456e-1],\n", - " [0.25, 1.76415100e-1],\n", + " [0.25, 1.76415100e-1],\n", " [0.3125, 1.52055820e-1],\n", - " [0.375, 1.121477612e-1],\n", + " [0.375, 1.121477612e-1],\n", " [0.4375, 6.21048147e-2],\n", - " [0.5, 6.3603620e-3],\n", - " [0.5625,-5.10417285e-2],\n", + " [0.5, 6.3603620e-3],\n", + " [0.5625, -5.10417285e-2],\n", " [0.625, -1.056157259e-1],\n", - " [0.6875,-1.51622101e-1],\n", - " [0.75, -1.81633561e-1],\n", - " [0.8125,-1.87021651e-1],\n", + " [0.6875, -1.51622101e-1],\n", + " [0.75, -1.81633561e-1],\n", + " [0.8125, -1.87021651e-1],\n", " [0.875, -1.59898186e-1],\n", - " [0.9375,-9.6409942e-2]])" + " [0.9375, -9.6409942e-2]])" ] }, { @@ -363,22 +363,22 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Check results with Marchi et al 2009.\n", - "npgrid=[nx,ny]\n", + "npgrid = [nx, ny]\n", "\n", "x_coord = np.linspace(0, 1, npgrid[0])\n", "y_coord = np.linspace(0, 1, npgrid[1])\n", "\n", "fig = pyplot.figure(figsize=(12, 
6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(a[int(npgrid[0]/2),:],y_coord[:])\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(a[int(npgrid[0]/2), :], y_coord[:])\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],b[:,int(npgrid[1]/2)])\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], b[:, int(npgrid[1]/2)])\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", "\n", @@ -463,42 +463,42 @@ "# --------------------------------------\n", "u = TimeFunction(name='u', grid=grid, space_order=2)\n", "v = TimeFunction(name='v', grid=grid, space_order=2)\n", - "p = TimeFunction(name='p', grid=grid, space_order=2)\n", - "#Variables are automatically initialized at 0.\n", + "p = TimeFunction(name='p', grid=grid, space_order=2)\n", + "# Variables are automatically initialized at 0.\n", "\n", "# First order derivatives will be handled with p.dxc\n", - "eq_u =Eq(u.dt + u*u.dx + v*u.dy, -1./rho * p.dxc + nu*(u.laplace), subdomain=grid.interior)\n", - "eq_v =Eq(v.dt + u*v.dx + v*v.dy, -1./rho * p.dyc + nu*(v.laplace), subdomain=grid.interior)\n", - "eq_p =Eq(p.laplace,rho*(1./dt*(u.dxc+v.dyc)-(u.dxc*u.dxc)-2*(u.dyc*v.dxc)-(v.dyc*v.dyc)), subdomain=grid.interior)\n", + "eq_u = Eq(u.dt + u*u.dx + v*u.dy, -1./rho * p.dxc + nu*(u.laplace), subdomain=grid.interior)\n", + "eq_v = Eq(v.dt + u*v.dx + v*v.dy, -1./rho * p.dyc + nu*(v.laplace), subdomain=grid.interior)\n", + "eq_p = Eq(p.laplace, rho*(1./dt*(u.dxc+v.dyc)-(u.dxc*u.dxc)-2*(u.dyc*v.dxc)-(v.dyc*v.dyc)), subdomain=grid.interior)\n", "\n", "# NOTE: Pressure has no time dependence so we solve for the other pressure buffer.\n", - "stencil_u =solve(eq_u , u.forward)\n", - "stencil_v =solve(eq_v , v.forward)\n", - "stencil_p=solve(eq_p, p)\n", + "stencil_u = solve(eq_u, 
u.forward)\n", + "stencil_v = solve(eq_v, v.forward)\n", + "stencil_p = solve(eq_p, p)\n", "\n", - "update_u =Eq(u.forward, stencil_u)\n", - "update_v =Eq(v.forward, stencil_v)\n", - "update_p =Eq(p.forward, stencil_p)\n", + "update_u = Eq(u.forward, stencil_u)\n", + "update_v = Eq(v.forward, stencil_v)\n", + "update_p = Eq(p.forward, stencil_p)\n", "\n", "# Boundary Conds. u=v=0 for all sides\n", - "bc_u = [Eq(u[t+1, 0, y], 0)]\n", + "bc_u = [Eq(u[t+1, 0, y], 0)]\n", "bc_u += [Eq(u[t+1, nx-1, y], 0)]\n", "bc_u += [Eq(u[t+1, x, 0], 0)]\n", "bc_u += [Eq(u[t+1, x, ny-1], 1)] # except u=1 for y=2\n", - "bc_v = [Eq(v[t+1, 0, y], 0)]\n", + "bc_v = [Eq(v[t+1, 0, y], 0)]\n", "bc_v += [Eq(v[t+1, nx-1, y], 0)]\n", "bc_v += [Eq(v[t+1, x, ny-1], 0)]\n", "bc_v += [Eq(v[t+1, x, 0], 0)]\n", "\n", - "bc_p = [Eq(p[t+1, 0, y],p[t+1, 1,y])] # dpn/dx = 0 for x=0.\n", - "bc_p += [Eq(p[t+1,nx-1, y],p[t+1,nx-2, y])] # dpn/dx = 0 for x=2.\n", - "bc_p += [Eq(p[t+1, x, 0],p[t+1,x ,1])] # dpn/dy = 0 at y=0\n", - "bc_p += [Eq(p[t+1, x, ny-1],p[t+1, x, ny-2])] # pn=0 for y=2\n", + "bc_p = [Eq(p[t+1, 0, y], p[t+1, 1, y])] # dpn/dx = 0 for x=0.\n", + "bc_p += [Eq(p[t+1, nx-1, y], p[t+1, nx-2, y])] # dpn/dx = 0 for x=2.\n", + "bc_p += [Eq(p[t+1, x, 0], p[t+1, x, 1])] # dpn/dy = 0 at y=0\n", + "bc_p += [Eq(p[t+1, x, ny-1], p[t+1, x, ny-2])] # pn=0 for y=2\n", "bc_p += [Eq(p[t+1, 0, 0], 0)]\n", - "bc=bc_u+bc_v\n", + "bc = bc_u+bc_v\n", "\n", - "optime=Operator([update_u, update_v]+bc_u+bc_v)\n", - "oppres=Operator([update_p]+bc_p)" + "optime = Operator([update_u, update_v]+bc_u+bc_v)\n", + "oppres = Operator([update_p]+bc_p)" ] }, { @@ -514,9 +514,9 @@ "\n", "\n", "# This is the time loop.\n", - "for step in range(0,nt):\n", - " if step>0:\n", - " oppres(time_M = nit)\n", + "for step in range(0, nt):\n", + " if step > 0:\n", + " oppres(time_M=nit)\n", " optime(time_m=step, time_M=step, dt=dt)" ] }, @@ -537,15 +537,15 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "fig = 
pyplot.figure(figsize=(11,7), dpi=100)\n", + "# NBVAL_IGNORE_OUTPUT\n", + "fig = pyplot.figure(figsize=(11, 7), dpi=100)\n", "# Plotting the pressure field as a contour.\n", "pyplot.contourf(X, Y, p.data[0], alpha=0.5, cmap=cm.viridis)\n", "pyplot.colorbar()\n", "# Plotting the pressure field outlines.\n", "pyplot.contour(X, Y, p.data[0], cmap=cm.viridis)\n", "# Plotting velocity field.\n", - "pyplot.quiver(X[::2,::2], Y[::2,::2], u.data[0,::2,::2], v.data[0,::2,::2])\n", + "pyplot.quiver(X[::2, ::2], Y[::2, ::2], u.data[0, ::2, ::2], v.data[0, ::2, ::2])\n", "pyplot.xlabel('X')\n", "pyplot.ylabel('Y');\n" ] @@ -574,17 +574,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Again, check results with Marchi et al 2009.\n", "fig = pyplot.figure(figsize=(12, 6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(u.data[0,int(grid.shape[0]/2),:],y_coord[:])\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(u.data[0, int(grid.shape[0]/2), :], y_coord[:])\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],v.data[0,:,int(grid.shape[0]/2)])\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], v.data[0, :, int(grid.shape[0]/2)])\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", "\n", @@ -623,28 +623,28 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig = pyplot.figure(figsize=(12, 6))\n", "ax1 = fig.add_subplot(121)\n", - "ax1.plot(a[int(npgrid[0]/2),:],y_coord[:])\n", - "ax1.plot(u.data[0,int(grid.shape[0]/2),:],y_coord[:],'--')\n", - "ax1.plot(Marchi_Re10_u[:,1],Marchi_Re10_u[:,0],'ro')\n", + "ax1.plot(a[int(npgrid[0]/2), :], y_coord[:])\n", + "ax1.plot(u.data[0, int(grid.shape[0]/2), :], y_coord[:], '--')\n", + "ax1.plot(Marchi_Re10_u[:, 1], Marchi_Re10_u[:, 
0], 'ro')\n", "ax1.set_xlabel('$u$')\n", "ax1.set_ylabel('$y$')\n", "ax1 = fig.add_subplot(122)\n", - "ax1.plot(x_coord[:],b[:,int(npgrid[1]/2)])\n", - "ax1.plot(x_coord[:],v.data[0,:,int(grid.shape[0]/2)],'--')\n", - "ax1.plot(Marchi_Re10_v[:,0],Marchi_Re10_v[:,1],'ro')\n", + "ax1.plot(x_coord[:], b[:, int(npgrid[1]/2)])\n", + "ax1.plot(x_coord[:], v.data[0, :, int(grid.shape[0]/2)], '--')\n", + "ax1.plot(Marchi_Re10_v[:, 0], Marchi_Re10_v[:, 1], 'ro')\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$v$')\n", - "ax1.legend(['numpy','devito','Marchi (2009)'])\n", + "ax1.legend(['numpy', 'devito', 'Marchi (2009)'])\n", "\n", "pyplot.show()\n", "\n", - "#Pressure norm check\n", + "# Pressure norm check\n", "tol = 1e-3\n", - "assert np.sum((c[:,:]-d[:,:])**2/ np.maximum(d[:,:]**2,1e-10)) < tol\n", - "assert np.sum((p.data[0]-p.data[1])**2/np.maximum(p.data[0]**2,1e-10)) < tol" + "assert np.sum((c[:, :]-d[:, :])**2/ np.maximum(d[:, :]**2, 1e-10)) < tol\n", + "assert np.sum((p.data[0]-p.data[1])**2/np.maximum(p.data[0]**2, 1e-10)) < tol" ] }, { diff --git a/examples/cfd/08_shallow_water_equation.ipynb b/examples/cfd/08_shallow_water_equation.ipynb index 343b1638af..14c041cc71 100644 --- a/examples/cfd/08_shallow_water_equation.ipynb +++ b/examples/cfd/08_shallow_water_equation.ipynb @@ -90,7 +90,7 @@ " Operator that solves the equations expressed above.\n", " It computes and returns the discharge fluxes M, N and wave height eta from\n", " the 2D Shallow water equation using the FTCS finite difference method.\n", - " \n", + "\n", " Parameters\n", " ----------\n", " eta : TimeFunction\n", @@ -113,25 +113,28 @@ " animations.\n", " \"\"\"\n", "\n", - " eps = np.finfo(grid.dtype).eps\n", + " # eps = np.finfo(grid.dtype).eps\n", "\n", " # Friction term expresses the loss of amplitude from the friction with the seafloor\n", " frictionTerm = g * alpha**2 * sqrt(M**2 + N**2) / D**(7./3.)\n", "\n", " # System of equations\n", " pde_eta = Eq(eta.dt + M.dxc + N.dyc)\n", - " pde_M = 
Eq(M.dt + (M**2/D).dxc + (M*N/D).dyc + g*D*eta.forward.dxc + frictionTerm*M)\n", - " pde_N = Eq(N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N)\n", + " pde_M = Eq(M.dt + (M**2/D).dxc + (M*N/D).dyc + g*D*eta.forward.dxc + frictionTerm*M)\n", + " pde_N = Eq(\n", + " N.dt + (M.forward*N/D).dxc + (N**2/D).dyc + g*D*eta.forward.dyc\n", + " + g * alpha**2 * sqrt(M.forward**2 + N**2) / D**(7./3.)*N\n", + " )\n", "\n", " stencil_eta = solve(pde_eta, eta.forward)\n", - " stencil_M = solve(pde_M, M.forward)\n", - " stencil_N = solve(pde_N, N.forward)\n", + " stencil_M = solve(pde_M, M.forward)\n", + " stencil_N = solve(pde_N, N.forward)\n", "\n", " # Equations with the forward in time term isolated\n", - " update_eta = Eq(eta.forward, stencil_eta, subdomain=grid.interior)\n", - " update_M = Eq(M.forward, stencil_M, subdomain=grid.interior)\n", - " update_N = Eq(N.forward, stencil_N, subdomain=grid.interior)\n", - " eq_D = Eq(D, eta.forward + h)\n", + " update_eta = Eq(eta.forward, stencil_eta, subdomain=grid.interior)\n", + " update_M = Eq(M.forward, stencil_M, subdomain=grid.interior)\n", + " update_N = Eq(N.forward, stencil_N, subdomain=grid.interior)\n", + " eq_D = Eq(D, eta.forward + h)\n", "\n", " return Operator([update_eta, update_M, update_N, eq_D] + [Eq(etasave, eta)])" ] @@ -155,7 +158,7 @@ "import matplotlib.animation as animation\n", "\n", "\n", - "def snaps2video (eta, title):\n", + "def snaps2video(eta, title):\n", " fig, ax = plt.subplots()\n", " matrice = ax.imshow(eta.data[0, :, :].T, vmin=-1, vmax=1, cmap=\"seismic\")\n", " plt.colorbar(matrice)\n", @@ -182,7 +185,7 @@ "metadata": {}, "outputs": [], "source": [ - "def plotDepthProfile (h, title):\n", + "def plotDepthProfile(h, title):\n", " fig, ax = plt.subplots()\n", " matrice = ax.imshow(h0)\n", " plt.colorbar(matrice)\n", @@ -232,26 +235,26 @@ } ], "source": [ - "Lx = 100.0 # width of the mantle in the x direction []\n", - "Ly = 
100.0 # thickness of the mantle in the y direction []\n", - "nx = 401 # number of points in the x direction\n", - "ny = 401 # number of points in the y direction\n", - "dx = Lx / (nx - 1) # grid spacing in the x direction []\n", - "dy = Ly / (ny - 1) # grid spacing in the y direction []\n", - "g = 9.81 # gravity acceleration [m/s^2]\n", - "alpha = 0.025 # friction coefficient for natural channels in good condition\n", + "Lx = 100.0 # width of the mantle in the x direction []\n", + "Ly = 100.0 # thickness of the mantle in the y direction []\n", + "nx = 401 # number of points in the x direction\n", + "ny = 401 # number of points in the y direction\n", + "dx = Lx / (nx - 1) # grid spacing in the x direction []\n", + "dy = Ly / (ny - 1) # grid spacing in the y direction []\n", + "g = 9.81 # gravity acceleration [m/s^2]\n", + "alpha = 0.025 # friction coefficient for natural channels in good condition\n", "\n", "# Maximum wave propagation time [s]\n", - "Tmax = 3.\n", - "dt = 1/4500.\n", - "nt = (int)(Tmax/dt)\n", + "Tmax = 3.\n", + "dt = 1/4500.\n", + "nt = (int)(Tmax/dt)\n", "print(dt, nt)\n", "\n", "x = np.linspace(0.0, Lx, num=nx)\n", "y = np.linspace(0.0, Ly, num=ny)\n", "\n", "# Define initial eta, M, N\n", - "X, Y = np.meshgrid(x,y) # coordinates X,Y required to define eta, h, M, N\n", + "X, Y = np.meshgrid(x, y) # coordinates X,Y required to define eta, h, M, N\n", "\n", "# Define constant ocean depth profile h = 50 m\n", "h0 = 50. * np.ones_like(X)\n", @@ -264,7 +267,7 @@ "N0 = 0. 
* M0\n", "D0 = eta0 + 50.\n", "\n", - "grid = Grid(shape=(ny, nx), extent=(Ly, Lx), dtype=np.float32)" + "grid = Grid(shape=(ny, nx), extent=(Ly, Lx), dtype=np.float32)" ] }, { @@ -307,18 +310,18 @@ "nsnaps = 400\n", "\n", "# Defining symbolic functions\n", - "eta = TimeFunction(name='eta', grid=grid, space_order=2)\n", - "M = TimeFunction(name='M', grid=grid, space_order=2)\n", - "N = TimeFunction(name='N', grid=grid, space_order=2)\n", - "h = Function(name='h', grid=grid)\n", - "D = Function(name='D', grid=grid)\n", + "eta = TimeFunction(name='eta', grid=grid, space_order=2)\n", + "M = TimeFunction(name='M', grid=grid, space_order=2)\n", + "N = TimeFunction(name='N', grid=grid, space_order=2)\n", + "h = Function(name='h', grid=grid)\n", + "D = Function(name='D', grid=grid)\n", "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -1785,7 +1788,7 @@ "outputs": [], "source": [ "# To look at the code, uncomment the line below\n", - "#print(op.ccode)" + "# print(op.ccode)" ] }, { @@ -1809,8 +1812,8 @@ "h0 = 50 * np.ones_like(X)\n", "\n", "# Define initial Gaussian eta distribution [m]\n", - "eta0 = 0.5 * np.exp(-((X-35)**2/10)-((Y-35)**2/10)) # first Tsunami source\n", - "eta0 -= 0.5 * np.exp(-((X-65)**2/10)-((Y-65)**2/10)) # add second Tsunami source\n", + "eta0 = 0.5 * np.exp(-((X-35)**2/10)-((Y-35)**2/10)) # first Tsunami source\n", + "eta0 -= 0.5 * np.exp(-((X-65)**2/10)-((Y-65)**2/10)) # add second Tsunami source\n", "\n", "# Define initial M and N\n", "M0 = 100. 
* eta0\n", @@ -1862,10 +1865,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -4007,10 +4010,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -5494,10 +5497,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -6808,9 +6811,9 @@ "pert = 5. 
# perturbation amplitude\n", "\n", "np.random.seed(102034)\n", - "r = 2.0 * (np.random.rand(ny, nx) - 0.5) * pert # create random number perturbations\n", - "r = gaussian_filter(r, sigma=16) # smooth random number perturbation\n", - "h0 = h0 * (1 + r) # add perturbations to constant seafloor\n", + "r = 2.0 * (np.random.rand(ny, nx) - 0.5) * pert # create random number perturbations\n", + "r = gaussian_filter(r, sigma=16) # smooth random number perturbation\n", + "h0 = h0 * (1 + r) # add perturbations to constant seafloor\n", "\n", "# Define initial eta [m]\n", "eta0 = 0.2 * np.exp(-((X-30)**2/5)-((Y-50)**2/5))\n", @@ -6897,10 +6900,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", @@ -8100,7 +8103,7 @@ "eta0[mask] = 0.5\n", "\n", "# Smooth dam boundaries with gaussian filter\n", - "eta0 = gaussian_filter(eta0, sigma=8) # smooth random number perturbation\n", + "eta0 = gaussian_filter(eta0, sigma=8) # smooth random number perturbation\n", "\n", "# Define initial M and N\n", "M0 = 1. 
* eta0\n", @@ -8152,10 +8155,10 @@ "\n", "# Inserting initial conditions\n", "eta.data[0] = eta0.copy()\n", - "M.data[0] = M0.copy()\n", - "N.data[0] = N0.copy()\n", - "D.data[:] = eta0 + h0\n", - "h.data[:] = h0.copy()\n", + "M.data[0] = M0.copy()\n", + "N.data[0] = N0.copy()\n", + "D.data[:] = eta0 + h0\n", + "h.data[:] = h0.copy()\n", "\n", "# Setting up function to save the snapshots\n", "factor = round(nt / nsnaps)\n", diff --git a/examples/cfd/09_Darcy_flow_equation.ipynb b/examples/cfd/09_Darcy_flow_equation.ipynb index 482cda7096..2a90cf92c1 100644 --- a/examples/cfd/09_Darcy_flow_equation.ipynb +++ b/examples/cfd/09_Darcy_flow_equation.ipynb @@ -98,18 +98,18 @@ " k_max = size//2\n", "\n", " if dim == 2:\n", - " wavenumers = (np.concatenate((np.arange(0, k_max, 1), \\\n", - " np.arange(-k_max, 0, 1)),0))\n", - " wavenumers = np.tile(wavenumers, (size,1))\n", + " wavenumers = (np.concatenate((np.arange(0, k_max, 1),\n", + " np.arange(-k_max, 0, 1)), 0))\n", + " wavenumers = np.tile(wavenumers, (size, 1))\n", "\n", - " k_x = wavenumers.transpose(1,0)\n", + " k_x = wavenumers.transpose(1, 0)\n", " k_y = wavenumers\n", "\n", " self.sqrt_eig = (size**2)*math.sqrt(2.0)*sigma*((4*(math.pi**2)*(k_x**2 + k_y**2) + tau**2)**(-alpha/2.0))\n", - " self.sqrt_eig[0,0] = 0.0\n", + " self.sqrt_eig[0, 0] = 0.0\n", "\n", " self.size = []\n", - " for j in range(self.dim):\n", + " for _ in range(self.dim):\n", " self.size.append(size)\n", "\n", " self.size = tuple(self.size)\n", @@ -119,7 +119,6 @@ " coeff = np.random.randn(N, *self.size)\n", " coeff = self.sqrt_eig * coeff\n", "\n", - "\n", " return fft.ifftn(coeff).real" ] }, @@ -147,7 +146,7 @@ "s = 256\n", "\n", "# Create s x s grid with spacing 1\n", - "grid = Grid(shape=(s, s), extent=(1.0,1.0))\n", + "grid = Grid(shape=(s, s), extent=(1.0, 1.0))\n", "\n", "x, y = grid.dimensions\n", "t = grid.stepping_dim" @@ -176,8 +175,8 @@ "# Sample random fields\n", "# Create a threshold, either 4 or 12 (common for permeability)\n", 
"thresh_a = norm_a.sample(3)\n", - "thresh_a[thresh_a>=0] = 12\n", - "thresh_a[thresh_a<0] = 4\n", + "thresh_a[thresh_a >= 0] = 12\n", + "thresh_a[thresh_a < 0] = 4\n", "\n", "# The inputs:\n", "w1 = thresh_a[0]\n", @@ -218,7 +217,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot to show the input:\n", "ax1 = plt.subplot(221)\n", "ax2 = plt.subplot(222)\n", @@ -288,7 +287,7 @@ "source": [ "# Define 2D Darcy flow equation\n", "# Staggered FD is used to avoid numerical instability\n", - "equation_u = Eq(-div(a*grad(u,shift=.5),shift=-.5),f1)" + "equation_u = Eq(-div(a*grad(u, shift=.5), shift=-.5), f1)" ] }, { @@ -335,10 +334,10 @@ "# Boundary Conditions\n", "nx = s\n", "ny = s\n", - "bc = [Eq(u[t+1, 0, y],u[t+1, 1,y])] # du/dx = 0 for x=0.\n", - "bc += [Eq(u[t+1,nx-1, y],u[t+1,nx-2, y])] # du/dx = 0 for x=1.\n", - "bc += [Eq(u[t+1, x, 0],u[t+1,x ,1])] # du/dx = 0 at y=0\n", - "bc += [Eq(u[t+1, x, ny-1],u[t+1, x, ny-2])] # du/dx=0 for y=1\n", + "bc = [Eq(u[t+1, 0, y], u[t+1, 1, y])] # du/dx = 0 for x=0.\n", + "bc += [Eq(u[t+1, nx-1, y], u[t+1, nx-2, y])] # du/dx = 0 for x=1.\n", + "bc += [Eq(u[t+1, x, 0], u[t+1, x, 1])] # du/dx = 0 at y=0\n", + "bc += [Eq(u[t+1, x, ny-1], u[t+1, x, ny-2])] # du/dx=0 for y=1\n", "# u=0 for all sides\n", "bc += [Eq(u[t+1, x, 0], 0.)]\n", "bc += [Eq(u[t+1, x, ny-1], 0.)]\n", @@ -366,15 +365,16 @@ "'''\n", "Function to generate 'u' from 'a' using Devito\n", "\n", - "parameters \n", - "-----------------\n", + "Parameters\n", + "----------\n", "perm: Array of size (s, s)\n", " This is \"a\"\n", "f: Array of size (s, s)\n", " The forcing function f(x) = 1\n", " '''\n", - "def darcy_flow_2d(perm, f):\n", "\n", + "\n", + "def darcy_flow_2d(perm, f):\n", " # a(x) is the coefficients\n", " # f is the forcing function\n", " # initialize a, f with inputs permeability and forcing\n", @@ -382,7 +382,7 @@ " initialize_function(a, perm, 0)\n", "\n", " # call operator for the 15,000th pseudo-timestep\n", - " 
op(time= 15000)\n", + " op(time=15000)\n", "\n", " return np.array(u.data[0])" ] @@ -419,9 +419,9 @@ "metadata": {}, "outputs": [], "source": [ - "assert np.isclose(LA.norm(output1),1.0335084, atol=1e-3, rtol=0)\n", - "assert np.isclose(LA.norm(output2),1.3038709, atol=1e-3, rtol=0)\n", - "assert np.isclose(LA.norm(output3),1.3940924, atol=1e-3, rtol=0)" + "assert np.isclose(LA.norm(output1), 1.0335084, atol=1e-3, rtol=0)\n", + "assert np.isclose(LA.norm(output2), 1.3038709, atol=1e-3, rtol=0)\n", + "assert np.isclose(LA.norm(output3), 1.3940924, atol=1e-3, rtol=0)" ] }, { @@ -457,7 +457,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# plot to show the output:\n", "ax1 = plt.subplot(221)\n", "ax2 = plt.subplot(222)\n", diff --git a/examples/cfd/example_diffusion.py b/examples/cfd/example_diffusion.py index ccaf5f3faf..d4fc284ca0 100644 --- a/examples/cfd/example_diffusion.py +++ b/examples/cfd/example_diffusion.py @@ -51,8 +51,10 @@ def execute_python(ui, spacing=0.01, a=0.5, timesteps=500): uyy = (u[t0, i, j+1] - 2*u[t0, i, j] + u[t0, i, j-1]) / dy2 u[t1, i, j] = u[t0, i, j] + dt * a * (uxx + uyy) runtime = time.time() - tstart - log("Python: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Python: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, :, :], runtime @@ -73,8 +75,10 @@ def execute_numpy(ui, spacing=0.01, a=0.5, timesteps=500): uyy = (u[t0, 1:-1, 2:] - 2*u[t0, 1:-1, 1:-1] + u[t0, 1:-1, :-2]) / dy2 u[t1, 1:-1, 1:-1] = u[t0, 1:-1, 1:-1] + a * dt * (uxx + uyy) runtime = time.time() - tstart - log("Numpy: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Numpy: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, 
:, :], runtime @@ -108,8 +112,10 @@ def diffusion_stencil(): u[t0, :-2, 1:-1], u[t0, 1:-1, 2:], u[t0, 1:-1, :-2], dt, spacing) runtime = time.time() - tstart - log("Lambdify: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Lambdify: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u[ti % 2, :, :], runtime @@ -133,8 +139,10 @@ def execute_devito(ui, spacing=0.01, a=0.5, timesteps=500): tstart = time.time() op.apply(u=u, t=timesteps, dt=dt) runtime = time.time() - tstart - log("Devito: Diffusion with dx=%0.4f, dy=%0.4f, executed %d timesteps in %f seconds" - % (spacing, spacing, timesteps, runtime)) + log( + f'Devito: Diffusion with dx={spacing:0.4f}, dy={spacing:0.4f}, ' + f'executed {timesteps} timesteps in {runtime} seconds' + ) return u.data[1, :], runtime diff --git a/examples/cfd/tools.py b/examples/cfd/tools.py index 5221176581..76daeed19b 100644 --- a/examples/cfd/tools.py +++ b/examples/cfd/tools.py @@ -29,9 +29,8 @@ def plot_field(field, xmin=0., xmax=2., ymin=0., ymax=2., zmin=None, zmax=None, elif(zmin is None and zmax is not None): if np.min(field) >= zmax: warning("zmax is less than field's minima. Figure deceptive.") - elif(zmin is not None and zmax is None): - if np.max(field) <= zmin: - warning("zmin is larger than field's maxima. Figure deceptive.") + elif(zmin is not None and zmax is None) and np.max(field) <= zmin: + warning("zmin is larger than field's maxima. 
Figure deceptive.") x_coord = np.linspace(xmin, xmax, field.shape[0]) y_coord = np.linspace(ymin, ymax, field.shape[1]) fig = pyplot.figure(figsize=(11, 7), dpi=100) diff --git a/examples/compiler/01_data_regions.ipynb b/examples/compiler/01_data_regions.ipynb index b6392845cb..5f97892694 100644 --- a/examples/compiler/01_data_regions.ipynb +++ b/examples/compiler/01_data_regions.ipynb @@ -425,7 +425,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op.apply(time_M=2)\n", "print(u_new.data_with_halo)" ] @@ -499,7 +499,7 @@ } ], "source": [ - "u_pad = TimeFunction(name='u_pad', grid=grid, space_order=2, padding=(0,2,2))\n", + "u_pad = TimeFunction(name='u_pad', grid=grid, space_order=2, padding=(0, 2, 2))\n", "u_pad._data_allocated[:] = 0\n", "u_pad.data_with_halo[:] = 1\n", "u_pad.data[:] = 2\n", diff --git a/examples/compiler/02_indexification.ipynb b/examples/compiler/02_indexification.ipynb index 793d8b008f..22f79729ad 100644 --- a/examples/compiler/02_indexification.ipynb +++ b/examples/compiler/02_indexification.ipynb @@ -82,7 +82,7 @@ } ], "source": [ - "u_i = u.indexify() # For more details about the method `indexify`, see `devito/symbolics/manipulation.py`\n", + "u_i = u.indexify() # For more details about the method `indexify`, see `devito/symbolics/manipulation.py`\n", "print(u_i)" ] }, @@ -126,7 +126,7 @@ "metadata": {}, "outputs": [], "source": [ - "a = u[time,x+1]" + "a = u[time, x+1]" ] }, { @@ -166,7 +166,7 @@ "metadata": {}, "outputs": [], "source": [ - "b = u[time+1,x-2]" + "b = u[time+1, x-2]" ] }, { diff --git a/examples/compiler/04_iet-B.ipynb b/examples/compiler/04_iet-B.ipynb index 5f49f72e78..ac81c5c19e 100644 --- a/examples/compiler/04_iet-B.ipynb +++ b/examples/compiler/04_iet-B.ipynb @@ -71,11 +71,11 @@ " 'b': Constant(name='b'),\n", " 'c': Array(name='c', shape=(3,), dimensions=(dims['i'],)).indexify(),\n", " 'd': Array(name='d',\n", - " shape=(3,3),\n", - " dimensions=(dims['j'],dims['k'])).indexify(),\n", + 
" shape=(3, 3),\n", + " dimensions=(dims['j'], dims['k'])).indexify(),\n", " 'e': Function(name='e',\n", - " shape=(3,3,3),\n", - " dimensions=(dims['t0'],dims['t1'],dims['i'])).indexify(),\n", + " shape=(3, 3, 3),\n", + " dimensions=(dims['t0'], dims['t1'], dims['i'])).indexify(),\n", " 'f': TimeFunction(name='f', grid=grid).indexify()}\n", "symbs" ] @@ -108,12 +108,14 @@ "from devito.ir.equations import DummyEq\n", "from devito.tools import pprint\n", "\n", + "\n", "def get_exprs(a, b, c, d, e, f):\n", " return [Expression(DummyEq(a, b + c + 5.)),\n", " Expression(DummyEq(d, e - f)),\n", " Expression(DummyEq(a, 4 * (b * a))),\n", " Expression(DummyEq(a, (6. / b) + (8. * a)))]\n", "\n", + "\n", "exprs = get_exprs(symbs['a'],\n", " symbs['b'],\n", " symbs['c'],\n", @@ -139,6 +141,7 @@ "source": [ "from devito.ir.iet import Iteration\n", "\n", + "\n", "def get_iters(dims):\n", " return [lambda ex: Iteration(ex, dims['i'], (0, 3, 1)),\n", " lambda ex: Iteration(ex, dims['j'], (0, 5, 1)),\n", @@ -146,6 +149,7 @@ " lambda ex: Iteration(ex, dims['t0'], (0, 4, 1)),\n", " lambda ex: Iteration(ex, dims['t1'], (0, 4, 1))]\n", "\n", + "\n", "iters = get_iters(dims)" ] }, @@ -199,6 +203,7 @@ " # expr0\n", " return iters[0](iters[1](iters[2](exprs[0])))\n", "\n", + "\n", "def get_block2(exprs, iters):\n", " # Non-perfect simple loop nest:\n", " # for i\n", @@ -208,6 +213,7 @@ " # expr1\n", " return iters[0]([exprs[0], iters[1](iters[2](exprs[1]))])\n", "\n", + "\n", "def get_block3(exprs, iters):\n", " # Non-perfect non-trivial loop nest:\n", " # for i\n", @@ -223,6 +229,7 @@ " iters[1](iters[2]([exprs[1], exprs[2]])),\n", " iters[4](exprs[3])])\n", "\n", + "\n", "block1 = get_block1(exprs, iters)\n", "block2 = get_block2(exprs, iters)\n", "block3 = get_block3(exprs, iters)\n", diff --git a/examples/finance/bs_ivbp.ipynb b/examples/finance/bs_ivbp.ipynb index 4c46d48e14..807054aa2b 100644 --- a/examples/finance/bs_ivbp.ipynb +++ b/examples/finance/bs_ivbp.ipynb @@ -51,7 +51,7 
@@ "\n", "configuration[\"log-level\"] = 'INFO'\n", "\n", - "## Constants\n", + "# Constants\n", "# The strike price of the option\n", "K = 100.0\n", "\n", @@ -67,14 +67,14 @@ "\n", "# If you want to try some different problems, uncomment these lines\n", "\n", - "## Example 2\n", + "# Example 2\n", "# K = 10.0\n", "# r = 0.1\n", "# sigma = 0.2\n", "# smin = 0.0\n", "# smax = 20.0\n", "\n", - "## Example 3\n", + "# Example 3\n", "# K = 100.0\n", "# r = 0.05\n", "# sigma = 0.25\n", @@ -89,16 +89,16 @@ "# Extent calculations\n", "tmax = 1.0\n", "dt0 = 0.0005\n", - "ds0 = 1.0\n", + "ds0 = 1.0\n", "nt = (int)(tmax / dt0) + 1\n", "ns = int((smax - smin) / ds0) + 1\n", "\n", "shape = (ns, )\n", - "origin =(smin, )\n", + "origin = (smin, )\n", "spacing = (ds0, )\n", "extent = int(ds0 * (ns - 1))\n", "\n", - "print(\"dt,tmax,nt;\", dt0,tmax,nt)\n", + "print(\"dt,tmax,nt;\", dt0, tmax, nt)\n", "print(\"shape; \", shape)\n", "print(\"origin; \", origin)\n", "print(\"spacing; \", spacing)\n", @@ -162,10 +162,10 @@ "grid = Grid(shape=shape, origin=origin, extent=extent, dimensions=(s, ))\n", "\n", "so = 2\n", - "v = TimeFunction(name='v', grid=grid, space_order=so, time_order=1, save=nt)\n", - "v_no_bc = TimeFunction(name='v_no_bc', grid=grid, space_order=so, time_order=1, save=nt)\n", + "v = TimeFunction(name='v', grid=grid, space_order=so, time_order=1, save=nt)\n", + "v_no_bc = TimeFunction(name='v_no_bc', grid=grid, space_order=so, time_order=1, save=nt)\n", "\n", - "t,s = v.dimensions\n", + "t, s = v.dimensions\n", "ds = s.spacing\n", "dt = t.spacing\n", "\n", @@ -215,18 +215,18 @@ "outputs": [], "source": [ "# Equations with Neumann boundary conditions\n", - "eq = [Eq(v[t,extent], v[t,extent-1]+(v[t,extent-1]-v[t,extent-2])),\n", - " Eq(v[t,extent+1], v[t,extent]+(v[t,extent-1]-v[t,extent-2])),\n", + "eq = [Eq(v[t, extent], v[t, extent-1]+(v[t, extent-1]-v[t, extent-2])),\n", + " Eq(v[t, extent+1], v[t, extent]+(v[t, extent-1]-v[t, extent-2])),\n", " Eq(v.forward, 
update_centered)]\n", "eq_no_bc = [Eq(v.forward, update_centered)]\n", "\n", - "op = Operator(eq, subs=v.grid.spacing_map)\n", + "op = Operator(eq, subs=v.grid.spacing_map)\n", "op_no_bc = Operator(eq_no_bc, subs=v_no_bc.grid.spacing_map)\n", "\n", "# Initial conditions\n", "\n", "for i in range(shape[0]):\n", - " v.data[0, i] = max((smin + ds0 * i) - K, 0)\n", + " v.data[0, i] = max((smin + ds0 * i) - K, 0)\n", " v_no_bc.data[0, i] = max((smin + ds0 * i) - K, 0)" ] }, @@ -256,7 +256,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Run our operators\n", "startDevito = timer.time()\n", @@ -287,27 +287,27 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Get an appropriate ylimit\n", - "slice_smax = v.data[:,int(smax-smin-padding)]\n", + "slice_smax = v.data[:, int(smax-smin-padding)]\n", "ymax = max(slice_smax) + 2\n", "\n", "# Plot\n", "s = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10), facecolor='w')\n", + "plt.figure(figsize=(12, 10), facecolor='w')\n", "\n", "time = [1*nt//5, 2*nt//5, 3*nt//5, 4*nt//5, 5*nt//5-1]\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", "\n", "# initial conditions\n", - "plt.plot(s, v_no_bc.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s, v_no_bc.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s, v_no_bc.data[time[i],:], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", + " plt.plot(s, v_no_bc.data[time[i], :], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + "plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend(loc=2)\n", "plt.grid(True)\n", @@ -341,27 +341,27 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", 
"# Get an appropriate ylimit\n", - "slice_smax = v.data[:,int(smax-smin-padding)]\n", + "slice_smax = v.data[:, int(smax-smin-padding)]\n", "ymax = max(slice_smax) + 2\n", "\n", "# Plot\n", "s = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10), facecolor='w')\n", + "plt.figure(figsize=(12, 10), facecolor='w')\n", "\n", "time = [1*nt//5, 2*nt//5, 3*nt//5, 4*nt//5, 5*nt//5-1]\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", "\n", "# initial conditions\n", - "plt.plot(s, v.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s, v.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s, v.data[time[i],:], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", + " plt.plot(s, v.data[time[i], :], '-', color=colors[i], label='t='+str(time[i]*dt0), linewidth=1.5)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + "plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend(loc=2)\n", "plt.grid(True)\n", @@ -403,7 +403,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "\n", "# Trim the padding off smin and smax\n", @@ -413,15 +413,15 @@ "tt = np.linspace(0.0, dt0*(nt-1), nt)\n", "ss = np.linspace(smin+padding, smax-padding, shape[0]-padding*2)\n", "\n", - "hf = plt.figure(figsize=(12,12))\n", + "hf = plt.figure(figsize=(12, 12))\n", "ha = plt.axes(projection='3d')\n", "\n", "# 45 degree viewpoint\n", "ha.view_init(elev=25, azim=-45)\n", "\n", - "ha.set_xlim3d([0.0,1.0])\n", - "ha.set_ylim3d([smin+padding,smax-padding])\n", - "ha.set_zlim3d([0,ymax])\n", + "ha.set_xlim3d([0.0, 1.0])\n", + "ha.set_ylim3d([smin+padding, smax-padding])\n", + "ha.set_zlim3d([0, ymax])\n", "\n", "ha.set_xlabel('Time to expiration', labelpad=12, fontsize=16)\n", "ha.set_ylabel('Stock value', labelpad=12, fontsize=16)\n", @@ -491,7 
+491,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Derived formula for Black Scholes call from\n", "# https://aaronschlegel.me/black-scholes-formula-python.html\n", @@ -504,6 +504,7 @@ " call = (S * cdf(N)(d1) - K * np.exp(-r * T) * cdf(N)(d2))\n", " return call\n", "\n", + "\n", "startBF = timer.time()\n", "\n", "# Calculate truth and compare to our solution\n", @@ -518,21 +519,21 @@ "\n", "endBF = timer.time()\n", "\n", - "print(\"devito pde timesteps: %12.6s, %12.6fs runtime\" % (nt-1, endDevito - startDevito))\n", - "print(\"call_value_bs timesteps: %12.6s, %12.6fs runtime\" % (len(time), endBF - startBF))\n", + "print(f\"devito pde timesteps: {nt - 1}, {endDevito - startDevito:12.6f}s runtime\")\n", + "print(f\"call_value_bs timesteps: {len(time)}, {endBF - startBF:12.6f}s runtime\")\n", "\n", "s2 = np.linspace(smin, smax, shape[0])\n", - "plt.figure(figsize=(12,10))\n", + "plt.figure(figsize=(12, 10))\n", "\n", "colors = [\"blue\", \"green\", \"gold\", \"darkorange\", \"red\"]\n", - "plt.plot(s2, v.data[0,:], '-', color=\"black\", label='initial condition', linewidth=1)\n", + "plt.plot(s2, v.data[0, :], '-', color=\"black\", label='initial condition', linewidth=1)\n", "\n", "for i in range(len(time)):\n", - " plt.plot(s2, results[i], ':', color=colors[i], label='truth t='+str(time[i]), linewidth=3)\n", - " plt.plot(s2, v.data[int(time[i]*nt),:], '-', color=colors[i], label='pde t='+str(time[i]), linewidth=1)\n", + " plt.plot(s2, results[i], ':', color=colors[i], label='truth t='+str(time[i]), linewidth=3)\n", + " plt.plot(s2, v.data[int(time[i]*nt), :], '-', color=colors[i], label='pde t='+str(time[i]), linewidth=1)\n", "\n", - "plt.xlim([smin+padding,smax-padding])\n", - "plt.ylim([0,ymax])\n", + "plt.xlim([smin+padding, smax-padding])\n", + "plt.ylim([0, ymax])\n", "\n", "plt.legend()\n", "plt.grid(True)\n", @@ -580,24 +581,24 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot 
the l2 norm of the formula and our solution over time\n", - "t_range = np.linspace(dt0,1.0,50)\n", - "x_range = range(padding, smax-smin-padding*2, 1)\n", + "t_range = np.linspace(dt0, 1.0, 50)\n", + "x_range = range(padding, smax-smin-padding*2, 1)\n", "vals = []\n", "\n", "for t in t_range:\n", " l2 = 0.0\n", " for x in x_range:\n", " truth = call_value_bs(x+smin, K, t, r, sigma)\n", - " val = v.data[int(t*(nt-1)), x]\n", - " l2 += (truth - val)**2\n", + " val = v.data[int(t*(nt-1)), x]\n", + " l2 += (truth - val)**2\n", "\n", " rms = np.sqrt(np.float64(l2 / len(x_range)))\n", " vals.append(rms)\n", "\n", - "plt.figure(figsize=(12,10))\n", + "plt.figure(figsize=(12, 10))\n", "plt.plot(t_range, np.array(vals))" ] }, @@ -618,7 +619,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "np.mean(vals)" ] diff --git a/examples/performance/02_advisor_roofline.ipynb b/examples/performance/02_advisor_roofline.ipynb index dc76d7ecf0..2fbc63174d 100644 --- a/examples/performance/02_advisor_roofline.ipynb +++ b/examples/performance/02_advisor_roofline.ipynb @@ -107,9 +107,13 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "\n", - "! python3 $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py --path $DEVITO_JUPYTER/benchmarks/user/benchmark.py --exec-args \"run -P acoustic -d 64 64 64 -so 4 --tn 50 --autotune off\" --output $DEVITO_JUPYTER/examples/performance/profilings --name JupyterProfiling\n" + "# NBVAL_SKIP\n", + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py \\\n", + " --path $DEVITO_JUPYTER/benchmarks/user/benchmark.py \\\n", + " --exec-args \"run -P acoustic -d 64 64 64 -so 4 --tn 50 --autotune off\" \\\n", + " --output $DEVITO_JUPYTER/examples/performance/profilings \\\n", + " --name JupyterProfiling" ] }, { @@ -166,9 +170,13 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "\n", - "! 
python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode overview --name $DEVITO_JUPYTER/examples/performance/resources/OverviewRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" + "# NBVAL_SKIP\n", + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py \\\n", + " --mode overview \\\n", + " --name $DEVITO_JUPYTER/examples/performance/resources/OverviewRoof \\\n", + " --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling \\\n", + " $DEVITO_JUPYTER/benchmarks/user/advisor/run_advisor.py" ] }, { @@ -230,9 +238,12 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "\n", - "! python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py --mode top-loops --name $DEVITO_JUPYTER/examples/performance/resources/TopLoopsRoof --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling\n" + "# NBVAL_SKIP\n", + "%%bash\n", + "python3 $DEVITO_JUPYTER/benchmarks/user/advisor/roofline.py \\\n", + " --mode top-loops \\\n", + " --name $DEVITO_JUPYTER/examples/performance/resources/TopLoopsRoof \\\n", + " --project $DEVITO_JUPYTER/examples/performance/profilings/JupyterProfiling \\\n" ] }, { diff --git a/examples/seismic/abc_methods/01_introduction.ipynb b/examples/seismic/abc_methods/01_introduction.ipynb index d041564b41..65cd36e38b 100644 --- a/examples/seismic/abc_methods/01_introduction.ipynb +++ b/examples/seismic/abc_methods/01_introduction.ipynb @@ -188,11 +188,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -217,10 +217,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib 
inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -244,16 +244,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hx = (x1-x0)/(nptx-1)\n", - "hz = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hx = (x1-x0)/(nptx-1)\n", + "hz = (z1-z0)/(nptz-1)" ] }, { @@ -276,10 +276,10 @@ "metadata": {}, "outputs": [], "source": [ - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hx,hz)" + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hx, hz)" ] }, { @@ -297,9 +297,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: z, z: z}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -332,12 +335,12 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", + "v0 = np.zeros((nptx, nptz))\n", "p0 = 0\n", "p1 = int((1/2)*nptz)\n", "p2 = nptz\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -354,22 +357,22 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, 
fscale*z0]\n", - " fig = plot.imshow(np.transpose(vel), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(vel), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -433,13 +436,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hx,hz)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hx, hz)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -463,8 +466,8 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + 
"nt = time_range.num - 1" ] }, { @@ -487,10 +490,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hxs,hzs) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hxs, hzs) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -517,10 +520,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*compx\n", - "zposf = hz" + "xposf = 0.5*compx\n", + "zposf = hz" ] }, { @@ -558,7 +561,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -613,9 +624,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hz" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hz" ] }, { @@ -651,7 +662,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -675,7 +686,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -702,8 +713,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = 
Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -731,7 +742,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -804,7 +815,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil = Eq(u.forward, solve(pde,u.forward),subdomain = grid.subdomains['d0'])" + "stencil = Eq(u.forward, solve(pde, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -838,7 +849,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -864,7 +875,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil] + src_term + bc + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil] + src_term + bc + rec_term, subs=grid.spacing_map)" ] }, { @@ -928,7 +939,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -946,11 +957,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U)/10.\n", - " extent = [fscale*x0,fscale*x1,fscale*z1,fscale*z0]\n", - " fig = plot.imshow(np.transpose(U),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U)/10.\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(U), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " 
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1004,7 +1015,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1023,22 +1034,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec)/10.\n", - " extent = [fscaled*x0,fscaled*x1, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec)/10.\n", + " extent = [fscaled*x0, fscaled*x1, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec, vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/02_damping.ipynb b/examples/seismic/abc_methods/02_damping.ipynb index 6111471cf8..1c21b1e464 100644 --- 
a/examples/seismic/abc_methods/02_damping.ipynb +++ b/examples/seismic/abc_methods/02_damping.ipynb @@ -148,11 +148,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -171,10 +171,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -190,16 +190,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv = (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -227,8 +227,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -261,18 +261,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - 
"origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -312,9 +312,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: ('middle', npmlx, npmlx), z: ('middle', 0, npmlz)}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -339,23 +342,34 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", "class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -376,7 +390,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -392,9 +412,9 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = 
np.zeros((nptx,nptz))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "X0 = np.linspace(x0, x1, nptx)\n", + "Z0 = np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -408,18 +428,20 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", "\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -436,22 +458,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " 
cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -505,13 +533,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -527,8 +555,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -544,10 +572,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -563,10 +591,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", - "zposf = hzv" + "xposf = 0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -582,7 +610,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = 
RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -629,9 +665,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -647,7 +683,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -665,7 +701,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -681,8 +717,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -691,7 +727,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -723,10 +759,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + npmlx*hxv\n", - "x1pml = x1 - npmlx*hxv\n", - "z0pml = z0\n", - "z1pml = z1 - npmlz*hzv" + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -749,13 +785,13 @@ "metadata": {}, "outputs": [], "source": 
[ - "def fdamp(x,z):\n", + "def fdamp(x, z):\n", "\n", - " quibar = 1.5*np.log(1.0/0.001)/(40)\n", - " cte = 1./vmax\n", + " quibar = 1.5*np.log(1.0/0.001)/(40)\n", + " cte = 1./vmax\n", "\n", - " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", - " b = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", + " a = np.where(x <= x0pml, (np.abs(x-x0pml)/lx), np.where(x >= x1pml, (np.abs(x-x1pml)/lx), 0.))\n", + " b = np.where(z <= z0pml, (np.abs(z-z0pml)/lz), np.where(z >= z1pml, (np.abs(z-z1pml)/lz), 0.))\n", " adamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))/hxv\n", " bdamp = quibar*(b-(1./(2.*np.pi))*np.sin(2.*np.pi*b))/hzv\n", " fdamp = cte*(adamp+bdamp)\n", @@ -778,11 +814,11 @@ "source": [ "def generatemdamp():\n", "\n", - " X0 = np.linspace(x0,x1,nptx)\n", - " Z0 = np.linspace(z0,z1,nptz)\n", - " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", - " D0 = np.zeros((nptx,nptz))\n", - " D0 = np.transpose(fdamp(X0grid,Z0grid))\n", + " X0 = np.linspace(x0, x1, nptx)\n", + " Z0 = np.linspace(z0, z1, nptz)\n", + " X0grid, Z0grid = np.meshgrid(X0, Z0)\n", + " D0 = np.zeros((nptx, nptz))\n", + " D0 = np.transpose(fdamp(X0grid, Z0grid))\n", "\n", " return D0" ] @@ -818,12 +854,12 @@ "source": [ "def graph2damp(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " 
plot.title('Absorbing Layer Function')\n", @@ -887,8 +923,8 @@ "metadata": {}, "outputs": [], "source": [ - "damp = Function(name=\"damp\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "damp.data[:,:] = D0" + "damp = Function(name=\"damp\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "damp.data[:, :] = D0" ] }, { @@ -931,7 +967,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil0 = Eq(u.forward, solve(pde0,u.forward),subdomain = grid.subdomains['d0'])" + "stencil0 = Eq(u.forward, solve(pde0, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -947,7 +983,7 @@ "metadata": {}, "outputs": [], "source": [ - "subds = ['d1','d2','d3']" + "subds = ['d1', 'd2', 'd3']" ] }, { @@ -956,7 +992,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil1 = [Eq(u.forward, solve(pde1,u.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]" + "stencil1 = [Eq(u.forward, solve(pde1, u.forward), subdomain=grid.subdomains[subds[i]]) for i in range(0, len(subds))]" ] }, { @@ -972,7 +1008,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -996,7 +1032,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil0,stencil1] + src_term + bc + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil0, stencil1] + src_term + bc + rec_term, subs=grid.spacing_map)" ] }, { @@ -1059,7 +1095,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1077,11 +1113,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = 
plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1124,7 +1160,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1141,22 +1177,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile with Damping - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " 
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile with Damping - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/03_pml.ipynb b/examples/seismic/abc_methods/03_pml.ipynb index 3be3fc9deb..b8a346dfeb 100644 --- a/examples/seismic/abc_methods/03_pml.ipynb +++ b/examples/seismic/abc_methods/03_pml.ipynb @@ -153,11 +153,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -176,8 +176,8 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator\n", - "from examples.seismic import TimeAxis, RickerSource, Receiver" + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator\n", + "from examples.seismic import TimeAxis, RickerSource, Receiver" ] }, { @@ -193,16 +193,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv 
= (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -218,8 +218,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -252,18 +252,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -314,9 +314,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: ('middle', npmlx, npmlx), z: ('middle', 0, npmlz)}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -335,23 +338,34 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", "class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -372,7 
+386,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -388,10 +408,10 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", - "v1 = np.zeros((nptx-1,nptz-1))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "v1 = np.zeros((nptx-1, nptz-1))\n", + "X0 = np.linspace(x0, x1, nptx)\n", + "Z0 = np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -405,23 +425,25 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5\n", + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz-1\n", - "v1[0:nptx-1,p0:p1] = 1.5\n", - "v1[0:nptx-1,p1:p2] = 2.5" + "v1[0:nptx-1, p0:p1] = 1.5\n", + "v1[0:nptx-1, p1:p2] = 2.5" ] }, { @@ -438,22 +460,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " 
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -507,13 +535,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -522,8 +550,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -539,10 +567,10 @@ "metadata": {}, "outputs": 
[], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -558,10 +586,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", - "zposf = hzv" + "xposf = 0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -570,7 +598,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -617,9 +653,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -628,7 +664,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -646,7 +682,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -662,8 +698,8 @@ "metadata": {}, "outputs": [], "source": [ - "phi1 = 
TimeFunction(name=\"phi1\",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "phi2 = TimeFunction(name=\"phi2\",grid=grid,time_order=2,space_order=2,staggered=(x,z),dtype=np.float64)" + "phi1 = TimeFunction(name=\"phi1\", grid=grid, time_order=2, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "phi2 = TimeFunction(name=\"phi2\", grid=grid, time_order=2, space_order=2, staggered=(x, z), dtype=np.float64)" ] }, { @@ -679,8 +715,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel0 = Function(name=\"vel0\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel0.data[:,:] = v0[:,:]" + "vel0 = Function(name=\"vel0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel0.data[:, :] = v0[:, :]" ] }, { @@ -696,8 +732,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel1 = Function(name=\"vel1\", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "vel1.data[0:nptx-1,0:nptz-1] = v1" + "vel1 = Function(name=\"vel1\", grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "vel1.data[0:nptx-1, 0:nptz-1] = v1" ] }, { @@ -713,8 +749,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel1.data[nptx-1,0:nptz-1] = vel1.data[nptx-2,0:nptz-1]\n", - "vel1.data[0:nptx,nptz-1] = vel1.data[0:nptx,nptz-2]" + "vel1.data[nptx-1, 0:nptz-1] = vel1.data[nptx-2, 0:nptz-1]\n", + "vel1.data[0:nptx, nptz-1] = vel1.data[0:nptx, nptz-2]" ] }, { @@ -730,7 +766,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel0**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel0**2)" ] }, { @@ -764,10 +800,10 @@ "metadata": {}, "outputs": [], "source": [ - "x0pml = x0 + npmlx*hxv\n", - "x1pml = x1 - npmlx*hxv\n", - "z0pml = z0\n", - "z1pml = z1 - npmlz*hzv" + "x0pml = x0 + npmlx*hxv\n", + "x1pml = x1 - npmlx*hxv\n", + "z0pml = z0\n", + "z1pml = z1 - npmlz*hzv" ] }, { @@ -783,15 +819,15 @@ "metadata": {}, "outputs": [], "source": [ - "def 
fdamp(x,z,i):\n", + "def fdamp(x, z, i):\n", "\n", - " quibar = 0.05\n", + " quibar = 0.05\n", "\n", - " if(i==1):\n", - " a = np.where(x<=x0pml,(np.abs(x-x0pml)/lx),np.where(x>=x1pml,(np.abs(x-x1pml)/lx),0.))\n", + " if(i == 1):\n", + " a = np.where(x <= x0pml, (np.abs(x-x0pml)/lx), np.where(x >= x1pml, (np.abs(x-x1pml)/lx), 0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", - " if(i==2):\n", - " a = np.where(z<=z0pml,(np.abs(z-z0pml)/lz),np.where(z>=z1pml,(np.abs(z-z1pml)/lz),0.))\n", + " if(i == 2):\n", + " a = np.where(z <= z0pml, (np.abs(z-z0pml)/lz), np.where(z >= z1pml, (np.abs(z-z1pml)/lz), 0.))\n", " fdamp = quibar*(a-(1./(2.*np.pi))*np.sin(2.*np.pi*a))\n", "\n", " return fdamp" @@ -816,23 +852,23 @@ "source": [ "def generatemdamp():\n", "\n", - " X0 = np.linspace(x0,x1,nptx)\n", - " Z0 = np.linspace(z0,z1,nptz)\n", - " X0grid,Z0grid = np.meshgrid(X0,Z0)\n", - " X1 = np.linspace((x0+0.5*hxv),(x1-0.5*hxv),nptx-1)\n", - " Z1 = np.linspace((z0+0.5*hzv),(z1-0.5*hzv),nptz-1)\n", - " X1grid,Z1grid = np.meshgrid(X1,Z1)\n", + " X0 = np.linspace(x0, x1, nptx)\n", + " Z0 = np.linspace(z0, z1, nptz)\n", + " X0grid, Z0grid = np.meshgrid(X0, Z0)\n", + " X1 = np.linspace((x0+0.5*hxv), (x1-0.5*hxv), nptx-1)\n", + " Z1 = np.linspace((z0+0.5*hzv), (z1-0.5*hzv), nptz-1)\n", + " X1grid, Z1grid = np.meshgrid(X1, Z1)\n", "\n", - " D01 = np.zeros((nptx,nptz))\n", - " D02 = np.zeros((nptx,nptz))\n", - " D11 = np.zeros((nptx,nptz))\n", - " D12 = np.zeros((nptx,nptz))\n", + " D01 = np.zeros((nptx, nptz))\n", + " D02 = np.zeros((nptx, nptz))\n", + " D11 = np.zeros((nptx, nptz))\n", + " D12 = np.zeros((nptx, nptz))\n", "\n", - " D01 = np.transpose(fdamp(X0grid,Z0grid,1))\n", - " D02 = np.transpose(fdamp(X0grid,Z0grid,2))\n", + " D01 = np.transpose(fdamp(X0grid, Z0grid, 1))\n", + " D02 = np.transpose(fdamp(X0grid, Z0grid, 2))\n", "\n", - " D11 = np.transpose(fdamp(X1grid,Z1grid,1))\n", - " D12 = np.transpose(fdamp(X1grid,Z1grid,2))\n", + " D11 = 
np.transpose(fdamp(X1grid, Z1grid, 1))\n", + " D12 = np.transpose(fdamp(X1grid, Z1grid, 2))\n", "\n", " return D01, D02, D11, D12" ] @@ -861,12 +897,12 @@ "source": [ "def graph2damp(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Absorbing Layer Function')\n", @@ -970,10 +1006,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx0 = Function(name=\"dampx0\", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)\n", - "dampz0 = Function(name=\"dampz0\", grid=grid,space_order=2,staggered=NODE ,dtype=np.float64)\n", - "dampx0.data[:,:] = D01\n", - "dampz0.data[:,:] = D02" + "dampx0 = Function(name=\"dampx0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "dampz0 = Function(name=\"dampz0\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "dampx0.data[:, :] = D01\n", + "dampz0.data[:, :] = D02" ] }, { @@ -982,10 +1018,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx1 = Function(name=\"dampx1\", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "dampz1 = Function(name=\"dampz1\", grid=grid,space_order=2,staggered=(x,z),dtype=np.float64)\n", - "dampx1.data[0:nptx-1,0:nptz-1] = D11\n", - "dampz1.data[0:nptx-1,0:nptz-1] = D12" + "dampx1 = Function(name=\"dampx1\", grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "dampz1 = Function(name=\"dampz1\", 
grid=grid, space_order=2, staggered=(x, z), dtype=np.float64)\n", + "dampx1.data[0:nptx-1, 0:nptz-1] = D11\n", + "dampz1.data[0:nptx-1, 0:nptz-1] = D12" ] }, { @@ -1001,10 +1037,10 @@ "metadata": {}, "outputs": [], "source": [ - "dampx1.data[nptx-1,0:nptz-1] = dampx1.data[nptx-2,0:nptz-1]\n", - "dampx1.data[0:nptx,nptz-1] = dampx1.data[0:nptx,nptz-2]\n", - "dampz1.data[nptx-1,0:nptz-1] = dampz1.data[nptx-2,0:nptz-1]\n", - "dampz1.data[0:nptx,nptz-1] = dampz1.data[0:nptx,nptz-2]" + "dampx1.data[nptx-1, 0:nptz-1] = dampx1.data[nptx-2, 0:nptz-1]\n", + "dampx1.data[0:nptx, nptz-1] = dampx1.data[0:nptx, nptz-2]\n", + "dampz1.data[nptx-1, 0:nptz-1] = dampz1.data[nptx-2, 0:nptz-1]\n", + "dampz1.data[0:nptx, nptz-1] = dampz1.data[0:nptx, nptz-2]" ] }, { @@ -1056,25 +1092,25 @@ "outputs": [], "source": [ "# White Region\n", - "pde01 = Eq(u.dt2-u.laplace*vel0**2)\n", + "pde01 = Eq(u.dt2-u.laplace*vel0**2)\n", "\n", "# Blue Region\n", - "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0\n", - "pde02b = - (0.5/hx)*(phi1[t,x,z-1]+phi1[t,x,z]-phi1[t,x-1,z-1]-phi1[t,x-1,z])\n", - "pde02c = - (0.5/hz)*(phi2[t,x-1,z]+phi2[t,x,z]-phi2[t,x-1,z-1]-phi2[t,x,z-1])\n", - "pde02 = Eq(pde02a + pde02b + pde02c)\n", + "pde02a = u.dt2 + (dampx0+dampz0)*u.dtc + (dampx0*dampz0)*u - u.laplace*vel0*vel0\n", + "pde02b = - (0.5/hx)*(phi1[t, x, z-1]+phi1[t, x, z]-phi1[t, x-1, z-1]-phi1[t, x-1, z])\n", + "pde02c = - (0.5/hz)*(phi2[t, x-1, z]+phi2[t, x, z]-phi2[t, x-1, z-1]-phi2[t, x, z-1])\n", + "pde02 = Eq(pde02a + pde02b + pde02c)\n", "\n", "pde10 = phi1.dt + dampx1*0.5*(phi1.forward+phi1)\n", - "a1 = u[t+1,x+1,z] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x,z+1]\n", - "a2 = u[t,x+1,z] + u[t,x+1,z+1] - u[t,x,z] - u[t,x,z+1]\n", + "a1 = u[t+1, x+1, z] + u[t+1, x+1, z+1] - u[t+1, x, z] - u[t+1, x, z+1]\n", + "a2 = u[t, x+1, z] + u[t, x+1, z+1] - u[t, x, z] - u[t, x, z+1]\n", "pde11 = -(dampz1-dampx1)*0.5*(0.5/hx)*(a1+a2)*vel1**2\n", - "pde1 = Eq(pde10+pde11)\n", + "pde1 = 
Eq(pde10+pde11)\n", "\n", "pde20 = phi2.dt + dampz1*0.5*(phi2.forward+phi2)\n", - "b1 = u[t+1,x,z+1] + u[t+1,x+1,z+1] - u[t+1,x,z] - u[t+1,x+1,z]\n", - "b2 = u[t,x,z+1] + u[t,x+1,z+1] - u[t,x,z] - u[t,x+1,z]\n", + "b1 = u[t+1, x, z+1] + u[t+1, x+1, z+1] - u[t+1, x, z] - u[t+1, x+1, z]\n", + "b2 = u[t, x, z+1] + u[t, x+1, z+1] - u[t, x, z] - u[t, x+1, z]\n", "pde21 = -(dampx1-dampz1)*0.5*(0.5/hz)*(b1+b2)*vel1**2\n", - "pde2 = Eq(pde20+pde21)" + "pde2 = Eq(pde20+pde21)" ] }, { @@ -1090,7 +1126,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil01 = Eq(u.forward,solve(pde01,u.forward) ,subdomain = grid.subdomains['d0'])" + "stencil01 = Eq(u.forward, solve(pde01, u.forward), subdomain=grid.subdomains['d0'])" ] }, { @@ -1106,7 +1142,7 @@ "metadata": {}, "outputs": [], "source": [ - "subds = ['d1','d2','d3']" + "subds = ['d1', 'd2', 'd3']" ] }, { @@ -1115,9 +1151,18 @@ "metadata": {}, "outputs": [], "source": [ - "stencil02 = [Eq(u.forward,solve(pde02, u.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]\n", - "stencil1 = [Eq(phi1.forward, solve(pde1,phi1.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]\n", - "stencil2 = [Eq(phi2.forward, solve(pde2,phi2.forward),subdomain = grid.subdomains[subds[i]]) for i in range(0,len(subds))]" + "stencil02 = [\n", + " Eq(u.forward, solve(pde02, u.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]\n", + "stencil1 = [\n", + " Eq(phi1.forward, solve(pde1, phi1.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]\n", + "stencil2 = [\n", + " Eq(phi2.forward, solve(pde2, phi2.forward), subdomain=grid.subdomains[subds[i]])\n", + " for i in range(0, len(subds))\n", + "]" ] }, { @@ -1133,7 +1178,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,0,z],0.),Eq(u[t+1,nptx-1,z],0.),Eq(u[t+1,x,nptz-1],0.),Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, 0, z], 0.), Eq(u[t+1, nptx-1, z], 
0.), Eq(u[t+1, x, nptz-1], 0.), Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -1161,7 +1206,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op = Operator([stencil01,stencil02] + src_term + bc + [stencil1,stencil2] + rec_term,subs=grid.spacing_map)" + "op = Operator([stencil01, stencil02] + src_term + bc + [stencil1, stencil2] + rec_term, subs=grid.spacing_map)" ] }, { @@ -1177,9 +1222,9 @@ "metadata": {}, "outputs": [], "source": [ - "u.data[:] = 0.\n", - "phi1.data[:] = 0.\n", - "phi2.data[:] = 0." + "u.data[:] = 0.\n", + "phi1.data[:] = 0.\n", + "phi2.data[:] = 0." ] }, { @@ -1228,7 +1273,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1246,11 +1291,11 @@ "source": [ "def graph2d(U):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", @@ -1293,7 +1338,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:])" + "graph2d(u.data[0, :, :])" ] }, { @@ -1310,22 +1355,22 @@ "outputs": [], "source": [ "def graph2drec(rec):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = 
[fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " plot.title('Receivers Signal Profile with PML - Devito')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " plot.title('Receivers Signal Profile with PML - Devito')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { diff --git a/examples/seismic/abc_methods/04_habc.ipynb b/examples/seismic/abc_methods/04_habc.ipynb index 0e452d6914..7ba4148b02 100644 --- a/examples/seismic/abc_methods/04_habc.ipynb +++ b/examples/seismic/abc_methods/04_habc.ipynb @@ -202,11 +202,11 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "import numpy as np\n", - "import matplotlib.pyplot as plot\n", - "import matplotlib.ticker as mticker\n", - "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", - "from matplotlib import cm" + "import numpy as np\n", + "import 
matplotlib.pyplot as plot\n", + "import matplotlib.ticker as mticker\n", + "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", + "from matplotlib import cm" ] }, { @@ -225,10 +225,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "%matplotlib inline\n", - "from examples.seismic import TimeAxis\n", - "from examples.seismic import RickerSource\n", - "from examples.seismic import Receiver\n", - "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" + "from examples.seismic import TimeAxis\n", + "from examples.seismic import RickerSource\n", + "from examples.seismic import Receiver\n", + "from devito import SubDomain, Grid, NODE, TimeFunction, Function, Eq, solve, Operator" ] }, { @@ -244,16 +244,16 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = 101\n", - "nptz = 101\n", - "x0 = 0.\n", - "x1 = 1000.\n", - "compx = x1-x0\n", - "z0 = 0.\n", - "z1 = 1000.\n", - "compz = z1-z0\n", - "hxv = (x1-x0)/(nptx-1)\n", - "hzv = (z1-z0)/(nptz-1)" + "nptx = 101\n", + "nptz = 101\n", + "x0 = 0.\n", + "x1 = 1000.\n", + "compx = x1-x0\n", + "z0 = 0.\n", + "z1 = 1000.\n", + "compz = z1-z0\n", + "hxv = (x1-x0)/(nptx-1)\n", + "hzv = (z1-z0)/(nptz-1)" ] }, { @@ -280,8 +280,8 @@ "metadata": {}, "outputs": [], "source": [ - "habctype = 3\n", - "habcw = 2" + "habctype = 3\n", + "habcw = 2" ] }, { @@ -297,8 +297,8 @@ "metadata": {}, "outputs": [], "source": [ - "npmlx = 20\n", - "npmlz = 20" + "npmlx = 20\n", + "npmlz = 20" ] }, { @@ -331,18 +331,18 @@ "metadata": {}, "outputs": [], "source": [ - "nptx = nptx + 2*npmlx\n", - "nptz = nptz + 1*npmlz\n", - "x0 = x0 - hxv*npmlx\n", - "x1 = x1 + hxv*npmlx\n", - "compx = x1-x0\n", - "z0 = z0\n", - "z1 = z1 + hzv*npmlz\n", - "compz = z1-z0\n", - "origin = (x0,z0)\n", - "extent = (compx,compz)\n", - "shape = (nptx,nptz)\n", - "spacing = (hxv,hzv)" + "nptx = nptx + 2*npmlx\n", + "nptz = nptz + 1*npmlz\n", + "x0 = x0 - hxv*npmlx\n", + "x1 = x1 + hxv*npmlx\n", + "compx = x1-x0\n", + "z0 = z0\n", + "z1 = z1 + 
hzv*npmlz\n", + "compz = z1-z0\n", + "origin = (x0, z0)\n", + "extent = (compx, compz)\n", + "shape = (nptx, nptz)\n", + "spacing = (hxv, hzv)" ] }, { @@ -362,9 +362,12 @@ "source": [ "class d0domain(SubDomain):\n", " name = 'd0'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " return {x: x, z: z}\n", + "\n", + "\n", "d0_domain = d0domain()" ] }, @@ -389,26 +392,37 @@ "source": [ "class d1domain(SubDomain):\n", " name = 'd1'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('left',npmlx), z: z}\n", + " return {x: ('left', npmlx), z: z}\n", + "\n", + "\n", "d1_domain = d1domain()\n", "\n", + "\n", "class d2domain(SubDomain):\n", " name = 'd2'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " return {x: ('right',npmlx), z: z}\n", + " return {x: ('right', npmlx), z: z}\n", + "\n", + "\n", "d2_domain = d2domain()\n", "\n", + "\n", "class d3domain(SubDomain):\n", " name = 'd3'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", - " if((habctype==3)&(habcw==1)):\n", - " return {x: x, z: ('right',npmlz)}\n", + " if((habctype == 3) & (habcw == 1)):\n", + " return {x: x, z: ('right', npmlz)}\n", " else:\n", - " return {x: ('middle', npmlx, npmlx), z: ('right',npmlz)}\n", + " return {x: ('middle', npmlx, npmlx), z: ('right', npmlz)}\n", + "\n", + "\n", "d3_domain = d3domain()" ] }, @@ -429,7 +443,13 @@ "metadata": {}, "outputs": [], "source": [ - "grid = Grid(origin=origin, extent=extent, shape=shape, subdomains=(d0_domain,d1_domain,d2_domain,d3_domain), dtype=np.float64)" + "grid = Grid(\n", + " origin=origin,\n", + " extent=extent,\n", + " shape=shape,\n", + " subdomains=(d0_domain, d1_domain, d2_domain, d3_domain),\n", + " dtype=np.float64\n", + ")" ] }, { @@ -438,9 +458,9 @@ "metadata": {}, "outputs": [], "source": [ - "v0 = np.zeros((nptx,nptz))\n", - "X0 = np.linspace(x0,x1,nptx)\n", - "Z0 = np.linspace(z0,z1,nptz)\n", + "v0 = np.zeros((nptx, nptz))\n", + "X0 = 
np.linspace(x0, x1, nptx)\n", + "Z0 = np.linspace(z0, z1, nptz)\n", "\n", "x10 = x0+lx\n", "x11 = x1-lx\n", @@ -454,18 +474,20 @@ "pxm = 0\n", "pzm = 0\n", "\n", - "for i in range(0,nptx):\n", - " if(X0[i]==xm): pxm = i\n", + "for i in range(0, nptx):\n", + " if(X0[i] == xm):\n", + " pxm = i\n", "\n", - "for j in range(0,nptz):\n", - " if(Z0[j]==zm): pzm = j\n", + "for j in range(0, nptz):\n", + " if(Z0[j] == zm):\n", + " pzm = j\n", "\n", "p0 = 0\n", "p1 = pzm\n", "p2 = nptz\n", "\n", - "v0[0:nptx,p0:p1] = 1.5\n", - "v0[0:nptx,p1:p2] = 2.5" + "v0[0:nptx, p0:p1] = 1.5\n", + "v0[0:nptx, p1:p2] = 2.5" ] }, { @@ -482,22 +504,28 @@ "outputs": [], "source": [ "def graph2dvel(vel):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " scale = np.amax(vel[npmlx:-npmlx,0:-npmlz])\n", - " extent = [fscale*(x0+lx),fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", - " fig = plot.imshow(np.transpose(vel[npmlx:-npmlx,0:-npmlz]), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.title('Velocity Profile')\n", - " plot.grid()\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " cbar.set_label('Velocity [km/s]')\n", - " plot.show()" + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " scale = np.amax(vel[npmlx:-npmlx, 0:-npmlz])\n", + " extent = [fscale*(x0+lx), fscale*(x1-lx), fscale*(z1-lz), fscale*(z0)]\n", + " fig = plot.imshow(\n", + " np.transpose(vel[npmlx:-npmlx, 0:-npmlz]),\n", + " vmin=0.,\n", + " vmax=scale,\n", + " cmap=cm.seismic,\n", + " extent=extent\n", + " )\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " 
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.title('Velocity Profile')\n", + " plot.grid()\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " cbar.set_label('Velocity [km/s]')\n", + " plot.show()" ] }, { @@ -551,13 +579,13 @@ "metadata": {}, "outputs": [], "source": [ - "t0 = 0.\n", - "tn = 1000.\n", - "CFL = 0.4\n", - "vmax = np.amax(v0)\n", - "dtmax = np.float64((min(hxv,hzv)*CFL)/(vmax))\n", + "t0 = 0.\n", + "tn = 1000.\n", + "CFL = 0.4\n", + "vmax = np.amax(v0)\n", + "dtmax = np.float64((min(hxv, hzv)*CFL)/(vmax))\n", "ntmax = int((tn-t0)/dtmax)+1\n", - "dt0 = np.float64((tn-t0)/ntmax)" + "dt0 = np.float64((tn-t0)/ntmax)" ] }, { @@ -573,8 +601,8 @@ "metadata": {}, "outputs": [], "source": [ - "time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)\n", - "nt = time_range.num - 1" + "time_range = TimeAxis(start=t0, stop=tn, num=ntmax+1)\n", + "nt = time_range.num - 1" ] }, { @@ -590,10 +618,10 @@ "metadata": {}, "outputs": [], "source": [ - "(hx,hz) = grid.spacing_map\n", - "(x, z) = grid.dimensions\n", - "t = grid.stepping_dim\n", - "dt = grid.stepping_dim.spacing" + "(hx, hz) = grid.spacing_map\n", + "(x, z) = grid.dimensions\n", + "t = grid.stepping_dim\n", + "dt = grid.stepping_dim.spacing" ] }, { @@ -609,10 +637,10 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.01\n", + "f0 = 0.01\n", "nsource = 1\n", - "xposf = 0.5*(compx-2*npmlx*hxv)\n", - "zposf = hzv" + "xposf = 0.5*(compx-2*npmlx*hxv)\n", + "zposf = hzv" ] }, { @@ -621,7 +649,15 @@ "metadata": {}, "outputs": [], "source": [ - "src = RickerSource(name='src',grid=grid,f0=f0,npoint=nsource,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "src = RickerSource(\n", + " name='src',\n", + " grid=grid,\n", + " f0=f0,\n", + " npoint=nsource,\n", + " time_range=time_range,\n", + " staggered=NODE,\n", + " 
dtype=np.float64\n", + ")\n", "src.coordinates.data[:, 0] = xposf\n", "src.coordinates.data[:, 1] = zposf" ] @@ -668,9 +704,9 @@ "metadata": {}, "outputs": [], "source": [ - "nrec = nptx\n", - "nxpos = np.linspace(x0,x1,nrec)\n", - "nzpos = hzv" + "nrec = nptx\n", + "nxpos = np.linspace(x0, x1, nrec)\n", + "nzpos = hzv" ] }, { @@ -679,7 +715,7 @@ "metadata": {}, "outputs": [], "source": [ - "rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)\n", + "rec = Receiver(name='rec', grid=grid, npoint=nrec, time_range=time_range, staggered=NODE, dtype=np.float64)\n", "rec.coordinates.data[:, 0] = nxpos\n", "rec.coordinates.data[:, 1] = nzpos" ] @@ -697,7 +733,7 @@ "metadata": {}, "outputs": [], "source": [ - "u = TimeFunction(name=\"u\",grid=grid,time_order=2,space_order=2,staggered=NODE,dtype=np.float64)" + "u = TimeFunction(name=\"u\", grid=grid, time_order=2, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -706,8 +742,8 @@ "metadata": {}, "outputs": [], "source": [ - "vel = Function(name=\"vel\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "vel.data[:,:] = v0[:,:]" + "vel = Function(name=\"vel\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "vel.data[:, :] = v0[:, :]" ] }, { @@ -723,7 +759,7 @@ "metadata": {}, "outputs": [], "source": [ - "src_term = src.inject(field=u.forward,expr=src*dt**2*vel**2)" + "src_term = src.inject(field=u.forward, expr=src*dt**2*vel**2)" ] }, { @@ -757,69 +793,69 @@ "source": [ "def generateweights():\n", "\n", - " weightsx = np.zeros(npmlx)\n", - " weightsz = np.zeros(npmlz)\n", - " Mweightsx = np.zeros((nptx,nptz))\n", - " Mweightsz = np.zeros((nptx,nptz))\n", + " weightsx = np.zeros(npmlx)\n", + " weightsz = np.zeros(npmlz)\n", + " Mweightsx = np.zeros((nptx, nptz))\n", + " Mweightsz = np.zeros((nptx, nptz))\n", "\n", - " if(habcw==1):\n", + " if(habcw == 1):\n", "\n", - " for i in range(0,npmlx):\n", + " for i in range(0, npmlx):\n", " 
weightsx[i] = (npmlx-i)/(npmlx)\n", "\n", - " for i in range(0,npmlz):\n", + " for i in range(0, npmlz):\n", " weightsz[i] = (npmlz-i)/(npmlz)\n", "\n", - " if(habcw==2):\n", + " if(habcw == 2):\n", "\n", " mx = 2\n", " mz = 2\n", "\n", - " if(habctype==3):\n", + " if(habctype == 3):\n", "\n", - " alphax = 1.0 + 0.15*(npmlx-mx)\n", - " alphaz = 1.0 + 0.15*(npmlz-mz)\n", + " alphax = 1.0 + 0.15*(npmlx-mx)\n", + " alphaz = 1.0 + 0.15*(npmlz-mz)\n", "\n", " else:\n", "\n", - " alphax = 1.5 + 0.07*(npmlx-mx)\n", - " alphaz = 1.5 + 0.07*(npmlz-mz)\n", + " alphax = 1.5 + 0.07*(npmlx-mx)\n", + " alphaz = 1.5 + 0.07*(npmlz-mz)\n", "\n", - " for i in range(0,npmlx):\n", + " for i in range(0, npmlx):\n", "\n", - " if(0<=i<=(mx)):\n", + " if(0 <= i <= (mx)):\n", " weightsx[i] = 1\n", - " elif((mx+1)<=i<=npmlx-1):\n", + " elif((mx+1) <= i <= npmlx-1):\n", " weightsx[i] = ((npmlx-i)/(npmlx-mx))**(alphax)\n", " else:\n", " weightsx[i] = 0\n", "\n", - " for i in range(0,npmlz):\n", + " for i in range(0, npmlz):\n", "\n", - " if(0<=i<=(mz)):\n", + " if(0 <= i <= (mz)):\n", " weightsz[i] = 1\n", - " elif((mz+1)<=i<=npmlz-1):\n", + " elif((mz+1) <= i <= npmlz-1):\n", " weightsz[i] = ((npmlz-i)/(npmlz-mz))**(alphaz)\n", " else:\n", " weightsz[i] = 0\n", "\n", - " for k in range(0,npmlx):\n", + " for k in range(0, npmlx):\n", "\n", " ai = k\n", " af = nptx - k - 1\n", " bi = 0\n", " bf = nptz - k\n", - " Mweightsx[ai,bi:bf] = weightsx[k]\n", - " Mweightsx[af,bi:bf] = weightsx[k]\n", + " Mweightsx[ai, bi:bf] = weightsx[k]\n", + " Mweightsx[af, bi:bf] = weightsx[k]\n", "\n", - " for k in range(0,npmlz):\n", + " for k in range(0, npmlz):\n", "\n", " ai = k\n", " af = nptx - k\n", " bf = nptz - k - 1\n", - " Mweightsz[ai:af,bf] = weightsz[k]\n", + " Mweightsz[ai:af, bf] = weightsz[k]\n", "\n", - " return Mweightsx,Mweightsz" + " return Mweightsx, Mweightsz" ] }, { @@ -835,7 +871,7 @@ "metadata": {}, "outputs": [], "source": [ - "Mweightsx,Mweightsz = generateweights();" + "Mweightsx, 
Mweightsz = generateweights();" ] }, { @@ -853,12 +889,12 @@ "source": [ "def graph2dweight(D):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", + " plot.figure(figsize=(16, 8))\n", " fscale = 1/10**(-3)\n", " fscale = 10**(-3)\n", - " scale = np.amax(D)\n", - " extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]\n", - " fig = plot.imshow(np.transpose(D), vmin=0.,vmax=scale, cmap=cm.seismic, extent=extent)\n", + " scale = np.amax(D)\n", + " extent = [fscale*x0, fscale*x1, fscale*z1, fscale*z0]\n", + " fig = plot.imshow(np.transpose(D), vmin=0., vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.title('Weight Function')\n", @@ -960,11 +996,11 @@ "metadata": {}, "outputs": [], "source": [ - "weightsx = Function(name=\"weightsx\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "weightsx.data[:,:] = Mweightsx[:,:]\n", + "weightsx = Function(name=\"weightsx\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "weightsx.data[:, :] = Mweightsx[:, :]\n", "\n", - "weightsz = Function(name=\"weightsz\",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "weightsz.data[:,:] = Mweightsz[:,:]" + "weightsz = Function(name=\"weightsz\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "weightsz.data[:, :] = Mweightsz[:, :]" ] }, { @@ -980,9 +1016,9 @@ "metadata": {}, "outputs": [], "source": [ - "u1 = Function(name=\"u1\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "u2 = Function(name=\"u2\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)\n", - "u3 = Function(name=\"u3\" ,grid=grid,space_order=2,staggered=NODE,dtype=np.float64)" + "u1 = Function(name=\"u1\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + "u2 = Function(name=\"u2\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)\n", + 
"u3 = Function(name=\"u3\", grid=grid, space_order=2, staggered=NODE, dtype=np.float64)" ] }, { @@ -1004,7 +1040,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil01 = [Eq(u1,u.backward),Eq(u2,u),Eq(u3,u.forward)]" + "stencil01 = [Eq(u1, u.backward), Eq(u2, u), Eq(u3, u.forward)]" ] }, { @@ -1020,7 +1056,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil02 = [Eq(u3,u.forward)]" + "stencil02 = [Eq(u3, u.forward)]" ] }, { @@ -1056,7 +1092,7 @@ "metadata": {}, "outputs": [], "source": [ - "stencil0 = Eq(u.forward, solve(pde0,u.forward))" + "stencil0 = Eq(u.forward, solve(pde0, u.forward))" ] }, { @@ -1082,22 +1118,22 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==1):\n", + "if(habctype == 1):\n", "\n", " # Region B_{1}\n", - " aux1 = ((-vel[x,z]*dt+hx)*u2[x,z] + (vel[x,z]*dt+hx)*u2[x+1,z] + (vel[x,z]*dt-hx)*u3[x+1,z])/(vel[x,z]*dt+hx)\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " aux1 = ((-vel[x, z]*dt+hx)*u2[x, z] + (vel[x, z]*dt+hx)*u2[x+1, z] + (vel[x, z]*dt-hx)*u3[x+1, z])/(vel[x, z]*dt+hx)\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", - " aux2 = ((-vel[x,z]*dt+hx)*u2[x,z] + (vel[x,z]*dt+hx)*u2[x-1,z] + (vel[x,z]*dt-hx)*u3[x-1,z])/(vel[x,z]*dt+hx)\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " aux2 = ((-vel[x, z]*dt+hx)*u2[x, z] + (vel[x, z]*dt+hx)*u2[x-1, z] + (vel[x, z]*dt-hx)*u3[x-1, z])/(vel[x, z]*dt+hx)\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", - " aux3 = ((-vel[x,z]*dt+hz)*u2[x,z] + (vel[x,z]*dt+hz)*u2[x,z-1] + (vel[x,z]*dt-hz)*u3[x,z-1])/(vel[x,z]*dt+hz)\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + 
weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])" + " aux3 = ((-vel[x, z]*dt+hz)*u2[x, z] + (vel[x, z]*dt+hz)*u2[x, z-1] + (vel[x, z]*dt-hz)*u3[x, z-1])/(vel[x, z]*dt+hz)\n", + " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])" ] }, { @@ -1113,62 +1149,85 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==2):\n", + "if(habctype == 2):\n", "\n", " # Region B_{1}\n", - " cte11 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", - " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]*vel[x,z]\n", - " cte31 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", - " cte41 = (1/(dt**2))\n", - " cte51 = (1/(4*hz**2))*vel[x,z]**2\n", - "\n", - " aux1 = (cte21*(u3[x+1,z] + u1[x,z]) + cte31*u1[x+1,z] + cte41*(u2[x,z]+u2[x+1,z]) + cte51*(u3[x+1,z+1] + u3[x+1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte11\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " cte11 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z]\n", + " cte21 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z] - (1/(2*hz**2))*vel[x, z]*vel[x, z]\n", + " cte31 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x, z]\n", + " cte41 = (1/(dt**2))\n", + " cte51 = (1/(4*hz**2))*vel[x, z]**2\n", + "\n", + " aux1 = (\n", + " cte21*(u3[x+1, z] + u1[x, z])\n", + " + cte31*u1[x+1, z] + cte41*(u2[x, z]+u2[x+1, z])\n", + " + cte51*(u3[x+1, z+1] + u3[x+1, z-1] + u1[x, z+1] + u1[x, z-1])\n", + " )/cte11\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", - " cte12 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z]\n", - " cte22 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x,z] - (1/(2*hz**2))*vel[x,z]**2\n", - " cte32 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x,z]\n", - " cte42 = (1/(dt**2))\n", - " cte52 = (1/(4*hz**2))*vel[x,z]*vel[x,z]\n", - "\n", 
- " aux2 = (cte22*(u3[x-1,z] + u1[x,z]) + cte32*u1[x-1,z] + cte42*(u2[x,z]+u2[x-1,z]) + cte52*(u3[x-1,z+1] + u3[x-1,z-1] + u1[x,z+1] + u1[x,z-1]))/cte12\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " cte12 = (1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z]\n", + " cte22 = -(1/(2*dt**2)) + (1/(2*dt*hx))*vel[x, z] - (1/(2*hz**2))*vel[x, z]**2\n", + " cte32 = -(1/(2*dt**2)) - (1/(2*dt*hx))*vel[x, z]\n", + " cte42 = (1/(dt**2))\n", + " cte52 = (1/(4*hz**2))*vel[x, z]*vel[x, z]\n", + "\n", + " aux2 = (\n", + " cte22*(u3[x-1, z] + u1[x, z])\n", + " + cte32*u1[x-1, z] + cte42*(u2[x, z]+u2[x-1, z])\n", + " + cte52*(u3[x-1, z+1] + u3[x-1, z-1] + u1[x, z+1] + u1[x, z-1])\n", + " )/cte12\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", - " cte13 = (1/(2*dt**2)) + (1/(2*dt*hz))*vel[x,z]\n", - " cte23 = -(1/(2*dt**2)) + (1/(2*dt*hz))*vel[x,z] - (1/(2*hx**2))*vel[x,z]**2\n", - " cte33 = -(1/(2*dt**2)) - (1/(2*dt*hz))*vel[x,z]\n", - " cte43 = (1/(dt**2))\n", - " cte53 = (1/(4*hx**2))*vel[x,z]*vel[x,z]\n", - "\n", - " aux3 = (cte23*(u3[x,z-1] + u1[x,z]) + cte33*u1[x,z-1] + cte43*(u2[x,z]+u2[x,z-1]) + cte53*(u3[x+1,z-1] + u3[x-1,z-1] + u1[x+1,z] + u1[x-1,z]))/cte13\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])\n", + " cte13 = (1/(2*dt**2)) + (1/(2*dt*hz))*vel[x, z]\n", + " cte23 = -(1/(2*dt**2)) + (1/(2*dt*hz))*vel[x, z] - (1/(2*hx**2))*vel[x, z]**2\n", + " cte33 = -(1/(2*dt**2)) - (1/(2*dt*hz))*vel[x, z]\n", + " cte43 = (1/(dt**2))\n", + " cte53 = (1/(4*hx**2))*vel[x, z]*vel[x, z]\n", + "\n", + " aux3 = (\n", + " cte23*(u3[x, z-1] + u1[x, z])\n", + " + cte33*u1[x, z-1] + cte43*(u2[x, z]+u2[x, z-1])\n", + " + cte53*(u3[x+1, z-1] + u3[x-1, z-1] + u1[x+1, z] + u1[x-1, z])\n", + " )/cte13\n", + " 
pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])\n", "\n", " # Red point right side\n", - " stencil4 = [Eq(u[t+1,nptx-1-k,nptz-1-k],(1-weightsz[nptx-1-k,nptz-1-k])*u3[nptx-1-k,nptz-1-k] +\n", - " weightsz[nptx-1-k,nptz-1-k]*(((-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-1-k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u3[nptx-2-k,nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-1-k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt))*u2[nptx-2-k,nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k,nptz-1-k]*dt)))) for k in range(0,npmlz)]\n", + " stencil4 = [\n", + " Eq(\n", + " u[t+1, nptx-1-k, nptz-1-k],\n", + " (1-weightsz[nptx-1-k, nptz-1-k])*u3[nptx-1-k, nptz-1-k]\n", + " + weightsz[nptx-1-k, nptz-1-k]*(((-(1/(4*hx))\n", + " + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u3[nptx-2-k, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-1-k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) + 
(np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt))*u2[nptx-2-k, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[nptx-1-k, nptz-1-k]*dt)))) for k in range(0, npmlz)\n", + " ]\n", "\n", " # Red point left side\n", - " stencil5 = [Eq(u[t+1,k,nptz-1-k],(1-weightsx[k,nptz-1-k] )*u3[k,nptz-1-k]\n", - " + weightsx[k,nptz-1-k]*(( (-(1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u3[k+1,nptz-2-k]\n", - " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-1-k]\n", - " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k,nptz-2-k]\n", - " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-1-k]\n", - " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt))*u2[k+1,nptz-2-k])\n", - " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k,nptz-1-k]*dt)))) for k in range(0,npmlx)]" + " stencil5 = [\n", + " Eq(\n", + " u[t+1, k, nptz-1-k],\n", + " (1-weightsx[k, nptz-1-k])*u3[k, nptz-1-k]\n", + " + weightsx[k, nptz-1-k]*(((-(1/(4*hx))\n", + " + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) - (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u3[k+1, nptz-2-k]\n", + " + (-(1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-1-k]\n", + " + (-(1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k, nptz-2-k]\n", + " + ((1/(4*hx)) - (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-1-k]\n", + " + ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt))*u2[k+1, nptz-2-k])\n", + " / ((1/(4*hx)) + (1/(4*hz)) + (np.sqrt(2))/(4*vel[k, nptz-1-k]*dt)))\n", + " ) for k in range(0, npmlx)\n", + " ]" ] }, { @@ -1184,7 
+1243,7 @@ "metadata": {}, "outputs": [], "source": [ - "if(habctype==3):\n", + "if(habctype == 3):\n", "\n", " alpha1 = 0.0\n", " alpha2 = np.pi/4\n", @@ -1196,83 +1255,104 @@ " # Region B_{1}\n", " gama111 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama121 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama131 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", - " gama141 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", + " gama131 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x, z]\n", + " gama141 = np.cos(alpha1)*(b1)*(1/hx)*vel[x, z]\n", "\n", " gama211 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama221 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama231 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", - " gama241 = np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", + " gama231 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x, z]\n", + " gama241 = np.cos(alpha2)*(b2)*(1/hx)*vel[x, z]\n", "\n", - " c111 = gama111 + gama131\n", + " c111 = gama111 + gama131\n", " c121 = -gama111 + gama141\n", - " c131 = gama121 - gama131\n", + " c131 = gama121 - gama131\n", " c141 = -gama121 - gama141\n", "\n", - " c211 = gama211 + gama231\n", + " c211 = gama211 + gama231\n", " c221 = -gama211 + gama241\n", - " c231 = gama221 - gama231\n", + " c231 = gama221 - gama231\n", " c241 = -gama221 - gama241\n", "\n", - " aux1 = ( u2[x,z]*(-c111*c221-c121*c211) + u3[x+1,z]*(-c111*c231-c131*c211) + u2[x+1,z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", - " + u1[x,z]*(-c121*c221) + u1[x+1,z]*(-c121*c241-c141*c221) + u3[x+2,z]*(-c131*c231) +u2[x+2,z]*(-c131*c241-c141*c231)\n", - " + u1[x+2,z]*(-c141*c241))/(c111*c211)\n", - " pde1 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux1\n", - " stencil1 = Eq(u.forward,pde1,subdomain = grid.subdomains['d1'])\n", + " aux1 = (\n", + " u2[x, z]*(-c111*c221-c121*c211)\n", + " + u3[x+1, z]*(-c111*c231-c131*c211)\n", + " + u2[x+1, z]*(-c111*c241-c121*c231-c141*c211-c131*c221)\n", + " + u1[x, z]*(-c121*c221)\n", + " + u1[x+1, z]*(-c121*c241-c141*c221)\n", + " + u3[x+2, z]*(-c131*c231)\n", + " + u2[x+2, z]*(-c131*c241-c141*c231)\n", + " 
+ u1[x+2, z]*(-c141*c241)\n", + " )/(c111*c211)\n", + " pde1 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux1\n", + " stencil1 = Eq(u.forward, pde1, subdomain=grid.subdomains['d1'])\n", "\n", " # Region B_{3}\n", " gama112 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama122 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama132 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x,z]\n", - " gama142 = np.cos(alpha1)*(b1)*(1/hx)*vel[x,z]\n", + " gama132 = np.cos(alpha1)*(1-b1)*(1/hx)*vel[x, z]\n", + " gama142 = np.cos(alpha1)*(b1)*(1/hx)*vel[x, z]\n", "\n", " gama212 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama222 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama232 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x,z]\n", - " gama242 = np.cos(alpha2)*(b2)*(1/hx)*vel[x,z]\n", + " gama232 = np.cos(alpha2)*(1-b2)*(1/hx)*vel[x, z]\n", + " gama242 = np.cos(alpha2)*(b2)*(1/hx)*vel[x, z]\n", "\n", - " c112 = gama112 + gama132\n", + " c112 = gama112 + gama132\n", " c122 = -gama112 + gama142\n", - " c132 = gama122 - gama132\n", + " c132 = gama122 - gama132\n", " c142 = -gama122 - gama142\n", "\n", - " c212 = gama212 + gama232\n", + " c212 = gama212 + gama232\n", " c222 = -gama212 + gama242\n", - " c232 = gama222 - gama232\n", + " c232 = gama222 - gama232\n", " c242 = -gama222 - gama242\n", "\n", - " aux2 = ( u2[x,z]*(-c112*c222-c122*c212) + u3[x-1,z]*(-c112*c232-c132*c212) + u2[x-1,z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", - " + u1[x,z]*(-c122*c222) + u1[x-1,z]*(-c122*c242-c142*c222) + u3[x-2,z]*(-c132*c232) +u2[x-2,z]*(-c132*c242-c142*c232)\n", - " + u1[x-2,z]*(-c142*c242))/(c112*c212)\n", - " pde2 = (1-weightsx[x,z])*u3[x,z] + weightsx[x,z]*aux2\n", - " stencil2 = Eq(u.forward,pde2,subdomain = grid.subdomains['d2'])\n", + " aux2 = (\n", + " u2[x, z]*(-c112*c222-c122*c212)\n", + " + u3[x-1, z]*(-c112*c232-c132*c212)\n", + " + u2[x-1, z]*(-c112*c242-c122*c232-c142*c212-c132*c222)\n", + " + u1[x, z]*(-c122*c222)\n", + " + u1[x-1, z]*(-c122*c242-c142*c222)\n", + " + u3[x-2, z]*(-c132*c232)\n", + " + u2[x-2, 
z]*(-c132*c242-c142*c232)\n", + " + u1[x-2, z]*(-c142*c242)\n", + " )/(c112*c212)\n", + " pde2 = (1-weightsx[x, z])*u3[x, z] + weightsx[x, z]*aux2\n", + " stencil2 = Eq(u.forward, pde2, subdomain=grid.subdomains['d2'])\n", "\n", " # Region B_{2}\n", " gama113 = np.cos(alpha1)*(1-a1)*(1/dt)\n", " gama123 = np.cos(alpha1)*(a1)*(1/dt)\n", - " gama133 = np.cos(alpha1)*(1-b1)*(1/hz)*vel[x,z]\n", - " gama143 = np.cos(alpha1)*(b1)*(1/hz)*vel[x,z]\n", + " gama133 = np.cos(alpha1)*(1-b1)*(1/hz)*vel[x, z]\n", + " gama143 = np.cos(alpha1)*(b1)*(1/hz)*vel[x, z]\n", "\n", " gama213 = np.cos(alpha2)*(1-a2)*(1/dt)\n", " gama223 = np.cos(alpha2)*(a2)*(1/dt)\n", - " gama233 = np.cos(alpha2)*(1-b2)*(1/hz)*vel[x,z]\n", - " gama243 = np.cos(alpha2)*(b2)*(1/hz)*vel[x,z]\n", + " gama233 = np.cos(alpha2)*(1-b2)*(1/hz)*vel[x, z]\n", + " gama243 = np.cos(alpha2)*(b2)*(1/hz)*vel[x, z]\n", "\n", - " c113 = gama113 + gama133\n", + " c113 = gama113 + gama133\n", " c123 = -gama113 + gama143\n", - " c133 = gama123 - gama133\n", + " c133 = gama123 - gama133\n", " c143 = -gama123 - gama143\n", "\n", - " c213 = gama213 + gama233\n", + " c213 = gama213 + gama233\n", " c223 = -gama213 + gama243\n", - " c233 = gama223 - gama233\n", + " c233 = gama223 - gama233\n", " c243 = -gama223 - gama243\n", "\n", - " aux3 = ( u2[x,z]*(-c113*c223-c123*c213) + u3[x,z-1]*(-c113*c233-c133*c213) + u2[x,z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", - " + u1[x,z]*(-c123*c223) + u1[x,z-1]*(-c123*c243-c143*c223) + u3[x,z-2]*(-c133*c233) +u2[x,z-2]*(-c133*c243-c143*c233)\n", - " + u1[x,z-2]*(-c143*c243))/(c113*c213)\n", - " pde3 = (1-weightsz[x,z])*u3[x,z] + weightsz[x,z]*aux3\n", - " stencil3 = Eq(u.forward,pde3,subdomain = grid.subdomains['d3'])" + " aux3 = (\n", + " u2[x, z]*(-c113*c223-c123*c213)\n", + " + u3[x, z-1]*(-c113*c233-c133*c213)\n", + " + u2[x, z-1]*(-c113*c243-c123*c233-c143*c213-c133*c223)\n", + " + u1[x, z]*(-c123*c223)\n", + " + u1[x, z-1]*(-c123*c243-c143*c223)\n", + " + u3[x, 
z-2]*(-c133*c233)\n", + " + u2[x, z-2]*(-c133*c243-c143*c233)\n", + " + u1[x, z-2]*(-c143*c243)\n", + " )/(c113*c213)\n", + " pde3 = (1-weightsz[x, z])*u3[x, z] + weightsz[x, z]*aux3\n", + " stencil3 = Eq(u.forward, pde3, subdomain=grid.subdomains['d3'])" ] }, { @@ -1288,7 +1368,7 @@ "metadata": {}, "outputs": [], "source": [ - "bc = [Eq(u[t+1,x,0],u[t+1,x,1])]" + "bc = [Eq(u[t+1, x, 0], u[t+1, x, 1])]" ] }, { @@ -1321,10 +1401,24 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "if(habctype!=2):\n", - " op = Operator([stencil0] + src_term + [stencil01,stencil3,stencil02,stencil2,stencil1] + bc + rec_term,subs=grid.spacing_map)\n", + "if(habctype != 2):\n", + " op = Operator(\n", + " [stencil0]\n", + " + src_term\n", + " + [stencil01, stencil3, stencil02, stencil2, stencil1]\n", + " + bc\n", + " + rec_term,\n", + " subs=grid.spacing_map\n", + " )\n", "else:\n", - " op = Operator([stencil0] + src_term + [stencil01,stencil3,stencil02,stencil2,stencil1,stencil02,stencil4,stencil5] + bc + rec_term,subs=grid.spacing_map)" + " op = Operator(\n", + " [stencil0]\n", + " + src_term\n", + " + [stencil01, stencil3, stencil02, stencil2, stencil1, stencil02, stencil4, stencil5]\n", + " + bc\n", + " + rec_term,\n", + " subs=grid.spacing_map\n", + " )" ] }, { @@ -1340,7 +1434,7 @@ "metadata": {}, "outputs": [], "source": [ - "u.data[:] = 0.\n", + "u.data[:] = 0.\n", "u1.data[:] = 0.\n", "u2.data[:] = 0.\n", "u3.data[:] = 0." 
@@ -1394,7 +1488,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "op(time=nt,dt=dt0)" + "op(time=nt, dt=dt0)" ] }, { @@ -1410,23 +1504,26 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2d(U,i):\n", + "def graph2d(U, i):\n", " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscale = 1/10**(3)\n", - " x0pml = x0 + npmlx*hxv\n", - " x1pml = x1 - npmlx*hxv\n", - " z0pml = z0\n", - " z1pml = z1 - npmlz*hzv\n", - " scale = np.amax(U[npmlx:-npmlx,0:-npmlz])/10.\n", - " extent = [fscale*x0pml,fscale*x1pml,fscale*z1pml,fscale*z0pml]\n", - " fig = plot.imshow(np.transpose(U[npmlx:-npmlx,0:-npmlz]),vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.figure(figsize=(16, 8))\n", + " fscale = 1/10**(3)\n", + " x0pml = x0 + npmlx*hxv\n", + " x1pml = x1 - npmlx*hxv\n", + " z0pml = z0\n", + " z1pml = z1 - npmlz*hzv\n", + " scale = np.amax(U[npmlx:-npmlx, 0:-npmlz])/10.\n", + " extent = [fscale*x0pml, fscale*x1pml, fscale*z1pml, fscale*z0pml]\n", + " fig = plot.imshow(np.transpose(U[npmlx:-npmlx, 0:-npmlz]), vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", " plot.axis('equal')\n", - " if(i==1): plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", - " if(i==2): plot.title('Map - Acoustic Problem with Devito - HABC A2')\n", - " if(i==3): plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", + " if(i == 1):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC A1')\n", + " if(i == 2):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC A2')\n", + " if(i == 3):\n", + " plot.title('Map - Acoustic Problem with Devito - HABC Higdon')\n", " plot.grid()\n", " ax = plot.gca()\n", " divider = make_axes_locatable(ax)\n", @@ -1465,7 +1562,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2d(u.data[0,:,:],habctype)" + 
"graph2d(u.data[0, :, :], habctype)" ] }, { @@ -1481,27 +1578,30 @@ "metadata": {}, "outputs": [], "source": [ - "def graph2drec(rec,i):\n", - " plot.figure()\n", - " plot.figure(figsize=(16,8))\n", - " fscaled = 1/10**(3)\n", - " fscalet = 1/10**(3)\n", - " x0pml = x0 + npmlx*hxv\n", - " x1pml = x1 - npmlx*hxv\n", - " scale = np.amax(rec[:,npmlx:-npmlx])/10.\n", - " extent = [fscaled*x0pml,fscaled*x1pml, fscalet*tn, fscalet*t0]\n", - " fig = plot.imshow(rec[:,npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", - " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", - " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", - " plot.axis('equal')\n", - " if(i==1): plot.title('Receivers Signal Profile - Devito with HABC A1')\n", - " if(i==2): plot.title('Receivers Signal Profile - Devito with HABC A2')\n", - " if(i==3): plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", - " ax = plot.gca()\n", - " divider = make_axes_locatable(ax)\n", - " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", - " cbar = plot.colorbar(fig, cax=cax, format='%.2e')\n", - " plot.show()" + "def graph2drec(rec, i):\n", + " plot.figure()\n", + " plot.figure(figsize=(16, 8))\n", + " fscaled = 1/10**(3)\n", + " fscalet = 1/10**(3)\n", + " x0pml = x0 + npmlx*hxv\n", + " x1pml = x1 - npmlx*hxv\n", + " scale = np.amax(rec[:, npmlx:-npmlx])/10.\n", + " extent = [fscaled*x0pml, fscaled*x1pml, fscalet*tn, fscalet*t0]\n", + " fig = plot.imshow(rec[:, npmlx:-npmlx], vmin=-scale, vmax=scale, cmap=cm.seismic, extent=extent)\n", + " plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))\n", + " plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f s'))\n", + " plot.axis('equal')\n", + " if(i == 1):\n", + " plot.title('Receivers Signal Profile - Devito with HABC A1')\n", + " if(i == 2):\n", + " plot.title('Receivers Signal Profile - Devito with HABC A2')\n", + " 
if(i == 3):\n", + " plot.title('Receivers Signal Profile - Devito with HABC Higdon')\n", + " ax = plot.gca()\n", + " divider = make_axes_locatable(ax)\n", + " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n", + " _ = plot.colorbar(fig, cax=cax, format='%.2e')\n", + " plot.show()" ] }, { @@ -1532,7 +1632,7 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "graph2drec(rec.data,habctype)" + "graph2drec(rec.data, habctype)" ] }, { diff --git a/examples/seismic/acoustic/accuracy.ipynb b/examples/seismic/acoustic/accuracy.ipynb index 7c9bccc058..7c76eb9192 100644 --- a/examples/seismic/acoustic/accuracy.ipynb +++ b/examples/seismic/acoustic/accuracy.ipynb @@ -88,7 +88,7 @@ "t0 = 0.\n", "tn = dt * (nt-1)\n", "time = np.linspace(t0, tn, nt)\n", - "print(\"t0, tn, dt, nt; %.4f %.4f %.4f %d\" % (t0, tn, dt, nt))\n", + "print(f't0, tn, dt, nt; {t0:.4f} {tn:.4f} {dt:.4f} {nt:d}')\n", "# Source peak frequency in KHz\n", "f0 = .09" ] @@ -141,10 +141,10 @@ "rec_coordinates = np.empty((1, 2))\n", "rec_coordinates[:, :] = 260.\n", "\n", - "print(\"The computational Grid has (%s, %s) grid points \"\n", - " \"and a physical extent of (%sm, %sm)\" % (*model.grid.shape, *model.grid.extent))\n", - "print(\"Source is at the center with coordinates (%sm, %sm)\" % tuple(src_coordinates[0]))\n", - "print(\"Receiver (single receiver) is located at (%sm, %sm) \" % tuple(rec_coordinates[0]))\n", + "print(\"The computational Grid has ({}, {}) grid points \"\n", + " \"and a physical extent of ({}m, {}m)\".format(*model.grid.shape, *model.grid.extent))\n", + "print(\"Source is at the center with coordinates ({}m, {}m)\".format(*tuple(src_coordinates[0])))\n", + "print(\"Receiver (single receiver) is located at ({}m, {}m) \".format(*tuple(rec_coordinates[0])))\n", "\n", "# Note: gets time sampling from model.critical_dt\n", "geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", @@ -198,6 +198,7 @@ "sx, sz = src_coordinates[0, :]\n", "rx, rz = rec_coordinates[0, 
:]\n", "\n", + "\n", "# Define a Ricker wavelet shifted to zero lag for the Fourier transform\n", "def ricker(f, T, dt, t0):\n", " t = np.linspace(-t0, T-t0, int(T/dt))\n", @@ -205,11 +206,12 @@ " y = (1.0 - 2.0 * tt) * np.exp(- tt)\n", " return y\n", "\n", + "\n", "def analytical(nt, model, time, **kwargs):\n", " dt = kwargs.get('dt', model.critical_dt)\n", " # Fourier constants\n", " nf = int(nt/2 + 1)\n", - " fnyq = 1. / (2 * dt)\n", + " # fnyq = 1. / (2 * dt)\n", " df = 1.0 / time[-1]\n", " faxis = df * np.arange(nf)\n", "\n", @@ -260,11 +262,15 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "print(\"Numerical data min,max,abs; %+.6e %+.6e %+.6e\" %\n", - " (np.min(ref_rec.data), np.max(ref_rec.data), np.max(np.abs(ref_rec.data)) ))\n", - "print(\"Analytic data min,max,abs; %+.6e %+.6e %+.6e\" %\n", - " (np.min(U_t), np.max(U_t), (np.max(np.abs(U_t)))))" + "# NBVAL_IGNORE_OUTPUT\n", + "print(\n", + " f'Numerical data min,max,abs; {np.min(ref_rec.data):+.6e} '\n", + " f'{np.max(ref_rec.data):+.6e} {np.max(np.abs(ref_rec.data)):+.6e}'\n", + ")\n", + "print(\n", + " f'Analytic data min,max,abs; {np.min(U_t):+.6e} '\n", + " f'{np.max(U_t):+.6e} {np.max(np.abs(U_t)):+.6e}'\n", + ")" ] }, { @@ -298,31 +304,33 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot wavefield and source/rec position\n", - "plt.figure(figsize=(8,8))\n", - "amax = np.max(np.abs(ref_u.data[1,:,:]))\n", - "plt.imshow(ref_u.data[1,:,:], vmin=-1.0 * amax, vmax=+1.0 * amax, cmap=\"seismic\")\n", - "plt.plot(2*sx+40, 2*sz+40, 'r*', markersize=11, label='source') # plot position of the source in model, add nbl for correct position\n", - "plt.plot(2*rx+40, 2*rz+40, 'k^', markersize=8, label='receiver') # plot position of the receiver in model, add nbl for correct position\n", + "plt.figure(figsize=(8, 8))\n", + "amax = np.max(np.abs(ref_u.data[1, :, :]))\n", + "plt.imshow(ref_u.data[1, :, :], vmin=-1.0 * amax, vmax=+1.0 * amax, cmap=\"seismic\")\n", + "# plot 
position of the source in model, add nbl for correct position\n", + "plt.plot(2*sx+40, 2*sz+40, 'r*', markersize=11, label='source')\n", + "# plot position of the receiver in model, add nbl for correct position\n", + "plt.plot(2*rx+40, 2*rz+40, 'k^', markersize=8, label='receiver')\n", "plt.legend()\n", "plt.xlabel('x position (m)')\n", "plt.ylabel('z position (m)')\n", "plt.savefig('wavefieldperf.pdf')\n", "\n", "# Plot trace\n", - "plt.figure(figsize=(12,8))\n", - "plt.subplot(2,1,1)\n", + "plt.figure(figsize=(12, 8))\n", + "plt.subplot(2, 1, 1)\n", "plt.plot(time, ref_rec.data[:, 0], '-b', label='numerical')\n", "plt.plot(time, U_t[:], '--r', label='analytical')\n", - "plt.xlim([0,150])\n", + "plt.xlim([0, 150])\n", "plt.ylim([1.15*np.min(U_t[:]), 1.15*np.max(U_t[:])])\n", "plt.xlabel('time (ms)')\n", "plt.ylabel('amplitude')\n", "plt.legend()\n", - "plt.subplot(2,1,2)\n", + "plt.subplot(2, 1, 2)\n", "plt.plot(time, 100 *(ref_rec.data[:, 0] - U_t[:]), '-b', label='difference x100')\n", - "plt.xlim([0,150])\n", + "plt.xlim([0, 150])\n", "plt.ylim([1.15*np.min(U_t[:]), 1.15*np.max(U_t[:])])\n", "plt.xlabel('time (ms)')\n", "plt.ylabel('amplitude x100')\n", @@ -345,7 +353,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "error_time = np.zeros(5)\n", "error_time[0] = np.linalg.norm(U_t[:-1] - ref_rec.data[:-1, 0], 2) / np.sqrt(nt)\n", "errors_plot = [(time, U_t[:-1] - ref_rec.data[:-1, 0])]\n", @@ -390,7 +398,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "dt = [0.1000, 0.0800, 0.0750, 0.0625, 0.0500]\n", "nnt = (np.divide(150.0, dt) + 1).astype(int)\n", "\n", @@ -408,8 +416,16 @@ " rec_coordinates = np.empty((1, 2))\n", " rec_coordinates[:, :] = 260.\n", "\n", - " geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,\n", - " t0=t0, tn=tn, src_type='Ricker', f0=f0, t0w=1.5/f0)\n", + " geometry = AcquisitionGeometry(\n", + " model,\n", + " rec_coordinates,\n", + " src_coordinates,\n", + " 
t0=t0,\n", + " tn=tn,\n", + " src_type='Ricker',\n", + " f0=f0,\n", + " t0w=1.5/f0\n", + " )\n", "\n", " # Note: incorrect data size will be generated here due to AcquisitionGeometry bug ...\n", " # temporarily fixed below by resizing the output from the solver\n", @@ -418,7 +434,7 @@ "\n", " solver = AcousticWaveSolver(model, geometry, time_order=2, space_order=8)\n", " ref_rec1, ref_u1, _ = solver.forward(dt=dt[i])\n", - " ref_rec1_data = ref_rec1.data[0:nnt[i],:]\n", + " ref_rec1_data = ref_rec1.data[0:nnt[i], :]\n", "\n", " time1 = np.linspace(0.0, 3000., 20*(nnt[i]-1) + 1)\n", " U_t1 = analytical(20*(nnt[i]-1) + 1, model, time1, dt=time1[1] - time1[0])\n", @@ -428,8 +444,10 @@ "\n", " ratio_d = dt[i-1]/dt[i] if i > 0 else 1.0\n", " ratio_e = error_time[i-1]/error_time[i] if i > 0 else 1.0\n", - " print(\"error for dt=%.4f is %12.6e -- ratio dt^2,ratio err; %12.6f %12.6f \\n\" %\n", - " (dt[i], error_time[i], ratio_d**2, ratio_e))\n", + " print(\n", + " f'error for dt={dt[i]:.4f} is {error_time[i]:12.6e} '\n", + " f'-- ratio dt^2,ratio err; {ratio_d**2:12.6f} {ratio_e:12.6f} \\n'\n", + " )\n", " errors_plot.append((geometry.time_axis.time_values, U_t1[:-1] - ref_rec1_data[:-1, 0]))" ] }, @@ -452,16 +470,20 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(20, 10))\n", "theory = [t**2 for t in dt]\n", "theory = [error_time[0]*th/theory[0] for th in theory]\n", "plt.loglog([t for t in dt], error_time, '-ob', label=('Numerical'), linewidth=4, markersize=10)\n", "plt.loglog([t for t in dt], theory, '-^r', label=('Theory (2nd order)'), linewidth=4, markersize=10)\n", - "for x, y, a in zip([t for t in dt], theory, [('dt = %s ms' % (t)) for t in dt]):\n", - " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", - " textcoords='offset points', size=20,\n", - " horizontalalignment='left', verticalalignment='top')\n", + "for x, y, a in zip([t for t in dt], theory, [(f'dt = {t} ms') for t in dt], strict=True):\n", + " 
plt.annotate(\n", + " a, xy=(x, y), xytext=(4, 2),\n", + " textcoords='offset points',\n", + " size=20,\n", + " horizontalalignment='left',\n", + " verticalalignment='top'\n", + " )\n", "plt.xlabel(\"Time-step $dt$ (ms)\", fontsize=20)\n", "plt.ylabel(\"$|| u_{num} - u_{ana}||_2$\", fontsize=20)\n", "plt.tick_params(axis='both', which='both', labelsize=20)\n", @@ -492,7 +514,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = ('--y', '--b', '--r', '--g', '--c')\n", "\n", "start_t = lambda dt: int(50/dt)\n", @@ -506,8 +528,8 @@ " s, e = start_t(dti), end_t(dti)\n", " if i == 0:\n", " plt.plot(timei[s:e], U_t[s:e], 'k', label='analytical', linewidth=2)\n", - " plt.plot(timei[s:e], 100*erri[s:e], stylel[i], label=\"100 x error dt=%sms\"%dti, linewidth=2)\n", - "plt.xlim([50,100])\n", + " plt.plot(timei[s:e], 100*erri[s:e], stylel[i], label=f\"100 x error dt={dti}ms\", linewidth=2)\n", + "plt.xlim([50, 100])\n", "plt.xlabel(\"Time (ms)\", fontsize=20)\n", "plt.legend(fontsize=20)\n", "plt.show()" @@ -527,9 +549,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "pf = np.polyfit(np.log([t for t in dt]), np.log(error_time), deg=1)\n", - "print(\"Convergence rate in time is: %.4f\" % pf[0])\n", + "print(f\"Convergence rate in time is: {pf[0]:.4f}\")\n", "assert np.isclose(pf[0], 1.9, atol=0, rtol=.1)" ] }, @@ -569,17 +591,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "errorl2 = np.zeros((norder, nshapes))\n", "timing = np.zeros((norder, nshapes))\n", "\n", "set_log_level(\"ERROR\")\n", "ind_o = -1\n", - "for spc in orders:\n", - " ind_o +=1\n", - " ind_spc = -1\n", - " for nn, h in shapes:\n", - " ind_spc += 1\n", + "for ind_o, spc in enumerate(orders):\n", + " for ind_spc, (nn, h) in enumerate(shapes):\n", " time = np.linspace(0., 150., nt)\n", "\n", " model_space = ModelBench(vp=c0, origin=(0., 0.), spacing=(h, h), bcs=\"damp\",\n", @@ -606,10 +625,14 @@ "\n", " # Compare 
to reference solution\n", " # Note: we need to normalize by the factor of grid spacing squared\n", - " errorl2[ind_o, ind_spc] = np.linalg.norm(loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2) / np.sqrt(U_t.shape[0] - 1)\n", + " errorl2[ind_o, ind_spc] = np.linalg.norm(\n", + " loc_rec.data[:-1, 0] * c_num - U_t[:-1] * c_ana, 2\n", + " ) / np.sqrt(U_t.shape[0] - 1)\n", " timing[ind_o, ind_spc] = np.max([v for _, v in summary.timings.items()])\n", - " print(\"starting space order %s with (%s, %s) grid points the error is %s for %s seconds runtime\" %\n", - " (spc, nn, nn, errorl2[ind_o, ind_spc], timing[ind_o, ind_spc]))" + " print(\n", + " f'starting space order {spc} with ({nn}, {nn}) grid points the error is '\n", + " f'{errorl2[ind_o, ind_spc]} for {timing[ind_o, ind_spc]} seconds runtime'\n", + " )" ] }, { @@ -631,15 +654,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = ('-^k', '-^b', '-^r', '-^g', '-^c')\n", "\n", "plt.figure(figsize=(20, 10))\n", "for i in range(0, 5):\n", - " plt.loglog(errorl2[i, :], timing[i, :], stylel[i], label=('order %s' % orders[i]), linewidth=4, markersize=10)\n", - " for x, y, a in zip(errorl2[i, :], timing[i, :], [('dx = %s m' % (sc)) for sc in dx]):\n", - " plt.annotate(a, xy=(x, y), xytext=(4, 2),\n", - " textcoords='offset points', size=20)\n", + " plt.loglog(errorl2[i, :], timing[i, :], stylel[i], label=(f'order {orders[i]}'), linewidth=4, markersize=10)\n", + " for x, y, a in zip(errorl2[i, :], timing[i, :], [(f'dx = {sc} m') for sc in dx], strict=True):\n", + " plt.annotate(\n", + " a, xy=(x, y), xytext=(4, 2),\n", + " textcoords='offset points', size=20\n", + " )\n", "plt.xlabel(\"$|| u_{num} - u_{ref}||_{inf}$\", fontsize=20)\n", "plt.ylabel(\"Runtime (sec)\", fontsize=20)\n", "plt.tick_params(axis='both', which='both', labelsize=20)\n", @@ -669,7 +694,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "stylel = ('-^k', '-^b', '-^r', '-^g', '-^c')\n", 
"style2 = ('--k', '--b', '--r', '--g', '--c')\n", "\n", @@ -677,9 +702,9 @@ "for i in range(0, 5):\n", " theory = [k**(orders[i]) for k in dx]\n", " theory = [errorl2[i, 2]*th/theory[2] for th in theory]\n", - " plt.loglog([sc for sc in dx], errorl2[i, :], stylel[i], label=('Numerical order %s' % orders[i]),\n", + " plt.loglog([sc for sc in dx], errorl2[i, :], stylel[i], label=(f'Numerical order {orders[i]}'),\n", " linewidth=4, markersize=10)\n", - " plt.loglog([sc for sc in dx], theory, style2[i], label=('Theory order %s' % orders[i]),\n", + " plt.loglog([sc for sc in dx], theory, style2[i], label=(f'Theory order {orders[i]}'),\n", " linewidth=4, markersize=10)\n", "plt.xlabel(\"Grid spacing $dx$ (m)\", fontsize=20)\n", "plt.ylabel(\"$||u_{num} - u_{ref}||_{inf}$\", fontsize=20)\n", @@ -710,13 +735,13 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "for i in range(5):\n", " pf = np.polyfit(np.log([sc for sc in dx]), np.log(errorl2[i, :]), deg=1)[0]\n", - " if i==3:\n", + " if i == 3:\n", " pf = np.polyfit(np.log([sc for sc in dx][1:]), np.log(errorl2[i, 1:]), deg=1)[0]\n", - " print(\"Convergence rate for order %s is %s\" % (orders[i], pf))\n", - " if i<4:\n", + " print(f\"Convergence rate for order {orders[i]} is {pf}\")\n", + " if i < 4:\n", " assert np.isclose(pf, orders[i], atol=0, rtol=.2)" ] } diff --git a/examples/seismic/acoustic/acoustic_example.py b/examples/seismic/acoustic/acoustic_example.py index 9a994340e9..95820cb3cb 100644 --- a/examples/seismic/acoustic/acoustic_example.py +++ b/examples/seismic/acoustic/acoustic_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import Constant, Function, norm, smooth from devito.logger import info diff --git a/examples/seismic/elastic/elastic_example.py b/examples/seismic/elastic/elastic_example.py index 5ce82a696b..45da5a6c34 100644 --- 
a/examples/seismic/elastic/elastic_example.py +++ b/examples/seismic/elastic/elastic_example.py @@ -1,9 +1,10 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass + from devito import norm from devito.logger import info from examples.seismic import demo_model, seismic_args, setup_geometry diff --git a/examples/seismic/inversion/fwi.py b/examples/seismic/inversion/fwi.py index b1445e6466..2eda7c73f7 100644 --- a/examples/seismic/inversion/fwi.py +++ b/examples/seismic/inversion/fwi.py @@ -111,6 +111,6 @@ def fwi_gradient(vp_in): update_with_box(model0.vp, alpha, direction) # Log the progress made - print('Objective value is %f at iteration %d' % (phi, i+1)) + print(f'Objective value is {phi} at iteration {i + 1}') assert np.isclose(history[-1], 3828, atol=1e1, rtol=0) diff --git a/examples/seismic/model.py b/examples/seismic/model.py index c927b88abb..610f98313c 100644 --- a/examples/seismic/model.py +++ b/examples/seismic/model.py @@ -1,10 +1,10 @@ +from contextlib import suppress + import numpy as np from sympy import finite_diff_weights as fd_w -try: +with suppress(ImportError): import pytest -except: - pass from devito import ( Abs, Constant, Eq, Function, Grid, Inc, Operator, SubDimension, SubDomain, div, sin, @@ -41,11 +41,11 @@ def initialize_damp(damp, padsizes, spacing, abc_type="damp", fs=False): """ eqs = [Eq(damp, 1.0 if abc_type == "mask" else 0.0)] - for (nbl, nbr), d in zip(padsizes, damp.dimensions): + for (nbl, nbr), d in zip(padsizes, damp.dimensions, strict=True): if not fs or d is not damp.dimensions[-1]: dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbl) # left - dim_l = SubDimension.left(name='abc_%s_l' % d.name, parent=d, + dim_l = SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=nbl) pos = Abs((nbl - (dim_l - d.symbolic_min) + 1) / float(nbl)) val = dampcoeff * (pos - sin(2*np.pi*pos)/(2*np.pi)) @@ -53,7 +53,7 @@ def initialize_damp(damp, padsizes, 
spacing, abc_type="damp", fs=False): eqs += [Inc(damp.subs({d: dim_l}), val/d.spacing)] # right dampcoeff = 1.5 * np.log(1.0 / 0.001) / (nbr) - dim_r = SubDimension.right(name='abc_%s_r' % d.name, parent=d, + dim_r = SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=nbr) pos = Abs((nbr - (d.symbolic_max - dim_r) + 1) / float(nbr)) val = dampcoeff * (pos - sin(2*np.pi*pos)/(2*np.pi)) @@ -92,7 +92,7 @@ def define(self, dimensions): Definition of the upper section of the domain for wrapped indices FS. """ - return {d: (d if not d == dimensions[-1] else ('left', self.size)) + return {d: (d if d != dimensions[-1] else ('left', self.size)) for d in dimensions} @@ -109,7 +109,7 @@ def __init__(self, origin, spacing, shape, space_order, nbl=20, self.origin = tuple([dtype(o) for o in origin]) self.fs = fs # Default setup - origin_pml = [dtype(o - s*nbl) for o, s in zip(origin, spacing)] + origin_pml = [dtype(o - s*nbl) for o, s in zip(origin, spacing, strict=True)] shape_pml = np.array(shape) + 2 * self.nbl # Model size depending on freesurface @@ -154,9 +154,9 @@ def _initialize_bcs(self, bcs="damp"): if init or re_init: if re_init and not init: bcs_o = "damp" if bcs == "mask" else "mask" - warning("Re-initializing damp profile from %s to %s" % (bcs_o, bcs)) - warning("Model has to be created with `bcs=\"%s\"`" - "for this WaveSolver" % bcs) + warning(f"Re-initializing damp profile from {bcs_o} to {bcs}") + warning(f"Model has to be created with `bcs=\"{bcs}\"`" + "for this WaveSolver") initialize_damp(self.damp, self.padsizes, self.spacing, abc_type=bcs, fs=self.fs) self._physical_parameters.update(['damp']) @@ -234,7 +234,7 @@ def domain_size(self): """ Physical size of the domain as determined by shape and spacing """ - return tuple((d-1) * s for d, s in zip(self.shape, self.spacing)) + return tuple((d-1) * s for d, s in zip(self.shape, self.spacing, strict=True)) class SeismicModel(GenericModel): @@ -398,9 +398,8 @@ def update(self, name, value): elif 
value.shape == self.shape: initialize_function(param, value, self.nbl) else: - raise ValueError("Incorrect input size %s for model" % value.shape + - " %s without or %s with padding" % (self.shape, - param.shape)) + raise ValueError(f"Incorrect input size {value.shape} for model" + + f" {self.shape} without or {param.shape} with padding") else: param.data = value diff --git a/examples/seismic/plotting.py b/examples/seismic/plotting.py index a5974d212b..907b371627 100644 --- a/examples/seismic/plotting.py +++ b/examples/seismic/plotting.py @@ -69,7 +69,7 @@ def plot_velocity(model, source=None, receiver=None, colorbar=True, cmap="jet"): model.origin[1] + domain_size[1], model.origin[1]] slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2)) - if getattr(model, 'vp', None) is not None: + if getattr(model, 'vp', None) is not None: # noqa: SIM108 field = model.vp.data[slices] else: field = model.lam.data[slices] diff --git a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb index 01f9d32716..204870d4b2 100644 --- a/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb +++ b/examples/seismic/self_adjoint/sa_01_iso_implementation1.ipynb @@ -266,7 +266,7 @@ "diff = (f1g2+g1f2)/(f1g2-g1f2)\n", "\n", "tol = 100 * np.finfo(dtype).eps\n", - "print(\"f1g2, g1f2, diff, tol; %+.6e %+.6e %+.6e %+.6e\" % (f1g2, g1f2, diff, tol))\n", + "print(f\"f1g2, g1f2, diff, tol; {f1g2:+.6e} {g1f2:+.6e} {diff:+.6e} {tol:+.6e}\")\n", "\n", "# At last the unit test\n", "# Assert these dot products are float epsilon close in relative error\n", @@ -506,19 +506,19 @@ ], "source": [ "# Define dimensions for the interior of the model\n", - "nx,nz = 751,751\n", - "dx,dz = 10.0,10.0 # Grid spacing in m\n", + "nx, nz = 751, 751\n", + "dx, dz = 10.0, 10.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain 
size is now 5 km by 5 km\n", "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", - "nxpad,nzpad = nx+2*npad, nz+2*npad\n", - "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "nxpad, nzpad = nx+2*npad, nz+2*npad\n", + "shape_pad = np.array(shape) + 2 * npad\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -588,7 +588,7 @@ "\n", "# Constant density\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", - "b.data[:,:] = 1.0 / 1.0" + "b.data[:, :] = 1.0 / 1.0" ] }, { @@ -623,7 +623,7 @@ " \"\"\"\n", " coeff = 0.38 if len(v.grid.shape) == 3 else 0.42\n", " dt = 0.75 * v.dtype(coeff * np.min(v.grid.spacing) / (np.max(v.data)))\n", - " return v.dtype(\"%.5e\" % dt)" + " return v.dtype(f\"{dt:.5e}\")" ] }, { @@ -645,7 +645,7 @@ "tn = dtype(2000.) 
# Simulation time end (1 second = 1000 msec)\n", "dt = compute_critical_dt(m)\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "print(\"Time min, max, dt, num; %10.6f %10.6f %10.6f %d\" % (t0, tn, dt, int(tn//dt) + 1))\n", + "print(f'Time min, max, dt, num; {t0:10.6f} {tn:10.6f} {dt:10.6f} {int(tn//dt) + 1}')\n", "print(\"time_range; \", time_range)" ] }, @@ -706,20 +706,24 @@ "# Source in the center of the model at 10 Hz center frequency\n", "fpeak = 0.010\n", "src = RickerSource(name='src', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src.coordinates.data[0,0] = dx * (nx//2)\n", - "src.coordinates.data[0,1] = dz * (nz//2)\n", + "src.coordinates.data[0, 0] = dx * (nx//2)\n", + "src.coordinates.data[0, 1] = dz * (nz//2)\n", "\n", "# line of receivers along the right edge of the model\n", "rec = Receiver(name='rec', grid=grid, npoint=nz, time_range=time_range)\n", - "rec.coordinates.data[:,0] = dx * (nx//2)\n", - "rec.coordinates.data[:,1] = np.linspace(0.0, dz*(nz-1), nz)\n", - "\n", - "print(\"src_coordinate X; %+12.4f\" % (src.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec.coordinates.data[:,0]), np.max(rec.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec.coordinates.data[:,1]), np.max(rec.coordinates.data[:,1])))\n", + "rec.coordinates.data[:, 0] = dx * (nx//2)\n", + "rec.coordinates.data[:, 1] = np.linspace(0.0, dz*(nz-1), nz)\n", + "\n", + "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", + "print(\n", + " f'rec_coordinates X min/max; {np.min(rec.coordinates.data[:, 0]):+12.4f} '\n", + " f'{np.max(rec.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z min/max; {np.min(rec.coordinates.data[:, 1]):+12.4f} '\n", + " 
f'{np.max(rec.coordinates.data[:, 1]):+12.4f}'\n", + ")\n", "\n", "# We can plot the time signature to see the wavelet\n", "src.show()" @@ -762,7 +766,7 @@ "vmin, vmax = 1.4, 1.7\n", "dmin, dmax = 0.9, 1.1\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.transpose(m.data), cmap=cm.jet,\n", @@ -771,9 +775,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -786,9 +790,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -910,7 +914,7 @@ "q100 = np.log10(w / wOverQ_100.data)\n", "lmin, lmax = np.log10(qmin), np.log10(100)\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", 
"\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.transpose(q025.data), cmap=cm.jet,\n", @@ -922,9 +926,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -940,9 +944,9 @@ "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1], \\\n", + "plt.plot(rec.coordinates.data[:, 0], rec.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -974,7 +978,7 @@ "\n", "# Get the symbols for dimensions for t, x, z\n", "# We need these below in order to write the source injection and the\n", - "t,x,z = u.dimensions" + "t, x, z = u.dimensions" ] }, { @@ -1053,7 +1057,7 @@ "\n", "# Generate the time update equation and operator for Q=25 model\n", "eq_time_update = (t.spacing**2 * m**2 / b) * \\\n", - " ((b * u.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " ((b * 
u.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ_025) * u + \\\n", " (t.spacing * wOverQ_025 - 1) * u.backward\n", @@ -1063,7 +1067,7 @@ "# Update the dimension spacing_map to include the time dimension\n", "# These symbols will be replaced with the relevant scalars by the Operator\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "print(\"spacing_map; \", spacing_map)\n", "\n", "# op = Operator([stencil] + src_term + rec_term)\n", @@ -1395,29 +1399,27 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", + "import copy\n", "\n", "# Run the operator for the Q=25 model\n", - "print(\"m min/max; %+12.6e %+12.6e\" % (np.min(m.data), np.max(m.data)))\n", - "print(\"b min/max; %+12.6e %+12.6e\" % (np.min(b.data), np.max(b.data)))\n", - "print(\"wOverQ_025 min/max; %+12.6e %+12.6e\" % (np.min(wOverQ_025.data), np.max(wOverQ_025.data)))\n", - "print(\"wOverQ_100 min/max; %+12.6e %+12.6e\" % (np.min(wOverQ_100.data), np.max(wOverQ_100.data)))\n", + "print(f\"m min/max; {np.min(m.data):+12.6e} {np.max(m.data):+12.6e}\")\n", + "print(f\"b min/max; {np.min(b.data):+12.6e} {np.max(b.data):+12.6e}\")\n", + "print(f\"wOverQ_025 min/max; {np.min(wOverQ_025.data):+12.6e} {np.max(wOverQ_025.data):+12.6e}\")\n", + "print(f\"wOverQ_100 min/max; {np.min(wOverQ_100.data):+12.6e} {np.max(wOverQ_100.data):+12.6e}\")\n", "print(time_range)\n", "u.data[:] = 0\n", "op(time=time_range.num-1)\n", "# summary = op(time=time_range.num-1, h_x=dx, h_z=dz, dt=dt)\n", "\n", "# Save the Q=25 results and run the Q=100 case\n", - "import copy\n", "uQ25 = copy.copy(u)\n", "recQ25 = copy.copy(rec)\n", "\n", "u.data[:] = 0\n", "op(time=time_range.num-1, wOverQ_025=wOverQ_100)\n", "\n", - "print(\"Q= 25 receiver data min/max; %+12.6e %+12.6e\" %\\\n", - " (np.min(recQ25.data[:]), np.max(recQ25.data[:])))\n", - "print(\"Q=100 receiver data min/max; 
%+12.6e %+12.6e\" %\\\n", - " (np.min(rec.data[:]), np.max(rec.data[:])))" + "print(f\"Q= 25 receiver data min/max; {np.min(recQ25.data[:]):+12.6e} {np.max(recQ25.data[:]):+12.6e}\")\n", + "print(f\"Q=100 receiver data min/max; {np.min(rec.data[:]):+12.6e} {np.max(rec.data[:]):+12.6e}\")" ] }, { @@ -1469,34 +1471,34 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two wavefields, normalized to Q=100 (the larger amplitude)\n", - "amax_Q25 = 1.0 * np.max(np.abs(uQ25.data[1,:,:]))\n", - "amax_Q100 = 1.0 * np.max(np.abs(u.data[1,:,:]))\n", - "print(\"amax Q= 25; %12.6f\" % (amax_Q25))\n", - "print(\"amax Q=100; %12.6f\" % (amax_Q100))\n", + "amax_Q25 = 1.0 * np.max(np.abs(uQ25.data[1, :, :]))\n", + "amax_Q100 = 1.0 * np.max(np.abs(u.data[1, :, :]))\n", + "print(f\"amax Q= 25; {amax_Q25:12.6f}\")\n", + "print(f\"amax Q=100; {amax_Q100:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(uQ25.data[1,:,:] / amax_Q100), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(uQ25.data[1, :, :] / amax_Q100), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'black', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", "plt.title(\"Data for $Q=25$ model\")\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(np.transpose(u.data[1,:,:] / amax_Q100), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(u.data[1, :, :] / amax_Q100), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, 
extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot([origin[0], origin[0], extent[0], extent[0], origin[0]],\n", " [origin[1], extent[1], extent[1], origin[1], origin[1]],\n", " 'black', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -1541,15 +1543,15 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two receiver gathers, normalized to Q=100 (the larger amplitude)\n", - "amax_Q25 = 0.1 * np.max(np.abs(recQ25.data[:]))\n", + "amax_Q25 = 0.1 * np.max(np.abs(recQ25.data[:]))\n", "amax_Q100 = 0.1 * np.max(np.abs(rec.data[:]))\n", - "print(\"amax Q= 25; %12.6f\" % (amax_Q25))\n", - "print(\"amax Q=100; %12.6f\" % (amax_Q100))\n", + "print(f\"amax Q= 25; {amax_Q25:12.6f}\")\n", + "print(f\"amax Q=100; {amax_Q100:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(recQ25.data[:,:] / amax_Q100, cmap=\"seismic\",\n", + "plt.imshow(recQ25.data[:, :] / amax_Q100, cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -1557,7 +1559,7 @@ "plt.title(\"Receiver gather for $Q=25$ model\")\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(rec.data[:,:] / amax_Q100, cmap=\"seismic\",\n", + "plt.imshow(rec.data[:, :] / amax_Q100, cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.xlabel(\"X Coordinate (m)\")\n", diff --git a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb 
b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb index 38e0b5b472..2df14289a9 100644 --- a/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb +++ b/examples/seismic/self_adjoint/sa_02_iso_implementation2.ipynb @@ -315,19 +315,19 @@ ], "source": [ "# Define dimensions for the interior of the model\n", - "nx,nz = 301,301\n", - "dx,dz = 10.0,10.0 # Grid spacing in m\n", + "nx, nz = 301, 301\n", + "dx, dz = 10.0, 10.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define dimensions for the model padded with absorbing boundaries\n", "npad = 50 # number of points in absorbing boundary region (all sides)\n", - "nxpad,nzpad = nx + 2 * npad, nz + 2 * npad\n", - "shape_pad = np.array(shape) + 2 * npad\n", - "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])\n", + "nxpad, nzpad = nx + 2 * npad, nz + 2 * npad\n", + "shape_pad = np.array(shape) + 2 * npad\n", + "origin_pad = tuple([o - s*npad for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])\n", "\n", "# Define the dimensions\n", "# Note if you do not specify dimensions, you get in order x,y,z\n", @@ -405,7 +405,7 @@ "\n", "# Constant density\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", - "b.data[:,:] = 1.0 / 1.0\n", + "b.data[:, :] = 1.0 / 1.0\n", "\n", "# Initialize the attenuation profile for Q=100 model\n", "fpeak = 0.010\n", @@ -460,7 +460,7 @@ " \"\"\"\n", " coeff = 0.38 if len(v.grid.shape) == 3 else 0.42\n", " dt = 0.75 * v.dtype(coeff * 
np.min(v.grid.spacing) / (np.max(v.data)))\n", - " return v.dtype(\"%.5e\" % dt)" + " return v.dtype(f\"{dt:.5e}\")" ] }, { @@ -486,25 +486,29 @@ "tn = 1200.0 # Simulation time end (1 second = 1000 msec)\n", "dt = compute_critical_dt(m0)\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "print(\"Time min, max, dt, num; %10.6f %10.6f %10.6f %d\" % (t0, tn, dt, int(tn//dt) + 1))\n", + "print(f'Time min, max, dt, num; {t0:10.6f} {tn:10.6f} {dt:10.6f} {int(tn//dt) + 1}')\n", "print(\"time_range; \", time_range)\n", "\n", "# Source at 1/4 X, 1/2 Z, Ricker with 10 Hz center frequency\n", "src_nl = RickerSource(name='src_nl', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src_nl.coordinates.data[0,0] = dx * 1 * nx//4\n", - "src_nl.coordinates.data[0,1] = dz * shape[1]//2\n", + "src_nl.coordinates.data[0, 0] = dx * 1 * nx//4\n", + "src_nl.coordinates.data[0, 1] = dz * shape[1]//2\n", "\n", "# Receivers at 3/4 X, line in Z\n", "rec_nl = Receiver(name='rec_nl', grid=grid, npoint=nz, time_range=time_range)\n", - "rec_nl.coordinates.data[:,0] = dx * 3 * nx//4\n", - "rec_nl.coordinates.data[:,1] = np.linspace(0.0, dz*(nz-1), nz)\n", - "\n", - "print(\"src_coordinate X; %+12.4f\" % (src_nl.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src_nl.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec_nl.coordinates.data[:,0]), np.max(rec_nl.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(rec_nl.coordinates.data[:,1]), np.max(rec_nl.coordinates.data[:,1])))" + "rec_nl.coordinates.data[:, 0] = dx * 3 * nx//4\n", + "rec_nl.coordinates.data[:, 1] = np.linspace(0.0, dz*(nz-1), nz)\n", + "\n", + "print(f\"src_coordinate X; {src_nl.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src_nl.coordinates.data[0, 1]:+12.4f}\")\n", + "print(\n", + " f'rec_coordinates X min/max; {np.min(rec_nl.coordinates.data[:, 
0]):+12.4f} '\n", + " f'{np.max(rec_nl.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z min/max; {np.min(rec_nl.coordinates.data[:, 1]):+12.4f} '\n", + " f'{np.max(rec_nl.coordinates.data[:, 1]):+12.4f}'\n", + ")" ] }, { @@ -558,18 +562,18 @@ "x2 = dx * nx\n", "z1 = 0.0\n", "z2 = dz * nz\n", - "abcX = [x1,x1,x2,x2,x1]\n", - "abcZ = [z1,z2,z2,z1,z1]\n", + "abcX = [x1, x1, x2, x2, x1]\n", + "abcZ = [z1, z2, z2, z1, z1]\n", "\n", - "plt.figure(figsize=(12,12))\n", + "plt.figure(figsize=(12, 12))\n", "\n", "plt.subplot(2, 2, 1)\n", "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -580,9 +584,9 @@ "plt.imshow(np.transpose(1 / b.data), cmap=cm.jet,\n", " vmin=bmin, vmax=bmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', 
markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Density (kg/m^3)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -593,9 +597,9 @@ "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -606,9 +610,9 @@ "plt.imshow(np.transpose(np.log10(q.data)), cmap=cm.jet,\n", " vmin=np.log10(qmin), vmax=np.log10(qmax), extent=plt_extent)\n", "plt.plot(abcX, abcZ, 'white', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='log10 $Q_p$')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -643,7 +647,7 @@ "duAdj = TimeFunction(name=\"duAdj\", grid=grid, time_order=2, space_order=space_order, save=None)\n", "\n", "# Get the dimensions for t, x, z\n", - "t,x,z = u0.dimensions" + "t, x, z = u0.dimensions" ] }, { @@ 
-717,7 +721,7 @@ "# Update the dimension spacing_map to include the time dimension\n", "# Please refer to the first implementation notebook for more information\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "print(\"spacing_map; \", spacing_map)\n", "\n", "# Source injection and Receiver extraction\n", @@ -802,7 +806,7 @@ "\n", "# Receiver container and receiver extraction for the linearized operator\n", "rec_ln = Receiver(name='rec_ln', grid=grid, npoint=nz, time_range=time_range)\n", - "rec_ln.coordinates.data[:,:] = rec_nl.coordinates.data[:,:]\n", + "rec_ln.coordinates.data[:, :] = rec_nl.coordinates.data[:, :]\n", "rec_term_ln_fwd = rec_ln.interpolate(expr=duFwd.forward)\n", "\n", "# Instantiate and run the operator for the linearized forward\n", @@ -861,37 +865,37 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot the two wavefields, each normalized to own maximum\n", - "kt = nt - 2\n", + "kt = nt - 2\n", "\n", - "amax_nl = 1.0 * np.max(np.abs(u0.data[kt,:,:]))\n", - "amax_ln = 0.1 * np.max(np.abs(duFwd.data[kt,:,:]))\n", + "amax_nl = 1.0 * np.max(np.abs(u0.data[kt, :, :]))\n", + "amax_ln = 0.1 * np.max(np.abs(duFwd.data[kt, :, :]))\n", "\n", - "print(\"amax nl; %12.6f\" % (amax_nl))\n", - "print(\"amax ln t=%.2fs; %12.6f\" % (dt * kt / 1000, amax_ln))\n", + "print(f\"amax nl; {amax_nl:12.6f}\")\n", + "print(f\"amax ln t={dt * kt / 1000:.2f}s; {amax_ln:12.6f}\")\n", "\n", - "plt.figure(figsize=(12,12))\n", + "plt.figure(figsize=(12, 12))\n", "\n", "plt.subplot(1, 2, 1)\n", - "plt.imshow(np.transpose(u0.data[kt,:,:]), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(u0.data[kt, :, :]), cmap=\"seismic\",\n", " vmin=-amax_nl, vmax=+amax_nl, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], 
\\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", "plt.title(\"Nonlinear wavefield at t=%.2fs\" % (dt * kt / 1000))\n", "\n", "plt.subplot(1, 2, 2)\n", - "plt.imshow(np.transpose(duFwd.data[kt,:,:]), cmap=\"seismic\",\n", + "plt.imshow(np.transpose(duFwd.data[kt, :, :]), cmap=\"seismic\",\n", " vmin=-amax_ln, vmax=+amax_ln, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Amplitude')\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -1061,21 +1065,21 @@ "amax1 = 0.5 * np.max(np.abs(dm.data[:]))\n", "amax2 = 0.5 * np.max(np.abs(dmAdj.data[:]))\n", "\n", - "print(\"amax dm; %12.6e\" % (amax1))\n", - "print(\"amax dmAdj %12.6e\" % (amax2))\n", + "print(f\"amax dm; {amax1:12.6e}\")\n", + "print(f\"amax dmAdj {amax2:12.6e}\")\n", "\n", "dm.data[:] = dm.data / amax1\n", "dmAdj.data[:] = dmAdj.data / amax2\n", "\n", - "plt.figure(figsize=(12,8))\n", + "plt.figure(figsize=(12, 8))\n", "\n", "plt.subplot(1, 2, 1)\n", 
"plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", @@ -1086,9 +1090,9 @@ "plt.imshow(np.transpose(dmAdj.data), cmap=\"seismic\",\n", " vmin=-1, vmax=+1, extent=plt_extent, aspect=\"auto\")\n", "plt.plot(abcX, abcZ, 'gray', linewidth=4, linestyle=':', label=\"Absorbing Boundary\")\n", - "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1], \\\n", + "plt.plot(src_nl.coordinates.data[:, 0], src_nl.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", - "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1], \\\n", + "plt.plot(rec_nl.coordinates.data[:, 0], rec_nl.coordinates.data[:, 1],\n", " 'black', linestyle='None', marker='^', markersize=2, label=\"Receivers\")\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", "plt.xlabel(\"X Coordinate (m)\")\n", diff --git a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb index 62c4fdde57..c4933e85ee 100644 --- a/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb +++ b/examples/seismic/self_adjoint/sa_03_iso_correctness.ipynb @@ -260,19 +260,19 @@ "def analytic_response(fpeak, time_axis, src_coords, rec_coords, v):\n", 
" nt = time_axis.num\n", " dt = time_axis.step\n", - " v0 = v.data[0,0]\n", + " v0 = v.data[0, 0]\n", " sx, sz = src_coords[0, :]\n", " rx, rz = rec_coords[0, :]\n", " ntpad = 20 * (nt - 1) + 1\n", " tmaxpad = dt * (ntpad - 1)\n", " time_axis_pad = TimeAxis(start=tmin, stop=tmaxpad, step=dt)\n", - " timepad = np.linspace(tmin, tmaxpad, ntpad)\n", + " # timepad = np.linspace(tmin, tmaxpad, ntpad)\n", " print(time_axis)\n", " print(time_axis_pad)\n", " srcpad = RickerSource(name='srcpad', grid=v.grid, f0=fpeak, npoint=1,\n", " time_range=time_axis_pad, t0w=t0w)\n", " nf = int(ntpad / 2 + 1)\n", - " fnyq = 1.0 / (2 * dt)\n", + " # fnyq = 1.0 / (2 * dt)\n", " df = 1.0 / tmaxpad\n", " faxis = df * np.arange(nf)\n", "\n", @@ -286,7 +286,7 @@ " for a in range(1, nf - 1):\n", " w = 2 * np.pi * faxis[a]\n", " r = np.sqrt((rx - sx)**2 + (rz - sz)**2)\n", - " U_a[a] = -1j * np.pi * hankel2(0.0, w * r / v0) * R[a]\n", + " U_a[a] = -1j * np.pi * hankel2(0.0, w * r / v0) * R[a]\n", "\n", " # Do inverse fft on 0:dt:T and you have analytical solution\n", " U_t = 1.0/(2.0 * np.pi) * np.real(np.fft.ifft(U_a[:], ntpad))\n", @@ -381,7 +381,7 @@ "arms = np.max(np.abs(recAna))\n", "drms = np.max(np.abs(diff))\n", "\n", - "print(\"\\nMaximum absolute numerical,analytic,diff; %+12.6e %+12.6e %+12.6e\" % (nrms, arms, drms))\n", + "print(f\"\\nMaximum absolute numerical,analytic,diff; {nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}\")\n", "\n", "# This isnt a very strict tolerance ...\n", "tol = 0.1\n", @@ -391,8 +391,8 @@ "amin, amax = np.min(recAna), np.max(recAna)\n", "\n", "print(\"\")\n", - "print(\"Numerical min/max; %+12.6e %+12.6e\" % (nmin, nmax))\n", - "print(\"Analytic min/max; %+12.6e %+12.6e\" % (amin, amax))" + "print(f\"Numerical min/max; {nmin:+12.6e} {nmax:+12.6e}\")\n", + "print(f\"Analytic min/max; {amin:+12.6e} {amax:+12.6e}\")" ] }, { @@ -429,9 +429,9 @@ "\n", "# Plot\n", "x1 = origin[0] - model.nbl * model.spacing[0]\n", - "x2 = model.domain_size[0] + model.nbl * 
model.spacing[0]\n", + "x2 = model.domain_size[0] + model.nbl * model.spacing[0]\n", "z1 = origin[1] - model.nbl * model.spacing[1]\n", - "z2 = model.domain_size[1] + model.nbl * model.spacing[1]\n", + "z2 = model.domain_size[1] + model.nbl * model.spacing[1]\n", "\n", "xABC1 = origin[0]\n", "xABC2 = model.domain_size[0]\n", @@ -442,12 +442,12 @@ "abc_pairsX = [xABC1, xABC1, xABC2, xABC2, xABC1]\n", "abc_pairsZ = [zABC1, zABC2, zABC2, zABC1, zABC1]\n", "\n", - "plt.figure(figsize=(12.5,12.5))\n", + "plt.figure(figsize=(12.5, 12.5))\n", "\n", "# Plot wavefield\n", - "plt.subplot(2,2,1)\n", + "plt.subplot(2, 2, 1)\n", "amax = 1.1 * np.max(np.abs(recNum.data[:]))\n", - "plt.imshow(uNum.data[1,:,:], vmin=-amax, vmax=+amax, cmap=\"seismic\",\n", + "plt.imshow(uNum.data[1, :, :], vmin=-amax, vmax=+amax, cmap=\"seismic\",\n", " aspect=\"auto\", extent=plt_extent)\n", "plt.plot(src_coords[0, 0], src_coords[0, 1], 'r*', markersize=15, label='Source')\n", "plt.plot(rec_coords[0, 0], rec_coords[0, 1], 'k^', markersize=11, label='Receiver')\n", @@ -460,23 +460,23 @@ "plt.tight_layout()\n", "\n", "# Plot trace\n", - "plt.subplot(2,2,3)\n", + "plt.subplot(2, 2, 3)\n", "plt.plot(time, recNum.data[:, 0], '-b', label='Numeric')\n", "plt.plot(time, recAna[:], '--r', label='Analytic')\n", "plt.xlabel('Time (ms)')\n", "plt.ylabel('Amplitude')\n", "plt.title('Trace comparison of solutions')\n", "plt.legend(loc=\"upper right\")\n", - "plt.xlim([50,90])\n", + "plt.xlim([50, 90])\n", "plt.ylim([-0.7 * amax, +amax])\n", "\n", - "plt.subplot(2,2,4)\n", - "plt.plot(time, 10 * (recNum.data[:, 0] - recAna[:]), '-k', label='Difference x10')\n", + "plt.subplot(2, 2, 4)\n", + "plt.plot(time, 10 * (recNum.data[:, 0] - recAna[:]), '-k', label='Difference x10')\n", "plt.xlabel('Time (ms)')\n", "plt.ylabel('Amplitude')\n", "plt.title('Difference of solutions (x10)')\n", "plt.legend(loc=\"upper right\")\n", - "plt.xlim([50,90])\n", + "plt.xlim([50, 90])\n", "plt.ylim([-0.7 * amax, +amax])\n", "\n", 
"plt.tight_layout()\n", @@ -559,10 +559,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", - "print(\"\\nlinearity forward F %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity forward F {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} {np.sqrt(np.mean(rec2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -607,10 +608,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(src2.data**2))\n", "diff = (src1.data - src2.data) / rms2\n", - "print(\"\\nlinearity adjoint F %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(src1.data**2)), np.sqrt(np.mean(src2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity adjoint F {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(src1.data**2)):+16.10e} {np.sqrt(np.mean(src2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -663,8 +665,10 @@ "sum_s = np.dot(src1.data.reshape(-1), src2.data.reshape(-1))\n", "sum_r = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1))\n", "diff = (sum_s - sum_r) / (sum_s + sum_r)\n", - "print(\"\\nadjoint F %s (so=%d) sum_s, sum_r, diff; %+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, sum_s, sum_r, diff))\n", + "print(\n", + " f'\\nadjoint F {shape} (so=8) sum_s, sum_r, diff; '\n", + " f'{sum_s:+16.10e} {sum_r:+16.10e} {diff:+16.10e}'\n", + ")\n", "assert np.isclose(diff, 0., atol=1.e-12)" ] }, @@ -766,8 +770,11 @@ "# Assert the 2nd order error has slope 
dh^4\n", "p1 = np.polyfit(np.log10(scale), np.log10(norm1), 1)\n", "p2 = np.polyfit(np.log10(scale), np.log10(norm2), 1)\n", - "print(\"\\nlinearization F %s (so=%d) 1st (%.1f) = %.4f, 2nd (%.1f) = %.4f\" %\n", - " (shape, 8, dh**2, p1[0], dh**4, p2[0]))\n", + "print(\n", + " f'\\nlinearization F {shape} (so=8) '\n", + " f'1st ({dh**2:.1f}) = {p1[0]:.4f}, '\n", + " f'2nd ({dh**4:.1f}) = {p2[0]:.4f}'\n", + ")\n", "assert np.isclose(p1[0], dh**2, rtol=0.1)\n", "assert np.isclose(p2[0], dh**4, rtol=0.1)" ] @@ -792,7 +799,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot linearization tests\n", - "plt.figure(figsize=(14,8))\n", + "plt.figure(figsize=(14, 8))\n", "\n", "expected1 = np.empty(nstep)\n", "expected2 = np.empty(nstep)\n", @@ -888,10 +895,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(rec2.data**2))\n", "diff = (rec1.data - rec2.data) / rms2\n", - "print(\"\\nlinearity forward J %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))\n", + "print(\n", + " f'\\nlinearity forward J {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} {np.sqrt(np.mean(rec2.data**2)):+16.10e} '\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")\n", "tol = 1.e-12\n", "assert np.allclose(diff, 0.0, atol=tol)" ] @@ -946,10 +954,11 @@ "# Normalize by rms of rec2, to enable using absolute tolerance below\n", "rms2 = np.sqrt(np.mean(dm2.data**2))\n", "diff = (dm1.data - dm2.data) / rms2\n", - "print(\"\\nlinearity adjoint J %s (so=%d) rms 1,2,diff; \"\n", - " \"%+16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, np.sqrt(np.mean(dm1.data**2)), np.sqrt(np.mean(dm2.data**2)),\n", - " np.sqrt(np.mean(diff**2))))" + "print(\n", + " f'\\nlinearity adjoint J {shape} (so=8) rms 1,2,diff; '\n", + " f'{np.sqrt(np.mean(dm1.data**2)):+16.10e} {np.sqrt(np.mean(dm2.data**2)):+16.10e} 
'\n", + " f'{np.sqrt(np.mean(diff**2)):+16.10e}'\n", + ")" ] }, { @@ -1021,8 +1030,10 @@ "sum_m = np.dot(dm1.data.reshape(-1), dm2.data.reshape(-1))\n", "sum_d = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1))\n", "diff = (sum_m - sum_d) / (sum_m + sum_d)\n", - "print(\"\\nadjoint J %s (so=%d) sum_m, sum_d, diff; %16.10e %+16.10e %+16.10e\" %\n", - " (shape, 8, sum_m, sum_d, diff))\n", + "print(\n", + " f'\\nadjoint J {shape} (so=8) sum_m, sum_d, diff; '\n", + " f'{sum_m:16.10e} {sum_d:+16.10e} {diff:+16.10e}'\n", + ")\n", "assert np.isclose(diff, 0., atol=1.e-11)\n", "\n", "del rec0, u0" @@ -1124,13 +1135,13 @@ "diff = (f1g2+g1f2)/(f1g2-g1f2)\n", "\n", "tol = 100 * np.finfo(dtype).eps\n", - "print(\"f1g2, g1f2, diff, tol; %+.6e %+.6e %+.6e %+.6e\" % (f1g2, g1f2, diff, tol))\n", + "print(f\"f1g2, g1f2, diff, tol; {f1g2:+.6e} {g1f2:+.6e} {diff:+.6e} {tol:+.6e}\")\n", "\n", "# At last the unit test\n", "# Assert these dot products are float epsilon close in relative error\n", "assert diff < 100 * np.finfo(np.float32).eps\n", "\n", - "del f1,f2,g1,g2" + "del f1, f2, g1, g2" ] }, { diff --git a/examples/seismic/self_adjoint/test_utils.py b/examples/seismic/self_adjoint/test_utils.py index 02527c85b5..6b4f37c36a 100644 --- a/examples/seismic/self_adjoint/test_utils.py +++ b/examples/seismic/self_adjoint/test_utils.py @@ -1,9 +1,10 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except: - pass + from devito import Function, Grid from examples.seismic.self_adjoint import setup_w_over_q diff --git a/examples/seismic/self_adjoint/test_wavesolver_iso.py b/examples/seismic/self_adjoint/test_wavesolver_iso.py index 7d55122608..77e665f931 100644 --- a/examples/seismic/self_adjoint/test_wavesolver_iso.py +++ b/examples/seismic/self_adjoint/test_wavesolver_iso.py @@ -1,10 +1,11 @@ +from contextlib import suppress + import numpy as np from scipy.special import hankel2 -try: +with suppress(ImportError): import pytest 
-except: - pass + from devito import Eq, Function, Grid, Operator, info from examples.seismic import AcquisitionGeometry, Model, RickerSource, TimeAxis from examples.seismic.self_adjoint import ( @@ -40,10 +41,12 @@ def test_linearity_forward_F(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 - info("linearity forward F %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity forward F {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(rec1.data**2)):+16.10e}' + f'{np.sqrt(np.mean(rec2.data**2)):+16.10e}' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -69,10 +72,12 @@ def test_linearity_adjoint_F(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(src2.data**2)) diff = (src1.data - src2.data) / rms2 - info("linearity adjoint F %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(src1.data**2)), np.sqrt(np.mean(src2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity adjoint F {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(src1.data**2)):+16.10e}' + f'{np.sqrt(np.mean(src2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -95,8 +100,10 @@ def test_adjoint_F(self, shape, dtype, so): sum_s = np.dot(src1.data.reshape(-1), src2.data.reshape(-1)) sum_r = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1)) diff = (sum_s - sum_r) / (sum_s + sum_r) - info("adjoint F %s (so=%d) sum_s, sum_r, diff; %+16.10e %+16.10e %+16.10e" % - (shape, so, sum_s, sum_r, diff)) + info( + f'adjoint F {shape} ({so=}) sum_s, sum_r, diff; ' + f'{sum_s:+16.10e} {sum_r:+16.10e} {diff:+16.10e}' + ) assert 
np.isclose(diff, 0., atol=1.e-12) @pytest.mark.parametrize('shape', shapes) @@ -165,8 +172,10 @@ def test_linearization_F(self, shape, dtype, so): # Assert the 2nd order error has slope dh^4 p1 = np.polyfit(np.log10(scale), np.log10(norm1), 1) p2 = np.polyfit(np.log10(scale), np.log10(norm2), 1) - info("linearization F %s (so=%d) 1st (%.1f) = %.4f, 2nd (%.1f) = %.4f" % - (shape, so, dh**2, p1[0], dh**4, p2[0])) + info( + f'linearization F {shape} ({so=}) ' + f'1st ({dh**2:.1f}) = {p1[0]:.4f}, 2nd ({dh**4:.1f}) = {p2[0]:.4f}' + ) # we only really care the 2nd order err is valid, not so much the 1st order error assert np.isclose(p1[0], dh**2, rtol=0.25) @@ -213,10 +222,12 @@ def test_linearity_forward_J(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(rec2.data**2)) diff = (rec1.data - rec2.data) / rms2 - info("linearity forward J %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(rec1.data**2)), np.sqrt(np.mean(rec2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity forward J {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(rec1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(rec2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) tol = 1.e-12 assert np.allclose(diff, 0.0, atol=tol) @@ -262,10 +273,12 @@ def test_linearity_adjoint_J(self, shape, dtype, so): # Normalize by rms of rec2, to enable using absolute tolerance below rms2 = np.sqrt(np.mean(dm2.data**2)) diff = (dm1.data - dm2.data) / rms2 - info("linearity adjoint J %s (so=%d) rms 1,2,diff; " - "%+16.10e %+16.10e %+16.10e" % - (shape, so, np.sqrt(np.mean(dm1.data**2)), np.sqrt(np.mean(dm2.data**2)), - np.sqrt(np.mean(diff**2)))) + info( + f'linearity adjoint J {shape} ({so=}) rms 1,2,diff; ' + f'{np.sqrt(np.mean(dm1.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(dm2.data**2)):+16.10e} ' + f'{np.sqrt(np.mean(diff**2)):+16.10e}' + ) @pytest.mark.parametrize('shape', shapes) 
@pytest.mark.parametrize('dtype', dtypes) @@ -311,8 +324,10 @@ def test_adjoint_J(self, shape, dtype, so): sum_m = np.dot(dm1.data.reshape(-1), dm2.data.reshape(-1)) sum_d = np.dot(rec1.data.reshape(-1), rec2.data.reshape(-1)) diff = (sum_m - sum_d) / (sum_m + sum_d) - info("adjoint J %s (so=%d) sum_m, sum_d, diff; %16.10e %+16.10e %+16.10e" % - (shape, so, sum_m, sum_d, diff)) + info( + f'adjoint J {shape} ({so=}) sum_m, sum_d, diff; ' + f'{sum_m:16.10e} {sum_d:+16.10e} {diff:+16.10e}' + ) assert np.isclose(diff, 0., atol=1.e-11) @pytest.mark.parametrize('dtype', dtypes) @@ -358,8 +373,10 @@ def test_derivative_symmetry(self, dtype, so): g1f2 = np.dot(g1.data, f2.data) diff = (f1g2 + g1f2) / (f1g2 - g1f2) - info("skew symmetry (so=%d) -- f1g2, g1f2, diff; %+16.10e %+16.10e %+16.10e" % - (so, f1g2, g1f2, diff)) + info( + f'skew symmetry ({so=}) -- f1g2, g1f2, diff; ' + f'{f1g2:+16.10e} {g1f2:+16.10e} {diff:+16.10e}' + ) assert np.isclose(diff, 0., atol=1.e-12) @pytest.mark.parametrize('dtype', dtypes) @@ -459,8 +476,10 @@ def analytic_response(): arms = np.max(np.abs(uAna)) drms = np.max(np.abs(diff)) - info("Maximum absolute numerical,analytic,diff; %+12.6e %+12.6e %+12.6e" % - (nrms, arms, drms)) + info( + 'Maximum absolute numerical,analytic,diff; ' + f'{nrms:+12.6e} {arms:+12.6e} {drms:+12.6e}' + ) # This isnt a very strict tolerance ... tol = 0.1 diff --git a/examples/seismic/self_adjoint/utils.py b/examples/seismic/self_adjoint/utils.py index 6cdcf06c9d..4926564cd8 100644 --- a/examples/seismic/self_adjoint/utils.py +++ b/examples/seismic/self_adjoint/utils.py @@ -31,10 +31,10 @@ def setup_w_over_q(wOverQ, w, qmin, qmax, npad, sigma=0): sigma value for call to scipy gaussian smoother, default 5. 
""" # sanity checks - assert w > 0, "supplied w value [%f] must be positive" % (w) - assert qmin > 0, "supplied qmin value [%f] must be positive" % (qmin) - assert qmax > 0, "supplied qmax value [%f] must be positive" % (qmax) - assert npad > 0, "supplied npad value [%f] must be positive" % (npad) + assert w > 0, f"supplied w value [{w:f}] must be positive" + assert qmin > 0, f"supplied qmin value [{qmin:f}] must be positive" + assert qmax > 0, f"supplied qmax value [{qmax:f}] must be positive" + assert npad > 0, f"supplied npad value [{npad:f}] must be positive" for n in wOverQ.grid.shape: if n - 2*npad < 1: raise ValueError("2 * npad must not exceed dimension size!") @@ -47,12 +47,12 @@ def setup_w_over_q(wOverQ, w, qmin, qmax, npad, sigma=0): eqs = [Eq(wOverQ, 1)] for d in wOverQ.dimensions: # left - dim_l = SubDimension.left(name='abc_%s_l' % d.name, parent=d, + dim_l = SubDimension.left(name=f'abc_{d.name}_l', parent=d, thickness=npad) pos = Abs(dim_l - d.symbolic_min) / float(npad) eqs.append(Eq(wOverQ.subs({d: dim_l}), Min(wOverQ.subs({d: dim_l}), pos))) # right - dim_r = SubDimension.right(name='abc_%s_r' % d.name, parent=d, + dim_r = SubDimension.right(name=f'abc_{d.name}_r', parent=d, thickness=npad) pos = Abs(d.symbolic_max - dim_r) / float(npad) eqs.append(Eq(wOverQ.subs({d: dim_r}), Min(wOverQ.subs({d: dim_r}), pos))) diff --git a/examples/seismic/source.py b/examples/seismic/source.py index 12e420565d..ca86c41c30 100644 --- a/examples/seismic/source.py +++ b/examples/seismic/source.py @@ -63,7 +63,9 @@ def __init__(self, start=None, step=None, num=None, stop=None): else: raise ValueError("Only three of start, step, num and stop may be set") except: - raise ValueError("Three of args start, step, num and stop may be set") + raise ValueError( + "Three of args start, step, num and stop may be set" + ) from None if not isinstance(num, int): raise TypeError("input argument must be of type int") @@ -74,8 +76,8 @@ def __init__(self, start=None, step=None, 
num=None, stop=None): self.num = int(num) def __str__(self): - return "TimeAxis: start=%g, stop=%g, step=%g, num=%g" % \ - (self.start, self.stop, self.step, self.num) + return f'TimeAxis: start={self.start:g}, stop={self.stop:g}, ' + \ + f'step={self.step:g}, num={self.num:g}' def _rebuild(self): return TimeAxis(start=self.start, stop=self.stop, num=self.num) diff --git a/examples/seismic/test_seismic_utils.py b/examples/seismic/test_seismic_utils.py index 76f5ff4560..3a70a330e6 100644 --- a/examples/seismic/test_seismic_utils.py +++ b/examples/seismic/test_seismic_utils.py @@ -1,7 +1,8 @@ -try: +from contextlib import suppress + +with suppress(ImportError): import pytest -except: - pass + import numpy as np from devito import norm @@ -26,7 +27,7 @@ def test_damp(nbl, bcs): except AttributeError: center = model.damp - assert all([s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape)]) + assert all([s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape, strict=True)]) assert center == bcs[1] switch_bcs = not_bcs(bcs[0]) diff --git a/examples/seismic/tti/operators.py b/examples/seismic/tti/operators.py index 3e79d974bd..7ce74f1663 100644 --- a/examples/seismic/tti/operators.py +++ b/examples/seismic/tti/operators.py @@ -151,10 +151,7 @@ def Gh_centered(model, field): ------- Sum of the 3D rotated second order derivative in the direction x and y. 
""" - if model.dim == 3: - Gzz = Gzz_centered(model, field) - else: - Gzz = Gzz_centered_2d(model, field) + Gzz = Gzz_centered(model, field) if model.dim == 3 else Gzz_centered_2d(model, field) b = getattr(model, 'b', None) if b is not None: _diff = lambda f, d: getattr(f, f'd{d.name}') diff --git a/examples/seismic/tti/tti_example.py b/examples/seismic/tti/tti_example.py index aebbac62c7..4fdeb96674 100644 --- a/examples/seismic/tti/tti_example.py +++ b/examples/seismic/tti/tti_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import Constant, Function, info, norm, smooth from examples.seismic import demo_model, seismic_args, setup_geometry @@ -94,15 +94,9 @@ def test_tti_stability(shape, kernel): args = parser.parse_args() if args.constant: - if args.azi: - preset = 'constant-tti-noazimuth' - else: - preset = 'constant-tti' + preset = 'constant-tti-noazimuth' if args.azi else 'constant-tti' else: - if args.azi: - preset = 'layers-tti-noazimuth' - else: - preset = 'layers-tti' + preset = 'layers-tti-noazimuth' if args.azi else 'layers-tti' # Preset parameters ndim = args.ndim diff --git a/examples/seismic/tti/wavesolver.py b/examples/seismic/tti/wavesolver.py index 3ca1c740d4..ae6c23eef5 100644 --- a/examples/seismic/tti/wavesolver.py +++ b/examples/seismic/tti/wavesolver.py @@ -39,12 +39,11 @@ def __init__(self, model, geometry, space_order=4, kernel='centered', raise ValueError("Free surface only supported for centered TTI kernel") if space_order % 2 != 0: - raise ValueError("space_order must be even but got %s" - % space_order) + raise ValueError(f"space_order must be even but got {space_order}") if space_order % 4 != 0: warning("It is recommended for space_order to be a multiple of 4" + - "but got %s" % space_order) + f"but got {space_order}") self.space_order = space_order diff --git a/examples/seismic/tutorials/01_modelling.ipynb 
b/examples/seismic/tutorials/01_modelling.ipynb index 5fe0e78a8c..14e4ab876c 100644 --- a/examples/seismic/tutorials/01_modelling.ipynb +++ b/examples/seismic/tutorials/01_modelling.ipynb @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Adding ignore due to (probably an np notebook magic) bug\n", "import numpy as np\n", "%matplotlib inline" @@ -117,7 +117,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "\n", "# Define a physical size\n", @@ -193,7 +193,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import RickerSource\n", "\n", "f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", @@ -234,7 +234,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Receiver\n", "\n", "# Create symbol for 101 receivers\n", @@ -414,7 +414,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Operator\n", "\n", "op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)" @@ -456,7 +456,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=model.critical_dt)" ] }, @@ -484,7 +484,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_shotrecord\n", "\n", "plot_shotrecord(rec.data, model, t0, tn)" diff --git a/examples/seismic/tutorials/02_rtm.ipynb b/examples/seismic/tutorials/02_rtm.ipynb index 7ed3736463..cf90a02585 100644 --- a/examples/seismic/tutorials/02_rtm.ipynb +++ b/examples/seismic/tutorials/02_rtm.ipynb @@ -140,7 +140,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_velocity, plot_perturbation\n", "from devito import gaussian_smooth\n", "\n", @@ 
-184,7 +184,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "from examples.seismic import AcquisitionGeometry\n", "\n", @@ -229,7 +229,7 @@ "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", "solver = AcousticWaveSolver(model, geometry, space_order=4)\n", - "true_d , _, _ = solver.forward(vp=model.vp)" + "true_d, _, _ = solver.forward(vp=model.vp)" ] }, { @@ -279,7 +279,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot shot record for true and smooth velocity model and the difference\n", "from examples.seismic import plot_shotrecord\n", "\n", @@ -346,6 +346,7 @@ "from devito import TimeFunction, Operator, Eq, solve\n", "from examples.seismic import PointSource\n", "\n", + "\n", "def ImagingOperator(model, image):\n", " # Define the wavefield with the size of the model and the time dimension\n", " v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=4)\n", @@ -405,7 +406,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Prepare the varying source locations\n", "source_locations = np.empty((nshots, 2), dtype=np.float32)\n", @@ -499,7 +500,7 @@ "op_imaging = ImagingOperator(model, image)\n", "\n", "for i in range(nshots):\n", - " print('Imaging source %d out of %d' % (i+1, nshots))\n", + " print(f'Imaging source {i + 1} out of {nshots}')\n", "\n", " # Update source location\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", @@ -534,7 +535,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_image\n", "\n", "# Plot the inverted image\n", diff --git a/examples/seismic/tutorials/03_fwi.ipynb b/examples/seismic/tutorials/03_fwi.ipynb index c309542c12..c077628d20 100644 --- a/examples/seismic/tutorials/03_fwi.ipynb +++ b/examples/seismic/tutorials/03_fwi.ipynb @@ -104,7 +104,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import demo_model, plot_velocity, plot_perturbation\n", "\n", "# Define true and initial model\n", @@ -117,7 +117,7 @@ "\n", "model0 = demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,\n", " origin=origin, shape=shape, spacing=spacing, nbl=40,\n", - " grid = model.grid)\n", + " grid=model.grid)\n", "\n", "plot_velocity(model)\n", "plot_velocity(model0)\n", @@ -160,7 +160,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "from examples.seismic import AcquisitionGeometry\n", "\n", @@ -204,7 +204,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Plot acquisition geometry\n", "plot_velocity(model, source=geometry.src_positions,\n", " receiver=geometry.rec_positions[::4, :])" @@ -279,7 +279,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import plot_shotrecord\n", "\n", "# Plot shot record for true and smooth velocity model and the difference\n", @@ -357,7 +357,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Prepare the varying source locations sources\n", "source_locations = np.empty((nshots, 2), dtype=np.float32)\n", @@ -375,6 +375,7 @@ "source": [ "from devito import Eq, Operator\n", "\n", + "\n", "# Computes the residual between observed and synthetic data into the residual\n", "def compute_residual(residual, dobs, dsyn):\n", " if residual.grid.distributor.is_parallel:\n", @@ -383,7 +384,7 @@ " # same position\n", " assert np.allclose(dobs.coordinates.data[:], dsyn.coordinates.data)\n", " assert np.allclose(residual.coordinates.data[:], dsyn.coordinates.data)\n", - " # Create a difference operator\n", + " # Create a difference operator\n", " diff_eq = Eq(residual, dsyn.subs({dsyn.dimensions[-1]: residual.dimensions[-1]}) -\n", " dobs.subs({dobs.dimensions[-1]: 
residual.dimensions[-1]}))\n", " Operator(diff_eq)()\n", @@ -404,6 +405,7 @@ "from devito import Function, norm\n", "from examples.seismic import Receiver\n", "\n", + "\n", "def fwi_gradient(vp_in):\n", " # Create symbols to hold the gradient\n", " grad = Function(name=\"grad\", grid=model.grid)\n", @@ -492,7 +494,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import mmax\n", "from examples.seismic import plot_image\n", "\n", @@ -522,6 +524,8 @@ "outputs": [], "source": [ "from devito import Min, Max\n", + "\n", + "\n", "# Define bounding box constraints on the solution.\n", "def update_with_box(vp, alpha, dm, vmin=2.0, vmax=3.5):\n", " \"\"\"\n", @@ -579,7 +583,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "from devito import mmax\n", "\n", @@ -600,10 +604,10 @@ " alpha = .05 / mmax(direction)\n", "\n", " # Update the model estimate and enforce minimum/maximum values\n", - " update_with_box(model0.vp , alpha , direction)\n", + " update_with_box(model0.vp, alpha, direction)\n", "\n", " # Log the progress made\n", - " print('Objective value is %f at iteration %d' % (phi, i+1))" + " print(f'Objective value is {phi} at iteration {i + 1}')" ] }, { @@ -623,7 +627,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plot inverted velocity model\n", "plot_velocity(model0)" @@ -646,7 +650,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "import matplotlib.pyplot as plt\n", "\n", "# Plot objective function decrease\n", diff --git a/examples/seismic/tutorials/04_dask.ipynb b/examples/seismic/tutorials/04_dask.ipynb index 5677323a22..6ac2b18977 100644 --- a/examples/seismic/tutorials/04_dask.ipynb +++ b/examples/seismic/tutorials/04_dask.ipynb @@ -105,7 +105,7 @@ "\n", "# Initial model\n", "model0 = demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,\n", - " origin=origin, shape=shape, spacing=spacing, nbl=nbl, grid = model1.grid)" + " origin=origin, 
shape=shape, spacing=spacing, nbl=nbl, grid=model1.grid)" ] }, { @@ -176,6 +176,7 @@ "source": [ "from examples.seismic.acoustic import AcousticWaveSolver\n", "\n", + "\n", "# Serial modeling function\n", "def forward_modeling_single_shot(model, geometry, save=False, dt=4.0):\n", " solver = AcousticWaveSolver(model, geometry, space_order=4)\n", @@ -203,7 +204,7 @@ " for i in range(geometry.nsrc):\n", "\n", " # Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i, :],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", "\n", " # Call serial modeling function for each index\n", @@ -332,6 +333,7 @@ "from devito import Function\n", "from examples.seismic import Receiver\n", "\n", + "\n", "# Serial FWI objective function\n", "def fwi_objective_single_shot(model, geometry, d_obs):\n", "\n", @@ -376,7 +378,7 @@ " for i in range(geometry.nsrc):\n", "\n", " # Geometry for current shot\n", - " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i,:],\n", + " geometry_i = AcquisitionGeometry(model, geometry.rec_positions, geometry.src_positions[i, :],\n", " geometry.t0, geometry.tn, f0=geometry.f0, src_type=geometry.src_type)\n", "\n", " # Call serial FWI objective function for each shot location\n", @@ -546,6 +548,8 @@ "source": [ "# Callback to track model error\n", "model_error = []\n", + "\n", + "\n", "def fwi_callback(xk):\n", " vp = model1.vp.data[model1.nbl:-model1.nbl, model1.nbl:-model1.nbl]\n", " m = 1.0 / (vp.reshape(-1).astype(np.float64))**2\n", @@ -889,7 +893,7 @@ "ftol = 0.1\n", "maxiter = 5\n", "result = optimize.minimize(loss, m0, args=(model0, geometry0, d_obs), method='L-BFGS-B', jac=True,\n", - " callback=fwi_callback, bounds=bounds, options={'ftol':ftol, 'maxiter':maxiter, 'disp':True})" + " callback=fwi_callback, 
bounds=bounds, options={'ftol': ftol, 'maxiter': maxiter, 'disp': True})" ] }, { @@ -977,7 +981,9 @@ "import matplotlib.pyplot as plt\n", "\n", "# Plot model error\n", - "plt.plot(range(1, maxiter+1), model_error); plt.xlabel('Iteration number'); plt.ylabel('L2-model error')\n", + "plt.plot(range(1, maxiter+1), model_error)\n", + "plt.xlabel('Iteration number')\n", + "plt.ylabel('L2-model error')\n", "plt.show()" ] }, diff --git a/examples/seismic/tutorials/04_dask_pickling.ipynb b/examples/seismic/tutorials/04_dask_pickling.ipynb index 1fe51dd70e..f78ea912e3 100644 --- a/examples/seismic/tutorials/04_dask_pickling.ipynb +++ b/examples/seismic/tutorials/04_dask_pickling.ipynb @@ -78,18 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "\n", - "# Set up inversion parameters.\n", - "param = {'t0': 0.,\n", - " 'tn': 1000., # Simulation last 1 second (1000 ms)\n", - " 'f0': 0.010, # Source peak frequency is 10Hz (0.010 kHz)\n", - " 'nshots': 5, # Number of shots to create gradient from\n", - " 'shape': (101, 101), # Number of grid points (nx, nz).\n", - " 'spacing': (10., 10.), # Grid spacing in m. The domain size is now 1km by 1km.\n", - " 'origin': (0, 0), # Need origin to define relative source and receiver locations.\n", - " 'nbl': 40} # nbl thickness.\n", - "\n", + "# NBVAL_IGNORE_OUTPUT\n", "import numpy as np\n", "\n", "from scipy import optimize\n", @@ -107,6 +96,19 @@ "from examples.seismic import plot_image\n", "\n", "\n", + "# Set up inversion parameters.\n", + "param = {\n", + " 't0': 0.,\n", + " 'tn': 1000., # Simulation last 1 second (1000 ms)\n", + " 'f0': 0.010, # Source peak frequency is 10Hz (0.010 kHz)\n", + " 'nshots': 5, # Number of shots to create gradient from\n", + " 'shape': (101, 101), # Number of grid points (nx, nz).\n", + " 'spacing': (10., 10.), # Grid spacing in m. 
The domain size is now 1km by 1km.\n", + " 'origin': (0, 0), # Need origin to define relative source and receiver locations.\n", + " 'nbl': 40 # nbl thickness.\n", + "}\n", + "\n", + "\n", "def get_true_model():\n", " ''' Define the test phantom; in this case we are using\n", " a simple circle so we can easily see what is going on.\n", @@ -115,6 +117,7 @@ " origin=param['origin'], shape=param['shape'],\n", " spacing=param['spacing'], nbl=param['nbl'])\n", "\n", + "\n", "def get_initial_model():\n", " '''The initial guess for the subsurface model.\n", " '''\n", @@ -125,6 +128,7 @@ " spacing=param['spacing'], nbl=param['nbl'],\n", " grid=grid)\n", "\n", + "\n", "def wrap_model(x, astype=None):\n", " '''Wrap a flat array as a subsurface model.\n", " '''\n", @@ -137,30 +141,39 @@ " model.update('vp', v_curr.reshape(model.shape))\n", " return model\n", "\n", + "\n", "def load_model(filename):\n", " \"\"\" Returns the current model. This is used by the\n", " worker to get the current model.\n", " \"\"\"\n", - " pkl = pickle.load(open(filename, \"rb\"))\n", + " with open(filename, 'rb') as fh:\n", + " pkl = pickle.load(fh)\n", "\n", " return pkl['model']\n", "\n", + "\n", "def dump_model(filename, model):\n", " ''' Dump model to disk.\n", " '''\n", - " pickle.dump({'model':model}, open(filename, \"wb\"))\n", + " with open(filename, \"wb\") as fh:\n", + " pickle.dump({'model': model}, fh)\n", + "\n", "\n", "def load_shot_data(shot_id, dt):\n", " ''' Load shot data from disk, resampling to the model time step.\n", " '''\n", - " pkl = pickle.load(open(\"shot_%d.p\"%shot_id, \"rb\"))\n", + " with open(f\"shot_{shot_id}.p\", \"rb\") as fh:\n", + " pkl = pickle.load(fh)\n", "\n", " return pkl['geometry'], pkl['rec'].resample(dt)\n", "\n", + "\n", "def dump_shot_data(shot_id, rec, geometry):\n", " ''' Dump shot data to disk.\n", " '''\n", - " pickle.dump({'rec':rec, 'geometry': geometry}, open('shot_%d.p'%shot_id, \"wb\"))\n", + " with open(f'shot_{shot_id}.p', \"wb\") as 
fh:\n", + " pickle.dump({'rec': rec, 'geometry': geometry}, fh)\n", + "\n", "\n", "def generate_shotdata_i(param):\n", " \"\"\" Inversion crime alert! Here the worker is creating the\n", @@ -174,12 +187,13 @@ " solver = cp['solver']\n", "\n", " # source position changes according to the index\n", - " shot_id=param['shot_id']\n", + " shot_id = param['shot_id']\n", "\n", - " solver.geometry.src_positions[0,:]=[20, shot_id*1000./(param['nshots']-1)]\n", + " solver.geometry.src_positions[0, :] = [20, shot_id*1000./(param['nshots']-1)]\n", " true_d = solver.forward()[0]\n", " dump_shot_data(shot_id, true_d.resample(4.0), solver.geometry.src_positions)\n", "\n", + "\n", "def generate_shotdata(solver):\n", " # Pick devito objects (save on disk)\n", " cp = {'solver': solver}\n", @@ -188,7 +202,7 @@ "\n", " work = [dict(param) for i in range(param['nshots'])]\n", " # synthetic data is generated here twice: serial(loop below) and parallel (via dask map functionality)\n", - " for i in range(param['nshots']):\n", + " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", " generate_shotdata_i(work[i])\n", "\n", @@ -278,7 +292,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Client setup\n", "cluster = LocalCluster(n_workers=2, death_timeout=600)\n", @@ -292,8 +306,12 @@ "nreceivers = 101\n", "# Set up receiver data and geometry.\n", "rec_coordinates = np.empty((nreceivers, len(param['shape'])))\n", - "rec_coordinates[:, 1] = np.linspace(param['spacing'][0], true_model.domain_size[0] - param['spacing'][0], num=nreceivers)\n", - "rec_coordinates[:, 0] = 980. # 20m from the right end\n", + "rec_coordinates[:, 1] = np.linspace(\n", + " param['spacing'][0],\n", + " true_model.domain_size[0] - param['spacing'][0],\n", + " num=nreceivers\n", + ")\n", + "rec_coordinates[:, 0] = 980. 
# 20m from the right end\n", "# Geometry\n", "geometry = AcquisitionGeometry(true_model, rec_coordinates, src_coordinates,\n", " param['t0'], param['tn'], src_type='Ricker',\n", @@ -360,9 +378,10 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Function\n", "\n", + "\n", "# Create FWI gradient kernel for a single shot\n", "def fwi_gradient_i(param):\n", "\n", @@ -386,7 +405,7 @@ " solver = cp['solver']\n", "\n", " # Set attributes to solver\n", - " solver.geometry.src_positions=src_positions\n", + " solver.geometry.src_positions = src_positions\n", " solver.geometry.resample(dt)\n", "\n", " # Compute simulated data and full forward wavefield u0\n", @@ -397,7 +416,7 @@ " time_range=solver.geometry.time_axis,\n", " coordinates=solver.geometry.rec_positions)\n", "\n", - " #residual.data[:] = d.data[:residual.shape[0], :] - rec.data[:residual.shape[0], :]\n", + " # residual.data[:] = d.data[:residual.shape[0], :] - rec.data[:residual.shape[0], :]\n", " residual.data[:] = d.data[:] - rec.data[0:d.data.shape[0], :]\n", " f = .5*np.linalg.norm(residual.data.flatten())**2\n", "\n", @@ -437,7 +456,7 @@ "\n", " # Define work list\n", " work = [dict(param) for i in range(param['nshots'])]\n", - " for i in range(param['nshots']):\n", + " for i in range(param['nshots']):\n", " work[i]['shot_id'] = i\n", "\n", " # Distribute worklist to workers.\n", @@ -469,16 +488,20 @@ "# function that can operate on the solution after every iteration. 
Here\n", "# we use this to monitor the true relative solution error.\n", "relative_error = []\n", + "\n", + "\n", "def fwi_callbacks(x):\n", " # Calculate true relative error\n", " true_vp = get_true_model().vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", " true_m = 1.0 / (true_vp.reshape(-1).astype(np.float64))**2\n", " relative_error.append(np.linalg.norm((x-true_m)/true_m))\n", "\n", + "\n", "# FWI with L-BFGS\n", "ftol = 0.1\n", "maxiter = 5\n", "\n", + "\n", "def fwi(model, param, ftol=ftol, maxiter=maxiter):\n", " # Initial guess\n", " v0 = model.vp.data[param['nbl']:-param['nbl'], param['nbl']:-param['nbl']]\n", @@ -492,9 +515,9 @@ " result = optimize.minimize(fwi_gradient,\n", " m0, args=(param, ), method='L-BFGS-B', jac=True,\n", " bounds=bounds, callback=fwi_callbacks,\n", - " options={'ftol':ftol,\n", - " 'maxiter':maxiter,\n", - " 'disp':True})\n", + " options={'ftol': ftol,\n", + " 'maxiter': maxiter,\n", + " 'disp': True})\n", "\n", " return result" ] @@ -832,7 +855,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "model0 = get_initial_model()\n", "\n", @@ -870,11 +893,11 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Plot FWI result\n", "\n", - "slices = tuple(slice(param['nbl'],-param['nbl']) for _ in range(2))\n", + "slices = tuple(slice(param['nbl'], -param['nbl']) for _ in range(2))\n", "vp = 1.0/np.sqrt(result['x'].reshape(true_model.shape))\n", "plot_image(true_model.vp.data[slices], vmin=2.4, vmax=2.8, cmap=\"cividis\")\n", "plot_image(vp, vmin=2.4, vmax=2.8, cmap=\"cividis\")" @@ -897,11 +920,13 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "import matplotlib.pyplot as plt\n", "\n", "# Plot model error\n", - "plt.plot(range(1, maxiter+1), relative_error); plt.xlabel('Iteration number'); plt.ylabel('L2-model error')\n", + "plt.plot(range(1, maxiter+1), relative_error)\n", + "plt.xlabel('Iteration number')\n", + "plt.ylabel('L2-model error')\n", 
"plt.show()" ] }, diff --git a/examples/seismic/tutorials/05_staggered_acoustic.ipynb b/examples/seismic/tutorials/05_staggered_acoustic.ipynb index 03f6c62b45..e342d2aacc 100644 --- a/examples/seismic/tutorials/05_staggered_acoustic.ipynb +++ b/examples/seismic/tutorials/05_staggered_acoustic.ipynb @@ -68,7 +68,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -191,7 +191,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Propagate the source\n", "op_2(time=src.time_range.num-1, dt=dt)" @@ -234,7 +234,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0])\n", @@ -293,7 +293,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op_4 = Operator([u_v_4, u_p_4] + src_p)\n", "# Propagate the source\n", "op_4(time=src.time_range.num-1, dt=dt)" @@ -336,7 +336,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v4[0].data[-1])\n", diff --git a/examples/seismic/tutorials/06_elastic.ipynb b/examples/seismic/tutorials/06_elastic.ipynb index 4ee235fc8f..8c6a80acdd 100644 --- a/examples/seismic/tutorials/06_elastic.ipynb +++ b/examples/seismic/tutorials/06_elastic.ipynb @@ -62,6 +62,7 @@ " a = 0.004\n", " return -2.*a*(t - 1/f0) * np.exp(-a * (t - 1/f0)**2)\n", "\n", + "\n", "# Timestep size from Eq. 7 with V_p=6000. and dx=100\n", "t0, tn = 0., 300.\n", "dt = (10. 
/ np.sqrt(2.)) / 6.\n", @@ -88,7 +89,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -122,8 +123,8 @@ "density = 1.8\n", "\n", "# The source injection term\n", - "src_xx = src.inject(field=tau.forward[0,0], expr=src)\n", - "src_zz = src.inject(field=tau.forward[1,1], expr=src)\n", + "src_xx = src.inject(field=tau.forward[0, 0], expr=src)\n", + "src_zz = src.inject(field=tau.forward[1, 1], expr=src)\n", "\n", "# Thorbecke's parameter notation\n", "cp2 = V_p*V_p\n", @@ -140,7 +141,7 @@ "u_v = Eq(v.forward, solve(pde_v, v.forward))\n", "u_t = Eq(tau.forward, solve(pde_tau, tau.forward))\n", "\n", - "op = Operator([u_v] + [u_t] + src_xx + src_zz)" + "op = Operator([u_v] + [u_t] + src_xx + src_zz)" ] }, { @@ -193,7 +194,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "op(dt=dt)" ] @@ -255,14 +256,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0], vmin=-.5*1e-1, vmax=.5*1e-1, cmap=\"seismic\")\n", "plot_image(v[1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(tau[0, 0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[1,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[0,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" + "plot_image(tau[1, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", + "plot_image(tau[0, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" ] }, { @@ -271,7 +272,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.6285093, atol=1e-4, rtol=0)" ] @@ -287,8 +288,8 @@ "v = VectorTimeFunction(name='v', grid=grid, space_order=so, time_order=1)\n", "tau = TensorTimeFunction(name='t', grid=grid, space_order=so, time_order=1)\n", "# The source injection term\n", - "src_xx = src.inject(field=tau.forward[0,0], 
expr=src)\n", - "src_zz = src.inject(field=tau.forward[1,1], expr=src)\n", + "src_xx = src.inject(field=tau.forward[0, 0], expr=src)\n", + "src_zz = src.inject(field=tau.forward[1, 1], expr=src)\n", "\n", "# First order elastic wave equation\n", "pde_v = v.dt - ro * div(tau)\n", @@ -297,7 +298,7 @@ "u_v = Eq(v.forward, solve(pde_v, v.forward))\n", "u_t = Eq(tau.forward, solve(pde_tau, tau.forward))\n", "\n", - "op = Operator([u_v]+ [u_t] + src_xx + src_zz )" + "op = Operator([u_v]+ [u_t] + src_xx + src_zz)" ] }, { @@ -327,12 +328,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "v[0].data.fill(0.)\n", "v[1].data.fill(0.)\n", - "tau[0,0].data.fill(0.)\n", - "tau[0,1].data.fill(0.)\n", - "tau[1,1].data.fill(0.)\n", + "tau[0, 0].data.fill(0.)\n", + "tau[0, 1].data.fill(0.)\n", + "tau[1, 1].data.fill(0.)\n", "\n", "op(dt=dt)" ] @@ -394,14 +395,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Let's see what we got....\n", "plot_image(v[0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(v[1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", "plot_image(tau[0, 0].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[1,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", - "plot_image(tau[0,1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" + "plot_image(tau[1, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")\n", + "plot_image(tau[0, 1].data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap=\"seismic\")" ] }, { @@ -410,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.62521476, atol=1e-4, rtol=0)" ] diff --git a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb index 1663635e8a..72161dbd30 100644 --- a/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb +++ 
b/examples/seismic/tutorials/06_elastic_varying_parameters.ipynb @@ -53,7 +53,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Initial grid: 3km x 3km, with spacing 10m\n", "nlayers = 5\n", "so = 8\n", @@ -78,7 +78,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "aspect_ratio = model.shape[0]/model.shape[1]\n", "\n", "plt_options_model = {'cmap': 'jet', 'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", @@ -146,7 +146,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "src.show()" ] @@ -257,7 +257,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "from examples.seismic import plot_velocity\n", "plot_velocity(model, source=src.coordinates.data,\n", " receiver=rec.coordinates.data[::10, :])\n", @@ -280,7 +280,7 @@ "pde_tau = tau.dt - l * diag(div(v.forward)) - mu * (grad(v.forward) + grad(v.forward).transpose(inner=False))\n", "# Time update\n", "u_v = Eq(v.forward, model.damp * solve(pde_v, v.forward))\n", - "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", + "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", "\n", "op = Operator([u_v] + [u_t] + src_xx + src_zz + rec_term)" ] @@ -371,7 +371,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Partial ru for 1.2sec to plot the wavefield\n", "op(dt=model.critical_dt, time_M=int(1000/model.critical_dt))" ] @@ -393,10 +393,10 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "scale = .5*1e-3\n", "\n", - "plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + "plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", "\n", @@ -416,7 +416,7 @@ "ax[0, 1].set_xlabel('X (m)', fontsize=20)\n", "ax[0, 1].set_ylabel('Depth (m)', fontsize=20)\n", "\n", - "ax[1, 
0].imshow(np.transpose(tau[0,0].data[0][slices]+tau[1,1].data[0][slices]),\n", + "ax[1, 0].imshow(np.transpose(tau[0, 0].data[0][slices]+tau[1, 1].data[0][slices]),\n", " vmin=-10*scale, vmax=10*scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 0].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\",\n", " alpha=.5, **plt_options_model)\n", @@ -426,7 +426,7 @@ "ax[1, 0].set_ylabel('Depth (m)', fontsize=20)\n", "\n", "\n", - "ax[1, 1].imshow(np.transpose(tau[0,1].data[0][slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", + "ax[1, 1].imshow(np.transpose(tau[0, 1].data[0][slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 1].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\", alpha=.5, **plt_options_model)\n", "ax[1, 1].set_aspect('auto')\n", "ax[1, 1].set_title('τ_xy', fontsize=20)\n", @@ -490,7 +490,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Full run for the data\n", "op(dt=model.critical_dt, time_m=int(1000/model.critical_dt))" ] @@ -543,7 +543,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# Pressure (txx + tzz) data at sea surface\n", "extent = [rec_plot.coordinates.data[0, 0], rec_plot.coordinates.data[-1, 0], 1e-3*tn, t0]\n", "aspect = rec_plot.coordinates.data[-1, 0]/(1e-3*tn)/.5\n", @@ -582,7 +582,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", "plt.subplot(121)\n", @@ -638,7 +638,10 @@ "# Time update\n", "u_v = Eq(v2.forward, solve(pde_v2, v2.forward))\n", "# The stress equation isn't time dependent so we don't need solve.\n", - "u_t = Eq(tau0, model.damp * (l * diag(div(v2.forward)) + mu * (grad(v2.forward) + grad(v2.forward).transpose(inner=False))))\n", + "u_t = Eq(\n", + " tau0,\n", + " model.damp * (l * diag(div(v2.forward)) + mu * (grad(v2.forward) + grad(v2.forward).transpose(inner=False)))\n", + ")\n", "\n", 
"rec_term2 = rec2.interpolate(expr=v2[0])\n", "rec_term2 += rec3.interpolate(expr=v2[1])\n", @@ -676,7 +679,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Partial ru for 1.2sec to plot the wavefield\n", "op(dt=model.critical_dt, time_M=int(1000/model.critical_dt))" ] @@ -698,10 +701,10 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "scale = 1e-4\n", "\n", - "plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + "plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", "\n", @@ -721,7 +724,7 @@ "ax[0, 1].set_xlabel('X (m)', fontsize=20)\n", "ax[0, 1].set_ylabel('Depth (m)', fontsize=20)\n", "\n", - "ax[1, 0].imshow(np.transpose(tau0[0,0].data[slices]+tau0[1,1].data[slices]),\n", + "ax[1, 0].imshow(np.transpose(tau0[0, 0].data[slices]+tau0[1, 1].data[slices]),\n", " vmin=-10*scale, vmax=10*scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 0].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\",\n", " alpha=.5, **plt_options_model)\n", @@ -731,7 +734,7 @@ "ax[1, 0].set_ylabel('Depth (m)', fontsize=20)\n", "\n", "\n", - "ax[1, 1].imshow(np.transpose(tau0[0,1].data[slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", + "ax[1, 1].imshow(np.transpose(tau0[0, 1].data[slices]), vmin=-scale, vmax=scale, cmap=\"RdGy\", **plt_options_model)\n", "ax[1, 1].imshow(np.transpose(model.lam.data[slices]), vmin=2.5, vmax=15.0, cmap=\"jet\", alpha=.5, **plt_options_model)\n", "ax[1, 1].set_aspect('auto')\n", "ax[1, 1].set_title('τ_xy', fontsize=20)\n", @@ -773,7 +776,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt, time_m=int(1000/model.critical_dt))" ] }, @@ -814,7 +817,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", 
"plt.subplot(121)\n", @@ -949,10 +952,12 @@ "\n", "# First order elastic wave equation\n", "pde_v = v_rsfd.dt - ro * div45(tau_rsfd)\n", - "pde_tau = tau_rsfd.dt - l * diag(div45(v_rsfd.forward)) - mu * (grad45(v_rsfd.forward) + grad45(v_rsfd.forward).transpose(inner=False))\n", + "pde_tau = tau_rsfd.dt \\\n", + " - l * diag(div45(v_rsfd.forward)) \\\n", + " - mu * (grad45(v_rsfd.forward) + grad45(v_rsfd.forward).transpose(inner=False))\n", "# Time update\n", "u_v = Eq(v_rsfd.forward, model.damp * solve(pde_v, v_rsfd.forward))\n", - "u_t = Eq(tau_rsfd.forward, model.damp * solve(pde_tau, tau_rsfd.forward))\n", + "u_t = Eq(tau_rsfd.forward, model.damp * solve(pde_tau, tau_rsfd.forward))\n", "\n", "# Receiver\n", "rec_term = rec.interpolate(expr=tau_rsfd[0, 0] + tau_rsfd[1, 1])\n", @@ -995,7 +1000,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt)" ] }, @@ -1037,7 +1042,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# Pressure (txx + tzz) data at sea surface\n", "extent = [rec_plot.coordinates.data[0, 0], rec_plot.coordinates.data[-1, 0], 1e-3*tn, t0]\n", "aspect = rec_plot.coordinates.data[-1, 0]/(1e-3*tn)/.5\n", @@ -1076,7 +1081,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "# OBC data of vx/vz\n", "plt.figure(figsize=(15, 15))\n", "plt.subplot(121)\n", diff --git a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb index 70b5e5c51b..2a766121fc 100644 --- a/examples/seismic/tutorials/07.1_dispersion_relation.ipynb +++ b/examples/seismic/tutorials/07.1_dispersion_relation.ipynb @@ -196,13 +196,14 @@ "fmax = 100\n", "\n", "# Spatial parameters\n", - "extent = 1000 # 1km\n", + "extent = 1000 # 1km\n", "npoints = 140\n", "h = extent/npoints\n", "\n", "# Time parameters\n", "dt = 0.0008\n", "\n", + "\n", "def critical_dt(weights, h=1000/140, vmax=5500):\n", " return float(h*np.sqrt(2/np.sum([np.abs(a) for a in 
weights]))/vmax)" ] @@ -316,7 +317,7 @@ "N = 2\n", "weights = sym.finite_diff_weights(N, x, 0)\n", "\n", - "for ii, (derivative, th) in enumerate(zip(weights, ['ᵗʰ', 'ˢᵗ', 'ⁿᵈ'])):\n", + "for ii, (derivative, th) in enumerate(zip(weights, ['ᵗʰ', 'ˢᵗ', 'ⁿᵈ'], strict=True)):\n", " for jj, w in enumerate(derivative[ii:]):\n", " print(\n", " f'Weights for {ii}{th} derivative on the {jj + ii + 1} point(s)'\n", @@ -387,7 +388,7 @@ " else:\n", " m = len(weights)\n", " cosines = np.array(\n", - " np.cos(np.arange(1, m)*k*h*np.cos(alpha)) + \\\n", + " np.cos(np.arange(1, m)*k*h*np.cos(alpha)) +\n", " np.cos(np.arange(1, m)*k*h*np.sin(alpha)) - 2\n", " )\n", " total = np.sum(np.array(weights)[1:]*cosines)\n", @@ -474,7 +475,7 @@ "\n", " # Fix beta, vary alpha\n", " alines = []\n", - " for r, v in zip(courant, velocity):\n", + " for r, v in zip(courant, velocity, strict=True):\n", " data = np.array([dispersion_ratio(weights, h, dt, v, k, a) for a in linspace])\n", " line, = ax[0].plot(linspace, data, label=f'{r=:.3g}')\n", " alines.append(line)\n", @@ -498,7 +499,7 @@ "\n", " # Fix alpha, vary beta\n", " blines = []\n", - " for r, v in zip(courant, velocity):\n", + " for r, v in zip(courant, velocity, strict=True):\n", " data = np.array([dispersion_ratio(weights, h, dt, v, b/h, alpha) for b in linspace])\n", " line, = ax[1].plot(linspace, data, label=f'{r=:.3g}')\n", " blines.append(line)\n", @@ -534,14 +535,14 @@ " ax[1].set_title(f'α={a:.3g}')\n", " ax[0].set_title(f'β={b:.3g}')\n", " k = b/h\n", - " for line, r, v in zip(alines, courant, velocity):\n", + " for line, v in zip(alines, velocity, strict=True):\n", " new_data = np.array([dispersion_ratio(weights, h, dt, v, k, a_) for a_ in linspace])\n", " line.set_ydata(new_data)\n", " bvline.set_xdata((b, b))\n", " aann.set_text(f'α={a:.3g}')\n", " aann.xy = (a, ylim[0] + (ylim[1] - ylim[0])*2/3)\n", "\n", - " for line, r, v in zip(blines, courant, velocity):\n", + " for line, v in zip(blines, velocity, strict=True):\n", " 
new_data = np.array([dispersion_ratio(weights, h, dt, v, b_/h, a) for b_ in linspace])\n", " line.set_ydata(new_data)\n", " avline.set_xdata((a, a))\n", @@ -557,9 +558,10 @@ "\n", " return ax, (alpha_slider, beta_slider)\n", "\n", + "\n", "fig, ax = plt.subplots(1, 2)\n", "widget_handle1 = plot_dispersion(fornberg, h, dt, velocity=vrange, ax=ax)\n", - "fig.set_size_inches(12,6)\n", + "fig.set_size_inches(12, 6)\n", "plt.show()" ] }, @@ -731,7 +733,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=fornberg, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=fornberg, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=fornberg, h=h, dt=dt/2, v=1500)\n", "up, datap, rp = acoustic(weights=fornberg, h=h, dt=3*dt, v=1500)" ] @@ -770,9 +772,10 @@ " 'g--', lw=2\n", " )\n", "\n", + "\n", "def plot_shot(data, ax, clip=0.1, extents=(0, 1000, 0, 0.6), vline=None, r=None, first_arrival=True):\n", " ax.imshow(\n", - " data[::-1,:],\n", + " data[::-1, :],\n", " extent=extents,\n", " vmin=-clip,\n", " vmax=clip,\n", @@ -788,7 +791,7 @@ " time = np.linspace(extents[2], extents[3], data.shape[0])\n", " space = np.linspace(extents[0], extents[1], data.shape[1])\n", " if not isinstance(first_arrival, np.ndarray):\n", - " arrival = time[np.argmax(np.abs(data)>0.01, axis=0)]\n", + " arrival = time[np.argmax(np.abs(data) > 0.01, axis=0)]\n", " ax.plot(space, arrival, c='red', lw=1)\n", " ax.annotate('first arrival',\n", " xy=((extents[1] - extents[0])/2, arrival[arrival.size//2]), xycoords='data',\n", @@ -808,6 +811,7 @@ " )\n", " return arrival\n", "\n", + "\n", "def plot_profile(array, ax, clip=1, extent=(0, 1), axis_labels=('x', 'A'), first_arrival=None):\n", " ax.plot(np.linspace(extent[0], extent[1], array.size), array)\n", " ax.set_xlim(extent)\n", @@ -870,13 +874,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", "plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 1], 
hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -932,12 +936,12 @@ "fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", "plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", @@ -998,7 +1002,7 @@ "}]\n", "constraints += [{\n", " 'type': 'eq',\n", - " 'fun': lambda x: np.sum([xi*m**(2*jj) for m, xi in enumerate(x)])\n", + " 'fun': lambda x: np.sum([xi*m**(2*jj) for m, xi in enumerate(x)]) # noqa: B023\n", "} for jj in range(2, (len(initial_guess) + 1)//2)]" ] }, @@ -1047,11 +1051,18 @@ "def objective(a):\n", " x = np.linspace(0, np.pi/2, 201)\n", " m = np.arange(1, len(a) 
+ 1)\n", - " y = x**2 + a[0] + 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(a[1:], m)], axis=0)\n", + " y = x**2 + a[0] + 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(a[1:], m, strict=False)], axis=0)\n", " return sp.integrate.trapezoid(y**2, x=x)\n", "\n", + "\n", "print(f'Value of objective function at initial guess: {objective(initial_guess)}')\n", - "opt1 = sp.optimize.minimize(objective, initial_guess, method='SLSQP', constraints=constraints, options=dict(ftol=1e-15, maxiter=500))\n", + "opt1 = sp.optimize.minimize(\n", + " objective,\n", + " initial_guess,\n", + " method='SLSQP',\n", + " constraints=constraints,\n", + " options=dict(ftol=1e-15, maxiter=500)\n", + ")\n", "print(opt1)" ] }, @@ -1106,36 +1117,26 @@ }, "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "cb77139907c04e22b10e06210f00cfc6", - "version_major": 2, - "version_minor": 0 - }, - "image/png": "iVBORw0KGgoAAAANSUhEUgAABLAAAAGQCAYAAAC+tZleAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAqFFJREFUeJzs3XdY1fX/xvHnYQ8ZAoKAoLi3OHKXO1PLHKWVmtvKhmWammlmw9Ww9c20XC21oVZuTTNH7r0HLmSKgGw45/z+OIrxU8sBHMb9uK5znfMZ5/N+HTIO5z7vYTCbzWZEREREREREREQKKBtrFyAiIiIiIiIiIvJvFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBpgBLREREREREREQKNAVYIiIiIiIiIiJSoCnAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiBRCZ86cwWAwMHfuXGuXIiIiUmRMmDABg8GQY19WVhavvfYaQUFB2NjY0KVLFwCSkpIYNGgQpUuXxmAw8PLLL+d/wSIixYgCLBERYO7cuRgMhpveRo8ebbW6vv/+e6ZPn2619kVERAqz///+7uTkREBAAO3bt+eTTz7hypUr/3mN2bNnM23aNB577DHmzZvHK6+8AsB7773H3Llzee655/jmm2/o06dPXr8cEZFizWA2m83WLkJExNrmzp1L//79mThxIiEhITmO1axZk9DQUKvU9fDDD3Pw4EHOnDmTY7/ZbCY9PR17e3tsbW2tUpuIiEhB9//f3zMzM4mMjGTDhg
2sWbOG4OBgfv31V2rXrg1YeltlZWXh5OSUfY0nnniCTZs2ceHChRzXbty4MXZ2dmzatClfX5OISHFlZ+0CREQKkg4dOtCgQQNrl/Gfrn2LLCIiIv/t/7+/jxkzhj/++IOHH36Yzp07c+TIEZydnbGzs8POLudHpOjoaDw9PW+4ZnR0NNWrV8+1Gk0mExkZGXp/FxG5BQ0hFBG5DQaDgQkTJtywv1y5cvTr1y97+9pQhc2bNzN8+HBKlSqFq6srXbt2JSYm5obnr1ixghYtWuDm5oa7uzv33Xcf33//PQAtW7Zk2bJlnD17NnvoQ7ly5YBbz4H1xx9/cP/99+Pq6oqnpyePPvooR44cyXHOtfk9Tp48Sb9+/fD09MTDw4P+/fuTkpKS49w1a9bQvHlzPD09KVGiBFWqVOH111+/8x+giIhIAdO6dWvGjRvH2bNn+fbbb4Gcc2Bde69dv349hw4dyn4v3rBhAwaDgbCwMJYtW5a9/1pv6fT0dN58800qVqyIo6MjQUFBvPbaa6Snp+do32Aw8MILL/Ddd99Ro0YNHB0dWblyJQDh4eEMGDAAPz8/HB0dqVGjBrNnz87x/Gt1LFq0iHfffZcyZcrg5OREmzZtOHny5A2vd9u2bXTs2JGSJUvi6upK7dq1+fjjj3Occ/ToUR577DG8vLxwcnKiQYMG/Prrr7ny8xYRuVfqgSUi8g8JCQnExsbm2Ofj43PH13nxxRcpWbIkb775JmfOnGH69Om88MILLFy4MPucuXPnMmDAAGrUqMGYMWPw9PRkz549rFy5kqeeeoqxY8eSkJDAhQsX+OijjwAoUaLELdtcu3YtHTp0oHz58kyYMIHU1FQ+/fRTmjVrxu7du7PDr2t69OhBSEgIkyZNYvfu3Xz11Vf4+voyZcoUAA4dOsTDDz9M7dq1mThxIo6Ojpw8eZLNmzff8c9DRESkIOrTpw+vv/46q1evZvDgwTmOlSpVim+++YZ3332XpKQkJk2aBEC1atX45ptveOWVVyhTpgyvvvpq9vkmk4nOnTuzadMmhgwZQrVq1Thw4AAfffQRx48fZ8mSJTna+OOPP1i0aBEvvPACPj4+lCtXjqioKBo3bpwdcJUqVYoVK1YwcOBAEhMTb5gsfvLkydjY2DBixAgSEhKYOnUqvXr1Ytu2bdnnrFmzhocffhh/f3+GDRtG6dKlOXLkCL///jvDhg0DLO/7zZo1IzAwkNGjR+Pq6sqiRYvo0qULP//8M127ds3ln76IyJ1RgCUi8g9t27a9Yd/dTBXo7e3N6tWrs7/FNZlMfPLJJyQkJODh4UFCQgIvvfQSDRs2ZMOGDTmGC1xrr127dgQGBnL58mV69+79n22OHDkSLy8vtm7dipeXFwBdunShbt26vPnmm8ybNy/H+XXr1uXrr7/O3r506RJff/11doC1Zs0aMjIyWLFixV2FeCIiIgVdmTJl8PDw4NSpUzccc3V1pXfv3nz11VfY2trmeC/u3bs3b7zxBoGBgTn2f/vtt6xdu5Y///yT5s2bZ++vWbMmzz77LFu2bKFp06bZ+48dO8aBAwdyDEUcNGgQRqORAwcO4O3tDcCzzz7Lk08+yYQJE3jmmWdwdnbOPj8tLY29e/fi4OAAQMmSJRk2bBgHDx6kZs2aGI1GnnnmGfz9/dm7d2+O4ZD//Btn2LBhBAcHs2PHDhwdHQEYOnQozZs3Z9SoUQqwRMTqNIRQROQfPv/8c9asWZPjdjeGDBmSYxnu+++/H6PRyNmzZwFLOHTlyhVGjx59w1wX/3/57tsRERHB3r176devX3Z4BVC7dm3atWvH8uXLb3jOs88+m2P7/vvv59KlSyQmJgJk/4G7dOlSTCbTHdckIiJSGJQoUeK2ViO8HT/++CPVqlWjatWqxMbGZt9at24NwPr163Oc36JFixzhldls5ueff+aRRx7BbDbnuEb79u1JSEhg9+7dOa7Rv3//7PAKLO/nAKdPnwZgz549hIWF8f
LLL98wl9e1vzni4uL4448/6NGjB1euXMlu89KlS7Rv354TJ04QHh6eKz8jEZG7pR5YIiL/0LBhw1yZxD04ODjHdsmSJQG4fPkyQPY3vTVr1rzntoDsYKxKlSo3HKtWrRqrVq0iOTkZV1fX26rR3d2dnj178tVXXzFo0CBGjx5NmzZt6NatG4899hg2Nvr+Q0REioakpCR8fX1z5VonTpzgyJEjlCpV6qbHo6Ojc2z//5WPY2JiiI+PZ+bMmcycOfO2rpEbf3OcPHkSs9nMuHHjGDdu3C3bDQwMvOU1RETymgIsEZF7YDQab7rf1tb2pvvvZjhiXvmvGp2dndm4cSPr169n2bJlrFy5koULF9K6dWtWr159y+eLiIgUFhcuXCAhIYGKFSvmyvVMJhO1atXiww8/vOnxoKCgHNv/HAp47flgGaLYt2/fm16jdu3aObZz42+Oa+2OGDGC9u3b3/Sc3PoZiYjcLQVYIiK3oWTJksTHx+fYl5GRQURExF1dr0KFCgAcPHjwX/8gvN3hhGXLlgUsc2n8f0ePHsXHxydH76vbZWNjQ5s2bWjTpg0ffvgh7733HmPHjmX9+vU3nS9MRESkMPnmm28Abhna3KkKFSqwb98+2rRpc1dTApQqVQo3NzeMRmOuvc/+82+OW12zfPnyANjb2+v9XUQKLI0BERG5DRUqVGDjxo059s2cOfOWPbD+y4MPPoibmxuTJk0iLS0tx7F/fmPq6upKQkLCf17P39+f0NBQ5s2blyNoO3jwIKtXr6Zjx453XGNcXNwN+0JDQwFuWApcRESksPnjjz94++23CQkJoVevXrlyzR49ehAeHs6sWbNuOJaamkpycvK/Pt/W1pbu3bvz888/c/DgwRuOx8TE3HFN9erVIyQkhOnTp9/wZdy1vzl8fX1p2bIlX3755U2/nLubdkVEcpt6YImI3IZBgwbx7LPP0r17d9q1a8e+fftYtWrVXa/O5+7uzkcffcSgQYO47777eOqppyhZsiT79u0jJSUle8XA+vXrs3DhQoYPH859991HiRIleOSRR256zWnTptGhQweaNGnCwIEDSU1N5dNPP8XDw4MJEybccY0TJ05k48aNdOrUibJlyxIdHc3//vc/ypQpk2NlJRERkYJuxYoVHD16lKysLKKiovjjjz9Ys2YNZcuW5ddff71hQZW71adPHxYtWsSzzz7L+vXradasGUajkaNHj7Jo0SJWrVr1n3NtTp48mfXr19OoUSMGDx5M9erViYuLY/fu3axdu/amXzD9GxsbG7744gseeeQRQkND6d+/P/7+/hw9epRDhw6xatUqwLKQTfPmzalVqxaDBw+mfPnyREVFsXXrVi5cuMC+ffvu+uciIpIbFGCJiNyGwYMHExYWxtdff83KlSu5//77WbNmDW3atLnraw4cOBBfX18mT57M22+/jb29PVWrVuWVV17JPmfo0KHs3buXOXPm8NFHH1G2bNlbBlht27Zl5cqVvPnmm4wfPx57e3tatGjBlClTbpgk9nZ07tyZM2fOMHv2bGJjY/Hx8aFFixa89dZbeHh43PXrFhERyW/jx48HwMHBAS8vL2rVqsX06dPp378/bm5uudaOjY0NS5Ys4aOPPmL+/PksXrwYFxcXypcvz7Bhw6hcufJ/XsPPz4/t27czceJEfvnlF/73v//h7e1NjRo1mDJlyl3V1b59e9avX89bb73FBx98gMlkokKFCgwePDj7nOrVq7Nz507eeust5s6dy6VLl/D19aVu3brZPz8REWsymAvSjMIiIiIiIiIiIiL/j+bAEhERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEBTgCUiIiIiIiIiIgWaAiwRERERERERESnQFGCJiIiIiIiIiEiBZmftAqToMZlMXLx4ETc3NwwGg7XLERERwWw2c+XKFQICArCx0fd390rv9SIiUtDovb7oU4Alue7ixYsEBQVZuwwREZEbnD9/nj
Jlyli7jEJP7/UiIlJQ6b2+6FKAJbnOzc0NsPzicHd3t3I1IiIikJiYSFBQUPZ7lNwbvdeLiEhBo/f6ok8BluS6a0MJ3N3d9UetiIgUKBruljv0Xi8iIgWV3uuLLg0MFRERERERERGRAk0BloiIiIiIiIiIFGgKsEREREREREREpEDTHFjFzMaNG5k2bRq7du0iIiKCxYsX06VLl+zjZrOZN998k1mzZhEfH0+zZs344osvqFSpUq7XYjQayczMzPXrStFib2+Pra2ttcsQERERERERK1KAVcwkJydTp04dBgwYQLdu3W44PnXqVD755BPmzZtHSEgI48aNo3379hw+fBgnJ6dcqcFsNhMZGUl8fHyuXE+KPk9PT0qXLq0JGUVERERERIopBVjFTIcOHejQocNNj5nNZqZPn84bb7zBo48+CsD8+fPx8/NjyZIlPPHEE7lSw7XwytfXFxcXF4UScktms5mUlBSio6MB8Pf3t3JFIiIiIiIiYg0KsCRbWFgYkZGRtG3bNnufh4cHjRo1YuvWrbcMsNLT00lPT8/eTkxMvGUbRqMxO7zy9vbOveKlyHJ2dgYgOjoaX19fDScUEREREREphjSJu2SLjIwEwM/PL8d+Pz+/7GM3M2nSJDw8PLJvQUFBtzz32pxXLi4uuVCxFBfX/r1ozjQREREREZHiSQGW3LMxY8aQkJCQfTt//vx/PkfDBuVO6N+LiIiIiIhI8aYAS7KVLl0agKioqBz7o6Kiso/djKOjI+7u7jlukjsmTJhAaGiotcsQEcl3ZrOZbdu2WbsMERERyUPpWUbikjOsXYYUEpoDS7KFhIRQunRp1q1blx2aJCYmsm3bNp577jnrFlcA9OvXj3nz5t2w/8SJE1SsWNEKFYmIFF0zZsxg6NChDB8+nA8++MDa5YiIiMgdMJvNXE7JJDIhjagraUQlpBGZmEZUYhqRCWnEJSRiSAzHNS2Cir4leOvlF6xdshQCCrCKmaSkJE6ePJm9HRYWxt69e/Hy8iI4OJiXX36Zd955h0qVKhESEsK4ceMICAigS5cu1iu6AHnooYeYM2dOjn2lSpW64+tkZGTg4OCQW2XdsczMTOzt7a3WvojIvzlx4gQjRowAIDg42MrViIiIyD+ZzWYuJWcQEZ9GeHwqEQmpXIxPJSLhWkCVSsaVWEoZowk0XCLQEEuAIZbKhku0uvq4lOHqwl8OcCyxPKAAS/6bAqxiZufOnbRq1Sp7e/jw4QD07duXuXPn8tprr5GcnMyQIUOIj4+nefPmrFy5EicnJ2uVXKA4OjredDjln3/+yciRI9m3bx9eXl707duXd955Bzs7y/9iLVu2pGbNmtjZ2fHtt99Sq1Yt3nzzTVq1asXatWsZNWoUhw8fJjQ0lDlz5lClSpUc1//yyy955513uHTpEg8//DCzZs3Cw8Mj+/hXX33FBx98QFhYGOXKleOll15i6NChAJw5c4aQkBAWLFjA//73P7Zt28aMGTPo3bs3w4cPZ/78+dja2jJo0CAiIyNJSEhgyZIlefdDFBH5F1lZWfTp04eUlBTatGnDiy++aO2SREREipWk9Cwi4lOvhlNpXIxP5WJ82tWQynJzz7pMkCH66i2G8oZY7jfEEmC4RIDhEi526f+ZNpjsXDB7BFHZv2b+vDAp9BRgFTMtW7bEbDbf8rjBYGDixIlMnDgxH6sq3MLDw+nYsSP9+vVj/vz5HD16lMGDB+Pk5MSECROyz5s3bx7PPfccmzdvBiAiIgKAsWPH8sEHH1CqVCmeffZZBgwYkH0OwMmTJ1m0aBG//fYbiYmJDBw4kKFDh/Ldd98B8N133zF+/Hg+++wz6taty549exg8eDCurq707ds3+zqjR4/mgw8+oG7dujg5OTFlyhS+++475syZQ7Vq1fj4449ZsmRJjoBTRCS/TZkyhW3btuHh4cGcOXOwsdF0nSIiIrnFZDITm5TO+cspnI+zhFQX46/3oLoYn0piWhbOpB
FkiCHYEE2wIZqahmg6XH0cZBuDs91/z1tlLuGHwSMIPMpYbp7B1x97BGHjXBK0WJPcAQVYYnVms5nUTGO+t+tsb3vHq9v9/vvvlChRInu7Q4cOVK5cmaCgID777DMMBgNVq1bl4sWLjBo1ivHjx2d/+KpUqRJTp07Nfu61AOvdd9+lRYsWgCVk6tSpE2lpadm93tLS0pg/fz6BgYEAfPrpp3Tq1IkPPviA0qVL8+abb/LBBx/QrVs3wDKX2eHDh/nyyy9zBFgvv/xy9jnXrjNmzBi6du0KwGeffcby5cvv6OchIpKbdu/enR38f/rppwQFBVm3IBERkULm2txT5+NSOH85hQuXUzkfd/X+6nZGlgkw400iIYYIQmwiqXctnDJEE+QYfX2I363aMdhgcC8DJcuCZ1nwDIJ/hlUeZTDYOebPi5ZiQwGWWF1qppHq41fle7uHJ7bHxeHO/hdo1aoVX3zxRfa2q6srzz//PE2aNMkRhjVr1oykpCQuXLiQPX9L/fr1b3rN2rVrZz/29/cHIDo6Ovt5wcHB2eEVQJMmTTCZTBw7dgw3NzdOnTrFwIEDGTx4cPY5WVlZOYYYAjRo0CD7cUJCAlFRUTRs2DB7n62tLfXr18dkMt3+D0REJJekpaXRp08fsrKy6N69O71797Z2SSIiIgVScnoWZy+lXO1FZQmlLlztUXXhcgrJGdc7B7iTRHlDJOUMkdSzibQEVg6RhNhEUYKUf2/IydMSUJUsd/3madk2eASBnfXm9JXiSQGWyB1wdXW96xUHXV1db7r/n5OpXwvBbjdESkpKAmDWrFk0atQoxzFbW9vbal9EpCAYM2YMhw8fxs/PjxkzZtxxD1kREZGiJCE1k3OXUjhzKZmzl5I5cykl+z7mSnqOc51Ip4IhgkqGCNoZIgmxj6SyXRRlicTd/G89qQyW3lLeFaBkCHiF/COkKgvOJfP0NYrcKQVYYnXO9rYcntjeKu3mhmrVqvHzzz9jNpuzP3Bt3rwZNzc3ypQpc8/XP3fuHBcvXiQgIACAv//+GxsbG6pUqYKfnx8BAQGcPn2aXr163fY1PTw88PPzY8eOHTzwwAMAGI1Gdu/eTWho6D3XLCJyJ9auXcv06dMB+Prrr/Hx8bFuQSIiInns2lC/7IAqNoVzcdcCqxTikm+cY8qDJCoawmlte5Ga9hFUc4iknOkC3llRGPh/8xz/c9PNH7wqgHd58K549fHV0Mpei3VJ4aEAS6zOYDDc8VC+gmTo0KFMnz6dF198kRdeeIFjx47x5ptvMnz48FyZfNjJyYm+ffvy/vvvk5iYyEsvvUSPHj2yV0N86623eOmll/Dw8OChhx4iPT2dnTt3cvny5exVJm/mxRdfZNKkSVSsWJGqVavy6aefcvnyZfV6EJF8FRcXlz1f33PPPUenTp2sXJGIiEjuSc0wEhabzOnYJE5FW+5PxyRz5lIyV9KybvIMM35cpplNOKFO0dR2jKSC4SL+medwzbyU89R/ZlzOXuBT6Xo45V3B8tirPDiWQKQoKLypgUgBERgYyPLlyxk5ciR16tTBy8uLgQMH8sYbb+TK9StWrEi3bt3o2LEjcXFxPPzww/zvf//LPj5o0CBcXFyYNm0aI0eOxNXVlVq1avHyyy//63VHjRpFZGQkTz/9NLa2tgwZMoT27dvfMPRQRCSvmM1mnn32WS5evEiVKlV4//33rV2SiIjIHTObzUQlpnMqJonTMUmcikm++jiZ8PjUWz7PjRSaukVxn0skNWwvUNZ4hlIpp7DPvDrszwT8/6e7l4FSlcGnyj/uq4Crei9L0Wcwm83m/z5N5PYlJibi4eFBQkIC7u7uOY6lpaURFhZGSEhI9ip7UjCYTCaqVatGjx49ePvtt61dTg76dyNSNH3zzTc8/fTT2NnZsXXr1hyLTeS2f3tvkjunn6eIFEeZRhNnYpM5HpXEyeik7N5Up2OSckyc/v95O8H9Xpe5zzmSarYXKJMZRskrJ7
BPCr/5Ewy2lp5TpaqAT+Xr9z6V1ZvqX+i9qehTDyyRYurs2bOsXr2aFi1akJ6ezmeffUZYWBhPPfWUtUsTkWLgzJkzPP/88wBMmDAhT8MrERGRO5FlNHHmUgonoq5wPCqJ49FXOBF1hbDYZDKNN+//YWtjINjLhZolTTR2vUh1wgjOOIFH4nFs405iiMu8eWPugeBXA3yrX7/3qQR2jnn4CkUKJwVYIsWUjY0Nc+fOZcSIEZjNZmrWrMnatWupVq2atUsTkSLOaDTSp08frly5QrNmzRg9erS1SxIRkWLIaDJz9pKlR9WJqCscj7bcn45JJsN481XBXR1sqejnRsVSJajpkUpt27OUyzhJycQj2ETuh/Nnb96Yo/vVkKr6P8KqalrpT+QOKMASKaaCgoLYvHmztcsQkWJo2rRpbNq0CTc3N7755hvNvSciInnuUlI6RyOvcCQikcMRiRyNuMLJmCQysm4eVDnb21LJrwSVfN2o7OtKLbcrVDadwjvxKIbI/XBuHyRF3rwxz7LgXwf8a0Pp2pbAyqMMaLEkkXuiAEtERETyze7duxk3bhwAn3zyCSEhIVauSEREipIso4nTsckciUjkSMSVq/eJRF9Jv+n5TvY2VPJ1o5JfCSr7uVHJtwRVPbLwTzqCzcXNEL4Ltu+ClNibPNtgmZfqWljlXwdK11KvKpE8ogBLRERE8kVKSgq9evUiKyuL7t2707dvX2uXJCIihVh8SgaHrwZVRyMSORKZyPGoW/eqKuftQtXS7lTzd6eavxtVS7tTxs0Gm+iDEL4NLuyEfbsg7tSNT7axtwz5868N/qGWsMqvBji45u2LFJFsCrBEREQkX4waNYqjR4/i7+/Pl19+iUFDKURE5DZdSkrnQHgCB8MTrt4nEh6fetNzXR1sqervTtXSblfDKstjV0c7SAiH83/D2W2weQdEHgDTTSZY96oAgfWhTAPLfelamlhdxMoUYImIiEieW7FiBZ999hkAc+fOxdvb28oViYhIQRVzJT07qDoQnsCh8AQuJqTd9NwgL2eqlXanqr871f0tgVVQSRdsbAxgMkL0ETi/Bnb+Dee2QcK5Gy/i4g2BDa6GVfUgoB64eOXxqxSRO6UAS0RERPJUbGwsAwYMAOCll17iwQcftHJFAtC1a1c2bNhAmzZt+Omnn6xdjogUU3HJGew9f5n9Fyy9qg6GJxCZePOwqryPKzUDPagV6EHNQA+qB7jj4Wx//YSMFLi4Gw5ttYRV57dDekLOixhsLBOrBzexBFZlGlgmXVevYJECTwGWiIiI5Bmz2cyQIUOIjIykevXqTJ482dolyVXDhg1jwIABzJs3z9qliEgxkZZp5NDFRPadj2fv1du5uJQbzjMYoEKpEtQMcM8OrKoHuOPmZJ/zxMxUOL0Zwv6CM39B+O4bhwPau0LQfZbAKqiRJbBydMvDVykieUUBlojctTNnzhASEsKePXsIDQ1lw4YNtGrVisuXL+Pp6Wnt8kSkAJg1axaLFy/G3t6e7777DmdnZ2uXJFe1bNmSDRs2WLsMESmizGYzYbHJ2UHV3vPxHIlIJNNovuHcCqVcqVPG0xJWlfGgur+7Zb6q/y8rHS7suB5YXdgBxoyc55QoDcGNLYFVcGPwqwm2+tgrUhTo/2SR29CvX7/sb6jt7Ozw8vKidu3aPPnkk/Tr1w8bG5vsc8uVK8fZs2cBcHZ2pkKFCgwbNoxBgwZln3Mt6LnG19eX5s2bM23aNMqXL3/H9U2YMIElS5awd+/eu3yFdycoKIiIiAh8fHxu+zkzZ87k+++/Z/fu3Vy5ckVhl0gRdvjwYV5++WUAJk2aRGhoqFXrKUw2btzItGnT2LVrFxERESxevJguXbrkOOfzzz9n2rRpREZGUqdOHT799FMaNmxonYJFpNhLTMtkz7l4dp29zN7z8ew7H09C6o2To/uUcCA0yJM6ZTwJDfakdhnPnMMA/8lkhIi9cOoPS2h1fhtk/b/hhW4BEH
I/lLsfyjWHkuU0HFCkiFKAJXKbHnroIebMmYPRaCQqKoqVK1cybNgwfvrpJ3799Vfs7K7/7zRx4kQGDx5MSkoKP/74I4MHDyYwMJAOHTrkuOaxY8dwc3PjxIkTDBkyhEceeYT9+/dja2ub3y/vrtja2lK6dOk7ek5KSgoPPfQQDz30EGPGjMmjykTE2tLS0njqqadITU3lwQcf5JVXXrF2SYVKcnIyderUYcCAAXTr1u2G4wsXLmT48OHMmDGDRo0aMX36dNq3b8+xY8fw9fUFIDQ0lKysrBueu3r1agICAvL8NYhI0RYen8rOM3HsPHOZnWcvczQyEfP/61zlaGdDzUAPQoM8s29lSjr/+yq0iRctgdXJdXB6A6TG5Tzu6ns9sAp5ALzKK7ASKSYUYIncJkdHx+ywJjAwkHr16tG4cWPatGnD3Llzc/SwcnNzyz531KhRTJ06lTVr1twQYPn6+uLp6Ym/vz/jx4+nV69enDx5kipVqtzQ/oYNG3jttdc4dOgQ9vb21KhRg++//57169fz1ltvAWT/MTBnzhz69etHfHw8I0aMYOnSpaSnp9OgQQM++ugj6tSpA1zvufXqq68ybtw4Ll++TIcOHZg1axZubpa5AUwmE++//z4zZ87k/Pnz+Pn58cwzzzB27NgbhhDejmu9MTRsRaRoGz16NPv27aNUqVLMmzcvR09V+W8dOnS44T3jnz788EMGDx5M//79AZgxYwbLli1j9uzZjB49GiBXe+Wmp6eTnp6evZ2YmJhr1xaRgs9oMnMkIpFdZy+z40wcu85eJuImqwKW9XahfnBJ6pYtSd0gT6qUdsPe9j9+/2emwtktltDq1B8QfTjncUd3S1BVvqXl3qeyAiuRYkoBlsg9aN26NXXq1OGXX37JEWBdYzKZWLx4MZcvX8bBweFfr3VtXpiMjIwbjmVlZdGlSxcGDx7MDz/8QEZGBtu3b8dgMNCzZ08OHjzIypUrWbt2LQAeHh4APP744zg7O7NixQo8PDz48ssvadOmDcePH8fLy7I08KlTp1iyZAm///47ly9fpkePHkyePJl3330XgDFjxjBr1iw++ugjmjdvTkREBEePHr37H5qIFHnLly/n448/BmDu3Ll33FNT/l1GRga7du3K0YvVxsaGtm3bsnXr1jxpc9KkSdlflohI0ZeWaWT32ctsvxpW7T57meQMY45z7GwM1Ahwp0E5LxqULUn9ciXxdXO6vQYSwuH4Sji2wjKXVY5hgQYIrAcV2kDFNhBYH2xvMcRQRIoVBVhifWYzZN64+kies3fJlW9vqlatyv79+3PsGzVqFG+88Qbp6elkZWXh5eV104DrmoiICN5//30CAwNv2vsqMTGRhIQEHn74YSpUqABAtWrVso+XKFECOzu7HB8SN23axPbt24mOjsbR0RGA999/nyVLlvDTTz8xZMgQwBKyzZ07N7vHVZ8+fVi3bh3vvvsuV65c4eOPP+azzz6jb9++AFSoUIHmzZvfzY9KRIqBqKio7F5BL730Eh07drRyRUVPbGwsRqMRPz+/HPv9/Pzu6AuGtm3bsm/fPpKTkylTpgw//vgjTZo0uem5Y8aMYfjw4dnbiYmJBAUF3d0LEJEC51pg9ffpS/x9Oo695+PJMJpynOPmaEe9siVpULYkDcp5USfIAxeH2/w4aTZD5H5LYHVsOUTsy3ncLQAqtoYKraF8K3DxyqVXJiJFiQIssb7MFHjPCnNxvH4RHFzv+TJms/mGcfwjR46kX79+REREMHLkSIYOHUrFihVveG6ZMmUwm82kpKRQp04dfv7555v21PLy8qJfv360b9+edu3a0bZtW3r06IG/v/8t69q3bx9JSUl4e3vn2J+amsqpU6eyt8uVK5cdXgH4+/sTHR0NwJEjR0hPT6dNmza398MQkWLNZDLRt29foqOjqV27NlOmTLF2SfIvrvXavR2Ojo7ZX4aISOGXmmFk9zlLYLXtFoFVaXcnGpX3yu5hVd
nPDVubO/jyNyvdMvH6seWW3laJ4f84aICghlD5IcvNt5qGBYrIf1KAJXKPjhw5QkhISI59Pj4+VKxYkYoVK/Ljjz9Sq1YtGjRoQPXq1XOc99dff+Hu7o6vr2+OEOlm5syZw0svvcTKlStZuHAhb7zxBmvWrKFx48Y3PT8pKQl/f/+bzjX1z1X/7O1zdsk2GAyYTJY/YLTcvYjciY8//phVq1bh5OTEDz/8gJPTbQ4lkTvi4+ODra0tUVFROfZHRUVpuKaI3FR6lpHdZ+PZciqWv09fYu/5eDKNOWdcL+3uRJMK3jQu70Xj8t4Ee7n8+2TrN5ORAifXwKElcGI1ZCRdP2bvYulhVaUDVGoPJUrd+wsTkWJFAZZYn72LpTeUNdq9R3/88QcHDhz419W1goKC6NmzJ2PGjGHp0qU5joWEhOQIk/5L3bp1qVu3LmPGjKFJkyZ8//33NG7cGAcHB4zGnPMS1KtXj8jISOzs7ChXrtydvKxslSpVwtnZmXXr1v3rEEgRkT179jBq1CgAPvrooxsCe8k9Dg4O1K9fn3Xr1tGlSxfA0vtt3bp1vPDCC9YtTkQKBLPZzNHIK2w+GctfJ2LZHhZHambOvxX9PZxoXP4eAyv4R2i1GI6vhszk68fc/C09rKp0tEzAbq8vNkTk7inAEuszGHJlKF9eS09PJzIyEqPRSFRUFCtXrmTSpEk8/PDDPP300//63GHDhlGzZk127txJgwYN7rjtsLAwZs6cSefOnQkICODYsWOcOHEiu91y5coRFhbG3r17KVOmDG5ubrRt25YmTZrQpUsXpk6dSuXKlbl48SLLli2ja9eut1WHk5MTo0aN4rXXXsPBwYFmzZoRExPDoUOHGDhw4B2/DoDIyEgiIyM5efIkAAcOHMDNzY3g4ODsieVFpHBJTk7mySefJDMzky5duvDMM89Yu6RCLykpKfv3JJD9O97Ly4vg4GCGDx9O3759adCgAQ0bNmT69OkkJydnzz8mIsVPREIqm07EsulkLJtPXiI2KT3HcZ8SjjSr6E3TCt73FlgBZKbBiVVXQ6tVOeez9QiGGo9C9a6Wydg1NFBEcokCLJHbtHLlSvz9/bGzs6NkyZLUqVOHTz75hL59+/7n8vDVq1fnwQcfZPz48SxfvvyO23ZxceHo0aPMmzePS5cu4e/vz/PPP5/9IbF79+788ssvtGrVivj4eObMmUO/fv1Yvnw5Y8eOpX///sTExFC6dGkeeOCBGyb+/Tfjxo3Dzs6O8ePHc/HiRfz9/Xn22Wfv+DVcM2PGjBwrWT3wwAMA2TWLSOHz8ssvc+zYMQICAvjqq6/u/gORZNu5cyetWrXK3r42gXrfvn2ZO3cuPXv2JCYmhvHjxxMZGUloaCgrV668o9/vIlK4pWRkseXkJTadjOWvEzGciknOcdzZ3pZG5b1oXtGH5pV8qOLndm+/n81mOPc37PvBMkQwPeH6MY9gqNHFcgtQaCUiecNgNpvN/32ayO1LTEzEw8ODhIQE3N3dcxxLS0sjLCyMkJAQzY0it03/bkQKrp9++onHH38cg8HAunXrcoQuBcm/vTfJndPPUyT/mc1mTscms+FYDBuORbPtdFyOiddtDFC7jGd2YFU32BNHO9t7b/jSKdi3APYvhPiz1/e7B0LNblCjq0IrKRD03lT0qQeWiIiI3JVz584xePBgAEaPHl1gwysRkcIqNcPI36cvseFYNOuPxXAuLiXH8TIlnWlZpRTNK5aiSXlvPFzsb3GlO5SWCAd+tPS2urDj+n6HElD9UajzBJRtDv8xCkFEJDcpwBIREZE7lpWVxVNPPUV8fDz33XdfjqHBIiJy985eSmb9UUtg9ffpS6RnXe9lZW9roFGINy2rlKJlFV8qlHLNvWHbZjNc3A0758DBn6/Pa2WwgQptLKFVlY7gcO8LIYmI3A0FWCIiInLHJkyYwObNm3Fzc+OHH37A3j6XvvUXESlmTCYz+y7Es/pwFGsOR3EyOi
nH8QAPJ1pW9aVVFV+aVvDG1TGXP8KlJcKBRbBrLkQeuL7fpwrUexpqPQ5uml9PRKxPAZaIiIjckbVr1/Lee+8BMGvWLCpUqGDlikRECpf0LCNbTl1i9aEo1h2JIvrK9RUD7WwM3FfOi5ZVStGqqi+VfEvkzeIYcadh20zY8y1kXLHss3W0TMRevz8EN9a8ViJSoCjAEhERkdsWFRVF7969MZvNDBkyhJ49e1q7JBGRQiEhJZP1x6JZfTiSP4/FkJxhzD5WwtGOllVK0a66Hy2r+OLhnEe9Ws1mCNsI22bAsRXA1fW8fCpDgwFQuye4eOVN2yIi90gBlliFFr+UO6F/LyIFg8lkonfv3kRFRVGzZk2mT59u7ZJERAq0qMQ0Vh6MZNWhSLaHxZFluv43jZ+7I+2q+9Guemkal/fKnRUDb8WYZZnXavPHEH3o+v6K7aDxs1C+tSZkF5ECTwGW5Ktrc6SkpKTg7Oxs5WqksEhJsUwiqjl2RKxr8uTJrF27FmdnZxYuXKjf4yIiNxGZkMaKgxEsPxDBzrOX+ef3cJX9SvBg9dK0q+5HrUAPbGzyeIheVrplJcFNH8HlM5Z99i4Q+hQ0fAZKVc7b9kVEcpECLMlXtra2eHp6Eh0dDYCLi0vejOmXIsFsNpOSkkJ0dDSenp7Y2ubhN5Mi8q82b97M+PHjAfj888+pXr26lSsSESk4IhPSWH7AElrtOpcztKoX7EmHmv60q+5HOR/X/CkoIwV2z4PNn8CVi5Z9Lt7QeCjcNxCcS+ZPHSIiuUgBluS70qVLA2SHWCL/xdPTM/vfjYjkv0uXLvHkk09iNBrp1asX/fr1s3ZJIiJWF5WYxrL913ta/VP9siXpWMufDjVLE+CZj71Vs9Jh5xz4631IjrHsc/OHpi9C/X7gkE8BmohIHlCAJfnOYDDg7++Pr68vmZmZ1i5HCjh7e3v1vBKxIrPZzIABAzh//jyVKlXiiy++UM9ZESm2EtMyWXkgkqX7wtly6lKOnlYNroVWtUrj75HPQ6xNRjjwI6x/F+LPWfZ5loXmL0NoL7BzzN96RETygAIssRpbW1sFEyIiBdwnn3zCr7/+ioODA4sWLcLNzc3aJYmI5Kv0LCPrj8awdG84645Gk5Flyj5Wv2xJHq7tT4ea/pT2cLJOgWF/wcrREHXQsl2iNLQcBXX7gK3mDxWRokMBloiIiNzUzp07GTlyJAAffvghoaGh1i1IRCSfmExmtoXFsXRvOMsPRJCYlpV9rJJvCbrUDaRznQCCvFysV2T8OVg9Dg4vsWw7eUDzVyyTsztYsS4RkTyiAEtERERukJiYyBNPPEFmZiZdu3Zl6NCh1i5JRCTPnYlN5sdd5/lldzgRCWnZ+0u7O/FoaACdQwOo7u9u3aHUWRmw+WPLPFdZaWCwgQYDoNVYcPGyXl0iInlMAZaIiIjkYDabeeaZZzh16hRly5bl66+/1rxXIlJkJadnsexABD/tvMD2M3HZ+92d7OhYy59HQwNpFOKFjU0B+D14cQ8seR6iD1m2yzaHDlOgdE3r1iUikg8UYImIiEgOM2fOZMGCBdjZ2bFgwQJKltRy6yJStJjNZnacucyPO8+z7EAEKRlGAGwM8EDlUjxeP4i21X1xtCsg87VmpsGGSbDlUzAbwcUbHpoCtR4DfcEgIsWEAiwRERHJtnv3bl566SUAJk2aROPGja1ckYhI7olMSOPn3Rf4ced5zlxKyd4f4uPK4w3K0K1uGetNxn4rMcfhx74QfdiyXbM7dJgKrj7WrUtEJJ8pwBIREREA4uPjeeyxx8jIyODRRx/l1VdftXZJIiL3zGQys+lkLN/+fZZ1R6MxmswAuDrY0qm2Pz0aBFG/bMmCOVR6/4/w2zDITAZXX3hkOlTtZO2qRESsQgGWiIiIYDab6d+/P2FhYZQrV445c+YUzA9zIiK36VJSOj/tusD3289x9h+9rRqW86LHfUF0qFkaV8cC+nEoMxVWjILd8yzb5e
6H7l+Dm5916xIRsaIC+htbrMVoNDJhwgS+/fZbIiMjCQgIoF+/frzxxhv6ICMiUoR99NFHLFmyBAcHB3788UfNeyUihZLZbGbn2ct89/dZlh+IJMNoAsDN0Y7u9cvQq1EwlfzcrFzlf0iKhh+egPBdgAFavAYtRoFNAZmPS0TEShRgSQ5Tpkzhiy++YN68edSoUYOdO3fSv39/PDw8sudEERGRomXLli2MGjUKsARZDRo0sHJFIiJ3Jik9i192X+C7v89xLOpK9v5agR70bhzMI3UCcHEoBB99Yo7Dd90h/hw4ecJjs6FiG2tXJSJSIBSC3+KSn7Zs2cKjjz5Kp06WsfXlypXjhx9+YPv27VauTERE8kJMTAw9evQgKyuLJ554gueee87aJYmI3LbzcSnM3XKGRTvOcyU9CwAnexs61wmgd+Oy1C7jad0C70TkQZj/KKTEgld5eOpH8Klo7apERAoMBViSQ9OmTZk5cybHjx+ncuXK7Nu3j02bNvHhhx/e8jnp6emkp6dnbycmJuZHqSIico+MRiO9e/cmPDycKlWqMHPmTA0XF5ECz2w2sy0sjjmbw1hzOIqrc7JT3seVPk3K0q1eGTyc7a1b5J2K2A/zO0PqZShdG/os1iqDIiL/jwIsyWH06NEkJiZStWpVbG1tMRqNvPvuu/Tq1euWz5k0aRJvvfVWPlYpIiK54b333mP16tU4Ozvz008/4eZWwOeFEZFiLT3LyG/7Ipi9KYzDEde/MH2gcin6NytHi0qlsLEphCF8XBh8290SXpW5D3r9BM6e1q5KRKTAUYAlOSxatIjvvvuO77//nho1arB3715efvllAgIC6Nu3702fM2bMGIYPH569nZiYSFBQUH6VLCIid2HdunW8+eabAHzxxRfUrFnTyhWJiNxcQmom3/59ljmbzxCbZOn172RvQ7d6ZejftFzBn5T93yTHwrfdIDka/GpB75/BycPaVYmIFEgKsCSHkSNHMnr0aJ544gkAatWqxdmzZ5k0adItAyxHR0ccHR3zs0wREbkHFy9e5KmnnsJsNjNw4MBb/n4XEbGmyIQ0Zm8O4/tt50i6Or+Vv4cTTzcpx5MNg/B0cbByhffIZISfB0HcafAMht4/KbwSEfkXCrAkh5SUFGxsbHLss7W1xWQyWakiERHJTdcma4+OjqZ27dp8+umn1i5JRCSHk9FX+PLP0yzZG06m0TLBVRU/N55tWZ6Hawdgb2vzH1coJP76AE6vBztneGoRuJW2dkUiIgWaAizJ4ZFHHuHdd98lODiYGjVqsGfPHj788EMGDBhg7dJERCQXjB07lr/++gs3Nzd++uknnJ2drV2SiAgAe8/H87/1J1l9OCp7X8NyXjzXsgItq5QqWotMnN8OGyZZHj/8IfhWs249IiKFgAIsyeHTTz9l3LhxDB06lOjoaAICAnjmmWcYP368tUsTEZF79MsvvzB16lQAvv76aypVqmTlikREYNfZy3y87gQbj8dk72tX3Y9nW1SgftmSVqwsj5iMsHwkmE1QuyeEPmXtikRECgUFWJKDm5sb06dPZ/r06dYuRUREctGxY8fo168fAK+88gqPP/64dQsSkWJv55k4Pl53gr9OxAJga2Oga91Anm1Rnoq+hXhi9v+y51uI2AuO7vDgu9auRkSk0FCAJSIiUsQlJSXRrVs3rly5wv3338+UKVOsXZKIFGPbw+L4eN1xNp+8BICdjYHu9crwfKuKBHu7WLm6/yczFSL2Q8Q+iD8LSVFgzAQbW3DzB6/yUL4leFe4veulJcC6iZbHLUdDiVJ5VrqISFGjAEtERKQIM5vNDBo0iMOHD+Pv78+iRYuwt7e3dlkiUpRlJFuCnvQrkJECmcmQmcrJ6Cv8uvcix6KScMaG5rbONKpWju7NahLgHwBOBSS8SkuEQ7/Akd8h7E8wZvz3c0pVgxavQY2u8G9zde1bCCmx4F0RGg7JvZpFRIoBBVgiIiJF2PTp01m4cCF2dn
b8+OOPlC6tVa5E5B6YTHDlIlw6CZdOWW6J4ZbAKikKkqIhI+mmT60IDAdw+MfOk1dvAE4eULIceJYFn8oQ1BDK3AcuXnn5iq5LuACbPoJ9C3K+BldfCKxv6WXlVhpsHcGUCQnhELkfzm2FmCPwU3/YNgN6fgslfG/exp5vLPf3DQZbfZkgInInFGCJiIgUURs3bmTkyJEAfPjhhzRr1szKFUlBk5KSQrVq1Xj88cd5//33rV2OFDQpcZaA5toQuugjEHcaslL/+7n2Lhgd3LiUaU9Mmi2pOGLGQKkSDvh7OuNoMFl6aKUnWno8ZSZbhtdF7LPc/smnMoS0gOqdIbgp2ObyR5iMZMuKgNtmgjH9ept1noCqD1se/1uvqtR42PYlbP4Yzm+DeZ2h7283Dg+M2Gf5edo6QO0eufsaRESKAQVYIiIiRdDFixfp0aMHRqORp556ihdeeMHaJUkB9O6779K4cWNrlyEFgTHTElSd3QwXtl+d8+nczc+1sbf0lPKuaOmV5BEEbn5QwnKLwZPPNkXw/fZzZBrNAHSq7c+r7SpTrlSJm18zIxkun7XMM3X5DEQetIRBl05A7HHLbccscA+E+v2hwQBw9b73133ub1jynCWYAyjb3DIUMOSBfw+t/snZE1qOglqPwdyHLb2xvukCg9aCvfP183Zf7X1VtVP+9SoTESlCFGCJiIgUMRkZGTz++ONERUVRs2ZNZs6cieF2P4hJsXHixAmOHj3KI488wsGDB61djuS3rAxLQHR2C5zbAue3Q2bKjeeVLAf+dSw3v1rgUxE8gm/aCyopPYuZf57iq007SMkwAnB/JR9ea1+VWmU8/r0eB1fwq265/VPyJTj/NxxbDkeXWYYrrn/H0tup2TBo8jw43OXcWbvnw28vg9loCcYe/ggqPXj7wdX/510B+v0Os9tD1EHY94MlaAPITIMDiyyP6/a5u+uLiBRzCrBERESKmJEjR7Jlyxbc3d355ZdfcHV1tXZJcoc2btzItGnT2LVrFxERESxevJguXbrkOOfzzz9n2rRpREZGUqdOHT799FMaNmx4222MGDGCadOmsWXLllyuXgqs+PNwcg2cWGuZnPz/z1Xl5AnBTSC4MQTWg9K1wLnkf17WZDLz0+4LTFt1jJgrliF4dYI8GdW+Ck0r+txbza7elh5LVTtBpw/h0BLY+ilEHrAEWfsXwmNfWwK2O7FhCmx4z/K4ZndLeOX0HyHb7fCuAPe/CitHw5ZPoV5fy4qFUYcsQyRdfCyrFoqIyB1TgCUiIlKEfP/993zyyScAfPPNN1SqVMnKFcndSE5Opk6dOgwYMIBu3brdcHzhwoUMHz6cGTNm0KhRI6ZPn0779u05duwYvr6WyaNDQ0PJysq64bmrV69mx44dVK5cmcqVKyvAKsrMZgjfDYeXwInVEHM053HXUpahcsFNoGwzKFUVbGzuqIntYXFM/P0QB8MTASjr7cLoh6ryUM3Sud/z084R6vSEWo9bVglcPc4yxPCrtvDIxxD61O1dZ+ec6+HVA69Bq9fvvtfVzdTtAxsmW4YlHv0dqj9qGRoJloDLxjb32hIRKUYUYImIiBQR+/fvZ9CgQQCMHTuWzp07W7kiuVsdOnSgQ4cOtzz+4YcfMnjwYPr37w/AjBkzWLZsGbNnz2b06NEA7N2795bP//vvv1mwYAE//vgjSUlJZGZm4u7uzvjx4296fnp6Ounp6dnbiYmJd/GqJF+YTBC+Ew4vtdwSzl8/ZrCBMg2hUluo2A5K177jwOqa83EpTF5xlGUHIgBwc7TjxTYV6du0HI52eRzQ2NhY5puq0BqWvgDHlsGSoZbXV+eJf39u2EZYPsLyuNVYy3xXuc2xBDQcDBunwabpUK3z9f8OHkG5356ISDGhAEtERKQIiI+Pp3v37qSmptKuXTveeusta5ckeSQjI4Ndu3YxZsyY7H02Nja0bduWrVu33tY1Jk2axKRJkwCYO3cuBw8evGV4de18/Z
sq4KIOw97v4NBiyzxR1ziUgMrtLavpVWh1W0MC/01Sehb/W3+SrzaFkZFlwsYATzQMZni7yviUcLzHF3GHXLzgie9g2auw82vLZOxOHlDlFuFvehL8PAhMWVDzMXhgZN7V1vAZ+OtDuLgbrkRYhm8CeCrAEhG5WwqwRERECjmj0UivXr04efIkwcHBfP/999jaaohKURUbG4vRaMTPzy/Hfj8/P44ePXqLZ92bMWPGMHz48OztxMREgoL0QdzqUuLg4M+W4Orinuv7HdygykNQvQtUbJNzJby7ZDab+X1/BG//fpjoq/NcNSnvzfhHqlPN3/2er3/XDAbo+D4YM2DPN/DbMCjb9ObzWW39DJKioGQIPPpZ7g4b/P9KlIKSZS3DCGNPXO+B5Rmcd22KiBRxCrBEREQKuTfffJPly5fj5OTEL7/8go/PPU6aLMVKv379/vMcR0dHHB3zuXeN3JzZDGf+gp2zLavyGTMs+23soPJDUOdJqNgW7J1yrcnTMUm8+esh/joRC1jmuRrbsRrtqvsVjBVObWwsIdbZLRB3Cta9DZ3ez3lOUjRstswPSJvxuRLq/SfvSpYA69KJ6z2wPBRgiYjcLQVYIiIihdhPP/3Eu+++C8CsWbOoX7++lSuSvObj44OtrS1RUVE59kdFRVG6dGkrVSV5LiMFDiyCbV9C9OHr+/1qQmgvqN0DXHM3vE7LNPK/9SeZ8edpMowmHOxsGNqyAs+2qICTfQHr5WnvBA9/CPMfhR1fWSZ0D6x3/fjGaZCZDAH1oEbX/KnJpxKcWAWXTv2jB5Z6LoqI3C0FWCIiIoXUgQMHsnvPDB8+nN69e1u3IMkXDg4O1K9fn3Xr1tGlSxcATCYT69at44UXXrBucZL74s/Djlmwax6kxVv22btYJiuv1xf86+TJULg/j8cwbslBzsWlAPBA5VJM7FyDcj6uud5WrinfEmp0s6xQuHve9QDLZIIDP1oet34jb4cO/pN3Bct9+C5Iv7rwgUeZ/GlbRKQIUoAlIiJSCMXFxdGlSxeSk5Np06YNU6ZMsXZJkouSkpI4efJk9nZYWBh79+7Fy8uL4OBghg8fTt++fWnQoAENGzZk+vTpJCcnZ69KKEVAzDH46wM48BOYjZZ9nmWh4RCo2xucPfOk2cvJGby97DC/7LZMBF/a3Yk3H6nOQzVLF4zhgv+lzpOWAOvEWstwS4PB0mMt9TLYu0LIA/lXi3cly/2FnZZ7F29wKMABoIhIAacAS0REpJDJysriiSee4PTp05QrV46FCxdiZ6e39KJk586dtGrVKnv72gTqffv2Ze7cufTs2ZOYmBjGjx9PZGQkoaGhrFy58oaJ3aUQijwAG9+Hw0sBs2VfyAPQ6DnLaoI2eTN0z2w2s+JgJOOXHiQ2KQODAfo1LcerD1ahhGMh+v1SrjnYOkLiBYg5Cr7V4Oxmy7HgRmBrn3+1+FwNsK4FkB4aPigici8K0buRiIiIALz++uusWbMGFxcXlixZgre3t7VLklzWsmVLzGbzv57zwgsvaMhgURK+2zJP07Hl1/dVfRgeGAEBdfO06ejENMYtPciqQ5Z51Sr5lmBy99rUL1syT9vNEw4uEHI/nFwLJ9ZYAqwzmyzHyjbL31pK+FlWhMy4YtnW/FciIvdEAZaIiEgh8sMPPzBt2jQA5syZQ506daxckYjck9gTsG4iHPn16g6DZZLxB0aAX408bdpsNrN4Tzhv/nqIK2lZ2NkYGNqqIs+3qoCjXQGbpP1OVGxnCbBOroGmL1pWJwRL76z8ZDBY5sGK2GvZ1gqEIiL3RAGWiIhIIbFnzx4GDhwIwOjRo+nRo4eVKxKRu3YlEjZMht3zLUPMDDZQq4cluLo29CwPxSVn8PovB1h5KBKAOmU8mNy9NtX83fO87TxXqR2sHAVnt1omUE+JBTtnywqE+c2n0vUASz2wRETuiQIsERGRQiAmJoYuXbqQmppKhw4deOedd6xdkojcjax02Pq5ZZ
6rzGTLvsodoM148KueLyWsOxLFqJ8PEJuUjp2NgVfaVeaZB8pjZ2uTL+3nOe8K4FUe4k7D0uct+4LuAzsHK9TyjzBSc2CJiNwTBVgiIiIFXGZmJj179uTcuXNUqlSJ77//HlvbQjy8R6S4Or4KVo62BCsAZe6DdhOhbNN8aT4pPYt3fj/Mgh3nActcVx/1DKVmoEe+tJ+vGj0HK0ZaJnIHKJvPwwev8a5w/bF6YImI3BMFWCIiIgXciBEjWL9+PSVKlGDJkiV4enpauyQRuRPx52H5CDi+0rJdorQluKrdwzJPUj7Yez6el37Yw7m4FAwGGNgshBHtq+BkX0TD8EZDLHOIrR4Ll05BzW7WqeOfw0E9NQeWiMi9UIAlIiJSgH311Vd88sknAHzzzTdUr54/Q4xEJBeYjLDjK8sk7RlJYGMPTZ63zHPl6JY/JZjMfL0pjCkrj5JlMhPo6cz7j9ehSYVisHppuWYwZAOYzfkWFN6gVFXwLAsuXuDkaZ0aRESKCAVYIiIiBdTGjRsZOnQoAG+99RZdunSxbkEicvuij8KvL8CFHZbtoMbQ+RMoVSXfSohLzuDVRXtZfywGgE61/HmvWy08nO3zrYYCwVrhFYCdI7ywE2xsrVuHiEgRoABLRESkAAoLC6N79+7Z81+NGzfO2iXljvQrkHgREi5Y7lMuQVo8pMZDWoJlgmtTJhgzwJgJNbpCw8HWrlrk9plMsG0GrJ0AxnRwcIN2b0H9/mCTf5Ok/336EsMW7CEqMR0HOxvefKQ6TzUMxqAQJf9ZY/J4EZEiSAGWiIhIAXPlyhU6d+5MbGws9evXZ/bs2YXvQ2dSNETsg6hDEHsCLp2A2OOQevnOruNfJ2/qE8kLCeGw5DkI+9OyXelBeORjcA/ItxJMJjOfrT/J9LXHMZmhQilXPnuqHtX83fOtBhERkbygAEtERKQAMRqNPPXUUxw8eBB/f3+WLl2Ki4uLtcv6d1npEL4bzm2B8zsgYi9cibj1+Y4e4BFo+VDvWsoyL4yzJzi6g70z2DqArb3l5l0xn16EyD06/KtlyGBaAti7wIPvQIMB+TpsLCE1k+EL97LuaDQAj9Uvw8RHa+DioD/5RUSk8NO7mYiISAEyduxYfv/9d5ycnFiyZAmBgYHWLulGJhNE7oMTa+H0BsscP8b0/3eSAXwqQ+ma4FMFfCpatkuWy7fJq0XyhTET1rwJf39u2Q6oB91mWf7N56PjUVd45ptdhMUm42Bnw7tdavJ4g6B8rUFERCQvKcASEREpIObPn8+UKVMAmD17Ng0bNrRyRf+QkQIn18LRZZb7lNicx11LQdmmlomqA+uBX01wLGGdWkXyS0I4/NQfzm+zbDd9CdqMt/QezEfLD0Qw4sd9pGQYCfR0Zkbv+tQq45GvNYiIiOQ1BVgiIiIFwNatWxk82DJZ+dixY3nyySetXBGW0OrYcji8xNLbKiv1+jGHElC+JVRsA+UeAO8KWmFLipewv+DHvpaFCBw9oOsXULVTvpZgNJmZtuoYM/48BUDTCt58+mRdvEs45msdIiIi+UEBloiIiJWdO3eOLl26kJGRQdeuXZk4caL1ijGb4dzfsPc7OLQEMq5cP+YZDNU6Q5UOUKahVtaS4mv3fPj9FTBlQena0GM+eIXkawlX0jJ54fs9/Hk8BoAhD5TntfZVsLPNv5UORURE8pMCLBEREStKTk7m0UcfJTo6mjp16jB//nxsbKzwATT+POz7AfZ+D5fDru/3LAu1HrMEV/511MtKijeTEda+CVs+tWzX6AZd/mdZfCAfhcenMnDuDo5GXsHZ3papj9XmkTr5t9KhiIiINSjAEhERsRKTycTTTz/N3r178fX1ZenSpZQokY/zRpnNcHYz/P2FZaig2WTZ71ACqneB0KcguAlYI1ArRNLT03F01JCtIi8jBX4eBMeWWbZbjIaWo/M91N1/IZ6B83YScyWdUm6OzO57n+a7EhGRYkEBloiIiJW8+eab/P
LLLzg4OLB48WLKli2bPw1npsKBH2HblxB18Pr+cvdD3d5Q7RFwcM2fWgqhFStWsGDBAv766y/Onz+PyWTC1dWVunXr8uCDD9K/f38CAtQbpkhJjYfve8L5v8HW0dLrqtZj+V7GqkORDFuwh7RME1VLu/F1v/sI9Mzf3l8iIiLWogBLRETECubNm8c777wDwMyZM2natGneN5oSB9tmwPZZkBpn2WfvAnWegIbPgG/VvK8By8TTiamZJKRmEn/1/totI8tEltFElslMptFE3eCStKhcKl/q+i+LFy9m1KhRXLlyhY4dOzJq1CgCAgJwdnYmLi6OgwcPsnbtWt5++2369evH22+/TalSBaN2uQdJ0fBNN4g6AE4e8NQiCG6cryWYzWa++iuM91YcwWyGFpVL8dlTdXFzyt/VDkVERKxJAZaIiEg+27BhQ/aKg6+//jp9+/bN2waTomHrZ7Dja8hIsuzzCIaGg6FeH3AumetNpmYYOR51hdOxSVyIS+XC5VTOX07hwuVULsankmUy39Z1BjYPKTAB1tSpU/noo4/o0KHDTecp69GjBwDh4eF8+umnfPvtt7zyyiv5XabkpvhzML8LxJ0CV1/osxhK18zXEkwmMxN/P8zcLWcA6NO4LG8+Ul2TtYuISLGjAEtERCQfHTt2jG7dupGZmUnPnj15++23866xxIuw+RPYNReyUi37SteC+0dYhgna2OZKM7FJ6ew7H8+RiESORFzhSGQiZ2KT+a+MytXBFg9nezxcHPBwtsPdyR4ne1vsbA3Y29hgZ2ugftncD9fu1tatW2/rvMDAQCZPnpzH1Uieu3wG5nSExHBL4Pv0EvCukK8lZBpNvPbTfhbvCQfgjU7VGNg8BIMWUxARkWJIAZaIiEg+iY2NpVOnTly+fJnGjRszZ86cvFlxMDkWNk6DnbPBmGHZF1gfHngNKre/p0mnzWYzFy6nsj0sjh1n4th+Jo7TMck3PdenhAOVfN0I8nKmTEkXypS8fl/KzRH7ItCDJCMjg7CwMCpUqICdnf6sKjISwmHeI5bwyqcy9FkCHoH5WkJappEXvt/N2iPR2NoY+ODxOnSpm781iIiIFCT6S0tERCQfpKen07VrV06dOkVISAhLly7F2TmXJ19OT4Ktn8OWT64PFSzbDB4YAeVb3XVwlZSexaYTsaw/Gs3GEzFEJKTdcE4l3xLUDPSgamk3qvm7U83fnVJuRXdlvpSUFF588UXmzZsHwPHjxylfvjwvvvgigYGBjB492soVyl1Liob5nS3DB73KQ9/fwK10vpZwJS2TQfN2si0sDkc7G/7Xqx5tqvnlaw0iIiIFjQIsERGRPGY2mxkwYACbNm3Cw8ODZcuW4evrm3sNZGXA7nnw5xRIjrHs8w+FthOgQqu7uuSZ2GTWHY1m/dFotoVdItN4fTygnY2BWmU8aFjOi/vKedGgXEk8XRzu/XUUImPGjGHfvn1s2LCBhx56KHt/27ZtmTBhggKswiolDuY/CpdOgkcQPP1rvodXl5LS6TtnOwfDE3FztOOrvg1oVN47X2sQEREpiBRgiYiI5LG33nqL77//Hjs7O37++WeqVauWOxc2m+HYclg1Fi6HWfZ5lYfW46B6F7jD4Ynh8an8vu8iv+2/yMHwxBzHynq70KqKL62q+tKwnBfODrkzf1ZhtWTJEhYuXEjjxo1zzEdUo0YNTp06ZcXK5K5lJMO33SD6MJQoDU8vBc+gfC0hIiGVXl9t43RMMt6uDswb0JCagR75WoOIiEhBpQBLREQkD3377be89dZbAHzxxRe0adMmdy4ccwxWjoZTf1i2XX2h5Sio1xds7W/7MrFJ6SzbH8Fv+y6y8+zl7P22NgYal/eiVRVfWlf1pXypErlTdxERExNz0150ycnJmmC7MDKZ4JchcHEPOHtZwqt8nrA9IiGVJ2f+zZlLKQR6OvPNwIb6/05EROQfFGCJiIjkkb/++ouBAwcC8NprrzFo0KB7v2haAv
w5FbbNAFMW2DpA0xeh+XBwvL0Pu0aTmb9OxLBg+3nWHoki6+pygQYDNArx4pE6AXSo6Y+Xa/EaFngnGjRowLJly3jxxRcBskOrr776iiZNmlizNLkba9+Eo79b/n968gfwrZqvzf8zvArycuaHwY0pU9IlX2sQEREp6BRgiYiI5IETJ07QpUsXMjIy6N69O5MmTbq3C5rNsPd7ywfta/NcVekI7d+1DBu8DREJqfy48wILd5wnPD41e3+dMh50Dg3k4dr++Lk73VudxcR7771Hhw4dOHz4MFlZWXz88cccPnyYLVu28Oeff1q7PLkTu+ZZFj4AePR/ENw4X5tXeCUiInJ7FGCJiIjkskuXLtGpUyfi4uJo2LAh8+fPx+YO56PKIfYE/PYynN1k2fauBA9Nhkpt//OpZrOZnWcv89Vfp1lzOIqrna1wd7KjW70yPNEwiKql3e++tmKqefPm7N27l8mTJ1OrVi1Wr15NvXr12Lp1K7Vq1bJ2eXK7Tv8Jy4ZbHrccA7Ufz9fmoxPTFF6JiIjcJgVYcoPw8HBGjRrFihUrSElJoWLFisyZM4cGDRpYuzQRkQIvNTWVzp07c+LECYKDg1m6dCkuLnf5gTQrHTZ9BH99AMYMsHeBlqOh0XNg9+/D+zKNJpYfiODrTWHsv5CQvb9hiBdPNgyiQ01/nOyL90Ts96pChQrMmjXL2mXI3UoIhx/7WYbi1nocWozK1+YvJ2fQ++ttnLmUQpmSCq9ERET+iwIsyeHy5cs0a9aMVq1asWLFCkqVKsWJEycoWbKktUsTESnwjEYjvXr1YsuWLXh6erJixQpKly59dxc7uwV+Gwaxxy3bFdtBpw+gZNl/fdqVtEy+33aOuVvOEJGQBoCDnQ3d6wXSv1kIlf3c7q4eyWH58uXY2trSvn37HPtXrVqFyWSiQ4cOVqpMbosxC34eBKlx4F8HOn9mmQQunySlZ9FvznaORyXh5+6o8EpEROQ2KMCSHKZMmUJQUBBz5szJ3hcSEmLFikRECgez2cwrr7zC4sWLcXBwYOnSpVSvXv3OL5SWCGvGw66rv4ddfaHDZKjR7V8/YCekZDJ7cxhzNoeRmJYFgE8JR55uUpZejYLxLuF4Ny9LbmH06NFMnjz5hv1ms5nRo0crwCroNkyCc1vAwQ0emwP2+Tf3W1qmkUHzdrDvQgIlXez5dmAjgrwUXomIiPwXBViSw6+//kr79u15/PHH+fPPPwkMDGTo0KEMHjzY2qWJiBRoH374IZ9++ikA8+fP54EHHrjzi5xaD7++CAnnLdv1+kK7t8D51r1gLyWl8/WmMOZvPUtSuiW4quhbgmceKE/n0AAc7TRMMC+cOHHipgFl1apVOXnypBUqujPHjh2jZ8+eObZ/+OEHunTpYr2i8supPyzDcgE6fwzeFfKt6SyjiRe+383fp+Mo4WjH/AGNqKRekSIiIrdFAZbkcPr0ab744guGDx/O66+/zo4dO3jppZdwcHCgb9++N31Oeno66enp2duJiYn5Va6ISIGwYMECRowYAcD777+fIxi4LelJll5XO7+2bHuWhUc/h5D7b/mUuOQMZvx5im+2niU10whA1dJuvNSmEg/VKI2NTf4NhyqOPDw8OH36NOXKlcux/+TJk7i6ulqnqDtQpUoV9u7dC0BSUhLlypWjXbt21i0qP1yJhF+GAGao3x9qds+3ps1mM+OWHmTtkWgc7Wz4um8DapXxyLf2RURECjsFWJKDyWSiQYMGvPfeewDUrVuXgwcPMmPGjFsGWJMmTeKtt97KzzJFRAqMP//8M/v340svvcTw4cPv7AJhG2Hp8xB/zrJ93yBo+xY4lrjp6cnpWXz1Vxiz/jqd3eOqVqAHL7auSNtqfgqu8smjjz7Kyy+/zOLFi6lQwdKD5+TJk7z66qt07tzZytXdmV9//ZU2bdoUiuDtnpjNsGQoJMeAX014aFK+Nv/5+pP8sP08Ngb49Mm6NCrvna/ti4iIFHb3sK
a3FEX+/v43DImoVq0a586du+VzxowZQ0JCQvbt/PnzeV2miEiBcOjQIbp06UJGRgbdunXjww8/xHC7E0FnpsHKMTDvEUt45REETy+1TNR+k/AqPcvI3M1hPDB1PR+tPU5SehY1AtyZ0+8+fn2hGQ+q11W+mjp1Kq6urlStWpWQkBBCQkKoVq0a3t7evP/++/d8/Y0bN/LII48QEBCAwWBgyZIlN5zz+eefU65cOZycnGjUqBHbt2+/q7YWLVp0570GC6M938CpdWDnBI/NBnvnfGv6510XeH+1ZUGGCZ1r8GCNu1zcQUREpBhTDyzJoVmzZhw7dizHvuPHj1O27K1XvXJ0dMTRUZMDi0jxcvHiRTp06EB8fDxNmzbl22+/xdb2NuebijpsWQEt+pBlu15fePAdcHK/4VSTycyv+y7y/upjXLicCkA5bxdGtK9Cx5r+Cq2sxMPDgy1btrBmzRr27duHs7MztWvXvru5z24iOTmZOnXqMGDAALp163bD8YULFzJ8+HBmzJhBo0aNmD59Ou3bt+fYsWP4+voCEBoaSlZW1g3PXb16NQEBAYBl2P+WLVtYsGBBrtRdYCVcgFVjLY9bvwGlquRb05tOxDLq5/0APNOiPE83KZdvbYuIiBQlBrPZbLZ2EVJw7Nixg6ZNm/LWW2/Ro0cPtm/fzuDBg5k5cya9evW6rWskJibi4eFBQkIC7u43fhgTESnsEhMTeeCBB9i3bx+VK1dmy5YteHvfxnAgsxm2z4LVb4AxHVx8oMv/oHL7m56+6+xlJv5+mH3n4wHwdXNkWNtK9GgQhL2tOlHficL83mQwGFi8eHGOCdYbNWrEfffdx2effQZYpgAICgrixRdfZPTo0bd97W+++YZVq1bx7bff3lFNhernaTbDD0/A8ZVQpiEMWAk2+bO4wamYJLp8vpkraVl0rhPA9J6hCp1FRPJIoXpvkruiHliSw3333cfixYsZM2YMEydOJCQkhOnTp992eCUiUtRlZGTQvXt39u3bh6+vLytXrry98CopBpYOhROrLdsV21nCqxK+N5x6MT6VKSuPsnTvRQBcHWwZ2qoiA5qF4OygVQULinXr1rFu3Tqio6MxmUw5js2ePTvP2s3IyGDXrl2MGTMme5+NjQ1t27Zl69atd3StRYsWMWTIkP88r1Av2HJ0mSW8srGHRz/Lt/AqITWTwfN2ciUtiwZlSzLt8doKr0RERO6BAiy5wcMPP8zDDz9s7TJERAock8nE4MGDWbt2La6urixbtoyQkJD/fuKJNbDkOcvk0baO0G4iNHoG/t98WSkZWcz48zQzN54iLdOEwQCP1y/DiPZV8HVzyqNXJXfjrbfeYuLEiTRo0AB/f//bn/ssF8TGxmI0GvHz88ux38/Pj6NHj972dRISEti+fTs///zzf55baBdsSU+CFa9ZHjd7Kd+GDhpNZl78YQ+nY5MJ8HDii971cbRT+CwiInIvFGCJiIjcplGjRjF//nxsbW1ZtGgRDRo0+PcnZKbBmvGw/UvLtm916P4V+NXIcZrZbGb14Sgm/naY8HjLPFcNy3kx/pHq1Az0yIuXIvdoxowZzJ07lz59+li7lLvm4eFBVFTUbZ07ZsyYHCtsJiYmEhQUlFel5Z6N0yAxHDzLwv0j8q3ZySuOsPF4DM72tszq24BSbporVERE5F4pwBIREbkN06ZNy15d7uuvv6Zjx47//oS40/BjP4jYZ9lu9Cy0nXDDymfnLqUw4bdD/HE0GoBAT2de71iNjrVK52uvHrkzGRkZNG3a1Cpt+/j4YGtre0P4FBUVRenSebO6XaFcsOXyGfj7f5bHHaaAg0u+NPvzrgvM+isMgPcfr0ONAIXQIiIiuUEzwIqIiPyHefPm8dprlmFI06ZNo2/fvv/+hMO/wpctLOGVsxc89aPlA/Q/wqv0LCOfrjtBu4/+5I+j0djbGni+VQXWDm9Bp9r5OyRN7tygQYP4/vvvrdK2g4MD9evXZ926dd
n7TCYT69ato0mTJlapqUBaOwGMGRDSAio/lC9NHo1MZOySAwC81LoinWr750u7IiIixYF6YImIiPyL33//nYEDBwIwYsQIRoz4l2FIWRmWIYPbvrBsBzWCx2aDR5kcp205FcvYxQcJi00GoGkFbyY+WpOKviXy5DVI7ktLS2PmzJmsXbuW2rVrY29vn+P4hx9+eE/XT0pK4uTJk9nbYWFh7N27Fy8vL4KDgxk+fDh9+/alQYMGNGzYkOnTp5OcnEz//v3vqd0i49w2OLQYDDbQ/r0b5pvLC1fSMhn67W7SMk20qFyKl9tWzvM2RUREihMFWCIiIrewefNmevTogdFo5Omnn2bKlCm3Pjn+nGXIYPguy3bTl6DNeLC9HmwkpGYyecURfth+HoBSbo6Me7g6j6jHVaGzf/9+QkNDATh48GCOY7nx33Lnzp20atUqe/va/FN9+/Zl7ty59OzZk5iYGMaPH09kZCShoaGsXLnyhondiyWzGdZNtDwO7QWla+ZDk2ZG/3KA07HJ+Hs48VHPUK04KCIikssMZrPZbO0ipGhJTEzEw8ODhIQE3N3drV2OiMhdOXjwIPfffz/x8fF06tSJxYsX39DLJtuxFbD4WUiLBydP6DoDqnTIccqaw1G8seQAUYnpAPRuHMxrD1XF3ekW15Rcpfem3FWgf56nN8D8R8HWAV7ac0MPyLwwf+sZxi89hJ2NgYXPNKF+2ZJ53qaIiORUoN+bJFeoB5aIiMj/c/bsWdq3b098fDxNmzZl0aJFNw+vjFnwx0TY/LFlO7A+PDYHSpbNPiU2KZ0Jvx7i9/0RAIT4uDK5Wy0alffOj5cieezkyZOcOnWKBx54AGdnZ8xms3rTWZPZDH+8Y3ncYEC+hFf7L8Tz9u+HARjdoarCKxERkTyiAEtEROQfYmJiePDBB7l48SI1atTgt99+w8XlJquXJV+Cn/pD2J+W7UbPQbuJYOcAWIYULd4TzsTfDxOfkomtjYEhD5RnWJtKONnb5uMrkrxw6dIlevTowfr16zEYDJw4cYLy5cszcOBASpYsyQcffGDtEounk+vgwg6wc4bmw/O8uZSMLIYt2Eum0cxDNUozsHlInrcpIiJSXGkVQhERkauSkpLo1KkTx48fJzg4mFWrVuHl5XXjiRf3wswWlvDK3hUenwsdJmeHV5EJafSfu4Phi/YRn5JJdX93lj7fjFEPVVV4VUS88sor2Nvbc+7cuRwBZ8+ePVm5cqUVKyvmNn1kuW8wANzyfj6wd5YdIezqvFdTutdW7zsREZE8pB5YIiIiQEZGBt26dWPHjh34+PiwevVqAgMDbzxx7w/w+8uQlQZe5aHnd+BXHbD0uvp130XGLTlIYloWDnY2DGtTiSEPlMfeVt8ZFSWrV69m1apVlCmTc4hapUqVOHv2rJWqKuYu7ISzm8DGHpo8n+fNrT0cxffbzgHwweN18HDRfHYiIiJ5SQGWiIgUe1lZWfTq1Ys1a9bg6urK8uXLqVKlSs6TjJmw6nXYPtOyXak9dJsJzp4AXE7O4I2lB1l2da6rOmU8+KBHKBV9S+TjK5H8kpycfNOhpXFxcTg6OlqhIsnufVW7B3jcJHzORTFX0hn1834ABjUPoWlFnzxtT0RERDSEUEREijmTycSQIUP46aefcHBwYPHixdx33305T7oSBfMeuR5etRgNTy7IDq/+OBrFg9M3smx/BHY2Bl5pW5mfn2uq8KoIu//++5k/f372tsFgwGQyMXXqVFq1amXFyoqp2JNwdJnlcbNhedqU2Wxm9M/7uZScQdXSboxoX+W/nyQiIiL3TD2wRESk2DKbzbzyyivMmTMHW1tbFixYQLt27XKedH4HLOoDVyLA0R26fglVOwKQlJ7Fu8sO88P28wBU9C3BRz1CqVXGI79fiuSzqVOn0qZNG3bu3ElGRgavvfYahw4dIi4ujs2bN1u7vOJn59eA2dIzslTeBko/7rzAuqPRONjaMP2JUM1rJyIikk
8UYImISLH15ptv8sknnwAwe/ZsunbtmvOEnXNg+UgwZYJPFXjiO/CpBMCOM3EMX7SX83GpGAwwsFkII9pX0YfZYqJmzZocP36czz77DDc3N5KSkujWrRvPP/88/v7+1i6veMlIhj3fWR43HJKnTUUnpvHOssMAvPpgZaqWds/T9kREROQ6BVgiIlIsvf/++7z99tsAfPbZZzz99NPXDxozLcHVrjmW7Wqdocv/wNGNLKOJT/44yWd/nMBkhkBPZ95/vA5NKnhb4VWINWRmZvLQQw8xY8YMxo4da+1y5ODPkJ4AJctBhdZ52tSE3w6RmJZFrUAPBjYPydO2REREJCcFWIXAkSNHWLBgAX/99Rdnz54lJSWFUqVKUbduXdq3b0/37t01YayIyB2YOXMmI0eOBOC9997j+ef/sWJZShwsehrO/AUYoM04aD4cDAbOx6Xw8sK97Dp7GYDu9cowoXN13Jy0+lhxYm9vz/79+61dhgCYzbB9luVxg4Fgk3fTu646FMnyA5HY2hiY3L0WdlpZVEREJF/pnbcA2717N23btqVu3bps2rSJRo0a8fLLL/P222/Tu3dvzGYzY8eOJSAggClTppCenm7tkkVECrwffviBZ599FoDRo0czZsyY6wejj8CsVpbwysHNMlH7/a+CwcCv+y7S8eO/2HX2Mm6OdnzyZF0+6FFH4VUx1bt3b77++mtrlyEX90DkfrB1hLq986yZhNRMxi05CMAzD5SnRoDmuRMREclv6oFVgHXv3p2RI0fy008/4enpecvztm7dyscff8wHH3zA66+/nn8FiogUMr/99ht9+vTBbDYzdOhQ3nvvvesHj6+CnwZCxhXwLAtPLQTfaiSlZzHh10P8tOsCAPWCPfn4iboEeblY6VVIQZCVlcXs2bNZu3Yt9evXx9XVNcfxDz/80EqVFTP7Fljuqz0MLl551szkFUeJvpJOiI8rL7WplGftiIiIyK0pwCrAjh8/jr39f3+z36RJE5o0aUJmZmY+VCUiUjj98ccfPP744xiNRvr06cOnn36KwWCwDEHa8imsGQ+YoWxz6DEfXL3ZfyGel37Yw5lLKdgY4IXWlXipdUUNHRIOHjxIvXr1AMv79T8ZDAZrlFT8GDPh4E+Wx3WezLNmdp2N44ft5wCY3K2WFmoQERGxEgVYBdjthFcAKSkpuLi43Pb5IiLFzd9//03nzp1JT0+nS5cuzJ49GxsbG8hKh99ehn3fW06s1xc6vo/Z1p6vNp5mysqjZJnMBHg4Mf2JujQMybseHlK4rF+/3tolyMm1kHIJXH2hfKs8acJoMjPhV8uqgz0bBNGovBZrEBERsRZ9hVxItGnThvDw8Bv2b9++ndDQ0PwvSESkkNizZw8dOnQgOTmZdu3asWDBAuzs7CApGuY9YgmvDDbQYSo88jHxGTB4/k7eXX6ELJOZjrVKs2LYAwqv5KZOnjzJqlWrSE1NBcBsNlu5omLk2vDBWo+Dbd58J/vjzvMcCE/AzcmOkQ9VyZM2RERE5PYowCoknJycqF27NgsXLgTAZDIxYcIEmjdvTseOHa1cnYhIwbR//37atm1LfHw8zZo1Y/HixZZVWyMPwKzWcH4bOHpAr5+g0TPsPh9Pp082sfZINA52NrzbtSafP1UPDxf1cJWcLl26RJs2bahcuTIdO3YkIiICgIEDB/Lqq69aubpiIC0Bjq2wPK7zRJ40kZCaybRVxwAY1qYSPiW04rOIiIg1aQhhIbFs2TI+//xzBgwYwNKlSzlz5gxnz57l999/58EHH7R2eSIiBc7hw4dp27YtcXFxNGrUiOXLl1sm2j7yG/wyBDJTwLsiPLkAs3fFHEMGy3m78HmvelppTG7plVdewd7ennPnzlGtWrXs/T179mT48OF88MEHVqyuGDi+Gozp4FMZStfKkyY+WXeCS8kZVPQtQd+m5fKkDREREbl9CrAKkeeff54LFy4wZcoU7Ozs2LBhA02bNrV2WSIiBc6xY8
do3bo1MTEx1KtXj5UrV+Lu5gYbp8Ef71hOKt8KHp9DvNmVEfN3svZINAAP1/ZnUrdauDmp15Xc2urVq1m1ahVlypTJsb9SpUqcPXvWSlUVI0d/s9xXfRjyYNL8k9FXmLflDADjH66OvRZuEBERsTq9GxcSly9fpnv37nzxxRd8+eWX9OjRgwcffJD//e9/1i5NRKRAOXnyJK1btyYqKoo6deqwZs0aPEs4w+Jnr4dXjZ6FXj+xO4brQwZtbXi7S00+fbKuwiv5T8nJybi4uNywPy4uzjJMVfJOZhqcWGt5XO3hXL+82Wzmrd8Ok2Uy07aaHw9ULpXrbYiIiMidU4BVSNSsWZOoqCj27NnD4MGD+fbbb/n6668ZN24cnTp1snZ5IiIFQlhYGK1bt+bixYvUqFGDNWvW4OUEfNMV9i8Agy10+hDzQ5P5ass5eszYSnh8KmW9XfhlaFP6NC6LIQ96c0jRc//99zN//vzsbYPBgMlkYurUqbRqlTcr4slVpzdAZjK4B0JAvVy//OaTl/jrRCwOtjaMe7jafz9BRERE8oWGEBYSzz77LGPHjrUs+35Vz549adasGf3797diZSIiBcO5c+do3bo158+fp2rVqqxbt45SNonw1eMQdwoc3KDHXBICWzDim12sORwFQKda/kzuriGDcmemTp1KmzZt2LlzJxkZGbz22mscOnSIuLg4Nm/ebO3yirbs4YOdcn34oNlsZtqqowD0ahxMWW/XXL2+iIiI3D0FWIXEuHHjbrq/TJkyrFmzJp+rEREpWMLDw2ndujVnzpyhUqVK/PHHH/ilnYIFT0HqZfAIgqcWcZwgnvl8M2Gxydm9K3qr15XchZo1a3L8+HE+++wz3NzcSEpKolu3bjz//PP4+/tbu7yiy2S8vvpg1dwfPrjqUBT7LiTg4mDL860q5vr1RURE5O4pwCrAzp07R3Bw8G2fHx4eTmBgYB5WJCJS8ERGRtK6dWtOnTpF+fLl+eOPP/CP3gi/vgDGDMsQoycXsPyMiRE/biYlw0iAhxMz+tSndhlPa5cvhUi3bt2YO3cu7u7uzJ8/n549ezJ27Fhrl1W8ROyDlEvg6A5lm+XqpY0mMx+sPgbAwOYh+JTQXGYiIiIFiebAKsDuu+8+nnnmGXbs2HHLcxISEpg1axY1a9bk559/zsfqRESsLzo6mjZt2nD8+HGCg4P5Y906ypz4BhYPsYRX1Tpj7Ps7kzddZuh3u0nJMNK0gje/vdhc4ZXcsd9//53k5GQA+vfvT0JCgpUrKoZOr7fchzwAtrn7PezSveGciE7Cw9meQfeXz9Vri4iIyL1TD6wC7MiRI7zzzju0a9cOJycn6tevT0BAAE5OTly+fJnDhw9z6NAh6tWrx9SpU+nYsaO1SxYRyTdRUVG0adOGw4cPExgYyPq1qyi7+z3Yv9ByQrNhXG7yOi99u4+/TsQCMOSB8rzWvgp2tvr+Ru5c1apVGTNmDK1atcJsNrNo0SLc3d1veu7TTz+dz9UVE6c3WO7Lt8zVy2Zkmfho7XEAnm1RAQ9nzYknIiJS0BjMZrPZ2kXIze3fv58aNWqQkZHB8uXL+euvvzh79iypqan4+PhQt25d2rdvT82aNa1dag6JiYl4eHiQkJBwyz/sRUTuxbVhg0eOHCEgIIA/Vyym4s7xcG6rZaXBhz/kYOmuPPvtLi5cTsXZ3pYpj9Wmc50Aa5cuVpIb702bN2/m1Vdf5dSpU8TFxeHm5nbT+dMMBgNxcXH3WnKBZpX3+owUmFLW0rvyhZ3gUynXLv3N1jOMW3qIUm6ObBzZCmcH21y7toiI5A99Di361AOrAKtbty6RkZGUKlWKkSNHsmPHDry9va1dloiIVUVERNC6dWuOHj1KmTJl2LR0LmXXD4HLYZZ5cXrMZ3FiJUZ/sYX0LBPBXi582ac+1fz1h4zcm2bNmvH3338DYGNjw/Hjx/H19bVyVcXI+b8t4ZV7IHjn3gTr6VlGPlt/EoCXWl
dUeCUiIlJAaQxFAebp6cnp06cBOHPmDCaTycoViYhYV3h4OC1btuTo0aMEBQXx98IPKLt6gCW88ggms99KJhzy5ZWF+0jPMtGySil+e6G5wivJFd26dSMxMRGAOXPm4ObmZuWKiplTV+e/Kt8ScnHl0CV7wolKTKe0uxM977v9xXNEREQkf6kHVgHWvXt3WrRogb+/PwaDgQYNGmBre/NvBa8FXSIiRdWFCxdo1aoVJ0+epGzZsuz4ajil1g4FUyYENiD2kbkMXXqB7WGWoVsvtKrIK+0qY2uTex90pXi7Nom7u7s7AwYMoEOHDjg7O1u7rOIjD+a/MprMfPmn5W+oQfeH4GCn73ZFREQKKgVYBdjMmTPp1q0bJ0+e5KWXXmLw4MH6tldEiqVz587RqlUrTp8+TblyZdnz4eN4bhpnOVi9C/vum8Izsw8RmZhGCUc7PuhRh/Y1Slu3aClyNIm7FaUlQuQBy+OQB3LtsmsOR3I6Nhl3JzueaKjeVyIiIgWZAqwC7qGHHgJg165dDBs2TAGWiBQ7Z86coVWrVpw5c4aqFUPYOa4hrvtmWg42H84Ct76M/2oPGUYT5Uu5MrNPAyr6lrBu0VIkzZgxg+HDh7Ns2TIMBgNvvPHGLSdxV4CVyy7uBszgEQxuuRNOm81mvrja++rpJuUo4ag/i0VERAoyvVMXEnPmzLF2CSIi+S4sLIyWLVty7tw5GtYoz8bng3A8vQJs7Mjs+CHjz9Xjh7WHAHiwuh8f9KiDm5O9lauWoqpp06aaxN1aLuy03JdpkGuX/Pt0HPvOx+NoZ0O/ZuVy7boiIiKSNzTQX0RECqRTp07RokULzp07R/v65dnc3xnHqD3g6EFc1x/osb0iP2w/h8EAIx6szIze9RVeSb4JCwujVKlS1i7jtnTt2pWSJUvy2GOP3XDs999/p0qVKlSqVImvvvrKCtXdpuwA675cu+SMP08B0KNBED4lHHPtuiIiIpI31ANLREQKnOPHj9OmTRsuXLhAn/vLMbdDJjZXYsGzLAdafkX/pYnEJsXj7mTHx0/WpVUV9YKRvLd//35q1qyJjY0NCQkJHDhw4Jbn1q5dOx8r+3fDhg1jwIABzJs3L8f+rKwshg8fzvr16/Hw8KB+/fp07doVb29vK1V6C2YzXNhheZxLAdahiwn8eTwGGwMMvr98rlxTRERE8pYCLBERKVAOHDhAu3btiIqKYvRDwbzX5AqG9EzMZRqyqMJkxi6KIstkpmppN77sU5+y3q7WLlmKidDQUCIjI/H19SU0NBSDwYDZbM4+fm3bYDBgNBqtWGlOLVu2ZMOGDTfs3759OzVq1CAwMBCADh06sHr1ap588sl8rvA/xJ+FlFiwsYfStXLlkrM2Wua+6lQ7gGBvl1y5poiIiOQtDSEUEZECY/v27bRo0YLoqChmPhHMpEbxGEyZGKt3ZXSJdxi1KpIsk5lH6gTwy9CmCq8kX/1z2GBYWBinT58mLCws+3Zt+/Tp07d9zY0bN/LII48QEBCAwWBgyZIlN5zz+eefU65cOZycnGjUqBHbt2/Plddz8eLF7PAKIDAwkPDw8Fy5dq66NnzQvzbYO93z5aKvpLHsQAQAQ9T7SkREpNBQDywRESkQNm7cSKdOnchMTWLVkDK0848HIPG+l+h1qi0HLsZiY4AxHaox6P6Qm67+JpKXypYte9PH9yI5OZk6deowYMAAunXrdsPxhQsXMnz4cGbMmEGjRo2YPn067du359ixY9kTyIeGhpKVlXXDc1evXk1AQECu1GlVuTx88Idt58k0mqlftiS1ynjkyjVFREQk7ynAEhERq1u1ahVdu3bF1ZDG3y/6U8MjEWzsONHoXXpsK8/llCS8XB347Mm6NK3oY+1ypZj69ddfb/vczp0739Z5HTp0oEOHDrc8/uGHHzJ48GD69+8PwIwZM1i2bBmzZ89m9OjRAOzdu/e26/qngICAHD2uwsPDadiw4U3PTU9PJz09PXs7MT
Hxrtq8K7k4gXum0cR3284C8HST3AkhRUREJH8owBIREatavHgxPXv2pLy7kfWDSuHvlIzZyYPfq01l2AY3TOZMagV6MKNPfQI9na1drhRjXbp0ybF9szmwrsmNObAyMjLYtWsXY8aMyd5nY2ND27Zt2bp16z1fv2HDhhw8eJDw8HA8PDxYsWIF48aNu+m5kyZN4q233rrnNu+YyQRRhyyP/UPv+XKrDkUSfSWdUm6OdKjpf8/XExERkfyjObBERMRqvv32Wx5//HGaB5rY+Zwn/k5pmDzL8U7pT3hxqxsmMzxWvww/PttE4ZVYnclkyr6tXr2a0NBQVqxYQXx8PPHx8Sxfvpx69eqxcuXKXGkvNjYWo9GIn59fjv1+fn5ERkbe9nXatm3L448/zvLlyylTpkx2+GVnZ8cHH3xAq1atCA0N5dVXX73lCoRjxowhISEh+3b+/Pm7f2F3Iv4MZKWCnRN4hdzz5b7Zaul99WTDYBzs9GewiIhIYaIeWPKvJk+ezJgxYxg2bBjTp0+3djkiUoR8+eWXPPfcc/StY8eszi7YGbJIK92Ap5NfZvtRG+xsDLz5SHV6Ny6r+a6kwHn55ZeZMWMGzZs3z97Xvn17XFxcGDJkCEeOHLFidTmtXbv2lsc6d+58W8MdHR0dcXR0zM2ybk/0Ucu9TyWwsb2nS52OSWJbWBw2BnjivqBcKE5ERETykwIsuaUdO3bw5ZdfUrt2bWuXIiJFzAcffMDIESN4p7Ujr9/vCJiJCu5Ep3NPEptmQyk3R/7Xqx73lfOydqkiN3Xq1Ck8PT1v2O/h4cGZM2dypQ0fHx9sbW2JiorKsT8qKorSpUvnShsFXszVILBUtXu+1MKdll5jLSqXIkA9OkVERAod9Z2Wm0pKSqJXr17MmjWLkiVLWrscESkizGYzEyZM4I3RI/ihu/PV8Aq2Bw2kyQlLeFUv2JPfX2yu8EoKtPvuu4/hw4fnCJeioqIYOXLkLSdCv1MODg7Ur1+fdevWZe8zmUysW7eOJk2a5EobBd61Hli+Ve/pMplGEz/vugDAEw2D77UqERERsQIFWHJTzz//PJ06daJt27bWLkVEigij0cgLL7zA/6ZN5I+nXehZ0x6zjT2zS71GjxNtMJlt6NUomAVDmuDn7mTtckX+1ezZs4mIiCA4OJiKFStSsWJFgoODCQ8P5+uvv77t6yQlJbF3797slQTDwsLYu3cv586dA2D48OHMmjWLefPmceTIEZ577jmSk5OzVyUs8nKpB9a6I1HEJmXgU8KR1lV9c6EwERERyW8aQig3WLBgAbt372bHjh23db5Vl9YWkUIhPT2dp59+mv1//MTfg1wpX9IGo6MHI2xGsvh8eRxsbXinS016aF4aKSQqVqzI/v37WbNmDUePWnoJVatWjbZt297RnG07d+6kVatW2dvDhw8HoG/fvsydO5eePXsSExPD+PHjiYyMJDQ0lJUrV94wsXuRZDJCzHHL43vsgfXjTkvvq8fql8HeVt/fioiIFEYKsCSH8+fPM2zYMNasWYOT0+31gLDa0toiUihcuXKFbt26YTy5nq0DXfF0MpDiGsTjV4ZzKMMPfw8nZvSuT50gT2uXKnJHDAYDDz74IA8++OBdX6Nly5aYzeZ/PeeFF17ghRdeuOs2Cq24MDCmg50zeJa768tcSkrnz+MxADxWPzCXihMREZH8pq+gJIddu3YRHR1NvXr1sLOzw87Ojj///JNPPvkEOzs7jEbjDc+x2tLaIlLgxcTE0KZNG4Ji/2RVbxc8nQyEu9Wm+aWxHMrwo1GIF7+92FzhlRQKCxYsuO1zz58/z+bNm/OwmmIge/hgZbC5+z9Zf9t3kSyTmdplPKjo65ZLxYmIiEh+U4AlObRp04YDBw5kz8exd+9eGjRoQK9evdi7dy+2tjcuYe3o6Ii7u3uOm4jI2bNneeD+5nRx28/sR52xtzWw1aUVrWOGE4c7A5qF8O2gRviUcLR2qSK35Ysvvq
BatWpMnTqVI0eO3HA8ISGB5cuX89RTT1GvXj0uXbpkhSqLkGsTuN/j/FeL94QD0LWuel+JiIgUZhpCKDm4ublRs2bNHPtcXV3x9va+Yb+IyK0cOnSIRzs+yHsNL9OjhiWgmmvXgwlxj+Jkb8vH3WvzaKg+TErh8ueff/Lrr7/y6aefMmbMGFxdXfHz88PJyYnLly8TGRmJj48P/fr14+DBg8Vjnqq8dK0Hlu/dB1inYpLYdyEBWxsDj9QJyKXCRERExBoUYImISK7aunUrfR/ryDcPZdAkyB6jwY6xxiEsSGpOkJczX/ZuQPUA9dSUwqlz58507tyZ2NhYNm3axNmzZ0lNTcXHx4e6detSt25dbO5huJv8Q+zVCdxLVbnrSyy92vuqReVS6u0pIiJSyCnAkv+0YcMGa5cgIoXEihUrGDukO6sesyWkpB0ptm70T3mZbeZq3F/Jh0+frIuni4O1yxS5Zz4+PnTp0sXaZRRt8Vfn1PQse1dPN5vN/H4gAoBHQ9X7SkREpLBTgCUiIrli/vz5LHx3MOt7OeLhZCDS1p+nUl7ltDmAoS0r8OqDVbC1MVi7TBEpDNKvQFq85bFHmbu6xNHIK5yOScbBzoY21TScU0REpLBTgCUiIvfEbDbzzttvE7PsXX7t6YitjYG9hur0T36JDIeSfPF4HTrU8rd2mSK5qmTJkhgMNwayBoMBJycnKlasSL9+/ejfv78VqisCEi5Y7p08wOnuhhwvv9r7qmXlUpRw1J+8IiIihZ3ezUVE5K5lZmby4tBnCY34nnEdnAD4ydSSMRkDCPLx4Ms+9ankp2XrpegZP3487777Lh06dKBhw4YAbN++nZUrV/L8888TFhbGc889R1ZWFoMHD7ZytYXQteGDHsF39XSz2cyy/ZYAq1NtBegiIiJFgQIsERG5K1euXGHAk1151msrbRo4YMLApMwnmWXsRNtqfnzYMxR3J3trlymSJzZt2sQ777zDs88+m2P/l19+yerVq/n555+pXbs2n3zyiQKsu5Fwbf6roLt6+pGIK5yO1fBBERGRokTL5IiIyB2LiIigT8cmvBvyN23K25GCE4MzhvOV6WFeaVuFmX0aKLySIm3VqlW0bdv2hv1t2rRh1apVAHTs2JHTp0/nd2lFw7UA6y7nv1p5KBKwrD6o4YMiIiJFgwIsERG5I4cPH+bVrvWZ0/Qclb1tuYgP3dInsN2hEV/3bcCwtpWw0WTtUsR5eXnx22+/3bD/t99+w8vLC4Dk5GTc3DSE9q5kDyG8ux5Yaw9HAdC+RuncqkhERESsTF9JiYjIbduwYQNLxz3K/AfN2NkY2G2qxJCM4Xj7leHXPvUJ8XG1doki+WLcuHE899xzrF+/PnsOrB07drB8+XJmzJgBwJo1a2jRooU1yyy8rk3ifhdDCMPjUzkckYiNAVpVKZXLhYmIiIi1KMASEZHbsuD7b7n0/TN81MYOMLDY2IzRmYN5sE45pnSvhYuD3lKk+Bg8eDDVq1fns88+45dffgGgSpUq/PnnnzRt2hSAV1991ZolFm4Jd98Da90RS++r+mVL4l3CMTerEhERESvSpw0REflXZrOZ6VMmUu3gVJ5oYHnbmJrZgy/NXXj94eoMaFYOg0FDBqX4adasGc2aNbN2GUWPMROuWFYQvJsAa83V4YPtqmvydhERkaJEAZaIiNxSRkYGb77Ymz52y6le0Y4UswOvZA5ll0tzvnuqHo3Le1u7RBGrMRqNLFmyhCNHjgBQo0YNOnfujK2trZUrK+QSL4LZBLaO4HpnQwAT0zL5+/QlANpq9UEREZEiRQGWiIjc1KVLl5g8qB2vVz5JSWdbIswlGZQxAoeguvzWqx7+Hs7WLlHEak6ePEnHjh0JDw+nSpUqAEyaNImgoCCWLVtGhQoVrFxhIZY9fDAQbO5svaG/jseSaTRT3seV8qVK5EFxIiIiYi1ahVBERG5w9MgRZvWvxeRaJynpbGCXqRKd098ltGELFg
xprPBKir2X/q+9e4+Lusz7P/6eGZgB5SQiKIHnTS1NPGHYZlqoqZlabZYdTDsnpTe77a333r9cq812tzWrdXMPd9ld21bWandallKKlVqhmKdMi9Q8IKSCchhg5vr9gZIEKijwHWZez8eDB8x3roHP1bcv1/jmuq7vQw+pS5cu2rt3rzZs2KANGzZoz5496tSpkx566CGry2vezuMOhJlf50mShnaPbciKAACAD2AGFgCgmg/fX6b8FydqRl9Jsum1iiF6THfqtzf00S/6n9st7QF/s3r1aq1bt07R0dFVx1q3bq0nn3ySfbHO1znegdAYo8ydlQHW4Au5+yAAAP6GAAsAUOXl+U+qx6bHdWV3hyqMXbMrbteHYdfqtdv6q1dCpNXlAT7D5XLp2LFjNY4fP35cTqfTgor8SMGeys/1nIH1Td5xHSgolTPIruSO0Wd/AQAAaFZYQggAUEVFhZ5Jv0nDc55Q/3iHfjDhurX8v/Rd54l656HLCa+An7jmmmt0zz33aP369TLGyBijdevW6b777tO1115rdXnNW+H+ys8RF9TrZau/zpckDewUrVAnG+kDAOBvCLAAIMAVFhZq/p3Jur/le4oLs2mbt4PGlj2uvoPHaOHkZEW3ZDYJ8FPPPvusunTpopSUFIWEhCgkJESDBg1S165dNW/ePKvLa96KD1d+bhlTr5ed3P9q8M9YPggAgD9iCSEABLCcXV9r/ewhmtalSJK01DNQs+1T9dgtA3V1z7bWFgf4sKioKL399tvatWuXtm/fLknq0aOHunbtanFlfqD0aOXn0FZ1f0m5R+tzfpDE/lcAAPgrAiwACFBrlv9bjrem6KYuRl5j01MVN+rDmFv0+q39uP08UIv09PQzPv/RRx9VfT137tzGLsd/lRyp/FyPACtr9xGVlnsVG+7ShXH8/gIAwB8RYAFAgDHG6NU/zdDP9y9QhwukYyZU08qnKrrPtVo8tid7xwCnsXHjxjq1s9lsjVyJH/N6pZKjlV/XI8Ba923l7KtBXVrz3x8AAD9FgAUAAaSkpEQv/2qUbmu1QaER0rfetprqfViTx4/QjQPqd8cvINCcOsMKjcRdIMlUfh0SVeeXrf+2ct+sSzu3bviaAACATyDAAoAAsfe7b7R+9lDd06FAkpTh6aO5Yb/UU7cN1sXx3GUQgA84uXwwuKUUVLcbSJSWe5S996gkaSABFgAAfosACwACwPoVixX07zt1QwePvMameRXX6+tu9+pfN/ZRREiw1eUBQKVz2P9qw54jKvNU7n/VsXWLRioMAABYjQALAPyYMUbvPPsrXbr/fxQbZ3TUtNR/VEzVZVffrOd/3om9YgD4lnMIsE4uHxzYmf2vAADwZwRYAOCn3KWlemfmcI2P2CRHqLTV20G/cf5av5k8SgM6RltdHgDUVLWBe1SdX7I+p3ID90s783sNAAB/RoAFAH5of87X+uoPqbohrnK/qzc9g7W0/a/0j5svVUyYy+LqAOA06jkDq7Tcow17jkqSBnZi/ysAAPwZARYA+Jl1S19SdMYvdWVcucqMQ49W3K7owffpf4Z1k8PO8hoAPqxqBlbdAqwt+wpUVuFVTJhTXdq0bLy6AACA5QiwAMBPeL1e/d8Td+iqkncUHunVAROt/7T9h+6dPFGXdY2xujwAOLt6zsDaeGL2Vd/2rdj/CgAAP2e3ugAAwPk7mn9Qyx/qpXEVbys82Ku1nos0u+2f9adf3kt4BQS48ePHq1WrVrrhhhuqHT969Kj69++vpKQk9ezZU3//+98tqvAU9Q2w9la2T2of1UgFAQAAX8EMLABo5rZ9vFTmrbs0KqZEXmPTXzzXyn7lTP1lSHfZWTIIBLxp06ZpypQpeumll6odDw8PV2Zmplq0aKGioiL17NlT1113nVq3tnAvqXOcgdUnse53LQQAAM0TM7AAoBn78M/TlLj8dl0cWaIfTLgess3QpXfN0wNX9iC8AiBJGjJkiMLDw2scdzgcat
GihSTJ7XbLGCNjTFOXV11VgBV11qYHCkp0oKBUdpt0SUJk49YFAAAsR4AFAM1QceERrfxVf12Zv1DhQR6t93bXo+3+osceTlf/jtxKHmguMjMzNWbMGMXHx8tms2nJkiU12syfP18dO3ZUSEiIBg4cqM8++6zBfv7Ro0fVu3dvJSQk6OGHH1ZMjMVLjusxAyv7xOyrbm0j1NLFogIAAPwdARYANDM7P1uhb3/bU6lhOyVJz1eM0Zar/lfz7h2jVi2dFlcHoD6KiorUu3dvzZ8/v9bnX3/9daWnp2vWrFnasGGDevfurREjRujQoUNVbU7uYfXTj/3795/150dFRWnTpk3KycnRq6++qtzc3Abr2zmpT4C196gkqQ/7XwEAEBD4cxUANBPGGGX8JV0DDrysyIhyHTZhmm2bqjvve0CXJERZXR6AczBy5EiNHDnytM/PnTtXd999tyZPnixJWrBggZYtW6YXXnhBM2bMkCRlZ2efdx1xcXHq3bu31qxZU2Ozd6lyiaHb7a56XFhYeN4/swZj6hVg/bj/VVTD1wIAAHwOM7AAoBk4fjRf7/0yWal5LygyqFxfeC/Un9o/r8dnPEx4BfipsrIyZWVlKTU1teqY3W5Xamqq1q5de97fPzc3V8eOHZMkFRQUKDMzU926dau17Zw5cxQZGVn1kZiYeN4/v4byYslbXvn1WQKsco9XX+47Kknq054N3AEACATMwAIAH7d9zRLp7Qc1KqJyxsPfK0YpbvwT+l2/TtYWBqBR5efny+PxKC4urtrxuLg4ffXVV3X+Pqmpqdq0aZOKioqUkJCgRYsWKSUlRbt379Y999xTtXn7gw8+qF69etX6PWbOnKn09PSqx4WFhQ0fYp2cfeVwSsEtzth016HjKi33KtwVpM4xLRu2DgAA4JMIsADARxmvVx8+fadSCv5PLcIqlG8i9LTrAd0/bZoSWp35H3cAcNLKlStrPZ6cnFzn5Ycul0sul6sBq6rFqcsHbWe+i+q2/ZWBfo/4CO64CgBAgCDAAgAfdGTfN9r29FhdFbZXskuZnl7a1OdRPTpuqBz8Yw0ICDExMXI4HDU2Vs/NzVXbtm0tqqoR1WP/q20HKgOsi9pFNGZFAADAh7AHFgD4mKx/P6vS+ZfpsrC9KjMO/bHiJrWcslgPXncl4RUQQJxOp/r166eMjIyqY16vVxkZGUpJSbGwskZSnwDrxAysi+IJsAAACBTMwAIAH1FWUqQPfzdewx3rZXdK33jbaWHrdP363skKDwm2ujwAjeD48ePatWtX1eOcnBxlZ2crOjpa7du3V3p6uiZNmqT+/fsrOTlZ8+bNU1FRUdVdCf3KyQArJOqMzYwxzMACACAAEWABgA/4dsOHOvavu3V1y3xJ0hsVgxV09Rw99vOeFlcGoDF98cUXGjp0aNXjkxulT5o0SQsXLtSECROUl5enRx55RAcPHlRSUpKWL19eY2N3v1DHGVj7C0pVUFKuILtNP4sLa4LCAACALyDAAgALGa9XHzyTpkGHF6lzyzIVmhZ6JmiK7pz+34qPCrW6PACNbMiQITLGnLFNWlqa0tLSmqgiC9UxwNp+Yvlg19gwuYIcjV0VAADwEQRYAGCRH/Z8pW1/nqARLb6THNLnnp9pc9/H9ZuxI7irFoDAU8cAi+WDAAAEJgIsALDAmpdmq8fO53V5ixKVGYf+7h2rEQ/M1ZR2Z9+8GAD8UoW78nNwyBmbsYE7AACBiQALAJrQsfx9+uyp63VVyHYpSNruTdSKDr/SA3fcriAHN4YFAOnMM1CZgQUAQGAiwAKAJrL+red0wYbf66qQY/IamxZWDFefyX/SQ10TrS4NAJqFIneF9hwuliT1IMACACCgEGABQCMrPXZEH825XiOdWVKwtNsbqzfbpGnqfWkKCWYDYgCoq2/ziiRJMWFOtWrptLgaAADQlFivghrmzJmjAQMGKDw8XLGxsRo3bpx27NhhdVlAs/TFe6/o+zl9K8MrSa
+XD9ae65fqlw9OI7wCgHr6Ju+4JKlzmzCLKwEAAE2NAAs1rF69WlOnTtW6deu0YsUKlZeXa/jw4SoqKrK6NKDZKDl2RP/3/0Ypad2D6uo8rFxvlJ4Jf1hjf7tEl/fuZnV5ANAsnQywuhBgAQAQcFhCiBqWL19e7fHChQsVGxurrKwsDR482KKqgOZj3dv/UJv1v9O1zsOSpHfL+yly/FxN659kbWEA0Mz9GGC1tLgSAADQ1AiwcFYFBQWSpOjo6Fqfd7vdcrvdVY8LCwubpC7A1xQdydNHf7xZo4K/kN1pdMhE6s3wO3Tn9FlyBbFcEADO1zeHKmeDd4llBhYAAIGGAAtn5PV6NX36dF122WXq2bNnrW3mzJmj2bNnN3FlgG/JfG2eOmx+Wtc4j0qSFpcPVMKEuXrgktqvGwBA/Xi8Rjn5lQFWV5YQAgAQcAiwcEZTp07Vli1b9PHHH5+2zcyZM5Wenl71uLCwUImJiU1RHmC5I4f26dOnb9Fo10YpWNrnjdY7re/WXWkzFORgm0EAaCjfHylWmccrV5Bd8VGhVpcDAACaGAEWTistLU1Lly5VZmamEhISTtvO5XLJ5XI1YWWADzBGy/76/9Rv30sa7apcNvtG+eXqfts83de9q8XFAYD/Obn/VaeYlnLYbRZXAwAAmhoBFmowxujBBx/U4sWLtWrVKnXq1MnqkgCfsuvLddr3appGh+yUHNJ33jb66IKpmnT3dNn5RxUANAr2vwIAILARYKGGqVOn6tVXX9Xbb7+t8PBwHTx4UJIUGRmp0FCm7CNwVZS5teQPd2tk+XvqGlKmcuPQa54rNXTqfE1uF2d1eQDg1368AyEBFgAAgYgACzU8//zzkqQhQ4ZUO/7iiy/qjjvuaPqCAB/w6bJXFP7J47rBeUCySVkVnbVvwH/ptrG/sLo0AAgIPwZYLS2uBAAAWIEACzUYY6wuAfAZhw/t05pnpuiaoPVyOI0KTAstCh6viTOeVr8Q9n4DgKbybd6JJYTMwAIAICARYAFALYzXqyXP/VqD8l/X2ODKTdrfLeuj2Ov/oLv6JVtcHQAEFq/X6HBxmSQpNpw/HgAAEIgIsADgJz7PWCyzcpbGu3ZXbtLuaaPVbafo9gdmymZjk3YAaGpFZRU6OUE8IjTY2mIAAIAlCLAA4IQfDuxV5p/v0ZigdQpyeVVinFrkTdWIh57VpDZtrC4PAAJWYWmFJCnYYZMryG5xNQAAwAoEWAACnvF69da8X2rwkUUaH3xMkrSirJdCr35Utw++0uLqAADHSsslSREhwcyEBQAgQBFgAQhoa5a+opBP/qAbTiwX/NYTq0/jp+iW+2bwjyQA8BGFJZUzsMJDeOsKAECg4l0AgIC0a2uWdrySrpHOTbK7jIqNU2+aYRo9/c+6tXW01eUBAE5RNQOL/a8AAAhYBFgAAkrhkXwtn/eARmuVurrckqRl7j6KGfOobh802OLqAAC1OVbKDCwAAAId7wIABASvx6M3n52hQYcX6UbHEUnSxvIO+q7ngxp/890WVwcAOJPCEzOwwl3MwAIAIFARYAHweyve/Idab3hGNzr3SA5pn7eVMiJu0M3TnlSfYH4NAoCvOzkDKyKU39kAAAQq3gUA8FufrVqmwuWPa1jINskpFRun/m2u0vC0Z3R7bJzV5QEA6qiw5MQMrBBmYAEAEKgIsAD4nR1ffqGv/jVDo51ZCgrxymNsWlbWTx1+8YRu7TvQ6vIAAHWw69AxZe0+ol/0S1ThyRlYBFgAAAQsAiwAfuPg999p9YLpGuP4RN1cZZKkj9w95L38YV076nqLqwMAnJEx1R7+ZvEWrc85rMRWLX7cA4tN3AEACFi8CwDQ7BUePaylz/6Hrq5YoQlBRZKkDeUdtKf7vRp321SLqwMAnIuDhaWSpD2Hi0/ZA4sZWAAABCoCLADNVtHxQi155lcaWvq+JjqOSnZpV0WcPo+7STfeP0t9gxxWlwgAOEcFJ/a9yjvmPmUPLN
66AgAQqHgXAKDZKSk6rn8/92sNLnpPtzgOSw7pgDdKGaGjdf1/PqWuoS2sLhEAcB68XlMVWuUdd+sYSwgBAAh4dqsLAIC6cpeW6tU/Tlfuk711S+k/leg4rEPeCL0SdINapG/UrTP/olDCKwCoZvz48WrVqpVuuOGGGs/l5ORo6NChuuiii9SrVy8VFRVZUGFNx8sq5D2xJdahQvePSwjZxB0AgIDFn7EA+Lwyt1tvzv9vJR9eoolBhySHlO8N1/tBV+rqtD/p1pg2VpcIAD5r2rRpmjJlil566aUaz91xxx16/PHHdfnll+vw4cNyuVwWVFjTydlXUuUMrJObuBNgAQAQuAiwAPisouPHtGT+TF16bLkmBuVJQdIRb0u9Zx+iYQ88pVvaxltdIgD4vCFDhmjVqlU1jm/dulXBwcG6/PLLJUnR0dFNXNnpFZwSYO0/WqLScq8kKSKUt64AAAQqlhAC8DlHD+fplcfv1JHf99ItJS+rS1Cejnhb6l/eq1Vyz1pN/O2rakN4BcAPZGZmasyYMYqPj5fNZtOSJUtqtJk/f746duyokJAQDRw4UJ999lmD/OydO3cqLCxMY8aMUd++ffXEE080yPdtCKcGWAcKSqu+DnMRYAEAEKh4FwDAZ+Tu/14r/vafGu7N1K32QskhHfJGaIVjiIbdP0c3t0uwukQAaFBFRUXq3bu3pkyZouuuu67G86+//rrS09O1YMECDRw4UPPmzdOIESO0Y8cOxcbGSpKSkpJUUVFR47UffPCB4uNPH/ZXVFRozZo1ys7OVmxsrK6++moNGDBAw4YNa7gOnqNTlxCe1NLpUJCDv70CABCoCLAAWO7rbZuU9a9HdbVtrW61F0l26XtPK60OSdXo+5/ULdExVpcIAI1i5MiRGjly5Gmfnzt3ru6++25NnjxZkrRgwQItW7ZML7zwgmbMmCFJys7OPqeffcEFF6h///5KTEyUJI0aNUrZ2dk+EmDVDOTC2f8KAICARoAFwDKfrHxHeRnP6Gpnti50VP61PcfTRmvDRmrs1Cd0S1i4xRUCgHXKysqUlZWlmTNnVh2z2+1KTU3V2rVrz/v7DxgwQIcOHdKRI0cUGRmpzMxM3XvvvbW2dbvdcrvdVY8LCwvP++efSUEtM7DCQ3jbCgBAIOOdAIAm5fV49M4/5yti+z811PWVdOKGV5vKE7W9zWhdd/+jmugjd8ECACvl5+fL4/EoLi6u2vG4uDh99dVXdf4+qamp2rRpk4qKipSQkKBFixYpJSVFQUFBeuKJJzR48GAZYzR8+HBdc801tX6POXPmaPbs2efVn/qoLcCKCGUGFgAAgYwAC0CTKC4u0pIFv9XFP7ynscF7q4Krj9w9VNTzNo266T71djisLRIA/NDKlStP+9zZljCeNHPmTKWnp1c9LiwsrFp62BiYgQUAAH6KdwIAGtVXW7KV9cYcDTXrNNFxVAqWSk2wlpf1Udzw6Ro6dLTVJQKAT4qJiZHD4VBubm6147m5uWrbtm2T1uJyueRqwtmxhaWVAZbDbpPHayRJEeyBBQBAQCPAAtDgvF6v3l/0gkz2y7rKuUXd7ZWb8eZ7w7RSKRowcZbGde9lcZUA4NucTqf69eunjIwMjRs3TlLl79eMjAylpaVZW1xjstmqZmB1aN1C3+YVSWIGFgAAgY53AgAazNEj+Vr298fUsyBDI09ZJphd3l7bo4fpmrse0U2RUZbWCAC+5Pjx49q1a1fV45ycHGVnZys6Olrt27dXenq6Jk2apP79+ys5OVnz5s1TUVFR1V0J/dXJAKtrm7BTAixmYAEAEMgIsACctzUfLNHBVX/XlUHZusV+XAqW3CZIK8t6yTlgsoZdN0lJVhcJAD7oiy++0NChQ6sen9xnatKkSVq4cKEmTJigvLw8PfLIIzp48KCSkpK0fPnyGhu7+5uqACs2TB9sq1xCGRHK21YAAAIZ7wQAnJO8Q7l6f+Hv1OPoal3u/E5yVh7f52mlTE
eKBk78b43udrGlNQKArxsyZIiMMWdsk5aW5t9LBmtRWFK59PxncWFVx5iBBQBAYCPAAlAvK97+p46ve0lXOb/UrbYSySlVGLvWlHXTDx1Ga8wdv9LNIaFWlwkAaKaMMSqsWkIYXnU8gj2wAAAIaLwTAHBWX23eoC8WP6M+7s80LHh/1d5Wezyt9Yk9WUnX/0pDL+lvbZEAAL9Q7jUq83glSe1bt1CQ3aYKr+EuhAAABDgCLAC1ys/L1Qf/+wcl5mdqkHOnuttM1d5Wq8p6qPTC8Rp960O6OZh/UAAAGk5puUeS5LDbFBESpLiIEO07WqKoFow3AAAEMgIsAFXK3G4te/XPCv56qYY4t2qizV0122pjWXtta3mpBk1I14iuPawtFADgt0rLKmdfRYQEyWazaeao7vriuyO6JCHK2sIAAIClCLCAAFfmdmv5or+pYttS/Txom8bbC6stEVyrPupw1d26dMjV6mNtqQCAAFBaUTkDKzK0csbVNZfE65pL4q0sCQAA+AACLCAAVZSX6703/qbyre/osqBtutZeUHUXwQLTQqvLL5aj1/UaeeM9muBwWFssACCglJZVD7AAAAAkAiwgYBQXHdcHb/xNnp0rdFnQVo2pFlqF6uOyHirpmKqrJz6oa8MjrC0WABCwSk7sgRVBgAUAAE5BgAX4sX17cpT51gJFHVqrQc6dGmcrrgqtCk2o1pR1V3GHVI2c+KBGR0RaWywAAPpxE3cCLAAAcCoCLMDPbFi7StsyXlan4mwNCM7RzTZP1Z5W+d5wrSu/UMXtr9DIidM0OjLK0loBAPiRkSSVVlRu4s4SQgAAcCoCLKCZyzu4X6uWvCDHno+VZN+lvkF56itVzbTaVRGrDaaHWvYarWHj79A1LpeV5QIAcEYnZ2ARYAEAgFMRYAHNTEV5uVYvX6TcrKXqXLZdfYN36xc2T1VgVWHs2ljeQV+7eqnz4JuVMnSUulpbMgAAdVZCgAUAAGpBgAX4uIrycmW+/5b2b1yudsU71Cd4t66yF1U+eSK02uuJVlZFV7njB+rnY+/UgPadNMC6kgEAOGcn70IYEUKABQAAfkSABfiY0pJirXn/LR36cqXiSyoDqyvtxZVPnlj9V2yc+rysk/a27Kkul92ggYNHKNHhsK5oAAAayKFjbklSx9YtLK4EAAD4EgIswGJbsz/XljVvy3bwS3U0e9QreJ+G2coqnzwRWB03LmWXtdce14WK6jFEQ66ZqCtahllXNAAAjaSgpFw2m9QrgbvjAgCAHxFgAU0oZ9d2ZWe+q+K92WpTmqOLgr7XxY4juliSTlkpUWBCtamsvb53/Uyte16lK0bfpJ+H8pdoAEBg6NImTOEsIQQAAKcgwAIagdfj0cb1q7Ur60OZQ9sUV7FPFwblqpPjiDqdbHRidpXH2LSjop12ei/Q8age6tA3VZcOGaXBwbxxBwAEpt4JUVaXAAAAfAwBFnAeiouOa+Paj/T99vXy5O9SVNkBJdrz1TkoT/1sbvWTJMeJjxO+88RoZ0Vb5Tnbq2XnSzVoxC90Udt4XWRRHwAA8DVJiSwfBAAA1RFgoVbz58/XH//4Rx08eFC9e/fWc889p+TkZKvLskTu/r3atvFTHcrZqorDuxVamqvW5rAucBxRB8cPusxWebck2VQ1q0qSyoxDOyvilONtp6MtOiqyc1/1HzJGHS/ooI5WdAQAAB9mVDmUStIlzMACAAA/QYCFGl5//XWlp6drwYIFGjhwoObNm6cRI0Zox44dio2Ntbq8BlPmdmvvdzu1e+cWHd73rdxH9slenKfQ8iOKVKFi7McV7ziqOPtxxZ36Qmf171NkXPq2oo32emN01NlOjpiuSugxUH1ShurilmGV+1sBAIAzKnZXqKUkh92m7u3CrS4HAAD4GAIs1DB37lzdfffdmjx5siRpwYIFWrZsmV544QXNmDGjSWspOPKDSkuKVVZernJ3qcrL3Kood6usrEwV5W5VlJerpKhQJc
cLVFZUqLLiAnncxTJlRVJFiewVJXJWHFeoKVaYShRhL1GkvVhRthJF2orVxWbU5dQfaFe1WVQnHfa21PeeVjrobaWjjtYqb9lOLWK7qPMll+ripEvVKzhYvZrovwkAAP7oSHG5WkqKiwiRK8hx1vYAACCwEGChmrKyMmVlZWnmzJlVx+x2u1JTU7V27dpaX+N2u+V2u6seFxYWNlg9eX+6VF2DDp37N7Crxoypn8r3huuQJ1z53nAdVbiKg6LkCY1RcKt4tU74mbr3TtEF7TspWtIl514JAAA4g6PFZUqQdEFUiNWlAAAAH0SAhWry8/Pl8XgUF1dt0Zzi4uL01Vdf1fqaOXPmaPbs2Y1Sj0f2GsfKjUMe2VUhuzyyy22CVWycKvEGq8Q4VapglZpgueVUmZxyO1qqPDhcckUqKKy1WrSKVWRMvOIu6KQLOnRRTHiEYhqlegAAUFfuCq8kKTKUu/ACAICaCLBw3mbOnKn09PSqx4WFhUpMTGyQ7x390Gr9YLfL5QpRSEiogoKDFSyJt7YAAPgr29mbAACAgEOAhWpiYmLkcDiUm5tb7Xhubq7atm1b62tcLpdcrlo2jmoAbWJr/5kAAAAAACBw1FyfhYDmdDrVr18/ZWRkVB3zer3KyMhQSkqKhZUBAAAAAIBAxQws1JCenq5Jkyapf//+Sk5O1rx581RUVFR1V0IAAAAAAICmRICFGiZMmKC8vDw98sgjOnjwoJKSkrR8+fIaG7sDAAAAAAA0BQIs1CotLU1paWlWlwEAAAAAAMAeWAAAAAAAAPBtBFgAAAAAAADwaQRYAAAAAAAA8GkEWAAAAAAAAPBpBFgAAAAAAADwaQRYAAAAAAAA8GlBVhcA/2OMkSQVFhZaXAkAAJVOjkknxyicn8YY64+XlqvQbVRUUsp7CABAvTHW+z+b4eyigX3//fdKTEy0ugwAAGrYu3evEhISrC6j2WOsBwD4KsZ6/0WAhQbn9Xq1f/9+hYeHy2azndf3KiwsVGJiovbu3auIiIgGqtC3BWKfpcDsN32mz/7KF/tsjNGxY8cUHx8vu50dFM4XY/35C8R+02f67K/os2/0mbHe/7GEEA3Obrc3eOIdERHhM78Ym0og9lkKzH7T58BAn60XGRlpdQl+g7G+4QRiv+lzYKDPgcHX+sxY79+IJQEAAAAAAODTCLAAAAAAAADg0wiw4NNcLpdmzZoll8tldSlNJhD7LAVmv+lzYKDPwJkF6v8vgdhv+hwY6HNgCMQ+w3ps4g4AAAAAAACfxgwsAAAAAAAA+DQCLAAAAAAAAPg0AiwAAAAAAAD4NAIsAAAAAAAA+DQCLFhu/vz56tixo0JCQjRw4EB99tlnZ2y/aNEide/eXSEhIerVq5fefffdJqq04dSnzwsXLpTNZqv2ERIS0oTVnr/MzEyNGTNG8fHxstlsWrJkyVlfs2rVKvXt21cul0tdu3bVwoULG73OhlTfPq9atarGebbZbDp48GDTFNwA5syZowEDBig8PFyxsbEaN26cduzYcdbXNedr+lz63Nyv6eeff16XXHKJIiIiFBERoZSUFL333ntnfE1zPsdoGIE41kuBNd4H4lgvBd54z1gfGGO9xHgP30SABUu9/vrrSk9P16xZs7Rhwwb17t1bI0aM0KFDh2pt/+mnn+rmm2/WnXfeqY0bN2rcuHEaN26ctmzZ0sSVn7v69lmSIiIidODAgaqP3bt3N2HF56+oqEi9e/fW/Pnz69Q+JydHo0eP1tChQ5Wdna3p06frrrvu0vvvv9/IlTac+vb5pB07dlQ717GxsY1UYcNbvXq1pk6dqnXr1mnFihUqLy/X8OHDVVRUdNrXNPdr+lz6LDXvazohIUFPPvmksrKy9MUXX+jKK6/U2LFjtXXr1lrbN/dzjPMXiGO9FHjjfSCO9VLgjfeM9YEx1kuM9/BRBrBQcnKymTp1atVjj8dj4uPjzZw5c2ptf+ONN5rRo0dXOzZw4EBz7733Nm
qdDam+fX7xxRdNZGRkE1XX+CSZxYsXn7HNr3/9a3PxxRdXOzZhwgQzYsSIRqys8dSlzx999JGRZI4cOdIkNTWFQ4cOGUlm9erVp23jD9f0qerSZ3+7po0xplWrVuYf//hHrc/52zlG/QXiWG9MYI/3gTjWGxOY4z1jfe386Xo+FeM9rMYMLFimrKxMWVlZSk1NrTpmt9uVmpqqtWvX1vqatWvXVmsvSSNGjDhte19zLn2WpOPHj6tDhw5KTEw8418+/EVzP8/nIykpSe3atdOwYcP0ySefWF3OeSkoKJAkRUdHn7aNv53ruvRZ8p9r2uPx6LXXXlNRUZFSUlJqbeNv5xj1E4hjvcR4Xxf+cJ7Ph7+M94z1p+dP1zPjPXwFARYsk5+fL4/Ho7i4uGrH4+LiTrsPwMGDB+vV3tecS5+7deumF154QW+//bZeeeUVeb1eDRo0SN9//31TlGyJ053nwsJClZSUWFRV42rXrp0WLFigt956S2+99ZYSExM1ZMgQbdiwwerSzonX69X06dN12WWXqWfPnqdt19yv6VPVtc/+cE1v3rxZYWFhcrlcuu+++7R48WJddNFFtbb1p3OM+gvEsV5ivK+LQBzrJf8a7xnr/Xuslxjv4XuCrC4AwJmlpKRU+0vHoEGD1KNHD/31r3/VY489ZmFlaEjdunVTt27dqh4PGjRI33zzjZ5++mm9/PLLFlZ2bqZOnaotW7bo448/trqUJlPXPvvDNd2tWzdlZ2eroKBAb775piZNmqTVq1ef9k0tgLPzh98NODt/Gu8Z60/PX65nxnv4GmZgwTIxMTFyOBzKzc2tdjw3N1dt27at9TVt27atV3tfcy59/qng4GD16dNHu3btaowSfcLpznNERIRCQ0MtqqrpJScnN8vznJaWpqVLl+qjjz5SQkLCGds292v6pPr0+aea4zXtdDrVtWtX9evXT3PmzFHv3r31zDPP1NrWX84xzk0gjvUS431dMNb/qDmO94z1/j/WS4z38D0EWLCM0+lUv379lJGRUXXM6/UqIyPjtGurU1JSqrWXpBUrVpy2va85lz7/lMfj0ebNm9WuXbvGKtNyzf08N5Ts7OxmdZ6NMUpLS9PixYv14YcfqlOnTmd9TXM/1+fS55/yh2va6/XK7XbX+lxzP8c4P4E41kuM93XhD+e5oTSn8Z6xPnDHeonxHj7A2j3kEehee+0143K5zMKFC822bdvMPffcY6KioszBgweNMcbcdtttZsaMGVXtP/nkExMUFGSeeuops337djNr1iwTHBxsNm/ebFUX6q2+fZ49e7Z5//33zTfffGOysrLMTTfdZEJCQszWrVut6kK9HTt2zGzcuNFs3LjRSDJz5841GzduNLt37zbGGDNjxgxz2223VbX/9ttvTYsWLczDDz9stm/fbubPn28cDodZvny5VV2ot/r2+emnnzZLliwxO3fuNJs3bzbTpk0zdrvdrFy50qou1Nv9999vIiMjzapVq8yBAweqPoqLi6va+Ns1fS59bu7X9IwZM8zq1atNTk6O+fLLL82MGTOMzWYzH3zwgTHG/84xzl8gjvXGBN54H4hjvTGBN94z1gfGWG8M4z18EwEWLPfcc8+Z9u3bG6fTaZKTk826deuqnrviiivMpEmTqrV/4403zIUXXmicTqe5+OKLzbJly5q44vNXnz5Pnz69qm1cXJwZNWqU2bBhgwVVn7uTt4z+6cfJfk6aNMlcccUVNV6TlJRknE6n6dy5s3nxxRebvO7zUd8+//73vzddunQxISEhJjo62gwZMsR8+OGH1hR/jmrrr6Rq587frulz6XNzv6anTJliOnToYJxOp2nTpo256qqrqt7MGuN/5xgNIxDHemMCa7wPxLHemMAb7xnrA2OsN4bxHr7JZowxDT+vCwAAAAAAAGgY7IEFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAACfRoAFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAA
CfRoAFAAAAAAAAn0aABQAAAAAAAJ9GgAUAAAAAAACfRoAFAAAAAAAAn0aABQA+Ji8vT23bttUTTzxRdezTTz+V0+lURkaGhZUBAICGwFgPAPVnM8YYq4sAAFT37rvvaty4cfr000/VrVs3JSUlaezYsZo7d67VpQEAgAbAWA8A9UOABQA+aurUqVq5cqX69++vzZs36/PPP5fL5bK6LAAA0EAY6wGg7giwAMBHlZSUqGfPntq7d6+ysrLUq1cvq0sCAAANiLEeAOqOPbAAwEd988032r9/v7xer7777jurywEAAA2MsR4A6o4ZWADgg8rKypScnKykpCR169ZN8+bN0+bNmxUbG2t1aQAAoAEw1gNA/RBgAYAPevjhh/Xmm29q06ZNCgsL0xVXXKHIyEgtXbrU6tIAAEADYKwHgPphCSEA+JhVq1Zp3rx5evnllxURESG73a6XX35Za9as0fPPP291eQAA4Dwx1gNA/TEDCwAAAAAAAD6NGVgAAAAAAADwaQRYAAAAAAAA8GkEWAAAAAAAAPBp/x8BMX7gj0RFewAAAABJRU5ErkJggg==", - "text/html": [ - "\n", - "
\n", - "
\n", - " Figure\n", - "
\n", - " \n", - "
\n", - " " - ], - "text/plain": [ - "Canvas(toolbar=Toolbar(toolitems=[('Home', 'Reset original view', 'home', 'home'), ('Back', 'Back to previous …" - ] - }, - "metadata": {}, - "output_type": "display_data" + "ename": "ValueError", + "evalue": "zip() argument 2 is longer than argument 1", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mValueError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[19]\u001b[39m\u001b[32m, line 4\u001b[39m\n\u001b[32m 1\u001b[39m x = np.linspace(\u001b[32m0\u001b[39m, np.pi, \u001b[32m201\u001b[39m)\n\u001b[32m 2\u001b[39m m = np.arange(\u001b[32m1\u001b[39m, \u001b[38;5;28mlen\u001b[39m(fornberg) + \u001b[32m1\u001b[39m)\n\u001b[32m 3\u001b[39m y_fornberg = - fornberg[\u001b[32m0\u001b[39m] - \u001b[32m2\u001b[39m*np.sum(\n\u001b[32m----> \u001b[39m\u001b[32m4\u001b[39m \u001b[43m[\u001b[49m\u001b[43ma_\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mnp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcos\u001b[49m\u001b[43m(\u001b[49m\u001b[43mm_\u001b[49m\u001b[43m*\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43ma_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mm_\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mzip\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfornberg\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m1\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mm\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstrict\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m, axis=\u001b[32m0\u001b[39m\n\u001b[32m 5\u001b[39m )\n\u001b[32m 6\u001b[39m 
y_drp1 = - drp_stencil1[\u001b[32m0\u001b[39m] - \u001b[32m2\u001b[39m*np.sum(\n\u001b[32m 7\u001b[39m [a_ * np.cos(m_*x) \u001b[38;5;28;01mfor\u001b[39;00m a_, m_ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(drp_stencil1[\u001b[32m1\u001b[39m:], m, strict=\u001b[38;5;28;01mTrue\u001b[39;00m)], axis=\u001b[32m0\u001b[39m\n\u001b[32m 8\u001b[39m )\n\u001b[32m 10\u001b[39m fig, ax = plt.subplots(\u001b[32m1\u001b[39m, \u001b[32m2\u001b[39m)\n", + "\u001b[31mValueError\u001b[39m: zip() argument 2 is longer than argument 1" + ] } ], "source": [ "x = np.linspace(0, np.pi, 201)\n", "m = np.arange(1, len(fornberg) + 1)\n", - "y_fornberg = - fornberg[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m)], axis=0)\n", - "y_drp1 = - drp_stencil1[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m)], axis=0)\n", + "y_fornberg = - fornberg[0] - 2*np.sum(\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(fornberg[1:], m, strict=False)], axis=0\n", + ")\n", + "y_drp1 = - drp_stencil1[0] - 2*np.sum(\n", + " [a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil1[1:], m, strict=False)], axis=0\n", + ")\n", "\n", "fig, ax = plt.subplots(1, 2)\n", "ax[0].plot(x, x**2, 'k')\n", @@ -1203,7 +1204,7 @@ "source": [ "fig, ax = plt.subplots(1, 2)\n", "widget_handle2 = plot_dispersion(drp_stencil1, h, dt, velocity=vrange, ax=ax)\n", - "fig.set_size_inches(12,6)\n", + "fig.set_size_inches(12, 6)\n", "plt.show()" ] }, @@ -1246,7 +1247,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=drp_stencil1, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=drp_stencil1, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=drp_stencil1, h=h, dt=dt/2, v=1500)\n", "up, datap, rp = acoustic(weights=drp_stencil1, h=h, dt=3*dt, v=1500)" ] @@ -1289,13 +1290,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", "plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 
1], hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -1347,12 +1348,12 @@ "fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", "plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", @@ -1428,7 +1429,7 @@ "\n", " beta = k*h\n", " xs, ys = np.meshgrid(beta, alpha)\n", - " for ii, (axis, zs) in enumerate(zip(ax.flatten(), level_sets)):\n", + " for ii, (axis, zs) in enumerate(zip(ax.flatten(), level_sets, strict=False)):\n", " r = courant[ii]\n", " cb = axis.pcolormesh(xs, ys, zs, norm=norm, shading='gouraud')\n", " axis.set_title(f'{r = :.3g}')\n", @@ -1699,21 +1700,32 @@ ] 
}, "outputs": [ + { + "ename": "NameError", + "evalue": "name 'y_fornberg' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[31]\u001b[39m\u001b[32m, line 5\u001b[39m\n\u001b[32m 3\u001b[39m fig, ax = plt.subplots(\u001b[32m1\u001b[39m, \u001b[32m2\u001b[39m)\n\u001b[32m 4\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, x**\u001b[32m2\u001b[39m, \u001b[33m'\u001b[39m\u001b[33mk\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, \u001b[43my_fornberg\u001b[49m, label=\u001b[33m'\u001b[39m\u001b[33mFornberg\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m 6\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, y_drp1, label=\u001b[33m'\u001b[39m\u001b[33mDRP stencil 1\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m 7\u001b[39m ax[\u001b[32m0\u001b[39m].plot(x, y_drp1, label=\u001b[33m'\u001b[39m\u001b[33mDRP stencil 2\u001b[39m\u001b[33m'\u001b[39m, ls=\u001b[33m'\u001b[39m\u001b[33m:\u001b[39m\u001b[33m'\u001b[39m)\n", + "\u001b[31mNameError\u001b[39m: name 'y_fornberg' is not defined" + ] + }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "184ec5ee95cc403d8cc5cb755f1b2f84", + "model_id": "657e77c5d694452ebf779e993e27113a", "version_major": 2, "version_minor": 0 }, - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLAAAAGQCAYAAAC+tZleAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAx3ZJREFUeJzs3Xd4U2Ubx/Fvku69d0tL2atlTxmypwUUlCEbERUEQUCU5UCGgOKrCMhUBAQZsoegLNl7F8rq3nsmef8IBGoZBdqmlPtzXb2anPOcc+5EbJJfnqHQarVahBBCCCGEEEIIIYQoppSGLkAIIYQQQgghhBBCiMeRAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsSYAkhhBBCCCGEEEKIYk0CLCGEEEIIIYQQQghRrEmAJYQQQgghhBBCCCGKNQmwhBBCCCGEEEIIIUSxJgGWEEIIIYQQQgghhCjWJMASQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIV5AN27cQKFQsGTJEkOXIoQQQpQYkyZNQqFQ5NqWk5PDxx9/jLe3N0qlkqCgIABSUlIYOHAgbm5uKBQKPvzww6IvWAghXiISYAkhBLBkyRIUCsVDf8aOHWuwulasWMGcOXMMdn0hhBDiRfbf13czMzM8PDxo3bo13333HcnJyU88x6JFi5gxYwavv/46S5cuZcSIEQB89dVXLFmyhHfffZfly5fTu3fvwn44QgjxUlNotVqtoYsQQghDW7JkCf369WPKlCn4+fnl2lelShUCAwMNUleHDh04d+4cN27cyLVdq9WSmZmJsbExKpXKILUJIYQQxd1/X9+zs7OJiIhg79697Ny5Ex8fHzZu3Ei1atUAXW+rnJwczMzM9Od488032b9/P3fu3Ml17nr16mFkZMT+/fuL9DEJIcTLysjQBQghRHHStm1batWqZegynujet8hCCCGEeLL/vr6PGzeOv/76iw4dOtCpUycuXryIubk5RkZGGBnl/ogUFRWFnZ1dnnNGRUVRqVKlAqtRo9GQlZUlr+9CCPEIMoRQCCHyQaFQMGnSpDzbfX196du3r/7+vaEKBw4cYOTIkTg7O2NpaUnnzp2Jjo7Oc/zWrVtp0qQJ1tbW2NjYULt2bVasWAFA06ZN2bx5Mzdv3tQPffD19QUePQfWX3/9xSuvvIKlpSV2dna89tprXLx4MVebe/N7BAcH07dvX+zs7LC1taVfv36kpaXlartz504aNWqEnZ0dVlZWlC9fnk8++eTpn0AhhBCimHn11Vf57LPPuHnzJr/88guQew6se6+1e/bs4fz58/rX4r1796JQKAgJCWHz5s367fd6S2dmZjJx4kTKlCmDqakp3t7efPzxx2RmZua6vkKh4P333+fXX3+lcuXKmJqasm3bNgBCQ0Pp378/rq6umJqaUrlyZRYtWpTr+Ht1rF69mi+//BIvLy/MzMxo3rw5wcHBeR7v4cOHadeuHfb29lhaWlKtWjW+/fbbXG0uXbrE66+/joODA2ZmZtSqVYuNGzcWyPMthBDPS3pgCSHEAxITE4mJicm1zcnJ6anP88EHH2Bvb8/EiRO5ceMGc+bM4f3332fVqlX6NkuWLKF///5UrlyZcePGYWdnx8mTJ9m2bRs9evRg/PjxJCYmcufOHWbPng2AlZXVI6+5a9cu2rZtS+nSpZk0aRL
p6enMnTuXhg0bcuLECX34dU+3bt3w8/Nj6tSpnDhxgoULF+Li4sK0adMAOH/+PB06dKBatWpMmTIFU1NTgoODOXDgwFM/H0IIIURx1Lt3bz755BN27NjBoEGDcu1zdnZm+fLlfPnll6SkpDB16lQAKlasyPLlyxkxYgReXl589NFH+vYajYZOnTqxf/9+Bg8eTMWKFTl79iyzZ8/mypUrrF+/Ptc1/vrrL1avXs3777+Pk5MTvr6+REZGUq9ePX3A5ezszNatWxkwYABJSUl5Jov/+uuvUSqVjBo1isTERKZPn07Pnj05fPiwvs3OnTvp0KED7u7uDB8+HDc3Ny5evMimTZsYPnw4oHvdb9iwIZ6enowdOxZLS0tWr15NUFAQa9eupXPnzgX87AshxNORAEsIIR7QokWLPNueZapAR0dHduzYof8WV6PR8N1335GYmIitrS2JiYkMGzaMOnXqsHfv3lzDBe5dr2XLlnh6ehIfH0+vXr2eeM3Ro0fj4ODAoUOHcHBwACAoKIjq1aszceJEli5dmqt99erV+fnnn/X3Y2Nj+fnnn/UB1s6dO8nKymLr1q3PFOIJIYQQxZ2Xlxe2trZcu3Ytzz5LS0t69erFwoULUalUuV6Le/Xqxaeffoqnp2eu7b/88gu7du3i77//plGjRvrtVapUYciQIRw8eJAGDRrot1++fJmzZ8/mGoo4cOBA1Go1Z8+exdHREYAhQ4bw1ltvMWnSJN555x3Mzc317TMyMjh16hQmJiYA2NvbM3z4cM6dO0eVKlVQq9W88847uLu7c+rUqVzDIR98jzN8+HB8fHw4evQopqamAAwdOpRGjRoxZswYCbCEEAYnQwiFEOIB//vf/9i5c2eun2cxePDgXMtwv/LKK6jVam7evAnowqHk5GTGjh2bZ66L/y7fnR/h4eGcOnWKvn376sMrgGrVqtGyZUu2bNmS55ghQ4bkuv/KK68QGxtLUlISgP4N7oYNG9BoNE9dkxBCCPEisLKyytdqhPnx+++/U7FiRSpUqEBMTIz+59VXXwVgz549udo3adIkV3il1WpZu3YtHTt2RKvV5jpH69atSUxM5MSJE7nO0a9fP314BbrXc4Dr168DcPLkSUJCQvjwww/zzOV17z1HXFwcf/31F926dSM5OVl/zdjYWFq3bs3Vq1cJDQ0tkOdICCGelfTAEkKIB9SpU6dAJnH38fHJdd/e3h6A+Ph4AP03vVWqVHnuawH6YKx8+fJ59lWsWJHt27eTmpqKpaVlvmq0sbGhe/fuLFy4kIEDBzJ27FiaN29Oly5deP3111Eq5fsPIYQQJUNKSgouLi4Fcq6rV69y8eJFnJ2dH7o/Kioq1/3/rnwcHR1NQkIC8+fPZ/78+fk6R0G85wgODkar1fLZZ5/x2WefPfK6np6ejzyHEEIUNgmwhBDiOajV6oduV6lUD93+LMMRC8uTajQ3N+eff/5hz549bN68mW3btrFq1SpeffVVduzY8cjjhRBCiBfFnTt3SExMpEyZMgVyPo1GQ9WqVZk1a9ZD93t7e+e6/+BQwHvHg26IYp8+fR56jmrVquW6XxDvOe5dd9SoUbRu3fqhbQrqORJCiGclAZYQQuSDvb09CQkJubZlZWURHh7+TOfz9/cH4Ny5c499Q5jf4YSlSpUCdHNp/NelS5dwcnLK1fsqv5RKJc2bN6d58+bMmjWLr776ivHjx7Nnz56HzhcmhBBCvEiWL18O8MjQ5mn5+/tz+vRpmjdv/kxTAjg7O2NtbY1arS6w19kH33M86pylS5cGwNjYWF7fhRDFlowBEUKIfPD39+eff/7JtW3+/PmP7IH1JK1atcLa2pqpU6eSkZGRa9+D35haWlqSmJj4xPO5u7sTGBjI0qVLcwVt586dY8eOHbRr1+6pa4yLi8uzLTAwECDPUuBCCCHEi+avv/7i888/x8/Pj549exbIObt160ZoaCgLFizIsy89PZ3U1NTHHq9SqejatSt
r167l3LlzefZHR0c/dU01atTAz8+POXPm5Pky7t57DhcXF5o2bcpPP/300C/nnuW6QghR0KQHlhBC5MPAgQMZMmQIXbt2pWXLlpw+fZrt27c/8+p8NjY2zJ49m4EDB1K7dm169OiBvb09p0+fJi0tTb9iYM2aNVm1ahUjR46kdu3aWFlZ0bFjx4eec8aMGbRt25b69eszYMAA0tPTmTt3Lra2tkyaNOmpa5wyZQr//PMP7du3p1SpUkRFRfHDDz/g5eWVa2UlIYQQorjbunUrly5dIicnh8jISP766y927txJqVKl2LhxY54FVZ5V7969Wb16NUOGDGHPnj00bNgQtVrNpUuXWL16Ndu3b3/iXJtff/01e/bsoW7dugwaNIhKlSoRFxfHiRMn2LVr10O/YHocpVLJjz/+SMeOHQkMDKRfv364u7tz6dIlzp8/z/bt2wHdQjaNGjWiatWqDBo0iNKlSxMZGcmhQ4e4c+cOp0+ffubnRQghCoIEWEIIkQ+DBg0iJCSEn3/+mW3btvHKK6+wc+dOmjdv/sznHDBgAC4uLnz99dd8/vnnGBsbU6FCBUaMGKFvM3ToUE6dOsXixYuZPXs2pUqVemSA1aJFC7Zt28bEiROZMGECxsbGNGnShGnTpuWZJDY/OnXqxI0bN1i0aBExMTE4OTnRpEkTJk+ejK2t7TM/biGEEKKoTZgwAQATExMcHByoWrUqc+bMoV+/flhbWxfYdZRKJevXr2f27NksW7aMdevWYWFhQenSpRk+fDjlypV74jlcXV05cuQIU6ZM4Y8//uCHH37A0dGRypUrM23atGeqq3Xr1uzZs4fJkyfzzTffoNFo8Pf3Z9CgQfo2lSpV4tixY0yePJklS5YQGxuLi4sL1atX1z9/QghhSAptcZpRWAghhBBCCCGEEEKI/5A5sIQQQgghhBBCCCFEsSYBlhBCCCGEEEIIIYQo1iTAEkIIIYQQQgghhBDFmgRYQgghhBBCCCGEEKJYkwBLCCGEEEIIIYQQQhRrEmAJIYQQQgghhBBCiGLNyNAFiJJHo9EQFhaGtbU1CoXC0OUIIYQQaLVakpOT8fDwQKmU7++el7zWCyGEKG7ktb7kkwBLFLiwsDC8vb0NXYYQQgiRx+3bt/Hy8jJ0GS88ea0XQghRXMlrfcklAZYocNbW1oDuD4eNjY2BqxFCCCEgKSkJb29v/WuUeD7yWi+EEKK4kdf6kk8CLFHg7g0lsLGxkTe1QgghihUZ7lYw5LVeCCFEcSWv9SWXDAwVQgghhBBCCCGEEMWaBFhCCCGEEEIIIYQQoliTAEsIIYQQQgghhBBCFGsyB9ZL5p9//mHGjBkcP36c8PBw1q1bR1BQkH6/Vqtl4sSJLFiwgISEBBo2bMiPP/5I2bJlC7wWtVpNdnZ2gZ9XlCzGxsaoVCpDlyGEEEIIIYQQwoAkwHrJpKamEhAQQP/+/enSpUue/dOnT+e7775j6dKl+Pn58dlnn9G6dWsuXLiAmZlZgdSg1WqJiIggISGhQM4nSj47Ozvc3NxkQkYhhBBCCCGEeElJgPWSadu2LW3btn3oPq1Wy5w5c/j000957bXXAFi2bBmurq6sX7+eN998s0BquBdeubi4YGFhIaGEeCStVktaWhpRUVEAuLu7G7giIYQQQgghhBCGIAGW0AsJCSEiIoIWLVrot9na2lK3bl0OHTr0yAArMzOTzMxM/f2kpKRHXkOtVuvDK0dHx4IrXpRY5ubmAERFReHi4iLDCYUQQgghhBDiJSSTuAu9iIgIAFxdXXNtd3V11e97mKlTp2Jra6v/8fb2fmTbe3NeWVhYFEDF4mVx79+LzJkmhBBCCCGEEC8nCbDEcxs3bhyJiYn6n9u3bz/xGBk2KJ6G/HsRQgghhBBCiJebBFhCz83NDYDIyMhc2yMjI/X7HsbU1BQbG5tcP6JgTJo0icDAQEOXIYQQRU6r1XL48GFDlyGEEEKIQpSZoyYuNcvQZYgXhMy
BJfT8/Pxwc3Nj9+7d+tAkKSmJw4cP8+677xq2uGKgb9++LF26NM/2q1evUqZMGQNUJIQQJde8efMYOnQoI0eO5JtvvjF0OUIIIYR4Clqtlvi0bCISM4hMziAyMYOIpAwikzKISMwgLjEJRVIolhnhlHGxYvKH7xu6ZPECkADrJZOSkkJwcLD+fkhICKdOncLBwQEfHx8+/PBDvvjiC8qWLYufnx+fffYZHh4eBAUFGa7oYqRNmzYsXrw41zZnZ+enPk9WVhYmJiYFVdZTy87OxtjY2GDXF0KIx7l69SqjRo0CwMfHx8DVCCGEEOJBWq2W2NQswhMyCE1IJzwxnbCEdMIT7wVU6WQlx+CsjsJTEYunIgYPRQzlFLE0u3vbWXF34S8TuJxUGpAASzyZBFgvmWPHjtGsWTP9/ZEjRwLQp08flixZwscff0xqaiqDBw8mISGBRo0asW3bNszMzAxVcrFiamr60OGUf//9N6NHj+b06dM4ODjQp08fvvjiC4yMdP+LNW3alCpVqmBkZMQvv/xC1apVmThxIs2aNWPXrl2MGTOGCxcuEBgYyOLFiylfvnyu8//000988cUXxMbG0qFDBxYsWICtra1+/8KFC/nmm28ICQnB19eXYcOGMXToUABu3LiBn58fK1eu5IcffuDw4cPMmzePXr16MXLkSJYtW4ZKpWLgwIFERESQmJjI+vXrC+9JFEKIx8jJyaF3796kpaXRvHlzPvjgA0OXJIQQQrxUUjJzCE9IvxtOZRCWkE5YQsbdkEr3Y5MTj7ci6u5PNKUVMbyiiMFDEYuHIhYLo8w8aUM2cNvYiFiFAucs0BhZoLX1ppx7FYM8TvHikQDrJdO0aVO0Wu0j9ysUCqZMmcKUKVOKsKoXW2hoKO3ataNv374sW7aMS5cuMWjQIMzMzJg0aZK+3dKlS3n33Xc5cOAAAOHh4QCMHz+eb775BmdnZ4YMGUL//v31bQCCg4NZvXo1f/75J0lJSQwYMIChQ4fy66+/AvDrr78yYcIEvv/+e6pXr87JkycZNGgQlpaW9OnTR3+esWPH8s0331C9enXMzMyYNm0av/76K4sXL6ZixYp8++23rF+/PlfAKYQQRW3atGkcPnwYW1tbFi9ejFIp03UKIYQQBUWj0RKTksnt+DRux+lCqrCE+z2owhLSScrIwZwMvBXR+Cii8FFEUUURRdu7t71V0ZgbPX7eqtOmJly2dqSxqRtutqXA1ovdykxGh20n0K4sy1suRGluD7JYk3gKEmAJg9NqtaRnq4v8uubGqqde3W7Tpk1YWVnp77dt25Zy5crh7e3N999/j0KhoEKFCoSFhTFmzBgmTJig//BVtmxZpk+frj/2XoD15Zdf0qRJE0AXMrVv356MjAx9r7eMjAyWLVuGp6cnAHPnzqV9+/Z88803uLm5MXHiRL755hu6dOkC6OYyu3DhAj/99FOuAOvDDz/Ut7l3nnHjxtG5c2cAvv/+e7Zs2fJUz4cQQhSkEydO6IP/uXPn4u3tbdiChBBCiBfMvbmnbselcTs+jTvx6dyOu/v77v2sHA2gxZEk/BTh+CkjqHEvnFJE4W0adX+I36Ouo1CisPEiyt6THeZmqM1t6ePdCmy9wNaLGf9O4HTMGWY0GUsb3zYA+MZdwjzqH8zMHcHCoQieDVHSSIAlDC49W02lCduL/LoXprTGwuTp/hdo1qwZP/74o/6+paUl7733HvXr188VhjVs2JCUlBTu3Lmjn7+lZs2aDz1ntWrV9Lfd3d0BiIqK0h/n4+OjD68A6tevj0aj4fLly1hbW3Pt2jUGDBjAoEGD9G1ycnJyDTEEqFWrlv52YmIikZGR1KlTR79NpVJRs2ZNNBpN/p8QIYQoIBkZGfTu3ZucnBy6du1Kr169DF2SEEIIUSylZuZwMzbtbi8qXSh1526PqjvxaaRm3e8cYEMKpRUR+CoiqKGM0AVWJhH4KSOxIu3xFzKzA/tSYO8L9r5sUKRyJCu
eoDKvUbt0WzAyISrmHNM2v4UjjvSp3lN/aG33OliZWmNtbK3fVt6+PId7HH7qTgRC3CMBlhBPwdLS8plXHLS0tHzo9gcnU7/3xzy/IVJKSgoACxYsoG7durn2qVSqfF1fCCGKg3HjxnHhwgVcXV2ZN2+evLkVQgjxUktMz+ZWbBo3YlO5GZvKjdg0/e/o5Mxcbc3IxF8RTllFOC0VEfgZR1DOKJJSRGCjfVxPKoWux5SjP9j7gYMf2PsSY27LgrC9xOWkMqPJDH3rf/eNY1PoUXy96lPbSLcglZ+tH819mlPatjRqjRqVUvcZZHiN4XmvJq/t4jlJgCUMztxYxYUprQ1y3YJQsWJF1q5di1ar1f9RPnDgANbW1nh5eT33+W/dukVYWBgeHh4A/PvvvyiVSsqXL4+rqyseHh5cv36dnj17PuFM99na2uLq6srRo0dp3LgxAGq1mhMnThAYGPjcNQshxNPYtWsXc+bMAeDnn3/GycnJsAUJIYQQhezeUD99QBWTxq24e4FVGnGpeeeYsiWFMopQXlWFUcU4nIomEfhq7uCYE4mC/8xz/OBda3dw8AfH0uBY5u5tXWi1PfRvtoVso6l3U14r8xoAxpmJrPhHt4jKhPoTsDbR9aJq49sGXxtf6nvU15/a0tiSOc3mFOhzI8SjSIAlDE6hUDz1UL7iZOjQocyZM4cPPviA999/n8uXLzNx4kRGjhxZIJMPm5mZ0adPH2bOnElSUhLDhg2jW7du+tUQJ0+ezLBhw7C1taVNmzZkZmZy7Ngx4uPj9atMPswHH3zA1KlTKVOmDBUqVGDu3LnEx8fLNyNCiCIVFxenn6/v3XffpX379gauSAghhCg46VlqQmJSuR6TwrUo3e/r0anciE0lOSPnIUdocSWehspQAs2iqGYagb8iDPfsW1hmx+Zu+mDGZe4ATmXvh1OO/rrbDqXB1IosdRbzTs/jcvxlZpd7FxOVrgfV9YTr7Lq1C0tjS32AZWtqy7sB7+Jl7YVKcf9L/ybeTWji3aSAnyEh8u/FTQ2EKCY8PT3ZsmULo0ePJiAgAAcHBwYMGMCnn35aIOcvU6YMXbp0oV27dsTFxdGhQwd++OEH/f6BAwdiYWHBjBkzGD16NJaWllStWpUPP/zwsecdM2YMERERvP3226hUKgYPHkzr1q3zDD0UQojCotVqGTJkCGFhYZQvX56ZM2cauiQhhBDiqWm1WiKTMrkWncL16BSuRafevZ1KaEL6I4+zJo0G1pHUtoigsuoOpdQ3cE67hnH23WF/GuC/h9t4gXM5cCr/wO/yYHm/9/LluMtsv7Edt6Rsurnr5ts1Vhqz6vIqkrKSuJpwlcqOlQFo7NUYC2MLarjUyHWZoYFDn/t5EaKgKbRarfbJzYTIv6SkJGxtbUlMTMTGxibXvoyMDEJCQvDz89OvsieKB41GQ8WKFenWrRuff/65ocvJRf7dCFEyLV++nLfffhsjIyMOHTqUa7GJgva41ybx9OT5FEK8jLLVGm7EpHIlMoXgqBR9b6rr0Sm5Jk7/L0czeMUhntrmEVRU3cErOwT75KsYp4Q+/ACFStdzyrk8OJW7/9upHJha5Wp6KuoUJ6JO0LF0R5wtnAH489qffLL/EwKdA1nebrm+7dLzSzFVmdKiVAuczEvecH15bSr5pAeWEC+pmzdvsmPHDpo0aUJmZibff/89ISEh9OjRw9ClCSFeAjdu3OC9994DYNKkSYUaXgkhhBBPI0et4UZsGlcjk7kSmcKVqGSuRiYTEpNKtvrh/T9USgU+DhZUsddQzzKMSoTgk3UV26QrqOKCUcRlP/xiNp7gWhlcKt3/7VQWjExzNdNqtdxJuUNE/EVqu9XWb596ZCoXYi/gYelBG782AAQ6B9KlbBcCnQNznaNP5T7P/qQIUQxIgCXES0qpVLJkyRJGjRqFVqulSpUq7Nq1i4oVKxq6NCFECadWq+nduzfJyck0bNiQsWPHGrokIYQQLyG1Rsv
NWF2PqquRyVyJ0v2+Hp1Klvrhq4Jbmqgo42pNGWcrqtimU011E9+sYOyTLqKMOAO3bz78YqY2d0OqSg+EVRXB3P6hzbPV2WRrsrEwtgDgdPRpem/tjYOZA3u77dXPW9vUqykelh7Ym90/j7eNN5MbTH6OZ0aI4kkCLCFeUt7e3hw4cMDQZQghXkIzZsxg//79WFtbs3z5cpl7TwghRKGLTcnkUkQyF8OTuBCexKXwZIKjU8jKeXhQZW6soqyrFWVdrCnnYklV62TKaa7hmHQJRcQZuHUaUiIefjG7UuAeAO7VwK2aLrCy9YJ8Lpb0w6kfWHJ+Ce9Ue4cBVQcAUNGxIhZGFnhZeZGcnYyNiW6I3LuB7z79kyHEC0oCLCGEEEIUmRMnTvDZZ58B8N133+Hn52fgioQQQpQkOWoN12NSuRiexMXw5Lu/k4hKznxoezNjJWVdrCnrakU5V2vKulhRwTYH95SLKMMOQOhxOHIc0mIecrRCNy/VvbDKPQDcqj6yV9V/ZWuy+eHUD5yIPMGPLX7U97ayNrEmPSedC7EX9G1NVabsf2s/xkrjp35OhCgpJMASQgghRJFIS0ujZ8+e5OTk0LVrV/r0kbk4hBBCPLuEtCwu3A2qLoUncTEiiSuRj+5V5etoQQU3Gyq621DR3ZoKbjZ4WStRRp2D0MNw5xicPg5x1/IerDTWDflzrwbugbqwyrUymFjmq9ZsTTZno8+Smp3KK16vALqVAbdc30JYahinok/RwKMBAG392tLAowF+trm/5JHwSrzsJMASQgghRJEYM2YMly5dwt3dnZ9++kk/f4cQQgjxJLEpmZwNTeRcaOLd30mEJqQ/tK2liYoK7jZUcLO+G1bpbluaGkFiKNz+F24ehgNHIeIsaB4ywbqDP3jWBK9aut9uVfNMrP44Gq0GtVatD53+vv03I/aOwN/WXx9gAQyqNgiVQkV5+/L6bU7mTiVylUAhnpcEWEIIIYQodFu3buX7778HYMmSJTg6Ohq4IiGEEMVVdHKmPqg6G5rI+dBEwhIzHtrW28Gcim42VHC3oZK7LrDytrdAqVSARg1RF+H2Tjj2L9w6DIm38p7EwhE8a90Nq2qARw2wcHjm+v936n+svryaYdWH0bVcVwBqu9XG0cyRsvZlydZk64Ot18u9/szXEeJlIwGWEEIIIQpVTEwM/fv3B2DYsGG0atXKwBUJgM6dO7N3716aN2/OmjVrDF2OEOIlFZeaxanb8Zy5o+tVdS40kYikh4dVpZ0sqeJpS1VPW6p42lLJwwZb8weG1WWlQdgJOH9IF1bdPgKZiblPolDqJlb3qa8LrLxq6SZdf4ZewRqthjPRZ/g3/F8GVxuMUqEEQKvVEpcRx7HIY/oAy9bUlj3d9kjvYyGegwRYQgghhCg0Wq2WwYMHExERQaVKlfj6668NXZK4a/jw4fTv35+lS5cauhQhxEsiI1vN+bAkTt9O4NTdn1txaXnaKRTg72xFFQ8bfWBVycMGa7P/zAGVnQ7XD0DIPrixD0JP5B0OaGwJ3rV1gZV3XV1gZWr9zI8hW52NsUpXh1qr5t1d75KSnUJDj4ZUda4KQOeynannXo8A54D/PC4Jr4R4HhJgCSGe2Y0bN/Dz8+PkyZMEBgayd+9emjVrRnx8PHZ2doYuTwhRDCxYsIB169ZhbGzMr7/+irm5uaFLEnc1bdqUvXv3GroMIUQJpdVqCYlJ1QdVp24ncDE8iWy1Nk9bf2dLArzsdGGVly2V3G1081X9V04m3Dl6P7C6cxTUWbnbWLmBTz1dYOVTD1yrgOr5P/aejT7LF4e/wNLYkkWtFwG6SdVb+bYiLTtNH2oBeFp54mnl+dzXFELkJgGWEPnQt29f/TfURkZGODg4UK1aNd566y369u2LUqnUt/X19eXmzZsAmJub4+/vz/Dhwxk4cKC+zb2g5x4XFxcaNWrEjBkzKF269FPXN2nSJNavX8+pU6ee8RE+G29vb8LDw3Fyyv8
kk/Pnz2fFihWcOHGC5ORkCbuEKMEuXLjAhx9+CMDUqVMJDAw0aD0vkn/++YcZM2Zw/PhxwsPDWbduHUFBQbna/O9//2PGjBlEREQQEBDA3LlzqVOnjmEKFkK89JIysjl5K4HjN+M5dTuB07cTSEzPOzm6k5UJgd52BHjZEehjRzUvu9zDAB+kUUP4Kbj2ly60un0Ycv4zvNDaA/xeAd9XwLcR2Ps+03DAB8Wkx3Ag9ABl7MtQ2bEyAHZmdlyIvYCR0oi07DQsjC0AmNxg8nNdSwiRfxJgCZFPbdq0YfHixajVaiIjI9m2bRvDhw9nzZo1bNy4ESOj+/87TZkyhUGDBpGWlsbvv//OoEGD8PT0pG3btrnOefnyZaytrbl69SqDBw+mY8eOnDlzBpVKVdQP75moVCrc3Nye6pi0tDTatGlDmzZtGDduXCFVJoQwtIyMDHr06EF6ejqtWrVixIgRhi7phZKamkpAQAD9+/enS5cuefavWrWKkSNHMm/ePOrWrcucOXNo3bo1ly9fxsXFBYDAwEBycnLyHLtjxw48PDwK/TEIIUq20IR0jt2I49iNeI7djOdSRBLa/3SuMjVSUsXTlkBvO/2Pl73544fSJYXpAqvg3XB9L6TH5d5v6XI/sPJrDA6lnzuwUmvUqJT333/POz2PVZdX0b18d32A5W3tzcwmM6npWlMfXgkhipYEWELkk6mpqT6s8fT0pEaNGtSrV4/mzZuzZMmSXD2srK2t9W3HjBnD9OnT2blzZ54Ay8XFBTs7O9zd3ZkwYQI9e/YkODiY8uXL81979+7l448/5vz58xgbG1O5cmVWrFjBnj17mDxZ983PvTcDixcvpm/fviQkJDBq1Cg2bNhAZmYmtWrVYvbs2QQE6Mbj3+u59dFHH/HZZ58RHx9P27ZtWbBgAdbWurkBNBoNM2fOZP78+dy+fRtXV1feeecdxo8fn2cIYX7c640hw1aEKNnGjh3L6dOncXZ2ZunSpbl6qoona9u2bZ7XjAfNmjWLQYMG0a9fPwDmzZvH5s2bWbRoEWPHjgUo0F65mZmZZGZm6u8nJSUV2LmFEMWfWqPlYngSx2/Gc/RGHMdvxhP+kFUBSzlaUNPHnuql7KnubUd5N2uMVU/4+5+dDjcP6kKra39B1IXc+01tdEFV6aa6307lnjuwuker1TJu/zj23dnHqg6r8LL2AqCxV2PORJ/Bz9YvV/vWvq0L5LpCiGcjAZYQz+HVV18lICCAP/74I1eAdY9Go2HdunXEx8djYmLy2HPdmxcmKysrz76cnByCgoIYNGgQv/32G1lZWRw5cgSFQkH37t05d+4c27ZtY9euXQDY2toC8MYbb2Bubs7WrVuxtbXlp59+onnz5ly5cgUHB93SwNeuXWP9+vVs2rSJ+Ph4unXrxtdff82XX34JwLhx41iwYAGzZ8+mUaNGhIeHc+nSpWd/0oQQJd6WLVv49ttvAViyZMlT99QUj5eVlcXx48dz9WJVKpW0aNGCQ4cOFco1p06dqv+yRAhR8mVkqzlxM54jd8OqEzfjSc1S52pjpFRQ2cOGWr4O1CplT01fe1yszfJ3gcRQuLINLm/VzWWVa1igAjxrgH9zKNMcPGuC6hFDDJ9Ctiab01GniUiLoEPpDrorKRSEp4STlJXEwbCDdCvfDdAFWI29Gj/3NYUQBUsCLGF4Wi1k5119pNAZWxTItzcVKlTgzJkzubaNGTOGTz/9lMzMTHJycnBwcHhowHVPeHg4M2fOxNPT86G9r5KSkkhMTKRDhw74+/sDULFiRf1+KysrjIyMcn1I3L9/P0eOHCEqKgpTU1MAZs6cyfr161mzZg2DBw8GdCHbkiVL9D2uevfuze7du/nyyy9JTk7m22+/5fvvv6dPnz4A+Pv706hRo2d5qoQQL4HIyEh9r6Bhw4bRrl07A1dU8sTExKBWq3F1dc213dXV9am+YGjRogWnT58mNTUVLy8vfv/9d+rXr//QtuPGjWP
kyJH6+0lJSXh7ez/bAxBCFDv3Aqt/r8fy7/U4Tt1OIEutydXG2tSIGqXsqVXKnlq+DgR422Jhks+Pk1otRJzRBVaXt0D46dz7rT2gzKvg/yqUbgYWDs/9mNQaNZnqTP1wvyvxV+i3vR9Wxla09m2NsVIXin1Q/QOMlEZUdar63NcUQhQuCbCE4WWnwVcGmIvjkzAwsXzu02i12jzj+EePHk3fvn0JDw9n9OjRDB06lDJlyuQ51svLC61WS1paGgEBAaxdu/ahPbUcHBzo27cvrVu3pmXLlrRo0YJu3brh7u7+yLpOnz5NSkoKjo6Oubanp6dz7do1/X1fX199eAXg7u5OVFQUABcvXiQzM5PmzZvn78kQQrzUNBoNffr0ISoqimrVqjFt2jRDlyQe416v3fwwNTXVfxkihHjxpWepOXFLF1gdfkRg5WZjRt3SDvoeVuVcrVEpn+LL35xM3cTrl7foelslhT6wUwHedaBcG92PS8UCGxYIsPzCcn48/SNvln+TYTWGAVDBvgKlbEpR2bEyKVkp2JvZA1DLrVaBXVcIUbgkwBLiOV28eBE/v9zj452cnChTpgxlypTh999/p2rVqtSqVYtKlSrlardv3z5sbGxwcXHJFSI9zOLFixk2bBjbtm1j1apVfPrpp+zcuZN69eo9tH1KSgru7u4PnWvqwVX/jI1zd8lWKBRoNLo3MLLcvRDiaXz77bds374dMzMzfvvtN8zM8jmURDwVJycnVCoVkZGRubZHRkbKcE0hXjAarQal4v4cURdiL5CRk0Flp8qYqnSh8e2k21yOv4yLhQvVnKvp226+vpksdRav+ryKralu+og7yXc4H3seFwsXqrtU17c9FXmWi+FJ3Iyw5PiNNE7dTiBbnQMKDWiNAAVuNmbU93ekXmkH6pV2xMfB4vGTrT9MVhoE74Tz6+HqDshKub/P2ELXw6p8WyjbGqycn/r5+i+tVsuXh7/kVNQpfmzxI84WunNaGVuRnJXM2Ziz+rYqpYo/g/58+sckhCg2JMAShmdsoesNZYjrPqe//vqLs2fPPnZ1LW9vb7p37864cePYsGFDrn1+fn65wqQnqV69OtWrV2fcuHHUr1+fFStWUK9ePUxMTFCrc89LUKNGDSIiIjAyMsLX1/dpHpZe2bJlMTc3Z/fu3Y8dAimEECdPnmTMmDEAzJ49O09gLwqOiYkJNWvWZPfu3QQFBQG63m+7d+/m/fffN2xxQrzEbibdJDg+GDcrN/3KddmabMbtG0diZiLfNvtWP5zt57M/8/3J7wkqG8TE+hP15+i9pTdZmiy2dtmqn1D87zt/M+3oNNr6tWW683R92+lHpxOXEUdlp8r6AOtw+GEmHZpEY6/GvF9pKgeCY9h3NYbj6o9RmMSSeuNdNOmlAHByvUymwzJ8LKryw6sL9IHV2H1j2XY0klG1RlHZSfc4biTeYMfNHXhZedGu9P2h4dcTr0N2Bm7h57G4tBmu7IDs1PtPirW7rodV+Xa6CdiN8/fFRlp2GvGZ8dib2uufszPRZ5h3eh6ulq7650yhUHAi6gRX469yJuYMzX10owaaejdlRbsVVHSsmOu8El4J8WKTAEsYnkJRIEP5CltmZiYRERGo1WoiIyPZtm0bU6dOpUOHDrz99tuPPXb48OFUqVKFY8eOUavW03dTDgkJYf78+XTq1AkPDw8uX77M1atX9df19fUlJCSEU6dO4eXlhbW1NS1atKB+/foEBQUxffp0ypUrR1hYGJs3b6Zz5875qsPMzIwxY8bw8ccfY2JiQsOGDYmOjub8+fMMGDDgqR8HQEREBBEREQQHBwNw9uxZrK2t8fHx0U8sL4R4saSmpvLWW2+RnZ1NUFAQ77zzjqFLeuGlpKTo/04C+r/xDg4O+Pj4MHLkSPr06UOtWrWoU6cOc+bMITU1VT//mBCiYIQkhnAl/goelh5UddbNkZSRk8HbW98mOj2aLV22YG6k67G+6fom5p2eR/fy3fU
BlpHCiL9u/UW2JpvEzER9GGOkNCJHm0N6Tnqu6/nY+JClzsoVtNzrTVXKplSutvU96pOclYyVsRUA4YnpXArVYqcoz4ELxmzeuU/f1tzHBiOlmmZlvWhVtir1SjtyMj6bzw6At70NpRzvvxc/F3OOm0k3yVTfX3n0asJV5p6cS3WX6roAKzsDrm5n3PGvuKBJ438RUTRO103E/q+jDx/ZGlPFvjw/dVihHxo45dAUriVcY1iNYdR0rQnAqahTfHX4K7ysvZjVdJb+egO2D+Bc7DnmvjqXpt5NAchUZ7IvdB9eVl65noch1YagVCip6VJTv83ezF4/RFAIUXJIgCVEPm3btg13d3eMjIywt7cnICCA7777jj59+jxxefhKlSrRqlUrJkyYwJYtW5762hYWFly6dImlS5cSGxuLu7s77733nv5DYteuXfnjjz9o1qwZCQkJLF68mL59+7JlyxbGjx9Pv379iI6Oxs3NjcaNG+eZ+PdxPvvsM4yMjJgwYQJhYWG4u7szZMiQp34M98ybNy/XSlaNG+tWeLlXsxDixfPhhx9y+fJlPDw8WLhwoXzDXQCOHTtGs2bN9PfvTaDep08flixZQvfu3YmOjmbChAlEREQQGBjItm3bnurvuxAvsyx1FhnqDGxMbABdODL54GTCU8P5qeVPmKh0c5L+ee1PFpxdQPfy3fUBlqnKlJDEEDLUGcSmx+p7SvlY+xDoHIiH1f25XRUKBePrjsdEZYK1yf3pIrqU7UJr39a5tgGse21dnlpb+bailW+rPNsn1P2Cg8GxzP8rhn1Xr3AtOhUwB3RBtrmxirqlHWhUxolGZX+hvKt1rr/PXvYdaVmqJRpt7rmvPqv3GfEZ8ZS2La3f5mbhRpcynfFSa2HjMN0QwcxELN1csDExxsrCBaoHQeUgUnPiSdo7gjSVUa55rS7FXeJszFmSMpNy/Xe4GHeRLHXuVbjtzewxVZmS9sBCT2XtyvJZvc/wt/PP8/wIIV4OCq1WqzV0EaJkSUpKwtbWlsTERGxsbHLty8jIICQkBD8/P5kbReSb/LsRovhas2YNb7zxBgqFgt27d+cKXYqTx702iacnz6d4EWi1WsJSw7iZeJM67nUwUuq+u194diFzT87ljXJv8Gm9T/Vt666oS3pOOps7b8bHxgeAbTe28dvF32jm3Yy+Vfrqz30k/AjWJtb42/nrw66ieDzXY1LZezmavZejOHw9LtfE60oFVPOyuxtYOVHdxw5TI9XzXzj2GpxeCWdWQcLN+9ttPKFKF6jcGTxq6MOqtOw0ItIiUKLE19ZX3/xoxFHiM+IJdAnExcIFgMTMRM5En8HG1IYA5wB922x1NkZKI/lCRDwVeW0q+aQHlhBCCCGeya1btxg0aBAAY8eOLbbhlRCi5ItMjeRk1EmsTaxp6NkQAC1aXlv/GpnqzFyhlIOZAxqthpj0GP3xCoWCUbVGYWlsiZ2ZnX57G982tPFtk+d6ddzrFO4Duis9S82/12PZezmKPZejuRWXlmu/l705Tcs706iMM/VLO2JrYfyIMz2ljCQ4+zuc/g3uHL2/3cQKKr0GAW9CqUbwkFEIFsYWuXpv3VPbrXaebbamtrzi9Uqe7caqAnocQogSRQIsIYQQQjy1nJwcevToQUJCArVr1841NFgIIQrTgdADXI6/TOcynfXzHO29vZcvDn/BK56v6AMspUJJGbsyZKozScm+vxpey1ItaezVGEczx1zn7Va+W5E9hse5GZvKnku6wOrf67Fk5tzvZWWsUlDXz5Gm5Z1pWt4Ff2fLguulpNVC2Ak4thjOrYV7w/cUSvBvrgutyrcDk+dfCEkIIZ6FBFhCCCGEeGqTJk3iwIEDWFtb89tvv2FsLN+WCyEKVkJGAkcijqDWqmnr11a/fdrRaYQkhlDevrw+rKrgWIFqztWo4FAh1zl+a/9bnoDH2sQaa3LPPWVIGo2W03cS2HEhkp0XIgmOSsm138PWjKYVXGhW3oU
G/o5YmhbwR7iMJDi7Go4vgYiz97c7lYcab0PVN8Ba5tcTQhieBFhCCCGEeCq7du3iq6++AmDBggX4+/s/4QghhHi8uIw4TkWdooJDBf0k6Odjz/PR3x/ha+ObK8Bq4tWE8vblsTKx0m8LcA7g13a/5jlvcZ1DKTNHzcFrsew4H8nui5FEJd9f8c9IqaC2rwNNyzvTrIILZV2sCudxxF2Hw/Ph5C+QlazbpjKFykFQsx/41Ms1CbsQQhiaBFhCCCGEyLfIyEh69eqFVqtl8ODBdO/e3dAlCSFeMBqthrCUMP3qfQCf7PuEA2EHGF93PG9WeBOASo6VqOhQkSpOVdBoNSgVuvmWPqr1kUHqfl6JadnsuRzFjgsR/H05mtQstX6flakRTcs707KSK03Lu2BrXki9WrVaCPkHDs+Dy1uBu+t5OZWDWv2hWnewcCicawshxHOSAEsIIYQQ+aLRaOjVqxeRkZFUqVKFOXPmGLokIcQLJiI1gs4bOpOtyeZQj0MYK3VBTaBLIBGpEZiqTPVt7c3sWd1xtaFKLRCRSRlsOxfB9vMRHAmJI0dzfwF4VxtTWlZypWUlN+qVdiiYFQMfRZ2jm9fqwLcQdf7+9jItod4QKP3qQydkF0KI4kQCLCGEEELky9dff82uXbswNzdn1apVmJubG7okIUQxtj90P79e/JUA5wCGBAwBwMXCBZVShVqr5nbybf1qde9Ue0ff5kUXkZjB1nPhbDkbzrGb8WjvZ1aUc7WiVSU3WlZypaqnLUplIQ/Ry8nUrSS4fzbE39BtM7aAwB5Q5x1wLle41xdCiAIkAZYQQgghnujAgQNMmDABgP/9739UqlTJwBUJIYqTO8l3OBB6gDZ+bbA1tQV081rtD91PYmaiPpxSKpT81v433C3dMVLe/yhSXOeqyq+IxAy2nNWFVsdv5Q6tavjY0baKOy0rueLrZFk0BWWlwYmlcOA7SA7TbbNwhHpDofYAMLcvmjqEEKIASYAlhBBCiMeKjY3lrbfeQq1W07NnT/r27WvokoQQBvbgnFQAw/YM42r8VaxNrGlXuh0A9d3rM7rWaOq61811rLe1d5HWWlgikzLYfOZ+T6sH1SxlT7uq7rSt4oaHXRH2Vs3JhGOLYd9MSI3WbbN2hwYfQM2+YFJEAZoQQhQCCbCEEM/sxo0b+Pn5cfLkSQIDA9m7dy/NmjUjPj4eOzs7Q5cnhCgAWq2W/v37c/v2bcqWLcuPP/74wveUEEI8u9CUUL749wtuJ9/mz6A/9X8Pmno1xcbEBkvj+wGJs4Uzb1d+21ClFoqkjGy2nY1gw+lQDl6LzdXTqta90KqqG+62RTzEWqOGs7/Dni8h4ZZum10paPQhBPYEI9PHHi6EEC8CmalPiHzo27cvCoUChUKBsbExrq6utGzZkkWLFqHRaHK19fX11be1sLCgatWqLFy4MFebvXv36tsoFApcXV3p2rUr169ff6b6Jk2aRGBg4LM+vGfm7e1NeHg4VapUyVf7uLg4PvjgA8qXL4+5uTk+Pj4MGzaMxMTEQq5UCPGsvvvuOzZu3IiJiQmrV6/G2tra0CUJIYrQ9cTrXIy9qL/vYObAsYhj3Ey6SXBCsH77B9U/YEmbJTTxbmKIMgtVZo6abeciePeX49T6Yhcfrz3DgWBdeFWzlD0TO1bi33HNWfNuA/o38iv68CpkH/zUGNa9owuvrNygw2z44LhuZUEJr4QQJYT0wBIin9q0acPixYtRq9VERkaybds2hg8fzpo1a9i4cSNGRvf/d5oyZQqDBg0iLS2N33//nUGDBuHp6Unbtm1znfPy5ctYW1tz9epVBg8eTMeOHTlz5gwqVSGuQlOAVCoVbm5u+W4fFhZGWFgYM2fOpFKlSty8eZMhQ4YQFhbGmjVrCrFSIcSzOHbsGKNHjwZg1qxZBgnKhRCGs/ryaj7/93MaeDTgp5Y/AWBuZM7UV6ZSyqYUZezK6NuWtJ6ZGo2WwyFxbDgVypaz4SR
l5Oj3lXWxIqi6J50CPPB2sDBckQm3YMdncGG97r6ZLTQaoZuc3cSAdQkhRCGRHlhC5JOpqSlubm54enpSo0YNPvnkEzZs2MDWrVtZsmRJrrbW1ta4ublRunRpxowZg4ODAzt37sxzThcXF9zd3WncuDETJkzgwoULBAcH52kHul5bderUwdLSEjs7Oxo2bMjNmzdZsmQJkydP5vTp0/oeXffqSUhIYODAgTg7O2NjY8Orr77K6dOn9ee813Nr+fLl+Pr6Ymtry5tvvklycrK+jUajYfr06ZQpUwZTU1N8fHz48ssvAd0QQoVCwalTp/L1HFapUoW1a9fSsWNH/P39efXVV/nyyy/5888/ycnJefIJhBBFJikpiTfffJPs7Gw6d+7M0KFDDV2SEKIQXUu4xncnvuNC7AX9tjpudTBWGmOiMkGjvd/jvEWpFpS1L1viQiuAGzGpzNh+iYbT/uKtBf+y8uhtkjJycLMx453Gpdk8rBE7RjTmvWZlDBde5WTB3zPg+9q68EqhhNoDYdgpXYAl4ZUQooSSHlhCPIdXX32VgIAA/vjjDwYOHJhnv0ajYd26dcTHx2NiYvLYc91bjj4rKyvPvpycHIKCghg0aBC//fYbWVlZHDlyBIVCQffu3Tl37hzbtm1j165dANja6lb/eeONNzA3N2fr1q3Y2try008/0bx5c65cuYKDgwMA165dY/369WzatIn4+Hi6devG119/rQ+pxo0bx4IFC5g9ezaNGjUiPDycS5cuPfuT9h+JiYnY2Njk6sEmhDAsrVbLO++8w7Vr1yhVqhQ///xzifygKoS4b+HZhWy6vonU7FQqOepWGfW19WXfm/tyzWtVEqVm5rD5bDhrjt3hyI04/XYbMyPaVXXntUBP6vo5oFQWg7+DYSdh/XsQdV53v1QjaDsN3PI3nYMQQrzI5BOjKDbSstMAXdf0ex+UstXZZGuyMVIaYaIyydPWzMhMvwJOtiabbHU2KqUKU5XpY9sWpAoVKnDmzJlc28aMGcOnn35KZmYmOTk5ODg4PDTguic8PJyZM2fi6elJ+fLl8+xPSkoiMTGRDh064O/vD0DFihX1+62srDAyMso1nG///v0cOXKEqKgoTE11z8fMmTNZv349a9asYfDgwYAuZFuyZIl+XpvevXuze/duvvzyS5KTk/n222/5/vvv6dOnDwD+/v40atToWZ6qPGJiYvj888/1tQghiof58+ezcuVKjIyMWLlyJfb2sty6ECWFRqthzZU1bAnZwswmM3EydwKgY+mOpGan0sCjQa72JTW80mq1HL0Rz+/HbrP5bDhpWWoAlApoXM6ZN2p606KSC6ZGxWRah+wM2DsVDs4FrRosHKHNNKj6OsgXDEKIl4QEWKLYqLtCt8Ty393/xsFM1zto8fnFzD05l65luzKpwSR926arm5Kek862rtvwtPIEYOWllUw/Op12fu2Y1niavm2btW2Iz4xnXad1lLG/P1dDQdFqtXl6JowePZq+ffsSHh7O6NGjGTp0KGXK5L22l5cXWq2WtLQ0AgICWLt27UN7ajk4ONC3b19at25Ny5YtadGiBd26dcPd3f2RdZ0+fZqUlBQcHR1zbU9PT+fatWv6+76+vrkmZXZ3dycqKgqAixcvkpmZSfPmzfP3ZDyFpKQk2rdvT6VKlZg0aVKBn18I8WxOnDjBsGHDAJg6dSr16tUzcEVCiIKkVChZe3UtF2IvsP3GdnpW7AlAA88GNPBs8ISjX3wRiRmsPXGH34/d5kZsmn67n5Mlb9Tyokt1L9xszQxY4UNEX4Hf+0DU3eGdVbpC2+lg6WTYuoQQoohJgCXEc7p48SJ+fn65tjk5OVGmTBnKlCnD77//TtWqValVqxaVKlXK1W7fvn3Y2Njg4uLyxJW9Fi9ezLBhw9i2bRurVq3i008/ZefOnY/8cJmSkoK7uzt79+7Ns8/Ozk5/29jYONc+hUKhX1nx3rDGgpacnEybNm2wtrZm3bp
1eWoQQhhGQkICr7/+OllZWbz22mt89NFHhi5JCPEc0rLTWHFpBfvu7OPn1j9jpNS99R9YdSChyaG0LNXSwBUWDY1Gy/7gGH759ya7L0Wh1mgBsDRR0b6aO91qeVOzlH3xHCp95nf4czhkp4KlC3ScAxXaG7oqIYQwCAmwRLFxuMdhQDeE8J5+lfvRq2Iv/Ruue/Z22wvohgXe82aFN+latisqZe6u3tu6bsvTtqD89ddfnD17lhEjRjyyjbe3N927d2fcuHFs2LAh1z4/P79cYdKTVK9enerVqzNu3Djq16/PihUrqFevHiYmJqjV6lxta9SoQUREBEZGRvj6+j7Nw9IrW7Ys5ubm7N69+7FDIJ9GUlISrVu3xtTUlI0bN2JmVsy+5RTiJaXVaunXrx8hISH4+vqyePHi4vlhTgiRb0ZKI5aeX0pCZgIHww7S2KsxwEsTXMWmZLLm+B1WHLnFzQd6W9XxdaBbbW/aVnHD0rSYfhzKToetY+DEUt1931eg689g7WrYuoQQwoCK6V9sYShqtZpJkybxyy+/EBERgYeHB3379uXTTz8t9A8yFsZ5V0wxVhljrMrbO+ehbZXGGCvz1/ZZZGZmEhERgVqtJjIykm3btjF16lQ6dOjA22+//dhjhw8fTpUqVTh27Bi1atV66muHhIQwf/58OnXqhIeHB5cvX+bq1av66/r6+hISEsKpU6fw8vLC2tqaFi1aUL9+fYKCgpg+fTrlypUjLCyMzZs307lz53zVYWZmxpgxY/j4448xMTGhYcOGREdHc/78eQYMGPDUjyMpKYlWrVqRlpbGL7/8QlJSEklJSQA4OzujUhWTeSaEeAnNnj2b9evXY2Jiwu+//y7zXgnxgsnIyWDz9c1cjLvIp/U+BcBEZcL7ge9jZmRGLdenf//xItJqtRy7Gc+v/95ky9kIstS6XuXWpkZ0relFz7o+lHV9fK93g0uJgt/ehNDjgAKafAxNxoBS3icJIV5uEmCJXKZNm8aPP/7I0qVLqVy5MseOHaNfv37Y2trq50R5WW3btg13d3eMjIywt7cnICCA7777jj59+qBUPn5y+EqVKtGqVSsmTJjAli1bnvraFhYWXLp0iaVLlxIbG4u7uzvvvfce77zzDgBdu3bljz/+oFmzZiQkJLB48WL69u3Lli1bGD9+PP369SM6Oho3NzcaN26Mq2v+v7377LPPMDIyYsKECYSFheHu7s6QIUOe+jGAbm6dw4d1Pe3+OyfYvV4fQoiid/DgQcaMGQPogqxnCdqFEIaVkJnAlH+noNFqeKvCW/jb6RZ96V6hu4ErKxopmTn8ceIOv/57i8uRyfrtVT1t6VXPh44BHliYvAAffaKvwK9dIeEWmNnB64ugTMHPRSqEEC8ihVar1Rq6CFF8dOjQAVdXV37++Wf9tq5du2Jubs4vv/ySr3MkJSVha2tLYmIiNjY2ufZlZGQQEhKCn5+fDB0T+Sb/boQoPNHR0VSvXp3Q0FDefPNNVqxYUSKHDj7utUk8PXk+De96wnWuxF+hjV8b/bZpR6bhZulG57KdsTF5Of673I5LY8nBG6w+epvkzBwAzIyVdArwoFe9UlTzsjNsgU8j4hwsew3SYsChNPT4HZwKfgEiIUoqeW0q+V6AryFEUWrQoAHz58/nypUrlCtXjtOnT7N//35mzZr1yGMyMzPJzMzU3783JEwIIUTxplar6dWrF6GhoZQvX5758+eXyPBKiJLmQuwFum/qjrmROfU96mNragvAmDpjDFxZ0dBqtRwOiWPxgRB2Xojk7pzslHaypHf9UnSp4YWt+Qu2QEz4GVjWCdLjwa0a9F4nqwwKIcR/SIAlchk7dixJSUlUqFABlUqFWq3myy+/pGfPno88ZurUqUyePLkIqxRCCFEQvvrqK3bs2IG5uTlr1qx54mqoQgjDScxM1AdVFR0qUtGhIu6W7qRkp+i3l3SZOWr+PB3Oov0hXAi//4Vp43LO9GvoS5Oyzii
VL2AIHxcCv3TVhVdetaHnGjC3M3RVQghR7EiAJXJZvXo1v/76KytWrKBy5cqcOnWKDz/8EA8PD/r06fPQY8aNG8fIkSP195OSkvD29i6qkoUQQjyD3bt3M3HiRAB+/PFHqlSpYuCKhBAPE5oSysSDE4lMjWTda+swUhqhUChY3m45pipTQ5dXJBLTs/nl35ssPnCDmBRdr38zYyVdanjRr4Fv8Z+U/XFSY+CXLpAaBa5VoddaMHs5AkkhhHhaEmCJXEaPHs3YsWN58803AahatSo3b95k6tSpjwywTE1NMTV9Od5ACSFESRAWFkaPHj3QarUMGDDgkX/fhRCGZ2dqx6W4S6Rmp3I+9jwBzgEAL0V4FZGYwaIDIaw4fIuUu/Nbudua8XZ9X96q442dhYmBK3xOGjWsHQhx18HOB3qtkfBKCCEeQwIskUtaWlqeFfVUKhUajcZAFQkhhChIOTk5vPnmm0RFRVGtWjXmzp1r6JKEEA+4nXSbfaH76FGxBwCWxpZ8/crX+Nn64WnlaeDqikZwVDI//X2d9adCyVbrJrgq72rNkKal6VDNA2PV41d/fmHs+wau7wEjc+ixGqzdDF2REEIUaxJgiVw6duzIl19+iY+PD5UrV+bkyZPMmjWL/v37F+h1ZPFL8TTk34sQBWf8+PHs27cPa2tr1qxZg7m5uaFLEkLcFZMeQ9CGILI0WVR1qkpV56oANPJsZODKisap2wn8sCeYHRci9dvq+DrwblN/mpZ3LlmLTNw+Anun6m53mAUuFQ1bjxBCvAAkwBK5zJ07l88++4yhQ4cSFRWFh4cH77zzDhMmTCiQ8xsb61aESUtLkw9NIt/S0tKA+/9+hBDP5o8//mD69OkA/Pzzz5QtW9bAFQkhHuRk7kQbvzbEpsdiZWJl6HKKzPGb8Xy7+yr/XInWb2tZyZUhTfypWcregJUVEo0atowGrQaqdYfAHoauSAghXggKrXRtEAUsKSkJW1tbEhMTsbGxybM/PDychIQEXFxcsLCwKFnfpokCpdVqSUtLIyoqCjs7O9zd3Q1dkhAvrMuXL1O7dm2Sk5MZMWIEs2bNMnRJRepJr03i6cjzWTAOhh3kfyf/xw8tftCvJJilzsJE9YLP7ZRPx27E8e3uq+y7GgOASqmgc3VPhjQpTRmXF3hi9ic5vhT+HAamNvDBCbByNnRFQpQI8tpU8kkPLFHk3Nx04/ujoqIMXIl4UdjZ2en/3Qghnl5KSgpdunQhOTmZV155hWnTphm6JCFeemqNmmlHpnE98TpLzy9lWI1hAC9FeHUkJI5vd1/hQHAsAEZKBV1rePFeszL4OFoYuLr/yE6H8DMQfhoSbkJKJKizQakCa3dwKA2lm4Kjf/7Ol5EIu6fobjcdK+GVEEI8BQmwRJFTKBS4u7vj4uJCdna2ocsRxZyxsTEqlcrQZQjxwtJqtQwcOJALFy7g7u7O6tWrZTiuEAai0WpQoEChUKBSqvi03qfsvrWb/lUKdq5Rg8tK1QU9mcmQlQbZqZCdTnBUMhtPhXE5MgVzlDRSmVO3oi9dG1bBw90DzIpJeJWRBOf/gIubIORvUGc9+RjnitDkY6jcGR43uuD0KkiLAccyUGdwwdUshBAvAQmwhMGoVCoJJoQQopDNmTOHVatWYWRkxO+//y69GYUwkMtxl5lyaAq9K/emjW8bAGq71aa2W20DV/aUNBpIDoPYYIi9pvtJCtUFVimRkBIFWSkPPbQMMBLgwU5mwXd/AMxswd4X7EqBUznwrgNetcHCoTAf0X2Jd2D/bDi9MvdjsHQBz5q6XlbWbqAyBU02JIZCxBm4dQiiL8KafnB4HnT/BaxcHn6Nk8t1v2sPApV8mSCEEE9DAiwhhBCihPrnn38YPXo0ALNmzaJhw4YGrkgUN2lpaVSsWJE33niDmTNnGrqcEm37je2ciTnD/07+j1alWqFUKA1d0pOlxekCmntD6KIuQtx1yEl/8rHGFqhNrInNNiY6Q0U
6pmhR4GxlgrudOaYKja6HVmaSrsdTdqpueF34ad3Pg5zKgV8TqNQJfBqAqoA/wmSl6lYEPDwf1Jn3rxnwJlTooLv9uF5V6Qlw+Cc48C3cPgxLO0GfP/MODww/rXs+VSZQrVvBPgYhhHgJSIAlhBBClEBhYWF069YNtVpNjx49eP/99w1dkiiGvvzyS+rVq2foMkosrVarX6xmSMAQMtWZvF3p7eIZXqmzdUHVzQNw58jdOZ9uPbyt0ljXU8qxjK5Xkq03WLuCle4nGju+3x/OiiO3yFbr1otqX82dj1qWw9f5EasrZqVC/E3dPFPxNyDinC4Mir0KMVd0P0cXgI0n1OwHtfqDpePzP+5b/8L6d3XBHECpRrqhgH6NHx9aPcjcDpqOgaqvw5IOut5Yy4Ng4C4wfmDV7RN3e19VaF90vcqEEKIEkQBLCCGEKGGysrJ44403iIyMpEqVKsyfP19WfBV5XL16lUuXLtGxY0fOnTtn6HJKFI1Wwy8XfuFczDmmNZ6GQqHARGXC6NqjDV3afTlZuoDo5kG4dRBuH4HstLzt7H3BPUD341oVnMqArc9De0GlZOYw/+9rLNx/lLQsNQCvlHXi49YVqOpl+/h6TCzBtZLu50GpsXD7X7i8BS5t1g1X3POFrrdTw+FQ/z0weca5s04sgz8/BK1aF4x1mA1lW+U/uPovR3/ouwkWtYbIc3D6N13QBpCdAWdX625X7/1s5xdCiJecBFhCCCFECTN69GgOHjyIjY0Nf/zxB5aWloYuSTylf/75hxkzZnD8+HHCw8NZt24dQUFBudr873//Y8aMGURERBAQEMDcuXOpU6dOvq8xatQoZsyYwcGDBwu4enEj8Qazj88mR5tDUJkgGng2MHRJOgm3IXgnXN2lm5z8v3NVmdmBT33wqQeeNcCtKpjbP/G0Go2WNSfuMGP7ZaKTdUPwArztGNO6PA3KOD1fzZaOuh5LFdpD+1lwfj0cmgsRZ3VB1plV8PrPuoDtaeydBnu/0t2u0lUXXpk9IWTLD0d/eOUj2DYWDs6FGn10KxZGntcNkbRw0q1aKIQQ4qlJgCWEEEKUICtWrOC7774DYPny5ZQtW9bAFYlnkZqaSkBAAP3796dLly559q9atYqRI0cyb9486taty5w5c2jdujWXL1/GxUU3eXRgYCA5OTl5jt2xYwdHjx6lXLlylCtXTgKsQlDarjSjao/CSGFEfY/6hitEq4XQE3BhPVzdAdGXcu+3dNYNlfOpD6UagnMFUD7d8MYjIXFM2XSec6FJAJRytGBsmwq0qeJW8D0/jUwhoDtUfUO3SuCOz3RDDBe2gI7fQmCP/J3n2OL74VXjj6HZJ8/e6+phqveGvV/rhiVe2gSVXtMNjQRdwKWURYyEEOJZSIAlhBBClBBnzpxh4MCBAIwfP55OnToZuCLxrNq2bUvbtm0fuX/WrFkMGjSIfv36ATBv3jw2b97MokWLGDt2LACnTp165PH//vsvK1eu5PfffyclJYXs7GxsbGyYMGHCQ9tnZmaSmZmpv5+UlPQMj6rk0mg1LD2/lHZ+7XC1dAWgZ8WeBipGA6HH4MIG3U/i7fv7FErwqgNlW0CZluBW7akDq3tux6Xx9dZLbD4bDoC1qREfNC9Dnwa+mBoVckCjVOrmm/J/FTa8D5c3w/qhuscX8Objjw35B7aM0t1uNl4331VBM7WCOoPgnxmwfw5U7HT/v4Otd8FfTwghXhISYAkhhBAlQEJCAl27diU9PZ2WLVsyefJkQ5ckCklWVhbHjx9n3Lhx+m1KpZIWLVpw6NChfJ1j6tSpTJ06FYAlS5Zw7ty5R4ZX99rLv6lH++bYNyy7sIx9oftY2GqhYSZpj7wAp36F8+t080TdY2IF5VrrVtPzb5avIYGPk5KZww97glm4P4SsHA1KBbxZx4eRLcvhZGX6nA/iKVk4wJu/wuaP4NjPusnYzWyh/CPC38wUWDsQNDlQ5XVoXIhzktV5B/bNgrA
TkByuG74JYCcBlhBCPCsJsIQQQogXnFqtpmfPngQHB+Pj48OKFStQqWSISkkVExODWq3G1dU113ZXV1cuXbr0iKOez7hx4xg5cqT+flJSEt7e8kH8ntfLvc6m65voWLpj0YZXaXFwbq0uuAo7eX+7iTWUbwOVgqBM89wr4T0jrVbLpjPhfL7pAlF357mqX9qRCR0rUdHd5rnP/8wUCmg3E9RZcHI5/DkcSjV4+HxWh76HlEiw94PXvi/YYYP/ZeUM9qV0wwhjrt7vgWXnU3jXFEKIEk4CLCGEEOIFN3HiRLZs2YKZmRl//PEHTk7POWmyeKn07dv3iW1MTU0xNS3i3jXFXHJWMtYm1gD42fqxtctWLIyfcTW8p6HVwo19cGyRblU+dZZuu9IIyrWBgLegTAswNiuwS16PTmHixvPsuxoD6Oa5Gt+uIi0ruRaPFU6VSl2IdfMgxF2D3Z9D+5m526REwQHd/IA0n1Agod4TOZbVBVixV+/3wLKVAEsIIZ6VBFhCCCHEC2zNmjV8+eWXACxYsICaNWsauCJR2JycnFCpVERGRubaHhkZiZubm4GqermsubKGb098y6LWiyhrr1soodDDq6w0OLsaDv8EURfub3etAoE9oVo3sCzY8DojW80Pe4KZ9/d1stQaTIyUDG3qz5Am/pgZF7NensZm0GEWLHsNji7UTejuWeP+/n9mQHYqeNSAyp2LpiansnB1O8Ree6AHlvRcFEKIZyUBlhBCCPGCOnv2rL73zMiRI+nVq5dhCxJFwsTEhJo1a7J7926CgoIA0Gg07N69m/fff9+wxb0E1Bo1f177k4TMBP68/icja4588kHPI+E2HF0Ax5dCRoJum7GFbrLyGn3APaBQhsL9fSWaz9af41ZcGgCNyzkzpVNlfJ0sC/xaBaZ0U6jcRbdC4Yml9wMsjQbO/q67/eqnhTt08EGO/rrfocch8+7CB7ZeRXNtIYQogSTAEkIIIV5AcXFxBAUFkZqaSvPmzZk2bZqhSxIFKCUlheDgYP39kJAQTp06hYODAz4+PowcOZI+ffpQq1Yt6tSpw5w5c0hNTdWvSigKj0qp4ttm37Lp+qbCXWkw+jLs+wbOrgGtWrfNrhTUGQzVe4G5XaFcNj41i883X+CPE7qJ4N1szJjYsRJtqrgVj+GCTxLwli7AurpLN9xSodD1WEuPB2NL8GtcdLU46nrnceeY7reFI5gU4wBQCCGKOQmwhBBCiBdMTk4Ob775JtevX8fX15dVq1ZhZCQv6SXJsWPHaNasmf7+vQnU+/Tpw5IlS+jevTvR0dFMmDCBiIgIAgMD2bZtW56J3UXByFRncirqFHXd6wJgZ2ZHr0qF1OMx4iz8MxMubAC0um1+jaHuu7rVBJWFM3RPq9Wy9VwEEzacIyYlC4UC+jbw5aNW5bEyfYH+vvg2ApUpJN2B6EvgUhFuHtDt86kLKuOiq8XpboB1L4C0leGDQgjxPF6gVyMhhBBCAHzyySfs3LkTCwsL1q9fj6Ojo6FLEgWsadOmaLXax7Z5//33ZchgEcjIyeD9v97nWMQxvmnyDc1LNS+cC4We0M3TdHnL/W0VOkDjUeBRvXCueVdUUgafbTjH9vO6edXKuljxdddq1CxlX6jXLRQmFuD3CgTvgqs7dQHWjf26faUaFm0tVq66FSGzknX3Zf4rIYR4LhJgCSGEEC+Q3377jRkzZgCwePFiAgICDFyRECWbicoEVwtXTFWm2JraFvwFYq7C7ilwcePdDQrdJOONR4Fr5YK/3gO0Wi3rToYyceN5kjNyMFIqGNqsDO8188fUqJhN0v40yrTUBVjBO6HBB7rVCUHXO6soKRS6ebDCT+nuywqEQgjxXCTAEkIIIV4QJ0+eZMCAAQCMHTuWbt26GbgiIUo+pULJ5AaTGVB1AKVtSxfciZMjYO/XcGKZboiZQglVu+mCq3tDzwpRXGoWn/xxlm3nIwAI8LLl667VqOhuU+jXLnRlW8K2MXD
zkG4C9bQYMDLXrUBY1JzK3g+wpAeWEEI8FwmwhBBCiBdAdHQ0QUFBpKen07ZtW7744gtDlyREiZWek86um7vo6N8RACOlUcGFVzmZcOh/unmuslN128q1heYTwLVSwVzjCXZfjGTM2rPEpGRipFQwomU53mlcGiOVskiuX+gc/cGhNMRdhw3v6bZ51wYjEwPU8kAYKXNgCSHEc5EASwghhCjmsrOz6d69O7du3aJs2bKsWLECleoFHt4jRDGm1qgZsXcEB0IPcCflDu8GvFtwJ7+yHbaN1QUrAF61oeUUKNWg4K7xGCmZOXyx6QIrj94GdHNdze4eSBXPQhgaaWh134Wto3UTuQOUKuLhg/c4+t+/LT2whBDiuUiAJYQQQhRzo0aNYs+ePVhZWbF+/Xrs7OwMXZIQJZZKqaKuW11ORJ6gnnu9gjlpwm3YMgqubNPdt3LTBVfVuunmSSoCp24nMOy3k9yKS0OhgAEN/RjVujxmxiU0DK87WDeH2I7xEHsNqnQxTB0PDge1kzmwhBDieUiAJYQQQhRjCxcu5LvvvgNg+fLlVKpUNEOMhHiZ9avSj/al2+Ni4fJ8J9Ko4ehC3STtWSmgNIb67+nmuTK1Lphin1SCRsvP+0OYtu0SORotnnbmzHwjgPr+L8Hqpb4NYfBe0GqLLCjMw7kC2JUCCwcwszNMDUIIUUJIgCWEEEIUU//88w9Dhw4FYPLkyQQFBRm2ICFKsMPhh6npWhMjpe7t8XOHV1GXYOP7cOeo7r53Pej0HTiXf85K8y8uNYuPVp9iz+VoANpXdeerLlWxNTcushqKBUOFVwBGpvD+MVCqDFuHEEKUABJgCSGEEMVQSEgIXbt21c9/9dlnnxm6pIKRmcyNsGOExV6kssYI26w0yEjgfPItlqQG46ZV8ZHSCdRZoM6Gyp2hziBDVy1KuD239jB8z3AaeDTgu1e/w0T1HJN9azRweB7smgTqTDCxhpaToWY/UBbdJOn/Xo9l+MqTRCZlYmKkZGLHSvSo44NCQpSiZ4jJ44UQogSSAEsIIYQoZpKTk+nUqRMxMTHUrFmTRYsWvXgfOlOiuHV9F7tu7sI0LY6eKWkQcwXS4xnp6cZVExPmRUTRMD0DgEQzM7a5u1AhMwvCTtw/j3uAgR6AeJlotBrMjMzwsvbCWPkcvZMSQ2H9uxDyt+5+2VbQ8Vuw8SiYQvNBo9Hy/Z5g5uy6gkYL/s6WfN+jBhXdbYqsBiGEEKIwSIAlhBBCFCNqtZoePXpw7tw53N3d2bBhAxYWFoYu6/FyMjlzcQ1HQnbwanISpSMuQnI4t8zNmO3mgl9WNj1Dw/XN/dWg1SjRelQHK18ws8Pf2IQxWVE4m9pD7SqgMtb9OJYx3OMSL43mpZqz0nYl3jbezx4WX9ioGzKYkQjGFtDqC6jVv0iHjSWmZzNy1Sl2X4oC4PWaXkx5rTIWJvKWXwghxItPXs2EEEKIYmT8+PFs2rQJMzMz1q9fj6enp6FLykOjziE0ZBfeoWfh+l64c5T/Odlw0MIcy5g4SienAAoqWfnQVmVBORcfqB8ETuXA3pcZD5m82hXoVcSPQ7zccjQ5qLVqTFWmAJS2K/1sJ1Jnw86J8O//dPc9akCXBeBUtOHrlchk3ll+nJCYVEyMlHwZVIU3ankXaQ1CCCFEYZIASwghhCgmli1bxrRp0wBYtGgRderUMXBFD8hKg+BdRFz4g+7Jx0hHy4Gbd7g32KqJWoWl0o5SAR2g3GvgWgUHUyumG7RoIR7tx9M/8vftv5neZDqlbZ8xvEoMhTX94PZh3f0Gw6D5BF3vwSK05Ww4o34/TVqWGk87c+b1qklVL9sirUEIIYQobBJgCSGEEMXAoUOHGDRIN1n5+PHjeeuttwxcEaSnxbHn6LcYhR6n1fVjkJOOK6D09kShVBJSrjnlynUA38b0cPSnx4s2T5d4aaVkpbD2ylpiM2K5Enfl2QKskH3wex9IiwVTW+j8I1RoX/DFPoZ
ao2XG9svM+/saAA38HZn7VnUcrUyLtA4hhBCiKEiAJYQQQhjYrVu3CAoKIisri86dOzNlyhTDFaPVwq1/4dSvbArZwhR7S8pnZtEqJx3sfFBU7MQi7+p4+bfF2NTScHUK8RysTKz4vePv7Li5gzZ+bZ7+BCeWwaYRoMkBt2rQbRk4+BV8oY+RnJHN+ytO8veVaAAGNy7Nx63LY6QqupUOhRBCiKIkAZYQQghhQKmpqbz22mtERUUREBDAsmXLUCqL/gNoeNhxNhydTfU7Z6kbfQOA1koFv9hY0tipGur2v6HyqA4KBUX7MV2IwuFs4UzPij2f7iCNGnZNhINzdfcrd4GgH8DYvOALfIzQhHQGLDnKpYhkzI1VTH+9Gh0Dim6lQyGEEMIQJMASQgghDESj0fD2229z6tQpXFxc2LBhA1ZWVkVXgFYLNw/Avz+yOPogv9lY0UKRRl0TK6gUhE1gDzb41AcDBGovkszMTExNZcjWi2B98Hr8bP0IcA54+oOz0mDtQLi8WXe/yVhoOrZIVxkEOHMngQFLjxGdnImztSmL+tSW+a6EEEK8FCTAEkIIIQxk4sSJ/PHHH5iYmLBu3TpKlSpVJNfNSI9ny4EvqRd8AI+ICwC8ZWzENWtHWlYKggZjwUSGBz7K1q1bWblyJfv27eP27dtoNBosLS2pXr06rVq1ol+/fnh4SG+Y4uZG4g2++PcL1Bo1v3X4jQoOFfJ/cHoCrOgOt/8Flamu11XV1wut1kfZfj6C4StPkpGtoYKbNT/3rY2nXdH2/hJCCCEMRQIsIYQQwgCWLl3KF198AcD8+fNp0KBB4V80LQ4Oz+OTK8vZaWZE38wkPjK2gIA38avzDj+7PMUH+ueg1mhJSs8mMT2bhLu/7/1k5WjIUWvI0WjJVmuo7mNPk3LORVLXk6xbt44xY8aQnJxMu3btGDNmDB4eHpibmxMXF8e5c+fYtWsXn3/+OX379uXzzz/H2bl41C7AztSO5j7NSchMoLx9+fwfmBIFy7tA5Fkws4Ueq8GnXuEV+hBarZaF+0L4autFtFpoUs6Z73tUx9qsaFc7FEIIIQxJAiwhhBCiiO3du1e/4uAnn3xCnz59CvV6MTGXsD3xC8bHFkNWCkHmZpx3dsG7bFtoNhXM7Qv8mulZaq5EJnM9JoU7cenciU/ndnwad+LTCUtIJ0ejzdd5BjTyKzYB1vTp05k9ezZt27Z96Dxl3bp1AyA0NJS5c+fyyy+/MGLEiKIuUzyCnZkd0xpPI1OdiSK/w/4SbsGyIIi7BpYu0HsduFUp1Dr/S6PRMmXTBZYcvAFA73qlmNixkkzWLoQQ4qWj0Gq1+XsHKUQ+JSUlYWtrS2JiIjY2NoYuRwghipXLly9Tv3594uPj6d69OytWrCi8SduTwvhpxwcsSL7MJ7FxdElJBbeqaBqNRFuhAyojkwK5TExKJqdvJ3AxPImL4clcjEjiRkwqT8qoLE1U2JobY2thgq25ETZmxpgZqzBSKTBWKjFSKWhYxol2Vd2fu0Z5bSpYL9LzqdVq8x9YPSj+BixuB0mhYOsDb68HR/+CLu+xstUaPl5zhnUnQwH4tH1FBjTye7bHI4QQJdyL9Nokno30wBJCCCGKSExMDO3btyc+Pp569eqxePHiwgmvUmPgnxlwbBHmlqZkOtrzr4MHXTp+AeVao3yOD79arZY78ekcCYnj6I04jtyI43p06kPbOlmZUNbFGm8Hc7zsLfCyv//b2doU4xLQgyQrK4uQkBD8/f0xMpK3VcXRJ/s/wd7MnvcC38PSOJ9zuyWGwtKOuvDKqRz0Xg+2noVa539lZKt5f8UJdl2MQqVU8M0bAQRVL9oahBBCiOJE3mkJIYQQRSAzM5POnTtz7do1/Pz82LBhA+bmBTv5clpKFEt3jaDx1X1UTk0E4HX7mpSp3In6NYY882qCKZk57L8aw55LUfxzNZrwxIw8bcq6WFHF05YKbtZUdLehorsNztYld2W+tLQ
0PvjgA5YuXQrAlStXKF26NB988AGenp6MHTvWwBUKgAuxF9h0fRNKhZIOpTtQybHSkw9KiYJlnXTDBx1KQ58/wdqt8It9QHJGNgOXHuNwSBymRkp+6FmD5hVdi7QGIYQQoriRAEsIIYQoZFqtlv79+7N//35sbW3ZvHkzLi4uBXeBnCw4sZRZx2exysKIE9YmLLAJhBaTsPBvxrNMD38jJpXdl6LYcymKwyGxZKvvjwc0Uiqo6mVLHV8Havs6UMvXHjuLghmO+KIYN24cp0+fZu/evbRp00a/vUWLFkyaNEkCrGKikmMl5rWYx9X4q/kLr9LiYNlrEBsMtt7w9sYiD69iUzLps/gI50KTsDY1YmGfWtQt7VikNQghhBDFkQRYQgghRCGbPHkyK1aswMjIiLVr11KxYsUCOa9Wo0F9eRNGOyZAfAj9jFQcM/Wka6W30TYcj0KleqrzhSaks+l0GH+eCeNcaFKufaUcLWhW3oVmFVyo4+uAucnTnbukWb9+PatWraJevXq55iOqXLky165dM2Bl4r8aejakoWfDJzfMSoVfukDUBbByg7c3gJ134Rf4gPDEdHouPMz16FQcLU1Y2r8OVTxti7QGIYQQoriSAEsIIYQoRL/88guTJ08G4Mcff6R58+YFct7rIX8x7Z+xVEqIYHh8Ili64Nl0DOuqv43iKSZnj0nJZPOZcP48Hcaxm/H67SqlgnqlHWhW3oVXK7hQ2tmqQOouKaKjox/aiy41NVUm2C4G0nPSUSqUmKryOYxVo4E/BkPYSTB30IVXRTxhe3hiOm/N/5cbsWl42pmzfEAd+f9OCCGEeIAEWEIIIUQh2bdvHwMGDADg448/ZuDAgc9/0oxE+Hs6N88s5qCLI6dtrBlQqQ9WjceAqRX5iU7UGi37rkaz8shtdl2MJOfucoEKBdT1c6BjgAdtq7jjYPlyDQt8GrVq1WLz5s188MEHAPrQauHChdSvX9+QpQlgyfklrL+6nvH1xtPYq/GTD9g1ES5tApUJvPUbuFQo/CIf8GB45e1gzm+D6uFlb1GkNQghhBDFnQRYQgghRCG4evUqQUFBZGVl0bVrV6ZOnfpc59NqNMQcW4Dz3zMgNZqmwFBVRTq8OgUr73wMj0L3Ifn3Y3dYdfQ2oQnp+u0BXrZ0CvSkQzV3XG3MnqvOl8VXX31F27ZtuXDhAjk5OXz77bdcuHCBgwcP8vfffxu6vJeaRqthy/UthKWGkZaT9uQDji+Fg9/pbr/2A/jUK9wC/0PCKyGEECJ/JMASQgghClhsbCzt27cnLi6OOnXqsGzZMpTPuAIgQMTtQ4zb/QHROSmsTYvG1LEsijZf827ZFk88VqvVcuxmPAv3XWfnhUjudrbCxsyILjW8eLOONxXcbJ65tpdVo0aNOHXqFF9//TVVq1Zlx44d1KhRg0OHDlG1alVDl/dSUyqUrO64mi3Xt9CqVKvHN77+N2weqbvddBxUe6PwC3xAVFKGhFdCCCFEPkmAJfIIDQ1lzJgxbN26lbS0NMqUKcPixYupVauWoUsTQohiLz09nU6dOnH16lV8fHzYsGEDFhbP+IE0JxP2z8Zy/yxuujuSojLiQsOhVG86GZ4wz1W2WsOWs+H8vD+EM3cS9dvr+DnwVh1v2lZxx8z45Z6I/Xn5+/uzYMECQ5chHsLcyJyu5bo+vlFiKPzeFzQ5UPUNaDKmSGq7Jz41i14/H+ZGbBpe9hJeCSGEEE8iAZbIJT4+noYNG9KsWTO2bt2Ks7MzV69exd7e3tClCSFEsadWq+nZsycHDx7Ezs6OrVu34ubm9kznunlxHaV2fwUxV7AGZpj44fbqJDw96zz2uOSMbFYcvsWSgzcIT8wAwMRISdcanvRr6Ec5V+tnqkfktmXLFlQqFa1bt861ffv27Wg0Gtq2bWugyl5u8Rnx2Jvl4z2LOgfWDoT0OHAPgE7f6yaBKyIpmTn0XXyEK5EpuNqYSnglhBBC5IMEWCKXadOm4e3
tzeLFi/Xb/Pz8DFiREEK8GLRaLSNGjGDdunWYmJiwYcMGKlWq9NTn0aQn8MWGN1mTcYefU6KobekCbb+mZuUuj/2AnZiWzaIDISw+EEJSRg4ATlamvF2/FD3r+uBolc/V2ES+jB07lq+//jrPdq1Wy9ixYyXAMoC07DQ6ru9IZcfKTH1lKg5mDo9uvHcq3DoIJtbw+mIwLrq53zKy1QxcepTTdxKxtzDmlwF18XaQ8EoIIYR4EgmwRC4bN26kdevWvPHGG/z99994enoydOhQBg0aZOjShBCiWJs1axZz584FYNmyZTRunI+Vz/7r2h6UGz8AoxS0Ntac8KtH7U5LwfzRPUpiUzL5eX8Iyw7dJCVTF1yVcbHincal6RTogamRDBMsDFevXn1oQFmhQgWCg4MNUNHTuXz5Mt27d891/7fffiMoKMhwRT2nY5HHSM5KJjQlFFsT20c3vPYX7PtGd7vTt+DoXzQFAjlqDe+vOMG/1+OwMjViWf+6lJVekUIIIUS+SIAlcrl+/To//vgjI0eO5JNPPuHo0aMMGzYMExMT+vTp89BjMjMzyczM1N9PSkoqqnKFEKJYWLlyJaNGjQJg5syZuYKB/EhLiUK75wssjy8F4CM7H9pVG0Gt6v0feUxcahbz/r7G8kM3Sc9WA1DBzZphzcvSprIbSmXRDYd6Gdna2nL9+nV8fX1zbQ8ODsbS0tIwRT2F8uXLc+rUKQBSUlLw9fWlZcuWhi3qOTX2aszmzpuJSotCpXxEcJscAX8MBrRQsx9UecI8WQVIq9Xy2YZz7LoYhamRkp/71KKq12OCNiGEEELkIgGWyEWj0VCrVi2++uorAKpXr865c+eYN2/eIwOsqVOnMnny5KIsUwghio2///5b//dx2LBhjBw58qmOP3lqMeNOzKJeWgqTAGoPxLLFZGqZWj20fWpmDgv3hbBg33V9j6uqnrZ88GoZWlR0leCqiLz22mt8+OGHrFu3Dn9/XQ+e4OBgPvroIzp16mTg6p7Oxo0bad68+QsRvD2Jl7UXXtZeD9+p1cL6oZAaDa5VoM3UIq3tf3uC+e3IbZQKmPtWdeqWdizS6wshhBAvumdf01uUSO7u7nmGRFSsWJFbt2498phx48aRmJio/7l9+3ZhlymEEMXC+fPnCQoKIisriy5dujBr1iwU+Z0IOjsDto1Ds3UMYUothywsSerxG7T/Bh4SXmXmqFlyIITG0/cwe9cVUjJzqOxhw+K+tdn4fkNaSa+rIjV9+nQsLS2pUKECfn5++Pn5UbFiRRwdHZk5c+Zzn/+ff/6hY8eOeHh4oFAoWL9+fZ42//vf//D19cXMzIy6dety5MiRZ7rW6tWrn7rXYHGTnpP+5EYnl8O13WBkBq8vAmPzwi/srrXH7zBzxxUAJnWqTKvKz7a4gxBCCPEykx5YIpeGDRty+fLlXNuuXLlCqVKlHnmMqakppqYyObAQ4uUSFhZG27ZtSUhIoEGDBvzyyy+oVPmbbyo7/AzG64ZA1HlqAjOsqtCo9Wwsrd3ztNVotGw8HcbMHZe5E6/7kO7raMGo1uVpV8VdQisDsbW15eDBg+zcuZPTp09jbm5OtWrVnm3us4dITU0lICCA/v3706VLlzz7V61axciRI5k3bx5169Zlzpw5tG7dmsuXL+Pi4gJAYGAgOTk5eY7dsWMHHh4egG7Y/8GDB1m5cmWB1G0IwfHB9NzSk6AyQYytM/bhIXLiHdg+Xnf71U/BuXyR1bf/agxj1p4B4J0mpXm7vm+RXVsIIYQoSSTAErmMGDGCBg0a8NVXX9GtWzeOHDnC/PnzmT9/vqFLE0KIYiMpKYl27dpx+/ZtypUrx8aNGzE3f3JvDq1Gw8odw1h+ZzcrYiKws3CCoB9oXa71Q9sfvxnPlE0XOH07AQAXa1OGtyhLt1reGKukE7WhKRQKWrVqRatWrQr83G3btn3sSoazZs1i0KBB9OvXD4B58+axefNmFi1axNixYwH0c1w
9zoYNG2jVqhVmZkW3Cl9B23FzB2k5aUSlRT08vNJqYfNHkJkEXnWg3tAiq+1adArv/nqcHI2WTgEejGldociuLYQQQpQ0EmCJXGrXrs26desYN24cU6ZMwc/Pjzlz5tCzZ09DlyaEEMVCVlYWXbt25fTp07i4uLBt2zYcHfMxl01KNJnrh7Ai/Ty3TYz53TeQQV1WgZVLnqZhCelM23aJDafCALA0UTG0WRn6N/TD3ERWFSwudu/eze7du4mKikKj0eTat2jRokK7blZWFsePH2fcuHH6bUqlkhYtWnDo0KGnOtfq1asZPHjwE9sV5wVb3g14l1qutbAxtXl4g0ub4co2UBrDa9/DoyZ4L2CJ6dkMWnqM5IwcapWyZ8Yb1aTHpBBCCPEcJMASeXTo0IEOHToYugwhhCh2NBoNgwYNYteuXVhaWrJ582b8/PyefODVnbD+XcxSo5luZsmJgNfo0fp/oMzdiyotK4d5f19n/j/XyMjWoFDAGzW9GNW6PC7WL24PmZJo8uTJTJkyhVq1auHu7p7/uc8KQExMDGq1GldX11zbXV1duXTpUr7Pk5iYyJEjR1i7du0T2xbnBVsUCgV13Os8fGdmCmz9WHe74bAiGzqo1mj54LeTXI9JxcPWjB971cTUSMJnIYQQ4nlIgCWEEELk05gxY1i2bBkqlYrVq1dTq1atx7bPzEhk9sae1LxxjJZp6eBSiYpdF1LRtXKudlqtlh0XIpny5wVCE3TzXNXxdWBCx0pU8bQttMcjnt28efNYsmQJvXv3NnQpz8zW1pbIyMh8tR03blyuFTaTkpLw9vYurNIKzj8zICkU7ErBK6OK7LJfb73IP1eiMTdWsaBPLZytZa5QIYQQ4nlJgCWEEELkw4wZM/Sry/3888+0a9fu8QfEXWflH2/yq3E6m5wcqO/aBqtWX+ZZ+exWbBqT/jzPX5eiAPC0M+eTdhVpV9WtSHv1iKeTlZVFgwYNDHJtJycnVCpVnvApMjISN7fCWd2uOC7YEpEawdDdQ+lQugP9KvfL+/9L/A349wfd7bbTwMSiSOpae/wOC/aFADDzjQAqe0gILYQQQhQEmQFWCCGEeIKlS5fy8ce6YUgzZsygT58+jz/gwkb4qQk97lymaUYOX1V+B6v2s3KFV5k5aubuvkrL2X/z16UojFUK3mvmz66RTWhfrWiHpImnN3DgQFasWGGQa5uYmFCzZk12796t36bRaNi9ezf169c3SE2GsCVkC1fjr/L37b8f/v/LrkmgzgK/JlCuTZHUdCkiifHrzwIw7NUytK+Wd2VRIYQQQjwb6YElhBBCPMamTZsYMGAAAKNGjWLUqEcPQ8rOTGXr5nfoeOZPFICxd13mvr4IbL1ytTt4LYbx684REpMKQAN/R6a8VoUyLlaF9jhEwcrIyGD+/Pns2rWLatWqYWxsnGv/rFmznuv8KSkpBAcH6++HhIRw6tQpHBwc8PHxYeTIkfTp04datWpRp04d5syZQ2pqqn5VwpfB6+Vex9bEFmcL57w7bx2G8+tAoYTWX0ERBMLJGdkM/eUEGdkampRz5sMW5Qr9mkIIIcTLRAIsIYQQ4hEOHDhAt27dUKvVvP3220ybNu2RbdXxIQxc34UTyhzSra3oXrU/NJ8AqvvBRmJ6Nl9vvchvR24D4GxtymcdKtFRely9cM6cOUNgYCAA586dy7WvIP5bHjt2jGbNmunv35t/qk+fPixZsoTu3bsTHR3NhAkTiIiIIDAwkG3btuWZ2L0kszGxoWu5rnl3aLWwe4rudmBPcKtS6LVotVrG/nGW6zGpuNuaMbt7oKw4KIQQQhQwCbCEEEKIhzh37hwdOnQgPT2d9u3bs3DhQpTKR4y8v7wV1bohvGqiJtjeDteGI6HeR7ma7LwQyafrzxKZlAlAr3o+fNymAjZmxg87oyjm9uzZU6jnb9q0KVqt9rFt3n//fd5///1CreOFFPI33NwPKhNoOrZILrn835tsPhOOkVL
B9z1q4GBpUiTXFUIIIV4mEmAJIYQQ/3Hz5k1at25NQkICDRo0YPXq1XmGiAHkZGeQ/tdkrA/pJop+27EG7drMwtm9ur5NTEomkzaeZ9OZcAD8nCz5uktV6pZ2LJoHIwpVcHAw165do3Hjxpibm6PVaqU3XRH49sS3VHCoQBOvJpgZmd3fodXCX1/obtfqn2f4bmE4cyeBzzddAGBs2wrULGVf6NcUQgghXkYSYAkhhBAPiI6OplWrVoSFhVG5cmX+/PNPLCzyrl4WH3eN0X++iTY9gZ8Ao7rvomg5BWcjXc8LrVbLupOhTNl0gYS0bFRKBYMbl2Z487KYGauK+FGJghYbG0u3bt3Ys2cPCoWCq1evUrp0aQYMGIC9vT3ffPONoUssscJSwlh4diFKhZI93fbkDrCCd8Odo2BkDo1GFnotaVk5DF95imy1ljaV3RjQyK/QrymEEEK8rGQVQiGEEOKulJQU2rdvz5UrV/Dx8WH79u04ODjkbRh2irjlr3FWk8ZZU1Outp8Kbb+Gu+FVRGIG/ZYcZeTq0ySkZVPJ3YYN7zVkTJsKEl6VECNGjMDY2Jhbt27lCji7d+/Otm3bDFhZyadSqOhXuR8dSnfAwew//3/un637Xas/WBf+fGBfbL5IyN15r6Z1rSa974QQQohCJD2whBBCCCArK4suXbpw9OhRnJyc2LFjB56ennkbnvoNNn2If04GM419cWs1jbJl2gC6XlcbT4fx2fpzJGXkYGKkZHjzsgxuXBpjlXxnVJLs2LGD7du34+WVe4ha2bJluXnzpoGqejm4WroystZDelfdOaab+0ppDPXfK/Q6dl2IZMXhWwB880YAthYyn50QQghRmCTAEkII8dLLycmhZ8+e7Ny5E0tLS7Zs2UL58uVztcnOTuO7dd3peuUgvjk5ULY1r3SZD+Z2AMSnZvHphnNsvjvXVYCXLd90C6SMi1VRPxxRBFJTUx86tDQuLg5TU1MDVCT0va+qdQPbh4TPBSg6OZMxa88AMLCRHw3KOBXq9YQQQgghQwiFEEK85DQaDYMHD2bNmjWYmJiwbt06ateunbtRciSzfnmVJek3GOHqRE7jj+Gtlfrw6q9LkbSa849+FbIRLcqx9t0GEl6VYK+88grLli3T31coFGg0GqZPn06zZs0MWFnJdivpFsHxwXlXaIwJhkubdbcbDi/UGrRaLWPXniE2NYsKbtaMal3+yQcJIYQQ4rlJDywhhBAvLa1Wy4gRI1i8eDEqlYqVK1fSsmXL3I1uH4XVvRmQFskhd3eGVRmEUf1RAKRk5vDl5gv8duQ2AGVcrJjdLZCqXrZF/VBEEZs+fTrNmzfn2LFjZGVl8fHHH3P+/Hni4uI4cOCAocsrsZZdWMaqy6voV6UfI2s+MIzw2M+AFsq2BufCDZR+P3aH3ZeiMFEpmfNmoMxrJ4QQQhQRCbCEEEK8tCZOnMh3330HwKJFi+jcuXOu/df2z8T/r69Bk42TU3nWvrYMlXMFAI7eiGPk6lPcjktHoYABDf0Y1bq8fJh9SVSpUoUrV67w/fffY21tTUpKCl26dOG9997D3d3d0OWVWDmaHExVpgQ4B9zfmJUKJ3/V3a4zuFCvH5WUwRebLwDwUatyVHCzKdTrCSGEEOI+CbCEEEK8lGbOnMnnn38OwPfff8/bb7+t35eTnc5XazuzLuMOC42V1CzdCYJ+QGVqTY5aw3d/BfP9X1fRaMHTzpyZbwRQ39/RUA9FFLHs7GzatGnDvHnzGD9+vKHLealMajCJcXXHoXxwFoxzayEzEex9wf/Vwr3+n+dJysihqqctAxr5Feq1hBBCCJGbBFgvgIsXL7Jy5Ur27dvHzZs3SUtLw9nZmerVq9O6dWu6du0qE8YKIcRTmD9/PqNHjwbgq6++4r33HlixLC0O1ereJKdeQW1pwZWqnajZfgEoFNyOS+PDVac4fjMegK41vJjUqRLWZrL62MvE2NiYM2fOGLqMl5ap6oH3PFo
tHFmgu11rACgLb3rX7ecj2HI2ApVSwdddq2IkK4sKIYQQRUpeeYuxEydO0KJFC6pXr87+/fupW7cuH374IZ9//jm9evVCq9Uyfvx4PDw8mDZtGpmZmYYuWQghir3ffvuNIUOGADB27FjGjRt3f2fURVjQDMWN/UxJymJBpSG81WEhKBRsPB1Gu2/3cfxmPNamRnz3VnW+6RYg4dVLqlevXvz888+GLuOlotFq8m4MOwkRZ0BlCtV7Fdq1E9Oz+Wz9OQDeaVyayh4yz50QQghR1KQHVjHWtWtXRo8ezZo1a7Czs3tku0OHDvHtt9/yzTff8MknnxRdgUII8YL5888/6d27N1qtlqFDh/LVV1/p9/1zeA7nj3zPu/HRYFcK8x6rqOtSkZTMHCZtPM+a43cAqOFjx7dvVsfbwcJQD0MUAzk5OSxatIhdu3ZRs2ZNLC0tc+2fNWuWgSormbRaLe3+aEcpm1JMaTAFV0tX3Y7TK3W/K3YAC4dCu/7XWy8RlZyJn5Mlw5qXLbTrCCGEEOLRJMAqxq5cuYKx8ZO/2a9fvz7169cnOzu7CKoSQogX019//cUbb7yBWq2md+/ezJ07F4VCAVotIXs/54Obq9HYmFPZqiaNu/0Olo6cuZPAsN9OciM2DaUC3n+1LMNeLSNDhwTnzp2jRo0agO71+kEKhcIQJZVod1LuEJoSSmRaJHZmdrqN6mw4t0Z3O+CtQrv28Ztx/HbkFgBfd6kqCzUIIYQQBiIBVjGWn/AKIC0tDQsLi3y3F0KIl82///5Lp06dyMzMJCgoiEWLFqFUKiEnE/78EL/TKxhgb0ucSwXqd1mH1sSChf9cZ9q2S+RotHjYmjHnzerU8Su8Hh7ixbJnzx5Dl/BScbd0Z03HNdxKvnV/DqzgXZAWC5YuULpZoVxXrdEyaaNu1cHutbypW1oWaxBCCCEMRb5CfkE0b96c0NDQPNuPHDlCYGBg0RckhBAviJMnT9K2bVtSU1Np2bIlK1euxMjIiNiYy6QtbQ+nV4BCyQd1P2Fi922kqo0ZtOwYX265SI5GS7uqbmwd3ljCK/FQwcHBbN++nfT0dEA31E0UPCOlEeUdytOyVMv7G+8NH6z6BqgK5zvZ34/d5mxoItZmRoxuU75QriGE+H979x0eVZX/cfw9kzKT3hMISegC0kKXIjWCoCBgQWwIig0EjLo/2F11rai7IhZWXF3FsiqWBaVIka6C1ChSBQKEkgbppM7c3x+RrEhASsidZD6v55knmXvv3Pkcx+FMvnPOuSIi50YFrBrCbrfTpk0bZs+eDYDT6eRvf/sbPXr0YNCgQSanExFxTT/99BMJCQlkZ2fTvXt35syZg81mY9fu+Yz86nr+WpyM0xYEt36O5Yr72HIoh2te/ZZvdqTj7Wnl2WGtmHFLe4J8NcJVTnXs2DH69evHZZddxqBBgzh69CgAd911Fw8//LDJ6dxAUQ7s+rr897Y3X5KnyCks5e+LdwEwsV9Twv11xWcREREzqYBVQyxYsICnnnqKMWPGcMstt9CjRw/eeust5s+fz/Tp082OJyLicrZv305CQgLHjx+nS5cuLFy4sHyh7R3zOPHl/WRa4Re7L1l3fIHRuC9vrd7HTTPXcji7kAZhvsx5oBu3dqmv9YykUg899BBeXl4cPHgQX9//Leg/YsQIFi1aZGKy2qfEUcKrm19lZcpKHE5H+cbdS8BRDOGXQZ3Wl+R5X132C8cKSmgS6c+obg0uyXOIiIjIudMaWDXIuHHjOHToEC+88AKenp6sXLmSbt26mR1LRMTl7Nq1i759+5KRkUH79u1ZtGgRgQEBsPrvsPwZ2gGvRbag1fVvY3jVYez7G/lmRzoA17apy9ThrQmwa9SVnNmSJUtYvHgxMTExp2xv2rQpBw4cMClV7bTj+A7e2voWofZQVt60snzjznnlP5tfC5egyLwnPY/3vt8PwOPXXo6XLtwgIiJiOhWwaoisrCzuvvtuli1bxptvvsm
qVavo378/L774Ig888IDZ8UREXMaePXvo27cvaWlptG3blqVLl+Lr48HfPx7AqL0biQToch/d+z/L5sN5PPjRtxzOLsTbw8pjgy/nti5xGnUlf6igoOCUkVcnHT9+HJtNU82qko+nD8OaDMPH06f8vVlaBL98U76zxbVV/nyGYfDkvO2UOQ0SWkTR87KIKn8OEREROX8qYNUQrVq1omHDhmzZsoWGDRsyduxYZs+ezQMPPMCCBQtYsGCB2RFFREyXnJxM3759OXLkCC1btmTp0qWE2uGJj6/iv5YCkqIi+CD+USyd7uLf3ybz/NflVxmsH+bLjFva06pekNlNkBriyiuv5P333+fpp58GwGKx4HQ6efHFF+nT59JcEc9dXRZyGU91f+p/G/athNICCKwH0e2r/Pm+23OMNb9klhe1r21R5ecXERGRC6MCVg1x33338Ze//KX8su+/GjFiBN27d2f06NEmJhMRcQ0HDx6kb9++pKSk0Lx5c5YtW0aENRfevpG7cw+woW4U49o9SF6rUTzywSaWbk8D4JrWdXn+ek0ZlPPz4osv0q9fPzZu3EhJSQl/+tOf2LZtG8ePH+e7774zO17tVjF98Joqnz5oGAZ/X7wTgFuviKN+mF+Vnl9EREQunApYNcRjjz1W6faYmBiWLl1azWlERFzL4cOH6du3L/v376dp06YsX74c//xt8PkYKMwiNiiWLwd9RLK1IUNnfEdyZkHF6IrbrtBC7XL+WrVqxe7du3n99dcJCAggPz+f4cOHM27cOOrWrWt2vFqj1FnKidITBNl+HR3pdPzv6oPNq3764OJtafx4KAdfbw/G9WlS5ecXERGRC6cClgs7ePAgcXFx53z84cOHqVev3iVMJCLielJTU+nbty979+6lUaNGLF++nI07Z/BC8hzeduTTPLo9jPyEpfudPPLZd5wocRAdZGfm7R1oExNsdnypQYYPH86sWbMIDAzk/fffZ8SIEfzlL38xO1attvv4bm5ecDPNQprx+ZDP4eiPcOIY2AKhfvcqfS6H0+ClJbsAuKtHQ8L9tZaZiIiIK9ElVVxYp06duPfee9mwYcMZj8nJyeGtt96iVatWfPHFF9WYTkTEfOnp6fTr14/du3cTFxfH8mXLqLf7febt+oQcDyufx7XGMWo+z3+bxQP/2cyJEgfdGocx78EeKl7JeZs/fz4FBQUAjB49mpycHJMT1X4p+SkABNoCyzfsW1H+s2FP8Kja72G/TDrML+n5BPl4cfeVjar03CIiInLxNALLhe3YsYNnnnmGq666CrvdTocOHYiOjsZut5OVlcX27dvZtm0b7du358UXX2TQoEFmRxYRqTZpaWn069eP7du3U69ePVZ8s5j6m5+Dn2bzD6uFOa0Hcm3Cv7nzw62s+SUTgHt6NuJPA5rh6aHvb+T8NW/enClTptCnTx8Mw+DTTz8lMDCw0mPvuOOOak5XO13d4Gp6RPcgtyS3fMO+leU/G/Wu0ucpKXPy8je7AbivV2OCfLQmnoiIiKuxGIZhmB1CKvfTTz/RsmVLSkpKWLhwIWvWrOHAgQMUFhYSHh5Ou3btGDBgAK1atTI76ilyc3MJCgoiJyfnjB/sRUQuxslpgzt27CA6Opp5894nI+kpBhxMAosHXDuNn+sM474PN3EoqxAfLw9euKENQ9pGmx1dTFIVfdN3333Hww8/zN69ezl+/DgBAQGVrp9msVg4fvz4xUZ2aab09SUn4IX64CiB8RshvGmVnfqDtft57MttRATYWP1oH3y8Pars3CIiUj30d2jtpxFYLqxdu3akpqYSERHBo48+yoYNGwgLCzM7loiIqY4ePUrfvn3ZuXMnMTExfP3FDP4vaRIpVgPvwFD6XPcOc3KbMvmN7ykucxIX6subt3egRV19kJGL0717d9atWweA1Wpl9+7dREZGmpzKjaSsKy9eBdaDsKpbYL24zMHrK/YAMKFvExWvREREXJTmULiw4OBg9u3bB8D
+/ftxOp0mJxIRMdfhw4fp3bs3O3fuJDY2lnWzX6Ll8gfpUpBHtNNC9OCZ/G1bJA/N/pHiMie9m0Uwb3wPFa+kSgwfPpzc3PKpbO+++y4BAQEmJ6rdCkoLmLJmCm9vfRun4YS9v65/1ag3VOGVQ+duOUxabjF1Au2M6HTuF88RERGR6qURWC7s+uuvp1evXtStWxeLxULHjh3x8Kj8W8GThS4Rkdrq0KFD9OnThz179lC/fn3Wv/UQkd88AM5S/hzWgf0Jr/Lnr/NYn7wfgPF9mvDQVZfhYa26P3TFvZ1cxD0wMJAxY8YwcOBAfHx8zI5Va+3N3sv8ffMJ9wnn7tZ3X5L1rxxOgzdXlX+GuvvKhnh76rtdERERV6UClgv717/+xfDhw9mzZw8TJkxg7Nix+rZXRNzSwYMH6dOnD/v27aNBg/qMey6eN3a+zOPOUiyXD2Vbpxe498NtpOYW4W/z5KWb2jKgZR2zY0sto0Xcq1e4TzgT2k0ov1OUC6lby39v2LPKnmPp9lT2ZRYQaPfk5s4afSUiIuLKVMBycVdffTUAmzZtYuLEiSpgiYjb2b9/P3369GH//v00b9KQ9//amruK92AEBnBto2v5JfwRHn97CyUOJ40i/PjX7R1pEulvdmyphWbOnEliYiILFizAYrHw17/+9YyLuKuAdfGi/aMZ22Zs+Z19KwEDguIgoGqK04Zh8Mavo6/u6NoAf5s+FouIiLgy9dQ1xLvvvmt2BBGRapecnEzv3r05ePAgnVs2YvW4WGzJK/m/oCC8W93E53mj+HjONgD6Xx7FSze1JcDuZXJqqa26deumRdzNcmhj+c+YjlV2ynX7jvNjSjY2Tyt3dm9QZecVERGRS0MT/UVExCXt3buXXr16cfDgQfr2a8yiMT7Y0raALYiBCW/z0S/D+Hj9QSwWeKT/Zcy8rYOKV1JtkpOTiYiIMDvGORk2bBghISHccMMNp+2bP38+zZo1o2nTprz99tsmpDuz5Jxk8kvyy+9UFLA6Vdn5Z67aC8BNHWMJ97dV2XlFRETk0tAILBERcTm7d++mX79+HDp0iGE3NCJtkI1Hi4t5wxrHzt7/ZvSXuWTmZxNo9+SVke3o00yjYOTS++mnn2jVqhVWq5WcnBy2bt16xmPbtGlTjcnObuLEiYwZM4b33nvvlO1lZWUkJiayYsUKgoKC6NChA8OGDSMsLMykpP/jNJxc/9X1lDpLWTx8EdGHNpTvqKIC1rYjOazanYHVAmOvbFQl5xQREZFLSwUsERFxKVu3buWqq64iLS2NyVfHcUf7AkZhp9AWwH+avMwzn2ZS5jRoXieAN2/vQP0wP7Mji5uIj48nNTWVyMhI4uPjsVgsGIZRsf/kfYvFgsPhMDHpqXr37s3KlStP275+/XpatmxJvXr1ABg4cCBLlixh5MiR1ZzwdLnFufh6+ZJXkkdESSGcyASrF9RpXSXnf2t1+dpX17SJJi7Mt0rOKSIiIpeWphCKiIjLWL9+Pb169SI9LY1/3RzH1C7ZtCgu5C2fy6lrf4W/fZNPmdNgcNto/vtANxWvpFr9dtpgcnIy+/btIzk5ueJ28v6+ffvO+ZyrV69m8ODBREdHY7FYmDt37mnHzJgxgwYNGmC32+nSpQvr16+vkvYcOXKkongFUK9ePQ4fPlwl575YwfZgvr35W9aOXIvXkaTyjXXbgJf9os+dnlfEgq1HAbhHo69ERERqDI3AEhERl7B69WquueYaykoLGPt8U64uPQZlkNtpAn/dm8DWI7lYLTBlYAvuvrJhpVd/E7mU6tevX+nvF6OgoIC2bdsyZswYhg8fftr+2bNnk5iYyMyZM+nSpQvTp09nwIAB7Nq1q2IB+fj4eMrKyk577JIlS4iOjq6SnGbx9fKFKp4++PEPKZQ6DDrUD6F1TFCVnFNEREQuPRWwRETEdIsXL2bYsGH4WYq47dnGfBNuY2JJBM/FTmDk+iZkncgn1M+b10e2o1uTcLPjipv
66quvzvnYIUOGnNNxAwcOZODAgWfcP23aNMaOHcvo0aMBmDlzJgsWLOCdd95h8uTJACQlJZ1zrt+Kjo4+ZcTV4cOH6dy5c6XHFhcXU1xcXHE/Nzf3gp7zglThAu6lDif/+eEAAHd0rZoipIiIiFQPFbBERMRUc+bMYcSIETQKdLDi7gg8T2RysLQuV4bcyaBV9XEapbSuF8TM2ztQL9jH7LjixoYOHXrK/crWwDqpKtbAKikpYdOmTUyZMqVim9VqJSEhgbVr1170+Tt37szPP//M4cOHCQoK4uuvv+axxx6r9NipU6fy5JNPXvRznquPd37M9mPbuabBIK5I21a+sW78RZ938bZU0vOKiQiwMbBV3Ys+n4iIiFQfrYElIiKm+fDDD7nxxhvp1sTCxvuDqWsvIiwgjraezzB9Y2ucBtzQIYbP7uuq4pWYzul0VtyWLFlCfHw8X3/9NdnZ2WRnZ7Nw4ULat2/PokWLquT5MjMzcTgcREVFnbI9KiqK1NTUcz5PQkICN954IwsXLiQmJqai+OXp6clLL71Enz59iI+P5+GHHz7jFQinTJlCTk5OxS0lJeXCG3YOvj/yPXP3zOVg2mYoKwRPO4Q2vOjzfrC2fPTVyM5xeHvqY7CIiEhNohFYclbPP/88U6ZMYeLEiUyfPt3sOCJSi7z55pvcf//9XDMylPSEKHalZtAyJJ47CiaxfqcVT6uFJwZfzm1X1Nd6V+JyJk2axMyZM+nRo0fFtgEDBuDr68s999zDjh07TEx3qm+++eaM+4YMGXJO0x1tNhs2m60qY53VjZfdSKuwVsQ7fv2oGt4UrB4Xdc59Gfn8kHwcqwVu7hRbBSlFRESkOqmAJWe0YcMG3nzzTdq0aWN2FBGpZV566SUefeQRnu5r41j3IPZbrXxepwX3HphEZpGViAAb/7y1PZ0ahJodVaRSe/fuJTg4+LTtQUFB7N+/v0qeIzw8HA8PD9LS0k7ZnpaWRp06darkOVxVz5ie9IzpCWteKt8Q0eKizzl7Y/mosV6XRRCtEZ0iIiI1jsZOS6Xy8/O59dZbeeuttwgJCTE7jojUEoZh8Le//Y2/Tn6Ej6/34S9X2ng24xi3OZoze3cimUVW2scFM//BHipeiUvr1KkTiYmJpxSX0tLSePTRR8+4EPr58vb2pkOHDixbtqxim9PpZNmyZXTt2rVKnsPlpe8s/xnZ/KJOU+pw8sWmQwDc3DnuYlOJiIiICTQCSyo1btw4rrnmGhISEnjmmWfMjiMitYDD4WDChAl8/vm/eOpPUYzwKsSwevFx2EO8sTsegFu7xPHE4JZam0Zc3jvvvMOwYcOIi4sjNrZ8OlpKSgpNmzZl7ty553ye/Px89uzZU3E/OTmZpKQkQkNDiYuLIzExkVGjRtGxY0c6d+7M9OnTKSgoqLgqYW2UX5LP0YKj1PWri3/Gr1MxL3IE1rIdaWTmlxDub6Nv88gqSCkiIiLVTQUsOc0nn3zC5s2b2bBhwzkdb+qltUWkRiguLuaOO+7gp+/+S+unG/O+zYvLsk6wsngSc1Ia4e1h5ZmhrbhJ69JIDdGkSRN++uknli5dys6d5aOEWrRoQUJCwnmt2bZx40b69OlTcT8xMRGAUaNGMWvWLEaMGEFGRgaPP/44qampxMfHs2jRotMWdq9NNqdvZtyycTQPacZnGbvLN17kCKzPNpaPvrqhQwxeHiqQi4iI1EQqYMkpUlJSmDhxIkuXLsVut5/TY6r70toiUrPk5eUxfPhwHHtWsPY2X94rPMHXnoHMyLuf3QWNqBtkZ+ZtHWgbG2x2VJHzYrFY6N+/P/3797/gc/Tu3RvDMM56zPjx4xk/fvwFP0dNc6LsBEG2IOp6B4GjGDx9ILjBBZ/vWH4xq3ZnAHBDh3pVlFJERESqm76CklNs2rSJ9PR02rdvj6enJ56enqxatYpXX30VT09PHA7HaY+p7ktri0jNkZGRQb9+/Yg5torFt/kSbLcwvCyOtD0
Ps7ugBV0ahjLvwR4qXkmN8Mknn5zzsSkpKXz33XeXME3tdXWDq/n25m+ZFje4fEPEZWC98I+s8348QpnToE1MEE0iA6oopYiIiFQ3FbDkFP369WPr1q0kJSVV3Dp27Mitt95KUlISHh6nX8LaZrMRGBh4yk1E5MCBA/S8sgcNex4m9K4YPDwsrPXtQ0LGw2Q5IxnTvSEf3t2FcH+b2VFFzskbb7xBixYtePHFF9mxY8dp+3Nycli4cCG33HIL7du359ixYyakrD08M34p/+Ui17+as+UwAMPaafSViIhITaYphHKKgIAAWrVqdco2Pz8/wsLCTtsuInIm27Zt47pB/ZnUr4B/t4phu8VCwIkOzDo2CruXB69c34br4vXHpNQsq1at4quvvuK1115jypQp+Pn5ERUVhd1uJysri9TUVMLDw7nzzjv5+eefa/U6VdXi5ALukRdewNqbkc+Ph3LwsFoY3Da6ioKJiIiIGVTAEhGRKrV27VpG3TCID64uoWusJ3GZ2Xxq9GbWsZHEhvrw5m0duTxaIzWlZhoyZAhDhgwhMzOTb7/9lgMHDlBYWEh4eDjt2rWjXbt2WC9iupvA1B+mkl+az93HdtAQIKLZBZ/ry19HX/W6LEKjPUVERGo4FbDkD61cudLsCCJSQ3z99df8+S8388nNFtoHeHLCI4D/HJvED0YLrmwazmsj2xHs6212TJGLFh4eztChQ82OUSstT1lOakEqI/PzyzcE17+g8xiGwfytRwG4Ll6jr0RERGo6FbBERKRKvP/++7z34QRs42P4R2kpz2VYGVPwMPuMaB7o3ZiH+zfDw2oxO6aIuLgH2z1IZt5h6n315/INQTEXdJ6dqXnsyyjA29NKvxaazikiIlLTqYAlIiIXxTAMnnn6aTIWPMvb1/hxOwYl+DD0xERKvaJ548a2DGxd1+yYIlUqJCQEi+X0gqzFYsFut9OkSRPuvPNORo8ebUK6mm1I4yGQvgOcTrAHgf3Cphwv/HX0Ve/LIvC36SOviIhITafeXERELlhpaSkPPnAf8Uc/4rGBdnA6uOVwfV4uuJvY8DDevL0DTaN02XqpfR5//HGeffZZBg4cSOfOnQFYv349ixYtYty4cSQnJ3P//fdTVlbG2LFjTU5bA2WnlP8MirughxuGwYKfygtY17RRAV1ERKQ2UAFLREQuSF5eHqPuuA5b9/30aeCHs7SMqaUjeavoGhJaRDFtRDyBdi+zY4pcEt9++y3PPPMM99133ynb33zzTZYsWcIXX3xBmzZtePXVV1XAOg8nSk9wtOAowcd2EQYQHHtB59lxNI99mZo+KCIiUpvoMjkiInLejh49yu2DulK/+z5+jvDj0cgI7i55iLed1/JQQjP+dXtHFa+kVlu8eDEJCQmnbe/Xrx+LFy8GYNCgQezbt6+6o9VoO4/vZOiXQ7lj73/KN1zg+leLtqUC5Vcf1PRBERGR2kEFLBEROS/bt2/n4WEdeLfbQf52Ipv2hQ4KDt/CBu8r+PeojkxMaIpVi7VLLRcaGsq8efNO2z5v3jxCQ0MBKCgoICBAU2jPR7GjmCBbEMHGrxuCLmwE1jfb0wAY0LJOFSUTERERs+krKREROWcrV67kP9Nu4P3+JXhaLWwua8K2/YmERcXw1e0daBjuZ3ZEkWrx2GOPcf/997NixYqKNbA2bNjAwoULmTlzJgBLly6lV69eZsascbpGd+Xbm7/F+Hf/8g0XMIXwcHYh24/mYrVAn2YRVZxQREREzKICloiInJOP//MBX+34C9uH1+H7tAyO5XdgculY+rdtwAvXt8bXW12KuI+xY8dy+eWX8/rrr/Pf//4XgGbNmrFq1Sq6desGwMMPP2xmxBrNknOo/JcLGIG1bEf56KsO9UMI87dVZSwRERExkf7aEBGRszIMg+kvPEXzn1+kWZdIfrZYeMfaiTWO+/nztZczpnsDLBZNGRT30717d7p37252jNrHUQp55VcQvJAC1tJfpw9
edbkWbxcREalNVMASEZEzKikp4YkHb+N2z4Vc3sSTbpn57My5mp8Zzn/ubs8VjcLMjihiGofDwdy5c9mxYwcALVu2ZMiQIXh4eJicrOb6dNenJB36loF2b64sMcDv/KYA5haVsm7fMQASdPVBERGRWkUFLBERqdSxY8d4+M/9aNLsOC1yPThqhHB3ySN4R7Vj3q3tqRvkY3ZEEdPs2bOHQYMGcfjwYZo1awbA1KlTiY2NZcGCBTRu3NjkhDXTxtSNfH1oBS28vLjSJwKs53e9oTW7Myl1GDQK96NRhP8lSikiIiJm0FUIRUTkNDt37GD6A235sWsZs8OCmOnTiCHFzxLfuRef3HOFilfi9iZMmEDjxo1JSUlh8+bNbN68mYMHD9KwYUMmTJhgdrwaa0iTISTW7UuHoqILmj64encGAH2aR1Z1NBERETGZRmCJiMgpli9eQOa7t/B0C2iYBcs94ng9/xH+dkMnbux4YZe0F6ltVq1axbp16wgNDa3YFhYWxvPPP691sS5Cj3o96LFnLZSUnvcVCA3DYPUv5QWsnpfp6oMiIiK1jQpYIiJSYcY/H6Plz69wU3MLZYaVfZnD2ec/hE/u7UTrmCCz44m4DJvNRl5e3mnb8/Pz8fb2NiFRLZJzsPzneY7A2puRz9GcIrw9rXRuEPrHDxAREZEaRVMIRUSEsrIy/vzXIXzi/QUftYkkzQjgttI/c6DRrcyf0FPFK5Hfufbaa7nnnnv44YcfMAwDwzBYt24d9913H0OGDDE7Xo21L3sfGTkHcQIE1juvx67anQlAl4ah+HhrIX0REZHaRgUsERE3l5uby4y7OnOL7XuKPSwctvowzPFn2vcczKzRnQn102gSkd979dVXady4MV27dsVut2O32+nWrRtNmjRh+vTpZserkRxOB9d9eR19nfvIslrBL/y8Hn9y/aueTTV9UEREpDbSFEIRETeWvGc3PzzZm4mNC8ABdxyJYFbpOJ65+UqublXH7HgiLis4OJgvv/ySPXv2sGPHDgBatGhBkyZNTE5WcxWUFRBsCya3KJtApxN8Qs75sUWlDn5IPgZo/SsREZHaSgUsERE3tXDh+3y880mea1aMs9TCP8puYnnQrXx6Wwddfl6kEomJiWfdv2LFiorfp02bdqnj1DqB3oGsuXkNzhcalE8ROI8C1qYDWRSVOokMsHFZlP79EhERqY1UwBIRcTOGYfDRS5NZ4/U5SWG+/J+fF7YDtxDW7jrmXNdKa8eInMGWLVvO6TiLxXKJk9RiTifWwuzy38+jgLVuX/noq26Nw/TfX0REpJZSAUtExI0UFhbywSODuD1kM1fZPJhos5OeNoZ7ho3kpk7nd8UvEXfz2xFWcokU5wBG+e/24HN+2A/7jgNwRaOwqs8kIiIiLkEFLBERN7Fv307+++pAHoko/0Pv+5I25OY8zCtje9IyWlcZFBFzbUjdwJxt79MmwJ+biwzwPLcLSBSVOkhKyQagiwpYIiIitZYKWCIibmDF4o+ZvudvHIz3o9+RAlYUDGZ3s3v5+KZ2BNq9zI4nIsIvWb8w79BKinzs3Gyxn/PjNh/MosRRvv5VgzDfS5hQREREzGQ1O4CIiFw6hmHw1SsPc/ny+2lsKcPHafC0MYLAq//CG7d3UvFKRFxGu8h2JNYfzDX5Bee1/tXJ6YNdGmn9KxERkdpMI7BERGqposJCvvpzf64P/AkPH7glzZdd9nuZcustdGoQanY8EZFTtAhrQYuw9nBiBkQGn/PjfkguX8D9ikb6d01ERKQ2UwFLRKQW2vPLj/xt3gg6NjiBRxZ87ujJ/LhHeG/kFYT728yOJyJSucKs8p/nOAKrqNTB5oPZAHRpqPWvREREajMVsEREapl1899j74Y/82OjULYbgRw8PpCYHg/x76ua4WHV9BoRcU05xTkU5R3B32LB7xwLWD8fzqGkzEm4vzeNI/wucUIRERExkwpYIiK1hNPp5Kvn7qRf4Tyu8HKSdty
P74pu5PrbH6J7k3Cz44mInNUL619g3qF5PBLgz6hzLGBt+XX0Vfu4EK1/JSIiUstpEXcRkVogLe0gDz/fhv6OrwjwcrLWcTm77S8zc/zjKl6JuLlhw4YREhLCDTfccMr27OxsOnbsSHx8PK1ateKtt94yKWE5q8WKJxYscM5TCLeklE85jI8LvmS5RERExDVoBJaISA23/dv5vLA5kc31bNjzQqmX2gNr3yn8s3dzrJoyKOL2Jk6cyJgxY3jvvfdO2R4QEMDq1avx9fWloKCAVq1aMXz4cMLCzFlL6pkez/DMwT2Q+/W5F7B+HYHVLvbcr1ooIiIiNZNGYImI1GDLX59I7KI7mFiQTViZkwMFg7ni7uk80LeFilciAkDv3r0JCAg4bbuHhwe+vr4AFBcXYxgGhmFUd7xTVSziHvyHhx7NKeRoThFWC7SJCbq0uURERMR0KmCJiNRA2Vlp/Ocv7embOYsATwelhQ1pVfoUM8b/nY4NdCl5kZpi9erVDB48mOjoaCwWC3Pnzj3tmBkzZtCgQQPsdjtdunRh/fr1Vfb82dnZtG3blpiYGB599FHCw02ecnweVyFM+nX0VbM6gfjZNKlARESktlMBS0Skhln33Wfc8Ulv/t2okEyrlTfKBvNzv/d57d7hhPh5mx1PRM5DQUEBbdu2ZcaMGZXunz17NomJiTzxxBNs3ryZtm3bMmDAANLT0yuOObmG1e9vR44c+cPnDw4O5scffyQ5OZmPPvqItLS0Kmvb+fpyz5c845HHOrvt3ApYKdkAtNP6VyIiIm5BX1eJiNQQhmGw7J+JtEr9EGtcKCUWTx7xupPEMU/QJibY7HgicgEGDhzIwIEDz7h/2rRpjB07ltGjRwMwc+ZMFixYwDvvvMPkyZMBSEpKuugcUVFRtG3bljVr1py22DuUTzEsLi6uuJ+bm3vRz/l7PxxdxzxfT2ILvbniHApY/1v/KrjKs4iIiIjr0QgsEZEaIPt4Kl8/3JmEjHeo41HC3an+dDb+j9ceelHFK5FaqqSkhE2bNpGQkFCxzWq1kpCQwNq1ay/6/GlpaeTl5QGQk5PD6tWradasWaXHTp06laCgoIpbbGzsRT//7/WL7sH9WTm0LS7+wxFYpQ4nPx3OBqBdnBZwFxERcQcqYImIuLily/7FbZ/2xVL3EABvlQ3COehzpt11BwF2L5PTicilkpmZicPhICoq6pTtUVFRpKamnvN5EhISuPHGG1m4cCExMTEVxa8DBw5w5ZVX0rZtW6688koefPBBWrduXek5pkyZQk5OTsUtJSXlwht2Bv3C2/JAdg7xZYCX71mP3ZOeT1GpkwCbJ43C/ao8i4iIiLgeTSEUEXFRhtPJ8pfvYod1OQdCA3nVI4TVpeMZN/EhYkLO/sediMhJ33zzTaXbO3fufM7TD202GzabrQpTVeK3C7hbzn4V1e1HyqcwtogO1BVXRURE3IQKWCIiLijr8F62v3wd/fxT6AXss4RTN+YxHrnrOjz0x5qIWwgPD8fDw+O0hdXT0tKoU6eOSakunRP5qZRZLdh8gvmjUtn2o+UFrMvrBl76YCIiIuISNIVQRMTFfPjpZJ7/fADd/FMoMTx4uexmbr9uEf93w1AVr0TciLe3Nx06dGDZsmUV25xOJ8uWLaNr164mJrs0ntn5Pt3rx/KJ7x9PjT45AuvyaBWwRERE3IVGYImIuIiSwgK+en4IrzRIpSjYh8Yl0aR4P8Sf7h2tta5Eaqn8/Hz27NlTcT85OZmkpCRCQ0OJi4sjMTGRUaNG0bFjRzp37sz06dMpKCiouCphreIoKf/p6XPWwwzD0AgsERERN6QCloiIC9i3eTl5H4/lBr9MirP8WeQZR2CX13i6Z0ezo4nIJbRx40b69OlTcT8xMRGAUaNGMWvWLEaMGEFGRgaPP/44qampxMfHs2jRotMWdq8NngrtzJNbFmFt0/2sxx3JKSKnsBRPq4WmUf7VlE5ERETMpgKWiIiJnA4
HU2fexPWZ39HWr5Bcw5e9BaN44cHHiA4++ygEEan5evfujWEYZz1m/PjxjB8/vpoSmcezKKf8F9/Qsx6349fpg00i/bF5elzqWCIiIuIiVMASETHJsYM7+fvsm1gQabDbM4AHDtVjW/tn+et1A3RVLRFxP7+9CuFZaPqgiIiIe1IBS0TEBGvee5IWv7zBeHsJ3zrq4FnQmOD7P2RMdLjZ0URETLGw8DA7QoLpWXqcTmc5Tgu4i4iIuCcVsEREqtHRo7v54u1bGO/4BTxhR0ksQ40JTJp4N54eujCsiLivNWVZzA8OJLw48+wFLI3AEhERcUsqYImIVJNFnz3P33PeJzvGyoDDXqwp7Eu70S/xSJNYs6OJiJjuSs8QwjP3cXm9iDMeU1BcxsHjJwBooQKWiIiIW1EBS0TkEivKy2LF1Ou52nsTc6Mi2IuNd0Lu44n/exK7lxYgFhEBGOQVzqCsbPCNPuMx+zIKAAj39ybEz7uakomIiIgr0HwVOc3UqVPp1KkTAQEBREZGMnToUHbt2mV2LJEaac68aRyY2p6B3puwAJ2PXsaf2v+HqeOeUfFKROQ87c3IB6BRhL/JSURERKS6qYAlp1m1ahXjxo1j3bp1LF26lNLSUvr3709BQYHZ0URqjMK8LB55qQdPHnuH5ZFlpDmDeSXgUW55bB5XtY83O56IiMspMwxKAYfhPOMxJwtYjVXAEhERcTuaQiinWbRo0Sn3Z82aRWRkJJs2baJnz54mpRKpOdZ9+TYRPzxL35AiFlvCWesRRevBbzGxUzuzo4mIuKzHivYwv2Ecj2RvZdQZjvlfAcuv+oKJiIiIS1ABS/5QTk4OAKGhoZXuLy4upri4uOJ+bm5uteQScTWZGQdZ8Pod3E4SVm8D//wghnr15K/jX8XmqemCIiIXa296+WjwxpEagSUiIuJuNIVQzsrpdDJp0iS6d+9Oq1atKj1m6tSpBAUFVdxiY3VFNXE/H3/0OCPnDmROzFHKLAZzSrtw8PqFPD1phopXIiLn4DF7I747kMLNQS0r3e9wGiRnlhewmmgKoYiIiNtRAUvOaty4cfz888988sknZzxmypQp5OTkVNxSUlKqMaGIubLSD7NgSm8G7nmNEg+DXIsnL4Xdz+CnFtGpTeVFXxEROZ2vxYNAp4HNWvkEgUNZJyhxOLF5WokO9qnmdCIiImI2TSGUMxo/fjzz589n9erVxMTEnPE4m82GzWarxmQi5jOcTt5+ayKDD83lGlsuOGHYoXpcMfwlrmjVxux4IiK1zsn1rxqG++FhtZicRkRERKqbClhyGsMwePDBB5kzZw4rV66kYcOGZkcScSm7fvyOp9Y8wE8BTjr6FlFUGMGKeuOYMHYSVv1RJSJyQb4pPcbu4CC6FaYSX8l+rX8lIiLi3lTAktOMGzeOjz76iC+//JKAgABSU1MBCAoKwsdHQ/bFfZWVFDP3xbEMLP2axhH+bDf8+MCzDYn3f8ToulFmxxMRqdGWlR1nfkgQfkXplRewKq5AqAKWiIiIO1IBS07zxhtvANC7d+9Ttr/77rvceeed1R9IxAV88uU/aLjhbW7wPAoW6JceRGz0WMbeOc7saCIitcIVnkH4HU+mWb3Kr3r8vwKWX3XGEhERERehApacxjAMsyOIuIzj6YeZ+v5NLAnP4caoIppn+vKZ1zBueeRletm19puISFW5ziuS645lgW/l627uy/h1CqFGYImIiLglFbBERCphOJ3Mfe1PdMuczY1+xSyyRLGbMHYNnsHdHa8wO56IiFtxOg2OnygBIDJAXx6IiIi4IxWwRER+Z+6iN8ne8Dp3Og6CBxQWRDDqxEAeHv8CFosWaRcRqW4FJWWcHCAe6ONlbhgRERExhQpYIiK/OnY0hXffuYMP62UQFu3g2hQbCx39GDDhVR6JiDA7nohIrfaXwj0sbBBLYtZP3P67fblFZQB4eViweVqrP5yIiIiYTgUsEXF7htPJF9MfpmfWZzzomcf
ysroEF/mwvte/uKPPULPjiYi4BScGZRYLzkr25RWVAhBo99JIWBERETelApaIuLX/zP0HW/e+x9S8g1g8YF9ZJMNKb+bucU/ojyQRkWo02d6ASdt/xL9x89P25RaWj8AKsOujq4iIiLvSpwARcUt7tm1i80eTeLnJcYrDrfQuCyC9oCfXTHqdsWGVX8JdREQunSCLF0EOB1i9T9tXMQJL61+JiIi4LRWwRMStZB9PZ8kr47mGldzkVczh3CA2WyIwrpjBHb2uMzueiIhUIq9II7BERETcnT4FiIhbcJSV8eSbd7LBezPveKbi53CwpbQ+9SPG89At95gdT0TE7a0uy2JvUACditJp9bt9ub+OwAqwaQSWiIiIu1IBS0RqvaWfv03o5ldIiSvikLed14IiaF4ymJETn6edl/4ZFBFxBV+XZjI/NIRHClNPK2CdHIEV6KN/s0VERNyVPgWISK311dJZeC+fydW2HeANE4758pZPHPeNfIe4uvXNjiciIr/R3iMQa9Z+GkcHn7Yvt/DXEVh2jcASERFxVypgiUits+unjby+dDxrwvJ5OCIbR46FBSUdqD/kOf7ZvovZ8UREpBI3ekdxY+ZxaB8HwJ70PDYdyOLGDrHknhyBpQKWiIiI21IBS0RqjdRD+1k1cxKDPb6jV6AXKy1hrPKIILrLKwwZdL3Z8URE5GwM45S7f5nzMz8kHyc2xPd/a2BpEXcRERG3pU8BIlLjZRw7yrPvj2FY4TZGeOYAUP94HcZGDGXCxKdNTiciIhciNbcIgIPHT/xmDSyNwBIREXFXKmCJSI1VkJ/L3FceYb/vSpaF2sgoshOdYmdj1M3cdP8TdPL0MDuiiIicoyeL9rE4LoYHs39mJJDz67pXGXnFv1kDSx9dRURE3JU+BYhIjZOXl81n//wTV+Uv5VaP42TkW1njX5e6RS2J/vMHjPT1NzuiiIicpyKc5HlYKTGcOJ1GRdEqI7+YPE0hFBERcXtWswOIiJyr4qIinp1+O9d/0p0U3yXEehwn3RnIYstwPr5hNf+YOAc/Fa9ERE4xbNgwQkJCuOGGG07bl5ycTJ8+fbj88stp3bo1BQUFJiQs97B3HPNTjjA0sBn5JWU4f10SKz23+H9TCLWIu4iIiNtSAUtEXF5JcTEfTXuUlGdac3XhYo56W1np68csj8F4TNjMbX/9NyEh4WbHFBFxSRMnTuT999+vdN+dd97JU089xfbt21m1ahU2m62a0/1PuNWb+mVlBHnYKkZfQfkIrJOLuKuAJSIi4r40DltEXFZObhbPzRrLZYVbuaswFTwhq9CPIemx3DvyNeJiGpsdUUTE5fXu3ZuVK1eetn3btm14eXlx5ZVXAhAaGlrNyc4s5zcFrCPZhRSVOgEI9NFHVxEREXelEVgi4nKyj2fw4TN38d+ZnVgYtIuPwq2kOf342Hk1hfes5dlHF6p4JSK1wurVqxk8eDDR0dFYLBbmzp172jEzZsygQYMG2O12unTpwvr166vkuX/55Rf8/f0ZPHgw7du357nnnquS816o78uy+TjAn53FmacUsI7mFFX87m9TAUtERMRd6VOAiLiM/Qd/Yf77U7ip5Adus+ZSUgYLiqKJPhFHyT3/ZKSKViJSyxQUFNC2bVvGjBnD8OHDT9s/e/ZsEhMTmTlzJl26dGH69OkMGDCAXbt2ERkZCUB8fDxlZWWnPXbJkiVER0ef8bnLyspYs2YNSUlJREZGcvXVV9OpUyeuuuqqqmvgefiqLJMF4aE8cuIIUb8pYJ3k5+2Bp4e+exUREXFXKmCJiOl2b/+Rz+f+HwvqpNA0vITxqbkccoSwyp7AWzdMJSQswuyIIiKXxMCBAxk4cOAZ90+bNo2xY8cyevRoAGbOnMmCBQt45513mDx5MgBJSUkX9Nz16tWjY8eOxMbGAjBo0CCSkpJMK2C1tvpRUnCQ+nUDSS88vSAXoPWvRERE3Jq+xhIR06xZ+iVzpyQQN7sfdznWc8J
q4aiHN2/7jiTk/7Zy61/eVvFKRNxWSUkJmzZtIiEhoWKb1WolISGBtWvXXvT5O3XqRHp6OllZWTidTlavXk2LFi0qPba4uJjc3NxTblXtVu+6TEvPpLd/g1OmEJ4UYNf3riIiIu5MnwREpFo5HQ7e+vApVuZ9SXMjmydsWQDsKorltrzuPHD3y/j4+JqcUkTEfJmZmTgcDqKiok7ZHhUVxc6dO8/5PAkJCfz4448UFBQQExPDZ599RteuXfH09OS5556jZ8+eGIZB//79ufbaays9x9SpU3nyyScvqj3no7ICVqCPRmCJiIi4MxWwRKRanDhRwNyZf6Plsa/p5J/O69FR7HH60Sk1CufldzDo5vto6+FhdkwRkVrnm2++OeO+P5rCeNKUKVNITEysuJ+bm1sx9fBS0AgsERER+T19EhCRS+qHLat4f8WTtHMc5O6CDPCCwiIvEtL96dn2XgaNvtPsiCIiLik8PBwPDw/S0tJO2Z6WlkadOnWqNYvNZsNms13S53iuOJllsdHcl7Od3KLuAHhYLTicBgCBWgNLRETEramAJSJVzul0svizdzCSPuBE6F5WR4bwS5kXg/P8WUFXOt3yBC83b212TBERl+bt7U2HDh1YtmwZQ4cOBcr/fV22bBnjx483N9wlkGs4SPf0pNAoqxiBVT/Ml30ZBYBGYImIiLg7fRIQkSqTlnmElz+aRNvc7YwsSwEbFJ2w8N+CEBo64/F+6DVuDgk3O6aIiMvIz89nz549FfeTk5NJSkoiNDSUuLg4EhMTGTVqFB07dqRz585Mnz6dgoKCiqsS1iYTvGO588BWIhpdxpz95QWsJhH+vylgaQSWiIiIO1MBS0Qu2polc0ld+RaHwneyIMyXTO9ihh/15JuS1nh3Gs1Hw0eZHVFExCVt3LiRPn36VNw/uc7UqFGjmDVrFiNGjCAjI4PHH3+c1NRU4uPjWbRo0WkLu9cG0VYb0SWl4OlTMQKrSaQ/S7aXT6EM9NHHVhEREXemTwIickFSjh7g9c8eofvxXQzhAHjDkRMezA+041NYn5SR/+UaTRMUETmr3r17YxjGWY8ZP358rZwyeDa5hWUANI3yr9imEVgiIiLuTQUsETkvS7/8D/nr3mNdvWQWBtnxt+QxKNPKmpJmHKtzDfNvTcTHx8/smCIiUsNsdORyyN+PlkXHyD05AisioGJ/oNbAEhERcWv6JCAif2jt5uV8supFbso6wFXWI2CD6Hwbm+zeHC9pyi/XT6VPm45mxxQRkRrs89J0FkSEkViQQomjAwBxYb54Wi2UOQ1dhVBERMTNqYAlIpXKzEhjyfsvEpu5mjcb5PBjqI2ORh4dczxZWdKCwvpDWXjrg3h7X9rLqouIiHtobvUl90QhEeHlo3g9rBYC7Z5EBdo5nF1IsK8KWCIiIu5MBSwRqVBUVMiMjx9jZ84q/p51gFssxWCDw/n+5Bs2dtOZI7c/y4AmLcyOKiIitcyd3tHcmfYt6Y3igPIpgxaLhSmDmrNxfxZtYoLNDSgiIiKmUgFLxM2VFBez6LN/UbZ9Pl09tzOvfgDHQjz4qdhCg/ww1tKO+u3vZm6fgWZHFRERN1BU5gAgyKd8xNW1baK5tk20mZFERETEBaiAJeKGykpLmT37ZVZk/heLdzZvpR8F7/J9g/K82WEJY0/MBHrcPJkRHh7mhhUREbdSVHJqAUtEREQEVMAScRu5ednM+/yf2H9ZQ3fPbQz0zOPFuHo4LV5s9/Aj+UQzChsk8MAtD+IfEGh2XBERcTN/Lz7Aypi6DCj8BWhNoApYIiIi8hsqYInUYocPJrP6i5nsL1zFV3XyuLKwkBe9jwGQ6/Chf4YfwUEdqPvgU1weHG5yWhERcWfHjFIOenmR4ygGUAFLRERETqEClkgts2T1f/l68zv0yDrCYPYz0uIgyebNRx51+Mnbzrzi9hTG9WbgLRP5e1Cw2XFFREQAuM87mhEHf+Zg9CBAUwhFRETkVCpgidR
wh4/s5/uvPsTj4LfEW/fwdbTBNyG+XEY23tkO9pRFsrO0Ofd5dWDMTX/Cx8fX7MgiIiKnaWD1oUFxCeml5YsyqoAlIiIiv6UClkgNU1ZayqpFn3Fo81fMj9jOAR8Hy9MO4+9tANDthD8HLb6klLVlba8H6NpnEE1MziwiInKuCku1iLuIiIicTgUsERdXVlrKlwvfZU3yHEKKM3iw4Aj9rAUAfOZdl0KrF4u9w/HIbUhxdBd6XHcXN8Y1NDm1iIjI+fnJkc9RXx8Ol+UAEGhXAUtERET+RwUsEReTX5DLp4vepGzHFpoW7KOd1wEi/JwsqxNJXGkpoYUFnDC82VDSkG5ZcdzddjCDb70VD0+9nUVEpOb6qDSVBVERXJF1FIAGYZryLiIiIv+jv3hFTLZu4woOrF2GJfUnGhgHeS+mgNV+dv7sc5w+ZfkAXFZo5/J8C5ElMcxvPom+g2+jl58/vUzOLiIiUlUaWX3oWFiEpdATiwVaxwSZHUlERERciApYItVozy8/s3XNYk6kJGEv3cs/G+SQ7WFh3bEUvH6dKbGpJJDNdm9+dkbyEb0Ja9WPXtfczGwtvi4iIrXYPd71uCf1e54ujaBxhD8BmkIoIiIiv6EClsgl4CgrY8sPq9i7eQVG+nb2+R1gaXgR/U8UMOV4FgCGJ7xsrUeZxcI3lmhKiuqRH9yCZpf3YlXvwXh720xuhYiIiDnaxgSbHUFERERcjApYIhchNy+b77//mpzd23Bk7iG45CjvxR0j2W7wyZFUOpaWgQfM8/TlY69wdtq82O8I55eyOmR4x3GbR1MG9ruD+jG6TqCIiMhJ8bGaPigiIiKnUgFLKjVjxgz+/ve/k5qaStu2bXnttdfo3Lmz2bFMkXYkhWUbvmTP0STqZp4g7MRxwozjHAjK4eU6XnQqKuJfxzLAAtjgPY8oCq02dnvaKDwRSbKzLke96jHWI45BV99Gg/otaGB2o0RERFzM9OKDfBtdh+DjaQzRCCwRERH5HRWw5DSzZ88mMTGRmTNn0qVLF6ZPn86AAQPYtWsXkZGRZserMkVFhWzfs4X05L3kH0mhOOswx0oPkRRwlGBHKaOPlxDtkU2UNZ+50XXY4ePNa14Z9PYuBCDJ8KbMUofDHl5sLY0hxRlOtnddrigN46bY9nQbfj2BAcG0NLmdIiIiNUFKWTG7bN608yyjed0As+OIiIiIi1EBS04zbdo0xo4dy+jRowGYOXMmCxYs4J133mHy5MnVmiUn6xhZ+ccpLC7EWgaUOSgrLSa/KJ/MogyMMie+xR4U5udQUpDL7uI95DhyiTlhI6jYibWskCxrFj+EZhHscDA600GQ9QTBlkIm1PMnyW7jlbQMbjhRXpTaZLPxVkgUcaWltM47WpGjYbGTYsPgx9L6pJfVodSvLt6RDZjetAVXdrwab28brav1v4yIiEjtMrQkjOszd/KDV09snh5mxxEREREXowKWnKKkpIRNmzYxZcqUim1Wq5WEhATWrl1b6WOKi4spLi6uuJ+bm1tleTJeuoK/xlnYZrMxIzWdnoVFAHzvY2dynUiaFZfw+ZHUiuNH1Y1ks5+dlwoz6O8sBCtsttl4I6i8KBWf87+iVJCz/Kp+KRZftpeGkOkM4FCZHz2OOwm0hPBp1EjCYprSvG1XXohrWGVtEhERkdOFnPCgVVERuWEhZkcRERERF6QClpwiMzMTh8NBVFTUKdujoqLYuXNnpY+ZOnUqTz755CXJ48CKh2GU/26xAFBqeGA4PfB1OvEyLKQ7AzlheFPo9CLihBctHBb2FcfydbEPJXhzzLDR53gp/pYgZoffhG9IJEHh0dxVpw5PN2hGSFDYKc95yyVpiYiIiJxNcZkTgCAfL5OTiIiIiCtSAUsu2pQpU0hMTKy4n5ubS2xsbJWcO3TCKv5ulOFj98HPxx+8bXgB3YEfKjn+H1XyrCIiIlLdDngUUeBjJ9soMDuKiIiIuCAVsOQU4eHheHh4kJaWdsr
2tLQ06tSpU+ljbDYbNpvtkuSJiKz8OUVERKR2+doni3XBkdxoHGGI2WFERETE5VjNDiCuxdvbmw4dOrBs2bKKbU6nk2XLltG1a1cTk4mIiEhtFuHwomVxMYHYzY4iIiIiLkgjsOQ0iYmJjBo1io4dO9K5c2emT59OQUFBxVUJRURERKra9YXhdMhNYm2TOLOjiIiIiAtSAUtOM2LECDIyMnj88cdJTU0lPj6eRYsWnbawu4iIiIiIiIhIdVABSyo1fvx4xo8fb3YMERERERERERGtgSUiIiIi5pvrc4zb60axwZlidhQRERFxQSpgiYiIiIjpUj1KSLLbyKHQ7CgiIiLigjSFUERERERMN6AwhJuz95FfN9rsKCIiIuKCNAJLREREREzX0GGn34lCIi3+ZkcRERERF6QCloiIiIiIiIiIuDRNIRQRERER0x32KMZpt5FlnDA7ioiIiLggFbCkyhmGAUBubq7JSURERMqd7JNO9lFycS5FX/8F6WwIDmfYiWR66DOEiIicJ/X1tZ/F0KsrVezQoUPExsaaHUNEROQ0KSkpxMTEmB2jxlNfLyIirkp9fe2lApZUOafTyZEjRwgICMBisVzUuXJzc4mNjSUlJYXAwMAqSuja3LHN4J7tVpvV5trKFdtsGAZ5eXlER0djtWoJ0Iulvv7iuWO71Wa1ubZSm12jzerraz9NIZQqZ7Vaq7ziHRgY6DL/MFYXd2wzuGe71Wb3oDabLygoyOwItYb6+qrjju1Wm92D2uweXK3N6utrN5UlRURERERERETEpamAJSIiIiIiIiIiLk0FLHFpNpuNJ554ApvNZnaUauOObQb3bLfa7B7UZpGzc9f/X9yx3Wqze1Cb3YM7tlnMp0XcRURERERERETEpWkEloiIiIiIiIiIuDQVsERERERERERExKWpgCUiIiIiIiIiIi5NBSwREREREREREXFpKmCJ6WbMmEGDBg2w2+106dKF9evXn/X4zz77jObNm2O322ndujULFy6spqRV53zaPGvWLCwWyyk3u91ejWkv3urVqxk8eDDR0dFYLBbmzp37h49ZuXIl7du3x2az0aRJE2bNmnXJc1al823zypUrT3udLRYLqamp1RO4CkydOpVOnToREBBAZGQkQ4cOZdeuXX/4uJr8nr6QNtf09/Qbb7xBmzZtCAwMJDAwkK5du/L111+f9TE1+TWWquGOfT24V3/vjn09uF9/r77ePfp6UH8vrkkFLDHV7NmzSUxM5IknnmDz5s20bduWAQMGkJ6eXunx33//PSNHjuSuu+5iy5YtDB06lKFDh/Lzzz9Xc/ILd75tBggMDOTo0aMVtwMHDlRj4otXUFBA27ZtmTFjxjkdn5yczDXXXEOfPn1ISkpi0qRJ3H333SxevPgSJ60659vmk3bt2nXKax0ZGXmJEla9VatWMW7cONatW8fSpUspLS2lf//+FBQUnPExNf09fSFthpr9no6JieH5559n06ZNbNy4kb59+3Ldddexbdu2So+v6a+xXDx37OvB/fp7d+zrwf36e/X17tHXg/p7cVGGiIk6d+5sjBs3ruK+w+EwoqOjjalTp1Z6/E033WRcc801p2zr0qWLce+9917SnFXpfNv87rvvGkFBQdWU7tIDjDlz5pz1mD/96U9Gy5YtT9k2YsQIY8CAAZcw2aVzLm1esWKFARhZWVnVkqk6pKenG4CxatWqMx5TG97Tv3Uuba5t72nDMIyQkBDj7bffrnRfbXuN5fy5Y19vGO7d37tjX28Y7tnfq6+vXG16P/+W+nsxm0ZgiWlKSkrYtGkTCQkJFdusVisJCQmsXbu20sesXbv2lOMBBgwYcMbjXc2FtBkgPz+f+vXrExsbe9ZvPmqLmv46X4z4+Hjq1q3LVVddxXfffWd2nIuSk5MDQGho6BmPqW2v9bm0GWrPe9rhcPDJJ59QUFBA165dKz2mtr3Gcn7csa8H9ffnoja8zhejtvT36uvPrDa9n9X
fi6tQAUtMk5mZicPhICoq6pTtUVFRZ1wHIDU19byOdzUX0uZmzZrxzjvv8OWXX/Lhhx/idDrp1q0bhw4dqo7IpjjT65ybm0thYaFJqS6tunXrMnPmTL744gu++OILYmNj6d27N5s3bzY72gVxOp1MmjSJ7t2706pVqzMeV9Pf0791rm2uDe/prVu34u/vj81m47777mPOnDlcfvnllR5bm15jOX/u2NeD+vtz4Y59PdSu/l59fe3u60H9vbgeT7MDiMjZde3a9ZRvOrp160aLFi148803efrpp01MJlWpWbNmNGvWrOJ+t27d2Lt3Ly+//DIffPCBickuzLhx4/j555/59ttvzY5Sbc61zbXhPd2sWTOSkpLIycnh888/Z9SoUaxateqMH2pF5I/Vhn8b5I/Vpv5eff2Z1Zb3s/p7cTUagSWmCQ8Px8PDg7S0tFO2p6WlUadOnUofU6dOnfM63tVcSJt/z8vLi3bt2rFnz55LEdElnOl1DgwMxMfHx6RU1a9z58418nUeP3488+fPZ8WKFcTExJz12Jr+nj7pfNr8ezXxPe3t7U2TJk3o0KEDU6dOpW3btrzyyiuVHltbXmO5MO7Y14P6+3Ohvv5/amJ/r76+9vf1oP5eXI8KWGIab29vOnTowLJlyyq2OZ1Oli1bdsa51V27dj3leIClS5ee8XhXcyFt/j2Hw8HWrVupW7fupYppupr+OleVpKSkGvU6G4bB+PHjmTNnDsuXL6dhw4Z/+Jia/lpfSJt/rza8p51OJ8XFxZXuq+mvsVwcd+zrQf39uagNr3NVqUn9vfp69+3rQf29uABz15AXd/fJJ58YNpvNmDVrlrF9+3bjnnvuMYKDg43U1FTDMAzj9ttvNyZPnlxx/HfffWd4enoa//jHP4wdO3YYTzzxhOHl5WVs3brVrCact/Nt85NPPmksXrzY2Lt3r7Fp0ybj5ptvNux2u7Ft2zazmnDe8vLyjC1bthhbtmwxAGPatGnGli1bjAMHDhiGYRiTJ082br/99orj9+3bZ/j6+hqPPvqosWPHDmPGjBmGh4eHsWjRIrOacN7Ot80vv/yyMXfuXOOXX34xtm7dakycONGwWq3GN998Y1YTztv9999vBAUFGStXrjSOHj1acTtx4kTFMbXtPX0hba7p7+nJkycbq1atMpKTk42ffvrJmDx5smGxWIwlS5YYhlH7XmO5eO7Y1xuG+/X37tjXG4b79ffq692jrzcM9ffimlTAEtO99tprRlxcnOHt7W107tzZWLduXcW+Xr16GaNGjTrl+E8//dS47LLLDG9vb6Nly5bGggULqjnxxTufNk+aNKni2KioKGPQoEHG5s2bTUh94U5eMvr3t5PtHDVqlNGrV6/THhMfH294e3sbjRo1Mt59991qz30xzrfNL7zwgtG4cWPDbrcboaGhRu/evY3ly5ebE/4CVdZe4JTXrra9py+kzTX9PT1mzBijfv36hre3txEREWH069ev4sOsYdS+11iqhjv29YbhXv29O/b1huF+/b36evfo6w1D/b24JothGEbVj+sSERERERERERGpGloDS0REREREREREXJoKWCIiIiIiIiIi4tJUwBIREREREREREZemApaIiIiIiIiIiLg0FbBERERERERERMSlqYAlIiIiIiIiIiIuTQUsERERERERERFxaSpgiYiIiIiIiIiIS1MBS0REREREREREXJoKWCIiIiIiIiIi4tJUwBIRcTEZGRnUqVOH5557rmLb999/j7e3N8uWLTMxmYiIiFQF9fUiIufPYhiGYXYIERE51cKFCxk6dCjff/89zZo1Iz4+nuuuu45p06aZHU1ERESqgPp6EZHzowKWiIiLGjduHN988w0dO3Zk69atbNiwAZvNZnYsERERqSLq60VEzp0KWCIiLqqwsJBWrVqRkpLCpk2baN26tdmRREREpAqprxcROXdaA0tExEXt3buXI0eO4HQ62b9/v9lxREREpIqprxc
ROXcagSUi4oJKSkro3Lkz8fHxNGvWjOnTp7N161YiIyPNjiYiIiJVQH29iMj5UQFLRMQFPfroo3z++ef8+OOP+Pv706tXL4KCgpg/f77Z0URERKQKqK8XETk/mkIoIuJiVq5cyfTp0/nggw8IDAzEarXywQcfsGbNGt544w2z44mIiMhFUl8vInL+NAJLRERERERERERcmkZgiYiIiIiIiIiIS1MBS0REREREREREXJoKWCIiIiIiIiIi4tL+H6ialkb6aHALAAAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAYAAAA10dzkAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAP6FJREFUeJzt3X18z/Xi//HnLuyCbGTZ0Ip0gQi5WGs5UYuiaccpxGGpqFCx6rByEcp0QSu5CLkohOSi0FI7h05Z1NiJXFVT/GhD2Gaysc/794ezz/csc7HZPq/PZ+/H/Xb7/PF5e7/bc+vz/uy51+vzfr+8LMuyBAAAANvwNh0AAAAArkUBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAAAACboQACAADYDAUQAADAZiiAAAAANkMBBAAAsBkKIAAAgM1QAAEAAGyGAgjAtr788kvFxMSobt268vLy0ooVKy54zLp163TzzTfL399f1157rebOnVvhOQGgvFEAAdhWXl6emjdvrilTplzU/nv27FGXLl3UoUMHpaena8iQIXr00Uf12WefVXBSAChfXpZlWaZDAIBpXl5eWr58uWJjY8+5z7Bhw7R69Wpt27bNua1nz546duyYkpOTXZASAMoHI4AAcJFSU1MVHR1dbFunTp2UmppqKBEAlI2v6QCezOFw6MCBA6pevbq8vLxMxwFsx7Is5ebmqm7duvL2rvi/ZzMzMxUaGlpsW2hoqHJycvTHH38oMDCwxOPy8/OVn5/vfO5wOHTkyBHVqlWL9w7AAFe/d7gjCuAlOHDggMLDw03HAGxv3759uvLKK03HOKfExESNGTPGdAwAf+Lu7x0ViQJ4CapXry7pzAsoKCjIcBrAfnJychQeHu48FytaWFiYsrKyim3LyspSUFDQOUf/JCkhIUHx8fHO59nZ2brqqqt47wAMcfV7hzuiAF6CoqmboKAg3sQBg1w1jRoZGak1a9YU2/b5558rMjLyvMf5+/vL39//rO28dwBm2fkjGPac+AYAScePH1d6errS09MlnbnNS3p6uvbu3SvpzMhd3759nfs//vjjysjI0D/+8Q/t3LlTU6dO1ZIlSzR06FAT8QGgzCiAAGzru+++U8uWLdWyZUtJUnx8vFq2bKlRo0ZJkn777TdnGZSkBg0aaPXq1fr888/VvHlzTZw4UbNmzVKnTp2M5AeAsuI+gJcgJydHwcHBys7OZhoHMMBTz0FPzQ1UFpyDjAACAADYTqUtgBda49OyLI0aNUp16tRRYGCgoqOj9eOPP5oJCwAA4EKVtgBeaI3PV199VW+99ZamT5+ujRs3qlq1aurUqZNOnjzp4qQAAACuVWlvA3PP
PffonnvuKfHfLMtSUlKSRowYofvuu0+S9N577yk0NFQrVqxQz549XRkVAADApSrtCOD57NmzR5mZmcXW9AwODlZERARregIAgEqv0o4Ank9mZqYklbimZ9G/leTP63nm5ORUTEAAAIAKZMsRwLJKTExUcHCw88E6wAAAwBPZsgCGhYVJUolrehb9W0kSEhKUnZ3tfOzbt69CcwIAAFQEWxbABg0aKCwsTCkpKc5tOTk52rhx43nX9PT393eu3ckangAAwFNV2s8AHj9+XD/99JPzedEan5dffrmuuuoqDRkyRC+99JKuu+46NWjQQCNHjlTdunUVGxtrLjQASWdG23///Xc9+eSTatasmek4AFDpVNoC+N1336lDhw7O5/Hx8ZKkuLg4zZ07V//4xz+Ul5enAQMG6NixY7rtttuUnJysgIAAU5EBSCosLNTs2bN18OBBPfDAAxRAAKgArAV8CVhLECh/GzZsUFRUlIKDg3Xw4EH5+fmdc19PPQc9NTdQWXAO2vQzgADcV9GyjZ07dz5v+QMAlB0FEIBbWblypSQ5V+kBAJQ/CiAAt7Fz507t3r1bVapUOedSjgCAS0cBBOA2ikb/7rjjDtt+LgcAXIECCMBtFH3+j+lfAKhYFEAAbiEzM1MbN26UJHXt2tVwGgCo3CiAANzCJ598Isuy1KZNG9WrV890HACo1CiAANwC078A4DoUQADGHT9+3Lk2N8sxAkDFowACMO6zzz5Tfn6+GjZsqCZNmpiOAwCVHgUQgHH/O/3r5eVlNgwA2AAFEIBRp06d0urVqyXx+T8AcBUKIACjvvrqKx09elQhISG69dZbTccBAFugAAIwqmj1j3vvvVe+vr6G0wCAPVAAARhjWRa3fwEAAyiAAIz5/vvv9euvvyowMFAdO3Y0HQcAbIMCCMCYounfu+66S1WrVjWcBgDsgwIIwBimfwHADAogACN+/fVXbdmyRd7e3rr33ntNxwEAW6EAAjBi+fLlkqR27dqpdu3ahtMAgL1QAAEYsWzZMklSt27dDCcBAPuhAAJwuaysLH311VeSpL/+9a+G0wCA/VAAAbjcypUrZVmW2rRpo/DwcNNxAMB2KIAAXI7pXwAwiwIIwKWOHTumlJQUSUz/AoApFEAALrVq1SqdPn1aTZo00Q033GA6DgDYEgUQgEsx/QsA5lEAAbhMXl6ekpOTJVEAAcAkCiAAl/nss8/0xx9/qH79+mrRooXpOABgWxRAAC5TtPpHt27d5OXlZTgNANgXBRCASxQUFOiTTz6RxPQvAJhGAQTgEv/617+UnZ2tsLAwRUZGmo4DALZGAQTgEkVX/8bGxsrbm7ceADCJd2EAFa6wsFArVqyQxPQvALgDCiCACrdhwwYdPHhQNWrUUPv27U3HAQDbowACqHBF079du3ZVlSpVDKcBAFAAAVQoy7JY/QMA3AwFEECF2rx5s/bu3auqVauqY8eOpuMAAEQBBFDBikb/OnfurMDAQMNpAAASBRBABWP6FwDcDwUQQIXZvn27du7cqSpVqqhz586m4wAA/osCCKDCfPjhh5Kkjh07Kjg42HAaAEARCiCAClNUAB944AHDSQAA/4sCCKBC7NixQz/88IOqVKmirl27mo4DAPgfFEAAFWLp0qWSpOjoaNWsWdNwGgDA/6IAAqgQTP8CgPuiAAIod7t27dLWrVvl6+ur++67z3QcAMCfUAABlLui0b/o6GhdfvnlhtMAAP6MAgig3DH9CwDujQIIoFzt3r1b33//vXx9fRUbG2s6DgCgBBRAAOWq6OrfO++8k+lfAHBTFEAA5coTp3+nTJmi+vXrKyAgQBEREdq0adN5909KStINN9ygwMBAhYeHa+jQoTp58qSL0gLApaMAAig3P/30k9LT0+Xj4+Mx07+LFy9WfHy8Ro8erc2bN6t58+bq1KmTDh48WOL+Cxcu1PDhwzV69Gjt2LFD7777rhYvXqznn3/exckBoOwogADKTdHo3x13
3KFatWoZTnNxJk2apP79+6tfv35q0qSJpk+frqpVq2r27Nkl7r9hwwZFRUWpV69eql+/vjp27KgHH3zwgqOGAOBOKIAAyo2nTf8WFBQoLS1N0dHRzm3e3t6Kjo5WampqicfceuutSktLcxa+jIwMrVmzRp07d3ZJZgAoD76mAwCoHH7++Wdt2bJFPj4++utf/2o6zkU5fPiwCgsLFRoaWmx7aGiodu7cWeIxvXr10uHDh3XbbbfJsiydPn1ajz/++HmngPPz85Wfn+98npOTUz7fAACUESOAAMpF0dW/HTp0UEhIiOE0FWfdunUaP368pk6dqs2bN2vZsmVavXq1xo0bd85jEhMTFRwc7HyEh4e7MDEAnI0RQADlwtOmfyUpJCREPj4+ysrKKrY9KytLYWFhJR4zcuRI9enTR48++qgkqVmzZsrLy9OAAQP0wgsvyNv77L+rExISFB8f73yek5NDCQRglG1HAAsLCzVy5Eg1aNBAgYGBatiwocaNGyfLskxHAzzOnj17lJaW5lHTv5Lk5+enVq1aKSUlxbnN4XAoJSVFkZGRJR5z4sSJs0qej4+PJJ3z/cPf319BQUHFHgBgkm1HAF955RVNmzZN8+bN04033qjvvvtO/fr1U3BwsJ566inT8QCPsmTJEklS+/btdcUVVxhOUzrx8fGKi4tT69at1bZtWyUlJSkvL0/9+vWTJPXt21f16tVTYmKiJCkmJkaTJk1Sy5YtFRERoZ9++kkjR45UTEyMswgCgLuzbQHcsGGD7rvvPnXp0kWSVL9+fX3wwQfcygEog0WLFkmSevbsaThJ6fXo0UOHDh3SqFGjlJmZqRYtWig5Odl5YcjevXuLjfiNGDFCXl5eGjFihPbv368rrrhCMTExevnll019CwBQal6WTec8x48frxkzZmjt2rW6/vrr9Z///EcdO3bUpEmT1Lt37xKPKelKvvDwcGVnZzOlA9vauXOnGjduLF9fX2VlZbl0+becnBwFBwd73DnoqbmByoJz0MYjgMOHD1dOTo4aNWokHx8fFRYW6uWXXz5n+ZPOXMk3ZswYF6YE3N/ixYslSR07dmTtXwDwELa9CGTJkiVasGCBFi5cqM2bN2vevHl6/fXXNW/evHMek5CQoOzsbOdj3759LkwMuB/Lsjx6+hcA7Mq2I4DPPfechg8f7vyl1axZM/36669KTExUXFxcicf4+/vL39/flTEBt7Z161bt3LlT/v7+uu+++0zHAQBcJNuOAJ7rVg4Oh8NQIsDzFI3+de7c2bafowEAT2TbEcCiq/auuuoq3XjjjdqyZYsmTZqkhx9+2HQ0wCMw/QsAnsu2BXDy5MkaOXKkBg4cqIMHD6pu3bp67LHHNGrUKNPRAI/w7bffas+ePapWrZrzdkoAAM9g2wJYvXp1JSUlKSkpyXQUwCMVXf0bExOjatWqGU4DACgN234GEEDZORwOZwFk+hcAPA8FEECpff3119q/f7+Cg4N19913m44DACglCiCAUiu6+OOvf/0rt0YCAA9EAQRQKqdPn9bSpUslnVlHFwDgeSiAAEpl3bp1OnjwoGrVqqU777zTdBwAQBlQAAGUStH07/33368qVaoYTgMAKAsKIICLVlBQoI8++kgSV/8CgCejAAK4aGvXrtWxY8cUFhamdu3amY4DACgjCiCAi7Zw4UJJUvfu3eXj42M4DQCgrCiAAC7K8ePHtXLlSklS7969DacBAFwKCiCAi7Jy5UqdOHFCDRs2VJs2bUzHAQBcAgoggIuyYMECSWdG/7y8vAynAQBcCgoggAs6ePCg1q5dK4npXwCoDCiAAC5oyZIlKiwsVOvWrXX99debjgMAuEQUQAAXVHT1L6N/AFA5UAABnFdGRoZSU1Pl7e3N2r8AUElQAAGcV9Ho3x133KE6deoYTgMAKA8UQADnZFlWsat/AQCVAwUQwDlt2bJFO3fuVEBAgLp162Y6DgCgnFAAAZxT0fRvTEyMgoKCDKcB
AJQXCiCAEhUWFuqDDz6QJPXq1ctwGgBAeaIAAijR+vXrdeDAAdWoUUP33HOP6TgAgHJEAQRQoqKLPx544AH5+/sbTgMAKE8UQABnOXnypJYuXSqJq38BoDKiAAI4y5o1a5STk6Mrr7xS7dq1Mx0HAFDOKIAAzjJ//nxJ0oMPPihvb94mAKCy4Z0dQDG///67Vq1aJUnq27ev4TQAgIpAAQRQzOLFi3Xq1Cm1bNlSTZs2NR0HAFABKIAAinnvvfckMfoHAJUZBRCA065du7Rx40b5+PjowQcfNB0HAFBBKIAAnN5//31J0t13363Q0FDDaQAAFYUCCECS5HA4nAWQ6V8AqNwogAAknVn6be/evQoODlZMTIzpOACACkQBBCDp/y7+6N69uwIDAw2nAQBUJAogAOXl5TmXfmP6FwAqPwogAK1YsULHjx9XgwYNFBUVZToOAKCCUQABFLv3n5eXl+E0AICKRgEEbG7//v364osvJEl9+vQxnAYA4AoUQMDmFi5cKIfDoaioKDVs2NB0HACAC1AAARuzLEvz5s2TxMUfAGAnFEDAxtLT0/XDDz/I399fDzzwgOk4AAAXoQACNlZ08UfXrl1Vs2ZNw2kAAK5CAQRsqqCgQPPnz5fE9C8A2A0FELCp1atX6/DhwwoLC9Pdd99tOg4AwIUogIBNzZ49W9KZ0T9fX1/DaQAArkQBBGzot99+06effipJ6tevn+E0AABXowACNvT++++rsLBQkZGRatSokek4AAAXowACNmNZlnP69+GHHzacBgBgAgUQsJlvvvlGu3btUmBgoLp37246DgDAAAogYDNFo38PPPCAgoKCDKcBAJhAAQRsJC8vT4sXL5bE9C8A2BkFELCRjz76SLm5ubrmmmv0l7/8xXQcAIAhFEDARoqmf/v16ycvLy/DaQAAplAAAZv4+eeftX79enl5eSkuLs50HACAQRRAwCbmzp0rSbrrrrsUHh5uNgwAwCgKIGADhYWFmjdvniRW/gAAUAABW0hJSdG+fftUo0YNxcbGmo7jdqZMmaL69esrICBAERER2rRp03n3P3bsmAYNGqQ6derI399f119/vdasWeOitABw6WxdAPfv36+///3vqlWrlgIDA9WsWTN99913pmMB5a7o4o/evXsrICDAcBr3snjxYsXHx2v06NHavHmzmjdvrk6dOungwYMl7l9QUKC77rpLv/zyi5YuXapdu3Zp5syZqlevnouTA0DZ+ZoOYMrRo0cVFRWlDh066NNPP9UVV1yhH3/8UTVr1jQdDShXv//+u5YvXy6J6d+STJo0Sf3793f+bKZPn67Vq1dr9uzZGj58+Fn7z549W0eOHNGGDRtUpUoVSVL9+vVdGRkALpltC+Arr7yi8PBwzZkzx7mtQYMGBhMBFeO9995TQUGBWrZsqZtvvtl0HLdSUFCgtLQ0JSQkOLd5e3srOjpaqampJR7z8ccfKzIyUoMGDdLKlSt1xRVXqFevXho2bJh8fHxKPCY/P1/5+fnO5zk5OeX7jQBAKdl2Cvjjjz9W69at9cADD6h27dpq2bKlZs6ced5j8vPzlZOTU+wBuDPLspyv6/79+3Pvvz85fPiwCgsLFRoaWmx7aGioMjMzSzwmIyNDS5cuVWFhodasWaORI0dq4sSJeumll875dRITExUcHOx8cBU2ANNsWwAzMjI0bdo0XXfddfrss8/0xBNP6KmnnnJeKVkS3sThaTZs2KAdO3aoatWq6tWrl+k4lYLD4VDt2rU1Y8YMtWrVSj169NALL7yg6dOnn/OYhIQEZWdnOx/79u1zYWIAOJttp4AdDodat26t8ePHS5Jatmypbdu2afr06ee8SW5CQoLi4+Odz3NyciiBcGszZsyQJPXo0UPBwcGG07ifkJAQ+fj4KCsrq9j2rKwshYWFlXhMnTp1VKVKlWLTvY0bN1ZmZqYKCgrk5+d31jH+/v7y9/cv3/AAcAlsOwJYp04dNWnSpNi2xo0b
a+/evec8xt/fX0FBQcUegLs6duyYPvzwQ0lnpn9xNj8/P7Vq1UopKSnObQ6HQykpKYqMjCzxmKioKP30009yOBzObbt371adOnVKLH8A4I5sWwCjoqK0a9euYtt2796tq6++2lAioHwtWLBAf/zxh5o2bapbbrnFdBy3FR8fr5kzZ2revHnasWOHnnjiCeXl5TmvCu7bt2+xi0SeeOIJHTlyRE8//bR2796t1atXa/z48Ro0aJCpbwEASs22U8BDhw7VrbfeqvHjx6t79+7atGmTZsyY4ZwyAzyZZVnO1zIXf5xfjx49dOjQIY0aNUqZmZlq0aKFkpOTnReG7N27V97e//e3cnh4uD777DMNHTpUN910k+rVq6enn35aw4YNM/UtAECpeVmWZZkOYcqqVauUkJCgH3/8UQ0aNFB8fHyppspycnIUHBys7OxspoPhVjZt2qSIiAgFBARo//79uvzyy01HqhCeeg56am6gsuActPEIoCTde++9uvfee03HAMpd0ejf/fffX2nLHwCg7Gz7GUCgssrNzdWiRYskSQMGDDCcBgDgjiiAQCXzwQcfKC8vT40aNdJtt91mOg4AwA1RAIFKhos/AAAXQgEEKpEtW7YoLS1Nfn5+6tu3r+k4AAA3RQEEKpGidX+7deumkJAQw2kAAO6KAghUEnl5eVqwYIEkVv4AAJwfBRCoJBYuXKicnBxde+21at++vek4AAA3RgEEKgHLsjR16lRJZ5Yq+9+VKwAA+DN+SwCVwMaNG5Wenq6AgAA99NBDpuMAANwcBRCoBIpG/3r27MnKHwCAC6IAAh7u8OHDWrJkiaQz078AAFwIBRDwcHPmzFF+fr5atWqlNm3amI4DAPAAFEDAgzkcDk2fPl3SmdE/Vv4AAFwMCiDgwdauXauMjAwFBwfrwQcfNB0HAOAhKICAByu6+OOhhx5S1apVDacBAHgKCiDgoX799VetXr1aEhd/AABKhwIIeKgZM2bI4XDojjvu0A033GA6DgDAg1AAAQ9UUFCgWbNmSZIGDhxoOA0AwNNQAAEPtGzZMh08eFB16tRR165dTccBAHgYCiDggaZNmyZJ6t+/v6pUqWI4DQDA01AAAQ+zbds2ffnll/Lx8VH//v1NxwEAeCAKIOBhJk+eLEm67777dOWVVxpOAwDwRBRAwIMcOXJE77//viTp6aefNpwGAOCpKICAB3n33Xf1xx9/qHnz5mrXrp3pOAAAD0UBBDzE6dOn9fbbb0uSnnrqKdb9BQCUGQUQ8BCffPKJ9u7dq1q1arHuLwDgklAAAQ/x1ltvSZIGDBigwMBAw2kAAJ6MAgh4gO+//17r1q2Tj48P6/4CAC4ZBRDwAEW3funWrZvCw8MNpwEAeDoKIODmfv/9d82fP1/SmYs/AAC4VBRAwM3NmjVLJ0+eVMuWLRUVFWU6DgCgEqAAAm7s9OnTmjp1qiRu/QIAKD8UQMCNffzxx9q7d69CQkLUs2dP03EAAJUEBRBwY0W3fnnssccUEBBgOA0AoLKgAAJu6j//+Y/Wr1/PrV8AAOWOAgi4qUmTJkmS7r//ftWrV89wGgBAZUIBBNzQgQMH9MEHH0iS4uPjDacBAFQ2FEDADb399ts6deqUbrvtNrVt29Z0HABAJUMBBNxMXl6epk+fLkl65plnDKcBAFRGFEDAzcyZM0dHjx7Vtddeq5iYGNNxAACVEAUQcCOFhYV64403JElDhw6Vj4+P4UQAgMqIAgi4kZUrVyojI0M1a9ZUXFyc6TgAgEqKAgi4kYkTJ0qSnnjiCVWrVs1wGgBAZUUBBNzEN998ow0bNsjPz0+DBw82HQcAUIlRAAE3UXTj5169eqlOnTqG0wAAKjMKIOAG9uzZo48++kgSN34GAFQ8CiDgBt588005HA517NhRzZo1Mx0HAFDJUQABw44dO6Z3331XEjd+BgC4BgUQMOydd97R8ePH1bRpU911112m4wAAbIACCBh08uRJJSUlSToz
+ufl5WU2EADAFiiAgEHvvfeeMjMzFR4erl69epmOAwCwCQogYEhhYaFeffVVSWdG//z8/AwnAgDYBQUQMGTp0qX6+eefdfnll+vRRx81HQcAYCMUQMAAy7I0YcIESdJTTz3Fsm8AAJeiAAIGrF27Vunp6apWrRrLvgEAXI4CCBhQNPo3YMAA1apVy3AaAIDdUAABF/vmm2+0bt06ValShWXfAABGUAD/a8KECfLy8tKQIUNMR0El98orr0iS/v73v+vKK680nAYAYEcUQEnffvut3nnnHd10002mo6CS27Fjh1asWCEvLy8999xzpuMAAGzK9gXw+PHj6t27t2bOnKmaNWuajoNKrmj0LzY2Vo0bNzacBgBgV7YvgIMGDVKXLl0UHR19wX3z8/OVk5NT7AFcrL1792rBggWSpGHDhhlOAwCwM1/TAUxatGiRNm/erG+//fai9k9MTNSYMWMqOBUqq4kTJ+r06dPq0KGDIiIiTMcBANiYbUcA9+3bp6effloLFixQQEDARR2TkJCg7Oxs52Pfvn0VnBKVRWZmpmbMmCHpzOsI7mXKlCmqX7++AgICFBERoU2bNl3UcYsWLZKXl5diY2MrNiAAlDPbFsC0tDQdPHhQN998s3x9feXr66v169frrbfekq+vrwoLC886xt/fX0FBQcUewMWYOHGiTp48qYiIiIv6uAFcZ/HixYqPj9fo0aO1efNmNW/eXJ06ddLBgwfPe9wvv/yiZ599Vu3atXNRUgAoP7YtgHfeeae2bt2q9PR056N169bq3bu30tPT5ePjYzoiKolDhw5p6tSpkqRRo0bJy8vLcCL8r0mTJql///7q16+fmjRpounTp6tq1aqaPXv2OY8pLCxU7969NWbMGF1zzTUuTAsA5cO2nwGsXr26mjZtWmxbtWrVVKtWrbO2A5fijTfe0IkTJ9SqVSvdc889puPgfxQUFCgtLa3YtLy3t7eio6OVmpp6zuPGjh2r2rVr65FHHtG///3vC36d/Px85efnO59zARkA02w7Agi4wpEjR/T2229LkkaOHMnon5s5fPiwCgsLFRoaWmx7aGioMjMzSzzmq6++0rvvvquZM2de9NdJTExUcHCw8xEeHn5JuQHgUtl2BLAk69atMx0Blcybb76p3Nxc3XTTTeratavpOLhEubm56tOnj2bOnKmQkJCLPi4hIaHYsn85OTmUQABGUQCBCpKdna0333xTEqN/7iokJEQ+Pj7Kysoqtj0rK0thYWFn7f/zzz/rl19+UUxMjHObw+GQJPn6+mrXrl1q2LDhWcf5+/vL39+/nNMDQNkxBQxUkMmTJys7O1tNmjRRt27dTMdBCfz8/NSqVSulpKQ4tzkcDqWkpCgyMvKs/Rs1anTWxWNdu3ZVhw4dlJ6ezqgeAI/BCCBQAXJzc/XGG29IkkaMGCFvb/7Wclfx8fGKi4tT69at1bZtWyUlJSkvL0/9+vWTJPXt21f16tVTYmKiAgICzrpIrEaNGpLExWMAPAoFEKgA06ZN05EjR3T99dere/fupuPgPHr06KFDhw5p1KhRyszMVIsWLZScnOy8MGTv3r0UeACVjpdlWZbpEJ4qJydHwcHBys7O5qbQcMrNzdU111yjw4cPa+7cuYqLizMdqdLy1HPQU3MDlQXnIJ8BBMrd5MmTdfjwYV133XXq3bu36TgAAJyFAgiUo+zsbL3++uuSpNGjR8vXl09ZAADcDwUQKEdJSUk6evSoGjdurJ49e5qOAwBAiSiAQDk5evSoJk2aJEl68cUXWU8aAOC2KIBAOZk4caJycnLUrFkz3X///abjAABwThRAoBwcPnzYuerHmDFjuG0IAMCt8VsKKAevvfaajh8/rptvvlmxsbGm4wAAcF4UQOASZWVl6e2335YkjR07ljV/AQBujwIIXKIJEyboxIkTioiIUOfOnU3HAQDggiiAwCXYu3evpk2bJunMZ/8Y/QMAeAIKIHAJxowZo/z8fN1+++3q
2LGj6TgAAFwUCiBQRjt27NDcuXMlnZkGZvQPAOApKIBAGb3wwgtyOByKjY3VLbfcYjoOAAAXjQIIlMHGjRu1fPlyeXt76+WXXzYdBwCAUqEAAqVkWZaGDx8uSYqLi1OTJk0MJwIAoHQogEAprV27VuvWrZOfn59efPFF03EAACg1CiBQCg6Hwzn6N2jQIF111VWGEwEAUHoUQKAUlixZovT0dFWvXl3PP/+86TgAAJQJBRC4SAUFBRoxYoQk6bnnnlNISIjhRAAAlA0FELhI06ZN088//6zQ0FANHTrUdBwAAMqMAghchKNHj2rs2LGSpLFjx+qyyy4znAgAgLKjAAIX4eWXX9aRI0d044036uGHHzYdBwCAS0IBBC4gIyNDkydPliS99tpr8vX1NZwIAIBLQwEELiAhIUEFBQW66667dPfdd5uOAwDAJaMAAueRmpqqJUuWyMvLS6+99pq8vLxMRwIA4JJRAIFzsCxL8fHxkqR+/fqpefPmhhMBAFA+KIDAOSxdulTffPONqlatqnHjxpmOAwBAuaEAAiXIz893Lvn23HPPqW7duoYTAQBQfiiAQAneeOMNZWRkqE6dOnruuedMxwEAoFxRAIE/OXDggF566SVJ0iuvvKJq1aoZTgQAQPmiAAJ/Mnz4cOXl5emWW25R7969TccBAKDcUQCB/5Gamqr3339fXl5eeuutt+TtzSkCAKh8+O0G/JfD4dCTTz4p6cxtX9q0aWM4EQAAFYMCCPzXnDlzlJaWpqCgII0fP950HAAAKgwFEJCUnZ2t559/XpI0evRohYaGGk4EAEDFoQACksaOHauDBw/qhhtu0ODBg03HAQCgQlEAYXs7duzQW2+9JUlKSkqSn5+f4UQAAFQsCiBszbIsDRw4UKdPn1ZMTIzuvvtu05EAAKhwFEDY2vz587Vu3ToFBgY6RwEBAKjsKICwraNHj+qZZ56RJI0cOVL169c3GwgAABehAMK2nn/+eR06dEiNGzd2FkEAAOyAAghb2rRpk9555x1J0tSpU7nwAwBgKxRA2M7p06f1+OOPy7Is9enTR+3btzcdCQAAl6IAwnamTp2qLVu2qEaNGnr99ddNxwEAwOUogLCVAwcOaMSIEZKkxMRE1a5d23AiAABcjwIIWxk8eLByc3PVtm1bDRgwwHQcAACMoADCNj766CMtX75cvr6+mjlzpry9efkDAOyJ34CwhSNHjmjQoEGSpGHDhummm24ynAgAAHMogLCFZ599VllZWWrUqJHzM4AAANgVBRCV3hdffKE5c+bIy8tLs2bNUkBAgOlIAAAYRQFEpZaXl+e82GPgwIGKiooynAgAAPMogKjURo0apT179ig8PFyJiYmm4wAA4BZsXQATExPVpk0bVa9eXbVr11ZsbKx27dplOhbKyaZNm5SUlCRJmj59uqpXr242EAAAbsLWBXD9+vUaNGiQvvnmG33++ec6deqUOnbsqLy8PNPRcIn++OMPxcXFyeFwqFevXurcubPpSAAAuA1f0wFMSk5OLvZ87ty5ql27ttLS0vSXv/zFUCqUhxEjRmjnzp0KCwvTW2+9ZToOAABuxdYF8M+ys7MlSZdffnmJ/56fn6/8/Hzn85ycHJfkQumsX79eb7zxhiRp1qxZqlWrluFEAAC4F1tPAf8vh8OhIUOGKCoqSk2bNi1xn8TERAUHBzsf4eHhLk6JC8nNzVW/fv1kWZYeeeQRdenSxXQkAADcDgXwvwYNGqRt27Zp0aJF59wnISFB2dnZzse+fftcmBAX49lnn9WePXt09dVXa9KkSabjAADglpgCljR48GCtWrVKX375pa688spz7ufv7y9/f38XJkNpJCcna8aMGZKkOXPmKCgoyHAiAADck60LoGVZevLJJ7V8+XKtW7dODRo0MB0JZXT06FE98sgjkqSnnnpKHTp0MJwIAAD3ZesCOGjQIC1cuFArV65U9erVlZmZKUkKDg5WYGCg4XS4WJZlaeDAgTpw
4ICuv/56bvgMAMAF2PozgNOmTVN2drbat2+vOnXqOB+LFy82HQ2lMG/ePC1atEg+Pj567733VLVqVdORAABwa7YeAbQsy3QEXKLdu3dr8ODBkqSxY8cqIiLCcCIAANyfrUcA4dkKCgrUq1cv5eXlqX379ho2bJjpSPBQU6ZMUf369RUQEKCIiAht2rTpnPvOnDlT7dq1U82aNVWzZk1FR0efd38AcEcUQHisESNGKC0tTZdffrnef/99+fj4mI4ED7R48WLFx8dr9OjR2rx5s5o3b65OnTrp4MGDJe6/bt06Pfjgg/rXv/6l1NRUhYeHq2PHjtq/f7+LkwNA2XlZzIOWWU5OjoKDg5Wdnc0tR1zs888/V8eOHSVJy5cvV2xsrNlAMKI8zsGIiAi1adNGb7/9tqQzN4UPDw/Xk08+qeHDh1/w+MLCQtWsWVNvv/22+vbt67LcAMqOc5ARQHigQ4cOOX/RPv7445Q/lFlBQYHS0tIUHR3t3Obt7a3o6GilpqZe1H/jxIkTOnXq1DmXkJTOLCOZk5NT7AEAJlEA4VEcDofi4uKUmZmpJk2aaOLEiaYjwYMdPnxYhYWFCg0NLbY9NDTUeVuoCxk2bJjq1q1brET+GctIAnA3FEB4lMTERH366acKCAjQBx98wC1fYNSECRO0aNEiLV++XAEBAefcj2UkAbgbW98GBp4lJSVFo0aNkiRNnTpVN910k+FE8HQhISHy8fFRVlZWse1ZWVkKCws777Gvv/66JkyYoC+++OKCr0WWkQTgbhgBhEfYv3+/HnzwQTkcDj3yyCPq16+f6UioBPz8/NSqVSulpKQ4tzkcDqWkpCgyMvKcx7366qsaN26ckpOT1bp1a1dEBYByxQgg3N6pU6fUo0cPHTp0SM2bN9fkyZNNR0IlEh8fr7i4OLVu3Vpt27ZVUlKS8vLynH9k9O3bV/Xq1XMuMfjKK69o1KhRWrhwoerXr+/8rOBll12myy67zNj3AQClQQGE2xs+fLi+/vprBQUFaenSpazTjHJV9MfFqFGjlJmZqRYtWig5Odl5YcjevXvl7f1/kyXTpk1TQUGB7r///mL/ndGjR+vFF190ZXQAKDPuA3gJuI9QxVu2bJn+9re/SeJ+fzibp56DnpobqCw4B/kMINzYDz/8oLi4OEnSs88+S/kDAKCcUADhln7//Xd17dpVx48fV/v27TV+/HjTkQAAqDQogHA7p06dUvfu3ZWRkaEGDRroww8/VJUqVUzHAgCg0qAAwu0888wz+uc//6lq1app5cqVCgkJMR0JAIBKhQIItzJr1iznbV7mz5+vZs2aGU4EAEDlQwGE2/jqq680cOBASdLYsWO56AMAgApCAYRbyMjIULdu3XTq1Ck98MADGjFihOlIAABUWhRAGHfkyBF17txZhw4dUsuWLTVnzhx5eXmZjgUAQKVFAYRRJ0+eVGxsrHbt2qXw8HCtWrVK1apVMx0LAIBKjQIIYxwOh/r166d///vfCgoK0po1a1S3bl3TsQAAqPQogDDmhRde0KJFi+Tr66tly5apadOmpiMBAGALFEAY8eabb2rChAmSztz65c477zScCAAA+6AAwuXef/99DRkyRJI0btw453q/AADANSiAcKlPPvlE/fr1kyQNGTJEL7zwguFEAADYDwUQLvPll1+qe/fuKiwsVFxcnCZOnMjtXgAAMIACCJf49ttvFRMTo5MnT6pr166aNWuWvL15+QEAYAK/gVHhNm/erI4dOyonJ0e3336788pfAABgBgUQFSo9PV3R0dE6duyYoqKi9MknnygwMNB0LAAAbI0CiAqzdetWRUdH6+jRo7rlllu0Zs0aVa9e3XQsAABsjwKICvH999/rzjvv1O+//642bdooOTlZQUFBpmMBAABRAFEBNm3apPbt2+vQoUNq1aqV1q5dq+DgYNOxAADAf1EAUa7Wr1+vO++8U0ePHlVkZKS++OIL1ahRw3QsAADwPyiAKDfJycm6
++67dfz4cd1xxx1au3Yt5Q8AADdEAUS5WLJkibp27aqTJ0/q3nvv1erVq3XZZZeZjgUAAEpAAcQlmzRpknr06KFTp06pe/fuWrZsmQICAkzHAgAA50ABRJk5HA4NHTpUzzzzjCTpqaee0sKFC1WlShXDyQAAwPmwHAPK5OTJk+rTp4+WLl0qSXr99dcVHx/P2r4AAHgACiBK7cCBA+rWrZs2btwoPz8/zZs3Tz179jQdCwAAXCQKIErl22+/VWxsrA4cOKCaNWtq2bJlat++velYAACgFPgMIC7a/Pnz1a5dOx04cEBNmjTRt99+S/kDAMADUQBxQQUFBRo6dKj69Omj/Px8xcTEKDU1VQ0bNjQdDQAAlAEFEOf166+/6i9/+YuSkpIkSQkJCVqxYgXr+gIA4MH4DCDOadWqVerbt6+OHj2qGjVqaO7cubrvvvtMxwIAAJeIEUCc5eTJk4qPj1dMTIyOHj2qNm3aaPPmzZQ/AAAqCQogitm8ebNatWqlN954Q5L05JNP6t///rcaNGhgOBkAACgvFEBIkk6fPq2XXnpJERER2r59u0JDQ/XJJ5/orbfekr+/v+l4AACgHPEZQCgtLU2PPfaY0tLSJEl/+9vfNH36dIWEhBhOBgAAKgIjgDaWk5Ojp59+Wm3btlVaWppq1Kih+fPn68MPP6T8AQBQiTECaEOWZWnp0qUaOnSo9u/fL0nq1auXJk2apNDQUMPpAABARaMA2szGjRv1zDPP6Ouvv5YkNWzYUFOnTlXHjh0NJwMAAK7CFLBN7NmzRz179tQtt9yir7/+WoGBgRo9erS2bt1K+QMAwGYYAazkMjIyNH78eM2bN0+nT5+Wl5eXHnroIY0bN0716tUzHQ8AABhAAaykdu/erfHjx2v+/PkqLCyUJN1111169dVX1aJFC7PhAACAURTASsThcGjt2rWaPHmyPv30U1mWJUm6++67NXLkSN16662GEwIAAHdAAawEDh48qIULF2rq1Kn68ccfndtjYmI0YsQItW3b1mA6AADgbiiAHurEiRP6+OOP9f777+uzzz5zTvMGBQXp4Ycf1qBBg3TttdcaTgkAANyR7QvglClT9NprrykzM1PNmzfX5MmT3XbE7LffftOaNWu0evVqrV27Vnl5ec5/a9OmjR566CH17dtXl112mcGUAADA3dm6AC5evFjx8fGaPn26IiIilJSUpE6dOmnXrl2qXbu26Xg6cOCAvv76a23YsEFffvmlNm/eXOzf69evr7///e/q3bu3GjVqZCglAADwNLYugJMmTVL//v3Vr18/SdL06dO1evVqzZ49W8OHD3dJhsLCQh06dEj79u3Tjh07tH37dm3fvl3ff/+9fv3117P2b9u2rbp06aIuXbqoZcuW8vbmVo4AAKB0bFsACwoKlJaWpoSEBOc2b29vRUdHKzU1tcRj8vPzlZ+f73yek5NzUV9r1KhR+vHHH3Xy5En98ccfOnnypHJzc/Xbb78pKytLDoejxOO8vb3VrFkzRUVFKSoqSnfccYfCwsJK8V0CAACczbYF8PDhwyosLDxr7dvQ0FDt3LmzxGMSExM1ZsyYUn+tTz/9VN999905/93b21uhoaFq1KiRmjRposaNG6tJkyZq1aqVgoKCSv31AAAAzse2BbAsEhISFB8f73yek5Oj8PDwCx43ZMgQHT58WAEBAQoMDFRAQICqVaumsLAw1a1bV1dccYV8fflfAQAAXMO2rSMkJEQ+Pj7Kysoqtj0rK+uc06z+/v7y9/cv9dfq3bt3mTICAABUBNteQeDn56dWrVopJSXFuc3hcCglJUWRkZEGkwEAAFQs244ASlJ8fLzi4uLUunVrtW3bVklJScrLy3NeFQwAAFAZ2boA9ujRQ4cOHdKoUaOUmZmpFi1aKDk5+awLQwAAACoTWxdASRo8eLAGDx5sOgYAAIDL2PYzgAAAAHZFAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAA
m6EAArC9KVOmqH79+goICFBERIQ2bdp03v0//PBDNWrUSAEBAWrWrJnWrFnjoqQAUD4ogABsbfHixYqPj9fo0aO1efNmNW/eXJ06ddLBgwdL3H/Dhg168MEH9cgjj2jLli2KjY1VbGystm3b5uLkAFB2XpZlWaZDeKqcnBwFBwcrOztbQUFBpuMAtlMe52BERITatGmjt99+W9KZNcHDw8P15JNPavjw4Wft36NHD+Xl5WnVqlXObbfccotatGih6dOnuyw3gLLjHGQlkEtS1J1zcnIMJwHsqejcK+vfsQUFBUpLS1NCQoJzm7e3t6Kjo5WamlriMampqYqPjy+2rVOnTlqxYsU5v05+fr7y8/Odz7Ozs4vlB+Bal/reURlQAC9Bbm6uJCk8PNxwEsDecnNzFRwcXOrjDh8+rMLCwrPW/w4NDdXOnTtLPCYzM7PE/TMzM8/5dRITEzVmzJiztvPeAZj1+++/l+m9ozKgAF6CunXrat++fapevbq8vLzOuV9OTo7Cw8O1b98+2w418zM4g59D+f4MLMtSbm6u6tatW07pKkZCQkKxUcNjx47p6quv1t69ez3ml48nvnbJ7BqemDk7O1tXXXWVLr/8ctNRjKEAXgJvb29deeWVF71/UFCQx5wcFYWfwRn8HMrvZ3ApBSokJEQ+Pj7Kysoqtj0rK0thYWElHhMWFlaq/SXJ399f/v7+Z20PDg72uNeBJ752yewanpjZ29u+18La9zsHYHt+fn5q1aqVUlJSnNscDodSUlIUGRlZ4jGRkZHF9pekzz///Jz7A4A7YgQQgK3Fx8crLi5OrVu3Vtu2bZWUlKS8vDz169dPktS3b1/Vq1dPiYmJkqSnn35at99+uyZOnKguXbpo0aJF+u677zRjxgyT3wYAlAoF0AX8/f01evToEqeA7IKfwRn8HNzvZ9CjRw8dOnRIo0aNUmZmplq0aKHk5GTnhR579+4tNk106623auHChRoxYoSef/55XXfddVqxYoWaNm160V/T3X4GF4PMrkFm1/DEzOWN+wACAADYDJ8BBAAAsBkKIAAAgM1QAAEAAGyGAggAAGAzFEAXmDJliurXr6+AgABFRERo06ZNpiO51JdffqmYmBjVrVtXXl5e510ztTJKTExUmzZtVL16ddWuXVuxsbHatWuX6VguN23aNN10003Om8VGRkbq008/NR2rQpT2nP/www/VqFEjBQQEqFmzZlqzZo2LkhZXmtwzZ85Uu3btVLNmTdWsWVPR0dFG3tvK+v66aNEieXl5KTY2tmIDlqC0mY8dO6ZBgwapTp068vf31/XXX+/y10hpMyclJemGG25QYGCgwsPDNXToUJ08edJFacv2e2fdunW6+eab5e/vr2uvvVZz586t8JxGWahQixYtsvz8/KzZs2dbP/zwg9W/f3+rRo0aVlZWluloLrNmzRrrhRdesJYtW2ZJspYvX246kkt16tTJmjNnjrVt2zYrPT3d6ty5s3XVVVdZx48fNx3NpT7++GNr9erV1u7du61du3ZZzz//vFWlShVr27ZtpqOVq9Ke819//bXl4+Njvfrqq9b27dutESNGWFWqVLG2bt3q1rl79eplTZkyxdqyZYu1Y8cO66GHHrKCg4Ot//f//p/bZi6yZ88eq169ela7du2s++67zzVh/6u0mfPz863WrVtbnTt3tr766itrz5491rp166z09HS3zbxgwQLL39/fWrBggbVnzx7rs88+s+rUqWMNHTrUZZlL+3snIyPDqlq1qhUfH29t377dmjx5suXj42MlJye7JrABFMAK1rZtW2vQoEHO54WFhVbdunWtxMREg6nMsWMB/LODBw9akqz169ebjmJczZo1rVmzZpmOUa5Ke853797d6tKlS7FtERER1mOPPVahOf/sUt+rTp8+bVWvXt2aN29eRUU8S1kynz592rr11lutWbNmWXFxcS4vgKXNPG3aNOuaa66xCgoKXBXxLKXNPGjQIOuO
O+4oti0+Pt6Kioqq0JzncjG/d/7xj39YN954Y7FtPXr0sDp16lSBycxiCrgCFRQUKC0tTdHR0c5t3t7eio6OVmpqqsFkMCk7O1uSbL0IeWFhoRYtWqS8vLxKtYRaWc751NTUYvtLUqdOnVz6HlEe71UnTpzQqVOnXPa6LmvmsWPHqnbt2nrkkUdcEbOYsmT++OOPFRkZqUGDBik0NFRNmzbV+PHjVVhY6LaZb731VqWlpTmniTMyMrRmzRp17tzZJZnLwh3OQ1djJZAKdPjwYRUWFjpXFCgSGhqqnTt3GkoFkxwOh4YMGaKoqKhSrRxRWWzdulWRkZE6efKkLrvsMi1fvlxNmjQxHavclOWcz8zMLHH/zMzMCsv5Z+XxXjVs2DDVrVv3rF+iFaUsmb/66iu9++67Sk9Pd0HCs5Ulc0ZGhv75z3+qd+/eWrNmjX766ScNHDhQp06d0ujRo90yc69evXT48GHddtttsixLp0+f1uOPP67nn3++wvOW1bnOw5ycHP3xxx8KDAw0lKziMAIIuNCgQYO0bds2LVq0yHQUI2644Qalp6dr48aNeuKJJxQXF6ft27ebjoVLNGHCBC1atEjLly9XQECA6Tglys3NVZ8+fTRz5kyFhISYjnPRHA6HateurRkzZqhVq1bq0aOHXnjhBU2fPt10tHNat26dxo8fr6lTp2rz5s1atmyZVq9erXHjxpmOhv/BCGAFCgkJkY+Pj7Kysoptz8rKUlhYmKFUMGXw4MFatWqVvvzyS1155ZWm4xjh5+ena6+9VpLUqlUrffvtt3rzzTf1zjvvGE5WPspyzoeFhRl/j7iU96rXX39dEyZM0BdffKGbbrqpImMWU9rMP//8s3755RfFxMQ4tzkcDkmSr6+vdu3apYYNG7pVZkmqU6eOqlSpIh8fH+e2xo0bKzMzUwUFBfLz83O7zCNHjlSfPn306KOPSpKaNWumvLw8DRgwQC+88EKxtbXdxbnOw6CgoEo5+icxAlih/Pz81KpVK6WkpDi3ORwOpaSkVKrPPeH8LMvS4MGDtXz5cv3zn/9UgwYNTEdyGw6HQ/n5+aZjlJuynPORkZHF9pekzz//3KXvEWV9r3r11Vc1btw4JScnq3Xr1q6I6lTazI0aNdLWrVuVnp7ufHTt2lUdOnRQenq6wsPD3S6zJEVFRemnn35yllVJ2r17t+rUqVPh5a+smU+cOHFWySsqsJZlVVzYS+AO56HLGb4IpdJbtGiR5e/vb82dO9favn27NWDAAKtGjRpWZmam6Wguk5uba23ZssXasmWLJcmaNGmStWXLFuvXX381Hc0lnnjiCSs4ONhat26d9dtvvzkfJ06cMB3NpYYPH26tX7/e2rNnj/X9999bw4cPt7y8vKy1a9eajlauLnTO9+nTxxo+fLhz/6+//try9fW1Xn/9dWvHjh3W6NGjjd0GpjS5J0yYYPn5+VlLly4t9rrOzc1128x/ZuIq4NJm3rt3r1W9enVr8ODB1q5du6xVq1ZZtWvXtl566SW3zTx69GirevXq1gcffGBlZGRYa9eutRo2bGh1797dZZkv9Htn+PDhVp8+fZz7F90G5rnnnrN27NhhTZkyhdvA4NJNnjzZuuqqqyw/Pz+rbdu21jfffGM6kkv961//siSd9YiLizMdzSVK+t4lWXPmzDEdzaUefvhh6+qrr7b8/PysK664wrrzzjsrXfkrcr5z/vbbbz/rtb9kyRLr+uuvt/z8/Kwbb7zRWr16tYsTn1Ga3FdffXWJr+vRo0e7beY/M1EALav0mTds2GBFRERY/v7+1jXXXGO9/PLL1unTp90286lTp6wXX3zRatiwoRUQEGCFh4dbAwcOtI4ePeqyvBf6vRMXF2fdfvvtZx3TokULy8/Pz7rmmmsq/Xu0l2W56XgsAAAAKgSfAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZ
CiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGYogAAAADZDAQQAALAZCiAAAIDNUAABAABshgIIAABgMxRAAAAAm6EAAgAA2AwFEAAAwGb+P+L8AOPWQKQlAAAAAElFTkSuQmCC", "text/html": [ "\n", "
\n", "
\n", " Figure\n", "
\n", - " \n", + " \n", "
\n", " " ], @@ -1726,7 +1738,7 @@ } ], "source": [ - "y_drp2 = - drp_stencil2[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil2[1:], m)], axis=0)\n", + "y_drp2 = - drp_stencil2[0] - 2*np.sum([a_ * np.cos(m_*x) for a_, m_ in zip(drp_stencil2[1:], m, strict=False)], axis=0)\n", "\n", "fig, ax = plt.subplots(1, 2)\n", "ax[0].plot(x, x**2, 'k')\n", @@ -1843,7 +1855,7 @@ } ], "source": [ - "u , data , r = acoustic(weights=drp_stencil2, h=h, dt=dt, v=1500)\n", + "u, data, r = acoustic(weights=drp_stencil2, h=h, dt=dt, v=1500)\n", "um, datam, rm = acoustic(weights=drp_stencil2, h=h, dt=dt/2, v=1500)\n", "up, datap, rp = acoustic(weights=drp_stencil2, h=h, dt=2*dt, v=1500)" ] @@ -1886,13 +1898,13 @@ "source": [ "fig, ax = plt.subplots(2, 3)\n", "plot_wave(um, ax[0, 0], hline=(0, 500, 500, 500), r=rm)\n", - "plot_wave( u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", + "plot_wave(u, ax[0, 1], hline=(0, 500, 500, 500), r=r)\n", "plot_wave(up, ax[0, 2], hline=(0, 500, 500, 500), r=rp)\n", "\n", "shape = u.shape\n", - "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1,0], extent=(0, 500))\n", - "plot_profile( u[shape[0]//2, :shape[1]//2], ax[1,1], extent=(0, 500))\n", - "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1,2], extent=(0, 500))\n", + "plot_profile(um[shape[0]//2, :shape[1]//2], ax[1, 0], extent=(0, 500))\n", + "plot_profile(u[shape[0]//2, :shape[1]//2], ax[1, 1], extent=(0, 500))\n", + "plot_profile(up[shape[0]//2, :shape[1]//2], ax[1, 2], extent=(0, 500))\n", "\n", "fig.set_size_inches(12, 6)\n", "plt.show()" @@ -1944,12 +1956,12 @@ "fig, ax = plt.subplots(2, 3)\n", "\n", "arrival = plot_shot(datam, ax[0, 0], vline=(500, 0, 500, 0.6), r=rm)\n", - "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", + "plot_shot(data, ax[0, 1], vline=(500, 0, 500, 0.6), r=r, first_arrival=arrival)\n", "plot_shot(datap, ax[0, 2], vline=(500, 0, 500, 0.6), r=rp, first_arrival=arrival)\n", "\n", "width = data.shape[1]\n", 
"plot_profile(datam[:, width//2], ax[1, 0], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", - "plot_profile( data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", + "plot_profile(data[:, width//2], ax[1, 1], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "plot_profile(datap[:, width//2], ax[1, 2], extent=(0, 0.6), axis_labels=('t', 'A'), first_arrival=arrival[width//2])\n", "\n", "fig.set_size_inches(12, 6)\n", diff --git a/examples/seismic/tutorials/07_DRP_schemes.ipynb b/examples/seismic/tutorials/07_DRP_schemes.ipynb index e6bcf5fe10..74f310ad6e 100644 --- a/examples/seismic/tutorials/07_DRP_schemes.ipynb +++ b/examples/seismic/tutorials/07_DRP_schemes.ipynb @@ -28,7 +28,7 @@ "Ny = Nx\n", "dx = Lx/(Nx-1)\n", "dy = dx\n", - "grid = Grid(shape=(Nx,Ny), extent=(Lx,Ly))\n", + "grid = Grid(shape=(Nx, Ny), extent=(Lx, Ly))\n", "\n", "# Define u(x,y,t) on this grid\n", "u = TimeFunction(name='u', grid=grid, time_order=2, space_order=2)\n", @@ -143,7 +143,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "%matplotlib inline\n", "\n", @@ -199,7 +199,8 @@ } ], "source": [ - "from examples.seismic import TimeAxis\n", + "# NBVAL_IGNORE_OUTPUT\n", + "from examples.seismic import RickerSource, TimeAxis\n", "\n", "t0 = 0. # Simulation starts a t=0\n", "tn = 500. 
# Simulation lasts 0.5 seconds (500 ms)\n", @@ -207,9 +208,6 @@ "\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "\n", - "#NBVAL_IGNORE_OUTPUT\n", - "from examples.seismic import RickerSource\n", - "\n", "f0 = 0.025 # Source peak frequency is 25Hz (0.025 kHz)\n", "src = RickerSource(name='src', grid=model.grid, f0=f0,\n", " npoint=1, time_range=time_range)\n", @@ -235,6 +233,8 @@ "metadata": {}, "outputs": [], "source": [ + "from devito import solve\n", + "\n", "# Define the wavefield with the size of the model and the time dimension\n", "u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=order)\n", "\n", @@ -244,7 +244,6 @@ "# This discrete PDE can be solved in a time-marching way updating u(t+dt) from the previous time step\n", "# Devito as a shortcut for u(t+dt) which is u.forward. We can then rewrite the PDE as\n", "# a time marching updating equation known as a stencil using customized SymPy functions\n", - "from devito import solve\n", "\n", "stencil = Eq(u.forward, solve(pde, u.forward).subs({H: u.laplace}))" ] @@ -304,7 +303,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=dt)" ] }, @@ -333,7 +332,7 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "#import matplotlib\n", + "# import matplotlib\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", "\n", @@ -351,7 +350,7 @@ "\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(u.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", "fig.colorbar(cont)\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$z$')\n", @@ -378,9 +377,11 @@ "source": [ "from devito import SubDomain\n", "\n", + "\n", "# Define our 'upper' and 'lower' SubDomains:\n", "class Upper(SubDomain):\n", " name = 'upper'\n", + "\n", " def define(self, 
dimensions):\n", " x, z = dimensions\n", " # We want our upper layer to span the entire x-dimension and all\n", @@ -388,8 +389,10 @@ " # the following notation:\n", " return {x: x, z: ('left', 80+nbl)}\n", "\n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", + "\n", " def define(self, dimensions):\n", " x, z = dimensions\n", " # We want our lower layer to span the entire x-dimension and all\n", @@ -422,7 +425,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Create our model passing it our 'upper' and 'lower' subdomains:\n", "model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", @@ -482,15 +485,15 @@ "# Define our custom FD coefficients:\n", "x, z = model.grid.dimensions\n", "# Upper layer\n", - "weights_u = np.array([ 2.00462e-03, -1.63274e-02, 7.72781e-02,\n", - " -3.15476e-01, 1.77768e+00, -3.05033e+00,\n", - " 1.77768e+00, -3.15476e-01, 7.72781e-02,\n", - " -1.63274e-02, 2.00462e-03])\n", + "weights_u = np.array([2.00462e-03, -1.63274e-02, 7.72781e-02,\n", + " -3.15476e-01, 1.77768e+00, -3.05033e+00,\n", + " 1.77768e+00, -3.15476e-01, 7.72781e-02,\n", + " -1.63274e-02, 2.00462e-03])\n", "# Lower layer\n", - "weights_l = np.array([ 0. , 0. , 0.0274017,\n", - " -0.223818, 1.64875 , -2.90467,\n", - " 1.64875 , -0.223818, 0.0274017,\n", - " 0. , 0. 
])\n", + "weights_l = np.array([0., 0., 0.0274017,\n", + " -0.223818, 1.64875, -2.90467,\n", + " 1.64875, -0.223818, 0.0274017,\n", + " 0., 0.])\n", "# Create the Devito Coefficient objects:\n", "ux_u_coeffs = weights_u/x.spacing**2\n", "uz_u_coeffs = weights_u/z.spacing**2\n", @@ -547,7 +550,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(time=time_range.num-1, dt=dt)" ] }, @@ -578,7 +581,7 @@ "# NBVAL_IGNORE_OUTPUT\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u_DRP.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(u_DRP.data[0, :, :].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", "fig.colorbar(cont)\n", "ax1.axis([0, Lx, 0, Lz])\n", "ax1.set_xlabel('$x$')\n", @@ -616,7 +619,13 @@ "# NBVAL_IGNORE_OUTPUT\n", "fig = plt.figure(figsize=(14, 7))\n", "ax1 = fig.add_subplot(111)\n", - "cont = ax1.imshow(u_DRP.data[0,:,:].T-u.data[0,:,:].T, vmin=-clip, vmax=clip, cmap=cm.seismic, extent=[0, Lx, 0, Lz])\n", + "cont = ax1.imshow(\n", + " u_DRP.data[0, :, :].T - u.data[0, :, :].T,\n", + " vmin=-clip,\n", + " vmax=clip,\n", + " cmap=cm.seismic,\n", + " extent=[0, Lx, 0, Lz]\n", + ")\n", "fig.colorbar(cont)\n", "ax1.set_xlabel('$x$')\n", "ax1.set_ylabel('$z$')\n", @@ -631,7 +640,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Wavefield norm checks\n", "assert np.isclose(np.linalg.norm(u.data[-1]), 82.170, atol=0, rtol=1e-4)\n", diff --git a/examples/seismic/tutorials/08_snapshotting.ipynb b/examples/seismic/tutorials/08_snapshotting.ipynb index 5474ff2a72..080e054b1c 100644 --- a/examples/seismic/tutorials/08_snapshotting.ipynb +++ b/examples/seismic/tutorials/08_snapshotting.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "%reset -f\n", "import numpy as np\n", "import 
matplotlib.pyplot as plt\n", @@ -87,8 +87,8 @@ "source": [ "# This cell sets up the problem that is already explained in the first TLE tutorial.\n", "\n", - "#NBVAL_IGNORE_OUTPUT\n", - "#%%flake8\n", + "# NBVAL_IGNORE_OUTPUT\n", + "# %%flake8\n", "from examples.seismic import Receiver\n", "from examples.seismic import RickerSource\n", "from examples.seismic import Model, plot_velocity, TimeAxis\n", @@ -141,7 +141,7 @@ "plot_velocity(model, source=src.coordinates.data,\n", " receiver=rec.coordinates.data[::4, :])\n", "\n", - "#Used for reshaping\n", + "# Used for reshaping\n", "vnx = nx+20\n", "vnz = nz+20\n", "\n", @@ -181,10 +181,9 @@ "factor = round(u.shape[0] / nsnaps) # Get approx nsnaps, for any nt\n", "ucopy = u.data.copy(order='C')\n", "filename = \"naivsnaps.bin\"\n", - "file_u = open(filename, 'wb')\n", - "for it in range(0, nsnaps):\n", - " file_u.write(ucopy[it*factor, :, :])\n", - "file_u.close()" + "with open(filename, 'wb') as fh:\n", + " for it in range(0, nsnaps):\n", + " fh.write(ucopy[it*factor, :, :])" ] }, { @@ -211,11 +210,11 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", " plt.subplot(1, plot_num+1, imcnt+1)\n", @@ -249,21 +248,20 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "fobj = open(\"naivsnaps.bin\", \"rb\")\n", - "snaps = np.fromfile(fobj, dtype = np.float32)\n", - "snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) #reshape vec2mtx, devito format. nx first\n", - "fobj.close()\n", + "# NBVAL_IGNORE_OUTPUT\n", + "with open(\"naivsnaps.bin\", \"rb\") as fh:\n", + " snaps = np.fromfile(fh, dtype=np.float32)\n", + " snaps = np.reshape(snaps, (nsnaps, vnx, vnz)) # reshape vec2mtx, devito format. 
nx first\n", "\n", - "plt.rcParams['figure.figsize'] = (20,20) # Increases figure size\n", + "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "\n", "for i in range(0, nsnaps, int(nsnaps/plot_num)):\n", - " plt.subplot(1, plot_num+1, imcnt+1)\n", - " imcnt = imcnt + 1\n", - " plt.imshow(np.transpose(snaps[i,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.subplot(1, plot_num+1, imcnt+1)\n", + " imcnt = imcnt + 1\n", + " plt.imshow(np.transpose(snaps[i, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -325,7 +323,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import ConditionalDimension\n", "\n", "nsnaps = 103 # desired number of equally spaced snaps\n", @@ -333,7 +331,7 @@ "\n", "print(f\"factor is {factor}\")\n", "\n", - "#Part 1 #############\n", + "# Part 1 #############\n", "time_subsampled = ConditionalDimension(\n", " 't_sub', parent=model.grid.time_dim, factor=factor)\n", "usave = TimeFunction(name='usave', grid=model.grid, time_order=2, space_order=2,\n", @@ -349,7 +347,7 @@ " expr=src * dt**2 / model.m)\n", "rec_term = rec.interpolate(expr=u)\n", "\n", - "#Part 2 #############\n", + "# Part 2 #############\n", "op1 = Operator([stencil] + src_term + rec_term,\n", " subs=model.spacing_map) # usual operator\n", "op2 = Operator([stencil] + src_term + [Eq(usave, u)] + rec_term,\n", @@ -360,7 +358,7 @@ "op2(time=nt - 2, dt=model.critical_dt)\n", "#####################\n", "\n", - "#Part 3 #############\n", + "# Part 3 #############\n", "print(\"Saving snaps file\")\n", "print(f\"Dimensions: nz = {nz + 2 * nb:d}, nx = {nx + 2 * nb:d}\")\n", "filename = \"snaps2.bin\"\n", @@ -392,21 +390,20 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "fobj = open(\"snaps2.bin\", \"rb\")\n", - 
"snaps = np.fromfile(fobj, dtype=np.float32)\n", - "snaps = np.reshape(snaps, (nsnaps, vnx, vnz))\n", - "fobj.close()\n", + "# NBVAL_IGNORE_OUTPUT\n", + "with open(\"snaps2.bin\", \"rb\") as fh:\n", + " snaps = np.fromfile(fh, dtype=np.float32)\n", + " snaps = np.reshape(snaps, (nsnaps, vnx, vnz))\n", "\n", "plt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n", "\n", - "imcnt = 1 # Image counter for plotting\n", - "plot_num = 5 # Number of images to plot\n", + "imcnt = 1 # Image counter for plotting\n", + "plot_num = 5 # Number of images to plot\n", "for i in range(0, plot_num):\n", - " plt.subplot(1, plot_num, i+1)\n", - " imcnt = imcnt + 1\n", - " ind = i * int(nsnaps/plot_num)\n", - " plt.imshow(np.transpose(snaps[ind,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n", + " plt.subplot(1, plot_num, i+1)\n", + " imcnt = imcnt + 1\n", + " ind = i * int(nsnaps/plot_num)\n", + " plt.imshow(np.transpose(snaps[ind, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n", "\n", "plt.show()" ] @@ -620,10 +617,10 @@ "\n", " orig_stdout = sys.stdout\n", "\n", - " f = open(filename, 'w')\n", - " sys.stdout = f\n", + " with open(filename, 'w') as fh:\n", + " sys.stdout = fh\n", + "\n", " print(thingToPrint)\n", - " f.close()\n", "\n", " sys.stdout = orig_stdout\n", "\n", @@ -3340,18 +3337,17 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "#NBVAL_SKIP\n", + "# NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_SKIP\n", "from IPython.display import HTML\n", "import matplotlib.pyplot as plt\n", "import matplotlib.animation as animation\n", "\n", "filename = \"naivsnaps.bin\"\n", "nsnaps = 100\n", - "fobj = open(filename, \"rb\")\n", - "snapsObj = np.fromfile(fobj, dtype=np.float32)\n", - "snapsObj = np.reshape(snapsObj, (nsnaps, vnx, vnz))\n", - "fobj.close()\n", + "with open(filename, \"rb\") as fh:\n", + " snapsObj = np.fromfile(fh, dtype=np.float32)\n", + " snapsObj = np.reshape(snapsObj, (nsnaps, vnx, vnz))\n", "\n", "fig, ax = plt.subplots()\n", "fig.set_size_inches(10, 8)\n", @@ 
-3362,15 +3358,17 @@ "plt.ylabel('z')\n", "plt.title('Modelling one shot over a 2-layer velocity model with Devito.')\n", "\n", + "\n", "def update(i):\n", " matrice.set_array(snapsObj[i, :, :].T)\n", " return matrice,\n", "\n", + "\n", "# Animation\n", "ani = animation.FuncAnimation(fig, update, frames=nsnaps, interval=50, blit=True)\n", "\n", "plt.close(ani._fig)\n", - "HTML(ani.to_html5_video())\n" + "HTML(ani.to_html5_video())" ] }, { diff --git a/examples/seismic/tutorials/09_viscoelastic.ipynb b/examples/seismic/tutorials/09_viscoelastic.ipynb index 76ab0fa2f7..799777fb26 100644 --- a/examples/seismic/tutorials/09_viscoelastic.ipynb +++ b/examples/seismic/tutorials/09_viscoelastic.ipynb @@ -40,8 +40,8 @@ "outputs": [], "source": [ "# Domain size:\n", - "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", - "h = 1.0 # Desired grid spacing\n", + "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", + "h = 1.0 # Desired grid spacing\n", "shape = (int(extent[0]/h+1), int(extent[1]/h+1), int(extent[2]/h+1))\n", "\n", "# Model physical parameters:\n", @@ -52,23 +52,23 @@ "rho = np.zeros(shape)\n", "\n", "# Set up three horizontally separated layers:\n", - "vp[:,:,:int(0.5*shape[2])+1] = 1.52\n", - "qp[:,:,:int(0.5*shape[2])+1] = 10000.\n", - "vs[:,:,:int(0.5*shape[2])+1] = 0.\n", - "qs[:,:,:int(0.5*shape[2])+1] = 0.\n", - "rho[:,:,:int(0.5*shape[2])+1] = 1.05\n", + "vp[:, :, :int(0.5*shape[2])+1] = 1.52\n", + "qp[:, :, :int(0.5*shape[2])+1] = 10000.\n", + "vs[:, :, :int(0.5*shape[2])+1] = 0.\n", + "qs[:, :, :int(0.5*shape[2])+1] = 0.\n", + "rho[:, :, :int(0.5*shape[2])+1] = 1.05\n", "\n", - "vp[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.6\n", - "qp[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 40.\n", - "vs[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 0.4\n", - "qs[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 30.\n", - "rho[:,:,int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.3\n", + "vp[:, :, 
int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.6\n", + "qp[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 40.\n", + "vs[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 0.4\n", + "qs[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 30.\n", + "rho[:, :, int(0.5*shape[2])+1:int(0.5*shape[2])+1+int(4/h)] = 1.3\n", "\n", - "vp[:,:,int(0.5*shape[2])+1+int(4/h):] = 2.2\n", - "qp[:,:,int(0.5*shape[2])+1+int(4/h):] = 100.\n", - "vs[:,:,int(0.5*shape[2])+1+int(4/h):] = 1.2\n", - "qs[:,:,int(0.5*shape[2])+1+int(4/h):] = 70.\n", - "rho[:,:,int(0.5*shape[2])+1+int(4/h):] = 2." + "vp[:, :, int(0.5*shape[2])+1+int(4/h):] = 2.2\n", + "qp[:, :, int(0.5*shape[2])+1+int(4/h):] = 100.\n", + "vs[:, :, int(0.5*shape[2])+1+int(4/h):] = 1.2\n", + "qs[:, :, int(0.5*shape[2])+1+int(4/h):] = 70.\n", + "rho[:, :, int(0.5*shape[2])+1+int(4/h):] = 2." ] }, { @@ -99,8 +99,8 @@ "# Create model\n", "origin = (0, 0, 0)\n", "spacing = (h, h, h)\n", - "so = 4 # FD space order (Note that the time order is by default 1).\n", - "nbl = 20 # Number of absorbing boundary layers cells\n", + "so = 4 # FD space order (Note that the time order is by default 1).\n", + "nbl = 20 # Number of absorbing boundary layers cells\n", "model = ModelViscoelastic(space_order=so, vp=vp, qp=qp, vs=vs, qs=qs,\n", " b=1/rho, origin=origin, shape=shape, spacing=spacing,\n", " nbl=nbl)" @@ -114,7 +114,7 @@ "source": [ "# As pointed out in Thorbecke's implementation and documentation, the viscoelastic wave equation is\n", "# not always stable with the standard elastic CFL condition. 
We enforce a smaller critical dt here\n", - "# to ensure the stability.\n", + "# to ensure the stability.\n", "model.dt_scale = .9" ] }, @@ -187,7 +187,7 @@ "# Memory variable:\n", "r = TensorTimeFunction(name='r', grid=model.grid, space_order=so, time_order=1)\n", "\n", - "s = model.grid.stepping_dim.spacing # Symbolic representation of the model grid spacing" + "s = model.grid.stepping_dim.spacing # Symbolic representation of the model grid spacing" ] }, { @@ -219,7 +219,7 @@ "e = grad(v.forward) + grad(v.forward).transpose(inner=False)\n", "\n", "# Stress equations\n", - "pde_tau = tau.dt - r.forward - l * t_ep / t_s * diag(div(v.forward)) - mu * t_es / t_s * e\n", + "pde_tau = tau.dt - r.forward - l * t_ep / t_s * diag(div(v.forward)) - mu * t_es / t_s * e\n", "u_t = Eq(tau.forward, model.damp * solve(pde_tau, tau.forward))\n", "\n", "# Memory variable equations:\n", @@ -276,7 +276,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Execute the operator:\n", "op(dt=dt)" @@ -362,7 +362,7 @@ } ], "source": [ - "np.mod(time_range.num,2)" + "np.mod(time_range.num, 2)" ] }, { @@ -419,7 +419,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "\n", "# Mid-points:\n", "mid_x = int(0.5*(v[0].data.shape[1]-1))+1\n", @@ -440,7 +440,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "assert np.isclose(norm(v[0]), 0.102959, atol=1e-4, rtol=0)" ] diff --git a/examples/seismic/tutorials/10_nmo_correction.ipynb b/examples/seismic/tutorials/10_nmo_correction.ipynb index fb67563c40..107c0955f4 100644 --- a/examples/seismic/tutorials/10_nmo_correction.ipynb +++ b/examples/seismic/tutorials/10_nmo_correction.ipynb @@ -81,7 +81,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Model, plot_velocity\n", "\n", "shape = (301, 501) # Number of grid point (nx, ny, nz)\n", @@ -90,9 +90,9 @@ "\n", "# Define a velocity profile. 
The velocity is in km/s\n", "v = np.empty(shape, dtype=np.float32)\n", - "v[:,:100] = 1.5\n", - "v[:,100:350] = 2.5\n", - "v[:,350:] = 4.5\n", + "v[:, :100] = 1.5\n", + "v[:, 100:350] = 2.5\n", + "v[:, 350:] = 4.5\n", "\n", "# With the velocity and model size defined, we can create the seismic model that\n", "# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points\n", @@ -122,7 +122,7 @@ "\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "\n", - "nrcv = 250 # Number of Receivers" + "nrcv = 250 # Number of Receivers" ] }, { @@ -131,7 +131,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import RickerSource\n", "\n", "f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\n", @@ -145,7 +145,7 @@ "pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n", "stencil = Eq(u.forward, solve(pde, u.forward))\n", "\n", - "src.coordinates.data[:, 0] = 400 # Source coordinates\n", + "src.coordinates.data[:, 0] = 400 # Source coordinates\n", "src.coordinates.data[:, -1] = 20. # Depth is 20m" ] }, @@ -178,12 +178,12 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from examples.seismic import Receiver\n", "\n", "rec = Receiver(name='rec', grid=model.grid, npoint=nrcv, time_range=time_range)\n", - "rec.coordinates.data[:,0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)\n", - "rec.coordinates.data[:,-1] = 20. # Depth is 20m\n", + "rec.coordinates.data[:, 0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)\n", + "rec.coordinates.data[:, -1] = 20. 
# Depth is 20m\n", "\n", "# Finally we define the source injection and receiver read function to generate the corresponding code\n", "src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)\n", @@ -213,7 +213,7 @@ "for i, coord in enumerate(rec.coordinates.data):\n", " off = (src.coordinates.data[0, 0] - coord[0])\n", " offset.append(off)\n", - " data.append(rec.data[:,i])" + " data.append(rec.data[:, i])" ] }, { @@ -229,7 +229,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", "from matplotlib import cm\n", @@ -238,6 +238,7 @@ "mpl.rc('font', size=16)\n", "mpl.rc('figure', figsize=(8, 6))\n", "\n", + "\n", "def plot_traces(rec, xb, xe, t0, tn, colorbar=True):\n", " scale = np.max(rec)/100\n", " extent = [xb, xe, 1e-3*tn, t0]\n", @@ -285,7 +286,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_traces(np.transpose(data), rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)" ] }, @@ -323,8 +324,8 @@ "metadata": {}, "outputs": [], "source": [ - "ns = time_range.num # Number of samples in each trace\n", - "grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension" + "ns = time_range.num # Number of samples in each trace\n", + "grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension" ] }, { @@ -435,11 +436,11 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", - "dtms = model.critical_dt/1000 # Time discretization in ms\n", + "dtms = model.critical_dt/1000 # Time discretization in ms\n", "E1 = Eq(t_0, sample*dtms)\n", - "E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2) ))\n", + "E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2)))\n", "E3 = Eq(s, sp.floor(tt/dtms))\n", "op1 = Operator([E1, E2, E3])\n", "op1()" @@ -476,7 +477,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "s.data[s.data >= time_range.num] = 0\n", "E4 = Eq(snmo, amps[s[sample, trace], trace])\n", @@ -484,7 +485,7 @@ "op2 = Operator([E4])\n", "op2()\n", "\n", - "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!\n", + "stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!\n", "\n", "plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)" ] diff --git a/examples/seismic/tutorials/11_viscoacoustic.ipynb b/examples/seismic/tutorials/11_viscoacoustic.ipynb index 0f6317dfe1..132dad6888 100644 --- a/examples/seismic/tutorials/11_viscoacoustic.ipynb +++ b/examples/seismic/tutorials/11_viscoacoustic.ipynb @@ -155,9 +155,9 @@ "for i in range(1, nlayers):\n", " v[..., i*int(shape[-1] / nlayers):] = vp_i[i] # Bottom velocity\n", "\n", - "qp[:] = 3.516*((v[:]*1000.)**2.2)*10**(-6) # Li's empirical formula\n", + "qp[:] = 3.516*((v[:]*1000.)**2.2)*10**(-6) # Li's empirical formula\n", "\n", - "rho[:] = 0.31*(v[:]*1000.)**0.25 # Gardner's relation" + "rho[:] = 0.31*(v[:]*1000.)**0.25 # Gardner's relation" ] }, { @@ -178,7 +178,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = ModelViscoacoustic(space_order=space_order, vp=v, qp=qp, b=1/rho,\n", " origin=origin, shape=shape, spacing=spacing,\n", " nbl=nbl)" @@ -201,7 +201,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "aspect_ratio = model.shape[0]/model.shape[1]\n", "\n", "plt_options_model = {'cmap': 'jet', 'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "f0 = 0.005 # peak/dominant frequency\n", + "f0 = 0.005 # peak/dominant frequency\n", "b = model.b\n", "rho = 1./b\n", "\n", @@ -276,6 +276,7 @@ "source": [ "from examples.seismic import Receiver\n", "\n", + "\n", "def src_rec(p, model):\n", " src = 
RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range)\n", " src.coordinates.data[0, :] = np.array(model.domain_size) * .5\n", @@ -332,7 +333,7 @@ " slices = [slice(model.nbl, -model.nbl), slice(model.nbl, -model.nbl)]\n", " scale = .5*1e-3\n", "\n", - " plt_options_model = {'extent': [model.origin[0] , model.origin[0] + model.domain_size[0],\n", + " plt_options_model = {'extent': [model.origin[0], model.origin[0] + model.domain_size[0],\n", " model.origin[1] + model.domain_size[1], model.origin[1]]}\n", "\n", " fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 7))\n", @@ -459,7 +460,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = modelling_SLS(model)" ] }, @@ -480,7 +481,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -510,7 +511,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, @@ -622,7 +623,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = modelling_KV(model)" ] }, @@ -643,7 +644,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -673,7 +674,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, @@ -782,7 +783,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec, v, p = modelling_Maxwell(model)" ] }, @@ -803,7 +804,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_receiver(rec)" ] }, @@ -833,7 +834,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_v_and_p(model, v, p)" ] }, diff --git a/examples/seismic/tutorials/12_time_blocking.ipynb b/examples/seismic/tutorials/12_time_blocking.ipynb index 4079423a32..4cc455e5fa 100644 --- a/examples/seismic/tutorials/12_time_blocking.ipynb +++ 
b/examples/seismic/tutorials/12_time_blocking.ipynb @@ -295,8 +295,10 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Install pyzfp package in the current Jupyter kernel\n", + "import sys\n", + "_ = sys.executable\n", "!{sys.executable} -m pip install blosc\n", - "import blosc" + "import blosc # noqa: E402" ] }, { @@ -401,13 +403,13 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Define dimensions for the interior of the model\n", - "nx,nz = 101,101\n", + "nx, nz = 101, 101\n", "npad = 10\n", - "dx,dz = 20.0,20.0 # Grid spacing in m\n", + "dx, dz = 20.0, 20.0 # Grid spacing in m\n", "shape = (nx, nz) # Number of grid points\n", - "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", + "spacing = (dx, dz) # Domain size is now 5 km by 5 km\n", "origin = (0., 0.) # Origin of coordinate system, specified in m.\n", - "extent = tuple([s*(n-1) for s, n in zip(spacing, shape)])\n", + "extent = tuple([s*(n-1) for s, n in zip(spacing, shape, strict=True)])\n", "\n", "# Define the dimensions\n", "x = SpaceDimension(name='x', spacing=Constant(name='h_x', value=extent[0]/(shape[0]-1)))\n", @@ -432,7 +434,7 @@ "m0 = Function(name='m0', grid=grid, space_order=space_order)\n", "b = Function(name='b', grid=grid, space_order=space_order)\n", "m0.data[:] = 1.5\n", - "b.data[:,:] = 1.0 / 1.0\n", + "b.data[:, :] = 1.0 / 1.0\n", "\n", "# Perturbation to velocity: a square offset from the center of the model\n", "dm = Function(name='dm', grid=grid, space_order=space_order)\n", @@ -460,7 +462,7 @@ "\n", "# Source 10 Hz center frequency\n", "src = RickerSource(name='src', grid=grid, f0=fpeak, npoint=1, time_range=time_range)\n", - "src.coordinates.data[0,:] = [dx * ((nx-1) / 2 - 10), dz * (nz-1) / 2]\n", + "src.coordinates.data[0, :] = [dx * ((nx-1) / 2 - 10), dz * (nz-1) / 2]\n", "\n", "# Receivers: for nonlinear forward and linearized forward\n", "# one copy each for save all and time blocking implementations\n", @@ -472,18 +474,22 @@ "nl_rec2 = Receiver(name='nl_rec2', grid=grid, npoint=nr, 
time_range=time_range)\n", "ln_rec1 = Receiver(name='ln_rec1', grid=grid, npoint=nr, time_range=time_range)\n", "ln_rec2 = Receiver(name='ln_rec2', grid=grid, npoint=nr, time_range=time_range)\n", - "nl_rec1.coordinates.data[:,0] = nl_rec2.coordinates.data[:,0] = \\\n", - " ln_rec1.coordinates.data[:,0] = ln_rec2.coordinates.data[:,0] = dx * ((nx-1) / 2 + 10)\n", - "nl_rec1.coordinates.data[:,1] = nl_rec2.coordinates.data[:,1] = \\\n", - " ln_rec1.coordinates.data[:,1] = ln_rec2.coordinates.data[:,1] = np.linspace(z1, z2, nr)\n", + "nl_rec1.coordinates.data[:, 0] = nl_rec2.coordinates.data[:, 0] = \\\n", + " ln_rec1.coordinates.data[:, 0] = ln_rec2.coordinates.data[:, 0] = dx * ((nx-1) / 2 + 10)\n", + "nl_rec1.coordinates.data[:, 1] = nl_rec2.coordinates.data[:, 1] = \\\n", + " ln_rec1.coordinates.data[:, 1] = ln_rec2.coordinates.data[:, 1] = np.linspace(z1, z2, nr)\n", "\n", "print(\"\")\n", - "print(\"src_coordinate X; %+12.4f\" % (src.coordinates.data[0,0]))\n", - "print(\"src_coordinate Z; %+12.4f\" % (src.coordinates.data[0,1]))\n", - "print(\"rec_coordinates X min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(nl_rec1.coordinates.data[:,0]), np.max(nl_rec1.coordinates.data[:,0])))\n", - "print(\"rec_coordinates Z min/max; %+12.4f %+12.4f\" % \\\n", - " (np.min(nl_rec1.coordinates.data[:,1]), np.max(nl_rec1.coordinates.data[:,1])))" + "print(f\"src_coordinate X; {src.coordinates.data[0, 0]:+12.4f}\")\n", + "print(f\"src_coordinate Z; {src.coordinates.data[0, 1]:+12.4f}\")\n", + "print(\n", + " f'rec_coordinates X min/max; {np.min(nl_rec1.coordinates.data[:, 0]):+12.4f} '\n", + " f'{np.max(nl_rec1.coordinates.data[:, 0]):+12.4f}'\n", + ")\n", + "print(\n", + " f'rec_coordinates Z min/max; {np.min(nl_rec1.coordinates.data[:, 1]):+12.4f} '\n", + " f'{np.max(nl_rec1.coordinates.data[:, 1]):+12.4f}'\n", + ")" ] }, { @@ -512,7 +518,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# note: flip sense of second dimension to make the plot 
positive downwards\n", "plt_extent = [origin[0], origin[0] + extent[0], origin[1] + extent[1], origin[1]]\n", "\n", @@ -520,16 +526,16 @@ "pmin, pmax = -1, +1\n", "dmin, dmax = 0.9, 1.1\n", "\n", - "plt.figure(figsize=(12,14))\n", + "plt.figure(figsize=(12, 14))\n", "\n", "# plot velocity\n", "plt.subplot(2, 2, 1)\n", "plt.imshow(np.transpose(m0.data), cmap=cm.jet,\n", " vmin=vmin, vmax=vmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -540,9 +546,9 @@ "plt.imshow(np.transpose(1 / b.data), cmap=cm.jet,\n", " vmin=dmin, vmax=dmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Density (m^3/kg)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -553,9 +559,9 @@ "plt.imshow(np.transpose(dm.data), cmap=\"seismic\",\n", " vmin=pmin, vmax=pmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='Velocity (m/msec)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + 
"plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -568,9 +574,9 @@ "plt.subplot(2, 2, 4)\n", "plt.imshow(np.transpose(q.data), cmap=cm.jet, vmin=lmin, vmax=lmax, extent=plt_extent)\n", "plt.colorbar(orientation='horizontal', label='log10(Q)')\n", - "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1], \\\n", + "plt.plot(nl_rec1.coordinates.data[:, 0], nl_rec1.coordinates.data[:, 1],\n", " 'black', linestyle='-', label=\"Receiver\")\n", - "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1], \\\n", + "plt.plot(src.coordinates.data[:, 0], src.coordinates.data[:, 1],\n", " 'red', linestyle='None', marker='*', markersize=15, label=\"Source\")\n", "plt.xlabel(\"X Coordinate (m)\")\n", "plt.ylabel(\"Z Coordinate (m)\")\n", @@ -659,7 +665,7 @@ "v2 = TimeFunction(name=\"v2\", grid=grid, time_order=2, space_order=space_order, save=Buffer(M))\n", "\n", "# get time and space dimensions\n", - "t,x,z = u1.dimensions\n", + "t, x, z = u1.dimensions\n", "\n", "# Source terms (see notebooks linked above for more detail)\n", "src1_term = src.inject(field=u1.forward, expr=src * t.spacing**2 * m0**2 / b)\n", @@ -669,13 +675,13 @@ "\n", "# The nonlinear forward time update equation\n", "update1 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * u1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " ((b * u1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ) * u1 + \\\n", " (t.spacing * wOverQ - 1) * u1.backward\n", "\n", "update2 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * 
u2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", + " ((b * u2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", " (b * u2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2)) + \\\n", " (2 - t.spacing * wOverQ) * u2 + \\\n", " (t.spacing * wOverQ - 1) * u2.backward\n", @@ -689,7 +695,7 @@ "\n", "# Update spacing_map (see notebooks linked above for more detail)\n", "spacing_map = grid.spacing_map\n", - "spacing_map.update({t.spacing : dt})\n", + "spacing_map.update({t.spacing: dt})\n", "\n", "# Build the Operators\n", "nl_op1 = Operator([stencil1, src1_term, nl_rec1_term, v1_term], subs=spacing_map)\n", @@ -730,9 +736,9 @@ "\n", "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(u1))\n", - "print(\"%.3e\" % norm(nl_rec1))\n", - "print(\"%.3e\" % norm(v1))\n", + "print(f\"{norm(u1):.3e}\")\n", + "print(f\"{norm(nl_rec1):.3e}\")\n", + "print(f\"{norm(v1):.3e}\")\n", "assert np.isclose(norm(u1), 4.145e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(nl_rec1), 2.669e-03, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(v1), 1.381e-02, atol=0, rtol=1e-3)" @@ -916,56 +922,52 @@ "\n", "if os.path.exists(filename):\n", " os.remove(filename)\n", - "f = open(filename, \"ab\")\n", - "\n", - "# Arrays to save offset and length of compressed data\n", - "file_offset = np.zeros(nt, dtype=np.int64)\n", - "file_length = np.zeros(nt, dtype=np.int64)\n", - "\n", - "# The length of the data type, 4 bytes for float32\n", - "itemsize = v2.data[0,:,:].dtype.itemsize\n", - "\n", - "# The length of a an uncompressed wavefield, used to compute compression ratio below\n", - "len0 = 4.0 * np.prod(v2._data[0,:,:].shape)\n", - "\n", - "# Loop over time blocks\n", - "v2_all[:] = 0\n", - "u2.data[:] = 0\n", - "v2.data[:] = 0\n", - "nl_rec2.data[:] = 0\n", - "for kN in range(0,N,1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - " 
nl_op2(time_m=kt1, time_M=kt2)\n", - "\n", - " # Copy computed Born term for correctness testing\n", - " for kt in range(kt1,kt2+1):\n", - "\n", - " # assign\n", - " v2_all[kt,:,:] = v2.data[(kt%M),:,:]\n", - "\n", - " # compression\n", - " c = blosc.compress_ptr(v2._data[(kt%M),:,:].__array_interface__['data'][0],\n", - " np.prod(v2._data[(kt%M),:,:].shape),\n", - " v2._data[(kt%M),:,:].dtype.itemsize, 9, True, 'zstd')\n", - "\n", - " # compression ratio\n", - " cratio = len0 / (1.0 * len(c))\n", - "\n", - " # serialization\n", - " file_offset[kt] = f.tell()\n", - " f.write(c)\n", - " file_length[kt] = len(c)\n", + "with open(filename, \"ab\") as f:\n", + " # Arrays to save offset and length of compressed data\n", + " file_offset = np.zeros(nt, dtype=np.int64)\n", + " file_length = np.zeros(nt, dtype=np.int64)\n", + "\n", + " # The length of the data type, 4 bytes for float32\n", + " itemsize = v2.data[0, :, :].dtype.itemsize\n", + "\n", + " # The length of a an uncompressed wavefield, used to compute compression ratio below\n", + " len0 = 4.0 * np.prod(v2._data[0, :, :].shape)\n", + "\n", + " # Loop over time blocks\n", + " v2_all[:] = 0\n", + " u2.data[:] = 0\n", + " v2.data[:] = 0\n", + " nl_rec2.data[:] = 0\n", + " for kN in range(0, N, 1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + " nl_op2(time_m=kt1, time_M=kt2)\n", + "\n", + " # Copy computed Born term for correctness testing\n", + " for kt in range(kt1, kt2+1):\n", + "\n", + " # assign\n", + " v2_all[kt, :, :] = v2.data[(kt % M), :, :]\n", + "\n", + " # compression\n", + " c = blosc.compress_ptr(v2._data[(kt % M), :, :].__array_interface__['data'][0],\n", + " np.prod(v2._data[(kt % M), :, :].shape),\n", + " v2._data[(kt % M), :, :].dtype.itemsize, 9, True, 'zstd')\n", + "\n", + " # compression ratio\n", + " cratio = len0 / (1.0 * len(c))\n", + "\n", + " # serialization\n", + " file_offset[kt] = f.tell()\n", + " f.write(c)\n", + " file_length[kt] = len(c)\n", "\n", " 
# Uncomment these lines to see per time step output\n", "# rms_v1 = np.linalg.norm(v1.data[kt,:,:].reshape(-1))\n", "# rms_v2 = np.linalg.norm(v2_all[kt,:,:].reshape(-1))\n", "# rms_12 = np.linalg.norm(v1.data[kt,:,:].reshape(-1) - v2_all[kt,:,:].reshape(-1))\n", "# print(\"kt1,kt2,len,cratio,|u1|,|u2|,|v1-v2|; %3d %3d %3d %10.4f %12.6e %12.6e %12.6e\" %\n", - "# (kt1, kt2, kt2 - kt1 + 1, cratio, rms_v1, rms_v2, rms_12), flush=True)\n", - "\n", - "# Close the binary file\n", - "f.close()" + "# (kt1, kt2, kt2 - kt1 + 1, cratio, rms_v1, rms_v2, rms_12), flush=True)" ] }, { @@ -988,8 +990,8 @@ "# Continuous integration hooks for the time blocking implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(nl_rec1))\n", - "print(\"%.3e\" % np.linalg.norm(v2_all))\n", + "print(f\"{norm(nl_rec1):.3e}\")\n", + "print(f\"{np.linalg.norm(v2_all):.3e}\")\n", "assert np.isclose(norm(nl_rec1), 2.669e-03, atol=0, rtol=1e-3)\n", "assert np.isclose(np.linalg.norm(v2_all), 1.381e-02, atol=0, rtol=1e-3)" ] @@ -1065,14 +1067,14 @@ "\n", "# The Jacobian linearized forward time update equation\n", "update1 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * duFwd1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", - " (b * duFwd1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + \\\n", + " ((b * duFwd1.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", + " (b * duFwd1.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) +\n", " (dm * v1)) + (2 - t.spacing * wOverQ) * duFwd1 + \\\n", " (t.spacing * wOverQ - 1) * duFwd1.backward\n", "\n", "update2 = (t.spacing**2 * m0**2 / b) * \\\n", - " ((b * duFwd2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) + \\\n", - " (b * duFwd2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) + \\\n", + " ((b * duFwd2.dx(x0=x+x.spacing/2)).dx(x0=x-x.spacing/2) +\n", + " (b * duFwd2.dz(x0=z+z.spacing/2)).dz(x0=z-z.spacing/2) +\n", " (dm * v2)) + (2 - 
t.spacing * wOverQ) * duFwd2 + \\\n", " (t.spacing * wOverQ - 1) * duFwd2.backward\n", "\n", @@ -1110,8 +1112,8 @@ "\n", "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(duFwd1))\n", - "print(\"%.3e\" % norm(ln_rec1))\n", + "print(f\"{norm(duFwd1):.3e}\")\n", + "print(f\"{norm(ln_rec1):.3e}\")\n", "assert np.isclose(norm(duFwd1), 6.438e+00, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(ln_rec1), 2.681e-02, atol=0, rtol=1e-3)" ] @@ -1285,32 +1287,31 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Open the binary file in read only mode\n", - "f = open(filename, \"rb\")\n", - "\n", - "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0,:,:])\n", - "\n", - "# Array to hold compression ratio\n", - "cratio = np.zeros(nt, dtype=dtype)\n", - "\n", - "# Loop over time blocks\n", - "duFwd2.data[:] = 0\n", - "ln_rec2.data[:] = 0\n", - "for kN in range(0,N,1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - "\n", - " # 1. Seek to file_offset[kt]\n", - " # 2. Read file_length[kt1] bytes from file\n", - " # 3. 
Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1,kt2+1):\n", - " f.seek(file_offset[kt], 0)\n", - " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt%M),:,:].__array_interface__['data'][0])\n", - " cratio[kt] = len0 / (1.0 * len(c))\n", - "\n", - " # Run the operator for this time block\n", - " lf_op2(time_m=kt1, time_M=kt2)\n", + "with open(filename, \"rb\") as f:\n", + " # Temporary nd array for decompression\n", + " d = copy.copy(v2._data[0, :, :])\n", + "\n", + " # Array to hold compression ratio\n", + " cratio = np.zeros(nt, dtype=dtype)\n", + "\n", + " # Loop over time blocks\n", + " duFwd2.data[:] = 0\n", + " ln_rec2.data[:] = 0\n", + " for kN in range(0, N, 1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + "\n", + " # 1. Seek to file_offset[kt]\n", + " # 2. Read file_length[kt1] bytes from file\n", + " # 3. Decompress wavefield and assign to v2 Buffer\n", + " for kt in range(kt1, kt2+1):\n", + " f.seek(file_offset[kt], 0)\n", + " c = f.read(file_length[kt])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", + " cratio[kt] = len0 / (1.0 * len(c))\n", + "\n", + " # Run the operator for this time block\n", + " lf_op2(time_m=kt1, time_M=kt2)\n", "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt1,kt2+1):\n", @@ -1341,8 +1342,8 @@ "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(duFwd2))\n", - "print(\"%.3e\" % norm(ln_rec2))\n", + "print(f\"{norm(duFwd2):.3e}\")\n", + "print(f\"{norm(ln_rec2):.3e}\")\n", "assert np.isclose(norm(duFwd2), 6.438e+00, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(ln_rec2), 2.681e-02, atol=0, rtol=1e-3)" ] @@ -1472,8 +1473,8 @@ "\n", "# Continuous integration hooks 
for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", - "print(\"%.3e\" % norm(duAdj1))\n", - "print(\"%.3e\" % norm(dm1))\n", + "print(f\"{norm(duAdj1):.3e}\")\n", + "print(f\"{norm(dm1):.3e}\")\n", "assert np.isclose(norm(duAdj1), 4.626e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(dm1), 1.426e-04, atol=0, rtol=1e-3)" ] @@ -1644,32 +1645,31 @@ "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Open the binary file in read only mode\n", - "f = open(filename, \"rb\")\n", - "\n", - "# Temporary nd array for decompression\n", - "d = copy.copy(v2._data[0,:,:])\n", - "\n", - "# Array to hold compression ratio\n", - "cratio = np.zeros(nt, dtype=dtype)\n", - "\n", - "# Loop over time blocks\n", - "duAdj2.data[:] = 0\n", - "dm2.data[:] = 0\n", - "for kN in range(N-1,-1,-1):\n", - " kt1 = max((kN + 0) * M, 1)\n", - " kt2 = min((kN + 1) * M - 1, nt-2)\n", - "\n", - " # 1. Seek to file_offset[kt]\n", - " # 2. Read file_length[kt1] bytes from file\n", - " # 3. Decompress wavefield and assign to v2 Buffer\n", - " for kt in range(kt1,kt2+1,+1):\n", - " f.seek(file_offset[kt], 0)\n", - " c = f.read(file_length[kt])\n", - " blosc.decompress_ptr(c, v2._data[(kt%M),:,:].__array_interface__['data'][0])\n", - " cratio[kt] = len0 / (1.0 * len(c))\n", - "\n", - " # Run the operator for this time block\n", - " la_op2(time_m=kt1, time_M=kt2)\n", + "with open(filename, \"rb\") as f:\n", + " # Temporary nd array for decompression\n", + " d = copy.copy(v2._data[0, :, :])\n", + "\n", + " # Array to hold compression ratio\n", + " cratio = np.zeros(nt, dtype=dtype)\n", + "\n", + " # Loop over time blocks\n", + " duAdj2.data[:] = 0\n", + " dm2.data[:] = 0\n", + " for kN in range(N-1, -1, -1):\n", + " kt1 = max((kN + 0) * M, 1)\n", + " kt2 = min((kN + 1) * M - 1, nt-2)\n", + "\n", + " # 1. Seek to file_offset[kt]\n", + " # 2. Read file_length[kt1] bytes from file\n", + " # 3. 
Decompress wavefield and assign to v2 Buffer\n", + " for kt in range(kt1, kt2+1, +1):\n", + " f.seek(file_offset[kt], 0)\n", + " c = f.read(file_length[kt])\n", + " blosc.decompress_ptr(c, v2._data[(kt % M), :, :].__array_interface__['data'][0])\n", + " cratio[kt] = len0 / (1.0 * len(c))\n", + "\n", + " # Run the operator for this time block\n", + " la_op2(time_m=kt1, time_M=kt2)\n", "\n", " # Uncomment these lines to see per time step outputs\n", "# for kt in range(kt2,kt1-1,-1):\n", @@ -1700,8 +1700,8 @@ "# Continuous integration hooks for the save all timesteps implementation\n", "# We ensure the norm of these computed wavefields is repeatable\n", "# Note these are exactly the same norm values as the save all timesteps check above\n", - "print(\"%.3e\" % norm(duAdj2))\n", - "print(\"%.3e\" % norm(dm2))\n", + "print(f\"{norm(duAdj2):.3e}\")\n", + "print(f\"{norm(dm2):.3e}\")\n", "assert np.isclose(norm(duAdj2), 4.626e+01, atol=0, rtol=1e-3)\n", "assert np.isclose(norm(dm2), 1.426e-04, atol=0, rtol=1e-3)" ] @@ -1737,8 +1737,7 @@ "norm_dm1 = np.linalg.norm(dm1.data.reshape(-1))\n", "norm_dm12 = np.linalg.norm(dm1.data.reshape(-1) - dm2.data.reshape(-1))\n", "\n", - "print(\"Relative norm of difference wavefield,gradient; %+.4e %+.4e\" %\n", - " (norm_du12 / norm_du1, norm_dm12 /norm_dm1))\n", + "print(f\"Relative norm of difference wavefield,gradient; {norm_du12 / norm_du1:+.4e} {norm_dm12 /norm_dm1:+.4e}\")\n", "\n", "assert norm_du12 / norm_du1 < 1e-7\n", "assert norm_dm12 / norm_dm1 < 1e-7" diff --git a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb index 9e3f0ee186..d2964f150f 100644 --- a/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb +++ b/examples/seismic/tutorials/13_LSRTM_acoustic.ipynb @@ -126,7 +126,7 @@ "%matplotlib inline\n", "import numpy as np\n", "\n", - "from devito import Operator,Eq,norm\n", + "from devito import Operator, Eq, norm\n", "from devito import Function\n", "from devito 
import gaussian_smooth\n", "from devito import mmax\n", @@ -135,7 +135,7 @@ "from examples.seismic import Model\n", "from examples.seismic import plot_velocity\n", "from examples.seismic import Receiver\n", - "from examples.seismic import plot_image,AcquisitionGeometry\n", + "from examples.seismic import AcquisitionGeometry\n", "from examples.seismic import TimeAxis\n", "\n", "from examples.seismic.self_adjoint import (setup_w_over_q)\n", @@ -172,40 +172,40 @@ "spacing = (10., 10.) # Grid spacing in m. The domain size is now 1km by 1km\n", "origin = (0., 0.) # What is the location of the top left corner. This is necessary to define\n", "\n", - "fpeak = 0.025# Source peak frequency is 25Hz (0.025 kHz)\n", + "fpeak = 0.025 # Source peak frequency is 25Hz (0.025 kHz)\n", "t0w = 1.0 / fpeak\n", "omega = 2.0 * np.pi * fpeak\n", "qmin = 0.1\n", "qmax = 100000\n", - "npad=50\n", + "npad = 50\n", "dtype = np.float32\n", "\n", "nshots = 21\n", "nreceivers = 101\n", "t0 = 0.\n", "tn = 1000. # Simulation last 1 second (1000 ms)\n", - "filter_sigma = (5, 5) # Filter's length\n", + "filter_sigma = (5, 5) # Filter's length\n", "\n", "v = np.empty(shape, dtype=dtype)\n", "\n", "# Define a velocity profile. 
The velocity is in km/s\n", "vp_top = 1.5\n", "\n", - "v[:] = vp_top # Top velocity\n", - "v[:, 30:65]= vp_top +0.5\n", - "v[:, 65:101]= vp_top +1.5\n", - "v[40:60, 35:55]= vp_top+1\n", + "v[:] = vp_top # Top velocity\n", + "v[:, 30:65] = vp_top +0.5\n", + "v[:, 65:101] = vp_top +1.5\n", + "v[40:60, 35:55] = vp_top+1\n", "\n", "init_damp = lambda func, nbl: setup_w_over_q(func, omega, qmin, qmax, npad, sigma=0)\n", "model = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", - " space_order=8, bcs=init_damp,nbl=npad,dtype=dtype)\n", + " space_order=8, bcs=init_damp, nbl=npad, dtype=dtype)\n", "model0 = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n", - " space_order=8, bcs=init_damp,nbl=npad,dtype=dtype)\n", + " space_order=8, bcs=init_damp, nbl=npad, dtype=dtype)\n", "\n", "dt = model.critical_dt\n", "s = model.grid.stepping_dim.spacing\n", "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", - "nt=time_range.num" + "nt = time_range.num" ] }, { @@ -235,7 +235,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Create initial model and smooth the boundaries\n", "gaussian_smooth(model0.vp, sigma=filter_sigma)\n", "# Plot the true and initial model\n", @@ -282,7 +282,7 @@ "source": [ "source_locations = np.empty((nshots, 2), dtype=dtype)\n", "source_locations[:, 0] = np.linspace(0., 1000, num=nshots)\n", - "source_locations[:, 1] = 30. # Depth is 30m" + "source_locations[:, 1] = 30. 
# Depth is 30m" ] }, { @@ -303,44 +303,44 @@ " residual = Receiver(name='residual', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", - " d_obs = Receiver(name='d_obs', grid=model.grid,time_range=geometry.time_axis,\n", + " d_obs = Receiver(name='d_obs', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", - " d_syn = Receiver(name='d_syn', grid=model.grid,time_range=geometry.time_axis,\n", + " d_syn = Receiver(name='d_syn', grid=model.grid, time_range=geometry.time_axis,\n", " coordinates=geometry.rec_positions)\n", "\n", " grad_full = Function(name='grad_full', grid=model.grid)\n", "\n", " grad_illum = Function(name='grad_illum', grid=model.grid)\n", "\n", - " src_illum = Function (name =\"src_illum\", grid = model.grid)\n", + " src_illum = Function(name=\"src_illum\", grid=model.grid)\n", "\n", " # Using devito's reference of virtual source\n", - " dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))\n", + " dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))\n", "\n", " objective = 0.\n", " u0 = None\n", " for i in range(nshots):\n", "\n", - " #Observed Data using Born's operator\n", + " # Observed Data using Born's operator\n", " geometry.src_positions[0, :] = source_locations[i, :]\n", "\n", " _, u0, _ = solver.forward(vp=model0.vp, save=True, u=u0)\n", "\n", - " _, _, _,_ = solver.jacobian(dm_true, vp=model0.vp, rec = d_obs)\n", + " _, _, _, _ = solver.jacobian(dm_true, vp=model0.vp, rec=d_obs)\n", "\n", - " #Calculated Data using Born's operator\n", - " solver.jacobian(dm, vp=model0.vp, rec = d_syn)\n", + " # Calculated Data using Born's operator\n", + " solver.jacobian(dm, vp=model0.vp, rec=d_syn)\n", "\n", " residual.data[:] = d_syn.data[:]- d_obs.data[:]\n", "\n", - " grad_shot,_ = solver.gradient(rec=residual, u=u0, vp=model0.vp)\n", + " grad_shot, _ = solver.gradient(rec=residual, u=u0, vp=model0.vp)\n", "\n", - " src_illum_upd = 
Eq(src_illum, src_illum + u0**2)\n", + " src_illum_upd = Eq(src_illum, src_illum + u0**2)\n", " op_src = Operator([src_illum_upd])\n", " op_src.apply()\n", "\n", - " grad_sum = Eq(grad_full, grad_full + grad_shot)\n", + " grad_sum = Eq(grad_full, grad_full + grad_shot)\n", " op_grad = Operator([grad_sum])\n", " op_grad.apply()\n", "\n", @@ -350,7 +350,7 @@ " op_gradf = Operator([grad_f])\n", " op_gradf.apply()\n", "\n", - " return objective,grad_illum,d_obs,d_syn" + " return objective, grad_illum, d_obs, d_syn" ] }, { @@ -383,8 +383,7 @@ "metadata": {}, "outputs": [], "source": [ - "def get_alfa(grad_iter,image_iter,niter_lsrtm):\n", - "\n", + "def get_alfa(grad_iter, image_iter, niter_lsrtm):\n", "\n", " term1 = np.dot(image_iter.reshape(-1), image_iter.reshape(-1))\n", "\n", @@ -403,10 +402,7 @@ "\n", " abb3 = abb2 / abb1\n", "\n", - " if abb3 > 0 and abb3 < 1:\n", - " alfa = abb2\n", - " else:\n", - " alfa = abb1\n", + " alfa = abb2 if abb3 > 0 and abb3 < 1 else abb1\n", "\n", " return alfa" ] @@ -565,30 +561,30 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "image_up_dev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]),dtype)\n", + "# NBVAL_IGNORE_OUTPUT\n", + "image_up_dev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]), dtype)\n", "\n", "image = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "nrec=101\n", - "niter=20 # number of iterations of the LSRTM\n", - "history = np.zeros((niter, 1)) #objective function\n", + "nrec = 101\n", + "niter = 20 # number of iterations of the LSRTM\n", + "history = np.zeros((niter, 1)) # objective function\n", "\n", - "image_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "image_prev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "grad_prev = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "grad_prev = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "yk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "yk = np.zeros((model0.vp.shape[0], 
model0.vp.shape[1]))\n", "\n", - "sk = np.zeros((model0.vp.shape[0],model0.vp.shape[1]))\n", + "sk = np.zeros((model0.vp.shape[0], model0.vp.shape[1]))\n", "\n", - "for k in range(niter) :\n", + "for k in range(niter):\n", "\n", - " dm = image_up_dev # Reflectivity for Calculated data via Born\n", + " dm = image_up_dev # Reflectivity for Calculated data via Born\n", "\n", - " print('LSRTM Iteration',k+1)\n", + " print('LSRTM Iteration', k+1)\n", "\n", - " objective,grad_full,d_obs,d_syn = lsrtm_gradient(dm)\n", + " objective, grad_full, d_obs, d_syn = lsrtm_gradient(dm)\n", "\n", " history[k] = objective\n", "\n", @@ -596,7 +592,7 @@ "\n", " sk = image_up_dev - image_prev\n", "\n", - " alfa = get_alfa(yk,sk,k)\n", + " alfa = get_alfa(yk, sk, k)\n", "\n", " grad_prev = grad_full.data\n", "\n", @@ -604,7 +600,7 @@ "\n", " image_up_dev = image_up_dev - alfa*grad_full.data\n", "\n", - " if k == 0: # Saving the first migration using Born operator.\n", + " if k == 0: # Saving the first migration using Born operator.\n", "\n", " image = image_up_dev" ] @@ -626,7 +622,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "plt.figure()\n", "plt.plot(history)\n", "plt.xlabel('Iteration number')\n", @@ -659,7 +655,7 @@ " plot = plt.imshow(np.transpose(data),\n", " vmin=-.05,\n", " vmax=.05,\n", - " cmap=cmap,extent=extent)\n", + " cmap=cmap, extent=extent)\n", "\n", " plt.xlabel('X position (km)')\n", " plt.ylabel('Depth (km)')\n", @@ -697,8 +693,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_IGNORE_OUTPUT\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "rtm = image[slices]\n", "plot_image(np.diff(rtm, axis=1))" ] @@ -727,8 +723,8 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_SKIP\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "lsrtm = image_up_dev[slices]\n", 
"plot_image(np.diff(lsrtm, axis=1))" ] @@ -757,8 +753,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "slices=tuple(slice(model.nbl,-model.nbl) for _ in range(2))\n", + "# NBVAL_IGNORE_OUTPUT\n", + "slices = tuple(slice(model.nbl, -model.nbl) for _ in range(2))\n", "dm_true = (solver.model.vp.data**(-2) - model0.vp.data**(-2))[slices]\n", "plot_image(np.diff(dm_true, axis=1))" ] @@ -780,14 +776,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", - "plt.figure(figsize=(8,9))\n", - "x = np.linspace(0,1,101)\n", - "plt.plot(rtm[50,:],x,color=plt.gray(),linewidth=2)\n", - "plt.plot(lsrtm[50,:],x,'r',linewidth=2)\n", - "plt.plot(dm_true[50,:],x, 'k--',linewidth=2)\n", - "\n", - "plt.legend(['Initial reflectivity', 'Reflectivity via LSRTM','True Reflectivity'],fontsize=15)\n", + "# NBVAL_SKIP\n", + "plt.figure(figsize=(8, 9))\n", + "x = np.linspace(0, 1, 101)\n", + "plt.plot(rtm[50, :], x, color=plt.gray(), linewidth=2)\n", + "plt.plot(lsrtm[50, :], x, 'r', linewidth=2)\n", + "plt.plot(dm_true[50, :], x, 'k--', linewidth=2)\n", + "\n", + "plt.legend(['Initial reflectivity', 'Reflectivity via LSRTM', 'True Reflectivity'], fontsize=15)\n", "plt.ylabel('Depth (Km)')\n", "plt.xlabel('Amplitude')\n", "plt.gca().invert_yaxis()\n", @@ -811,14 +807,14 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "time = np.linspace(t0, tn, nt)\n", - "plt.figure(figsize=(8,9))\n", + "plt.figure(figsize=(8, 9))\n", "plt.ylabel('Time (ms)')\n", "plt.xlabel('Amplitude')\n", - "plt.plot(d_syn.data[:, 20],time, 'y', label='Calculated data (Last Iteration)')\n", - "plt.plot(d_obs.data[:, 20],time, 'm', label='Observed data')\n", - "plt.legend(loc=\"upper left\",fontsize=12)\n", + "plt.plot(d_syn.data[:, 20], time, 'y', label='Calculated data (Last Iteration)')\n", + "plt.plot(d_obs.data[:, 20], time, 'm', label='Observed data')\n", + "plt.legend(loc=\"upper left\", fontsize=12)\n", "ax = plt.gca()\n", "ax.invert_yaxis()\n", "plt.show()" diff --git 
a/examples/seismic/tutorials/14_creating_synthetics.ipynb b/examples/seismic/tutorials/14_creating_synthetics.ipynb index 86dc692d1d..287a0caefa 100644 --- a/examples/seismic/tutorials/14_creating_synthetics.ipynb +++ b/examples/seismic/tutorials/14_creating_synthetics.ipynb @@ -69,7 +69,7 @@ "\n", "try:\n", " # Import jinja2 (used for colour coding geology)\n", - " import jinja2\n", + " import jinja2 # noqa: F401\n", "except ModuleNotFoundError:\n", " # Install jinja2\n", " ! pip install jinja2\n", @@ -77,7 +77,7 @@ "\n", "try:\n", " # Check vtk notebook backend is installed\n", - " import ipyvtklink\n", + " import ipyvtklink # noqa: F401\n", "except ModuleNotFoundError:\n", " ! pip install ipyvtklink" ] @@ -333,9 +333,10 @@ " \"\"\"Add a list of points to a surface in a model\"\"\"\n", " xyz = ('X', 'Y', 'Z')\n", " for point in points:\n", - " kwargs = {**dict(zip(xyz, point)), 'surface': surface}\n", + " kwargs = {**dict(zip(xyz, point, strict=True)), 'surface': surface}\n", " model.add_surface_points(**kwargs)\n", "\n", + "\n", "# The points defining the base of the sand layer\n", "sand_points = [(322, 135, -783), (635, 702, -791), (221, 668, -772), (732, 235, -801), (442, 454, -702)]\n", "\n", @@ -1139,7 +1140,15 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "seis_model = Model(vp=reshaped, origin=(0., 0., -1000.), spacing=(10., 10., 10.), shape=shape, nbl=30, space_order=4, bcs=\"damp\")" + "seis_model = Model(\n", + " vp=reshaped,\n", + " origin=(0., 0., -1000.),\n", + " spacing=(10., 10., 10.),\n", + " shape=shape,\n", + " nbl=30,\n", + " space_order=4,\n", + " bcs=\"damp\"\n", + ")" ] }, { diff --git a/examples/seismic/tutorials/15_tti_qp_pure.ipynb b/examples/seismic/tutorials/15_tti_qp_pure.ipynb index c936aae461..2601fc3be9 100644 --- a/examples/seismic/tutorials/15_tti_qp_pure.ipynb +++ b/examples/seismic/tutorials/15_tti_qp_pure.ipynb @@ -69,19 +69,19 @@ "source": [ "# NBVAL_IGNORE_OUTPUT\n", "\n", - "shape = (101,101) # 101x101 grid\n", - "spacing = 
(10.,10.) # spacing of 10 meters\n", - "origin = (0.,0.)\n", + "shape = (101, 101) # 101x101 grid\n", + "spacing = (10., 10.) # spacing of 10 meters\n", + "origin = (0., 0.)\n", "nbl = 0 # number of pad points\n", "\n", "model = demo_model('layers-tti', spacing=spacing, space_order=8,\n", " shape=shape, nbl=nbl, nlayers=1)\n", "\n", "# initialize Thomsem parameters to those used in Mu et al., (2020)\n", - "model.update('vp', np.ones(shape)*3.6) # km/s\n", + "model.update('vp', np.ones(shape)*3.6) # km/s\n", "model.update('epsilon', np.ones(shape)*0.23)\n", "model.update('delta', np.ones(shape)*0.17)\n", - "model.update('theta', np.ones(shape)*(45.*(np.pi/180.))) # radians" + "model.update('theta', np.ones(shape)*(45.*(np.pi/180.))) # radians" ] }, { @@ -106,8 +106,8 @@ "m = model.m\n", "\n", "# Use trigonometric functions from Devito\n", - "costheta = cos(theta)\n", - "sintheta = sin(theta)\n", + "costheta = cos(theta)\n", + "sintheta = sin(theta)\n", "cos2theta = cos(2*theta)\n", "sin2theta = sin(2*theta)\n", "sin4theta = sin(4*theta)" @@ -168,10 +168,10 @@ ], "source": [ "# Compute the dt and set time range\n", - "t0 = 0. # Simulation time start\n", - "tn = 150. # Simulation time end (0.15 second = 150 msec)\n", - "dt = (dvalue/(np.pi*vmax))*np.sqrt(1/(1+etamax*(max_cos_sin)**2)) # eq. above (cell 3)\n", - "time_range = TimeAxis(start=t0,stop=tn,step=dt)\n", + "t0 = 0. # Simulation time start\n", + "tn = 150. # Simulation time end (0.15 second = 150 msec)\n", + "dt = (dvalue/(np.pi*vmax))*np.sqrt(1/(1+etamax*(max_cos_sin)**2)) # eq. 
above (cell 3)\n", + "time_range = TimeAxis(start=t0, stop=tn, step=dt)\n", "print(\"time_range; \", time_range)" ] }, @@ -200,7 +200,7 @@ "term1_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*costheta**4)*q.dx4\n", "term2_p = (1 + 2*delta*(sintheta**2)*(costheta**2) + 2*epsilon*sintheta**4)*q.dy4\n", "term3_p = (2-delta*(sin2theta)**2 + 3*epsilon*(sin2theta)**2 + 2*delta*(cos2theta)**2)*((q.dy2).dx2)\n", - "term4_p = ( delta*sin4theta - 4*epsilon*sin2theta*costheta**2)*((q.dy).dx3)\n", + "term4_p = (delta*sin4theta - 4*epsilon*sin2theta*costheta**2)*((q.dy).dx3)\n", "term5_p = (-delta*sin4theta - 4*epsilon*sin2theta*sintheta**2)*((q.dy3).dx)\n", "\n", "stencil_p = solve(m*p.dt2 - (term1_p + term2_p + term3_p + term4_p + term5_p), p.forward)\n", @@ -214,30 +214,36 @@ "x, z = model.grid.dimensions\n", "t = model.grid.stepping_dim\n", "\n", - "update_q = Eq( pp[t+1,x,z],((pp[t,x+1,z] + pp[t,x-1,z])*z.spacing**2 + (pp[t,x,z+1] + pp[t,x,z-1])*x.spacing**2 -\n", - " b[x,z]*x.spacing**2*z.spacing**2) / (2*(x.spacing**2 + z.spacing**2)))\n", + "update_q = Eq(\n", + " pp[t+1, x, z],\n", + " (\n", + " (pp[t, x+1, z] + pp[t, x-1, z])*z.spacing**2\n", + " + (pp[t, x, z+1] + pp[t, x, z-1])*x.spacing**2\n", + " - b[x, z]*x.spacing**2*z.spacing**2\n", + " ) / (2*(x.spacing**2 + z.spacing**2))\n", + ")\n", "\n", - "bc = [Eq(pp[t+1,x, 0], 0.)]\n", - "bc += [Eq(pp[t+1,x, shape[1]+2*nbl-1], 0.)]\n", - "bc += [Eq(pp[t+1,0, z], 0.)]\n", - "bc += [Eq(pp[t+1,shape[0]-1+2*nbl, z], 0.)]\n", + "bc = [Eq(pp[t+1, x, 0], 0.)]\n", + "bc += [Eq(pp[t+1, x, shape[1]+2*nbl-1], 0.)]\n", + "bc += [Eq(pp[t+1, 0, z], 0.)]\n", + "bc += [Eq(pp[t+1, shape[0]-1+2*nbl, z], 0.)]\n", "\n", "# set source and receivers\n", - "src = RickerSource(name='src',grid=model.grid,f0=0.02,npoint=1,time_range=time_range)\n", - "src.coordinates.data[:,0] = model.domain_size[0]* .5\n", - "src.coordinates.data[:,1] = model.domain_size[0]* .5\n", + "src = RickerSource(name='src', grid=model.grid, f0=0.02, npoint=1, 
time_range=time_range)\n", + "src.coordinates.data[:, 0] = model.domain_size[0]* .5\n", + "src.coordinates.data[:, 1] = model.domain_size[0]* .5\n", "# Define the source injection\n", - "src_term = src.inject(field=p.forward,expr=src * dt**2 / m)\n", + "src_term = src.inject(field=p.forward, expr=src * dt**2 / m)\n", "\n", - "rec = Receiver(name='rec',grid=model.grid,npoint=shape[0],time_range=time_range)\n", - "rec.coordinates.data[:, 0] = np.linspace(model.origin[0],model.domain_size[0], num=model.shape[0])\n", + "rec = Receiver(name='rec', grid=model.grid, npoint=shape[0], time_range=time_range)\n", + "rec.coordinates.data[:, 0] = np.linspace(model.origin[0], model.domain_size[0], num=model.shape[0])\n", "rec.coordinates.data[:, 1] = 2*spacing[1]\n", "# Create interpolation expression for receivers\n", "rec_term = rec.interpolate(expr=p.forward)\n", "\n", "# Operators\n", - "optime=Operator([update_p] + src_term + rec_term)\n", - "oppres=Operator([update_q] + bc)\n", + "optime = Operator([update_p] + src_term + rec_term)\n", + "oppres = Operator([update_q] + bc)\n", "\n", "# you can print the generated code for both operators by typing print(optime) and print(oppres)" ] @@ -2625,17 +2631,17 @@ ], "source": [ "# NBVAL_IGNORE_OUTPUT\n", - "psave =np.empty ((time_range.num,model.grid.shape[0],model.grid.shape[1]))\n", + "psave = np.empty((time_range.num, model.grid.shape[0], model.grid.shape[1]))\n", "niter_poisson = 1200\n", "\n", "# This is the time loop.\n", - "for step in range(0,time_range.num-2):\n", - " q.data[:,:]=pp.data[(niter_poisson+1)%2,:,:]\n", + "for step in range(0, time_range.num-2):\n", + " q.data[:, :] = pp.data[(niter_poisson+1) % 2, :, :]\n", " optime(time_m=step, time_M=step, dt=dt)\n", - " pp.data[:,:]=0.\n", - " b.data[:,:]=p.data[(step+1)%3,:,:]\n", - " oppres(time_M = niter_poisson)\n", - " psave[step,:,:]=p.data[(step+1)%3,:,:]" + " pp.data[:, :] = 0.\n", + " b.data[:, :] = p.data[(step+1) % 3, :, :]\n", + " 
oppres(time_M=niter_poisson)\n", + " psave[step, :, :] = p.data[(step+1) % 3, :, :]" ] }, { @@ -2646,10 +2652,10 @@ "outputs": [], "source": [ "# Some useful definitions for plotting if nbl is set to any other value than zero\n", - "nxpad,nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\n", - "shape_pad = np.array(shape) + 2 * nbl\n", - "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing)])\n", - "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad)])" + "nxpad, nzpad = shape[0] + 2 * nbl, shape[1] + 2 * nbl\n", + "shape_pad = np.array(shape) + 2 * nbl\n", + "origin_pad = tuple([o - s*nbl for o, s in zip(origin, spacing, strict=True)])\n", + "extent_pad = tuple([s*(n-1) for s, n in zip(spacing, shape_pad, strict=True)])" ] }, { @@ -2686,7 +2692,7 @@ "\n", "# Plot the wavefields, each normalized to scaled maximum of last time step\n", "kt = (time_range.num - 2) - 1\n", - "amax = 0.05 * np.max(np.abs(psave[kt,:,:]))\n", + "amax = 0.05 * np.max(np.abs(psave[kt, :, :]))\n", "\n", "nsnaps = 10\n", "factor = round(time_range.num/nsnaps)\n", @@ -2695,17 +2701,17 @@ "fig.suptitle(\"Snapshots\", size=14)\n", "for count, ax in enumerate(axes.ravel()):\n", " snapshot = factor*count\n", - " ax.imshow(np.transpose(psave[snapshot,:,:]), cmap=\"seismic\",\n", + " ax.imshow(np.transpose(psave[snapshot, :, :]), cmap=\"seismic\",\n", " vmin=-amax, vmax=+amax, extent=plt_extent)\n", - " ax.plot(model.domain_size[0]* .5, model.domain_size[1]* .5, \\\n", + " ax.plot(model.domain_size[0]* .5, model.domain_size[1]* .5,\n", " 'red', linestyle='None', marker='*', markersize=8, label=\"Source\")\n", " ax.grid()\n", - " ax.tick_params('both', length=2, width=0.5, which='major',labelsize=10)\n", - " ax.set_title(\"Wavefield at t=%.2fms\" % (factor*count*dt),fontsize=10)\n", + " ax.tick_params('both', length=2, width=0.5, which='major', labelsize=10)\n", + " ax.set_title(\"Wavefield at t=%.2fms\" % (factor*count*dt), fontsize=10)\n", "for ax in axes[1, :]:\n", - " 
ax.set_xlabel(\"X Coordinate (m)\",fontsize=10)\n", + " ax.set_xlabel(\"X Coordinate (m)\", fontsize=10)\n", "for ax in axes[:, 0]:\n", - " ax.set_ylabel(\"Z Coordinate (m)\",fontsize=10)" + " ax.set_ylabel(\"Z Coordinate (m)\", fontsize=10)" ] }, { diff --git a/examples/seismic/tutorials/16_ader_fd.ipynb b/examples/seismic/tutorials/16_ader_fd.ipynb index c98c83d15f..697ee9f8e3 100644 --- a/examples/seismic/tutorials/16_ader_fd.ipynb +++ b/examples/seismic/tutorials/16_ader_fd.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Necessary imports\n", "import devito as dv\n", "import sympy as sp\n", @@ -132,20 +132,26 @@ " return sp.Matrix([[f[0].dx2 + f[1].dxdy],\n", " [f[0].dxdy + f[1].dy2]])\n", "\n", + "\n", "def lapdiv(f):\n", " return f[0].dx3 + f[0].dxdy2 + f[1].dx2dy + f[1].dy3\n", "\n", + "\n", "def gradlap(f):\n", " return sp.Matrix([[f.dx3 + f.dxdy2],\n", " [f.dx2dy + f.dy3]])\n", "\n", + "\n", "def gradlapdiv(f):\n", " return sp.Matrix([[f[0].dx4 + f[0].dx2dy2 + f[1].dx3dy + f[1].dxdy3],\n", " [f[0].dx3dy + f[0].dxdy3 + f[1].dx2dy2 + f[1].dy4]])\\\n", "\n", + "\n", + "\n", "def biharmonic(f):\n", " return f.dx4 + 2*f.dx2dy2 + f.dy4\n", "\n", + "\n", "# First time derivatives\n", "pdt = rho*c2*dv.div(v)\n", "vdt = b*dv.grad(p)\n", @@ -230,7 +236,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "t0 = 0. # Simulation starts a t=0\n", "tn = 450. 
# Simulation last 0.45 seconds (450 ms)\n", "\n", @@ -283,7 +289,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "src_term = src.inject(field=p.forward, expr=src)\n", "\n", "op = dv.Operator([eq_p, eq_v] + src_term)\n", @@ -308,7 +314,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "extent = [0, 1000, 1000, 0]\n", "vmax = np.abs(np.amax(p.data[-1]))\n", "plt.imshow(c.data.T, cmap='Greys', extent=extent)\n", @@ -355,7 +361,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "ps = dv.TimeFunction(name='ps', grid=grid, space_order=16, staggered=dv.NODE)\n", "vs = dv.VectorTimeFunction(name='vs', grid=grid, space_order=16)\n", "\n", @@ -390,7 +396,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = np.abs(np.amax(ps.data[-1]))\n", "plt.imshow(c.data.T, cmap='Greys', extent=extent)\n", "plt.imshow(ps.data[-1].T, cmap='seismic', alpha=0.75, extent=extent, vmin=-vmax, vmax=vmax)\n", @@ -472,7 +478,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Reset the fields\n", "p.data[:] = 0\n", "ps.data[:] = 0\n", @@ -506,7 +512,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = np.amax(np.abs(p.data[-1]))\n", "\n", "fig, ax = plt.subplots(1, 3, figsize=(15, 10), tight_layout=True, sharey=True)\n", diff --git a/examples/seismic/tutorials/17_fourier_mode.ipynb b/examples/seismic/tutorials/17_fourier_mode.ipynb index 8db48cd4a9..6a239d7532 100644 --- a/examples/seismic/tutorials/17_fourier_mode.ipynb +++ b/examples/seismic/tutorials/17_fourier_mode.ipynb @@ -82,7 +82,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = demo_model('layers-isotropic', vp=3.0, origin=(0., 0.), shape=(101, 101), spacing=(10., 10.), nbl=40, nlayers=4)" ] }, @@ -103,7 +103,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plot_velocity(model)" ] }, @@ 
-113,7 +113,7 @@ "metadata": {}, "outputs": [], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Define acquisition geometry: source\n", "\n", "# First, position source centrally in all dimensions, then set depth\n", @@ -712,7 +712,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Code(str(op.ccode), language='C')" ] }, @@ -745,7 +745,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op(dt=model.critical_dt)" ] }, @@ -766,7 +766,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(12, 6))\n", "plt.subplot(1, 2, 1)\n", "plt.imshow(np.real(freq_mode.data.T), cmap='seismic', vmin=-1e2, vmax=1e2)\n", @@ -1459,7 +1459,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Code(str(op.ccode), language='C')" ] }, @@ -1496,7 +1496,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data.fill(0)\n", "op(dt=model.critical_dt)" ] @@ -1518,7 +1518,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(12, 30))\n", "for i in range(5):\n", " plt.subplot(5, 2, 2*i+1)\n", diff --git a/examples/seismic/utils.py b/examples/seismic/utils.py index cacce8a3ee..468e44ddd6 100644 --- a/examples/seismic/utils.py +++ b/examples/seismic/utils.py @@ -84,7 +84,7 @@ def __init__(self, model, rec_positions, src_positions, t0, tn, **kwargs): self._t0w = kwargs.get('t0w') if self._src_type is not None and self._f0 is None: error("Peak frequency must be provided in KHz" + - " for source of type %s" % self._src_type) + f" for source of type {self._src_type}") self._grid = model.grid self._model = model @@ -228,15 +228,21 @@ def __call__(self, parser, args, values, option_string=None): # E.g., `('advanced', {'par-tile': True})` values = eval(values) if not isinstance(values, tuple) and len(values) >= 1: - raise ArgumentError(self, ("Invalid choice `%s` (`opt` must be " - 
"either str or tuple)" % str(values))) + raise ArgumentError( + self, + f'Invalid choice `{str(values)}` ' + '(`opt` must be either str or tuple)' + ) opt = values[0] except NameError: # E.g. `'advanced'` opt = values if opt not in configuration._accepted['opt']: - raise ArgumentError(self, ("Invalid choice `%s` (choose from %s)" - % (opt, str(configuration._accepted['opt'])))) + raise ArgumentError( + self, + f'Invalid choice `{opt}`' + f'(choose from {configuration._accepted["opt"]!s})' + ) setattr(args, self.dest, values) parser = ArgumentParser(description=description) diff --git a/examples/seismic/viscoacoustic/operators.py b/examples/seismic/viscoacoustic/operators.py index d4a0d0adb4..e74a62a4a5 100755 --- a/examples/seismic/viscoacoustic/operators.py +++ b/examples/seismic/viscoacoustic/operators.py @@ -612,7 +612,7 @@ def GradientOperator(model, geometry, space_order=4, kernel='sls', time_order=2, eq_kernel = kernels[kernel] eqn = eq_kernel(model, geometry, pa, forward=False, save=False, **kwargs) - if time_order == 1: + if time_order == 1: # noqa: SIM108 gradient_update = Eq(grad, grad - p.dt * pa) else: gradient_update = Eq(grad, grad + p.dt * pa.dt) diff --git a/examples/seismic/viscoacoustic/viscoacoustic_example.py b/examples/seismic/viscoacoustic/viscoacoustic_example.py index fdf6992f89..142d3c433e 100755 --- a/examples/seismic/viscoacoustic/viscoacoustic_example.py +++ b/examples/seismic/viscoacoustic/viscoacoustic_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import norm from devito.logger import info diff --git a/examples/seismic/viscoacoustic/wavesolver.py b/examples/seismic/viscoacoustic/wavesolver.py index 9b7a6c5b8a..ca971ef23e 100755 --- a/examples/seismic/viscoacoustic/wavesolver.py +++ b/examples/seismic/viscoacoustic/wavesolver.py @@ -285,7 +285,7 @@ def jacobian_adjoint(self, rec, p, pa=None, grad=None, r=None, 
va=None, model=No space_order=self.space_order, staggered=NODE) if self.time_order == 1: - for i in {k.name: k for k in v}.keys(): + for i in {k.name: k for k in v}: kwargs.pop(i) va = VectorTimeFunction(name="va", grid=self.model.grid, time_order=self.time_order, diff --git a/examples/seismic/viscoelastic/viscoelastic_example.py b/examples/seismic/viscoelastic/viscoelastic_example.py index 4813131325..f8d9037045 100644 --- a/examples/seismic/viscoelastic/viscoelastic_example.py +++ b/examples/seismic/viscoelastic/viscoelastic_example.py @@ -1,9 +1,9 @@ +from contextlib import suppress + import numpy as np -try: +with suppress(ImportError): import pytest -except ImportError: - pass from devito import norm from devito.logger import info diff --git a/examples/timestepping/ic_superstep.py b/examples/timestepping/ic_superstep.py index d102991d3f..4a8bb5f680 100644 --- a/examples/timestepping/ic_superstep.py +++ b/examples/timestepping/ic_superstep.py @@ -63,7 +63,7 @@ def simulate_ic(parameters, step=1, snapshots=-1): # Initial condition msh = np.meshgrid(*[ np.linspace(o, e, s) for o, e, s - in zip(p.origin, p.extent, p.shape) + in zip(p.origin, p.extent, p.shape, strict=True) ]) ic = gaussian(msh, mu=p.mu, sigma_sq=p.sigma_sq) diff --git a/examples/timestepping/superstep.ipynb b/examples/timestepping/superstep.ipynb index d71a9cd7cc..838fda9853 100644 --- a/examples/timestepping/superstep.ipynb +++ b/examples/timestepping/superstep.ipynb @@ -91,15 +91,15 @@ "# Spatial Domain\n", "shape = (301, 301)\n", "origin = (0., 0.)\n", - "extent = (3000, 3000) # 3kmx3km\n", + "extent = (3000, 3000) # 3kmx3km\n", "\n", "# Velocity\n", "background_velocity = 3500\n", "\n", "# Time Domain\n", "t0 = 0\n", - "t1 = 0.07 # (length of pulse)\n", - "t2 = 1.0 # (time for pulse to be reflected)\n", + "t1 = 0.07 # (length of pulse)\n", + "t2 = 1.0 # (time for pulse to be reflected)\n", "dt = 0.0020203\n", "superstep_size = 5\n", "\n", @@ -180,6 +180,7 @@ " trm = (np.pi * f * (t - 1 / f)) 
** 2\n", " return A * (1 - 2 * trm) * np.exp(-trm)\n", "\n", + "\n", "nt1 = int(np.ceil((t1 - t0)/dt))\n", "t = np.linspace(t0, t1, nt1)\n", "rick = ricker(t, f=peak_freq)\n", @@ -233,7 +234,15 @@ } ], "source": [ - "source = SparseTimeFunction(name=\"ricker\", npoint=1, coordinates=[source_loc], nt=nt1, grid=grid, time_order=2, space_order=4)\n", + "source = SparseTimeFunction(\n", + " name=\"ricker\",\n", + " npoint=1,\n", + " coordinates=[source_loc],\n", + " nt=nt1,\n", + " grid=grid,\n", + " time_order=2,\n", + " space_order=4\n", + ")\n", "source.data[:, 0] = rick\n", "src_term = source.inject(field=u.forward, expr=source*velocity**2*dt**2)\n", "\n", @@ -459,7 +468,7 @@ "import shutil\n", "\n", "# Fetch and setup the Marmousi velocity field\n", - "url = 'https://github.com/devitocodes/data/raw/refs/heads/master/Simple2D/vp_marmousi_bi' # noqa: E501\n", + "url = 'https://github.com/devitocodes/data/raw/refs/heads/master/Simple2D/vp_marmousi_bi' # noqa: E501\n", "filename = Path('marmousi.np')\n", "shape = (1601, 401)\n", "if not filename.exists():\n", diff --git a/examples/userapi/00_sympy.ipynb b/examples/userapi/00_sympy.ipynb index 431cf31534..d0435907c8 100644 --- a/examples/userapi/00_sympy.ipynb +++ b/examples/userapi/00_sympy.ipynb @@ -438,7 +438,7 @@ "# The following piece of code is supposed to fail as it is\n", "# The exercise is to fix the code\n", "\n", - "expr2 = x + 2*y +3*z" + "expr2 = x + 2*y +3*z # noqa: F821" ] }, { @@ -542,7 +542,7 @@ "# NBVAL_SKIP\n", "# The following code will error until the code in cell 16 above is\n", "# fixed\n", - "z" + "z # noqa: F821" ] }, { diff --git a/examples/userapi/02_apply.ipynb b/examples/userapi/02_apply.ipynb index 9a6c809309..d8776e25eb 100644 --- a/examples/userapi/02_apply.ipynb +++ b/examples/userapi/02_apply.ipynb @@ -49,7 +49,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary = op.apply()" ] }, @@ -152,7 +152,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# 
NBVAL_IGNORE_OUTPUT\n", "op.arguments()" ] }, @@ -179,7 +179,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data[:] = 0. # Explicit reset to initial value\n", "summary = op.apply(x_m=2, y_m=2, time_M=0)" ] @@ -280,7 +280,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u2 = TimeFunction(name='u', grid=grid, save=5)\n", "summary = op.apply(u=u2)" ] @@ -373,7 +373,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary = op2.apply(time_M=4)" ] }, @@ -430,7 +430,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "summary" ] }, @@ -466,7 +466,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import configuration\n", "configuration['profiling'] = 'advanced'\n", "\n", @@ -637,7 +637,10 @@ "\n", "function_size = (f.size_allocated + g.size_allocated + a.size_allocated)*np.dtype(f.dtype).itemsize\n", "\n", - "print(f\"Functions have a total size of {function_size} bytes, but {memreport['host']} bytes are consumed by the `Operator`\")" + "print(\n", + " f'Functions have a total size of {function_size} bytes, '\n", + " f'but {memreport[\"host\"]} bytes are consumed by the `Operator`'\n", + ")" ] }, { diff --git a/examples/userapi/03_subdomains.ipynb b/examples/userapi/03_subdomains.ipynb index cf0697f238..3980638815 100644 --- a/examples/userapi/03_subdomains.ipynb +++ b/examples/userapi/03_subdomains.ipynb @@ -247,6 +247,7 @@ "source": [ "class FullDomain(SubDomain):\n", " name = 'mydomain'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: z}" @@ -301,8 +302,8 @@ "source": [ "class InnerDomain(SubDomain):\n", " name = 'inner'\n", + "\n", " def define(self, dimensions):\n", - " d = dimensions\n", " return {d: ('middle', 1, 1) for d in dimensions}" ] }, @@ -384,6 +385,7 @@ "source": [ "class Middle(SubDomain):\n", " name = 'middle'\n", + "\n", " def define(self, 
dimensions):\n", " x, y = dimensions\n", " return {x: ('middle', 3, 4), y: ('middle', 4, 3)}" @@ -410,7 +412,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Function, Eq, Operator\n", "\n", "grid = Grid(shape=(10, 10))\n", @@ -474,6 +476,7 @@ "source": [ "class Left(SubDomain):\n", " name = 'left'\n", + "\n", " def define(self, dimensions):\n", " x, y = dimensions\n", " return {x: ('left', 2), y: y}" @@ -494,6 +497,7 @@ "source": [ "class Right(SubDomain):\n", " name = 'right'\n", + "\n", " def define(self, dimensions):\n", " x, y = dimensions\n", " return {x: x, y: ('right', 2)}" @@ -543,7 +547,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "Operator([eq1, eq2, eq3])()" ] }, @@ -621,8 +625,8 @@ "metadata": {}, "outputs": [], "source": [ - "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", - "h = 1.0 # Desired grid spacing\n", + "extent = (200., 100., 100.) # 200 x 100 x 100 m domain\n", + "h = 1.0 # Desired grid spacing\n", "# Set the grid to have a shape (201, 101, 101) for h=1:\n", "shape = (int(extent[0]/h+1), int(extent[1]/h+1), int(extent[2]/h+1))\n", "\n", @@ -632,23 +636,23 @@ "rho = np.zeros(shape)\n", "\n", "# Set up three horizontally separated layers:\n", - "l1 = int(0.5*shape[2])+1 # End of the water layer at 50m depth\n", - "l2 = int(0.5*shape[2])+1+int(4/h) # End of the soft rock section at 54m depth\n", + "l1 = int(0.5*shape[2])+1 # End of the water layer at 50m depth\n", + "l2 = int(0.5*shape[2])+1+int(4/h) # End of the soft rock section at 54m depth\n", "\n", "# Water layer model\n", - "vp[:,:,:l1] = 1.52\n", - "vs[:,:,:l1] = 0.\n", - "rho[:,:,:l1] = 1.05\n", + "vp[:, :, :l1] = 1.52\n", + "vs[:, :, :l1] = 0.\n", + "rho[:, :, :l1] = 1.05\n", "\n", "# Soft-rock layer model\n", - "vp[:,:,l1:l2] = 1.6\n", - "vs[:,:,l1:l2] = 0.4\n", - "rho[:,:,l1:l2] = 1.3\n", + "vp[:, :, l1:l2] = 1.6\n", + "vs[:, :, l1:l2] = 0.4\n", + "rho[:, :, l1:l2] = 1.3\n", "\n", "# 
Hard-rock layer model\n", - "vp[:,:,l2:] = 2.2\n", - "vs[:,:,l2:] = 1.2\n", - "rho[:,:,l2:] = 2.\n", + "vp[:, :, l2:] = 2.2\n", + "vs[:, :, l2:] = 1.2\n", + "rho[:, :, l2:] = 2.\n", "\n", "origin = (0, 0, 0)\n", "spacing = (h, h, h)" @@ -667,7 +671,7 @@ "metadata": {}, "outputs": [], "source": [ - "nbl = 20 # Number of absorbing boundary layers cells" + "nbl = 20 # Number of absorbing boundary layers cells" ] }, { @@ -686,12 +690,15 @@ "# Define our 'upper' and 'lower' SubDomains:\n", "class Upper(SubDomain):\n", " name = 'upper'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: ('left', l1+nbl)}\n", "\n", + "\n", "class Lower(SubDomain):\n", " name = 'lower'\n", + "\n", " def define(self, dimensions):\n", " x, y, z = dimensions\n", " return {x: x, y: y, z: ('right', shape[2]+nbl-l1)}" @@ -718,8 +725,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "so = 4 # FD space order (Note that the time order is by default 1).\n", + "# NBVAL_IGNORE_OUTPUT\n", + "so = 4 # FD space order (Note that the time order is by default 1).\n", "\n", "model = ModelElastic(space_order=so, vp=vp, vs=vs, b=1/rho, origin=origin, shape=shape,\n", " spacing=spacing, nbl=nbl)\n", @@ -788,9 +795,15 @@ " subdomain=ur)\n", "\n", "u_v_l = Eq(v.forward, model.damp*(v + s*ro*div(tau)), subdomain=lr)\n", - "u_t_l = Eq(tau.forward,\n", - " model.damp*(tau + s*l*diag(div(v.forward)) + s*mu*(grad(v.forward) + grad(v.forward).transpose(inner=False))),\n", - " subdomain=lr)" + "u_t_l = Eq(\n", + " tau.forward,\n", + " model.damp*(\n", + " tau\n", + " + s*l*diag(div(v.forward))\n", + " + s*mu*(grad(v.forward) + grad(v.forward).transpose(inner=False))\n", + " ),\n", + " subdomain=lr\n", + ")" ] }, { @@ -827,7 +840,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator([u_v_u, u_v_l, u_t_u, u_t_l] + src_xx + src_yy + src_zz, subs=model.spacing_map)\n", "op(dt=dt)" ] @@ -886,7 +899,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Plots\n", "%matplotlib inline\n", @@ -952,7 +965,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "left = Left()\n", "right = Right()\n", "mid = Middle()\n", @@ -1031,12 +1044,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import SubDomainSet\n", "\n", + "\n", "class MySubDomains(SubDomainSet):\n", " name = 'mydomains'\n", "\n", + "\n", "sds_grid = Grid(shape=(10, 10))\n", "\n", "# Bounds for the various subdomains as (x_ltkn, x_rtkn, y_ltkn, y_rtkn)\n", @@ -1116,7 +1131,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Border\n", "\n", "# Reset the data\n", @@ -1189,7 +1204,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset the data\n", "sds_f.data[:] = 0\n", @@ -1263,7 +1278,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "# Reset the data\n", "sds_f.data[:] = 0\n", diff --git a/examples/userapi/04_boundary_conditions.ipynb b/examples/userapi/04_boundary_conditions.ipynb index d154dc64ba..3f662de5ea 100644 --- a/examples/userapi/04_boundary_conditions.ipynb +++ b/examples/userapi/04_boundary_conditions.ipynb @@ -54,8 +54,10 @@ "\n", "so = 6 # Space order\n", "\n", + "\n", "class MainDomain(SubDomain): # Main section with no damping\n", " name = 'main'\n", + "\n", " def __init__(self, pmls, so, grid=None):\n", " # NOTE: These attributes are used in `define`, and thus must be\n", " # set up before `super().__init__` is called.\n", @@ -71,6 +73,7 @@ "\n", "class Left(SubDomain): # Left PML region\n", " name = 'left'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " super().__init__(grid=grid)\n", @@ -82,6 +85,7 @@ "\n", "class Right(SubDomain): # Right PML region\n", " name = 'right'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " 
super().__init__(grid=grid)\n", @@ -90,8 +94,10 @@ " x, y = dimensions\n", " return {x: ('right', self.pmls), y: y}\n", "\n", + "\n", "class Base(SubDomain): # Base PML region\n", " name = 'base'\n", + "\n", " def __init__(self, pmls, grid=None):\n", " self.pmls = pmls\n", " super().__init__(grid=grid)\n", @@ -100,8 +106,10 @@ " x, y = dimensions\n", " return {x: ('middle', self.pmls, self.pmls), y: ('right', self.pmls)}\n", "\n", + "\n", "class FreeSurface(SubDomain): # Free surface region\n", " name = 'freesurface'\n", + "\n", " def __init__(self, pmls, so, grid=None):\n", " self.pmls = pmls\n", " self.so = so\n", @@ -410,6 +418,7 @@ "from devito import sign, norm\n", "from devito.symbolics import retrieve_functions, INT\n", "\n", + "\n", "def free_surface_top(eq, subdomain, update):\n", " \"\"\"\n", " Modify a stencil such that it is folded back on\n", @@ -420,7 +429,7 @@ " velocity free-surface. This is the MPI-safe method\n", " of implementing a free-surface boundary condition\n", " in Devito.\n", - " \n", + "\n", " Parameters\n", " ----------\n", " eq : Eq\n", @@ -458,6 +467,7 @@ " mapper.update({f: f.subs({yind: INT(abs(yind))})})\n", " return Eq(lhs, rhs.subs(mapper), subdomain=subdomain)\n", "\n", + "\n", "fs_p = free_surface_top(eq_p, freesurface, 'pressure')\n", "fs_v = free_surface_top(eq_v, freesurface, 'velocity')" ] diff --git a/examples/userapi/05_conditional_dimension.ipynb b/examples/userapi/05_conditional_dimension.ipynb index cd4c4cca60..7f5e75fed5 100644 --- a/examples/userapi/05_conditional_dimension.ipynb +++ b/examples/userapi/05_conditional_dimension.ipynb @@ -45,7 +45,7 @@ "\n", "# We define a 10x10 grid, dimensions are x, y\n", "shape = (10, 10)\n", - "grid = Grid(shape = shape)\n", + "grid = Grid(shape=shape)\n", "x, y = grid.dimensions\n", "\n", "# Define function 𝑓. 
We will initialize f's data with ones on its diagonal.\n", @@ -95,7 +95,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Eq, Operator\n", "op0 = Operator(Eq(f, f + 1))\n", "op0.apply()\n", @@ -115,7 +115,7 @@ "metadata": {}, "outputs": [], "source": [ - "#print(op0.ccode) # Print the generated code" + "# print(op0.ccode) # Print the generated code" ] }, { @@ -280,7 +280,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import Gt\n", "\n", "f.data[:] = np.eye(10)\n", @@ -356,7 +356,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "f.data[:] = np.eye(10)\n", "\n", @@ -364,7 +364,7 @@ "print(op2.body.body[-1])\n", "op2.apply()\n", "\n", - "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data)))==0)\n", + "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data))) == 0)\n", "assert (np.count_nonzero(f.data) == 10)\n", "\n", "f.data" @@ -432,7 +432,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "from sympy import And\n", "from devito import Ne, Lt\n", @@ -450,10 +450,10 @@ "\n", "print(op3.body.body[-1])\n", "\n", - "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data)))==0)\n", + "assert (np.count_nonzero(f.data - np.diag(np.diagonal(f.data))) == 0)\n", "assert (np.count_nonzero(f.data) == 10)\n", - "assert np.all(f.data[np.nonzero(f.data[:5,:5])] == 2)\n", - "assert np.all(f.data[5:,5:] == np.eye(5))\n", + "assert np.all(f.data[np.nonzero(f.data[:5, :5])] == 2)\n", + "assert np.all(f.data[5:, 5:] == np.eye(5))\n", "\n", "f.data" ] @@ -523,7 +523,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "\n", "h = Function(name='h', shape=grid.shape, dimensions=(x, ci))\n", "\n", @@ -581,14 +581,14 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "size, factor = 16, 4\n", "i = Dimension(name='i')\n", "ci = 
ConditionalDimension(name='ci', parent=i, factor=factor)\n", "\n", "g = Function(name='g', shape=(size,), dimensions=(i,))\n", "# Initialize g\n", - "g.data[:,]= list(range(size))\n", + "g.data[:,] = list(range(size))\n", "f = Function(name='f', shape=(int(size/factor),), dimensions=(ci,))\n", "\n", "op5 = Operator([Eq(f, g)])\n", @@ -703,7 +703,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# ConditionalDimension with Ne(f, 0) condition\n", "ci = ConditionalDimension(name='ci', parent=f.dimensions[-1],\n", " condition=Ne(f, 0))\n", @@ -768,8 +768,8 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", - "count = g.data[0] # Number of nonzeros\n", + "# NBVAL_IGNORE_OUTPUT\n", + "count = g.data[0] # Number of nonzeros\n", "\n", "# Dimension used only to nest different size of Functions under the same dim\n", "id_dim = Dimension(name='id_dim')\n", @@ -789,7 +789,7 @@ "eqii = Inc(k, 1, implicit_dims=(f.dimensions + (ci,)))\n", "eqs.append(eqii)\n", "\n", - "for n, i in enumerate(f.dimensions):\n", + "for n, _ in enumerate(f.dimensions):\n", " eqs.append(Eq(g[k, n], f.dimensions[n], implicit_dims=(f.dimensions + (ci,))))\n", "\n", "# TODO: Must be language='C' for now due to issue #2061\n", diff --git a/examples/userapi/06_sparse_operations.ipynb b/examples/userapi/06_sparse_operations.ipynb index c71d9d5be8..37f0888d19 100644 --- a/examples/userapi/06_sparse_operations.ipynb +++ b/examples/userapi/06_sparse_operations.ipynb @@ -75,7 +75,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "npoint = 5\n", "coords = np.random.rand(npoint, 2)/2 + .25\n", "base = np.floor(coords / grid.spacing)*grid.spacing\n", @@ -85,13 +85,13 @@ "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", 
"ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -244,19 +244,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -335,7 +335,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -365,7 +365,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator(s.inject(u, expr=s))\n", "op()" ] @@ -387,9 +387,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0,1,0,1])\n", + "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Linear weights\")\n", "plt.show()" @@ 
-458,19 +458,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -541,7 +541,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -571,7 +571,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op = Operator(s.inject(u, expr=s))\n", "op()" ] @@ -593,9 +593,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0,1,0,1])\n", + "plt.imshow(u.data[1], vmin=0, vmax=1, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Sinc weights\")\n", "plt.show()" @@ -698,8 +698,8 @@ " coordinates=coords, r=2)\n", "\n", "\n", - "pos = tuple(product((-grid.spacing[1], 0, grid.spacing[1],2*grid.spacing[1]),\n", - " (-grid.spacing[1], 0, grid.spacing[1],2*grid.spacing[1])))\n", + "pos = tuple(product((-grid.spacing[1], 0, grid.spacing[1], 2*grid.spacing[1]),\n", + " (-grid.spacing[1], 0, grid.spacing[1], 2*grid.spacing[1])))\n", "interp_points = np.concatenate([base+p for p in pos])" ] }, 
@@ -720,19 +720,19 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "fig, ax = plt.subplots(figsize=(10, 10))\n", "ax.set_xlim([0, 1])\n", "ax.set_ylim([0, 1])\n", "ax.scatter(coords[:, 0], coords[:, 1], s=10, label=\"Sparse positions\")\n", "ax.scatter(interp_points[:, 0], interp_points[:, 1], s=10, label=\"Interpolation support\")\n", - "ax.grid(which = \"major\")\n", - "ax.grid(which = \"minor\", alpha = 0.2)\n", + "ax.grid(which=\"major\")\n", + "ax.grid(which=\"minor\", alpha=0.2)\n", "ax.xaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.yaxis.set_minor_locator(FixedLocator(np.linspace(0, 1, 51)))\n", "ax.set_title(\"Off the grid sparse positions\")\n", "for i in range(npoint):\n", - " ax.annotate(\"(%.3f, %.3f)\" % (coords[i, 0], coords[i, 1]), coords[i, :])\n", + " ax.annotate(f\"({coords[i, 0]:.3f}, {coords[i, 1]:.3f})\", coords[i, :])\n", "ax.legend()\n", "plt.show()" ] @@ -781,7 +781,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "op()\n", "s.data" ] @@ -811,7 +811,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u.data.fill(0)\n", "op = Operator(s.inject(u, expr=s))\n", "op()" @@ -834,9 +834,9 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(u.data[1], vmin=0, vmax=2, cmap=\"jet\", extent=[0,1,0,1])\n", + "plt.imshow(u.data[1], vmin=0, vmax=2, cmap=\"jet\", extent=[0, 1, 0, 1])\n", "plt.colorbar(fraction=0.046, pad=0.04)\n", "plt.title(\"Precomputed weights\")\n", "plt.show()" diff --git a/examples/userapi/07_functions_on_subdomains.ipynb b/examples/userapi/07_functions_on_subdomains.ipynb index 7724809163..f18747348b 100644 --- a/examples/userapi/07_functions_on_subdomains.ipynb +++ b/examples/userapi/07_functions_on_subdomains.ipynb @@ -227,7 +227,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_f = Eq(f, 1)\n", "eq_g = Eq(g, 1, 
subdomain=middle)\n", "\n", @@ -299,7 +299,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(f.data.T, vmin=0, vmax=3, origin='lower', extent=(1.5, 8.5, 1.5, 8.5))\n", "plt.colorbar()\n", "plt.title(\"f.data\")\n", @@ -351,7 +351,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_fg = Eq(g, g + f)\n", "Operator(eq_fg)()" ] @@ -382,7 +382,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(g.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 10.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"g.data\")\n", @@ -494,7 +494,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Equations operating on Functions defined on SubDomains must be applied over\n", "# the SubDomain, or a SubDomain representing some subset thereof.\n", "\n", @@ -542,7 +542,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(g.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 10.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"g.data\")\n", @@ -594,7 +594,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "eq_gf = Eq(f, g)\n", "Operator(eq_gf)()" ] @@ -625,7 +625,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(f.data.T, vmin=0, vmax=3, origin='lower', extent=(1.5, 8.5, 1.5, 8.5))\n", "plt.colorbar()\n", "plt.title(\"f.data\")\n", @@ -693,7 +693,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec = src_rec.interpolate(expr=f)\n", "Operator(rec)()" ] @@ -761,7 +761,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "rec = src_rec.interpolate(expr=f+g)\n", "Operator(rec)()" ] @@ -829,7 +829,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "src = src_rec.inject(field=h, expr=1)\n", "Operator(src)()" ] @@ -860,7 +860,7 @@ } ], "source": [ - 
"#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.imshow(h.data.T, vmin=0, vmax=3, origin='lower', extent=(-0.5, 5.5, -0.5, 10.5))\n", "plt.colorbar()\n", "plt.title(\"h.data\")\n", @@ -895,7 +895,7 @@ "origin = (0., 0.)\n", "shape = (201, 201)\n", "spacing = (5., 5.)\n", - "extent = tuple((sh-1)*sp for sh, sp in zip(shape, spacing))\n", + "extent = tuple((sh-1)*sp for sh, sp in zip(shape, spacing, strict=True))\n", "\n", "# Layered model\n", "vp = np.full(shape, 1.5)\n", @@ -936,7 +936,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "model = Model(vp=vp, origin=origin, shape=shape, spacing=spacing,\n", " space_order=2, nbl=20, bcs=\"damp\")\n", "\n", @@ -1011,7 +1011,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Make a Function and set it equal to one to check the alignment of subdomain\n", "test_func = Function(name='testfunc', grid=model.grid)\n", "Operator(Eq(test_func, 1, subdomain=snapshotdomain))()\n", @@ -1153,7 +1153,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "u = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=8)\n", "\n", "pde = model.m * u.dt2 - u.laplace + model.damp * u.dt\n", @@ -2433,7 +2433,7 @@ } ], "source": [ - "#NBVAL_SKIP\n", + "# NBVAL_SKIP\n", "def animate_wavefield(f, fg, v, interval=100):\n", " \"\"\"\n", " Create an animation of the wavefield.\n", @@ -2464,7 +2464,7 @@ " # Initialize with the first frame (frame index 0)\n", " im_fg = ax.imshow(fg.data[0].T, vmin=vmin, vmax=vmax, cmap=\"Greys\",\n", " extent=(-100, 1100, 1100, -100), zorder=1)\n", - " im_f = ax.imshow(f.data[0].T, vmin=vmin, vmax=vmax, cmap='seismic',\n", + " im_f = ax.imshow(f.data[0].T, vmin=vmin, vmax=vmax, cmap='seismic',\n", " extent=(0, 1000, 1000, 245), zorder=2)\n", "\n", " # Set axis limits\n", @@ -2490,6 +2490,7 @@ "\n", " return ani\n", "\n", + "\n", "ani = animate_wavefield(u_save, u_save_grid, model.vp)\n", "\n", 
"HTML(ani.to_html5_video())" @@ -2540,7 +2541,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "plt.plot((-100., 1100., 1100., -100., -100.),\n", " (-100., -100., 520., 520., -100.),\n", " 'k--', label='Acoustic')\n", @@ -2680,7 +2681,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "# Make a trivial operator to check location of fields\n", "test_func = Function(name='testfunc', grid=grid1)\n", "Operator([Eq(test_func, 1, subdomain=upper),\n", @@ -2838,7 +2839,10 @@ "\n", "# Elastic update\n", "pde_v = v.dt - b*div(tau) + damp*v.forward\n", - "pde_tau = tau.dt - lam*diag(div(v.forward)) - mu*(grad(v.forward) + grad(v.forward).transpose(inner=False)) + damp*tau.forward\n", + "pde_tau = tau.dt \\\n", + " - lam*diag(div(v.forward)) \\\n", + " - mu*(grad(v.forward) + grad(v.forward).transpose(inner=False)) \\\n", + " + damp*tau.forward\n", "\n", "eq_v = Eq(v.forward, solve(pde_v, v.forward), subdomain=lowerfield)\n", "eq_t = Eq(tau.forward, solve(pde_tau, tau.forward), subdomain=lower)\n", @@ -2892,7 +2896,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "from devito import switchconfig\n", "\n", "# Note: switchconfig(safe_math=True) is only required here to get consistent norms for testing purposes\n", @@ -2931,7 +2935,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax_p = np.amax(np.abs(p.data[-1]))\n", "vmax_tau = np.amax(np.abs((tau[0, 0].data[-1] + tau[1, 1].data[-1])/2))\n", "vmax = max(vmax_p, vmax_tau)\n", @@ -2988,7 +2992,7 @@ } ], "source": [ - "#NBVAL_IGNORE_OUTPUT\n", + "# NBVAL_IGNORE_OUTPUT\n", "vmax = 0.05*np.amax(np.abs(rec.data))\n", "plt.imshow(rec.data, cmap='Greys', aspect='auto', vmax=vmax, vmin=-vmax)\n", "plt.xlabel(\"Receiver number\")\n", diff --git a/scripts/gen_sympy_funcs.py b/scripts/gen_sympy_funcs.py index dab404a567..44341be656 100644 --- a/scripts/gen_sympy_funcs.py +++ b/scripts/gen_sympy_funcs.py @@ -6,12 +6,12 
@@ for fn in funcs: try: - strc = """class %s(DifferentiableOp, sympy.%s): - __sympy_class__ = sympy.%s - __new__ = DifferentiableOp.__new__\n\n""" % (fn, fn, fn) + strc = f"""class {fn}(DifferentiableOp, sympy.{fn}): + __sympy_class__ = sympy.{fn} + __new__ = DifferentiableOp.__new__\n\n""" exec(strc) print(strc) except: # Some are not classes such as sqrt - print("""def %s(x): - diffify(sympy.%s(x))\n\n""" % (fn, fn)) + print(f"""def {fn}(x): + diffify(sympy.{fn}(x))\n\n""") diff --git a/tests/test_adjoint.py b/tests/test_adjoint.py index d7c9bbf736..a0dcc69169 100644 --- a/tests/test_adjoint.py +++ b/tests/test_adjoint.py @@ -114,8 +114,10 @@ def test_adjoint_F(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = inner(srca, solver.geometry.src) term2 = norm(rec) ** 2 - info(': %f, : %f, difference: %4.4e, ratio: %f' - % (term1, term2, (term1 - term2)/term1, term1 / term2)) + info( + f': {term1:f}, : {term2:f}, ' + f'difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}' + ) assert np.isclose((term1 - term2)/term1, 0., atol=1.e-11) @pytest.mark.parametrize('mkey, shape, kernel, space_order, time_order, setup_func', [ @@ -192,8 +194,10 @@ def test_adjoint_J(self, mkey, shape, kernel, space_order, time_order, setup_fun # Adjoint test: Verify matches closely term1 = np.dot(im.data.reshape(-1), dm.reshape(-1)) term2 = norm(du)**2 - info(': %f, : %f, difference: %4.4e, ratio: %f' - % (term1, term2, (term1 - term2)/term1, term1 / term2)) + info( + f': {term1:f}, : {term2:f}, ' + f'difference: {(term1 - term2)/term1:4.4e}, ratio: {term1 / term2:f}' + ) assert np.isclose((term1 - term2)/term1, 0., atol=1.e-12) @pytest.mark.parametrize('shape, coords', [ diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py index 0de45e4799..dfb71a3135 100644 --- a/tests/test_benchmark.py +++ b/tests/test_benchmark.py @@ -34,11 +34,12 @@ def test_bench(mode, problem, op): pyversion = sys.executable baseline = 
os.path.realpath(__file__).split("tests/test_benchmark.py")[0] - benchpath = '%sbenchmarks/user/benchmark.py' % baseline + benchpath = f'{baseline}benchmarks/user/benchmark.py' - command_bench = [pyversion, benchpath, mode, - '-P', problem, '-d', '%d' % nx, '%d' % ny, '%d' % nz, '--tn', - '%d' % tn, '-op', op] + command_bench = [ + pyversion, benchpath, mode, '-P', problem, + '-d', str(nx), str(ny), str(nz), '--tn', str(tn), '-op', op + ] if mode == "bench": command_bench.extend(['-x', '1']) check_call(command_bench) @@ -48,14 +49,14 @@ base_filename = problem filename_suffix = '.json' arch = 'arch[unknown]' - shape = 'shape[%d,%d,%d]' % (nx, ny, nz) + shape = f'shape[{nx},{ny},{nz}]' nbl = 'nbl[10]' - t = 'tn[%d]' % tn + t = f'tn[{tn}]' so = 'so[2]' to = 'to[2]' opt = 'opt[advanced]' at = 'at[aggressive]' - nt = 'nt[%d]' % nthreads + nt = f'nt[{nthreads}]' mpi = 'mpi[False]' np = 'np[1]' rank = 'rank[0]' @@ -64,7 +65,7 @@ bench_corename = os.path.join('_'.join([base_filename, arch, shape, nbl, t, so, to, opt, at, nt, mpi, np, rank])) - bench_filename = "%s%s%s" % (dir_name, bench_corename, filename_suffix) + bench_filename = f"{dir_name}{bench_corename}{filename_suffix}" assert os.path.isfile(bench_filename) else: assert True diff --git a/tests/test_builtins.py b/tests/test_builtins.py index 1a5c98dcb0..275ee4bbe2 100644 --- a/tests/test_builtins.py +++ b/tests/test_builtins.py @@ -112,7 +112,7 @@ def test_assign_parallel(self, mode): stop = loc_shape*(loc_coords+1) slices = [] - for i, j in zip(start, stop): + for i, j in zip(start, stop, strict=True): slices.append(slice(i, j, 1)) slices = as_tuple(slices) assert np.all(a[slices] - np.array(g.data[:]) == 0) @@ -194,7 +194,7 @@ def test_gs_parallel(self, mode): stop = loc_shape*(loc_coords+1) slices = [] - for i, j in zip(start, stop): + for i, j in zip(start, stop, strict=True): slices.append(slice(i, j, 1)) slices = as_tuple(slices) assert
np.all(sp_smoothed[slices] - np.array(dv_smoothed.data[:]) == 0) diff --git a/tests/test_caching.py b/tests/test_caching.py index 71ace0680a..8bfaaae837 100644 --- a/tests/test_caching.py +++ b/tests/test_caching.py @@ -581,7 +581,7 @@ def test_clear_cache(self, operate_on_empty_cache, nx=1000, ny=1000): grid = Grid(shape=(nx, ny), dtype=np.float64) cache_size = len(_SymbolCache) - for i in range(10): + for _ in range(10): assert(len(_SymbolCache) == cache_size) Function(name='u', grid=grid, space_order=2) @@ -604,7 +604,7 @@ def test_clear_cache_with_Csymbol(self, operate_on_empty_cache, nx=1000, ny=1000 ncreated = 0 assert(len(_SymbolCache) == cache_size + ncreated) - u._C_symbol + _ = u._C_symbol # Cache size won't change since _C_symbol isn't cached by devito to # avoid circular references in the cache assert(len(_SymbolCache) == cache_size + ncreated) @@ -678,7 +678,8 @@ def test_sparse_function(self, operate_on_empty_cache): ncreated = 2+1+2+2+2+1+4 # Note that injection is now lazy so no new symbols should be created assert len(_SymbolCache) == cur_cache_size - i.evaluate + # The expression is not redundant, but storing it changes the symbol count + i.evaluate # noqa: B018 assert len(_SymbolCache) == cur_cache_size + ncreated diff --git a/tests/test_cinterface.py b/tests/test_cinterface.py index a2ce4b6eb5..1c9d1968cb 100644 --- a/tests/test_cinterface.py +++ b/tests/test_cinterface.py @@ -26,7 +26,7 @@ def test_basic(): ccode = str(ccode) hcode = str(hcode) - assert 'include "%s.h"' % name in ccode + assert f'include "{name}.h"' in ccode # The public `struct dataobj` only appears in the header file assert 'struct dataobj\n{' not in ccode diff --git a/tests/test_cse.py b/tests/test_cse.py index fe41ca0564..cb459ace26 100644 --- a/tests/test_cse.py +++ b/tests/test_cse.py @@ -109,11 +109,11 @@ def test_default_algo(exprs, expected, min_cost): exprs[i] = DummyEq(indexify(diffify(eval(e).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % 
counter()).indexify() + make = lambda _: CTemp(name=f'r{counter()}').indexify() processed = _cse(exprs, make, min_cost) assert len(processed) == len(expected) - assert all(str(i.rhs) == j for i, j in zip(processed, expected)) + assert all(str(i.rhs) == j for i, j in zip(processed, expected, strict=True)) def test_temp_order(): @@ -241,11 +241,11 @@ def test_advanced_algo(exprs, expected): exprs[i] = DummyEq(indexify(diffify(eval(e).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % counter(), dtype=np.float32).indexify() + make = lambda _: CTemp(name=f'r{counter()}', dtype=np.float32).indexify() processed = _cse(exprs, make, mode='advanced') assert len(processed) == len(expected) - assert all(str(i.rhs) == j for i, j in zip(processed, expected)) + assert all(str(i.rhs) == j for i, j in zip(processed, expected, strict=True)) def test_advanced_algo_order(): @@ -261,7 +261,7 @@ def test_advanced_algo_order(): eq_b = DummyEq(indexify(diffify(Eq(v.forward, v + u.forward).evaluate))) counter = generator() - make = lambda _: CTemp(name='r%d' % counter(), dtype=np.float32).indexify() + make = lambda _: CTemp(name=f'r{counter()}', dtype=np.float32).indexify() processed = _cse([eq0, eq1, eq_b], make, mode='advanced') # Three input equation and 2 CTemps diff --git a/tests/test_data.py b/tests/test_data.py index f7a30b2bd9..73a76fcc79 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -131,8 +131,8 @@ def test_broadcasting(self): u.data[:] = v except ValueError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Assign from array having shape with some 1-valued entries v = np.zeros(shape=(4, 1, 4), dtype=u.dtype) @@ -175,12 +175,12 @@ def test_illegal_indexing(self): try: u.data[5] - assert False + raise AssertionError('Assert False') except IndexError: pass try: v.data[nt] - assert False + raise AssertionError('Assert False') except IndexError: pass @@ -269,8 +269,9 @@ def 
test_w_halo_wo_padding(self): assert u._offset_domain == (2, 2, 2) assert u._offset_halo == ((0, 6), (0, 6), (0, 6)) assert u._offset_owned == ((2, 4), (2, 4), (2, 4)) - assert tuple(i + j*2 for i, j in zip(u.shape, u._size_halo.left)) ==\ - u.shape_with_halo + assert tuple( + i + j*2 for i, j in zip(u.shape, u._size_halo.left, strict=True) + ) == u.shape_with_halo # Try with different grid shape and space_order grid2 = Grid(shape=(3, 3, 3)) @@ -278,16 +279,18 @@ def test_w_halo_wo_padding(self): assert u2.shape == (3, 3, 3) assert u2._offset_domain == (4, 4, 4) assert u2._offset_halo == ((0, 7), (0, 7), (0, 7)) - assert tuple(i + j*2 for i, j in zip(u2.shape, u2._size_halo.left)) ==\ - u2.shape_with_halo + assert tuple( + i + j*2 for i, j in zip(u2.shape, u2._size_halo.left, strict=True) + ) == u2.shape_with_halo assert u2.shape_with_halo == (11, 11, 11) def test_wo_halo_w_padding(self): grid = Grid(shape=(4, 4, 4)) u = Function(name='u', grid=grid, space_order=2, padding=((1, 1), (3, 3), (4, 4))) - assert tuple(i + j + k for i, (j, k) in zip(u.shape_with_halo, u._padding)) ==\ - u.shape_allocated + assert tuple( + i + j + k for i, (j, k) in zip(u.shape_with_halo, u._padding, strict=True) + ) == u.shape_allocated assert u._halo == ((2, 2), (2, 2), (2, 2)) assert u._size_padding == ((1, 1), (3, 3), (4, 4)) assert u._size_padding.left == u._size_padding.right == (1, 3, 4) @@ -409,7 +412,7 @@ def test_convert_index(self): idx0 = (5, slice(8, 11, 1)) result0 = [] - for i, j in zip(idx0, decomposition): + for i, j in zip(idx0, decomposition, strict=True): result0.append(convert_index(i, j)) expected0 = (0, slice(0, 3, 1)) assert as_tuple(result0) == expected0 @@ -419,58 +422,92 @@ def test_reshape_identity(self): # Identity decomposition assert len(d.reshape(0, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, 0), [[0, 1], [2, 3]])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, 0), [[0, 1], [2, 3]], strict=True) + ) def 
test_reshape_right_only(self): d = Decomposition([[0, 1], [2, 3]], 2) # Extension at right only assert len(d.reshape(0, 2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, 2), [[0, 1], [2, 3, 4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(0, 2), [[0, 1], [2, 3, 4, 5]], strict=True + ) + ) # Reduction at right affecting one sub-domain only, but not the whole subdomain assert len(d.reshape(0, -1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -1), [[0, 1], [2]])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -1), [[0, 1], [2]], strict=True) + ) # Reduction at right over one whole sub-domain assert len(d.reshape(0, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -2), [[0, 1], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -2), [[0, 1], []], strict=True) + ) # Reduction at right over multiple sub-domains assert len(d.reshape(0, -3)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(0, -3), [[0], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(0, -3), [[0], []], strict=True) + ) def test_reshape_left_only(self): d = Decomposition([[0, 1], [2, 3]], 2) # Extension at left only assert len(d.reshape(2, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(2, 0), [[0, 1, 2, 3], [4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(2, 0), [[0, 1, 2, 3], [4, 5]], strict=True + ) + ) # Reduction at left affecting one sub-domain only, but not the whole subdomain assert len(d.reshape(-1, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, 0), [[0], [1, 2]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, 0), [[0], [1, 2]], strict=True) + ) # Reduction at left over one whole sub-domain assert len(d.reshape(-2, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-2, 0), [[], [0, 1]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-2, 0), [[], [0, 1]], strict=True) + ) # Reduction at right over multiple 
sub-domains assert len(d.reshape(-3, 0)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-3, 0), [[], [0]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-3, 0), [[], [0]], strict=True) + ) def test_reshape_left_right(self): d = Decomposition([[0, 1], [2, 3]], 2) # Extension at both left and right assert len(d.reshape(1, 1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(1, 1), [[0, 1, 2], [3, 4, 5]])) + assert all( + list(i) == j for i, j in zip( + d.reshape(1, 1), [[0, 1, 2], [3, 4, 5]], strict=True + ) + ) # Reduction at both left and right assert len(d.reshape(-1, -1)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -1), [[0], [1]])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -1), [[0], [1]], strict=True) + ) # Reduction at both left and right, with the right one obliterating one subdomain assert len(d.reshape(-1, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -2), [[0], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -2), [[0], []], strict=True) + ) # Reduction at both left and right obliterating all subdomains # triggering an exception assert len(d.reshape(-1, -3)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []], strict=True) + ) assert len(d.reshape(-2, -2)) == 2 - assert all(list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []])) + assert all( + list(i) == j for i, j in zip(d.reshape(-1, -3), [[], []], strict=True) + ) def test_reshape_slice(self): d = Decomposition([[0, 1, 2], [3, 4], [5, 6, 7], [8, 9, 10, 11]], 2) @@ -666,7 +703,8 @@ def test_getitem(self, mode): assert np.all(result[3] == [[3, 2, 1, 0]]) result1 = np.array(f.data[5, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in 
glb_pos_map[y]: assert result1.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result1 == [[46, 45]]) @@ -674,7 +712,8 @@ def test_getitem(self, mode): assert np.all(result1 == [[44, 43, 42]]) result2 = np.array(f.data[6:4:-1, 6:1:-1]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result2.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result2[0] == [[54, 53]]) @@ -684,7 +723,8 @@ def test_getitem(self, mode): assert np.all(result2[1] == [[44, 43, 42]]) result3 = np.array(f.data[6:4:-1, 2:7]) - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert result3.size == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(result3[0] == [[50, 51]]) @@ -779,7 +819,8 @@ def test_setitem(self, mode): [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) - elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y] or RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: + elif LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y] \ + or RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(g.data)) == 0 else: assert np.all(np.array(g.data)) == 0 @@ -914,7 +955,9 @@ def test_niche_slicing(self, mode): t.data[:] = b tdat0 = np.array(f.data[-2::, -2::]) - if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0] or LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0] or RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: + if LEFT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0] \ + or LEFT in glb_pos_map0[x0] and RIGHT in glb_pos_map0[y0] \ + or RIGHT in glb_pos_map0[x0] and LEFT in glb_pos_map0[y0]: assert 
tdat0.size == 0 else: assert np.all(tdat0 == [[54, 55], @@ -1080,7 +1123,8 @@ def test_neg_start_stop(self, mode): h.data[8:10, 0:4] = f.data[slices] - if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: + if LEFT in glb_pos_map[x] and LEFT in glb_pos_map[y] \ + or LEFT in glb_pos_map[x] and RIGHT in glb_pos_map[y]: assert np.count_nonzero(h.data[:]) == 0 elif RIGHT in glb_pos_map[x] and LEFT in glb_pos_map[y]: assert np.all(np.array(h.data) == [[0, 0, 0, 0, 0, 0], @@ -1202,8 +1246,8 @@ def test_from_replicated_to_distributed(self, mode): u.data[1:3, 1:3] = a[1:2, 1:2] except ValueError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parallel(mode=4) def test_misc_setup(self, mode): @@ -1225,7 +1269,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c3', grid=grid, dimensions=(y, dy)) - assert False + raise AssertionError('Assert False') except TypeError: # Missing `shape` assert True @@ -1233,7 +1277,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c4', grid=grid, dimensions=(y, dy), shape=(3, 5)) - assert False + raise AssertionError('Assert False') except ValueError: # The provided y-size, 3, doesn't match the y-size in grid (4) assert True @@ -1241,7 +1285,7 @@ def test_misc_setup(self, mode): # The following should all raise an exception as illegal try: Function(name='c4', grid=grid, dimensions=(y, dy), shape=(4,)) - assert False + raise AssertionError('Assert False') except ValueError: # Too few entries for `shape` (two expected, for `y` and `dy`) assert True @@ -1314,7 +1358,7 @@ def test_inversions(self, gslice, mode): lslice = loc_data_idx(f.data._index_glb_to_loc(gslice)) sl = [] Null = slice(-1, -2, None) - for s, gs, d in zip(lslice, gslice, f._decomposition): + for s, gs, d in zip(lslice, gslice, 
f._decomposition, strict=True): if type(s) is slice and s == Null: sl.append(s) elif type(gs) is not slice: @@ -1420,7 +1464,7 @@ def test_sliced_gather_2D(self, start, stop, step, mode): if isinstance(stop, int) or stop is None: stop = [stop for _ in grid.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) @@ -1454,7 +1498,7 @@ def test_sliced_gather_3D(self, start, stop, step, mode): if isinstance(stop, int) or stop is None: stop = [stop for _ in grid.shape] idx = [] - for i, j, k in zip(start, stop, step): + for i, j, k in zip(start, stop, step, strict=True): idx.append(slice(i, j, k)) idx = tuple(idx) diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index 4600305fb9..a18771d245 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -112,7 +112,7 @@ def test_unevaluation(self, SymbolType, derivative, dim, expected): expr = getattr(expr, d) assert(expr.__str__() == expected) # Make sure the FD evaluation executes - expr.evaluate + _ = expr.evaluate @pytest.mark.parametrize('expr,expected', [ ('u.dx + u.dy', 'Derivative(u, x) + Derivative(u, y)'), @@ -540,11 +540,11 @@ def test_all_shortcuts(self, so): assert getattr(g, fd) for d in grid.dimensions: - assert 'd%s' % d.name in f._fd - assert 'd%s' % d.name in g._fd + assert f'd{d.name}' in f._fd + assert f'd{d.name}' in g._fd for o in range(2, min(7, so+1)): - assert 'd%s%s' % (d.name, o) in f._fd - assert 'd%s%s' % (d.name, o) in g._fd + assert f'd{d.name}{o}' in f._fd + assert f'd{d.name}{o}' in g._fd def test_shortcuts_mixed(self): grid = Grid(shape=(10,)) @@ -611,7 +611,7 @@ def test_shifted_div(self, shift, ndim): for i, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order) + ref += getattr(f, f'd{d.name}')(x0=x0, 
fd_order=order) assert df == ref.evaluate @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3), @@ -621,10 +621,10 @@ def test_shifted_grad(self, shift, ndim): f = Function(name="f", grid=grid, space_order=4) for order in [None, 2]: g = grad(f, shift=shift, order=order).evaluate - for i, (d, gi) in enumerate(zip(grid.dimensions, g)): + for i, (d, gi) in enumerate(zip(grid.dimensions, g, strict=True)): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - gk = getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order).evaluate + gk = getattr(f, f'd{d.name}')(x0=x0, fd_order=order).evaluate assert gi == gk @pytest.mark.parametrize('side', [left, right, centered]) @@ -1170,7 +1170,7 @@ def test_tensor_algebra(self): v = grad(f)._evaluate(expand=False) assert all(isinstance(i, IndexDerivative) for i in v) - assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate)) + assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate, strict=True)) def test_laplacian_opt(self): grid = Grid(shape=(4, 4)) @@ -1178,7 +1178,7 @@ def test_laplacian_opt(self): assert f.laplacian() == f.laplace df = f.laplacian(order=2, shift=.5) - for (v, d) in zip(df.args, grid.dimensions): + for (v, d) in zip(df.args, grid.dimensions, strict=True): assert v.dims[0] == d assert v.fd_order == (2,) assert v.deriv_order == (2,) diff --git a/tests/test_differentiable.py b/tests/test_differentiable.py index 0f97a2ab2a..65554d4487 100644 --- a/tests/test_differentiable.py +++ b/tests/test_differentiable.py @@ -135,15 +135,20 @@ def test_avg_mode(ndim, io): vars = ['i', 'j', 'k'][:ndim] rule = ','.join(vars) + '->' + ''.join(vars) ndcoeffs = np.einsum(rule, *([coeffs]*ndim)) - args = [{d: d + i * d.spacing for d, i in zip(grid.dimensions, s)} for s in all_shift] + args = [ + {d: d + i * d.spacing for d, i in zip(grid.dimensions, s, strict=True)} + for s in all_shift + ] # Default is arithmetic average - expected = sum(c * 
a.subs(arg) for c, arg in zip(ndcoeffs.flatten(), args)) + expected = sum( + c * a.subs(arg) for c, arg in zip(ndcoeffs.flatten(), args, strict=True) + ) assert sympy.simplify(a_avg - expected) == 0 # Harmonic average, h(a[.5]) = 1/(.5/a[0] + .5/a[1]) expected = (sum(c * SafeInv(b.subs(arg), b.subs(arg)) - for c, arg in zip(ndcoeffs.flatten(), args))) + for c, arg in zip(ndcoeffs.flatten(), args, strict=True))) assert sympy.simplify(b_avg.args[0] - expected) == 0 assert isinstance(b_avg, SafeInv) assert b_avg.base == b diff --git a/tests/test_dimension.py b/tests/test_dimension.py index d72d3404a2..8c4781d58e 100644 --- a/tests/test_dimension.py +++ b/tests/test_dimension.py @@ -858,7 +858,7 @@ def test_overrides(self): op.apply(u=u, usave1=u1, usave2=u2, time_M=nt-2) assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1)) - for (uk, fk) in zip((u1, u2), (f1, f2)): + for (uk, fk) in zip((u1, u2), (f1, f2), strict=True): assert np.all([np.allclose(uk.data[i], i*fk) for i in range((nt+fk-1)//fk)]) @@ -886,7 +886,7 @@ def test_overrides_newfact(self): op.apply(u=u, usave1=u2, time_M=nt-2) assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1)) - for (uk, fk) in zip((u1, u2), (f1, f2)): + for (uk, fk) in zip((u1, u2), (f1, f2), strict=True): assert np.all([np.allclose(uk.data[i], i*fk) for i in range((nt+fk-1)//fk)]) @@ -1253,15 +1253,15 @@ def test_no_index_sparse(self): radius = 1 indices = [(INT(floor(i)), INT(floor(i))+radius) - for i in sf._position_map.keys()] + for i in sf._position_map] bounds = [i.symbolic_size - radius for i in grid.dimensions] eqs = [Eq(p, v) for (v, p) in sf._position_map.items()] for e, i in enumerate(product(*indices)): args = [j > 0 for j in i] - args.extend([j < k for j, k in zip(i, bounds)]) + args.extend([j < k for j, k in zip(i, bounds, strict=True)]) condition = And(*args, evaluate=False) - cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition) + cd = ConditionalDimension(f'sfc{e}', parent=sd, condition=condition) index = 
[time] + list(i) eqs.append(Eq(f[index], f[index] + sf[cd])) @@ -1290,7 +1290,7 @@ def test_no_index_symbolic(self): # Ensure both code generation and jitting work op = Operator(eq) - op.cfunction + _ = op.cfunction @pytest.mark.parametrize('value', [0, 1]) def test_constant_as_condition(self, value): @@ -1492,8 +1492,29 @@ def test_stepping_dim_in_condition_lowering(self): op.apply(time_M=threshold+3) assert np.all(g.data[0, :, :] == threshold) assert np.all(g.data[1, :, :] == threshold + 1) - assert 'if (g[t0][x + 1][y + 1] <= 10)\n' - '{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode) + + # We want to assert that the following snippet: + # ``` + # if (g[t0][x + 1][y + 1] <= 10) + # { + # g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1 + # ``` + # is in the generated code, but indentation etc. seems to vary. + part1 = 'if (g[t0][x + 1][y + 1] <= 10)\n' + part2 = 'g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' + whole_code = str(op.ccode) + + try: + loc = whole_code.find(part1) + assert loc != -1 + assert whole_code.find(part2, loc + len(part1)) != -1 + except AssertionError: + # Try the alternative string + part1 = 'if (gL0(t0, x + 1, y + 1) <= 10)\n' + part2 = 'gL0(t1, x + 1, y + 1) = gL0(t0, x + 1, y + 1) + 1' + loc = whole_code.find(part1) + assert loc != -1 + assert whole_code.find(part2, loc + len(part1)) != -1 def test_expr_like_lowering(self): """ @@ -1689,7 +1710,7 @@ def test_sparse_time_function(self): shape = (21, 21, 21) origin = (0., 0., 0.) spacing = (1., 1., 1.) 
- extent = tuple([d * (s - 1) for s, d in zip(shape, spacing)]) + extent = tuple([d * (s - 1) for s, d in zip(shape, spacing, strict=True)]) grid = Grid(shape=shape, extent=extent, origin=origin) time = grid.time_dim x, y, z = grid.dimensions @@ -1698,7 +1719,10 @@ def test_sparse_time_function(self): # Place source in the middle of the grid src_coords = np.empty((1, len(shape)), dtype=np.float32) - src_coords[0, :] = [o + d * (s-1)//2 for o, d, s in zip(origin, spacing, shape)] + src_coords[0, :] = [ + o + d * (s-1)//2 + for o, d, s in zip(origin, spacing, shape, strict=True) + ] src = SparseTimeFunction(name='src', grid=grid, npoint=1, nt=nt) src.data[:] = 1. src.coordinates.data[:] = src_coords[:] @@ -1746,7 +1770,7 @@ def test_issue_1435(self): Eq(f2[t5, t6, t7, t8], 2 * t9 + t10, implicit_dims=cd)]) # Check it compiles correctly! See issue report - op.cfunction + _ = op.cfunction @pytest.mark.parametrize('factor', [ 4, @@ -1809,9 +1833,12 @@ def test_issue_2007(self): # proxy integral f.data[:] = np.array(freq[:]) # Proxy Fourier integral holder - u_re = Function(name="u_re", grid=grid, - dimensions=(freq_dim,) + u.indices[1:], - shape=(nfreq,) + u.shape[1:]) + u_re = Function( + name="u_re", + grid=grid, + dimensions=(freq_dim,) + u.indices[1:], + shape=(nfreq,) + u.shape[1:] + ) # ConditionalDimension based on `f` to simulate bounds of Fourier integral ct = ConditionalDimension(name="ct", parent=time, condition=Ge(time, f)) @@ -1856,7 +1883,7 @@ def test_diff_guards_halts_topofuse(self): op = Operator(eqns) - op.cfunction + _ = op.cfunction assert_structure(op, ['t', 't,x', 't,x'], 't,x,x') @@ -2026,7 +2053,7 @@ def test_shifted_minmax(self): for d in grid.dimensions] eqn = Eq(v, u) - eqn = eqn.xreplace(dict(zip(grid.dimensions, subdims))) + eqn = eqn.xreplace(dict(zip(grid.dimensions, subdims, strict=True))) op = Operator(eqn) diff --git a/tests/test_dle.py b/tests/test_dle.py index c3330ad26a..ced0ad9937 100644 --- a/tests/test_dle.py +++ 
b/tests/test_dle.py @@ -21,8 +21,10 @@ def get_blocksizes(op, opt, grid, blockshape, level=0): - blocksizes = {'%s0_blk%d_size' % (d, level): v - for d, v in zip(grid.dimensions, blockshape)} + blocksizes = { + f'{d}0_blk{level}_size': v + for d, v in zip(grid.dimensions, blockshape, strict=False) + } blocksizes = {k: v for k, v in blocksizes.items() if k in op._known_arguments} # Sanity check if grid.dim == 1 or len(blockshape) == 0: @@ -171,7 +173,7 @@ def test_cache_blocking_structure_distributed(mode): eqns += [Eq(U.forward, U.dx + u.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction bns0, _ = assert_blocking(op._func_table['compute0'].root, {'x0_blk0'}) bns1, _ = assert_blocking(op._func_table['compute2'].root, {'x1_blk0'}) @@ -252,10 +254,10 @@ def test_leftright_subdims(self): eqns = [Eq(damp, 0.)] for d in damp.dimensions: # Left - dl = SubDimension.left(name='%sl' % d.name, parent=d, thickness=nbl) + dl = SubDimension.left(name=f'{d.name}l', parent=d, thickness=nbl) eqns.extend([Inc(damp.subs({d: dl}), 1.)]) # right - dr = SubDimension.right(name='%sr' % d.name, parent=d, thickness=nbl) + dr = SubDimension.right(name=f'{d.name}r', parent=d, thickness=nbl) eqns.extend([Inc(damp.subs({d: dr}), 1.)]) op = Operator(eqns, opt=('fission', 'blocking', {'blockrelax': 'device-aware'})) @@ -340,11 +342,11 @@ def test_structure(self, par_tile, expected): bns, _ = assert_blocking(op, {'x0_blk0', 'x1_blk0'}) assert len(bns) == len(expected) - for root, v in zip(bns.values(), expected): + for root, v in zip(bns.values(), expected, strict=True): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == len(v) - assert all(i.step == j for i, j in zip(iters, v)) + assert all(i.step == j for i, j in zip(iters, v, strict=True)) def test_structure_2p5D(self): grid = Grid(shape=(80, 80, 80)) @@ -396,7 +398,7 @@ def test_custom_rule0(self): iters = FindNodes(Iteration).visit(root) iters = [i for 
i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == 3 - assert all(i.step == j for i, j in zip(iters, par_tile)) + assert all(i.step == j for i, j in zip(iters, par_tile, strict=True)) def test_custom_rule1(self): grid = Grid(shape=(8, 8, 8)) @@ -426,7 +428,7 @@ def test_custom_rule1(self): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == 3 - assert all(i.step == j for i, j in zip(iters, par_tile)) + assert all(i.step == j for i, j in zip(iters, par_tile, strict=True)) @pytest.mark.parametrize("shape", [(10,), (10, 45), (20, 33), (10, 31, 45), (45, 31, 45)]) @@ -508,8 +510,8 @@ def test_cache_blocking_hierarchical(blockshape0, blockshape1, exception): assert np.allclose(wo_blocking, w_blocking, rtol=1e-12) except InvalidArgument: assert exception - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize("blockinner", [False, True]) @@ -533,7 +535,7 @@ def test_cache_blocking_imperfect_nest(blockinner): trees = retrieve_iteration_tree(bns['x0_blk0']) assert len(trees) == 2 assert len(trees[0]) == len(trees[1]) - assert all(i is j for i, j in zip(trees[0][:4], trees[1][:4])) + assert all(i is j for i, j in zip(trees[0][:4], trees[1][:4], strict=True)) assert trees[0][4] is not trees[1][4] assert trees[0].root.dim.is_Block assert trees[1].root.dim.is_Block @@ -581,7 +583,7 @@ def test_cache_blocking_imperfect_nest_v2(blockinner): trees = retrieve_iteration_tree(bns['x0_blk0']) assert len(trees) == 2 assert len(trees[0]) == len(trees[1]) - assert all(i is j for i, j in zip(trees[0][:2], trees[1][:2])) + assert all(i is j for i, j in zip(trees[0][:2], trees[1][:2], strict=True)) assert trees[0][2] is not trees[1][2] assert trees[0].root.dim.is_Block assert trees[1].root.dim.is_Block @@ -710,7 +712,7 @@ def test_iterations_ompized(self, exprs, expected): assert len(iterations) == len(expected) # Check for 
presence of pragma omp - for i, j in zip(iterations, expected): + for i, j in zip(iterations, expected, strict=True): pragmas = i.pragmas if j is True: assert len(pragmas) == 1 @@ -776,11 +778,11 @@ def test_collapsing(self, eqns, expected, blocking): assert len(iterations) == len(expected) # Check for presence of pragma omp + collapse clause - for i, j in zip(iterations, expected): + for i, j in zip(iterations, expected, strict=True): if j > 0: assert len(i.pragmas) == 1 pragma = i.pragmas[0] - assert 'omp for collapse(%d)' % j in pragma.ccode.value + assert f'omp for collapse({j})' in pragma.ccode.value else: for k in i.pragmas: assert 'omp for collapse' not in k.ccode.value diff --git a/tests/test_docstrings.py b/tests/test_docstrings.py index dae7abb149..bb8416d245 100644 --- a/tests/test_docstrings.py +++ b/tests/test_docstrings.py @@ -25,5 +25,5 @@ 'symbolics.inspection', 'tools.utils', 'tools.data_structures' ]) def test_docstrings(modname): - module = import_module('devito.%s' % modname) + module = import_module(f'devito.{modname}') assert doctest.testmod(module).failed == 0 diff --git a/tests/test_dse.py b/tests/test_dse.py index b79605c099..24eb0cda2b 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -50,7 +50,9 @@ def test_scheduling_after_rewrite(): trees = retrieve_iteration_tree(op) # Check loop nest structure - assert all(i.dim is j for i, j in zip(trees[0], grid.dimensions)) # time invariant + assert all( + i.dim is j for i, j in zip(trees[0], grid.dimensions, strict=True) + ) # time invariant assert trees[1].root.dim is grid.time_dim assert all(trees[1].root.dim is tree.root.dim for tree in trees[1:]) @@ -703,8 +705,8 @@ def test_min_storage_in_isolation(self): Operator(eqn, opt=('advanced-fsg', {'openmp': True, 'min-storage': True})) except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Check that `cire-rotate=True` has no effect in this code has there's # no 
blocking @@ -2132,7 +2134,7 @@ def test_sum_of_nested_derivatives(self, expr, exp_arrays, exp_ops): # Also check against expected operation count to make sure # all redundancies have been detected correctly for i, expected in enumerate(as_tuple(exp_ops[n])): - assert summary[('section%d' % i, None)].ops == expected + assert summary[(f'section{i}', None)].ops == expected def test_derivatives_from_different_levels(self): """ @@ -2326,10 +2328,7 @@ def test_blocking_options(self, rotate): 'cire-rotate': rotate, 'min-storage': True})) # Check code generation - if 'openmp' in configuration['language']: - prefix = ['t'] - else: - prefix = [] + prefix = ['t'] if 'openmp' in configuration['language'] else [] if rotate: assert_structure( op1, @@ -2554,7 +2553,7 @@ def test_invariants_with_conditional(self): op = Operator(eqn, opt='advanced') assert_structure(op, ['t', 't,fd', 't,fd,x,y'], 't,fd,x,y') # Make sure it compiles - op.cfunction + _ = op.cfunction # Check hoisting for time invariant eqn = Eq(u, u - (cos(time_sub * factor * f) * sin(g) * uf)) @@ -2562,7 +2561,7 @@ def test_invariants_with_conditional(self): op = Operator(eqn, opt='advanced') assert_structure(op, ['x,y', 't', 't,fd', 't,fd,x,y'], 'x,y,t,fd,x,y') # Make sure it compiles - op.cfunction + _ = op.cfunction def test_hoisting_pow_one(self): """ @@ -2678,7 +2677,7 @@ def test_space_and_time_invariant_together(self): op = Operator(eqn, opt=('advanced', {'openmp': False})) - op.cfunction + _ = op.cfunction assert_structure( op, diff --git a/tests/test_dtypes.py b/tests/test_dtypes.py index a2a04e858f..9e4872f058 100644 --- a/tests/test_dtypes.py +++ b/tests/test_dtypes.py @@ -94,7 +94,7 @@ def test_dtype_mapping(dtype: np.dtype[np.inexact], kwargs: dict[str, str], # Check ctypes of the mapped parameters params: dict[str, Basic] = {p.name: p for p in op.parameters} _u, _c = params['u'], params['c'] - assert type(_u.indexed._C_ctype._type_()) == ctypes_vector_mapper[dtype] + assert 
isinstance(_u.indexed._C_ctype._type_(), ctypes_vector_mapper[dtype]) assert _c._C_ctype == expected or ctypes_vector_mapper[dtype] @@ -125,7 +125,7 @@ def test_cse_ctypes(dtype: np.dtype[np.inexact], kwargs: dict[str, str]) -> None @pytest.mark.parametrize('dtype', [np.float32, np.complex64, np.complex128]) @pytest.mark.parametrize('kwargs', _configs, ids=kw_id) def test_complex_headers(dtype: np.dtype[np.inexact], kwargs: dict[str, str]) -> None: - np.dtype + _ = np.dtype """ Tests that the correct complex headers are included when complex dtypes are present in the operator, and omitted otherwise. @@ -160,10 +160,7 @@ def test_imag_unit(dtype: np.complexfloating, kwargs: dict[str, str]) -> None: unit_str = '_Complex_I' else: # C++ provides imaginary literals - if dtype == np.complex64: - unit_str = '1if' - else: - unit_str = '1i' + unit_str = '1if' if dtype == np.complex64 else '1i' # Set up an operator s = Symbol(name='s', dtype=dtype) @@ -191,10 +188,10 @@ def test_math_functions(dtype: np.dtype[np.inexact], if 'CXX' not in configuration['language']: if np.issubdtype(dtype, np.complexfloating): # Complex functions have a 'c' prefix - call_str = 'c%s' % call_str + call_str = f'c{call_str}' if dtype(0).real.itemsize <= 4: # Single precision have an 'f' suffix (half is promoted to single) - call_str = '%sf' % call_str + call_str = f'{call_str}f' # Operator setup a = Symbol(name='a', dtype=dtype) diff --git a/tests/test_gpu_common.py b/tests/test_gpu_common.py index af5c71fc11..a639c5bbe6 100644 --- a/tests/test_gpu_common.py +++ b/tests/test_gpu_common.py @@ -509,7 +509,7 @@ def test_attempt_tasking_but_no_temporaries(self, opt): # a host Function piters = FindNodes(OmpIteration).visit(op) assert len(piters) == 1 - assert type(piters.pop()) == OmpIteration + assert isinstance(piters.pop(), OmpIteration) def test_tasking_multi_output(self): nt = 10 diff --git a/tests/test_gpu_openacc.py b/tests/test_gpu_openacc.py index ccaaa0bd6a..ada536197e 100644 --- 
a/tests/test_gpu_openacc.py +++ b/tests/test_gpu_openacc.py @@ -61,8 +61,8 @@ def test_basic_customop(self): platform='nvidiaX', language='openacc', opt='openmp') except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize('opt', opts_device_tiling) def test_blocking(self, opt): @@ -112,7 +112,7 @@ def test_tile_insteadof_collapse(self, par_tile): 'acc parallel loop tile(32,4) present(u)' strtile = ','.join([str(i) for i in stile]) assert trees[3][1].pragmas[0].ccode.value ==\ - 'acc parallel loop tile(%s) present(src,src_coords,u)' % strtile + f'acc parallel loop tile({strtile}) present(src,src_coords,u)' @pytest.mark.parametrize('par_tile', [((32, 4, 4), (8, 8)), ((32, 4), (8, 8)), ((32, 4, 4), (8, 8, 8)), @@ -141,7 +141,7 @@ def test_multiple_tile_sizes(self, par_tile): 'acc parallel loop tile(8,8) present(u)' sclause = 'collapse(4)' if par_tile[-1] is None else 'tile(8,8,8,8)' assert trees[3][1].pragmas[0].ccode.value ==\ - 'acc parallel loop %s present(src,src_coords,u)' % sclause + f'acc parallel loop {sclause} present(src,src_coords,u)' def test_multi_tile_blocking_structure(self): grid = Grid(shape=(8, 8, 8)) @@ -166,11 +166,11 @@ def test_multi_tile_blocking_structure(self): 'acc parallel loop tile(32,4,4) present(u)' assert bns['x1_blk0'].pragmas[0].ccode.value ==\ 'acc parallel loop tile(16,4,4) present(u,v)' - for root, v in zip(bns.values(), expected): + for root, v in zip(bns.values(), expected, strict=True): iters = FindNodes(Iteration).visit(root) iters = [i for i in iters if i.dim.is_Block and i.dim._depth == 1] assert len(iters) == len(v) - assert all(i.step == j for i, j in zip(iters, v)) + assert all(i.step == j for i, j in zip(iters, v, strict=True)) def test_std_max(self): grid = Grid(shape=(3, 3, 3)) diff --git a/tests/test_gpu_openmp.py b/tests/test_gpu_openmp.py index 3d3b0d4b09..d6505a6f71 100644 --- a/tests/test_gpu_openmp.py +++ 
b/tests/test_gpu_openmp.py @@ -84,8 +84,8 @@ def test_basic_customop(self): Operator(Eq(u.forward, u + 1), language='openmp', opt='openacc') except InvalidOperator: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parametrize('opt', opts_device_tiling) def test_blocking(self, opt): @@ -125,17 +125,15 @@ def test_multiple_eqns(self): 'omp target teams distribute parallel for collapse(3)' for i, f in enumerate([u, v]): assert op.body.maps[i].ccode.value ==\ - ('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target enter data map(to: {f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 0].ccode.value ==\ - ('omp target update from(%(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]])' % - {'n': f.name}) + (f'omp target update from({f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]])') assert op.body.unmaps[2*i + 1].ccode.value ==\ - ('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]*' - '%(n)s_vec->size[1]*%(n)s_vec->size[2]*%(n)s_vec->size[3]]) ' - 'if(devicerm)' % {'n': f.name}) + (f'omp target exit data map(release: {f.name}[0:{f.name}_vec->size[0]*' + f'{f.name}_vec->size[1]*{f.name}_vec->size[2]*{f.name}_vec->size[3]]) ' + 'if(devicerm)') def test_multiple_loops(self): grid = Grid(shape=(3, 3, 3)) @@ -164,18 +162,21 @@ def test_multiple_loops(self): # Check `u` and `v` for i, f in enumerate([u, v], 1): - assert op.body.maps[i].ccode.value ==\ - ('omp target enter data map(to: %(n)s[0:%(n)s_vec->size[0]]' - '[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' % - {'n': f.name}) - assert op.body.unmaps[2*i + 0].ccode.value ==\ - ('omp target update from(%(n)s[0:%(n)s_vec->size[0]]' - 
'[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]])' % - {'n': f.name}) - assert op.body.unmaps[2*i + 1].ccode.value ==\ - ('omp target exit data map(release: %(n)s[0:%(n)s_vec->size[0]]' - '[0:%(n)s_vec->size[1]][0:%(n)s_vec->size[2]][0:%(n)s_vec->size[3]]) ' - 'if(devicerm)' % {'n': f.name}) + assert op.body.maps[i].ccode.value == ( + f'omp target enter data map(to: {f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])' + ) + assert op.body.unmaps[2*i + 0].ccode.value == ( + f'omp target update from({f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]])' + ) + assert op.body.unmaps[2*i + 1].ccode.value == ( + f'omp target exit data map(release: {f.name}' + f'[0:{f.name}_vec->size[0]][0:{f.name}_vec->size[1]]' + f'[0:{f.name}_vec->size[2]][0:{f.name}_vec->size[3]]) ' + 'if(devicerm)') # Check `f` assert op.body.maps[0].ccode.value ==\ diff --git a/tests/test_gradient.py b/tests/test_gradient.py index fd070a91f5..3ab672d73a 100644 --- a/tests/test_gradient.py +++ b/tests/test_gradient.py @@ -220,7 +220,7 @@ def test_gradientFWI(self, dtype, space_order, kernel, shape, ckp, setup_func, # Add the perturbation to the model def initializer(data): data[:] = np.sqrt(vel0.data**2 * v**2 / - ((1 - H[i]) * v**2 + H[i] * vel0.data**2)) + ((1 - H[i]) * v**2 + H[i] * vel0.data**2)) # noqa: B023 vloc = Function(name='vloc', grid=wave.model.grid, space_order=space_order, initializer=initializer) # Data for the new model @@ -234,8 +234,8 @@ def initializer(data): # Test slope of the tests p1 = np.polyfit(np.log10(H), np.log10(error1), 1) p2 = np.polyfit(np.log10(H), np.log10(error2), 1) - info('1st order error, Phi(m0+dm)-Phi(m0): %s' % (p1)) - info(r'2nd order error, Phi(m0+dm)-Phi(m0) - : %s' % (p2)) + info(f'1st order error, Phi(m0+dm)-Phi(m0): {p1}') + info(rf'2nd order error, Phi(m0+dm)-Phi(m0) - : {p2}') assert 
np.isclose(p1[0], 1.0, rtol=0.1) assert np.isclose(p2[0], 2.0, rtol=0.1) @@ -280,7 +280,7 @@ def test_gradientJ(self, dtype, space_order, kernel, shape, spacing, time_order, # Add the perturbation to the model def initializer(data): data[:] = np.sqrt(v0.data**2 * v**2 / - ((1 - H[i]) * v**2 + H[i] * v0.data**2)) + ((1 - H[i]) * v**2 + H[i] * v0.data**2)) # noqa: B023 vloc = Function(name='vloc', grid=wave.model.grid, space_order=space_order, initializer=initializer) # Data for the new model @@ -295,9 +295,9 @@ def initializer(data): # Test slope of the tests p1 = np.polyfit(np.log10(H), np.log10(error1), 1) p2 = np.polyfit(np.log10(H), np.log10(error2), 1) - info('1st order error, Phi(m0+dm)-Phi(m0) with slope: %s compared to 1' % (p1[0])) + info(f'1st order error, Phi(m0+dm)-Phi(m0) with slope: {p1[0]} compared to 1') info(r'2nd order error, Phi(m0+dm)-Phi(m0) - with slope:' - ' %s compared to 2' % (p2[0])) + f' {p2[0]} compared to 2') assert np.isclose(p1[0], 1.0, rtol=0.1) assert np.isclose(p2[0], 2.0, rtol=0.1) diff --git a/tests/test_iet.py b/tests/test_iet.py index 7f0675eaf0..7bc4f1e709 100644 --- a/tests/test_iet.py +++ b/tests/test_iet.py @@ -77,11 +77,11 @@ def test_make_efuncs(exprs, nfuncs, ntimeiters, nests): efuncs = [] for n, tree in enumerate(retrieve_iteration_tree(op)): root = filter_iterations(tree, key=lambda i: i.dim.is_Space)[0] - efuncs.append(make_efunc('f%d' % n, root)) + efuncs.append(make_efunc(f'f{n}', root)) assert len(efuncs) == len(nfuncs) == len(ntimeiters) == len(nests) - for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests): + for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests, strict=True): # Check the `efunc` parameters assert all(i in efunc.parameters for i in (x.symbolic_min, x.symbolic_max)) assert all(i in efunc.parameters for i in (y.symbolic_min, y.symbolic_max)) @@ -98,7 +98,7 @@ def test_make_efuncs(exprs, nfuncs, ntimeiters, nests): trees = retrieve_iteration_tree(efunc) assert len(trees) == 1 
tree = trees[0] - assert all(i.dim.name == j for i, j in zip(tree, nest)) + assert all(i.dim.name == j for i, j in zip(tree, nest, strict=True)) assert efunc.make_call() diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index cdb6f7773f..1cde2e13e2 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -466,7 +466,7 @@ def test_multi_inject(shape, coords, nexpr, result, npoints=19): indices = [slice(4, 6, 1) for _ in coords] indices[0] = slice(1, -1, 1) result = (result, result) if nexpr == 1 else (result, 2 * result) - for r, a in zip(result, (a1, a2)): + for r, a in zip(result, (a1, a2), strict=True): assert np.allclose(a.data[indices], r, rtol=1.e-5) @@ -628,7 +628,7 @@ def test_edge_sparse(): sf1.coordinates.data[0, :] = (25.0, 35.0) expr = sf1.interpolate(u) - subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing)} + subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing, strict=True)} op = Operator(expr, subs=subs) op() @@ -766,7 +766,7 @@ def test_interpolation_radius(r, interp): r=r, interpolation=interp) try: src.interpolate(u) - assert False + raise AssertionError('Assert False') except ValueError: assert True diff --git a/tests/test_ir.py b/tests/test_ir.py index 90de31ddfd..16440ec54a 100644 --- a/tests/test_ir.py +++ b/tests/test_ir.py @@ -172,11 +172,11 @@ def test_iteration_instance_arithmetic(self, x, y, ii_num, ii_literal): for ii in [fax, fa4]: try: ii + fcx1y - assert False + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e def test_iteration_instance_distance(self, ii_num, ii_literal): """ @@ -197,11 +197,11 @@ def test_iteration_instance_distance(self, ii_num, ii_literal): # Should fail due mismatching indices try: fcxy.distance(fax) - assert False + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + 
raise AssertionError('Assert False') from e def test_iteration_instance_cmp(self, ii_num, ii_literal): """ @@ -219,14 +219,14 @@ def test_iteration_instance_cmp(self, ii_num, ii_literal): assert fc23 > fc00 assert fc00 >= fc00 - # Lexicographic comparison with numbers but different rank should faxl + # Lexicographic comparison with numbers but different rank should fail try: - fa4 > fc23 - assert False + fa4 > fc23 # noqa: B015 + raise AssertionError('Assert False') except TypeError: pass - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Lexicographic comparison with literals assert fcxy <= fcxy @@ -315,30 +315,30 @@ def test_timed_access_cmp(self, ta_literal): # Non-comparable due to different direction try: - rev_tcxy_w0 > tcxy_r0 - assert False + rev_tcxy_w0 > tcxy_r0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Non-comparable due to different aindices try: - tcxy_w0 > tcyx_irr0 - assert False + tcxy_w0 > tcyx_irr0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Non-comparable due to mismatching Intervals try: - tcxy_w0 > tcyx_irr0 - assert False + tcxy_w0 > tcyx_irr0 # noqa: B015 + raise AssertionError('Assert False') except TypeError: assert True - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e # Comparable even though the TimedAccess is irregular (reflexivity) assert tcyx_irr0 >= tcyx_irr0 @@ -445,12 +445,12 @@ def test_intervals_union(self, x, y): for i, j in [(ix, nully), (ix, iy), (iy, ix), (ix, ixs1), (ixs1, ix)]: try: i.union(j) - assert False # Shouldn't arrive here + raise AssertionError('Assert False') # Shouldn't arrive here except ValueError: assert True - except: + except Exception as e: # No 
other types of exception expected - assert False + raise AssertionError('Assert False') from e # Mixed symbolic and non-symbolic c = Constant(name='c') @@ -637,13 +637,13 @@ def test_single_eq(self, expr, expected, ti0, ti1, fa, grid): types = ['flow', 'anti'] if type != 'all': types.remove(type) - assert len(getattr(scope, 'd_%s' % type)) == 1 - assert all(len(getattr(scope, 'd_%s' % i)) == 0 for i in types) + assert len(getattr(scope, f'd_{type}')) == 1 + assert all(len(getattr(scope, f'd_{i}')) == 0 for i in types) else: - assert all(len(getattr(scope, 'd_%s' % i)) == 1 for i in types) + assert all(len(getattr(scope, f'd_{i}')) == 1 for i in types) # Check mode - assert getattr(dep, 'is_%s' % mode)() + assert getattr(dep, f'is_{mode}')() # Check cause if exp_cause == 'None': @@ -655,13 +655,13 @@ def test_single_eq(self, expr, expected, ti0, ti1, fa, grid): assert cause.name == exp_cause # Check mode restricted to the cause - assert getattr(dep, 'is_%s' % mode)(cause) + assert getattr(dep, f'is_{mode}')(cause) non_causes = [i for i in grid.dimensions if i is not cause] - assert all(not getattr(dep, 'is_%s' % mode)(i) for i in non_causes) + assert all(not getattr(dep, f'is_{mode}')(i) for i in non_causes) # Check if it's regular or irregular - assert getattr(dep.source, 'is_%s' % regular) or\ - getattr(dep.sink, 'is_%s' % regular) + assert getattr(dep.source, f'is_{regular}') or\ + getattr(dep.sink, f'is_{regular}') @pytest.mark.parametrize('exprs,expected', [ # Trivial flow dep @@ -723,7 +723,7 @@ def test_multiple_eqs(self, exprs, expected, ti0, ti1, ti3, fa): assert len(scope.d_all) == len(expected) for i in ['flow', 'anti', 'output']: - for dep in getattr(scope, 'd_%s' % i): + for dep in getattr(scope, f'd_{i}'): item = (dep.function.name, i, str(set(dep.cause))) assert item in expected expected.remove(item) diff --git a/tests/test_mpi.py b/tests/test_mpi.py index ede7989683..9159071997 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -167,7 +167,9 
@@ def test_ctypes_neighborhood(self, mode): (0, 1, PN, 2, 3, PN, PN, PN, PN)] } - mapper = dict(zip(attrs, expected[distributor.nprocs][distributor.myrank])) + mapper = dict(zip( + attrs, expected[distributor.nprocs][distributor.myrank], strict=True + )) obj = distributor._obj_neighborhood value = obj._arg_defaults()[obj.name] assert all(getattr(value._obj, k) == v for k, v in mapper.items()) @@ -357,8 +359,13 @@ def define(self, dimensions): md = MyDomain(grid=grid) d = md.distributor - for dec, pdec, sdi, sh in zip(d.decomposition, d.parent.decomposition, - d.subdomain_interval, grid.shape): + for dec, pdec, sdi, sh in zip( + d.decomposition, + d.parent.decomposition, + d.subdomain_interval, + grid.shape, + strict=True + ): # Get the global min and max lower_bounds = [np.amin(i) for i in dec if i.size != 0] upper_bounds = [np.amax(i) for i in dec if i.size != 0] @@ -578,8 +585,14 @@ def test_local_indices(self, shape, expected, mode): grid = Grid(shape=shape) f = Function(name='f', grid=grid) - assert all(i == slice(*j) - for i, j in zip(f.local_indices, expected[grid.distributor.myrank])) + assert all( + i == slice(*j) + for i, j in zip( + f.local_indices, + expected[grid.distributor.myrank], + strict=True + ) + ) @pytest.mark.parallel(mode=4) @pytest.mark.parametrize('shape', [(1,), (2, 3), (4, 5, 6)]) @@ -1498,7 +1511,7 @@ def test_avoid_merging_if_diff_functions(self, mode): eqns += [Eq(U.forward, U.dx + u.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction check_halo_exchanges(op, 2, 2) @@ -1521,7 +1534,7 @@ def test_merge_haloupdate_if_diff_locindices(self, mode): op = Operator(eqns) assert len(FindNodes(HaloUpdateCall).visit(op)) == 1 - op.cfunction + _ = op.cfunction @pytest.mark.parallel(mode=2) def test_merge_and_hoist_haloupdate_if_diff_locindices(self, mode): @@ -1601,7 +1614,7 @@ def test_merge_haloupdate_if_diff_but_equivalent_locindices(self, mode): rec.interpolate(expr=v1)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, 
_ = check_halo_exchanges(op, 2, 2) for i, v in enumerate([v2, v1]): @@ -1794,7 +1807,7 @@ def test_min_code_size(self, mode): op = Operator(eqns) - op.cfunction + _ = op.cfunction calls = FindNodes(Call).visit(op) @@ -1834,7 +1847,7 @@ def test_many_functions(self, mode): op = Operator(eqns) - op.cfunction + _ = op.cfunction calls = FindNodes(Call).visit(op) assert len(calls) == 2 @@ -1932,7 +1945,7 @@ def test_haloupdate_buffer_cases(self, sz, fwd, expr, exp0, exp1, args, mode): eval(expr)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, exp0, exp1) for i, v in enumerate(args): @@ -1954,7 +1967,7 @@ def test_avoid_hoisting_if_antidep(self, mode): Eq(v3, v2.laplace + v1)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 3, 2) # More specifically, we ensure HaloSpot(v2) is on the last loop nest @@ -1975,7 +1988,7 @@ def test_hoist_haloupdate_if_in_the_middle(self, mode): rec.interpolate(expr=v1.forward)] op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 3, 2) assert calls[0].arguments[0] is v2 @@ -2011,7 +2024,7 @@ def test_merge_smart_if_within_conditional(self, mode): eq1 = Eq(f.backward, f.laplace + .002) op1 = Operator(rec + [eq1]) - op1.cfunction + _ = op1.cfunction check_halo_exchanges(op1, 1, 1) @@ -2541,9 +2554,9 @@ def test_nontrivial_operator(self, mode): t = grid.stepping_dim # SubDimensions to implement BCs - xl, yl = [SubDimension.left('%sl' % d.name, d, tkn) for d in [x, y]] - xi, yi = [SubDimension.middle('%si' % d.name, d, tkn, tkn) for d in [x, y]] - xr, yr = [SubDimension.right('%sr' % d.name, d, tkn) for d in [x, y]] + xl, yl = [SubDimension.left(f'{d.name}l', d, tkn) for d in [x, y]] + xi, yi = [SubDimension.middle(f'{d.name}i', d, tkn, tkn) for d in [x, y]] + xr, yr = [SubDimension.right(f'{d.name}r', d, tkn) for d in [x, y]] # Functions u = TimeFunction(name='f', grid=grid) @@ -3127,7 +3140,7 @@ def 
test_interpolation_at_uforward(self, mode): op = Operator(eqns) - op.cfunction + _ = op.cfunction calls, _ = check_halo_exchanges(op, 2, 1) args = calls[0].arguments @@ -3141,7 +3154,7 @@ def gen_serial_norms(shape, so): """ day = np.datetime64('today') try: - l = np.load("norms%s.npy" % len(shape), allow_pickle=True) + l = np.load(f"norms{len(shape)}.npy", allow_pickle=True) assert l[-1] == day except: tn = 500. # Final time @@ -3161,7 +3174,7 @@ def gen_serial_norms(shape, so): Ev = norm(v) Esrca = norm(srca) - np.save("norms%s.npy" % len(shape), (Eu, Erec, Ev, Esrca, day), allow_pickle=True) + np.save(f"norms{len(shape)}.npy", (Eu, Erec, Ev, Esrca, day), allow_pickle=True) class TestIsotropicAcoustic: @@ -3280,7 +3293,7 @@ def test_elastic_structure(self, mode): u_t = Eq(tau.forward, damp * solve(pde_tau, tau.forward)) op = Operator([u_v] + [u_t] + rec_term) - op.cfunction + _ = op.cfunction assert len(op._func_table) == 11 @@ -3350,7 +3363,7 @@ def test_issue_2448_v1(self, mode, setup): rec_term1 = rec.interpolate(expr=v.forward) op1 = Operator([u_v, u_tau, rec_term1]) - op1.cfunction + _ = op1.cfunction calls, _ = check_halo_exchanges(op1, 2, 2) assert calls[0].arguments[0] is tau @@ -3411,7 +3424,7 @@ def test_issue_2448_v3(self, mode, setup): rec_term3 = rec2.interpolate(expr=v2.forward) op3 = Operator([u_v, u_v2, u_tau, u_tau2, rec_term0, rec_term3]) - op3.cfunction + _ = op3.cfunction calls = [i for i in FindNodes(Call).visit(op3) if isinstance(i, HaloUpdateCall)] @@ -3493,7 +3506,7 @@ def get_time_loop(op): for i in iters: if i.dim.is_Time: return i - assert False + raise AssertionError('Assert False') if __name__ == "__main__": diff --git a/tests/test_operator.py b/tests/test_operator.py index b05aa642b8..3e417d74b8 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -69,7 +69,7 @@ def test_platform_compiler_language(self): # Unrecognised platform name -> exception try: Operator(Eq(u, u + 1), platform='asga') - assert False + raise 
AssertionError('Assert False') except InvalidOperator: assert True @@ -93,7 +93,7 @@ def test_platform_compiler_language(self): # ... but it will raise an exception if an unknown one try: Operator(Eq(u, u + 1), platform='nvidiaX', compiler='asf') - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -107,7 +107,7 @@ def test_platform_compiler_language(self): # Unsupported combination of `platform` and `language` should throw an error try: Operator(Eq(u, u + 1), platform='bdw', language='openacc') - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -123,14 +123,14 @@ def test_opt_options(self): # Unknown pass try: Operator(Eq(u, u + 1), opt=('aaa')) - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True # Unknown optimization option try: Operator(Eq(u, u + 1), opt=('advanced', {'aaa': 1})) - assert False + raise AssertionError('Assert False') except InvalidOperator: assert True @@ -302,7 +302,7 @@ def test_timedlist_wraps_time_if_parallel(self): ompreg = timedlist.body[0] assert ompreg.body[0].dim is grid.time_dim else: - timedlist.body[0].dim is grid.time_dim + timedlist.body[0].dim is grid.time_dim # noqa: B015 def test_nested_lowering(self): """ @@ -761,8 +761,7 @@ def verify_arguments(self, arguments, expected): condition = arguments[name] == v if not condition: - error('Wrong argument %s: expected %s, got %s' % - (name, v, arguments[name])) + error(f'Wrong argument {name}: expected {v}, got {arguments[name]}') assert condition def verify_parameters(self, parameters, expected): @@ -774,11 +773,11 @@ def verify_parameters(self, parameters, expected): parameters = [p.name for p in parameters] for expi in expected: if expi not in parameters + boilerplate: - error("Missing parameter: %s" % expi) + error(f"Missing parameter: {expi}") assert expi in parameters + boilerplate extra = [p for p in parameters if p not in expected and p not in boilerplate] if 
len(extra) > 0: - error("Redundant parameters: %s" % str(extra)) + error(f"Redundant parameters: {str(extra)}") assert len(extra) == 0 def test_default_functions(self): @@ -1173,7 +1172,7 @@ def test_argument_unknown(self): op = Operator(Eq(a, a + a)) try: op.apply(b=3) - assert False + raise AssertionError('Assert False') except ValueError: # `b` means nothing to `op`, so we end up here assert True @@ -1182,9 +1181,9 @@ def test_argument_unknown(self): configuration['ignore-unknowns'] = True op.apply(b=3) assert True - except ValueError: + except ValueError as e: # we should not end up here as we're now ignoring unknown arguments - assert False + raise AssertionError('Assert False') from e finally: configuration['ignore-unknowns'] = configuration._defaults['ignore-unknowns'] @@ -1226,11 +1225,11 @@ def test_illegal_override(self): try: op.apply(a=a1, b=b0) - assert False + raise AssertionError('Assert False') except ValueError as e: assert 'Override' in e.args[0] # Check it's hitting the right error msg - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e def test_incomplete_override(self): """ @@ -1249,11 +1248,11 @@ def test_incomplete_override(self): try: op.apply(a=a1) - assert False + raise AssertionError('Assert False') except ValueError as e: assert 'Default' in e.args[0] # Check it's hitting the right error msg - except: - assert False + except Exception as e: + raise AssertionError('Assert False') from e @pytest.mark.parallel(mode=1) def test_new_distributor(self, mode): @@ -1633,7 +1632,10 @@ def test_consistency_anti_dependences(self, exprs, directions, expected, visit): assert "".join(mapper.get(i.dim.name, i.dim.name) for i in iters) == visit # mapper just makes it quicker to write out the test parametrization mapper = {'+': Forward, '-': Backward, '*': Any} - assert all(i.direction == mapper[j] for i, j in zip(iters, directions)) + assert all( + i.direction == mapper[j] + for i, j in zip(iters, directions, 
strict=True) + ) def test_expressions_imperfect_loops(self): """ @@ -1750,7 +1752,10 @@ def test_equations_mixed_functions(self, shape): Eq(b, time*b*a + b)] eqns2 = [Eq(a.forward, a.laplace + 1.), Eq(b2, time*b2*a + b2)] - subs = {d.spacing: v for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim])} + subs = { + d.spacing: v + for d, v in zip(dims0, [2.5, 1.5, 2.0][:grid.dim], strict=True) + } op = Operator(eqns, subs=subs, opt='noop') trees = retrieve_iteration_tree(op) diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 018720a330..407e0433ff 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py @@ -245,10 +245,7 @@ def test_sparse_op(self, pickle, interp, op): interpolation=interp) u = Function(name='u', grid=grid, space_order=4) - if op == 'inject': - expr = sf.inject(u, sf) - else: - expr = sf.interpolate(u) + expr = sf.inject(u, sf) if op == 'inject' else sf.interpolate(u) pkl_expr = pickle.dumps(expr) new_expr = pickle.loads(pkl_expr) @@ -391,8 +388,8 @@ def test_lock(self, pickle): pkl_lock = pickle.dumps(lock) new_lock = pickle.loads(pkl_lock) - lock.name == new_lock.name - new_lock.dimensions[0].symbolic_size == ld.symbolic_size + assert lock.name == new_lock.name + assert new_lock.dimensions[0].symbolic_size == ld.symbolic_size def test_p_thread_array(self, pickle): a = PThreadArray(name='threads', npthreads=4) @@ -686,7 +683,7 @@ def test_foreign(self): coordinates=[(0.,), (1.,), (2.,)]) # Plain `pickle` doesn't support pickling of dynamic classes - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 pickle0.dumps(msf) # But `cloudpickle` does @@ -776,7 +773,7 @@ def test_operator_function(self, pickle): def test_operator_function_w_preallocation(self, pickle): grid = Grid(shape=(3, 3, 3)) f = Function(name='f', grid=grid) - f.data + _ = f.data op = Operator(Eq(f, f + 1)) op.apply() @@ -807,7 +804,7 @@ def test_operator_timefunction(self, pickle): def test_operator_timefunction_w_preallocation(self, pickle): grid = 
Grid(shape=(3, 3, 3)) f = TimeFunction(name='f', grid=grid, save=3) - f.data + _ = f.data op = Operator(Eq(f.forward, f + 1)) op.apply(time=0) @@ -852,8 +849,8 @@ def test_elemental(self, pickle): pkl_op = pickle.dumps(op) new_op = pickle.loads(pkl_op) - op.cfunction - new_op.cfunction + _ = op.cfunction + _ = new_op.cfunction assert str(op) == str(new_op) @@ -953,7 +950,11 @@ def test_mpi_fullmode_objects(self, pickle, mode): assert obj.key == new_obj.key assert obj.name == new_obj.name assert len(new_obj.arguments) == 2 - assert all(d0.name == d1.name for d0, d1 in zip(obj.arguments, new_obj.arguments)) + assert all( + d0.name == d1.name for d0, d1 in zip( + obj.arguments, new_obj.arguments, strict=True + ) + ) assert all(new_obj.arguments[i] is new_obj.owned[i][0][0][0] # `x` and `y` for i in range(2)) assert new_obj.owned[0][0][0][1] is new_obj.owned[1][0][0][1] # `OWNED` @@ -1017,7 +1018,7 @@ def test_full_model(self, pickle): pkl_origin = pickle.dumps(model.grid.origin_symbols) new_origin = pickle.loads(pkl_origin) - for a, b in zip(model.grid.origin_symbols, new_origin): + for a, b in zip(model.grid.origin_symbols, new_origin, strict=True): assert a.compare(b) == 0 # Test Class TimeDimension pickling @@ -1040,7 +1041,7 @@ def test_full_model(self, pickle): assert model.grid.extent == new_grid.extent assert model.grid.shape == new_grid.shape - for a, b in zip(model.grid.dimensions, new_grid.dimensions): + for a, b in zip(model.grid.dimensions, new_grid.dimensions, strict=True): assert a.compare(b) == 0 ricker = RickerSource(name='src', grid=model.grid, f0=f0, time_range=time_range) @@ -1076,7 +1077,9 @@ def test_usave_sampled(self, pickle, subs): op_fwd = Operator(eqn, subs=subs) tmp_pickle_op_fn = "tmp_operator.pickle" - pickle.dump(op_fwd, open(tmp_pickle_op_fn, "wb")) - op_new = pickle.load(open(tmp_pickle_op_fn, "rb")) + with open(tmp_pickle_op_fn, "wb") as fh: + pickle.dump(op_fwd, fh) + with open(tmp_pickle_op_fn, "rb") as fh: + op_new = pickle.load(fh) 
assert str(op_fwd) == str(op_new) diff --git a/tests/test_sparse.py b/tests/test_sparse.py index b8fab00c2d..578c4b16a8 100644 --- a/tests/test_sparse.py +++ b/tests/test_sparse.py @@ -193,7 +193,7 @@ def _pure_python_coeffs(self, mstf): m_coo = mstf.matrix.tocoo() - for row, col, val in zip(m_coo.row, m_coo.col, m_coo.data): + for row, col, val in zip(m_coo.row, m_coo.col, m_coo.data, strict=True): base_gridpoint = mstf.gridpoints.data[row, :] # construct the stencil and the slices to which it will be applied @@ -387,12 +387,12 @@ def test_mpi(self, mode): op = Operator(sf.interpolate(m)) sf.manual_scatter() args = op.arguments(time_m=0, time_M=9) - print("rank %d: %s" % (grid.distributor.myrank, str(args))) + print(f'rank {grid.distributor.myrank}: {args!s}') op.apply(time_m=0, time_M=0) sf.manual_gather() for i in range(grid.distributor.nprocs): - print("==== from rank %d" % i) + print(f'==== from rank {i}') if i == grid.distributor.myrank: print(repr(sf.data)) grid.distributor.comm.Barrier() diff --git a/tests/test_staggered_utils.py b/tests/test_staggered_utils.py index b522b70d3f..daec7f2108 100644 --- a/tests/test_staggered_utils.py +++ b/tests/test_staggered_utils.py @@ -50,7 +50,9 @@ def test_avg(ndim): shifted = f for dd in d: shifted = shifted.subs({dd: dd - dd.spacing/2}) - assert all(i == dd for i, dd in zip(shifted.indices, grid.dimensions)) + assert all( + i == dd for i, dd in zip(shifted.indices, grid.dimensions, strict=True) + ) # Average automatically i.e.: # f not defined at x so f(x, y) = 0.5*f(x - h_x/2, y) + 0.5*f(x + h_x/2, y) avg = f @@ -180,7 +182,7 @@ def test_staggered_rebuild(stagg): # Check that rebuild correctly set the staggered indices # with the new dimensions - for (d, nd) in zip(grid.dimensions, new_dims): + for (d, nd) in zip(grid.dimensions, new_dims, strict=True): if d in as_tuple(stagg) or stagg is CELL: assert f2.indices[nd] == nd + nd.spacing / 2 else: diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index 
72c9a320a7..2fafd4e02b 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -536,7 +536,7 @@ class DummySubdomains(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y'], 'x,y,t,n0,x,y') @@ -572,7 +572,7 @@ class DummySubdomains2(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y', 't,n1', 't,n1,x,y'], @@ -607,7 +607,7 @@ class DummySubdomains2(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['x,y', 't,n0', 't,n0,x,y', 't,n1', 't,n1,x,y', 't,n0', 't,n0,x,y'], @@ -633,7 +633,7 @@ class Dummy(SubDomainSet): op = Operator(eqn) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t,n0', 't,n0,x,y', 't,n0,x,y'], 't,n0,x,y,x,y') @@ -660,7 +660,7 @@ class Dummy(SubDomainSet): op = Operator(eqns) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t', 't,n0', 't,n0,x,y', 't,n0', 't,n0,x,y'], 't,n0,x,y,n0,x,y') @@ -680,7 +680,7 @@ class Dummy(SubDomainSet): op = Operator(eqn) # Make sure it jit-compiles - op.cfunction + _ = op.cfunction assert_structure(op, ['t,n0', 't,n0,x0_blk0,y0_blk0,x,y,z'], 't,n0,x0_blk0,y0_blk0,x,y,z') @@ -1527,7 +1527,7 @@ def test_function_data_shape_mpi(self, x, y, mode): assert np.count_nonzero(g.data) == f.data.size shape = [] - for i, s in zip(f._distributor.subdomain_interval, slices): + for i, s in zip(f._distributor.subdomain_interval, slices, strict=True): if i is None: shape.append(s.stop - s.start) else: diff --git a/tests/test_symbolic_coefficients.py b/tests/test_symbolic_coefficients.py index cc540691a8..7f4e2e2f29 100644 --- a/tests/test_symbolic_coefficients.py +++ b/tests/test_symbolic_coefficients.py @@ -241,10 +241,7 @@ def test_with_timefunction(self, stagger): """Check compatibility of custom 
coefficients and TimeFunctions""" grid = Grid(shape=(11,), extent=(10.,)) x = grid.dimensions[0] - if stagger: - staggered = x - else: - staggered = None + staggered = x if stagger else None f = TimeFunction(name='f', grid=grid, space_order=2, staggered=staggered) g = TimeFunction(name='g', grid=grid, space_order=2, staggered=staggered) diff --git a/tests/test_symbolics.py b/tests/test_symbolics.py index 5945e7ce9a..85d2818940 100644 --- a/tests/test_symbolics.py +++ b/tests/test_symbolics.py @@ -216,7 +216,7 @@ def test_bundle(): fg = Bundle(name='fg', components=(f, g)) # Test reconstruction - fg._rebuild().components == fg.components + assert fg._rebuild().components == fg.components def test_call_from_pointer(): diff --git a/tests/test_tensors.py b/tests/test_tensors.py index dee3c40696..512c48cf55 100644 --- a/tests/test_tensors.py +++ b/tests/test_tensors.py @@ -179,7 +179,7 @@ def test_transpose_vs_T(func1): # inner=True is the same as T assert f3 == f2 # inner=False doesn't transpose inner derivatives - for f4i, f2i in zip(f4, f2): + for f4i, f2i in zip(f4, f2, strict=True): assert f4i == f2i.T @@ -188,7 +188,7 @@ def test_transpose_vs_T(func1): def test_tensor_fd(func1): grid = Grid(tuple([5]*3)) f1 = func1(name="f1", grid=grid) - assert np.all([f.dx == f2 for f, f2 in zip(f1, f1.dx)]) + assert np.all([f.dx == f2 for f, f2 in zip(f1, f1.dx, strict=True)]) @pytest.mark.parametrize('func1, symm, diag, expected', @@ -229,8 +229,8 @@ def test_sympy_matrix(func1): sympy_f1 = f1.as_mutable() vec = sympy.Matrix(3, 1, np.random.rand(3)) mat = sympy.Matrix(3, 3, np.random.rand(3, 3).ravel()) - assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1)) - assert all(sp - dp == 0 for sp, dp in zip(f1 * vec, sympy_f1 * vec)) + assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1, strict=True)) + assert all(sp - dp == 0 for sp, dp in zip(f1 * vec, sympy_f1 * vec, strict=True)) @pytest.mark.parametrize('func1', [VectorFunction, 
VectorTimeFunction]) @@ -241,7 +241,7 @@ def test_sympy_vector(func1): sympy_f1 = f1.as_mutable() mat = sympy.Matrix(3, 3, np.random.rand(3, 3).ravel()) - assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1)) + assert all(sp - dp == 0 for sp, dp in zip(mat * f1, mat * sympy_f1, strict=True)) @pytest.mark.parametrize('func1', [TensorFunction, TensorTimeFunction]) @@ -300,7 +300,7 @@ def test_shifted_grad_of_vector(shift, ndim): for j, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ge = getattr(f[i], 'd%s' % d.name)(x0=x0, fd_order=order) + ge = getattr(f[i], f'd{d.name}')(x0=x0, fd_order=order) ref.append(ge.evaluate) for i, d in enumerate(gf): @@ -317,7 +317,7 @@ def test_shifted_div_of_vector(shift, ndim): for i, d in enumerate(grid.dimensions): x0 = (None if shift is None else d + shift[i] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(v[i], 'd%s' % d.name)(x0=x0, fd_order=order) + ref += getattr(v[i], f'd{d.name}')(x0=x0, fd_order=order) assert df == ref.evaluate @@ -331,12 +331,12 @@ def test_shifted_div_of_tensor(shift, ndim): df = div(f, shift=shift, order=order).evaluate ref = [] - for i, a in enumerate(grid.dimensions): + for i, _ in enumerate(grid.dimensions): elems = [] for j, d in reversed(list(enumerate(grid.dimensions))): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ge = getattr(f[i, j], 'd%s' % d.name)(x0=x0, fd_order=order) + ge = getattr(f[i, j], f'd{d.name}')(x0=x0, fd_order=order) elems.append(ge.evaluate) ref.append(sum(elems)) @@ -369,7 +369,7 @@ def test_shifted_lap_of_vector(shift, ndim): assert v.laplacian() == v.laplace for order in [None, 2]: df = v.laplacian(shift=shift, order=order) - for (vi, dfvi) in zip(v, df): + for (vi, dfvi) in zip(v, df, strict=True): ref = vi.laplacian(shift=shift, order=order) assert dfvi == ref 
@@ -388,7 +388,7 @@ def test_shifted_lap_of_tensor(shift, ndim): for i, d in enumerate(v.space_dimensions): x0 = (None if shift is None else d + shift[i][j] * d.spacing if type(shift) is tuple else d + shift * d.spacing) - ref += getattr(v[j, i], 'd%s2' % d.name)(x0=x0, fd_order=order) + ref += getattr(v[j, i], f'd{d.name}2')(x0=x0, fd_order=order) assert df[j] == ref @@ -398,10 +398,10 @@ def test_basic_arithmetic(): # Scalar operations t1 = tau + 1 - assert all(t1i == ti + 1 for (t1i, ti) in zip(t1, tau)) + assert all(t1i == ti + 1 for (t1i, ti) in zip(t1, tau, strict=True)) t1 = tau * 2 - assert all(t1i == ti * 2 for (t1i, ti) in zip(t1, tau)) + assert all(t1i == ti * 2 for (t1i, ti) in zip(t1, tau, strict=True)) def test_custom_coeffs_vector(): @@ -430,7 +430,7 @@ def test_custom_coeffs_tensor(): c = [10, 10, 10] dtau = div(tau, weights=c) - for i, d in enumerate(grid.dimensions): + for i, _ in enumerate(grid.dimensions): assert dtau[i] == tau[i, 0].dx(w=c) + tau[i, 1].dy(w=c) + tau[i, 2].dz(w=c) assert list(dtau[i].args[0].weights) == c @@ -451,7 +451,7 @@ def test_custom_coeffs_tensor_basic(func): c = [10, 20, 30] df = f.dx(w=c) - for (fi, dfi) in zip(f.values(), df.values()): + for (fi, dfi) in zip(f.values(), df.values(), strict=True): assert dfi == fi.dx(w=c) assert list(dfi.weights) == c @@ -465,7 +465,7 @@ def test_rebuild(func1): assert f1.grid == f2.grid assert f2.name == 'f2' - for (i, j) in zip(f1.flat(), f2.flat()): + for (i, j) in zip(f1.flat(), f2.flat(), strict=True): assert j.name == i.name.replace('f1', 'f2') assert j.grid == i.grid assert j.dimensions == i.dimensions @@ -477,7 +477,7 @@ def test_rebuild(func1): assert f3.grid == grid assert f3.name == f1.name - for (i, j) in zip(f1.flat(), f3.flat()): + for (i, j) in zip(f1.flat(), f3.flat(), strict=True): assert j.name == i.name assert j.grid == i.grid assert j.dimensions == tuple(new_dims) diff --git a/tests/test_threading.py b/tests/test_threading.py index 1c753a16a9..3bc0cce3f0 100644 --- 
a/tests/test_threading.py +++ b/tests/test_threading.py @@ -16,7 +16,7 @@ def test_concurrent_executing_operators(): op = Operator(Eq(u.forward, u + 1)) # this forces the compile - op.cfunction + _ = op.cfunction def do_run(op): # choose a new size @@ -42,7 +42,7 @@ def do_run(op): info("Running operator in threadpool") futures = [] - for i in range(1000): + for _ in range(1000): futures.append(tpe.submit(do_run, op)) # Get results - exceptions will be raised here if there are any diff --git a/tests/test_tti.py b/tests/test_tti.py index 698df07f79..ecf4805111 100644 --- a/tests/test_tti.py +++ b/tests/test_tti.py @@ -73,5 +73,5 @@ def test_tti(shape, so, rot): res = linalg.norm((normal_u - normal_utti - normal_vtti).reshape(-1))**2 res /= np.linalg.norm(normal_u.reshape(-1))**2 - log("Difference between acoustic and TTI with all coefficients to 0 %2.4e" % res) + log(f"Difference between acoustic and TTI with all coefficients to 0 {res:2.4e}") assert np.isclose(res, 0.0, atol=1e-4) diff --git a/tests/test_unexpansion.py b/tests/test_unexpansion.py index d5ef86c7c4..026effcf79 100644 --- a/tests/test_unexpansion.py +++ b/tests/test_unexpansion.py @@ -41,16 +41,16 @@ def test_numeric_coeffs(self): w = np.zeros(3) # Pure derivative - Operator(Eq(u, u.dx2(weights=w)), opt=opt).cfunction + _ = Operator(Eq(u, u.dx2(weights=w)), opt=opt).cfunction # Mixed derivative - Operator(Eq(u, u.dx.dx), opt=opt).cfunction + _ = Operator(Eq(u, u.dx.dx), opt=opt).cfunction # Non-perfect mixed derivative - Operator(Eq(u, (u.dx(weights=w) + v.dx).dx), opt=opt).cfunction + _ = Operator(Eq(u, (u.dx(weights=w) + v.dx).dx), opt=opt).cfunction # Compound expression - Operator(Eq(u, (v*u.dx).dy(weights=w)), opt=opt).cfunction + _ = Operator(Eq(u, (v*u.dx).dy(weights=w)), opt=opt).cfunction @pytest.mark.parametrize('coeffs,expected', [ ((7, 7, 7), 3), # We've had a bug triggered by identical coeffs @@ -71,7 +71,7 @@ def test_multiple_cross_derivs(self, coeffs, expected): 
p.dx(weights=coeffs0).dy(weights=coeffs1)) op = Operator(eq, opt=('advanced', {'expand': False})) - op.cfunction + _ = op.cfunction # w0, w1, ... functions = FindSymbols().visit(op) @@ -244,7 +244,7 @@ def test_v3(self): 'cire-mingain': 200})) # Check generated code -- redundant IndexDerivatives have been caught! - op1._profiler._sections['section0'].sops == 65 + assert op1._profiler._sections['section0'].sops == 65 op0.apply(time_M=5) op1.apply(time_M=5, u=u1, v=v1) @@ -269,7 +269,7 @@ def test_v4(self): 't,x0_blk0,y0_blk0,x,y,z,i1,i0'], 'x,y,z,t,x0_blk0,y0_blk0,x,y,z,i1,i0') - op.cfunction + _ = op.cfunction def test_v5(self): grid = Grid(shape=(16, 16)) @@ -290,7 +290,7 @@ def test_v5(self): assert op._profiler._sections['section0'].sops == 127 assert_structure(op, ['t,x,y', 't,x,y,i1', 't,x,y,i1,i0'], 't,x,y,i1,i0') - op.cfunction + _ = op.cfunction def test_v6(self): grid = Grid(shape=(16, 16)) @@ -315,7 +315,7 @@ def test_v6(self): assert op._profiler._sections['section0'].sops == 133 assert_structure(op, ['t,x,y', 't,x,y,i1', 't,x,y,i1,i0'], 't,x,y,i1,i0') - op.cfunction + _ = op.cfunction def test_transpose(self): shape = (11, 11, 11) @@ -364,7 +364,7 @@ def test_redundant_derivatives(self): temps = [i for i in FindSymbols().visit(exprs) if isinstance(i, Symbol)] assert len(temps) == 2 + nlin - op.cfunction + _ = op.cfunction def test_buffering_timestencil(self): grid = Grid((11, 11)) @@ -441,7 +441,7 @@ def test_v1(self): 't,x0_blk0,y0_blk0,x,y,z,i1'], 'x,y,z,t,x0_blk0,y0_blk0,x,y,z,i0,x,y,z,i1') - op.cfunction + _ = op.cfunction def test_diff_first_deriv(self): grid = Grid(shape=(16, 16, 16)) diff --git a/tests/test_visitors.py b/tests/test_visitors.py index 28b61761e1..06eb933351 100644 --- a/tests/test_visitors.py +++ b/tests/test_visitors.py @@ -391,7 +391,7 @@ def test_map_nodes(block1): assert len(map_nodes.keys()) == 1 - for iters, (expr,) in map_nodes.items(): + for iters in map_nodes: # Replace the outermost `Iteration` with a `Call` callback = 
Callable('solver', iters[0], 'void', ()) processed = Transformer({iters[0]: Call(callback.name)}).visit(block1) diff --git a/tests/test_warnings.py b/tests/test_warnings.py index b3f9e22741..701554bd6e 100644 --- a/tests/test_warnings.py +++ b/tests/test_warnings.py @@ -62,11 +62,15 @@ class TestWarning: """ def test_raise(self): with pytest.warns(UserWarning): - warnings.warn('Let this be a warning to you') + warnings.warn('Let this be a warning to you', stacklevel=1) def test_raise_devito(self): with pytest.warns(DevitoWarning): - warnings.warn('Let this be another warning to you', DevitoWarning) + warnings.warn( + 'Let this be another warning to you', + DevitoWarning, + stacklevel=1 + ) def test_raise_devito_kw(self): with pytest.warns(DevitoWarning): @@ -74,4 +78,4 @@ def test_raise_devito_kw(self): def test_raise_from_custom(self, custom_warning): with pytest.warns(NewWarning): - warnings.warn(custom_warning) + warnings.warn(custom_warning, stacklevel=1)