diff --git a/.cargo/config.toml b/.cargo/config.toml index 39ee4fe..e29e29b 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -2,3 +2,5 @@ # Fast local test cycle without JIT/Python default features. test-fast = "test --no-default-features --lib" test-backpressure = "test --no-default-features --lib backpressure -- --nocapture" +# Fast local test cycle for vortex experimental feature. +test-vortex = "test --no-default-features --features vortex --lib -- --nocapture" diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 51e164e..6e8b7d1 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -19,6 +19,7 @@ Key design decisions & why Developer workflows (essential commands) - Build & test: `cargo test` (CI runs the same). Run entire workspace with `cargo test --manifest-path ./Cargo.toml`. +- Fast local test alias (defined in `.cargo/config.toml`): `cargo test-fast`, and for the Vortex feature: `cargo test-vortex`. - Run example: `cargo run --example basic`. - Format: `cargo fmt` (project has `rustfmt.toml`). - CI: `.github/workflows/ci.yml` runs `cargo test` on push/PR. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0742c47..c91ea62 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,9 +27,9 @@ jobs: uses: Swatinem/rust-cache@v2 - name: Build - run: cargo build --verbose --features pyo3 + run: cargo build --verbose --features pyo3,jit,vortex - name: Run Rust Tests env: RUST_BACKTRACE: 1 - run: cargo test --workspace --verbose --features pyo3 + run: cargo test --workspace --verbose --features pyo3,jit,vortex diff --git a/Cargo.toml b/Cargo.toml index 9677804..35e2f88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,24 +13,27 @@ crate-type = ["cdylib", "rlib"] [dependencies] num_cpus = "*" -cranelift = { version = "0.87", optional = true } -cranelift-module = { version = "0.87", optional = true } -cranelift-jit = { version = "0.87", optional = true } -cranelift-native = { version = "0.87", optional = true } tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "time", "net", "io-util"] } dashmap = "5" parking_lot = "0.12" bytes = "1" tracing = "0.1" -tracing-subscriber = { version = "0.3", optional = true } +tracing-subscriber = { version = "0.3", features = ["env-filter", "time"] } once_cell = "1" crossbeam-channel = "0.5" -# Python (Optional) -pyo3 = { version = "0.19.2", optional = true } -pyo3-asyncio = { version = "0.19", features = ["tokio-runtime"], optional = true } +# Python (Default) +pyo3 = { version = "0.20", features = ["auto-initialize"], optional = true} +pyo3-asyncio = { version = "0.20", features = ["tokio-runtime"], optional = true } + +# JIT (Optional and experimental) +cranelift = { version = "0.87", optional = true } +cranelift-module = { version = "0.87", optional = true } +cranelift-jit = { version = "0.87", optional = true } +cranelift-native = { version = "0.87", optional = true } +wide = { version = "1.2", optional = true } -# Node.js (Optional) +# Node.js (Optional and currently broken) napi = { version = "2.14", default-features = false, 
features = ["napi4", "tokio_rt", "async"] , optional = true } napi-derive = { version = "2.14", optional = true } @@ -38,15 +41,16 @@ napi-derive = { version = "2.14", optional = true } napi-build = { version = "2.0", optional = true } [features] -default = ["pyo3", "jit"] +default = ["pyo3"] pyo3 = ["dep:pyo3", "dep:pyo3-asyncio"] -jit = ["dep:cranelift", "dep:cranelift-module", "dep:cranelift-jit", "dep:cranelift-native"] +jit = ["dep:cranelift", "dep:cranelift-module", "dep:cranelift-jit", "dep:cranelift-native", "dep:wide"] node = ["dep:napi", "dep:napi-derive", "dep:napi-build"] +vortex = ["pyo3"] [dev-dependencies] futures = "0.3" tracing-subscriber = "0.3" -pyo3 = { version = "0.19.2", features = ["auto-initialize"] } +pyo3 = { version = "0.20", features = ["auto-initialize"] } [profile.release] opt-level = 3 @@ -56,5 +60,13 @@ panic = "abort" strip = true [profile.dev] +opt-level = 0 +debug = 0 strip = true panic = "abort" +incremental = true +lto = false +codegen-units = 256 + +[profile.test] +panic = "unwind" \ No newline at end of file diff --git a/README.md b/README.md index 5e0e2db..6e69b05 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,9 @@ ![Language](https://img.shields.io/badge/language-Rust%20%7C%20Python%20%7C%20Node.js-orange.svg?style=for-the-badge&logo=rust) ![License](https://img.shields.io/badge/license-AGPL_3.0-green.svg?style=for-the-badge) -**Hybrid distributed runtime fabric for actors, native compute offload, and cross-language services.** +**Hybrid distributed runtime fabric for actors, cross-language services, and experimental native compute offload.** -[Architecture](docs/architecture.md) • [Usage Guide](docs/usage.md) • [JIT & Offload](docs/jit.md) • [Distributed Mesh](docs/distributed.md) +[Architecture](docs/architecture.md) • [Usage Guide](docs/usage.md) • [JIT & Offload](docs/jit.md) • [Distributed Mesh](docs/distributed.md) • [Vortex-Transmuter](docs/vortex.md) @@ -18,7 +18,7 @@ **Iris** is a hybrid distributed runtime built 
in Rust with first-class **Python** and **Node.js** bindings. It combines three execution styles: - **Actor Mesh:** Stateful, message-driven workflows with high concurrency. -- **Native Offload/JIT:** CPU-heavy hot paths accelerated via Cranelift. +- **Native Offload/JIT:** CPU-heavy hot paths accelerated via Cranelift. This path is experimental, currently paused, and may be dropped in future releases. - **Cross-Language API:** Service-oriented apps mixing Rust, Python, and Node.js. Iris uses a **cooperative reduction-based scheduler** for fairness, providing built-in supervision, hot swapping, discovery, and location-transparent messaging across nodes. @@ -34,9 +34,13 @@ Iris uses a **cooperative reduction-based scheduler** for fairness, providing bu - **Atomic Hot-Swap:** Update live application logic (Python/Node) without zero downtime. - **Global Discovery:** Register and resolve named services locally or over the network. - **Self-Healing:** Path-scoped supervisors and structured `EXIT` reasons for fault tolerance. +- **Vortex-Transmuter (Experimental):** Instruction-bound preemption, transactional ghosting primitives, and guarded bytecode transmutation with explicit fallback telemetry (see [Vortex-Transmuter Guide](docs/vortex.md)). - **JIT Acceleration:** Transparently compile Python math functions to native machine code. - **Quantum Speculation:** Optional multi-variant JIT selection with runtime telemetry, bounded by compile budget and cooldown controls (see [JIT Internals & Configuration](docs/jit.md)). + > [!IMPORTANT] + > JIT acceleration development is currently paused and may be dropped from the project, while the runtime focuses on actor and cross-language capabilities. 
+ --- ## Quick Start @@ -85,13 +89,14 @@ print(fast_math(10.0)) - [Usage Examples & API Guide](docs/usage.md) - [JIT Internals & Configuration](docs/jit.md) - [Distributed Mesh & Discovery](docs/distributed.md) +- [Vortex-Transmuter Guide & Roadmap](docs/vortex.md) --- ## Disclaimer > [!IMPORTANT] -> **Production Status:** Iris is currently in **Beta**. The JIT/offload APIs are experimental. +> **Production Status:** Iris is currently in **Beta**. > > **Performance (v0.3.0):** > - **Push Actors:** 100k+ concurrent actors, ~1.2M+ msgs/sec. diff --git a/docs/plans/FEAT.md b/docs/plans/FEAT.md index e38844e..b8c4576 100644 --- a/docs/plans/FEAT.md +++ b/docs/plans/FEAT.md @@ -86,6 +86,7 @@ --- ## ⚫ Safety & Correctness (Functional Runtimes) +> Delayed for v0.7+ * [ ] **Pure Actor Mode (Optional)** Forbid shared mutable state across actors. diff --git a/docs/usage.md b/docs/usage.md index 7e6f776..e8d197f 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -214,6 +214,32 @@ rt.set_release_gil_strict(True) --- +## Vortex controls (Python Runtime, experimental) + +When Iris is built with the `vortex` feature, `PyRuntime` exposes automatic ghost-arbitration controls and telemetry. + +- `vortex_set_auto_ghost_policy(policy: str) -> bool` + - Accepted values: `FirstSafePointWins`, `PreferPrimary`. +- `vortex_get_auto_ghost_policy() -> Optional[str]` +- `vortex_get_auto_resolution_counts() -> Tuple[int, int]` + - Returns `(primary_wins, ghost_wins)`. +- `vortex_get_auto_replay_count() -> int` +- `vortex_reset_auto_telemetry() -> None` + +Example: + +```python +rt = Runtime() +rt.vortex_reset_auto_telemetry() +rt.vortex_set_auto_ghost_policy("PreferPrimary") + +primary_wins, ghost_wins = rt.vortex_get_auto_resolution_counts() +replayed = rt.vortex_get_auto_replay_count() +print(primary_wins, ghost_wins, replayed) +``` + +--- + ## Lifecycle helpers - `stop(pid: int)` — stop an actor and close mailbox. 
diff --git a/docs/vortex.md b/docs/vortex.md new file mode 100644 index 0000000..e7b7a18 --- /dev/null +++ b/docs/vortex.md @@ -0,0 +1,244 @@ +# Vortex-Transmuter Guide + +This document tracks the Vortex-Transmuter execution model in Iris and maps implementation status against RFC #0003. + +It is intended to answer three questions clearly: +- What Vortex does today. +- What guardrails exist for safety and forward compatibility. +- What remains to reach full RFC behavior. + +--- + +## 1. Scope and Runtime Model + +Vortex is an experimental execution subsystem for deterministic preemption and transactional speculative recovery. + +Current architecture uses three layers: +- Rust runtime integration (`Runtime` + `VortexEngine`) for preemption, staging, and transaction orchestration. +- Python transmutation path (`iris.transmute_function`) that attempts bytecode instrumentation on a shadow function and falls back safely when compatibility checks fail. +- Guard and verifier layer that validates bytecode shape and cache layout before applying patches. + +Primary modules: +- `src/vortex/engine.rs` +- `src/vortex/transaction.rs` +- `src/vortex/transmuter.rs` +- `src/py/vortex.rs` +- `src/vortex/vortex_bytecode.rs` + +--- + +## 2. Implemented Capabilities + +### 2.1 Deterministic preemption checks + +Implemented: +- Reduction-based preemption ticks in the runtime actor handling loop. +- Suspend-path handling that detaches and replenishes budget. +- Automatic suspend hook that triggers ghost checkpoint/race/replay flow in runtime preemption branches. + +Key points: +- Preemption is exercised in the actual actor execution loop (`spawn_handler_with_budget`). +- Automatic hook increments replay telemetry (`vortex_auto_replay_count`) when staged ghost V-IO is committed and replayed. + +### 2.2 Python transmutation and safety fallback + +Implemented: +- Shadow-function transmutation with guard telemetry. 
+- Capability-based compatibility checks (not simple Python-version switches). +- Structured fallback reasons for unsupported or unsafe layouts. +- Stage-specific rewrite failure telemetry (probe extraction, instrumentation, code replace, and shadow construction). + +Typical telemetry reasons: +- `opcode_metadata_unavailable` +- `quickening_metadata_unavailable` +- `invalid_wordcode_shape` +- `inline_cache_entries_incomplete` +- `original_cache_layout_invalid` +- `patched_cache_layout_invalid` +- `patched_code_too_large` +- `stack_depth_invariant_failed` +- `exception_table_metadata_unavailable` +- `exception_table_invalid` +- `patched_stack_metadata_unavailable` +- `patched_exception_table_metadata_unavailable` +- `patched_exception_table_invalid` +- `probe_extraction_failed` +- `probe_instrumentation_failed` +- `code_replace_failed` +- `types_module_unavailable` +- `shadow_function_construction_failed` + +Note: +- Test-only deterministic hooks exist for selected late-stage fallback branches to keep CI behavior stable across CPython runtime variance. + +### 2.3 Bytecode verifier and compatibility checks + +Implemented: +- Wordcode shape verification and maximum size limits. +- Jump target and relative jump validation. +- Inline-cache layout verification using runtime quickening metadata. +- Probe compatibility validation prior to instrumentation. +- Exception-table invariant checks (range/depth, handler-target bounds, ordering, and duplicate-entry rejection). +- Quickening-aware handler-target validation rejects exception handlers that land on `CACHE` opcode slots. +- Stack-size minimum gate for safe probe injection assumptions. + +Design intent: +- Continue operating on instruction-level IR and verifier checks to stay resilient to CPython bytecode format evolution. + +### 2.4 Transactional ghosting primitives + +Implemented: +- Checkpoint capture for primary and ghost transactions. +- Staged V-IO recording and commit/abort semantics. 
+- Ghost race resolution with policy control: + - `FirstSafePointWins` + - `PreferPrimary` +- Replay executor that can stop on failure. + +### 2.5 Quiescence-gated swap support + +Implemented in engine: +- Staged code swap queue. +- Swap application at safe conditions: + - idle + - quiescent stack conditions + - completion (`ctx.done`) + +### 2.6 Runtime-level Vortex APIs + +Implemented wrappers in `Runtime` (feature `vortex`): +- Transaction lifecycle: start/stage/commit/take committed V-IO. +- Ghost lifecycle: start/stage/resolve race/replay. +- Auto policy controls: set/get automatic ghost arbitration policy. +- Auto telemetry accessors: replay count and resolution counts `(primary_wins, ghost_wins)`. +- Auto telemetry reset: clear counters to deterministic baseline for repeated runs. + +Python `PyRuntime` wrappers expose: +- `vortex_set_auto_ghost_policy(...)` +- `vortex_get_auto_ghost_policy()` +- `vortex_get_auto_resolution_counts()` +- `vortex_get_auto_replay_count()` +- `vortex_reset_auto_telemetry()` +- `vortex_set_genetic_budgeting(bool)` +- `vortex_get_genetic_budgeting()` +- `vortex_set_genetic_thresholds(low, high)` +- `vortex_get_genetic_thresholds()` +- `vortex_set_isolation_disallowed_ops(ops)` +- `vortex_get_isolation_disallowed_ops()` +- `vortex_set_isolation_mode(bool)` +- `vortex_get_isolation_mode()` +- `vortex_watchdog_enable()` +- `vortex_watchdog_disable()` +- `vortex_watchdog_enabled()` +- `vortex_get_genetic_history(pid)` +- `vortex_get_all_genetic_history()` +- `vortex_reset_genetic_history()` + +This allows exercising Vortex behavior from runtime boundaries, not only from direct engine tests. + +### 2.7 Genetic budgeting primitive + +Implemented (runtime primitive): +- Optional runtime toggle for adaptive budgeting (`vortex_set_genetic_budgeting` / `vortex_genetic_budgeting_enabled`). +- Runtime configurable thresholds (low/high) via `vortex_set_genetic_thresholds` / `vortex_get_genetic_thresholds`. 
+- Adaptive budget policy in the Vortex preemption loop: + - Shrinks budget on suspend events. + - Gradually grows budget on clean cycles. + - Clamps within safe min/max bounds derived from base budget. +- PID-level run history available via `vortex_get_genetic_history(pid)` / `vortex_get_all_genetic_history()`. +- History reset via `vortex_reset_genetic_history()`. + +Scope note: +- This is an initial scheduler primitive with live policy knobs; full historical-learning policy remains a roadmap item. + +--- + +## 3. Roadmap Status (RFC #0003 Mapping) + +Legend: +- Implemented: available in code with tests. +- Partial: available primitives, not yet full end-to-end behavior. +- Planned: not yet implemented. + +| RFC Area | Status | Notes | +| :--- | :--- | :--- | +| 3.1 DIBP instruction-bound preemption | Partial | Runtime preemption and suspend hooks exist. Python opcode injection path is guarded and may fallback. | +| 3.2 Ghosting with transactional V-IO | Partial | Checkpoint, staged V-IO, race resolution, replay are implemented. Full production actor-flow policy orchestration still evolving. | +| 4.1 Quiescence-gated hot-swap | Implemented (engine level) | Staging and safe-point apply behavior implemented and tested in engine. | +| 4.2 Rescue pool detached stalling | Implemented (core primitive) | Rescue pool APIs and tests are present; broader operational policy tuning remains iterative. | +| 5.1 High-level IR future-proofing | Partial | Instruction IR + compatibility gates are present; continuous adaptation for new CPython internals remains ongoing. | +| 5.2 Vortex static verifier | Partial | Verifier checks now cover shape/jumps/cache layout plus exception-table and stack-depth gates; exception-handler semantics are still being expanded. | +| Genetic budgeting | Partial | Runtime adaptive budget primitive is implemented behind an explicit toggle; full historical/policy tuning is still pending. 
| +| Watchdog forced interrupt path | Planned | Not implemented yet. | +| Bytecode-level isolation rewrites | Planned | Not implemented yet. | + +--- + +## 4. Tests and Verification Coverage + +Current verification includes: +- Engine-level tests for transactions, ghost race resolution, replay behavior, and staged swap semantics. +- Runtime-level tests for Vortex wrapper lifecycle and automatic suspend hook replay. +- Runtime + PyRuntime policy/telemetry tests (including invalid policy and reset behavior). +- PyO3 integration tests for real Python execution and stage-specific fallback telemetry reasons. +- Bytecode utility tests for verifier behavior and compatibility rejection cases. + +Targeted commands: + +```bash +cargo test --lib runtime_vortex_ --no-default-features --features vortex -- --nocapture +cargo test --lib runtime_vortex_auto_ghost_hook_triggers_on_preempt_suspend --no-default-features --features vortex -- --nocapture +cargo test --test pyo3_vortex --no-default-features --features "pyo3 vortex" -- --nocapture +``` + +Fast aliases in `.cargo/config.toml`: + +```bash +cargo test-fast # no-default-features fast loop +cargo test-vortex # vortex feature focused +``` + +--- + +## 5. Guardrails and Failure Model + +Vortex prioritizes safety over aggressive rewrite behavior. + +Operational rules: +- If metadata is unavailable or incompatible, transmutation falls back to shadow tracing mode. +- Original function code objects are not mutated directly in the guarded path. +- Replay can be bounded by executor return value to stop on first failed side effect. +- Unsupported/unsafe conditions are exposed through explicit guard telemetry reasons. + +This keeps behavior deterministic and debuggable while compatibility support expands. + +--- + +## 6. Known Gaps and Next Milestones + +Short-term milestones: +1. Complete verifier follow-up work for exception-handler semantics and stack-preservation coverage beyond current range/depth/min-stack gates. +2. 
Increase direct rewrite success on modern quickening-heavy runtimes without relaxing safety gates. +3. Push ghost race policies deeper into default runtime scheduling decisions. +4. Expose richer telemetry for automatic suspend hook (counts by reason/policy). + +Mid-term milestones: +1. Integrate watchdog/escalation strategy for severe stalls. +2. Extend adaptive quantum tuning (genetic budgeting) from the current runtime primitive to a full historical-learning policy. +3. Explore actor memory-isolation rewrite policies under strict guard mode. + +Later-phase milestones: +1. Add asyncio-aligned execution interop/mirroring goals after verifier and scheduler policy maturity. +2. Reduce Python function-color boundaries where safe, so transmuted flows can feel more uniform across sync/async call paths. +3. Validate these changes behind strict guard telemetry before making them default runtime behavior. + +--- + +## 7. Practical Usage Notes + +For users enabling Vortex paths: +- Treat APIs as experimental and validate on your Python/runtime version. +- Use targeted tests for your deployment feature set (`pyo3`, `vortex`, optional `jit`). +- Check guard telemetry to understand whether rewrite or fallback executed. + +The current trajectory is incremental hardening with strict safety and test-first expansion. 
diff --git a/iris/jit.py b/iris/jit.py index 30b024f..98326ba 100644 --- a/iris/jit.py +++ b/iris/jit.py @@ -10,6 +10,7 @@ import ast import functools import inspect +import logging import textwrap import warnings import copy @@ -26,8 +27,8 @@ try: import msgpack as _msgpack -except Exception: # pragma: no cover - optional at runtime - _msgpack = None # type: ignore +except Exception: + _msgpack = None try: from .iris import ( @@ -48,31 +49,31 @@ get_quantum_compile_budget as _get_quantum_compile_budget, configure_quantum_cooldown as _configure_quantum_cooldown, get_quantum_cooldown as _get_quantum_cooldown, - ) # pyo3 extension -except ImportError: # allow tests to import without extension built - register_offload = None # type: ignore - offload_call = None # type: ignore - call_jit = None # type: ignore - call_jit_step_loop_f64 = None # type: ignore - configure_jit_logging = None # type: ignore - is_jit_logging_enabled = None # type: ignore - configure_quantum_speculation = None # type: ignore - is_quantum_speculation_enabled = None # type: ignore - _get_quantum_profile = None # type: ignore - _seed_quantum_profile = None # type: ignore - _configure_quantum_speculation_threshold = None # type: ignore - _get_quantum_speculation_threshold = None # type: ignore - _configure_quantum_log_threshold = None # type: ignore - _get_quantum_log_threshold = None # type: ignore - _configure_quantum_compile_budget = None # type: ignore - _get_quantum_compile_budget = None # type: ignore - _configure_quantum_cooldown = None # type: ignore - _get_quantum_cooldown = None # type: ignore + ) +except ImportError: + register_offload = None + offload_call = None + call_jit = None + call_jit_step_loop_f64 = None + configure_jit_logging = None + is_jit_logging_enabled = None + configure_quantum_speculation = None + is_quantum_speculation_enabled = None + _get_quantum_profile = None + _seed_quantum_profile = None + _configure_quantum_speculation_threshold = None + 
_get_quantum_speculation_threshold = None + _configure_quantum_log_threshold = None + _get_quantum_log_threshold = None + _configure_quantum_compile_budget = None + _get_quantum_compile_budget = None + _configure_quantum_cooldown = None + _get_quantum_cooldown = None try: - from .iris import call_jit_step_loop_f64 # type: ignore + from .iris import call_jit_step_loop_f64 except Exception: - call_jit_step_loop_f64 = None # type: ignore + call_jit_step_loop_f64 = None _IRIS_META_SCHEMA = 1 @@ -94,6 +95,9 @@ _IRIS_META_STATE_LOCK = threading.RLock() +_jit_meta_logger = logging.getLogger("iris.jit.meta") + + def _jit_meta_log(message: str) -> None: try: enabled = bool(get_jit_logging()) @@ -106,10 +110,18 @@ def _jit_meta_log(message: str) -> None: return except Exception: pass + try: - sys.stderr.write(f"[Iris][jit][meta] {message}\n") + if not logging.root.handlers and not _jit_meta_logger.handlers: + logging.basicConfig(level=logging.INFO) + if not _jit_meta_logger.isEnabledFor(logging.INFO): + _jit_meta_logger.setLevel(logging.INFO) + _jit_meta_logger.info(message) except Exception: - pass + try: + sys.stderr.write(f"[Iris][jit][meta] {message}\n") + except Exception: + pass def _empty_metadata_doc() -> dict[str, Any]: diff --git a/src/lib.rs b/src/lib.rs index 8ed28cf..12a5576 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,13 +12,20 @@ pub mod pid; pub mod registry; pub mod supervisor; +#[cfg(feature = "vortex")] +pub mod vortex; + #[cfg(feature = "pyo3")] pub mod py; +pub mod logging; + #[cfg(feature = "node")] pub mod node; use crate::pid::Pid; +#[cfg(feature = "vortex")] +use crate::vortex::{VortexEngine, VortexGhostPolicy, VortexGhostResolution, VortexVioCall}; use dashmap::DashMap; use once_cell::sync::Lazy; use std::collections::HashMap; @@ -33,6 +40,32 @@ type BoxFutureUnit = Pin + Send>>; type ErasedMessageHandler = Arc BoxFutureUnit + Send + Sync>; const MAX_BEHAVIOR_HISTORY: usize = 16; +#[cfg(feature = "vortex")] +fn next_dynamic_budget( + current: 
usize, + base: usize, + saw_suspend: bool, + suspend_rate: f64, + low_thresh: f64, + high_thresh: f64, +) -> usize { + let base = base.max(1); + let min_budget = (base / 4).max(1); + let max_budget = base.saturating_mul(4).max(base); + + let adjusted = if suspend_rate > high_thresh { + (current * 60 / 100).max(min_budget) + } else if suspend_rate > low_thresh { + (current * 80 / 100).max(min_budget) + } else if saw_suspend { + (current / 2).max(min_budget) + } else { + (current.saturating_add(1)).min(max_budget) + }; + + adjusted.clamp(min_budget, max_budget) +} + #[derive(Clone)] struct VirtualActorSpec { handler: ErasedMessageHandler, @@ -63,6 +96,10 @@ pub struct Runtime { monitor_backoff_factor: Arc>, monitor_backoff_max: Arc>, monitor_failure_threshold: Arc>, + #[cfg(feature = "vortex")] + vortex_engine: Option>>, + #[cfg(feature = "vortex")] + vortex_watcher: Option>, registry: Arc, /// Mapping for locally‑spawned proxies that forward to remote actors. /// Key is the local proxy PID; value is (remote address, remote PID). @@ -96,11 +133,31 @@ pub struct Runtime { // Timers: map from timer id -> cancellation sender timers: Arc>>>, timer_counter: Arc, + #[cfg(feature = "vortex")] + vortex_ghost_counter: Arc, + #[cfg(feature = "vortex")] + vortex_auto_replay_count: Arc, + #[cfg(feature = "vortex")] + vortex_auto_primary_wins: Arc, + #[cfg(feature = "vortex")] + vortex_auto_ghost_wins: Arc, + #[cfg(feature = "vortex")] + vortex_auto_policy: Arc>, + #[cfg(feature = "vortex")] + vortex_genetic_budgeting_enabled: Arc>, + #[cfg(feature = "vortex")] + vortex_genetic_thresholds: Arc>, + #[cfg(feature = "vortex")] + vortex_isolation_disallowed_ops: Arc>>, + #[cfg(feature = "vortex")] + vortex_genetic_history: Arc>, } impl Runtime { /// Create a new runtime instance and initialize the networking and registry sub-systems. 
pub fn new() -> Self { + crate::logging::init_logger(); + #[cfg(feature = "pyo3")] { pyo3::prepare_freethreaded_python(); @@ -126,6 +183,24 @@ impl Runtime { release_gil_strict: Arc::new(Mutex::new(false)), timers: Arc::new(Mutex::new(HashMap::new())), timer_counter: Arc::new(AtomicU64::new(0)), + #[cfg(feature = "vortex")] + vortex_ghost_counter: Arc::new(AtomicU64::new(1)), + #[cfg(feature = "vortex")] + vortex_auto_replay_count: Arc::new(AtomicU64::new(0)), + #[cfg(feature = "vortex")] + vortex_auto_primary_wins: Arc::new(AtomicU64::new(0)), + #[cfg(feature = "vortex")] + vortex_auto_ghost_wins: Arc::new(AtomicU64::new(0)), + #[cfg(feature = "vortex")] + vortex_auto_policy: Arc::new(Mutex::new(VortexGhostPolicy::FirstSafePointWins)), + #[cfg(feature = "vortex")] + vortex_genetic_budgeting_enabled: Arc::new(Mutex::new(false)), + #[cfg(feature = "vortex")] + vortex_genetic_thresholds: Arc::new(Mutex::new((0.4, 0.7))), + #[cfg(feature = "vortex")] + vortex_isolation_disallowed_ops: Arc::new(Mutex::new(std::collections::HashSet::new())), + #[cfg(feature = "vortex")] + vortex_genetic_history: Arc::new(DashMap::new()), network_io_timeout: Arc::new(Mutex::new(Duration::from_secs(5))), network_max_payload: Arc::new(Mutex::new(1024 * 1024)), network_max_name_len: Arc::new(Mutex::new(1024)), @@ -136,6 +211,10 @@ impl Runtime { proxy_by_remote: Arc::new(DashMap::new()), behavior_versions: Arc::new(DashMap::new()), behavior_history: Arc::new(DashMap::new()), + #[cfg(feature = "vortex")] + vortex_engine: Some(Arc::new(Mutex::new(VortexEngine::new()))), + #[cfg(feature = "vortex")] + vortex_watcher: Some(Arc::new(vortex::VortexWatcher::new())), }; let net_manager = network::NetworkManager::new(Arc::new(rt.clone())); @@ -218,6 +297,329 @@ impl Runtime { *self.release_gil_strict.lock().unwrap() = strict; } + #[cfg(feature = "vortex")] + pub fn vortex_engine(&self) -> Option { + self.vortex_engine + .as_ref() + .and_then(|engine| engine.lock().ok().map(|guard| guard.clone())) + 
} + + #[cfg(feature = "vortex")] + pub fn vortex_start_transaction_with_checkpoint( + &self, + id: u64, + locals: HashMap>, + ) -> bool { + let Some(engine) = self.vortex_engine.as_ref() else { + return false; + }; + match engine.lock() { + Ok(mut guard) => { + guard.start_transaction_with_checkpoint(id, locals); + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_stage_transaction_vio(&self, op: String, payload: Vec) -> bool { + let Some(engine) = self.vortex_engine.as_ref() else { + return false; + }; + match engine.lock() { + Ok(mut guard) => guard.stage_transaction_vio(op, payload), + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_commit_transaction(&self) -> bool { + let Some(engine) = self.vortex_engine.as_ref() else { + return false; + }; + match engine.lock() { + Ok(mut guard) => guard.commit_transaction(), + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_take_committed_transaction_vio(&self) -> Option> { + let Some(engine) = self.vortex_engine.as_ref() else { + return None; + }; + match engine.lock() { + Ok(mut guard) => Some(guard.take_committed_transaction_vio()), + Err(_) => None, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_start_ghost_transaction_with_checkpoint( + &self, + id: u64, + locals: HashMap>, + ) -> bool { + let Some(engine) = self.vortex_engine.as_ref() else { + return false; + }; + match engine.lock() { + Ok(mut guard) => { + guard.start_ghost_transaction_with_checkpoint(id, locals); + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_stage_ghost_transaction_vio( + &self, + ghost_id: u64, + op: String, + payload: Vec, + ) -> bool { + let Some(engine) = self.vortex_engine.as_ref() else { + return false; + }; + match engine.lock() { + Ok(mut guard) => guard.stage_ghost_transaction_vio(ghost_id, op, payload), + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_resolve_primary_ghost_race( + &self, 
+ ghost_id: u64, + winner_id: u64, + policy: VortexGhostPolicy, + ) -> Option { + let Some(engine) = self.vortex_engine.as_ref() else { + return None; + }; + match engine.lock() { + Ok(mut guard) => guard.resolve_primary_ghost_race(ghost_id, winner_id, policy), + Err(_) => None, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_replay_committed_vio_calls( + &self, + calls: &[VortexVioCall], + executor: F, + ) -> Option + where + F: FnMut(&VortexVioCall) -> bool, + { + let Some(engine) = self.vortex_engine.as_ref() else { + return None; + }; + match engine.lock() { + Ok(guard) => Some(guard.replay_committed_vio_calls(calls, executor)), + Err(_) => None, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_auto_replay_count(&self) -> u64 { + self.vortex_auto_replay_count.load(Ordering::Relaxed) + } + + #[cfg(feature = "vortex")] + pub fn vortex_set_auto_ghost_policy(&self, policy: VortexGhostPolicy) -> bool { + match self.vortex_auto_policy.lock() { + Ok(mut guard) => { + *guard = policy; + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_auto_ghost_policy(&self) -> Option { + self.vortex_auto_policy.lock().ok().map(|guard| *guard) + } + + #[cfg(feature = "vortex")] + pub fn vortex_auto_resolution_counts(&self) -> (u64, u64) { + ( + self.vortex_auto_primary_wins.load(Ordering::Relaxed), + self.vortex_auto_ghost_wins.load(Ordering::Relaxed), + ) + } + + #[cfg(feature = "vortex")] + pub fn vortex_reset_auto_telemetry(&self) { + self.vortex_auto_replay_count.store(0, Ordering::Relaxed); + self.vortex_auto_primary_wins.store(0, Ordering::Relaxed); + self.vortex_auto_ghost_wins.store(0, Ordering::Relaxed); + self.vortex_ghost_counter.store(1, Ordering::Relaxed); + } + + #[cfg(feature = "vortex")] + pub fn vortex_set_genetic_budgeting(&self, enabled: bool) -> bool { + match self.vortex_genetic_budgeting_enabled.lock() { + Ok(mut guard) => { + *guard = enabled; + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + 
pub fn vortex_genetic_budgeting_enabled(&self) -> Option { + self.vortex_genetic_budgeting_enabled + .lock() + .ok() + .map(|guard| *guard) + } + + #[cfg(feature = "vortex")] + pub fn vortex_set_genetic_thresholds(&self, low: f64, high: f64) -> bool { + if low < 0.0 || high < 0.0 || low >= high || high > 1.0 { + return false; + } + match self.vortex_genetic_thresholds.lock() { + Ok(mut guard) => { + *guard = (low, high); + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_genetic_thresholds(&self) -> Option<(f64, f64)> { + self.vortex_genetic_thresholds + .lock() + .ok() + .map(|guard| *guard) + } + + #[cfg(feature = "vortex")] + pub fn vortex_set_isolation_disallowed_ops(&self, ops: Vec) -> bool { + match self.vortex_isolation_disallowed_ops.lock() { + Ok(mut guard) => { + guard.clear(); + for op in ops { + guard.insert(op); + } + true + } + Err(_) => false, + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_get_isolation_disallowed_ops(&self) -> Option> { + self.vortex_isolation_disallowed_ops + .lock() + .ok() + .map(|guard| guard.iter().copied().collect()) + } + + #[cfg(feature = "vortex")] + pub fn vortex_watchdog_enable(&self) -> bool { + if let Some(watcher) = self.vortex_watcher.as_ref() { + watcher.enable(); + true + } else { + false + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_watchdog_disable(&self) -> bool { + if let Some(watcher) = self.vortex_watcher.as_ref() { + watcher.disable(); + true + } else { + false + } + } + + #[cfg(feature = "vortex")] + pub fn vortex_watchdog_enabled(&self) -> Option { + self.vortex_watcher.as_ref().map(|w| w.is_enabled()) + } + + #[cfg(feature = "vortex")] + pub fn vortex_genetic_history(&self, pid: Pid) -> Option<(usize, usize)> { + self.vortex_genetic_history.get(&pid).map(|r| *r) + } + + #[cfg(feature = "vortex")] + pub fn vortex_get_all_genetic_history(&self) -> Vec<(Pid, usize, usize)> { + self.vortex_genetic_history + .iter() + .map(|entry| (*entry.key(), 
entry.value().0, entry.value().1)) + .collect() + } + + #[cfg(feature = "vortex")] + pub fn vortex_reset_genetic_history(&self) { + self.vortex_genetic_history.clear(); + } + + #[cfg(feature = "vortex")] + fn vortex_auto_checkpoint_and_replay_on_suspend(&self, pid: Pid, budget: usize) { + let Some(engine) = self.vortex_engine.as_ref() else { + return; + }; + + let primary_id = self.vortex_ghost_counter.fetch_add(1, Ordering::Relaxed); + let ghost_id = self.vortex_ghost_counter.fetch_add(1, Ordering::Relaxed); + + let mut primary_locals = HashMap::new(); + primary_locals.insert("pid".to_string(), pid.to_le_bytes().to_vec()); + primary_locals.insert("budget".to_string(), (budget as u64).to_le_bytes().to_vec()); + + let mut ghost_locals = HashMap::new(); + ghost_locals.insert("pid".to_string(), pid.to_le_bytes().to_vec()); + ghost_locals.insert("budget".to_string(), (budget as u64).to_le_bytes().to_vec()); + + let Ok(mut guard) = engine.lock() else { + return; + }; + + guard.start_transaction_with_checkpoint(primary_id, primary_locals); + let _ = + guard.stage_transaction_vio("suspend_primary".to_string(), pid.to_le_bytes().to_vec()); + + guard.start_ghost_transaction_with_checkpoint(ghost_id, ghost_locals); + let _ = guard.stage_ghost_transaction_vio( + ghost_id, + "suspend_ghost".to_string(), + pid.to_le_bytes().to_vec(), + ); + + let policy = self + .vortex_auto_policy + .lock() + .map(|guard| *guard) + .unwrap_or(VortexGhostPolicy::FirstSafePointWins); + + if let Some(resolution) = guard.resolve_primary_ghost_race(ghost_id, ghost_id, policy) { + if resolution.winner_id == primary_id { + self.vortex_auto_primary_wins + .fetch_add(1, Ordering::Relaxed); + } else if resolution.winner_id == ghost_id { + self.vortex_auto_ghost_wins.fetch_add(1, Ordering::Relaxed); + } + + let applied = guard.replay_committed_vio_calls(&resolution.committed_vio, |_| true); + if applied > 0 { + self.vortex_auto_replay_count + .fetch_add(applied as u64, Ordering::Relaxed); + } + } + } + 
/// Get the current release_gil limits (max_threads, pool_size). pub fn get_release_gil_limits(&self) -> (usize, usize) { ( @@ -546,6 +948,8 @@ impl Runtime { { let mut slab = self.slab.lock().unwrap(); let pid = slab.allocate(); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); let erased: ErasedMessageHandler = Arc::new(move |msg: mailbox::Message| Box::pin(handler(msg))); @@ -599,6 +1003,8 @@ impl Runtime { let (tx, mut rx) = mailbox::channel(); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); let handler = spec.handler.clone(); let budget = spec.budget; @@ -789,6 +1195,8 @@ impl Runtime { let pid = slab.allocate(); let (tx, rx) = mailbox::channel(); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); self.backpressure_state .insert(pid, mailbox::BackpressureLevel::Normal); @@ -861,6 +1269,8 @@ impl Runtime { let pid = slab.allocate(); let (tx, rx) = mailbox::bounded_channel(capacity); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); self.backpressure_state .insert(pid, mailbox::BackpressureLevel::Normal); // track capacity and default policy @@ -942,6 +1352,8 @@ impl Runtime { let pid = slab.allocate(); let (tx, rx) = mailbox::bounded_channel(capacity); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); self.backpressure_state .insert(pid, mailbox::BackpressureLevel::Normal); // track capacity and default overflow policy @@ -1015,6 +1427,8 @@ impl Runtime { let pid = slab.allocate(); let (tx, rx) = mailbox::channel(); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); let mailboxes2 = self.mailboxes.clone(); let supervisor2 = self.supervisor.clone(); @@ -1082,6 +1496,8 @@ impl Runtime { let 
pid = slab.allocate(); let (tx, mut rx) = mailbox::channel(); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); let handler = std::sync::Arc::new(handler); let supervisor2 = self.supervisor.clone(); @@ -1092,16 +1508,111 @@ impl Runtime { RUNTIME.spawn(async move { let h_loop = handler.clone(); + #[cfg(feature = "vortex")] + let rt_vortex_clone = rt_exit_clone.clone(); + + #[cfg(feature = "vortex")] + let mut vortex_engine = rt_exit_clone + .vortex_engine() + .unwrap_or_else(|| crate::vortex::VortexEngine::new()); + let actor_handle = tokio::spawn(async move { let mut processed = 0usize; + #[cfg(feature = "vortex")] + let mut dynamic_budget = budget.max(1); + #[cfg(not(feature = "vortex"))] + let dynamic_budget = budget.max(1); while let Some(first_msg) = rx.recv().await { + #[cfg(feature = "vortex")] + if rt_vortex_clone.vortex_watchdog_enabled().unwrap_or(false) { + tokio::task::yield_now().await; + } + + #[cfg(feature = "vortex")] + let mut saw_suspend_in_cycle = false; + #[cfg(not(feature = "vortex"))] + let _saw_suspend_in_cycle = false; + + #[cfg(feature = "vortex")] + let enable_genetic_budgeting = rt_vortex_clone + .vortex_genetic_budgeting_enabled() + .unwrap_or(false); + #[cfg(not(feature = "vortex"))] + let _enable_genetic_budgeting = false; + + #[cfg(feature = "vortex")] + { + if let Err(_) = vortex_engine.preempt_tick() { + saw_suspend_in_cycle = true; + let (suspend_count, total_count) = rt_vortex_clone + .vortex_genetic_history(pid) + .unwrap_or((0, 0)); + rt_vortex_clone.vortex_genetic_history.insert( + pid, + ( + suspend_count.saturating_add(1), + total_count.saturating_add(1), + ), + ); + rt_vortex_clone + .vortex_auto_checkpoint_and_replay_on_suspend(pid, budget); + vortex_engine.detach_stalled_thread(); + vortex_engine.replenish_budget(budget); + tokio::task::yield_now().await; + vortex_engine.reclaim_thread(); + if enable_genetic_budgeting { + let (low, high) = 
rt_vortex_clone + .vortex_genetic_thresholds() + .unwrap_or((0.4, 0.7)); + dynamic_budget = next_dynamic_budget( + dynamic_budget, + budget, + saw_suspend_in_cycle, + 0.0, + low, + high, + ); + } + continue; + } + } + let h = h_loop.clone(); (h)(first_msg).await; processed += 1; - while processed < budget { + while processed < dynamic_budget { match rx.try_recv() { Some(next_msg) => { + #[cfg(feature = "vortex")] + { + if let Err(_) = vortex_engine.preempt_tick() { + saw_suspend_in_cycle = true; + rt_vortex_clone + .vortex_auto_checkpoint_and_replay_on_suspend( + pid, budget, + ); + vortex_engine.detach_stalled_thread(); + vortex_engine.replenish_budget(budget); + tokio::task::yield_now().await; + vortex_engine.reclaim_thread(); + if enable_genetic_budgeting { + let (low, high) = rt_vortex_clone + .vortex_genetic_thresholds() + .unwrap_or((0.4, 0.7)); + dynamic_budget = next_dynamic_budget( + dynamic_budget, + budget, + saw_suspend_in_cycle, + 0.0, + low, + high, + ); + } + break; + } + } + let h = h_loop.clone(); (h)(next_msg).await; processed += 1; @@ -1110,7 +1621,40 @@ impl Runtime { } } - if processed >= budget { + #[cfg(feature = "vortex")] + { + let (suspend_count, total_count) = rt_vortex_clone + .vortex_genetic_history(pid) + .unwrap_or((0, 0)); + let total_count = total_count.saturating_add(1); + let suspend_count = suspend_count + (saw_suspend_in_cycle as usize); + let suspend_rate = if total_count == 0 { + 0.0 + } else { + (suspend_count as f64) / (total_count as f64) + }; + + rt_vortex_clone + .vortex_genetic_history + .insert(pid, (suspend_count, total_count)); + + if enable_genetic_budgeting { + let (low, high) = rt_vortex_clone + .vortex_genetic_thresholds() + .unwrap_or((0.4, 0.7)); + + dynamic_budget = next_dynamic_budget( + dynamic_budget, + budget, + saw_suspend_in_cycle, + suspend_rate, + low, + high, + ); + } + } + + if processed >= dynamic_budget { processed = 0; tokio::task::yield_now().await; } @@ -1194,6 +1738,8 @@ impl Runtime { let pid = 
slab.allocate(); let (tx, mut rx) = mailbox::channel(); self.mailboxes.insert(pid, tx.clone()); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.insert(pid, (0, 0)); self.backpressure_state .insert(pid, mailbox::BackpressureLevel::Normal); let vec = Arc::new(Mutex::new(Vec::new())); @@ -1460,6 +2006,10 @@ impl Runtime { let res = sender.send_user_bytes(bytes); if res.is_ok() { self.update_backpressure_after_enqueue(pid, sender.value()); + #[cfg(feature = "vortex")] + if let Some(mut counts) = self.vortex_genetic_history.get_mut(&pid) { + counts.1 = counts.1.saturating_add(1); + } } res } else { @@ -1589,6 +2139,8 @@ impl Runtime { self.backpressure_state.remove(&pid); self.behavior_versions.remove(&pid); self.behavior_history.remove(&pid); + #[cfg(feature = "vortex")] + self.vortex_genetic_history.remove(&pid); // remove the pid from its parent's child list (if any) if let Some((_, parent)) = self.parent_of.remove(&pid) { if let Some(mut entry) = self.children_by_parent.get_mut(&parent) { @@ -1863,12 +2415,19 @@ mod tests { } } - assert!(sent >= 4, "expected at least 4 messages to be accepted, got {}", sent); + assert!( + sent >= 4, + "expected at least 4 messages to be accepted, got {}", + sent + ); let level = rt .mailbox_backpressure(pid) .expect("backpressure should be available"); - assert!(matches!(level, mailbox::BackpressureLevel::High | mailbox::BackpressureLevel::Critical)); + assert!(matches!( + level, + mailbox::BackpressureLevel::High | mailbox::BackpressureLevel::Critical + )); if sent >= 5 { assert_eq!(level, mailbox::BackpressureLevel::Critical); @@ -1899,10 +2458,16 @@ mod tests { let payload = bytes::Bytes::from(format!("u{}", i)); assert!(rt.send_user(pid, payload).is_ok()); } - assert_eq!(rt.mailbox_backpressure(pid), Some(mailbox::BackpressureLevel::High)); + assert_eq!( + rt.mailbox_backpressure(pid), + Some(mailbox::BackpressureLevel::High) + ); assert!(rt.send_user(pid, bytes::Bytes::from_static(b"u3")).is_ok()); - 
assert_eq!(rt.mailbox_backpressure(pid), Some(mailbox::BackpressureLevel::Critical)); + assert_eq!( + rt.mailbox_backpressure(pid), + Some(mailbox::BackpressureLevel::Critical) + ); let _ = start_tx.send(()); } @@ -1932,7 +2497,10 @@ mod tests { assert!(rt .send(pid, mailbox::Message::User(b"d3".to_vec().into())) .is_ok()); - assert_eq!(rt.mailbox_backpressure(pid), Some(mailbox::BackpressureLevel::Critical)); + assert_eq!( + rt.mailbox_backpressure(pid), + Some(mailbox::BackpressureLevel::Critical) + ); let _ = start_tx.send(()); @@ -1947,7 +2515,10 @@ mod tests { .await .expect("mailbox should drain"); - assert_eq!(rt.mailbox_backpressure(pid), Some(mailbox::BackpressureLevel::Normal)); + assert_eq!( + rt.mailbox_backpressure(pid), + Some(mailbox::BackpressureLevel::Normal) + ); } #[tokio::test] @@ -2147,4 +2718,319 @@ mod tests { .expect_err("rollback should fail"); assert!(err.contains("history")); } + + #[cfg(feature = "vortex")] + #[test] + fn runtime_vortex_ghost_lifecycle_wrappers_work() { + let rt = Runtime::new(); + + assert!(rt.vortex_start_transaction_with_checkpoint(10, std::collections::HashMap::new())); + assert!(rt.vortex_stage_transaction_vio("io_primary".to_string(), b"p".to_vec())); + + assert!( + rt.vortex_start_ghost_transaction_with_checkpoint(20, std::collections::HashMap::new()) + ); + assert!(rt.vortex_stage_ghost_transaction_vio(20, "io_ghost_a".to_string(), b"a".to_vec())); + assert!(rt.vortex_stage_ghost_transaction_vio(20, "io_ghost_b".to_string(), b"b".to_vec())); + + let resolution = rt + .vortex_resolve_primary_ghost_race( + 20, + 20, + crate::vortex::VortexGhostPolicy::FirstSafePointWins, + ) + .expect("resolution should exist"); + assert_eq!(resolution.winner_id, 20); + assert_eq!(resolution.committed_vio.len(), 2); + + let mut seen = Vec::new(); + let applied = rt + .vortex_replay_committed_vio_calls(&resolution.committed_vio, |call| { + seen.push(call.op.clone()); + true + }) + .expect("replay should be available"); + 
assert_eq!(applied, 2); + assert_eq!( + seen, + vec!["io_ghost_a".to_string(), "io_ghost_b".to_string()] + ); + + let second = rt.vortex_resolve_primary_ghost_race( + 20, + 20, + crate::vortex::VortexGhostPolicy::FirstSafePointWins, + ); + assert!(second.is_none()); + } + + #[cfg(feature = "vortex")] + #[test] + fn runtime_vortex_commit_and_take_committed_vio() { + let rt = Runtime::new(); + assert!(rt.vortex_start_transaction_with_checkpoint(30, std::collections::HashMap::new())); + assert!(rt.vortex_stage_transaction_vio("io_commit".to_string(), b"x".to_vec())); + assert!(rt.vortex_commit_transaction()); + + let committed = rt + .vortex_take_committed_transaction_vio() + .expect("engine should exist"); + assert_eq!(committed.len(), 1); + assert_eq!(committed[0].op, "io_commit"); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_ghost_wrappers_during_actor_execution() { + let rt = Runtime::new(); + let pid = rt.spawn_observed_handler(8); + + assert!(rt + .send(pid, mailbox::Message::User(b"before".to_vec().into())) + .is_ok()); + + assert!(rt.vortex_start_transaction_with_checkpoint(101, std::collections::HashMap::new())); + assert!(rt.vortex_stage_transaction_vio("primary_call".to_string(), b"p".to_vec())); + assert!(rt + .vortex_start_ghost_transaction_with_checkpoint(202, std::collections::HashMap::new())); + assert!(rt.vortex_stage_ghost_transaction_vio( + 202, + "ghost_call".to_string(), + b"g".to_vec() + )); + + let resolution = rt + .vortex_resolve_primary_ghost_race( + 202, + 202, + crate::vortex::VortexGhostPolicy::FirstSafePointWins, + ) + .expect("resolution should succeed"); + assert_eq!(resolution.winner_id, 202); + assert_eq!(resolution.committed_vio.len(), 1); + + let applied = rt + .vortex_replay_committed_vio_calls(&resolution.committed_vio, |_call| true) + .expect("replay should be available"); + assert_eq!(applied, 1); + + assert!(rt + .send(pid, mailbox::Message::User(b"after".to_vec().into())) + .is_ok()); + + let 
observed = timeout(Duration::from_secs(1), async { + loop { + let msgs = rt + .get_observed_messages(pid) + .expect("observed actor should exist"); + + let user_msgs: Vec> = msgs + .into_iter() + .filter_map(|m| match m { + mailbox::Message::User(b) => Some(b.to_vec()), + _ => None, + }) + .collect(); + + if user_msgs.len() >= 2 { + break user_msgs; + } + + sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("timed out waiting for observed messages"); + + assert!(observed.iter().any(|m| m.as_slice() == b"before")); + assert!(observed.iter().any(|m| m.as_slice() == b"after")); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_auto_ghost_hook_triggers_on_preempt_suspend() { + let rt = Runtime::new(); + let pid = rt.spawn_handler_with_budget(|_msg| async move {}, 8); + + // Drive enough preemption checks to force suspend-path execution. + for _ in 0..1400 { + let _ = rt.send(pid, mailbox::Message::User(b"tick".to_vec().into())); + } + + timeout(Duration::from_secs(2), async { + loop { + if rt.vortex_auto_replay_count() > 0 { + break; + } + sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("expected auto replay hook to run on preempt suspend"); + + assert!(rt.vortex_auto_replay_count() > 0); + let (primary_wins, ghost_wins) = rt.vortex_auto_resolution_counts(); + assert!(primary_wins + ghost_wins > 0); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_auto_policy_prefer_primary_updates_counters() { + let rt = Runtime::new(); + assert!(rt.vortex_set_auto_ghost_policy(crate::vortex::VortexGhostPolicy::PreferPrimary)); + assert_eq!( + rt.vortex_auto_ghost_policy(), + Some(crate::vortex::VortexGhostPolicy::PreferPrimary) + ); + + let pid = rt.spawn_handler_with_budget(|_msg| async move {}, 8); + for _ in 0..1400 { + let _ = rt.send(pid, mailbox::Message::User(b"tick".to_vec().into())); + } + + timeout(Duration::from_secs(2), async { + loop { + let (primary_wins, ghost_wins) = 
rt.vortex_auto_resolution_counts(); + if primary_wins > 0 || ghost_wins > 0 { + break; + } + sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("expected auto policy counters to update"); + + let (primary_wins, ghost_wins) = rt.vortex_auto_resolution_counts(); + assert!(primary_wins > 0); + assert_eq!(ghost_wins, 0); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_auto_telemetry_can_reset() { + let rt = Runtime::new(); + let pid = rt.spawn_handler_with_budget(|_msg| async move {}, 8); + + for _ in 0..1400 { + let _ = rt.send(pid, mailbox::Message::User(b"tick".to_vec().into())); + } + + timeout(Duration::from_secs(2), async { + loop { + if rt.vortex_auto_replay_count() > 0 { + break; + } + sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("expected auto telemetry to increase"); + + let (primary_wins, ghost_wins) = rt.vortex_auto_resolution_counts(); + assert!(rt.vortex_auto_replay_count() > 0); + assert!(primary_wins + ghost_wins > 0); + + rt.vortex_reset_auto_telemetry(); + let (primary_wins_after, ghost_wins_after) = rt.vortex_auto_resolution_counts(); + assert_eq!(rt.vortex_auto_replay_count(), 0); + assert_eq!(primary_wins_after, 0); + assert_eq!(ghost_wins_after, 0); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_genetic_budgeting_toggle_roundtrip() { + let rt = Runtime::new(); + assert_eq!(rt.vortex_genetic_budgeting_enabled(), Some(false)); + assert!(rt.vortex_set_genetic_budgeting(true)); + assert_eq!(rt.vortex_genetic_budgeting_enabled(), Some(true)); + assert!(rt.vortex_set_genetic_budgeting(false)); + assert_eq!(rt.vortex_genetic_budgeting_enabled(), Some(false)); + } + + #[cfg(feature = "vortex")] + #[tokio::test] + async fn runtime_vortex_genetic_budgeting_effects_during_actor_run() { + use std::sync::atomic::{AtomicUsize, Ordering}; + let rt = Runtime::new(); + + // Start with dynamic budgeting disabled. 
+ assert!(rt.vortex_set_genetic_budgeting(false)); + + let counter = std::sync::Arc::new(AtomicUsize::new(0)); + let counter_clone = counter.clone(); + let pid = rt.spawn_handler_with_budget( + move |_msg| { + let counter_clone = counter_clone.clone(); + async move { + counter_clone.fetch_add(1, Ordering::Relaxed); + } + }, + 8, + ); + + for _ in 0..400 { + let _ = rt.send(pid, mailbox::Message::User(b"x".to_vec().into())); + } + + tokio::time::sleep(Duration::from_millis(100)).await; + + // Enable genetic budgeting while actor is still running. + assert!(rt.vortex_set_genetic_budgeting(true)); + + for _ in 0..400 { + let _ = rt.send(pid, mailbox::Message::User(b"x".to_vec().into())); + } + + tokio::time::timeout(Duration::from_secs(5), async { + loop { + if counter.load(Ordering::Relaxed) >= 800 { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + } + }) + .await + .expect("all messages should be processed"); + + rt.stop(pid); + } + + #[cfg(feature = "vortex")] + #[test] + fn runtime_vortex_genetic_budget_policy_math() { + // base=8 => min=2, max=32 + assert_eq!(next_dynamic_budget(8, 8, false, 0.0, 0.4, 0.7), 9); + assert_eq!(next_dynamic_budget(32, 8, false, 0.0, 0.4, 0.7), 32); + assert_eq!(next_dynamic_budget(8, 8, true, 0.0, 0.4, 0.7), 4); + assert_eq!(next_dynamic_budget(3, 8, true, 0.0, 0.4, 0.7), 2); + assert_eq!(next_dynamic_budget(1, 8, true, 0.0, 0.4, 0.7), 2); + + // high suspend rate should force stronger penalty. 
+ assert_eq!(next_dynamic_budget(16, 8, false, 0.8, 0.4, 0.7), 9); + assert_eq!(next_dynamic_budget(20, 8, false, 0.5, 0.4, 0.7), 16); + } + + #[cfg(feature = "vortex")] + #[test] + fn runtime_vortex_genetic_threshold_roundtrip() { + let rt = Runtime::new(); + assert_eq!(rt.vortex_genetic_thresholds(), Some((0.4, 0.7))); + assert!(rt.vortex_set_genetic_thresholds(0.2, 0.5)); + assert_eq!(rt.vortex_genetic_thresholds(), Some((0.2, 0.5))); + assert!(!rt.vortex_set_genetic_thresholds(0.7, 0.2)); + assert!(!rt.vortex_set_genetic_thresholds(-0.1, 0.5)); + assert!(!rt.vortex_set_genetic_thresholds(0.1, 1.2)); + } + + #[cfg(feature = "vortex")] + #[test] + fn runtime_vortex_isolation_disallow_roundtrip() { + let rt = Runtime::new(); + assert_eq!(rt.vortex_get_isolation_disallowed_ops(), Some(vec![])); + assert!(rt.vortex_set_isolation_disallowed_ops(vec![90, 91])); + let mut got = rt.vortex_get_isolation_disallowed_ops().unwrap(); + got.sort(); + assert_eq!(got, vec![90, 91]); + } } diff --git a/src/logging.rs b/src/logging.rs new file mode 100644 index 0000000..f2fa6f4 --- /dev/null +++ b/src/logging.rs @@ -0,0 +1,34 @@ +use std::sync::Once; +use tracing_subscriber::fmt::time::UtcTime; +use tracing_subscriber::EnvFilter; + +static LOGGER_INIT: Once = Once::new(); + +/// Initialize the Iris tracing subscriber once for the entire crate. 
+pub fn init_logger() { + LOGGER_INIT.call_once(|| { + let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")); + + let subscriber = tracing_subscriber::fmt() + .with_env_filter(filter) + .with_timer(UtcTime::rfc_3339()) + .with_target(false) + .with_thread_ids(false) + .with_thread_names(false) + .compact() + .finish(); + + let _ = tracing::subscriber::set_global_default(subscriber); + }); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn init_logger_is_idempotent() { + init_logger(); + init_logger(); + } +} diff --git a/src/mailbox.rs b/src/mailbox.rs index ff89bfe..64c4f70 100644 --- a/src/mailbox.rs +++ b/src/mailbox.rs @@ -608,10 +608,14 @@ mod tests { for i in 0..5 { let data = vec![b'a' + (i as u8)]; - assert!(tx.send(Message::User(Bytes::copy_from_slice(&data))).is_ok()); + assert!(tx + .send(Message::User(Bytes::copy_from_slice(&data))) + .is_ok()); } - assert!(tx.send(Message::User(Bytes::from_static(b"overflow"))).is_err()); + assert!(tx + .send(Message::User(Bytes::from_static(b"overflow"))) + .is_err()); } #[tokio::test] @@ -620,7 +624,8 @@ mod tests { for i in 0..9 { let payload = vec![b'a' + (i as u8)]; - tx.send(Message::User(Bytes::copy_from_slice(&payload))).unwrap(); + tx.send(Message::User(Bytes::copy_from_slice(&payload))) + .unwrap(); } // 9/10 keeps us in critical from normal. 
diff --git a/src/node.rs b/src/node.rs index 7a23b18..4b0f0dd 100644 --- a/src/node.rs +++ b/src/node.rs @@ -45,12 +45,9 @@ fn message_to_js(env: &Env, msg: Message) -> Result { SystemMessage::HotSwap(_) => ("HOT_SWAP", None), SystemMessage::Ping => ("PING", None, None, None), SystemMessage::Pong => ("PONG", None, None, None), - SystemMessage::Backpressure(level) => ( - "BACKPRESSURE", - None, - Some(level.as_str().to_string()), - None, - ), + SystemMessage::Backpressure(level) => { + ("BACKPRESSURE", None, Some(level.as_str().to_string()), None) + } }; let mut obj = env.create_object()?; diff --git a/src/py/jit/codegen/buffer.rs b/src/py/jit/codegen/buffer.rs index 7fa61ae..59c84eb 100644 --- a/src/py/jit/codegen/buffer.rs +++ b/src/py/jit/codegen/buffer.rs @@ -1,10 +1,7 @@ // src/py/jit/codegen/buffer.rs -use std::ffi::CStr; use crate::py::jit::codegen::BufferElemType; - -#[cfg(feature = "pyo3")] -use pyo3::AsPyPointer; +use std::ffi::CStr; // helper for zero-copy buffer access used by the JIT runner pub(crate) struct BufferView { @@ -108,9 +105,9 @@ pub(crate) unsafe fn parse_buffer_elem_type(view: &pyo3::ffi::Py_buffer) -> Opti let fmt = CStr::from_ptr(view.format).to_str().ok()?; let code = fmt - .chars() - .rev() - .find(|ch| ch.is_ascii_alphabetic() || *ch == '?')?; + .chars() + .rev() + .find(|ch| ch.is_ascii_alphabetic() || *ch == '?')?; to_elem_type(code, itemsize) } @@ -146,8 +143,8 @@ pub(crate) unsafe fn open_typed_buffer(obj: &pyo3::PyAny) -> Option let len = total_bytes / itemsize; Some(BufferView { view, - elem_type, - len, + elem_type, + len, }) } diff --git a/src/py/jit/codegen/compiler.rs b/src/py/jit/codegen/compiler.rs index d361c1b..fa24693 100644 --- a/src/py/jit/codegen/compiler.rs +++ b/src/py/jit/codegen/compiler.rs @@ -44,11 +44,11 @@ pub fn compile_jit_quantum( if let Some(exp) = compile_jit_impl(expr_str, arg_names, true, return_type) .map(|e| e.with_strategy(QuantumVariantStrategy::FastTrigExperiment)) - { - out.push(exp); - } + { + 
out.push(exp); + } - crate::py::jit::jit_log(|| format!("[Iris][jit][quantum] built {} variants", out.len())); + crate::py::jit::jit_log(|| format!("[Iris][jit][quantum] built {} variants", out.len())); out } @@ -60,11 +60,11 @@ pub fn compile_jit_quantum_variant( ) -> Option { match variant_index { 0 => compile_jit_impl(expr_str, arg_names, true, return_type) - .map(|entry| entry.with_strategy(QuantumVariantStrategy::Auto)), + .map(|entry| entry.with_strategy(QuantumVariantStrategy::Auto)), 1 => compile_jit_impl(expr_str, arg_names, false, return_type) - .map(|entry| entry.with_strategy(QuantumVariantStrategy::ScalarFallback)), + .map(|entry| entry.with_strategy(QuantumVariantStrategy::ScalarFallback)), 2 => compile_jit_impl(expr_str, arg_names, true, return_type) - .map(|entry| entry.with_strategy(QuantumVariantStrategy::FastTrigExperiment)), + .map(|entry| entry.with_strategy(QuantumVariantStrategy::FastTrigExperiment)), _ => None, } } @@ -202,8 +202,8 @@ pub(crate) fn compile_jit_impl( let idx = next_jit_func_id(); let func_name = format!("jit_func_{}", idx); let id = module - .declare_function(&func_name, Linkage::Local, &ctx.func.signature) - .ok(); + .declare_function(&func_name, Linkage::Local, &ctx.func.signature) + .ok(); if id.is_none() { return None; } diff --git a/src/py/jit/codegen/eval.rs b/src/py/jit/codegen/eval.rs index 443ce5b..67b58b1 100644 --- a/src/py/jit/codegen/eval.rs +++ b/src/py/jit/codegen/eval.rs @@ -5,8 +5,7 @@ use super::{ LoweredExpr, LoweredUnaryKernel, SimdMathMode, }; -#[cfg(target_arch = "aarch64")] -use super::{fast_cos_approx_pair_neon, fast_sin_approx_pair_neon}; +use wide::f64x2; #[inline(always)] pub(crate) fn lowered_unary_eval_pair( @@ -19,25 +18,18 @@ pub(crate) fn lowered_unary_eval_pair( return None; } - #[cfg(target_arch = "aarch64")] - unsafe { - return match op { - LoweredUnaryKernel::Sin => Some(fast_sin_approx_pair_neon(x0, x1)), - LoweredUnaryKernel::Cos => Some(fast_cos_approx_pair_neon(x0, x1)), - 
LoweredUnaryKernel::Tan => { - let (s0, s1) = fast_sin_approx_pair_neon(x0, x1); - let (c0, c1) = fast_cos_approx_pair_neon(x0, x1); - Some((s0 / c0, s1 / c1)) - } - _ => None, - }; - } - - #[cfg(not(target_arch = "aarch64"))] - { - let _ = (op, x0, x1); - None - } + let values = f64x2::from([x0, x1]); + let out = match op { + LoweredUnaryKernel::Sin => values.sin(), + LoweredUnaryKernel::Cos => values.cos(), + LoweredUnaryKernel::Tan => values.tan(), + LoweredUnaryKernel::Exp => values.exp(), + LoweredUnaryKernel::Log => values.ln(), + LoweredUnaryKernel::Sqrt => values.sqrt(), + _ => return None, + }; + let out_arr = out.to_array(); + Some((out_arr[0], out_arr[1])) } #[inline(always)] diff --git a/src/py/jit/codegen/exec.rs b/src/py/jit/codegen/exec.rs index 80373da..e5862de 100644 --- a/src/py/jit/codegen/exec.rs +++ b/src/py/jit/codegen/exec.rs @@ -10,18 +10,18 @@ use crate::py::jit::simd; use super::{ lowered_binary_eval, lowered_expr_eval, lowered_expr_eval_pair, lowered_unary_eval, - lowered_unary_eval_pair, open_typed_buffer, read_buffer_f64, BREAK_SENTINEL_BITS, - CONTINUE_SENTINEL_BITS, BufferElemType, BufferView, JitEntry, JitExecProfile, - JitReturnType, LoweredBinaryKernel, LoweredKernel, LoweredUnaryKernel, ReductionMode, + lowered_unary_eval_pair, open_typed_buffer, read_buffer_f64, BufferElemType, BufferView, + JitEntry, JitExecProfile, JitReturnType, LoweredBinaryKernel, LoweredKernel, + LoweredUnaryKernel, ReductionMode, BREAK_SENTINEL_BITS, CONTINUE_SENTINEL_BITS, TLS_JIT_TYPE_PROFILE, }; static LOWERED_EXEC_LOGGED: once_cell::sync::OnceCell>> = -once_cell::sync::OnceCell::new(); + once_cell::sync::OnceCell::new(); static UNROLL_EXEC_LOGGED: once_cell::sync::OnceCell>> = -once_cell::sync::OnceCell::new(); + once_cell::sync::OnceCell::new(); static SIMD_MATH_EXEC_LOGGED: once_cell::sync::OnceCell>> = -once_cell::sync::OnceCell::new(); + once_cell::sync::OnceCell::new(); fn log_lowered_exec_once(entry: &JitEntry, len: usize) { let Some(kernel) = 
entry.lowered_kernel.as_ref() else { @@ -99,11 +99,11 @@ fn try_execute_lowered_vector_kernel( lowered_unary_eval(op, x, mode) }; let eval_binary = - |op: LoweredBinaryKernel, lhs_view: &BufferView, rhs_view: &BufferView, idx: usize| { - let l = unsafe { read_buffer_f64(lhs_view, idx) }; - let r = unsafe { read_buffer_f64(rhs_view, idx) }; - lowered_binary_eval(op, l, r) - }; + |op: LoweredBinaryKernel, lhs_view: &BufferView, rhs_view: &BufferView, idx: usize| { + let l = unsafe { read_buffer_f64(lhs_view, idx) }; + let r = unsafe { read_buffer_f64(rhs_view, idx) }; + lowered_binary_eval(op, l, r) + }; if entry.reduction != ReductionMode::None { let lanes = unroll.clamp(1, 4); @@ -122,13 +122,13 @@ fn try_execute_lowered_vector_kernel( let x1 = unsafe { read_buffer_f64(input_view, i + lane + 1) }; if let Some((y0, y1)) = lowered_unary_eval_pair(op, x0, x1, mode) - { - log_simd_math_exec_once(entry, op); - lane_acc[lane] += y0; - lane_acc[lane + 1] += y1; - lane += 2; - continue; - } + { + log_simd_math_exec_once(entry, op); + lane_acc[lane] += y0; + lane_acc[lane + 1] += y1; + lane += 2; + continue; + } } lane_acc[lane] += eval_unary(op, input_view, i + lane); lane += 1; @@ -211,7 +211,7 @@ fn try_execute_lowered_vector_kernel( while i + lanes <= len { for lane in 0..lanes { lane_any[lane] |= - eval_binary(op, lhs_view, rhs_view, i + lane) != 0.0; + eval_binary(op, lhs_view, rhs_view, i + lane) != 0.0; } if lane_any[..lanes].iter().any(|v| *v) { return Some(LoweredVectorResult::Reduced(1.0)); @@ -232,7 +232,7 @@ fn try_execute_lowered_vector_kernel( while i + lanes <= len { for lane in 0..lanes { lane_all[lane] &= - eval_binary(op, lhs_view, rhs_view, i + lane) != 0.0; + eval_binary(op, lhs_view, rhs_view, i + lane) != 0.0; } if lane_all[..lanes].iter().any(|v| !*v) { return Some(LoweredVectorResult::Reduced(0.0)); @@ -261,7 +261,7 @@ fn try_execute_lowered_vector_kernel( let idx0 = i + lane; let idx1 = i + lane + 1; let (v0, v1) = - lowered_expr_eval_pair(&expr, 
views, idx0, idx1, mode)?; + lowered_expr_eval_pair(&expr, views, idx0, idx1, mode)?; lane_acc[lane] += v0; lane_acc[lane + 1] += v1; lane += 2; @@ -285,7 +285,7 @@ fn try_execute_lowered_vector_kernel( while i + lanes <= len { for lane in 0..lanes { lane_any[lane] |= - lowered_expr_eval(&expr, views, i + lane, mode)? != 0.0; + lowered_expr_eval(&expr, views, i + lane, mode)? != 0.0; } if lane_any[..lanes].iter().any(|v| *v) { return Some(LoweredVectorResult::Reduced(1.0)); @@ -306,7 +306,7 @@ fn try_execute_lowered_vector_kernel( while i + lanes <= len { for lane in 0..lanes { lane_all[lane] &= - lowered_expr_eval(&expr, views, i + lane, mode)? != 0.0; + lowered_expr_eval(&expr, views, i + lane, mode)? != 0.0; } if lane_all[..lanes].iter().any(|v| !*v) { return Some(LoweredVectorResult::Reduced(0.0)); @@ -498,16 +498,16 @@ fn vec_f64_to_py(py: pyo3::Python, values: &[f64], return_type: JitReturnType) - let byte_slice = unsafe { std::slice::from_raw_parts( values.as_ptr() as *const u8, - values.len() * std::mem::size_of::(), + values.len() * std::mem::size_of::(), ) }; let py_bytes = pyo3::types::PyBytes::new(py, byte_slice); let array_mod = py.import("array").unwrap(); let array_obj = array_mod - .getattr("array") - .unwrap() - .call1(("d", py_bytes)) - .unwrap(); + .getattr("array") + .unwrap() + .call1(("d", py_bytes)) + .unwrap(); array_obj.into_py(py) } JitReturnType::Int => { @@ -527,11 +527,11 @@ fn execute_views( py: pyo3::Python, entry: &JitEntry, f: extern "C" fn(*const f64) -> f64, - loop_unroll: usize, - views: &[BufferView], - len: usize, - arg_count: usize, - log_path: &'static str, + loop_unroll: usize, + views: &[BufferView], + len: usize, + arg_count: usize, + log_path: &'static str, ) -> pyo3::PyResult { log_unroll_exec_once(entry, len, loop_unroll, log_path); if let Some(lowered) = try_execute_lowered_vector_kernel(entry, views, len, loop_unroll) { @@ -671,8 +671,8 @@ fn try_exec_trailing_count( entry: &JitEntry, args: &pyo3::types::PyTuple, f: 
extern "C" fn(*const f64) -> f64, - loop_unroll: usize, - arg_count: usize, + loop_unroll: usize, + arg_count: usize, ) -> pyo3::PyResult> { if arg_count != entry.arg_count + 1 { return Ok(None); @@ -700,11 +700,7 @@ fn try_exec_trailing_count( if entry.arg_count <= MAX_FAST_ARGS { let mut stack_args: [f64; MAX_FAST_ARGS] = [0.0; MAX_FAST_ARGS]; for i in 0..entry.arg_count { - let item = unsafe { pyo3::ffi::PyTuple_GET_ITEM(args.as_ptr(), i as isize) }; - let val = unsafe { pyo3::ffi::PyFloat_AsDouble(item) }; - if val == -1.0 && !unsafe { pyo3::ffi::PyErr_Occurred() }.is_null() { - return Err(pyo3::PyErr::fetch(py)); - } + let val = args.get_item(i)?.extract::()?; stack_args[i] = val; } let mut produced = 0; @@ -721,11 +717,7 @@ fn try_exec_trailing_count( } else { let mut heap_args = Vec::with_capacity(entry.arg_count); for i in 0..entry.arg_count { - let item = unsafe { pyo3::ffi::PyTuple_GET_ITEM(args.as_ptr(), i as isize) }; - let val = unsafe { pyo3::ffi::PyFloat_AsDouble(item) }; - if val == -1.0 && !unsafe { pyo3::ffi::PyErr_Occurred() }.is_null() { - return Err(pyo3::PyErr::fetch(py)); - } + let val = args.get_item(i)?.extract::()?; heap_args.push(val); } let mut produced = 0; @@ -744,7 +736,7 @@ fn try_exec_trailing_count( let byte_slice = unsafe { std::slice::from_raw_parts( results.as_ptr() as *const u8, - results.len() * std::mem::size_of::(), + results.len() * std::mem::size_of::(), ) }; let py_bytes = pyo3::types::PyBytes::new(py, byte_slice); @@ -759,15 +751,18 @@ fn try_exec_profiled( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - loop_unroll: usize, - arg_count: usize, + loop_unroll: usize, + arg_count: usize, ) -> pyo3::PyResult> { let Some(profile) = lookup_exec_profile(entry.func_ptr) else { return Ok(None); }; match profile { - JitExecProfile::PackedBuffer { arg_count: expected, elem } => { + JitExecProfile::PackedBuffer { + arg_count: expected, + elem, + } => { if arg_count == 1 && entry.arg_count == 
expected { if let Ok(item) = args.get_item(0) { if let Some(view) = unsafe { open_typed_buffer(item) } { @@ -789,7 +784,10 @@ fn try_exec_profiled( } } } - JitExecProfile::VectorizedBuffers { arg_count: expected, elem_types } => { + JitExecProfile::VectorizedBuffers { + arg_count: expected, + elem_types, + } => { if arg_count == expected && expected == elem_types.len() { let mut views = Vec::with_capacity(expected); let mut common_len: Option = None; @@ -820,7 +818,16 @@ fn try_exec_profiled( if matched { let len = common_len.unwrap_or(0); - let result = execute_views(py, entry, f, loop_unroll, &views, len, expected, "profiled-vector-buffers")?; + let result = execute_views( + py, + entry, + f, + loop_unroll, + &views, + len, + expected, + "profiled-vector-buffers", + )?; return Ok(Some(result)); } } @@ -832,7 +839,7 @@ fn try_exec_profiled( let mut stack_args = [0.0_f64; MAX_FAST_ARGS]; let mut scalar_mismatch = false; for i in 0..arg_count { - let item = unsafe { pyo3::ffi::PyTuple_GET_ITEM(args.as_ptr(), i as isize) }; + let item = unsafe { pyo3::ffi::PyTuple_GetItem(args.as_ptr(), i as isize) }; let val = unsafe { pyo3::ffi::PyFloat_AsDouble(item) }; if val == -1.0 && !unsafe { pyo3::ffi::PyErr_Occurred() }.is_null() { unsafe { pyo3::ffi::PyErr_Clear() }; @@ -858,7 +865,7 @@ fn try_exec_single_packed_buffer( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - arg_count: usize, + arg_count: usize, ) -> pyo3::PyResult> { if arg_count == 1 && entry.arg_count > 1 { if let Ok(item) = args.get_item(0) { @@ -903,8 +910,8 @@ fn try_exec_vectorized_buffers( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - loop_unroll: usize, - arg_count: usize, + loop_unroll: usize, + arg_count: usize, ) -> pyo3::PyResult> { if arg_count == entry.arg_count && arg_count > 0 { let mut views = Vec::with_capacity(arg_count); @@ -945,7 +952,16 @@ fn try_exec_vectorized_buffers( }, ); - let result = execute_views(py, 
entry, f, loop_unroll, &views, len, arg_count, "generic-vector-buffers")?; + let result = execute_views( + py, + entry, + f, + loop_unroll, + &views, + len, + arg_count, + "generic-vector-buffers", + )?; return Ok(Some(result)); } } @@ -959,14 +975,14 @@ fn try_exec_sequence_fallback( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - loop_unroll: usize, - arg_count: usize, + loop_unroll: usize, + arg_count: usize, ) -> pyo3::PyResult> { if arg_count == 1 && entry.arg_count == 1 { if let Ok(item) = args.get_item(0) { let is_text_like = item.is_instance_of::() - || item.is_instance_of::() - || item.is_instance_of::(); + || item.is_instance_of::() + || item.is_instance_of::(); if !is_text_like { if let Ok(len) = item.len() { @@ -1033,7 +1049,7 @@ fn try_exec_reduction_iterator( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - arg_count: usize, + arg_count: usize, ) -> pyo3::PyResult> { if arg_count == 1 && entry.arg_count == 1 && entry.reduction != ReductionMode::None { if let Ok(item) = args.get_item(0) { @@ -1062,7 +1078,7 @@ fn exec_scalar_args( entry: &JitEntry, args: &pyo3::types::PyTuple, f: extern "C" fn(*const f64) -> f64, - arg_count: usize, + arg_count: usize, ) -> pyo3::PyResult { if arg_count != entry.arg_count { return Err(pyo3::exceptions::PyValueError::new_err( @@ -1074,11 +1090,7 @@ fn exec_scalar_args( if arg_count <= MAX_FAST_ARGS { let mut stack_args: [f64; MAX_FAST_ARGS] = [0.0; MAX_FAST_ARGS]; for i in 0..arg_count { - let item = unsafe { pyo3::ffi::PyTuple_GET_ITEM(args.as_ptr(), i as isize) }; - let val = unsafe { pyo3::ffi::PyFloat_AsDouble(item) }; - if val == -1.0 && !unsafe { pyo3::ffi::PyErr_Occurred() }.is_null() { - return Err(pyo3::PyErr::fetch(py)); - } + let val = args.get_item(i)?.extract::()?; stack_args[i] = val; } store_exec_profile(entry.func_ptr, JitExecProfile::ScalarArgs); @@ -1088,11 +1100,7 @@ fn exec_scalar_args( let mut heap_args = 
Vec::with_capacity(arg_count); for i in 0..arg_count { - let item = unsafe { pyo3::ffi::PyTuple_GET_ITEM(args.as_ptr(), i as isize) }; - let val = unsafe { pyo3::ffi::PyFloat_AsDouble(item) }; - if val == -1.0 && !unsafe { pyo3::ffi::PyErr_Occurred() }.is_null() { - return Err(pyo3::PyErr::fetch(py)); - } + let val = args.get_item(i)?.extract::()?; heap_args.push(val); } store_exec_profile(entry.func_ptr, JitExecProfile::ScalarArgs); diff --git a/src/py/jit/codegen/gen_expr.rs b/src/py/jit/codegen/gen_expr.rs index edd2120..829ca77 100644 --- a/src/py/jit/codegen/gen_expr.rs +++ b/src/py/jit/codegen/gen_expr.rs @@ -77,14 +77,12 @@ pub(crate) fn gen_expr( match expr { Expr::Const(n) => fb.ins().f64const(*n), Expr::Var(name) => gen_var(name, fb, ptr, arg_names, locals), - Expr::BinOp(lhs, op, rhs) => { - gen_binop(lhs, op, rhs, fb, ptr, arg_names, module, locals) - } + Expr::BinOp(lhs, op, rhs) => gen_binop(lhs, op, rhs, fb, ptr, arg_names, module, locals), Expr::UnaryOp(op, sub) => gen_unaryop(*op, sub, fb, ptr, arg_names, module, locals), Expr::Call(name, args) => gen_call(name, args, fb, ptr, arg_names, module, locals), - Expr::Ternary(cond, then_expr, else_expr) => { - gen_ternary(cond, then_expr, else_expr, fb, ptr, arg_names, module, locals) - } + Expr::Ternary(cond, then_expr, else_expr) => gen_ternary( + cond, then_expr, else_expr, fb, ptr, arg_names, module, locals, + ), Expr::SumOver { .. } => panic!("SumOver should have been transformed before codegen"), Expr::AnyOver { .. } => panic!("AnyOver should have been transformed before codegen"), Expr::AllOver { .. 
} => panic!("AllOver should have been transformed before codegen"), @@ -173,8 +171,8 @@ fn gen_binop( sig.params.push(AbiParam::new(types::F64)); sig.returns.push(AbiParam::new(types::F64)); let fid = module - .declare_function("fmod", Linkage::Import, &sig) - .expect("failed to declare fmod"); + .declare_function("fmod", Linkage::Import, &sig) + .expect("failed to declare fmod"); let local = module.declare_func_in_func(fid, &mut fb.func); let call = fb.ins().call(local, &[l, r]); fb.inst_results(call)[0] @@ -193,8 +191,8 @@ fn gen_binop( sig.params.push(AbiParam::new(types::F64)); sig.returns.push(AbiParam::new(types::F64)); let fid = module - .declare_function("sqrt", Linkage::Import, &sig) - .expect("failed to declare sqrt"); + .declare_function("sqrt", Linkage::Import, &sig) + .expect("failed to declare sqrt"); let local = module.declare_func_in_func(fid, &mut fb.func); let call = fb.ins().call(local, &[l]); return fb.inst_results(call)[0]; @@ -225,8 +223,8 @@ fn gen_binop( sig.params.push(AbiParam::new(types::F64)); sig.returns.push(AbiParam::new(types::F64)); let fid = module - .declare_function("pow", Linkage::Import, &sig) - .expect("failed to declare pow"); + .declare_function("pow", Linkage::Import, &sig) + .expect("failed to declare pow"); let local = module.declare_func_in_func(fid, &mut fb.func); let call = fb.ins().call(local, &[l, r]); fb.inst_results(call)[0] @@ -291,11 +289,11 @@ fn gen_ternary( cond: &Expr, then_expr: &Expr, else_expr: &Expr, - fb: &mut FunctionBuilder, - ptr: Value, - arg_names: &[String], - module: &mut JITModule, - locals: &HashMap, + fb: &mut FunctionBuilder, + ptr: Value, + arg_names: &[String], + module: &mut JITModule, + locals: &HashMap, ) -> Value { let cond_val = gen_expr(cond, fb, ptr, arg_names, module, locals); let zero = fb.ins().f64const(0.0); @@ -329,179 +327,179 @@ fn gen_call( || symbol == "continue_on_nan" || symbol == "loop_continue_on_nan") && args.len() == 1 - { - let value_val = gen_expr(&args[0], fb, ptr, 
arg_names, module, locals); - let is_nan = fb.ins().fcmp(FloatCC::Unordered, value_val, value_val); - let sentinel = if symbol == "break_on_nan" || symbol == "loop_break_on_nan" { - fb.ins().f64const(f64::from_bits(BREAK_SENTINEL_BITS)) - } else { - fb.ins().f64const(f64::from_bits(CONTINUE_SENTINEL_BITS)) - }; - return fb.ins().select(is_nan, sentinel, value_val); - } + { + let value_val = gen_expr(&args[0], fb, ptr, arg_names, module, locals); + let is_nan = fb.ins().fcmp(FloatCC::Unordered, value_val, value_val); + let sentinel = if symbol == "break_on_nan" || symbol == "loop_break_on_nan" { + fb.ins().f64const(f64::from_bits(BREAK_SENTINEL_BITS)) + } else { + fb.ins().f64const(f64::from_bits(CONTINUE_SENTINEL_BITS)) + }; + return fb.ins().select(is_nan, sentinel, value_val); + } - if symbol == "let_bind" && args.len() == 3 { - if let Expr::Var(var_name) = &args[0] { - let v = gen_expr(&args[1], fb, ptr, arg_names, module, locals); - let mut new_locals = locals.clone(); - new_locals.insert(var_name.clone(), v); - return gen_expr(&args[2], fb, ptr, arg_names, module, &new_locals); - } + if symbol == "let_bind" && args.len() == 3 { + if let Expr::Var(var_name) = &args[0] { + let v = gen_expr(&args[1], fb, ptr, arg_names, module, locals); + let mut new_locals = locals.clone(); + new_locals.insert(var_name.clone(), v); + return gen_expr(&args[2], fb, ptr, arg_names, module, &new_locals); } + } - if symbol == "if_else" && args.len() == 3 { - let cond_val = gen_expr(&args[0], fb, ptr, arg_names, module, locals); - let then_val = gen_expr(&args[1], fb, ptr, arg_names, module, locals); - let else_val = gen_expr(&args[2], fb, ptr, arg_names, module, locals); - let zero = fb.ins().f64const(0.0); - let cond_true = fb.ins().fcmp(FloatCC::NotEqual, cond_val, zero); - return fb.ins().select(cond_true, then_val, else_val); - } + if symbol == "if_else" && args.len() == 3 { + let cond_val = gen_expr(&args[0], fb, ptr, arg_names, module, locals); + let then_val = 
gen_expr(&args[1], fb, ptr, arg_names, module, locals); + let else_val = gen_expr(&args[2], fb, ptr, arg_names, module, locals); + let zero = fb.ins().f64const(0.0); + let cond_true = fb.ins().fcmp(FloatCC::NotEqual, cond_val, zero); + return fb.ins().select(cond_true, then_val, else_val); + } - if let Some(alias) = resolve_symbol_alias(&symbol, args.len()) { - match alias { - SymbolAlias::Identity => { - return gen_expr(&args[0], fb, ptr, arg_names, module, locals); + if let Some(alias) = resolve_symbol_alias(&symbol, args.len()) { + match alias { + SymbolAlias::Identity => { + return gen_expr(&args[0], fb, ptr, arg_names, module, locals); + } + SymbolAlias::Rename(target) => { + let mut arg_vals = Vec::with_capacity(args.len()); + for a in args { + arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); } - SymbolAlias::Rename(target) => { - let mut arg_vals = Vec::with_capacity(args.len()); - for a in args { - arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); - } - let mut sig = module.make_signature(); - for _ in 0..arg_vals.len() { - sig.params.push(AbiParam::new(types::F64)); - } - sig.returns.push(AbiParam::new(types::F64)); - let func_id = module + let mut sig = module.make_signature(); + for _ in 0..arg_vals.len() { + sig.params.push(AbiParam::new(types::F64)); + } + sig.returns.push(AbiParam::new(types::F64)); + let func_id = module .declare_function(target, Linkage::Import, &sig) .expect("failed to declare external function"); - let local = module.declare_func_in_func(func_id, &mut fb.func); - let call = fb.ins().call(local, &arg_vals); - return fb.inst_results(call)[0]; - } + let local = module.declare_func_in_func(func_id, &mut fb.func); + let call = fb.ins().call(local, &arg_vals); + return fb.inst_results(call)[0]; } } + } - if symbol == "min" && args.len() == 2 { - let a = gen_expr(&args[0], fb, ptr, arg_names, module, locals); - let b = gen_expr(&args[1], fb, ptr, arg_names, module, locals); - let cond = 
fb.ins().fcmp(FloatCC::LessThanOrEqual, a, b); - return fb.ins().select(cond, a, b); - } + if symbol == "min" && args.len() == 2 { + let a = gen_expr(&args[0], fb, ptr, arg_names, module, locals); + let b = gen_expr(&args[1], fb, ptr, arg_names, module, locals); + let cond = fb.ins().fcmp(FloatCC::LessThanOrEqual, a, b); + return fb.ins().select(cond, a, b); + } - if symbol == "max" && args.len() == 2 { - let a = gen_expr(&args[0], fb, ptr, arg_names, module, locals); - let b = gen_expr(&args[1], fb, ptr, arg_names, module, locals); - let cond = fb.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b); - return fb.ins().select(cond, a, b); - } + if symbol == "max" && args.len() == 2 { + let a = gen_expr(&args[0], fb, ptr, arg_names, module, locals); + let b = gen_expr(&args[1], fb, ptr, arg_names, module, locals); + let cond = fb.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b); + return fb.ins().select(cond, a, b); + } - if let Some(named) = lookup_named_jit(&symbol) { - if named.arg_count == args.len() { - let helper_name = match args.len() { - 0 => Some("iris_jit_invoke_0"), - 1 => Some("iris_jit_invoke_1"), - 2 => Some("iris_jit_invoke_2"), - 3 => Some("iris_jit_invoke_3"), - 4 => Some("iris_jit_invoke_4"), - 5 => Some("iris_jit_invoke_5"), - 6 => Some("iris_jit_invoke_6"), - 7 => Some("iris_jit_invoke_7"), - 8 => Some("iris_jit_invoke_8"), - 9 => Some("iris_jit_invoke_9"), - 10 => Some("iris_jit_invoke_10"), - 11 => Some("iris_jit_invoke_11"), - 12 => Some("iris_jit_invoke_12"), - 13 => Some("iris_jit_invoke_13"), - 14 => Some("iris_jit_invoke_14"), - 15 => Some("iris_jit_invoke_15"), - 16 => Some("iris_jit_invoke_16"), - _ => None, - }; - - if let Some(helper_name) = helper_name { - let mut arg_vals = Vec::with_capacity(args.len() + 1); - arg_vals.push(fb.ins().iconst(types::I64, named.func_ptr as i64)); - for a in args { - arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); - } + if let Some(named) = lookup_named_jit(&symbol) { + if named.arg_count == 
args.len() { + let helper_name = match args.len() { + 0 => Some("iris_jit_invoke_0"), + 1 => Some("iris_jit_invoke_1"), + 2 => Some("iris_jit_invoke_2"), + 3 => Some("iris_jit_invoke_3"), + 4 => Some("iris_jit_invoke_4"), + 5 => Some("iris_jit_invoke_5"), + 6 => Some("iris_jit_invoke_6"), + 7 => Some("iris_jit_invoke_7"), + 8 => Some("iris_jit_invoke_8"), + 9 => Some("iris_jit_invoke_9"), + 10 => Some("iris_jit_invoke_10"), + 11 => Some("iris_jit_invoke_11"), + 12 => Some("iris_jit_invoke_12"), + 13 => Some("iris_jit_invoke_13"), + 14 => Some("iris_jit_invoke_14"), + 15 => Some("iris_jit_invoke_15"), + 16 => Some("iris_jit_invoke_16"), + _ => None, + }; - let mut sig = module.make_signature(); - sig.params.push(AbiParam::new(types::I64)); - for _ in 0..args.len() { - sig.params.push(AbiParam::new(types::F64)); - } - sig.returns.push(AbiParam::new(types::F64)); - let func_id = module + if let Some(helper_name) = helper_name { + let mut arg_vals = Vec::with_capacity(args.len() + 1); + arg_vals.push(fb.ins().iconst(types::I64, named.func_ptr as i64)); + for a in args { + arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); + } + + let mut sig = module.make_signature(); + sig.params.push(AbiParam::new(types::I64)); + for _ in 0..args.len() { + sig.params.push(AbiParam::new(types::F64)); + } + sig.returns.push(AbiParam::new(types::F64)); + let func_id = module .declare_function(helper_name, Linkage::Import, &sig) .expect("failed to declare named jit invoke helper"); - let local = module.declare_func_in_func(func_id, &mut fb.func); - let call = fb.ins().call(local, &arg_vals); - return fb.inst_results(call)[0]; - } + let local = module.declare_func_in_func(func_id, &mut fb.func); + let call = fb.ins().call(local, &arg_vals); + return fb.inst_results(call)[0]; } } + } - if (symbol == "break_if" + if (symbol == "break_if" + || symbol == "loop_break_if" + || symbol == "break_when" + || symbol == "loop_break_when" + || symbol == "break_unless" + || symbol == 
"loop_break_unless" + || symbol == "continue_if" + || symbol == "loop_continue_if" + || symbol == "continue_when" + || symbol == "loop_continue_when" + || symbol == "continue_unless" + || symbol == "loop_continue_unless") + && args.len() == 2 + { + let cond_val = gen_expr(&args[0], fb, ptr, arg_names, module, locals); + let value_val = gen_expr(&args[1], fb, ptr, arg_names, module, locals); + let zero = fb.ins().f64const(0.0); + let cond_true = if symbol == "break_unless" + || symbol == "loop_break_unless" + || symbol == "continue_unless" + || symbol == "loop_continue_unless" + { + fb.ins().fcmp(FloatCC::Equal, cond_val, zero) + } else { + fb.ins().fcmp(FloatCC::NotEqual, cond_val, zero) + }; + let sentinel = if symbol == "break_if" || symbol == "loop_break_if" || symbol == "break_when" || symbol == "loop_break_when" || symbol == "break_unless" || symbol == "loop_break_unless" - || symbol == "continue_if" - || symbol == "loop_continue_if" - || symbol == "continue_when" - || symbol == "loop_continue_when" - || symbol == "continue_unless" - || symbol == "loop_continue_unless") - && args.len() == 2 - { - let cond_val = gen_expr(&args[0], fb, ptr, arg_names, module, locals); - let value_val = gen_expr(&args[1], fb, ptr, arg_names, module, locals); - let zero = fb.ins().f64const(0.0); - let cond_true = if symbol == "break_unless" - || symbol == "loop_break_unless" - || symbol == "continue_unless" - || symbol == "loop_continue_unless" - { - fb.ins().fcmp(FloatCC::Equal, cond_val, zero) - } else { - fb.ins().fcmp(FloatCC::NotEqual, cond_val, zero) - }; - let sentinel = if symbol == "break_if" - || symbol == "loop_break_if" - || symbol == "break_when" - || symbol == "loop_break_when" - || symbol == "break_unless" - || symbol == "loop_break_unless" - { - fb.ins().f64const(f64::from_bits(BREAK_SENTINEL_BITS)) - } else { - fb.ins().f64const(f64::from_bits(CONTINUE_SENTINEL_BITS)) - }; - return fb.ins().select(cond_true, sentinel, value_val); - } + { + 
fb.ins().f64const(f64::from_bits(BREAK_SENTINEL_BITS)) + } else { + fb.ins().f64const(f64::from_bits(CONTINUE_SENTINEL_BITS)) + }; + return fb.ins().select(cond_true, sentinel, value_val); + } - let mut arg_vals = Vec::with_capacity(args.len()); - for a in args { - arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); - } - let mut symbol = symbol; - if symbol == "abs" { - symbol = "fabs".to_string(); - } - let mut sig = module.make_signature(); - for _ in 0..arg_vals.len() { - sig.params.push(AbiParam::new(types::F64)); - } - sig.returns.push(AbiParam::new(types::F64)); - let func_id = module - .declare_function(&symbol, Linkage::Import, &sig) - .expect("failed to declare external function"); - let local = module.declare_func_in_func(func_id, &mut fb.func); - let call = fb.ins().call(local, &arg_vals); - fb.inst_results(call)[0] + let mut arg_vals = Vec::with_capacity(args.len()); + for a in args { + arg_vals.push(gen_expr(a, fb, ptr, arg_names, module, locals)); + } + let mut symbol = symbol; + if symbol == "abs" { + symbol = "fabs".to_string(); + } + let mut sig = module.make_signature(); + for _ in 0..arg_vals.len() { + sig.params.push(AbiParam::new(types::F64)); + } + sig.returns.push(AbiParam::new(types::F64)); + let func_id = module + .declare_function(&symbol, Linkage::Import, &sig) + .expect("failed to declare external function"); + let local = module.declare_func_in_func(func_id, &mut fb.func); + let call = fb.ins().call(local, &arg_vals); + fb.inst_results(call)[0] } fn gen_any_all_while( @@ -605,11 +603,11 @@ fn gen_any_all_while( let budget_next = fb.ins().iadd_imm(budget_val, -1); let body_bits = fb.ins().bitcast(types::I64, body_val); let is_break_sentinel = - fb.ins() - .icmp_imm(IntCC::Equal, body_bits, BREAK_SENTINEL_BITS as i64); + fb.ins() + .icmp_imm(IntCC::Equal, body_bits, BREAK_SENTINEL_BITS as i64); let is_continue_sentinel = - fb.ins() - .icmp_imm(IntCC::Equal, body_bits, CONTINUE_SENTINEL_BITS as i64); + fb.ins() + 
.icmp_imm(IntCC::Equal, body_bits, CONTINUE_SENTINEL_BITS as i64); let stop_now = fb.ins().bor(break_true, is_break_sentinel); let skip_body = fb.ins().bor(continue_true, is_continue_sentinel); @@ -622,7 +620,7 @@ fn gen_any_all_while( let any_exit_block = fb.create_block(); fb.ins().brnz(stop_any, any_exit_block, &[]); fb.ins() - .jump(continue_block, &[step_val, zero, budget_next]); + .jump(continue_block, &[step_val, zero, budget_next]); fb.switch_to_block(any_exit_block); let exit_val = fb.ins().select(stop_now, acc_val, one); @@ -649,7 +647,7 @@ fn gen_any_all_while( let next_acc = fb.block_params(continue_block)[1]; let next_budget = fb.block_params(continue_block)[2]; fb.ins() - .jump(loop_block, &[next_iter, next_acc, next_budget]); + .jump(loop_block, &[next_iter, next_acc, next_budget]); fb.seal_block(body_block); fb.seal_block(budget_exit_block); fb.seal_block(budget_ok_block); @@ -761,11 +759,11 @@ fn gen_sum_while( let budget_next = fb.ins().iadd_imm(budget_val, -1); let body_bits = fb.ins().bitcast(types::I64, body_val); let is_break_sentinel = - fb.ins() - .icmp_imm(IntCC::Equal, body_bits, BREAK_SENTINEL_BITS as i64); + fb.ins() + .icmp_imm(IntCC::Equal, body_bits, BREAK_SENTINEL_BITS as i64); let is_continue_sentinel = - fb.ins() - .icmp_imm(IntCC::Equal, body_bits, CONTINUE_SENTINEL_BITS as i64); + fb.ins() + .icmp_imm(IntCC::Equal, body_bits, CONTINUE_SENTINEL_BITS as i64); let stop_now = fb.ins().bor(break_true, is_break_sentinel); let skip_body = fb.ins().bor(continue_true, is_continue_sentinel); @@ -774,7 +772,7 @@ fn gen_sum_while( let sum_break_block = fb.create_block(); fb.ins().brnz(stop_now, sum_break_block, &[]); fb.ins() - .jump(continue_block, &[step_val, next_acc, budget_next]); + .jump(continue_block, &[step_val, next_acc, budget_next]); fb.switch_to_block(sum_break_block); fb.ins().jump(exit_block, &[acc_val]); fb.seal_block(sum_break_block); @@ -784,7 +782,7 @@ fn gen_sum_while( let next_acc = fb.block_params(continue_block)[1]; 
let next_budget = fb.block_params(continue_block)[2]; fb.ins() - .jump(loop_block, &[next_iter, next_acc, next_budget]); + .jump(loop_block, &[next_iter, next_acc, next_budget]); fb.seal_block(body_block); fb.seal_block(budget_exit_block); diff --git a/src/py/jit/codegen/jit_module.rs b/src/py/jit/codegen/jit_module.rs index 4e3f45a..440e89c 100644 --- a/src/py/jit/codegen/jit_module.rs +++ b/src/py/jit/codegen/jit_module.rs @@ -51,7 +51,7 @@ thread_local! { /// Create/use a thread-local JIT module and invoke `f` with it. pub(crate) fn with_jit_module(f: F) -> R where -F: FnOnce(&mut JITModule) -> R, + F: FnOnce(&mut JITModule) -> R, { TLS_JIT_MODULE.with(|cell| { let mut opt = cell.borrow_mut(); diff --git a/src/py/jit/codegen/lowering.rs b/src/py/jit/codegen/lowering.rs index 4a8f3a7..6485a41 100644 --- a/src/py/jit/codegen/lowering.rs +++ b/src/py/jit/codegen/lowering.rs @@ -39,7 +39,8 @@ pub(crate) fn detect_lowered_kernel(expr: &Expr, arg_names: &[String]) -> Option if args.len() == 1 { if let Expr::Var(var_name) = &args[0] { if let Some(input) = arg_index_of_var(arg_names, var_name) { - let op = match normalize_intrinsic_name(name).to_ascii_lowercase().as_str() { + let op = match normalize_intrinsic_name(name).to_ascii_lowercase().as_str() + { "abs" | "fabs" => Some(LoweredUnaryKernel::Abs), "sin" => Some(LoweredUnaryKernel::Sin), "cos" => Some(LoweredUnaryKernel::Cos), diff --git a/src/py/jit/codegen/mod.rs b/src/py/jit/codegen/mod.rs index 94c2588..1dd8223 100644 --- a/src/py/jit/codegen/mod.rs +++ b/src/py/jit/codegen/mod.rs @@ -4,33 +4,33 @@ use std::time::Instant; // Internal submodules -mod jit_types; -mod registry; -mod quantum; -mod lowering; -mod gen_expr; -mod compiler; -mod jit_module; -mod math; mod buffer; +mod compiler; mod eval; mod exec; +mod gen_expr; +mod jit_module; +mod jit_types; +mod lowering; +mod math; +mod quantum; +mod registry; // Re-exports +pub use compiler::*; pub use jit_types::*; -pub use registry::*; pub use quantum::*; -pub 
use compiler::*; +pub use registry::*; -pub(crate) use lowering::*; pub(crate) use jit_module::*; +pub(crate) use lowering::*; // Internal helper exports for submodules pub(crate) use buffer::*; -pub(crate) use math::*; pub(crate) use eval::*; pub(crate) use exec::*; pub(crate) use gen_expr::gen_expr; +pub(crate) use math::*; pub const BREAK_SENTINEL_BITS: u64 = 0x7ff8_0000_0000_0b01; pub const CONTINUE_SENTINEL_BITS: u64 = 0x7ff8_0000_0000_0c01; @@ -39,7 +39,7 @@ pub(crate) fn jit_dump_clif_enabled() -> bool { match std::env::var("IRIS_JIT_DUMP_CLIF") { Ok(v) => matches!( v.trim().to_ascii_lowercase().as_str(), - "1" | "true" | "yes" | "on" + "1" | "true" | "yes" | "on" ), Err(_) => false, } @@ -60,16 +60,16 @@ pub fn execute_registered_jit( (None, 0usize, Vec::new(), false, 0usize) } else { let speculation_threshold_ns = - crate::py::jit::quantum_speculation_threshold_ns(); + crate::py::jit::quantum_speculation_threshold_ns(); let best_ewma = state - .stats - .iter() - .enumerate() - .filter(|(idx, _)| state.active.get(*idx).copied().unwrap_or(false)) - .map(|(_, s)| s) - .filter(|s| s.runs > 0) - .map(|s| s.ewma_ns) - .fold(f64::MAX, |a: f64, b| a.min(b)); + .stats + .iter() + .enumerate() + .filter(|(idx, _)| state.active.get(*idx).copied().unwrap_or(false)) + .map(|(_, s)| s) + .filter(|s| s.runs > 0) + .map(|s| s.ewma_ns) + .fold(f64::MAX, |a: f64, b| a.min(b)); let best_ewma = if best_ewma == f64::MAX { 0.0 } else { @@ -102,10 +102,10 @@ pub fn execute_registered_jit( } else { ( Some(baseline_entry), - baseline_idx, - Vec::new(), - false, - active_count, + baseline_idx, + Vec::new(), + false, + active_count, ) } } else { @@ -114,9 +114,9 @@ pub fn execute_registered_jit( if i != idx && state.active.get(i).copied().unwrap_or(false) && e.is_valid() - { - fallbacks.push((i, e.clone())); - } + { + fallbacks.push((i, e.clone())); + } } (Some(entry), idx, fallbacks, true, active_count) } @@ -128,10 +128,10 @@ pub fn execute_registered_jit( } else { ( 
Some(baseline_entry), - baseline_idx, - Vec::new(), - false, - active_count, + baseline_idx, + Vec::new(), + false, + active_count, ) } } @@ -274,13 +274,27 @@ mod tests { let src = "sum(((math.sin(x) * 0.5 + x * 1.2) / (1.0 + math.exp(-abs(x) * 0.001)) for x in data))"; let args = vec!["data".to_string()]; let entry = compile_jit_impl(src, &args, true, JitReturnType::Float) - .expect("compile sumover expression"); + .expect("compile sumover expression"); match entry.lowered_kernel { Some(LoweredKernel::Expr(_)) => {} other => panic!("expected lowered expression kernel, got {:?}", other), } } + #[test] + fn lowered_unary_eval_pair_supports_wide_vector_math() { + let result = lowered_unary_eval_pair( + LoweredUnaryKernel::Sin, + 0.5_f64, + 1.0_f64, + SimdMathMode::FastApprox, + ); + assert!(result.is_some()); + let (y0, y1) = result.unwrap(); + assert!((y0 - 0.4794).abs() < 0.001); + assert!((y1 - 0.84147).abs() < 0.001); + } + #[test] fn detect_lowered_kernel_for_filtered_ternary_expr() { let src = "((x % 2 == 0 or x > 75.0) and (not x < 10.0)) ? (x > 50.0 ? 
x * math.sin(x) : x * math.cos(x)) : 0.0"; @@ -299,7 +313,7 @@ mod tests { let src = "sum((x * math.sin(x) if x > 50.0 else x * math.cos(x) for x in data if (x % 2 == 0 or x > 75.0) and (not x < 10.0)))"; let args = vec!["data".to_string()]; let entry = compile_jit_impl(src, &args, true, JitReturnType::Float) - .expect("compile filtered ternary sumover expression"); + .expect("compile filtered ternary sumover expression"); match entry.lowered_kernel { Some(LoweredKernel::Expr(_)) => {} other => panic!("expected lowered expression kernel, got {:?}", other), diff --git a/src/py/jit/codegen/quantum.rs b/src/py/jit/codegen/quantum.rs index 95fcb06..0043312 100644 --- a/src/py/jit/codegen/quantum.rs +++ b/src/py/jit/codegen/quantum.rs @@ -7,15 +7,9 @@ use std::sync::Mutex; use once_cell::sync::OnceCell; use crate::py::jit::codegen::jit_types::{ - JitEntry, - QuantumProfilePoint, - QuantumProfileSeed, - QuantumVariantStrategy, -}; -use crate::py::jit::config::{ - quantum_variant_failure_limit, - quantum_variant_promotion_min_runs, + JitEntry, QuantumProfilePoint, QuantumProfileSeed, QuantumVariantStrategy, }; +use crate::py::jit::config::{quantum_variant_failure_limit, quantum_variant_promotion_min_runs}; #[derive(Clone, Default)] pub(crate) struct QuantumStats { @@ -36,7 +30,7 @@ pub(crate) struct QuantumState { pub(crate) static QUANTUM_REGISTRY: OnceCell>> = OnceCell::new(); pub(crate) static QUANTUM_PENDING_SEEDS: OnceCell>>> = -OnceCell::new(); + OnceCell::new(); fn apply_quantum_seeds(state: &mut QuantumState, seeds: &[QuantumProfileSeed]) { for seed in seeds { @@ -86,7 +80,10 @@ pub fn register_quantum_jit(func_key: usize, mut entries: Vec) { pub fn quantum_has_seed_hint(func_key: usize) -> bool { if let Some(map) = QUANTUM_PENDING_SEEDS.get() { let guard = map.lock().unwrap(); - return guard.get(&func_key).map(|rows| !rows.is_empty()).unwrap_or(false); + return guard + .get(&func_key) + .map(|rows| !rows.is_empty()) + .unwrap_or(false); } false @@ -135,9 +132,9 
@@ pub(crate) fn preferred_seed_variant_index(seeds: &[QuantumProfileSeed]) -> Opti let scalar_seed = seeds.iter().find(|seed| seed.index == 1).cloned(); let all_thin_samples = seeds - .iter() - .filter(|seed| quantum_seed_score(seed).is_some()) - .all(|seed| seed.runs < MIN_CONFIDENT_RUNS); + .iter() + .filter(|seed| quantum_seed_score(seed).is_some()) + .all(|seed| seed.runs < MIN_CONFIDENT_RUNS); if all_thin_samples { if let Some(seed) = scalar_seed { @@ -159,11 +156,11 @@ pub(crate) fn preferred_seed_variant_index(seeds: &[QuantumProfileSeed]) -> Opti if score < best_score || ((score - best_score).abs() < f64::EPSILON && seed.runs > best_runs) || ((score - best_score).abs() < f64::EPSILON - && seed.runs == best_runs - && seed.index < best_idx) - { - best = Some((seed.index, score, seed.runs)); - } + && seed.runs == best_runs + && seed.index < best_idx) + { + best = Some((seed.index, score, seed.runs)); + } } } } @@ -173,11 +170,11 @@ pub(crate) fn preferred_seed_variant_index(seeds: &[QuantumProfileSeed]) -> Opti pub(crate) fn active_indices(state: &QuantumState) -> Vec { state - .active - .iter() - .enumerate() - .filter_map(|(idx, is_active)| if *is_active { Some(idx) } else { None }) - .collect() + .active + .iter() + .enumerate() + .filter_map(|(idx, is_active)| if *is_active { Some(idx) } else { None }) + .collect() } pub(crate) fn quantum_stability_score(state: &QuantumState) -> f64 { @@ -285,9 +282,9 @@ pub(crate) fn reconcile_quantum_lifecycle(func_key: usize) -> bool { pub(crate) fn quantum_active_variant_count(func_key: usize) -> Option { QUANTUM_REGISTRY.get().and_then(|map| { map.lock() - .unwrap() - .get(&func_key) - .map(|state| state.active.iter().filter(|a| **a).count()) + .unwrap() + .get(&func_key) + .map(|state| state.active.iter().filter(|a| **a).count()) }) } @@ -295,9 +292,9 @@ pub(crate) fn quantum_active_variant_count(func_key: usize) -> Option { pub(crate) fn quantum_stability_for(func_key: usize) -> Option { 
QUANTUM_REGISTRY.get().and_then(|map| { map.lock() - .unwrap() - .get(&func_key) - .map(quantum_stability_score) + .unwrap() + .get(&func_key) + .map(quantum_stability_score) }) } @@ -314,8 +311,8 @@ pub(crate) fn choose_quantum_index(state: &mut QuantumState) -> usize { let active = active_indices(state); if active.is_empty() { return state - .baseline_idx - .min(state.entries.len().saturating_sub(1)); + .baseline_idx + .min(state.entries.len().saturating_sub(1)); } state.total_runs = state.total_runs.saturating_add(1); @@ -325,8 +322,8 @@ pub(crate) fn choose_quantum_index(state: &mut QuantumState) -> usize { return active[rr]; } let mut best_idx = state - .baseline_idx - .min(state.entries.len().saturating_sub(1)); + .baseline_idx + .min(state.entries.len().saturating_sub(1)); let mut best_score = f64::MAX; for idx in active { let s = &state.stats[idx]; @@ -341,10 +338,10 @@ pub(crate) fn choose_quantum_index(state: &mut QuantumState) -> usize { pub(crate) fn has_unrun_active_variant(state: &QuantumState) -> bool { state - .stats - .iter() - .enumerate() - .any(|(idx, stats)| state.active.get(idx).copied().unwrap_or(false) && stats.runs == 0) + .stats + .iter() + .enumerate() + .any(|(idx, stats)| state.active.get(idx).copied().unwrap_or(false) && stats.runs == 0) } pub(crate) fn should_use_quantum_dispatch( @@ -389,20 +386,20 @@ pub fn quantum_profile_snapshot(func_key: usize) -> Option = Lazy::new(|| AtomicUsize::new(0)); -static JIT_REGISTRY: OnceCell>> = - OnceCell::new(); +static JIT_REGISTRY: OnceCell>> = OnceCell::new(); -static NAMED_JIT_REGISTRY: OnceCell>> = - OnceCell::new(); +static NAMED_JIT_REGISTRY: OnceCell>> = OnceCell::new(); pub fn next_jit_func_id() -> usize { JIT_FUNC_COUNTER.fetch_add(1, Ordering::Relaxed) @@ -76,19 +74,7 @@ make_invoke!(iris_jit_invoke_6, a0, a1, a2, a3, a4, a5); make_invoke!(iris_jit_invoke_7, a0, a1, a2, a3, a4, a5, a6); make_invoke!(iris_jit_invoke_8, a0, a1, a2, a3, a4, a5, a6, a7); make_invoke!(iris_jit_invoke_9, a0, 
a1, a2, a3, a4, a5, a6, a7, a8); -make_invoke!( - iris_jit_invoke_10, - a0, - a1, - a2, - a3, - a4, - a5, - a6, - a7, - a8, - a9 -); +make_invoke!(iris_jit_invoke_10, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); make_invoke!( iris_jit_invoke_11, a0, diff --git a/src/py/jit/config.rs b/src/py/jit/config.rs index 663dcc2..783c399 100644 --- a/src/py/jit/config.rs +++ b/src/py/jit/config.rs @@ -11,6 +11,7 @@ use std::time::{SystemTime, UNIX_EPOCH}; use tracing::info; +use crate::logging; use crate::py::jit::codegen::JitReturnType; static JIT_LOG_OVERRIDE: AtomicI8 = AtomicI8::new(-1); // -1 env, 0 off, 1 on @@ -190,7 +191,7 @@ where } } - // Use `tracing` when available; falls back to stderr if no subscriber is set. + logging::init_logger(); info!(target: "iris::jit", "{}", msg()); } diff --git a/src/py/jit/mod.rs b/src/py/jit/mod.rs index d460a03..773293d 100644 --- a/src/py/jit/mod.rs +++ b/src/py/jit/mod.rs @@ -8,17 +8,16 @@ #![allow(non_local_definitions)] +use std::collections::HashMap; +use std::panic::{catch_unwind, AssertUnwindSafe}; use std::sync::Arc; use std::time::Instant; -use std::panic::{catch_unwind, AssertUnwindSafe}; #[cfg(feature = "pyo3")] use pyo3::prelude::*; #[cfg(feature = "pyo3")] use pyo3::types::{PyDict, PyTuple}; -use pyo3::AsPyPointer; - pub(crate) mod codegen; pub(crate) mod config; pub(crate) mod heuristics; @@ -27,48 +26,29 @@ pub(crate) mod quantum; pub(crate) mod simd; pub(crate) use crate::py::jit::config::{ - jit_log, - jit_logging_enabled, - quantum_compile_budget_ns, - quantum_compile_window_ns, - quantum_cooldown_base_ns, - quantum_cooldown_max_ns, - quantum_log_threshold_ns, - quantum_speculation_enabled, - quantum_speculation_threshold_ns, - quantum_stability_min_runs, - quantum_stability_min_score, - set_jit_logging_env_var, - set_jit_logging_override, - set_quantum_speculation_env_var, - set_quantum_speculation_override, + jit_log, jit_logging_enabled, quantum_compile_budget_ns, quantum_compile_window_ns, + 
quantum_cooldown_base_ns, quantum_cooldown_max_ns, quantum_log_threshold_ns, + quantum_speculation_enabled, quantum_speculation_threshold_ns, quantum_stability_min_runs, + quantum_stability_min_score, set_jit_logging_env_var, set_jit_logging_override, + set_quantum_speculation_env_var, set_quantum_speculation_override, }; #[cfg(test)] pub(crate) use crate::py::jit::config::{jit_log_clear_hook, jit_log_hook}; use crate::py::jit::config::{ - jit_quantum_compile_budget_env_var, - jit_quantum_compile_window_env_var, - jit_quantum_cooldown_base_env_var, - jit_quantum_cooldown_max_env_var, - jit_quantum_log_env_var, - jit_quantum_speculation_env_var, - now_ns, - panic_payload_to_string, - parse_return_type, + jit_quantum_compile_budget_env_var, jit_quantum_compile_window_env_var, + jit_quantum_cooldown_base_env_var, jit_quantum_cooldown_max_env_var, jit_quantum_log_env_var, + jit_quantum_speculation_env_var, now_ns, panic_payload_to_string, parse_return_type, }; pub(crate) use crate::py::jit::quantum::{ - maybe_rearm_quantum_compile, - quantum_compile_may_run, - record_quantum_compile_attempt, + maybe_rearm_quantum_compile, quantum_compile_may_run, record_quantum_compile_attempt, register_quantum_rearm_plan, }; #[cfg(test)] pub(crate) use crate::py::jit::quantum::{ - clear_quantum_rearm_plan_for_test, - register_quantum_rearm_plan_for_test, + clear_quantum_rearm_plan_for_test, register_quantum_rearm_plan_for_test, reset_quantum_control_state, }; @@ -145,6 +125,8 @@ impl OffloadPool { // shared singleton static OFFLOAD_POOL: once_cell::sync::OnceCell> = once_cell::sync::OnceCell::new(); +static OFFLOAD_STRATEGY: once_cell::sync::OnceCell>> = + once_cell::sync::OnceCell::new(); fn get_offload_pool() -> Arc { OFFLOAD_POOL @@ -152,6 +134,21 @@ fn get_offload_pool() -> Arc { .clone() } +fn set_offload_strategy(func_key: usize, strategy: &str) { + let map = OFFLOAD_STRATEGY.get_or_init(|| std::sync::Mutex::new(HashMap::new())); + if let Ok(mut guard) = map.lock() { + 
guard.insert(func_key, strategy.to_ascii_lowercase()); + } +} + +fn get_offload_strategy(func_key: usize) -> Option { + OFFLOAD_STRATEGY.get().and_then(|map| { + map.lock() + .ok() + .and_then(|guard| guard.get(&func_key).cloned()) + }) +} + // Python bindings ----------------------------------------------------------- /// Initialize the Python submodule (called from `wrappers.populate_module`). @@ -379,12 +376,13 @@ fn register_offload( source_expr: Option, arg_names: Option>, ) -> PyResult { + let key = func.as_ptr() as usize; if let Some(ref s) = strategy { + set_offload_strategy(key, s); if s == "actor" { let _ = get_offload_pool(); } else if s == "jit" { if let (Some(expr), Some(args)) = (source_expr.clone(), arg_names.clone()) { - let key = func.as_ptr() as usize; let func_name = Python::with_gil(|py| { func.as_ref(py) .getattr("__name__") @@ -591,17 +589,20 @@ fn offload_call( kwargs: Option<&PyDict>, ) -> PyResult { let key = func.as_ptr() as usize; - if let Some(res) = execute_registered_jit_guarded(py, key, args) { - if let Ok(obj) = res { - return Ok(obj); - } - if let Err(err) = &res { - jit_log(|| { - format!( - "[Iris][jit] guarded execution failed in offload_call; falling back: {}", - err - ) - }); + let use_jit = !matches!(get_offload_strategy(key).as_deref(), Some("actor")); + if use_jit { + if let Some(res) = execute_registered_jit_guarded(py, key, args) { + if let Ok(obj) = res { + return Ok(obj); + } + if let Err(err) = &res { + jit_log(|| { + format!( + "[Iris][jit] guarded execution failed in offload_call; falling back: {}", + err + ) + }); + } } } diff --git a/src/py/mod.rs b/src/py/mod.rs index 1d7c4ba..834e522 100644 --- a/src/py/mod.rs +++ b/src/py/mod.rs @@ -10,6 +10,8 @@ pub mod mailbox; pub mod pool; pub mod runtime; pub mod utils; +#[cfg(feature = "vortex")] +pub mod vortex; pub mod wrappers; // re-export a few helpers for external callers (tests, build scripts, etc.) 
diff --git a/src/py/pool.rs b/src/py/pool.rs index 2b2aad8..8cd1590 100644 --- a/src/py/pool.rs +++ b/src/py/pool.rs @@ -3,7 +3,7 @@ #![allow(non_local_definitions)] use crossbeam_channel as cb_channel; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::Arc; use std::sync::OnceLock; use std::time::Duration; @@ -21,6 +21,8 @@ pub(crate) enum PoolTask { Execute { behavior: Arc>, bytes: bytes::Bytes, + pid_holder: Arc, + rt: Arc, }, HotSwap { behavior: Arc>, @@ -48,19 +50,27 @@ impl GilPool { } match rx.recv_timeout(Duration::from_millis(100)) { Ok(task) => match task { - PoolTask::Execute { behavior, bytes } => { + PoolTask::Execute { + behavior, + bytes, + pid_holder, + rt, + } => { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { break; } - Python::with_gil(|py| { + let success = crate::py::utils::run_python_callback(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + cb.call1((pybytes,)).map(|_| ()) }); + if !success { + let pid = pid_holder.load(Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } PoolTask::HotSwap { behavior, ptr } => { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { @@ -133,19 +143,27 @@ pub(crate) fn make_release_gil_channel( } match rx.recv_timeout(Duration::from_millis(100)) { Ok(task) => match task { - PoolTask::Execute { behavior, bytes } => { + PoolTask::Execute { + behavior, + bytes, + pid_holder, + rt, + } => { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { continue; } - Python::with_gil(|py| { + let success = crate::py::utils::run_python_callback(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + 
cb.call1((pybytes,)).map(|_| ()) }); + if !success { + let pid = pid_holder.load(Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } PoolTask::HotSwap { behavior, ptr } => { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { diff --git a/src/py/runtime.rs b/src/py/runtime.rs index dd17fd5..7a0505d 100644 --- a/src/py/runtime.rs +++ b/src/py/runtime.rs @@ -6,6 +6,7 @@ use bytes; use pyo3::prelude::*; use pyo3::types::PyBytes; use pyo3_asyncio::tokio::future_into_py; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex as TokioMutex; @@ -13,9 +14,27 @@ use tokio::sync::Mutex as TokioMutex; use crate::mailbox::OverflowPolicy; use crate::Runtime; +#[cfg(feature = "vortex")] +fn run_py_rescue_blocking(f: F) +where + F: FnOnce(), +{ + crate::vortex::rescue_pool::RescuePool::run_blocking(f); +} + +#[cfg(not(feature = "vortex"))] +fn run_py_rescue_blocking(f: F) +where + F: FnOnce(), +{ + f(); +} + use super::mailbox::PyMailbox; use super::pool::{make_release_gil_channel, PoolTask, GIL_WORKER_POOL}; use super::utils::{message_to_py, run_python_matcher}; +#[cfg(feature = "vortex")] +use crate::vortex::VortexGhostPolicy; #[pyclass] pub struct PyRuntime { @@ -186,6 +205,129 @@ impl PyRuntime { Ok(()) } + #[cfg(feature = "vortex")] + fn vortex_set_auto_ghost_policy(&self, policy: String) -> PyResult { + let parsed = match policy.to_lowercase().as_str() { + "firstsafepointwins" | "first_safe_point_wins" | "first-safe-point-wins" => { + VortexGhostPolicy::FirstSafePointWins + } + "preferprimary" | "prefer_primary" | "prefer-primary" => { + VortexGhostPolicy::PreferPrimary + } + _ => { + return Err(pyo3::exceptions::PyValueError::new_err( + "invalid vortex policy (expected FirstSafePointWins or PreferPrimary)", + )) + } + }; + Ok(self.inner.vortex_set_auto_ghost_policy(parsed)) + } + + #[cfg(feature = "vortex")] + fn vortex_get_auto_ghost_policy(&self) -> PyResult> { + 
Ok(self.inner.vortex_auto_ghost_policy().map(|p| match p { + VortexGhostPolicy::FirstSafePointWins => "FirstSafePointWins".to_string(), + VortexGhostPolicy::PreferPrimary => "PreferPrimary".to_string(), + })) + } + + #[cfg(feature = "vortex")] + fn vortex_get_auto_resolution_counts(&self) -> PyResult<(u64, u64)> { + Ok(self.inner.vortex_auto_resolution_counts()) + } + + #[cfg(feature = "vortex")] + fn vortex_get_auto_replay_count(&self) -> PyResult { + Ok(self.inner.vortex_auto_replay_count()) + } + + #[cfg(feature = "vortex")] + fn vortex_reset_auto_telemetry(&self) -> PyResult<()> { + self.inner.vortex_reset_auto_telemetry(); + Ok(()) + } + + #[cfg(feature = "vortex")] + fn vortex_set_genetic_budgeting(&self, enabled: bool) -> PyResult { + Ok(self.inner.vortex_set_genetic_budgeting(enabled)) + } + + #[cfg(feature = "vortex")] + fn vortex_get_genetic_budgeting(&self) -> PyResult { + Ok(self + .inner + .vortex_genetic_budgeting_enabled() + .unwrap_or(false)) + } + + #[cfg(feature = "vortex")] + fn vortex_set_genetic_thresholds(&self, low: f64, high: f64) -> PyResult { + Ok(self.inner.vortex_set_genetic_thresholds(low, high)) + } + + #[cfg(feature = "vortex")] + fn vortex_get_genetic_thresholds(&self) -> PyResult> { + Ok(self.inner.vortex_genetic_thresholds()) + } + + #[cfg(feature = "vortex")] + fn vortex_set_isolation_disallowed_ops(&self, ops: Vec) -> PyResult { + crate::py::vortex::set_isolation_disallowed_ops(ops.clone()); + Ok(self.inner.vortex_set_isolation_disallowed_ops(ops)) + } + + #[cfg(feature = "vortex")] + fn vortex_get_isolation_disallowed_ops(&self) -> PyResult>> { + Ok(Some(crate::py::vortex::get_isolation_disallowed_ops())) + } + + #[cfg(feature = "vortex")] + fn vortex_watchdog_enable(&self) -> PyResult { + Ok(self.inner.vortex_watchdog_enable()) + } + + #[cfg(feature = "vortex")] + fn vortex_watchdog_disable(&self) -> PyResult { + Ok(self.inner.vortex_watchdog_disable()) + } + + #[cfg(feature = "vortex")] + fn vortex_watchdog_enabled(&self) -> 
PyResult> { + Ok(self.inner.vortex_watchdog_enabled()) + } + + #[cfg(feature = "vortex")] + fn vortex_get_genetic_history(&self, pid: u64) -> PyResult> { + Ok(self.inner.vortex_genetic_history(pid)) + } + + #[cfg(feature = "vortex")] + fn vortex_set_isolation_mode(&self, enabled: bool) -> PyResult<()> { + crate::py::vortex::set_isolation_mode(enabled); + Ok(()) + } + + #[cfg(feature = "vortex")] + fn vortex_get_isolation_mode(&self) -> PyResult { + Ok(crate::py::vortex::get_isolation_mode()) + } + + #[cfg(feature = "vortex")] + fn vortex_get_all_genetic_history(&self) -> PyResult> { + Ok(self + .inner + .vortex_get_all_genetic_history() + .into_iter() + .map(|(pid, suspend_count, total_count)| (pid, suspend_count, total_count)) + .collect()) + } + + #[cfg(feature = "vortex")] + fn vortex_reset_genetic_history(&self) -> PyResult<()> { + self.inner.vortex_reset_genetic_history(); + Ok(()) + } + /// Phase 5: Send a binary payload to a PID on a remote node. fn send_remote(&self, addr: String, pid: u64, data: &PyBytes) -> PyResult<()> { let bytes = bytes::Bytes::copy_from_slice(data.as_bytes()); @@ -290,6 +432,9 @@ impl PyRuntime { ) -> PyResult { let release = release_gil.unwrap_or(false); let behavior = Arc::new(parking_lot::RwLock::new(py_callable)); + let pid_holder = Arc::new(AtomicU64::new(0)); + let pid_holder_clone = pid_holder.clone(); + let rt = self.inner.clone(); // compute maybe_tx using shared helper (propagates error on strict limit) let maybe_tx = make_release_gil_channel(&self.inner, release, behavior.clone())?; @@ -297,6 +442,8 @@ impl PyRuntime { let b = behavior.clone(); let tx = maybe_tx.clone(); let release_gil = release; + let pid_holder = pid_holder_clone.clone(); + let rt = rt.clone(); async move { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { return; @@ -309,6 +456,8 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: b.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = tx.send(task); } 
@@ -331,18 +480,29 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: b.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = pool.sender.send(task); } else { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = b.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } } crate::mailbox::Message::System( @@ -379,15 +539,24 @@ impl PyRuntime { }); } crate::mailbox::Message::User(bytes) => { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = b.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } crate::mailbox::Message::System(crate::mailbox::SystemMessage::Exit( _info, @@ -399,13 +568,17 @@ impl PyRuntime { } crate::mailbox::Message::System(crate::mailbox::SystemMessage::Ping) | crate::mailbox::Message::System(crate::mailbox::SystemMessage::Pong) - | crate::mailbox::Message::System(crate::mailbox::SystemMessage::Backpressure(_)) => {} + | crate::mailbox::Message::System( + crate::mailbox::SystemMessage::Backpressure(_), + ) => {} } } } }; - Ok(self.inner.spawn_handler_with_budget(handler, budget)) + let pid = 
self.inner.spawn_handler_with_budget(handler, budget); + pid_holder.store(pid, Ordering::SeqCst); + Ok(pid) } /// Lazy/virtual variant of spawn_py_handler. @@ -419,9 +592,14 @@ impl PyRuntime { idle_timeout_ms: Option, ) -> PyResult { let behavior = Arc::new(parking_lot::RwLock::new(py_callable)); + let pid_holder = Arc::new(AtomicU64::new(0)); + let pid_holder_clone = pid_holder.clone(); + let rt = self.inner.clone(); let handler = move |msg: crate::mailbox::Message| { let b = behavior.clone(); + let pid_holder = pid_holder_clone.clone(); + let rt = rt.clone(); async move { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { return; @@ -438,30 +616,43 @@ impl PyRuntime { }); } crate::mailbox::Message::User(bytes) => { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = b.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } crate::mailbox::Message::System(crate::mailbox::SystemMessage::Exit(_info)) => { } crate::mailbox::Message::System(crate::mailbox::SystemMessage::DropOld) => {} crate::mailbox::Message::System(crate::mailbox::SystemMessage::Ping) | crate::mailbox::Message::System(crate::mailbox::SystemMessage::Pong) - | crate::mailbox::Message::System(crate::mailbox::SystemMessage::Backpressure(_)) => {} + | crate::mailbox::Message::System( + crate::mailbox::SystemMessage::Backpressure(_), + ) => {} } } }; let idle = idle_timeout_ms.map(Duration::from_millis); - Ok(self + let pid = self .inner - .spawn_virtual_handler_with_budget(handler, budget, idle)) + .spawn_virtual_handler_with_budget(handler, budget, idle); + 
pid_holder.store(pid, Ordering::SeqCst); + Ok(pid) } /// Bounded mailbox variant of spawn_py_handler. @@ -474,11 +665,16 @@ impl PyRuntime { ) -> PyResult { let release = release_gil.unwrap_or(false); let behavior = Arc::new(parking_lot::RwLock::new(py_callable)); + let pid_holder = Arc::new(AtomicU64::new(0)); + let pid_holder_clone = pid_holder.clone(); + let rt = self.inner.clone(); let maybe_tx = make_release_gil_channel(&self.inner, release, behavior.clone())?; let handler = move |mut rx: crate::mailbox::MailboxReceiver| { let maybe_tx = maybe_tx.clone(); let behavior = behavior.clone(); + let pid_holder = pid_holder_clone.clone(); + let rt = rt.clone(); async move { while let Some(msg) = rx.recv().await { if unsafe { pyo3::ffi::Py_IsInitialized() } == 0 { @@ -491,6 +687,8 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: behavior.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = tx.send(task); } @@ -512,18 +710,31 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: behavior.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = pool.sender.send(task); } else { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py( + py, + |_py| cb.call1((pybytes,)).map(|_| ()), + ); + }); + ok }); + if !success { + let pid = + pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } } crate::mailbox::Message::System( @@ -563,15 +774,24 @@ impl PyRuntime { }); } crate::mailbox::Message::User(bytes) => { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); 
let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } _ => {} } @@ -580,9 +800,11 @@ impl PyRuntime { } }; - Ok(self + let pid = self .inner - .spawn_actor_with_budget_bounded(handler, budget, capacity)) + .spawn_actor_with_budget_bounded(handler, budget, capacity); + pid_holder.store(pid, Ordering::SeqCst); + Ok(pid) } /// Spawn a child actor; lifetime is tied to `parent`. @@ -631,11 +853,16 @@ impl PyRuntime { // Implementation mirrors spawn_py_handler but uses parent API. let release = release_gil.unwrap_or(false); let behavior = Arc::new(parking_lot::RwLock::new(py_callable)); + let pid_holder = Arc::new(AtomicU64::new(0)); + let pid_holder_clone = pid_holder.clone(); + let rt = self.inner.clone(); let maybe_tx = make_release_gil_channel(&self.inner, release, behavior.clone())?; let handler = move |msg: crate::mailbox::Message| { let maybe_tx = maybe_tx.clone(); let behavior = behavior.clone(); + let pid_holder = pid_holder_clone.clone(); + let rt = rt.clone(); async move { if let Some(tx) = maybe_tx { // blocking GIL thread path @@ -644,6 +871,8 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: behavior.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = tx.send(task); } @@ -665,18 +894,29 @@ impl PyRuntime { let task = PoolTask::Execute { behavior: behavior.clone(), bytes: bytes.clone(), + pid_holder: pid_holder.clone(), + rt: rt.clone(), }; let _ = pool.sender.send(task); } else { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); let pybytes = 
PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } } crate::mailbox::Message::System( @@ -715,15 +955,24 @@ impl PyRuntime { *guard = new_obj; }, crate::mailbox::Message::User(bytes) => { - Python::with_gil(|py| { + let success = Python::with_gil(|py| { let guard = behavior.read(); let cb = guard.as_ref(py); let pybytes = PyBytes::new(py, &bytes); - if let Err(e) = cb.call1((pybytes,)) { - eprintln!("[Iris] Python actor exception: {}", e); - e.print(py); - } + let mut ok = true; + run_py_rescue_blocking(|| { + ok = crate::py::utils::run_python_callback_py(py, |_py| { + cb.call1((pybytes,)).map(|_| ()) + }); + }); + ok }); + if !success { + let pid = pid_holder.load(std::sync::atomic::Ordering::SeqCst); + if pid != 0 { + rt.stop(pid); + } + } } _ => {} } @@ -731,9 +980,11 @@ impl PyRuntime { } }; - Ok(self + let pid = self .inner - .spawn_child_handler_with_budget(parent, handler, budget)) + .spawn_child_handler_with_budget(parent, handler, budget); + pid_holder.store(pid, Ordering::SeqCst); + Ok(pid) } /// Spawns a pull-based actor. @@ -756,10 +1007,12 @@ impl PyRuntime { Python::with_gil(|py| { // Just call the function. 
It is expected to block on mailbox.recv() - if let Err(e) = py_callable.call1(py, (mailbox,)) { - eprintln!("[Iris] Python mailbox actor exception: {}", e); - e.print(py); - } + run_py_rescue_blocking(|| { + if let Err(e) = py_callable.call1(py, (mailbox,)) { + eprintln!("[Iris] Python mailbox actor exception: {}", e); + e.print(py); + } + }); }); }); @@ -793,10 +1046,12 @@ impl PyRuntime { } Python::with_gil(|py| { - if let Err(e) = py_callable.call1(py, (mailbox,)) { - eprintln!("[Iris] Python mailbox actor exception: {}", e); - e.print(py); - } + run_py_rescue_blocking(|| { + if let Err(e) = py_callable.call1(py, (mailbox,)) { + eprintln!("[Iris] Python mailbox actor exception: {}", e); + e.print(py); + } + }); }); }); diff --git a/src/py/utils.rs b/src/py/utils.rs index 2ef77b7..253e2a8 100644 --- a/src/py/utils.rs +++ b/src/py/utils.rs @@ -5,6 +5,45 @@ use crate::mailbox; use pyo3::prelude::*; use pyo3::types::PyBytes; +use std::panic::{catch_unwind, AssertUnwindSafe}; + +/// Execute a Python callback while safely catching Rust panics and +/// Python exceptions. Returns `true` if the callback completed normally. +pub(crate) fn run_python_callback_py(py: Python, f: F) -> bool +where + F: FnOnce(Python) -> PyResult<()>, +{ + let result = catch_unwind(AssertUnwindSafe(|| match f(py) { + Ok(()) => Ok(()), + Err(err) => { + eprintln!("[Iris] Python actor exception: {}", err); + // PyErr::print() calls CPython's PyErr_Print, which terminates the process + // if the error is SystemExit. We must completely avoid it for SystemExit. + if !err.is_instance_of::(py) { + err.print(py); + } + Err(()) + } + })); + + match result { + Ok(Ok(())) => true, + Ok(Err(())) => false, + Err(payload) => { + eprintln!("[Iris] Python actor unwind: {:?}", payload); + false + } + } +} + +/// Execute a Python callback while safely catching Rust panics and +/// Python exceptions. Returns `true` if the callback completed normally. 
+pub(crate) fn run_python_callback(f: F) -> bool +where + F: FnOnce(Python) -> PyResult<()>, +{ + Python::with_gil(|py| run_python_callback_py(py, f)) +} /// Python-friendly structured system message used during conversions. #[pyclass] diff --git a/src/py/vortex.rs b/src/py/vortex.rs new file mode 100644 index 0000000..364010a --- /dev/null +++ b/src/py/vortex.rs @@ -0,0 +1,610 @@ +#![allow(non_local_definitions)] + +use crate::vortex::vortex_bytecode::{ + decode_wordcode, encode_wordcode, evaluate_rewrite_compatibility, instrument_with_probe, + opcode_meta, probe_instructions, quickening_support, read_exception_entries, + validate_probe_compatibility, verify_cache_layout, verify_exception_handler_targets, + verify_exception_table_invariants, verify_stacksize_minimum, +}; +use once_cell::sync::Lazy; +use pyo3::prelude::*; +use pyo3::types::{IntoPyDict, PyBytes, PyDict}; +use std::collections::HashSet; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::Mutex; + +pyo3::create_exception!(iris, VortexSuspend, pyo3::exceptions::PyException); + +static BUDGET: AtomicUsize = AtomicUsize::new(0); +static ISOLATION_MODE: AtomicBool = AtomicBool::new(false); +static ISOLATION_DISALLOWED_OPS: Lazy>> = + Lazy::new(|| Mutex::new(HashSet::new())); +const MAX_PATCHED_CODE_BYTES: usize = 8 * 1024 * 1024; + +#[derive(Debug, Clone)] +struct GuardTelemetry { + mode: String, + reason: String, + py_minor: i32, + rewrite_attempted: bool, + rewrite_applied: bool, +} + +impl Default for GuardTelemetry { + fn default() -> Self { + GuardTelemetry { + mode: "unset".to_string(), + reason: "none".to_string(), + py_minor: -1, + rewrite_attempted: false, + rewrite_applied: false, + } + } +} + +static GUARD_TELEMETRY: Lazy> = + Lazy::new(|| Mutex::new(GuardTelemetry::default())); + +fn set_guard_telemetry(mode: &str, reason: &str, py_minor: i32, attempted: bool, applied: bool) { + if let Ok(mut g) = GUARD_TELEMETRY.lock() { + g.mode = mode.to_string(); + g.reason = 
reason.to_string(); + g.py_minor = py_minor; + g.rewrite_attempted = attempted; + g.rewrite_applied = applied; + } +} + +fn test_hook_enabled(py: Python, key: &str) -> bool { + let locals = PyDict::new(py); + if locals.set_item("_iris_key", key).is_err() { + return false; + } + py.eval( + "__import__('os').environ.get(_iris_key, '0') == '1'", + None, + Some(locals), + ) + .and_then(|v| v.extract::()) + .unwrap_or(false) +} + +#[pyfunction] +pub fn get_guard_status(py: Python) -> PyResult { + let g = GUARD_TELEMETRY + .lock() + .map_err(|_| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/guard-status: lock poisoned") + })? + .clone(); + + let d = PyDict::new(py); + d.set_item("mode", g.mode)?; + d.set_item("reason", g.reason)?; + d.set_item("py_minor", g.py_minor)?; + d.set_item("rewrite_attempted", g.rewrite_attempted)?; + d.set_item("rewrite_applied", g.rewrite_applied)?; + Ok(d.into()) +} + +#[pyfunction] +pub fn _vortex_check() -> PyResult<()> { + let current = BUDGET.load(Ordering::Relaxed); + if current == 0 { + return Err(VortexSuspend::new_err("budget exhausted")); + } + BUDGET.store(current - 1, Ordering::Relaxed); + Ok(()) +} + +#[pyfunction] +pub fn set_budget(budget: usize) { + BUDGET.store(budget, Ordering::Relaxed); +} + +#[pyfunction] +pub fn set_isolation_mode(enabled: bool) { + ISOLATION_MODE.store(enabled, Ordering::Relaxed); +} + +#[pyfunction] +pub fn get_isolation_mode() -> bool { + ISOLATION_MODE.load(Ordering::Relaxed) +} + +#[pyfunction] +pub fn set_isolation_disallowed_ops(ops: Vec) { + let mut guard = ISOLATION_DISALLOWED_OPS.lock().unwrap(); + guard.clear(); + for op in ops { + guard.insert(op); + } +} + +#[pyfunction] +pub fn get_isolation_disallowed_ops() -> Vec { + let guard = ISOLATION_DISALLOWED_OPS.lock().unwrap(); + guard.iter().copied().collect() +} + +#[pyfunction] +pub fn transmute_function(py: Python, py_func: &PyAny) -> PyResult { + let py_minor: i32 = py + .eval("__import__('sys').version_info.minor", None, None) + 
.and_then(|v| v.extract()) + .unwrap_or(99); + + let code = py_func + .getattr("__code__") + .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/code: {e}")))?; + let raw: &[u8] = code + .getattr("co_code") + .and_then(|v| v.extract()) + .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/co_code: {e}")))?; + let original_stack_size: usize = code + .getattr("co_stacksize") + .and_then(|v| v.extract()) + .map_err(|e| { + pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/co_stacksize: {e}")) + })?; + + let globals_any = py_func + .getattr("__globals__") + .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/globals: {e}")))?; + let globals = globals_any.downcast::().map_err(|e| { + pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/globals-cast: {e}")) + })?; + let local_mod = match py + .import("sys") + .and_then(|s| s.getattr("modules")) + .and_then(|mods| mods.get_item("iris")) + { + Ok(m) => m, + Err(_) => match globals.get_item("iris")? { + Some(m) => m, + None => { + return Err(pyo3::exceptions::PyRuntimeError::new_err( + "vortex/module-lookup: iris missing", + )) + } + }, + }; + let check_fn = local_mod + .getattr("_vortex_check") + .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/check-fn: {e}")))?; + globals.set_item("_vortex_check", check_fn).map_err(|e| { + pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/globals-inject: {e}")) + })?; + + // Primary RFC path: bytecode-level shadow clone with capability checks. 
+ let meta = match opcode_meta(py) { + Ok(m) => m, + Err(_) => { + set_guard_telemetry( + "fallback", + "opcode_metadata_unavailable", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "opcode metadata unavailable"); + } + }; + + let quickening = match quickening_support(py) { + Ok(q) => q, + Err(_) => { + set_guard_telemetry( + "fallback", + "quickening_metadata_unavailable", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "quickening metadata unavailable"); + } + }; + + if let Err(reason) = evaluate_rewrite_compatibility(raw, meta.extended_arg, &quickening) { + set_guard_telemetry("fallback", reason, py_minor, false, false); + return fallback_shadow(py, py_func, reason); + } + + if verify_stacksize_minimum(original_stack_size).is_err() { + set_guard_telemetry( + "fallback", + "stack_depth_invariant_failed", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "stack depth invariant failed"); + } + + let original_entries = match read_exception_entries(py, code) { + Ok(entries) => entries, + Err(_) => { + set_guard_telemetry( + "fallback", + "exception_table_metadata_unavailable", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "exception table metadata unavailable"); + } + }; + if verify_exception_table_invariants(&original_entries, raw.len() / 2, original_stack_size) + .is_err() + { + set_guard_telemetry( + "fallback", + "exception_table_invalid", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "exception table invalid"); + } + + let original = decode_wordcode(raw, meta.extended_arg); + if verify_exception_handler_targets(&original_entries, &original, &quickening).is_err() { + set_guard_telemetry( + "fallback", + "exception_table_invalid", + py_minor, + false, + false, + ); + return fallback_shadow(py, py_func, "exception table invalid"); + } + + set_guard_telemetry("rewrite", "attempt", py_minor, true, false); + let 
force_patched_exception_invalid = + test_hook_enabled(py, "IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_INVALID"); + if force_patched_exception_invalid { + set_guard_telemetry( + "fallback", + "patched_exception_table_invalid", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched exception table invalid"); + } + if test_hook_enabled(py, "IRIS_VORTEX_TEST_FORCE_CODE_REPLACE_FAILED") { + set_guard_telemetry("fallback", "code_replace_failed", py_minor, true, false); + return fallback_shadow(py, py_func, "code replace failed"); + } + if test_hook_enabled(py, "IRIS_VORTEX_TEST_FORCE_TYPES_MODULE_UNAVAILABLE") { + set_guard_telemetry( + "fallback", + "types_module_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "types module unavailable"); + } + if test_hook_enabled(py, "IRIS_VORTEX_TEST_FORCE_SHADOW_CONSTRUCTION_FAILED") { + set_guard_telemetry( + "fallback", + "shadow_function_construction_failed", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "shadow function construction failed"); + } + if test_hook_enabled(py, "IRIS_VORTEX_TEST_FORCE_PROBE_INSTRUMENTATION_FAILED") { + set_guard_telemetry( + "fallback", + "probe_instrumentation_failed", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "probe instrumentation failed"); + } + if test_hook_enabled( + py, + "IRIS_VORTEX_TEST_FORCE_PATCHED_STACK_METADATA_UNAVAILABLE", + ) { + set_guard_telemetry( + "fallback", + "patched_stack_metadata_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched stack metadata unavailable"); + } + if test_hook_enabled( + py, + "IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_METADATA_UNAVAILABLE", + ) { + set_guard_telemetry( + "fallback", + "patched_exception_table_metadata_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched exception table metadata unavailable"); + } + + let probe = match 
probe_instructions(py, meta.extended_arg) { + Ok(v) => v, + Err(_) => { + set_guard_telemetry("fallback", "probe_extraction_failed", py_minor, true, false); + return fallback_shadow(py, py_func, "probe extraction failed"); + } + }; + + if let Err(reason) = validate_probe_compatibility(&probe, &quickening) { + set_guard_telemetry("fallback", reason, py_minor, true, false); + return fallback_shadow(py, py_func, reason); + } + + let patched = match instrument_with_probe(&original, &probe, &meta) { + Ok(v) => v, + Err(_) => { + set_guard_telemetry( + "fallback", + "probe_instrumentation_failed", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "probe instrumentation failed"); + } + }; + + if verify_cache_layout(&patched, &quickening).is_err() { + set_guard_telemetry( + "fallback", + "patched_cache_layout_invalid", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched cache layout invalid"); + } + + let final_patched = if ISOLATION_MODE.load(Ordering::Relaxed) { + let disallowed_ops = ISOLATION_DISALLOWED_OPS.lock().unwrap(); + match crate::vortex::vortex_bytecode::apply_isolation_transform( + &patched, + py, + Some(&*disallowed_ops), + ) { + Ok(isolated) => { + if verify_cache_layout(&isolated, &quickening).is_err() { + set_guard_telemetry( + "fallback", + "isolation_cache_layout_invalid", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "isolation cache layout invalid"); + } + isolated + } + Err(_) => { + set_guard_telemetry( + "fallback", + "isolation_transform_failed", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "isolation transform failed"); + } + } + } else { + patched + }; + + let final_raw = encode_wordcode(&final_patched, meta.extended_arg); + if final_raw.len() > MAX_PATCHED_CODE_BYTES { + set_guard_telemetry("fallback", "patched_code_too_large", py_minor, true, false); + return fallback_shadow(py, py_func, "patched code too large"); + } + + let kwargs = 
[("co_code", PyBytes::new(py, &final_raw))].into_py_dict(py); + let new_code = match code.call_method("replace", (), Some(kwargs)) { + Ok(v) => v, + Err(_) => { + set_guard_telemetry("fallback", "code_replace_failed", py_minor, true, false); + return fallback_shadow(py, py_func, "code replace failed"); + } + }; + + let patched_stack_size: usize = match new_code.getattr("co_stacksize").and_then(|v| v.extract()) + { + Ok(v) => v, + Err(_) => { + set_guard_telemetry( + "fallback", + "patched_stack_metadata_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched stack metadata unavailable"); + } + }; + let patched_entries = match read_exception_entries(py, new_code) { + Ok(v) => v, + Err(_) => { + set_guard_telemetry( + "fallback", + "patched_exception_table_metadata_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched exception table metadata unavailable"); + } + }; + if verify_exception_table_invariants(&patched_entries, final_raw.len() / 2, patched_stack_size) + .is_err() + { + set_guard_telemetry( + "fallback", + "patched_exception_table_invalid", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched exception table invalid"); + } + if verify_exception_handler_targets(&patched_entries, &final_patched, &quickening).is_err() { + set_guard_telemetry( + "fallback", + "patched_exception_table_invalid", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "patched exception table invalid"); + } + + let types_mod = match py.import("types") { + Ok(v) => v, + Err(_) => { + set_guard_telemetry( + "fallback", + "types_module_unavailable", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "types module unavailable"); + } + }; + + let func_globals: &PyDict = if ISOLATION_MODE.load(Ordering::Relaxed) { + let locals2 = PyDict::new(py); + locals2.set_item("base_globals", globals)?; + // Isolation uses a detached globals dict so 
STORE_GLOBAL/STORE_NAME mutate only + // this shadow environment, never the original module globals. + py.run("isolated_globals = dict(base_globals)", None, Some(locals2))?; + locals2 + .get_item("isolated_globals")? + .ok_or_else(|| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/isolated-globals: missing result") + })? + .downcast::()? + } else { + globals + }; + + let shadow = match types_mod.getattr("FunctionType").and_then(|ctor| { + ctor.call1(( + new_code, + func_globals, + py_func.getattr("__name__")?, + py_func.getattr("__defaults__")?, + py_func.getattr("__closure__")?, + )) + }) { + Ok(v) => v, + Err(_) => { + set_guard_telemetry( + "fallback", + "shadow_function_construction_failed", + py_minor, + true, + false, + ); + return fallback_shadow(py, py_func, "shadow function construction failed"); + } + }; + + if let Ok(kwdefaults) = py_func.getattr("__kwdefaults__") { + let _ = shadow.setattr("__kwdefaults__", kwdefaults); + } + set_guard_telemetry("rewrite", "applied", py_minor, true, true); + Ok(shadow.into()) +} + +fn fallback_shadow(py: Python, py_func: &PyAny, _reason: &str) -> PyResult { + let globals_any = py_func + .getattr("__globals__") + .map_err(|e| pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/globals: {e}")))?; + let globals = globals_any.downcast::().map_err(|e| { + pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/globals-cast: {e}")) + })?; + + let locals = PyDict::new(py); + locals.set_item("fn", py_func)?; + locals.set_item("isolation_mode", ISOLATION_MODE.load(Ordering::Relaxed))?; + py.run( + r#" +def _iris_make_shadow(fn, isolation_mode=False): + import types + import sys + + target_fn = fn + if isolation_mode: + isolated_globals = dict(fn.__globals__) + target_fn = types.FunctionType( + fn.__code__, + isolated_globals, + fn.__name__, + fn.__defaults__, + fn.__closure__, + ) + if hasattr(fn, "__kwdefaults__"): + target_fn.__kwdefaults__ = fn.__kwdefaults__ + + target_code = target_fn.__code__ + + def 
_trace(frame, event, arg): + if frame.f_code is not target_code: + return _trace + if event == "call": + return _trace + if event == "line": + _vortex_check() + return _trace + + def _wrapped(*a, **k): + old = sys.gettrace() + sys.settrace(_trace) + try: + return target_fn(*a, **k) + finally: + sys.settrace(old) + + return _wrapped + +shadow = _iris_make_shadow(fn, isolation_mode) +"#, + Some(globals), + Some(locals), + ) + .map_err(|e| { + pyo3::exceptions::PyRuntimeError::new_err(format!("vortex/shadow-fallback: {e}")) + })?; + let shadow = locals.get_item("shadow")?.ok_or_else(|| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/shadow-fallback: missing shadow") + })?; + + Ok(shadow.into()) +} + +pub fn init_py(m: &PyModule) -> PyResult<()> { + m.add("VortexSuspend", m.py().get_type::())?; + m.add_function(wrap_pyfunction!(_vortex_check, m)?)?; + m.add_function(wrap_pyfunction!(set_budget, m)?)?; + m.add_function(wrap_pyfunction!(get_guard_status, m)?)?; + m.add_function(wrap_pyfunction!(transmute_function, m)?)?; + m.add_function(wrap_pyfunction!(set_isolation_mode, m)?)?; + m.add_function(wrap_pyfunction!(get_isolation_mode, m)?)?; + m.add_function(wrap_pyfunction!(set_isolation_disallowed_ops, m)?)?; + m.add_function(wrap_pyfunction!(get_isolation_disallowed_ops, m)?)?; + Ok(()) +} diff --git a/src/py/wrappers.rs b/src/py/wrappers.rs index eccbcee..f4b261f 100644 --- a/src/py/wrappers.rs +++ b/src/py/wrappers.rs @@ -135,6 +135,7 @@ fn path_supervisor_children(rt: PyRef, path: String) -> PyResult PyResult<()> { + crate::logging::init_logger(); m.add_function(wrap_pyfunction!(version, m)?)?; m.add_class::()?; m.add_class::()?; @@ -158,6 +159,8 @@ fn populate_module(m: &PyModule) -> PyResult<()> { crate::py::jit::init_py(m)?; #[cfg(all(feature = "pyo3", not(feature = "jit")))] crate::py::jit_stub::init_py(m)?; + #[cfg(feature = "vortex")] + crate::py::vortex::init_py(m)?; Ok(()) } diff --git a/src/vortex/engine.rs b/src/vortex/engine.rs new file mode 100644 
index 0000000..38044e9 --- /dev/null +++ b/src/vortex/engine.rs @@ -0,0 +1,261 @@ +// src/vortex/engine.rs +//! Experimental Vortex runtime engine. + +use std::collections::HashMap; + +use crate::vortex::rescue_pool::RescuePool; +use crate::vortex::transaction::{ + VortexGhostPolicy, VortexGhostResolution, VortexTransaction, VortexVioCall, +}; +use crate::vortex::transmuter::{ + VortexExecutionContext, VortexInstruction, VortexSuspend, VortexTransmuter, +}; + +#[derive(Debug, Clone)] +pub struct VortexEngine { + pub enabled: bool, + pub transmuter: VortexTransmuter, + pub transaction: Option, + pub rescue_pool: RescuePool, + pub budget: usize, + pub current_code: Option>, + pub context: Option, + pub pending_code_swap: Option>, + ghost_transactions: HashMap, +} + +impl VortexEngine { + pub fn new() -> Self { + VortexEngine { + enabled: true, + transmuter: VortexTransmuter::new(1024), + transaction: None, + rescue_pool: RescuePool::new(), + budget: 1024, + current_code: None, + context: None, + pending_code_swap: None, + ghost_transactions: HashMap::new(), + } + } + + pub fn is_enabled(&self) -> bool { + self.enabled + } + + pub fn check_preemption(&mut self, instruction_cost: usize) -> bool { + if !self.enabled { + return false; + } + + let should_continue = self.transmuter.inject_reduction_checks(instruction_cost); + if !should_continue { + self.enabled = false; + } + + should_continue + } + + pub fn set_budget(&mut self, budget: usize) { + self.budget = budget; + self.transmuter.instruction_budget = budget; + self.enabled = true; + } + + /// Consume one reduction tick (Check) via transmuter instrumentation. Returns Err(VortexSuspend) if budget is exhausted. + pub fn preempt_tick(&mut self) -> Result<(), VortexSuspend> { + if !self.enabled { + return Err(VortexSuspend); + } + + // Execute only the injected reduction check opcode. 
+ let program = vec![ + VortexInstruction::IrisReductionCheck, + VortexInstruction::ReturnValue, + ]; + let mut ctx = VortexExecutionContext::new(); + match self.transmuter.execute_with_context(&program, &mut ctx) { + Ok(()) => Ok(()), + Err(e) => { + self.enabled = false; + Err(e) + } + } + } + + pub fn load_code(&mut self, code: Vec) { + let transmuted = self.transmuter.transmute(&code); + self.current_code = Some(transmuted); + self.context = Some(VortexExecutionContext::new()); + } + + pub fn stage_code_swap(&mut self, code: Vec) { + let transmuted = self.transmuter.transmute(&code); + self.pending_code_swap = Some(transmuted); + } + + pub fn try_apply_staged_swap(&mut self) -> bool { + let Some(staged) = self.pending_code_swap.clone() else { + return false; + }; + + // Idle actor: swap immediately. + if self.current_code.is_none() || self.context.is_none() { + self.current_code = Some(staged); + self.context = Some(VortexExecutionContext::new()); + self.pending_code_swap = None; + return true; + } + + // Mid-execution: only swap at quiescent points (stack depth zero). + if let (Some(code), Some(ctx)) = (&self.current_code, &self.context) { + let points = self.transmuter.quiescence_points(code); + if ctx.done || (ctx.stack_depth == 0 && points.contains(&ctx.pc)) { + self.current_code = Some(staged); + self.context = Some(VortexExecutionContext::new()); + self.pending_code_swap = None; + return true; + } + } + + false + } + + pub fn run(&mut self) -> Result<(), VortexSuspend> { + if !self.enabled { + return Err(VortexSuspend); + } + + if let (Some(code), Some(ctx)) = (&self.current_code, &mut self.context) { + let result = self.transmuter.execute_with_context(code, ctx); + if result.is_ok() { + // When a staged swap exists, apply it on completion; otherwise clear actor state. 
+ if self.pending_code_swap.is_some() { + let _ = self.try_apply_staged_swap(); + } else { + self.current_code = None; + self.context = None; + } + } + result + } else { + Ok(()) + } + } + + pub fn replenish_budget(&mut self, amount: usize) { + self.transmuter.instruction_budget += amount; + self.budget = self.transmuter.instruction_budget; + self.enabled = true; + } + + pub fn start_transaction(&mut self, id: u64) { + self.transaction = Some(VortexTransaction::new(id)); + } + + pub fn start_transaction_with_checkpoint(&mut self, id: u64, locals: HashMap>) { + let mut trx = VortexTransaction::new(id); + trx.checkpoint_locals(locals); + self.transaction = Some(trx); + } + + pub fn stage_transaction_vio(&mut self, op: String, payload: Vec) -> bool { + match &mut self.transaction { + Some(trx) => trx.stage_vio(op, payload), + None => false, + } + } + + pub fn start_ghost_transaction_with_checkpoint( + &mut self, + id: u64, + locals: HashMap>, + ) { + let mut trx = VortexTransaction::new(id); + trx.checkpoint_locals(locals); + self.ghost_transactions.insert(id, trx); + } + + pub fn stage_ghost_transaction_vio( + &mut self, + ghost_id: u64, + op: String, + payload: Vec, + ) -> bool { + match self.ghost_transactions.get_mut(&ghost_id) { + Some(trx) => trx.stage_vio(op, payload), + None => false, + } + } + + pub fn resolve_primary_ghost_race( + &mut self, + ghost_id: u64, + winner_id: u64, + policy: VortexGhostPolicy, + ) -> Option { + let primary = self.transaction.as_mut()?; + let ghost = self.ghost_transactions.get_mut(&ghost_id)?; + + let result = VortexTransaction::resolve_ghost_race(primary, ghost, winner_id, policy)?; + self.ghost_transactions.remove(&ghost_id); + Some(result) + } + + pub fn replay_committed_vio_calls(&self, calls: &[VortexVioCall], mut executor: F) -> usize + where + F: FnMut(&VortexVioCall) -> bool, + { + let mut applied = 0usize; + for call in calls { + applied = applied.saturating_add(1); + if !executor(call) { + break; + } + } + applied + } + 
+ pub fn transaction_staged_vio_len(&self) -> usize { + match &self.transaction { + Some(trx) => trx.staged_vio_len(), + None => 0, + } + } + + pub fn transaction_committed_vio_len(&self) -> usize { + match &self.transaction { + Some(trx) => trx.committed_vio_len(), + None => 0, + } + } + + pub fn take_committed_transaction_vio(&mut self) -> Vec { + match &mut self.transaction { + Some(trx) => trx.drain_committed_vio(), + None => Vec::new(), + } + } + + pub fn commit_transaction(&mut self) -> bool { + match &mut self.transaction { + Some(trx) => trx.commit(), + None => false, + } + } + + pub fn abort_transaction(&mut self) -> bool { + match &mut self.transaction { + Some(trx) => trx.abort(), + None => false, + } + } + + pub fn detach_stalled_thread(&mut self) { + self.rescue_pool.detach_thread(); + } + + pub fn reclaim_thread(&mut self) { + self.rescue_pool.reclaim_thread(); + } +} diff --git a/src/vortex/mod.rs b/src/vortex/mod.rs new file mode 100644 index 0000000..8b25dbf --- /dev/null +++ b/src/vortex/mod.rs @@ -0,0 +1,48 @@ +// src/vortex/mod.rs +//! Experimental Vortex subsystem (Feature gated) +//! +//! This module is intentionally isolated and opt-in. It is designed to host +//! Vortex experiments including deterministic preemption and transactional +//! actor execution strategies. 
+ +#[cfg(feature = "vortex")] +pub mod engine; + +#[cfg(feature = "vortex")] +pub mod scheduler; + +#[cfg(feature = "vortex")] +pub mod watcher; + +#[cfg(feature = "vortex")] +pub mod transmuter; + +#[cfg(feature = "vortex")] +pub mod transaction; + +#[cfg(feature = "vortex")] +pub mod vortex_bytecode; + +#[cfg(feature = "vortex")] +pub mod rescue_pool; + +#[cfg(feature = "vortex")] +pub use engine::VortexEngine; + +#[cfg(feature = "vortex")] +pub use scheduler::VortexScheduler; + +#[cfg(feature = "vortex")] +pub use watcher::VortexWatcher; + +#[cfg(feature = "vortex")] +pub use transmuter::VortexTransmuter; + +#[cfg(feature = "vortex")] +pub use transaction::{VortexGhostPolicy, VortexGhostResolution, VortexTransaction, VortexVioCall}; + +#[cfg(feature = "vortex")] +pub use rescue_pool::RescuePool; + +#[cfg(feature = "vortex")] +pub use transmuter::{VortexExecutionContext, VortexInstruction, VortexSuspend}; diff --git a/src/vortex/rescue_pool.rs b/src/vortex/rescue_pool.rs new file mode 100644 index 0000000..f062deb --- /dev/null +++ b/src/vortex/rescue_pool.rs @@ -0,0 +1,30 @@ +// src/vortex/rescue_pool.rs +//! Experimental rescue pool to isolate stalled C-bound threads stub. + +#[derive(Debug, Clone)] +pub struct RescuePool { + pub active_count: usize, +} + +impl RescuePool { + pub fn new() -> Self { + RescuePool { active_count: 0 } + } + + pub fn run_blocking(f: F) -> R + where + F: FnOnce() -> R, + { + f() + } + + pub fn detach_thread(&mut self) { + self.active_count += 1; + } + + pub fn reclaim_thread(&mut self) { + if self.active_count > 0 { + self.active_count -= 1; + } + } +} diff --git a/src/vortex/scheduler.rs b/src/vortex/scheduler.rs new file mode 100644 index 0000000..6736b50 --- /dev/null +++ b/src/vortex/scheduler.rs @@ -0,0 +1,14 @@ +// src/vortex/scheduler.rs +//! Experimental Vortex scheduler placeholder. 
+ +pub struct VortexScheduler; + +impl VortexScheduler { + pub fn new() -> Self { + VortexScheduler + } + + pub fn describe(&self) -> &'static str { + "vortex scheduler (stub)" + } +} diff --git a/src/vortex/transaction.rs b/src/vortex/transaction.rs new file mode 100644 index 0000000..1502cfd --- /dev/null +++ b/src/vortex/transaction.rs @@ -0,0 +1,129 @@ +// src/vortex/transaction.rs +//! Experimental speculative transactional fiber ghosting stub. + +use std::collections::HashMap; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct VortexVioCall { + pub op: String, + pub payload: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum VortexGhostPolicy { + FirstSafePointWins, + PreferPrimary, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct VortexGhostResolution { + pub winner_id: u64, + pub loser_id: u64, + pub committed_vio: Vec, +} + +#[derive(Debug, Clone)] +pub struct VortexTransaction { + pub id: u64, + pub committed: bool, + pub aborted: bool, + pub local_checkpoint: HashMap>, + staged_vio: Vec, + committed_vio: Vec, +} + +impl VortexTransaction { + pub fn new(id: u64) -> Self { + VortexTransaction { + id, + committed: false, + aborted: false, + local_checkpoint: HashMap::new(), + staged_vio: Vec::new(), + committed_vio: Vec::new(), + } + } + + pub fn checkpoint_locals(&mut self, locals: HashMap>) { + if self.committed || self.aborted { + return; + } + self.local_checkpoint = locals; + } + + pub fn stage_vio(&mut self, op: String, payload: Vec) -> bool { + if self.committed || self.aborted { + return false; + } + self.staged_vio.push(VortexVioCall { op, payload }); + true + } + + pub fn staged_vio_len(&self) -> usize { + self.staged_vio.len() + } + + pub fn committed_vio_len(&self) -> usize { + self.committed_vio.len() + } + + pub fn drain_committed_vio(&mut self) -> Vec { + std::mem::take(&mut self.committed_vio) + } + + pub fn resolve_ghost_race( + primary: &mut VortexTransaction, + ghost: &mut VortexTransaction, + winner_id: u64, + 
policy: VortexGhostPolicy, + ) -> Option { + let resolved_winner = match policy { + VortexGhostPolicy::FirstSafePointWins => winner_id, + VortexGhostPolicy::PreferPrimary => primary.id, + }; + + if resolved_winner == primary.id { + if !primary.commit() { + return None; + } + let _ = ghost.abort(); + return Some(VortexGhostResolution { + winner_id: primary.id, + loser_id: ghost.id, + committed_vio: primary.drain_committed_vio(), + }); + } + + if resolved_winner == ghost.id { + if !ghost.commit() { + return None; + } + let _ = primary.abort(); + return Some(VortexGhostResolution { + winner_id: ghost.id, + loser_id: primary.id, + committed_vio: ghost.drain_committed_vio(), + }); + } + + None + } + + pub fn commit(&mut self) -> bool { + if self.aborted || self.committed { + return false; + } + self.committed_vio.extend(self.staged_vio.drain(..)); + self.committed = true; + true + } + + pub fn abort(&mut self) -> bool { + if self.committed || self.aborted { + return false; + } + self.staged_vio.clear(); + self.aborted = true; + true + } +} diff --git a/src/vortex/transmuter.rs b/src/vortex/transmuter.rs new file mode 100644 index 0000000..b256bae --- /dev/null +++ b/src/vortex/transmuter.rs @@ -0,0 +1,223 @@ +// src/vortex/transmuter.rs +//! Experimental bytecode-level transmuter and verifier. 
+ +use std::collections::HashSet; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum VortexInstruction { + Nop, + LoadFast(u16), + StoreFast(u16), + BinaryOp(u8), + JumpForward(usize), + JumpBackward(usize), + PopJumpIfFalse(usize), + IrisReductionCheck, + ReturnValue, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct VortexSuspend; + +#[derive(Debug, Clone)] +pub struct VortexExecutionContext { + pub pc: usize, + pub stack_depth: i32, + pub done: bool, +} + +impl VortexExecutionContext { + pub fn new() -> Self { + VortexExecutionContext { + pc: 0, + stack_depth: 0, + done: false, + } + } +} + +#[derive(Debug, Clone)] +pub struct VortexTransmuter { + pub enabled: bool, + pub instruction_budget: usize, +} + +impl VortexTransmuter { + pub fn new(budget: usize) -> Self { + VortexTransmuter { + enabled: true, + instruction_budget: budget, + } + } + + fn is_backward_branch(from_idx: usize, target_idx: usize) -> bool { + target_idx <= from_idx + } + + /// Inject IRIS_REDUCTION_CHECK at function entry and backward-branch targets. 
+ pub fn transmute(&self, code: &[VortexInstruction]) -> Vec { + let mut check_sites: HashSet = HashSet::new(); + if !code.is_empty() { + check_sites.insert(0); + } + + for (idx, instr) in code.iter().enumerate() { + match instr { + VortexInstruction::JumpBackward(dest) | VortexInstruction::PopJumpIfFalse(dest) + if *dest < code.len() && Self::is_backward_branch(idx, *dest) => + { + check_sites.insert(*dest); + } + _ => {} + } + } + + let mut output: Vec = Vec::new(); + let mut mapping: Vec = vec![0; code.len()]; + + for (idx, instr) in code.iter().enumerate() { + mapping[idx] = output.len(); + if check_sites.contains(&idx) { + output.push(VortexInstruction::IrisReductionCheck); + } + output.push(instr.clone()); + } + + let mut patched = output.clone(); + for (idx, instr) in output.iter().enumerate() { + match instr { + VortexInstruction::JumpForward(dest) + | VortexInstruction::JumpBackward(dest) + | VortexInstruction::PopJumpIfFalse(dest) + if *dest < mapping.len() => + { + let mapped = mapping[*dest]; + patched[idx] = match instr { + VortexInstruction::JumpForward(_) => VortexInstruction::JumpForward(mapped), + VortexInstruction::JumpBackward(_) => { + VortexInstruction::JumpBackward(mapped) + } + VortexInstruction::PopJumpIfFalse(_) => { + VortexInstruction::PopJumpIfFalse(mapped) + } + _ => instr.clone(), + }; + } + _ => {} + } + } + + patched + } + + /// Return bytecode offsets where evaluation stack depth is zero. 
+ pub fn quiescence_points(&self, code: &[VortexInstruction]) -> Vec { + let mut depth = 0i32; + let mut points = Vec::new(); + + for (idx, instr) in code.iter().enumerate() { + match instr { + VortexInstruction::LoadFast(_) => depth += 1, + VortexInstruction::StoreFast(_) => depth = (depth - 1).max(0), + VortexInstruction::BinaryOp(_) => depth = (depth - 1).max(0), + VortexInstruction::PopJumpIfFalse(_) => depth = (depth - 1).max(0), + _ => {} + } + if depth == 0 { + points.push(idx); + } + } + + points + } + + pub fn execute(&mut self, code: &[VortexInstruction]) -> Result<(), VortexSuspend> { + let mut ctx = VortexExecutionContext::new(); + self.execute_with_context(code, &mut ctx) + } + + pub fn execute_with_context( + &mut self, + code: &[VortexInstruction], + ctx: &mut VortexExecutionContext, + ) -> Result<(), VortexSuspend> { + while !ctx.done && ctx.pc < code.len() { + match &code[ctx.pc] { + VortexInstruction::IrisReductionCheck => { + if self.instruction_budget == 0 { + return Err(VortexSuspend); + } + self.instruction_budget -= 1; + ctx.pc += 1; + } + VortexInstruction::LoadFast(_) => { + if self.instruction_budget == 0 { + return Err(VortexSuspend); + } + self.instruction_budget -= 1; + ctx.stack_depth += 1; + ctx.pc += 1; + } + VortexInstruction::StoreFast(_) => { + if self.instruction_budget == 0 { + return Err(VortexSuspend); + } + self.instruction_budget -= 1; + ctx.stack_depth = (ctx.stack_depth - 1).max(0); + ctx.pc += 1; + } + VortexInstruction::BinaryOp(_) => { + if self.instruction_budget == 0 { + return Err(VortexSuspend); + } + self.instruction_budget -= 1; + ctx.stack_depth = (ctx.stack_depth - 1).max(0); + ctx.pc += 1; + } + VortexInstruction::PopJumpIfFalse(dest) => { + if self.instruction_budget == 0 { + return Err(VortexSuspend); + } + self.instruction_budget -= 1; + ctx.stack_depth = (ctx.stack_depth - 1).max(0); + if ctx.stack_depth == 0 { + ctx.pc = *dest; + } else { + ctx.pc += 1; + } + } + VortexInstruction::JumpForward(dest) | 
VortexInstruction::JumpBackward(dest) => { + ctx.pc = *dest; + } + VortexInstruction::Nop => { + ctx.pc += 1; + } + VortexInstruction::ReturnValue => { + ctx.done = true; + return Ok(()); + } + } + } + + if ctx.pc >= code.len() { + ctx.done = true; + Ok(()) + } else { + Err(VortexSuspend) + } + } + + pub fn inject_reduction_checks(&mut self, instruction_count: usize) -> bool { + if !self.enabled { + return false; + } + + if instruction_count > self.instruction_budget { + self.enabled = false; + false + } else { + self.instruction_budget -= instruction_count; + true + } + } +} diff --git a/src/vortex/vortex_bytecode.rs b/src/vortex/vortex_bytecode.rs new file mode 100644 index 0000000..2c5d420 --- /dev/null +++ b/src/vortex/vortex_bytecode.rs @@ -0,0 +1,541 @@ +use pyo3::prelude::*; +use pyo3::types::{PyDict, PyList}; +use std::collections::HashSet; + +#[derive(Debug, Clone)] +pub struct OpcodeMeta { + pub extended_arg: u8, + pub hasjabs: HashSet, + pub hasjrel: HashSet, + pub backward_relative: HashSet, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Instruction { + pub op: u8, + pub arg: u32, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum VerifyError { + InvalidWordcodeShape, + EmptyProbe, + OversizedCode, + InvalidJumpTarget, + InvalidRelativeJump, + InvalidCacheLayout, + InvalidExceptionTable, + StackDepthInvariant, +} + +#[derive(Debug, Clone)] +pub struct QuickeningSupport { + pub cache_opcode: Option, + pub inline_cache_entries: Vec, +} + +const MAX_WORDCODE_BYTES: usize = 4 * 1024 * 1024; + +pub fn verify_wordcode_bytes(raw: &[u8]) -> Result<(), VerifyError> { + if raw.is_empty() || raw.len() % 2 != 0 { + return Err(VerifyError::InvalidWordcodeShape); + } + if raw.len() > MAX_WORDCODE_BYTES { + return Err(VerifyError::OversizedCode); + } + Ok(()) +} + +pub fn opcode_meta(py: Python) -> PyResult { + let meta = py.eval( + r#" +(lambda dis: ( + dis.opmap["EXTENDED_ARG"], + list(dis.hasjabs), + list(dis.hasjrel), + [op for name, op in 
dis.opmap.items() if "BACKWARD" in name], +))(__import__("dis")) +"#, + None, + None, + )?; + + let extended_arg: u8 = meta.get_item(0)?.extract()?; + let hasjabs: Vec = meta.get_item(1)?.extract()?; + let hasjrel: Vec = meta.get_item(2)?.extract()?; + let backward_relative: Vec = meta.get_item(3)?.extract()?; + + Ok(OpcodeMeta { + extended_arg, + hasjabs: hasjabs.into_iter().collect(), + hasjrel: hasjrel.into_iter().collect(), + backward_relative: backward_relative.into_iter().collect(), + }) +} + +pub fn quickening_support(py: Python) -> PyResult { + let data = py.eval( + r#" +(lambda dis: ( + dis.opmap.get("CACHE", -1), + list(getattr(dis, "_inline_cache_entries", [])), +))(__import__("dis")) +"#, + None, + None, + )?; + + let cache_raw: i16 = data.get_item(0)?.extract()?; + let entries: Vec = data.get_item(1)?.extract()?; + let cache_opcode = if cache_raw >= 0 { + Some(cache_raw as u8) + } else { + None + }; + + Ok(QuickeningSupport { + cache_opcode, + inline_cache_entries: entries, + }) +} + +pub fn decode_wordcode(raw: &[u8], extended_arg: u8) -> Vec { + let mut out = Vec::new(); + let mut ext: u32 = 0; + let mut i = 0usize; + + while i + 1 < raw.len() { + let op = raw[i]; + let arg = raw[i + 1] as u32; + if op == extended_arg { + ext = (ext << 8) | arg; + i += 2; + continue; + } + + let full_arg = (ext << 8) | arg; + out.push(Instruction { op, arg: full_arg }); + ext = 0; + i += 2; + } + + out +} + +pub fn encode_wordcode(instructions: &[Instruction], extended_arg: u8) -> Vec { + let mut out = Vec::new(); + + for ins in instructions { + let mut high = ins.arg >> 8; + let mut ext = Vec::new(); + while high > 0 { + ext.push((high & 0xFF) as u8); + high >>= 8; + } + + for b in ext.iter().rev() { + out.push(extended_arg); + out.push(*b); + } + + out.push(ins.op); + out.push((ins.arg & 0xFF) as u8); + } + + out +} + +fn jump_target(idx: usize, ins: &Instruction, meta: &OpcodeMeta, len: usize) -> Option { + if meta.hasjabs.contains(&(ins.op as u16)) { + let t = 
ins.arg as usize; + return (t < len).then_some(t); + } + + if meta.hasjrel.contains(&(ins.op as u16)) { + if meta.backward_relative.contains(&(ins.op as u16)) { + let base = idx + 1; + if (ins.arg as usize) > base { + return None; + } + return Some(base - ins.arg as usize); + } + + let t = idx + 1 + ins.arg as usize; + return (t < len).then_some(t); + } + + None +} + +pub fn instrument_with_probe( + original: &[Instruction], + probe: &[Instruction], + meta: &OpcodeMeta, +) -> Result, VerifyError> { + if probe.is_empty() { + return Err(VerifyError::EmptyProbe); + } + + if original.is_empty() { + return Ok(original.to_vec()); + } + + let mut check_sites: HashSet = HashSet::new(); + check_sites.insert(0); + + for (idx, ins) in original.iter().enumerate() { + if let Some(target) = jump_target(idx, ins, meta, original.len()) { + if target <= idx { + check_sites.insert(target); + } + } + } + + let mut out = Vec::new(); + let mut old_to_new = vec![0usize; original.len()]; + let mut source_old_idx = Vec::new(); + + for (idx, ins) in original.iter().enumerate() { + if check_sites.contains(&idx) { + for p in probe { + out.push(p.clone()); + source_old_idx.push(None); + } + } + old_to_new[idx] = out.len(); + out.push(ins.clone()); + source_old_idx.push(Some(idx)); + } + + for (new_idx, src) in source_old_idx.iter().enumerate() { + let Some(old_idx) = src else { + continue; + }; + + let current = out[new_idx].clone(); + let Some(old_target) = jump_target(*old_idx, ¤t, meta, original.len()) else { + continue; + }; + let new_target = old_to_new[old_target]; + + if meta.hasjabs.contains(&(current.op as u16)) { + out[new_idx].arg = new_target as u32; + continue; + } + + if meta.hasjrel.contains(&(current.op as u16)) { + if meta.backward_relative.contains(&(current.op as u16)) { + if new_target > new_idx + 1 { + return Err(VerifyError::InvalidRelativeJump); + } + out[new_idx].arg = (new_idx + 1 - new_target) as u32; + } else { + if new_target < new_idx + 1 { + return 
Err(VerifyError::InvalidRelativeJump); + } + out[new_idx].arg = (new_target - (new_idx + 1)) as u32; + } + } + } + + verify_instructions(&out, meta)?; + Ok(out) +} + +pub fn verify_instructions(code: &[Instruction], meta: &OpcodeMeta) -> Result<(), VerifyError> { + if code.is_empty() { + return Ok(()); + } + + for (idx, ins) in code.iter().enumerate() { + if meta.hasjabs.contains(&(ins.op as u16)) { + if (ins.arg as usize) >= code.len() { + return Err(VerifyError::InvalidJumpTarget); + } + continue; + } + + if !meta.hasjrel.contains(&(ins.op as u16)) { + continue; + } + + if meta.backward_relative.contains(&(ins.op as u16)) { + let base = idx + 1; + if (ins.arg as usize) > base { + return Err(VerifyError::InvalidRelativeJump); + } + let t = base - ins.arg as usize; + if t >= code.len() { + return Err(VerifyError::InvalidJumpTarget); + } + } else { + let t = idx + 1 + ins.arg as usize; + if t >= code.len() { + return Err(VerifyError::InvalidJumpTarget); + } + } + } + + Ok(()) +} + +pub fn verify_cache_layout( + code: &[Instruction], + quickening: &QuickeningSupport, +) -> Result<(), VerifyError> { + let Some(cache_opcode) = quickening.cache_opcode else { + return Ok(()); + }; + + if quickening.inline_cache_entries.is_empty() { + return Ok(()); + } + + let mut i = 0usize; + while i < code.len() { + let op = code[i].op as usize; + + if code[i].op == cache_opcode { + i += 1; + continue; + } + + let expected_caches = quickening + .inline_cache_entries + .get(op) + .copied() + .unwrap_or(0) as usize; + for j in 0..expected_caches { + let next = i + 1 + j; + if next >= code.len() || code[next].op != cache_opcode { + return Err(VerifyError::InvalidCacheLayout); + } + } + + i += 1 + expected_caches; + } + + Ok(()) +} + +pub fn evaluate_rewrite_compatibility( + raw: &[u8], + extended_arg: u8, + quickening: &QuickeningSupport, +) -> Result<(), &'static str> { + match verify_wordcode_bytes(raw) { + Ok(()) => {} + Err(VerifyError::InvalidWordcodeShape) => return 
Err("invalid_wordcode_shape"), + Err(VerifyError::OversizedCode) => return Err("oversized_wordcode"), + Err(_) => return Err("invalid_wordcode"), + } + + if quickening.cache_opcode.is_some() && quickening.inline_cache_entries.len() < 256 { + return Err("inline_cache_entries_incomplete"); + } + + let original = decode_wordcode(raw, extended_arg); + if verify_cache_layout(&original, quickening).is_err() { + return Err("original_cache_layout_invalid"); + } + + Ok(()) +} + +pub fn validate_probe_compatibility( + probe: &[Instruction], + quickening: &QuickeningSupport, +) -> Result<(), &'static str> { + if probe.is_empty() { + return Err("empty_probe"); + } + + if verify_cache_layout(probe, quickening).is_err() { + return Err("probe_cache_layout_invalid"); + } + + Ok(()) +} + +pub fn read_exception_entries( + py: Python, + code: &PyAny, +) -> PyResult> { + let locals = PyDict::new(py); + locals.set_item("code_obj", code)?; + py.run( + r#" +import dis +bc = dis.Bytecode(code_obj) +entries = getattr(bc, "exception_entries", ()) +__iris_exc_entries = [ + ( + int(e.start), + int(e.end), + int(e.depth), + int(getattr(e, "target", e.start)), + ) + for e in entries +] +"#, + None, + Some(locals), + )?; + + let entries = locals + .get_item("__iris_exc_entries")? + .ok_or_else(|| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/exception-entries: missing result") + })? 
+ .downcast::()?; + entries.extract() +} + +pub fn verify_exception_table_invariants( + entries: &[(usize, usize, usize, usize)], + code_units: usize, + stack_size: usize, +) -> Result<(), VerifyError> { + let mut seen = HashSet::with_capacity(entries.len()); + let mut prev: Option<(usize, usize, usize, usize)> = None; + + for (start, end, depth, target) in entries { + if *start >= *end || *end > code_units { + return Err(VerifyError::InvalidExceptionTable); + } + if *depth > stack_size { + return Err(VerifyError::StackDepthInvariant); + } + if *target >= code_units { + return Err(VerifyError::InvalidExceptionTable); + } + + let current = (*start, *end, *depth, *target); + if let Some(p) = prev { + if current < p { + return Err(VerifyError::InvalidExceptionTable); + } + } + if !seen.insert(current) { + return Err(VerifyError::InvalidExceptionTable); + } + prev = Some(current); + } + Ok(()) +} + +pub fn verify_exception_handler_targets( + entries: &[(usize, usize, usize, usize)], + code: &[Instruction], + quickening: &QuickeningSupport, +) -> Result<(), VerifyError> { + let Some(cache_opcode) = quickening.cache_opcode else { + return Ok(()); + }; + + for (_, _, _, target) in entries { + if *target >= code.len() { + return Err(VerifyError::InvalidExceptionTable); + } + if code[*target].op == cache_opcode { + return Err(VerifyError::InvalidExceptionTable); + } + } + + Ok(()) +} + +pub fn apply_isolation_transform( + code: &[Instruction], + py: Python, + disallowed_ops: Option<&std::collections::HashSet>, +) -> PyResult> { + let dis = py.import("dis")?; + let store_attr: u8 = dis.getattr("opmap")?.get_item("STORE_ATTR")?.extract()?; + + if let Some(disallowed) = disallowed_ops { + for ins in code { + if disallowed.contains(&ins.op) { + return Err(pyo3::exceptions::PyRuntimeError::new_err( + "isolation disallowed opcode encountered", + )); + } + } + } + + // In strict isolation mode, global/name stores are allowed because transmuted + // function globals are detached 
from module state. Attribute stores remain unsafe + // (object side effects can escape), so they are rejected. + for ins in code { + if ins.op == store_attr { + return Err(pyo3::exceptions::PyRuntimeError::new_err( + "isolation unsafe STORE_ATTR opcode encountered", + )); + } + } + + Ok(code.to_vec()) +} + +pub fn verify_stacksize_minimum(stack_size: usize) -> Result<(), VerifyError> { + // Probe executes a callable check and requires temporary stack headroom. + if stack_size < 2 { + return Err(VerifyError::StackDepthInvariant); + } + Ok(()) +} + +pub fn probe_instructions(py: Python, extended_arg: u8) -> PyResult> { + let locals = PyDict::new(py); + py.run( + r#" +import dis + +def __iris_probe(): + _vortex_check() + +ins = list(dis.get_instructions(__iris_probe, show_caches=True)) +start = next(i.offset for i in ins if i.opname == "LOAD_GLOBAL") +end = next(i.offset for i in ins if i.opname == "POP_TOP") +__iris_probe_bytes = list(__iris_probe.__code__.co_code[start:end+2]) +"#, + None, + Some(locals), + )?; + + let bytes = locals + .get_item("__iris_probe_bytes")? + .ok_or_else(|| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/probe-bytes: missing result") + })? + .downcast::()?; + let raw: Vec = bytes.extract()?; + Ok(decode_wordcode(&raw, extended_arg)) +} + +pub fn probe_raw_bytes(py: Python) -> PyResult> { + let locals = PyDict::new(py); + py.run( + r#" +import dis + +def __iris_probe(): + _vortex_check() + +ins = list(dis.get_instructions(__iris_probe, show_caches=True)) +start = next(i.offset for i in ins if i.opname == "LOAD_GLOBAL") +end = next(i.offset for i in ins if i.opname == "POP_TOP") +__iris_probe_bytes = list(__iris_probe.__code__.co_code[start:end+2]) +"#, + None, + Some(locals), + )?; + + let bytes = locals + .get_item("__iris_probe_bytes")? + .ok_or_else(|| { + pyo3::exceptions::PyRuntimeError::new_err("vortex/probe-bytes: missing result") + })? 
+ .downcast::()?; + bytes.extract() +} diff --git a/src/vortex/watcher.rs b/src/vortex/watcher.rs new file mode 100644 index 0000000..ae51254 --- /dev/null +++ b/src/vortex/watcher.rs @@ -0,0 +1,49 @@ +// src/vortex/watcher.rs +//! Simple experimental Vortex watchdog. + +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; +use std::time::Duration; + +pub struct VortexWatcher { + enabled: Arc, +} + +impl VortexWatcher { + pub fn new() -> Self { + VortexWatcher { + enabled: Arc::new(AtomicBool::new(false)), + } + } + + pub fn health(&self) -> &'static str { + "vortex watcher healthy" + } + + pub fn is_enabled(&self) -> bool { + self.enabled.load(Ordering::Relaxed) + } + + pub fn enable(&self) { + if self + .enabled + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + { + let enabled = self.enabled.clone(); + tokio::spawn(async move { + while enabled.load(Ordering::Relaxed) { + // In a full implementation, this would inspect actor liveness + preemption counters, + // and potentially escalate via OS-level signal (SIGVTALRM) or internal throttling. 
+ tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + } + } + + pub fn disable(&self) { + self.enabled.store(false, Ordering::SeqCst); + } +} diff --git a/tests/benchmark_jit_t1.py b/tests/benchmarks/benchmark_jit_t1.py similarity index 100% rename from tests/benchmark_jit_t1.py rename to tests/benchmarks/benchmark_jit_t1.py diff --git a/tests/benchmark_mbox.py b/tests/benchmarks/benchmark_mbox.py similarity index 100% rename from tests/benchmark_mbox.py rename to tests/benchmarks/benchmark_mbox.py diff --git a/tests/benchmark_pure_py.py b/tests/benchmarks/benchmark_pure_py.py similarity index 100% rename from tests/benchmark_pure_py.py rename to tests/benchmarks/benchmark_pure_py.py diff --git a/tests/hotsw_breaker.py b/tests/hotsw_breaker.py deleted file mode 100644 index 45ef0fb..0000000 --- a/tests/hotsw_breaker.py +++ /dev/null @@ -1,59 +0,0 @@ -# chaos_test.py -import iris -import time -import threading -import gc - -rt = iris.Runtime() - -def initial_behavior(msg): - pass - -# 1. 
Spawn a target actor -pid = rt.spawn(initial_behavior, budget=100, release_gil=True) -print(f"🚀 Started Actor {pid} for Chaos Test...") - -stop_event = threading.Event() - -def behavior_factory(i): - """Generates a transient function to swap into the actor.""" - def transient_logic(msg): - # Do a tiny bit of work - _ = len(msg) + i - return transient_logic - -def flooding_loop(): - """Floods the actor with messages to keep the mailbox busy.""" - while not stop_event.is_set(): - rt.send(pid, b"payload") - -def aggressive_swap_loop(): - """Rapidly swaps behavior and manually triggers GC to find pointer bugs.""" - swaps = 0 - try: - for i in range(10000): - # Create a function, swap it, and immediately lose the reference - new_func = behavior_factory(i) - rt.hot_swap(pid, new_func) - - del new_func # Drop the local Python reference - - if i % 100 == 0: - gc.collect() # Force GC to try and reap the function pointer - print(f"🔄 Swaps: {i} (GC Triggered)", end="\r") - swaps = i - finally: - print(f"\n✅ Finished {swaps} aggressive swaps.") - -# 2. Run the chaos -t1 = threading.Thread(target=flooding_loop) -t2 = threading.Thread(target=aggressive_swap_loop) - -t1.start() -t2.start() - -t2.join() -stop_event.set() -t1.join() - -print("🛑 Test complete. 
If no segfault occurred, the pointer management is robust.") diff --git a/tests/test.js b/tests/js/test.js similarity index 100% rename from tests/test.js rename to tests/js/test.js diff --git a/tests/test_node100k.js b/tests/js/test_node100k.js similarity index 100% rename from tests/test_node100k.js rename to tests/js/test_node100k.js diff --git a/tests/test_nodelatency.js b/tests/js/test_nodelatency.js similarity index 100% rename from tests/test_nodelatency.js rename to tests/js/test_nodelatency.js diff --git a/tests/pyo3_exit_reasons.rs b/tests/pyo3_exit_reasons.rs deleted file mode 100644 index 38e7dba..0000000 --- a/tests/pyo3_exit_reasons.rs +++ /dev/null @@ -1,69 +0,0 @@ -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; - -#[tokio::test] -async fn test_exit_reason_on_panic() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - // Spawn an observed actor that will be stopped normally. 
- let target: u64 = rt - .call_method1("spawn_observed_handler", (10usize,)) - .unwrap() - .extract() - .unwrap(); - - // Spawn an observer to collect exit notifications - let observer: u64 = rt - .call_method1("spawn_observed_handler", (10usize,)) - .unwrap() - .extract() - .unwrap(); - - // Link the target to the observer so the observer receives the EXIT - rt.call_method1("link", (target, observer)).unwrap(); - - // Stop the target actor (normal exit) - rt.call_method1("stop", (target,)).unwrap(); - - // Give a small delay for the exit to be delivered - std::thread::sleep(std::time::Duration::from_millis(50)); - - let msgs: Vec = rt - .call_method1("get_messages", (observer,)) - .unwrap() - .extract() - .unwrap(); - - // Find an EXIT message with reason 'normal' - let mut found = false; - for m in msgs { - if let Ok(type_name) = m.as_ref(py).getattr("type_name") { - if type_name.extract::().unwrap_or_default() == "EXIT" { - let reason: String = m - .as_ref(py) - .getattr("reason") - .unwrap() - .extract() - .unwrap_or_default(); - if reason == "normal" { - found = true; - break; - } - } - } - } - - assert!( - found, - "expected to find an EXIT message with reason 'normal'" - ); - }); -} diff --git a/tests/pyo3_mailbox.rs b/tests/pyo3_mailbox.rs index 7cd7c7c..e79e5b5 100644 --- a/tests/pyo3_mailbox.rs +++ b/tests/pyo3_mailbox.rs @@ -1,8 +1,7 @@ -// tests/pyo3_mailbox.rs #![cfg(feature = "pyo3")] - use pyo3::prelude::*; -use pyo3::types::PyDict; +use pyo3::types::{PyBytes, PyDict}; +use std::time::Duration; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_mailbox_actor_basic_recv() { @@ -50,6 +49,7 @@ time.sleep(0.5) let results: Vec> = locals .get_item("results") .expect("results global not set (actor failed?)") + .unwrap() .extract() .unwrap(); assert_eq!(results[0], b"first"); @@ -110,6 +110,7 @@ time.sleep(0.5) let results: Vec> = locals .get_item("results") .expect("results global not set") + .unwrap() .extract() .unwrap(); 
assert_eq!(results[0], b"target"); // Selective @@ -153,7 +154,422 @@ time.sleep(0.5) ) .unwrap(); - let result = locals.get_item("result").expect("result global not set"); + let result = locals.get_item("result").unwrap().unwrap(); assert!(result.is_none()); }); } + +#[tokio::test] +async fn test_exit_reason_on_panic() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + // Spawn an observed actor that will be stopped normally. + let target: u64 = rt + .call_method1("spawn_observed_handler", (10usize,)) + .unwrap() + .extract() + .unwrap(); + + // Spawn an observer to collect exit notifications + let observer: u64 = rt + .call_method1("spawn_observed_handler", (10usize,)) + .unwrap() + .extract() + .unwrap(); + + // Link the target to the observer so the observer receives the EXIT + rt.call_method1("link", (target, observer)).unwrap(); + + // Stop the target actor (normal exit) + rt.call_method1("stop", (target,)).unwrap(); + + // Give a small delay for the exit to be delivered + std::thread::sleep(std::time::Duration::from_millis(50)); + + let msgs: Vec = rt + .call_method1("get_messages", (observer,)) + .unwrap() + .extract() + .unwrap(); + + // Find an EXIT message with reason 'normal' + let mut found = false; + for m in msgs { + if let Ok(type_name) = m.as_ref(py).getattr("type_name") { + if type_name.extract::().unwrap_or_default() == "EXIT" { + let reason: String = m + .as_ref(py) + .getattr("reason") + .unwrap() + .extract() + .unwrap_or_default(); + if reason == "normal" { + found = true; + break; + } + } + } + } + + assert!( + found, + "expected to find an EXIT message with reason 'normal'" + ); + }); +} + +// tests/pyo3_release_gil.rs + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_spawn_py_handler_release_gil_toggle() { + // Create runtime and two handlers in the module namespace so they share a SEEN 
list + let module = Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let g = module.as_ref(py).dict(); + py.run( + r#" +import threading +SEEN = [] + +def handler_no_release(msg): + SEEN.append(('no', threading.get_ident())) + +def handler_release(msg): + SEEN.append(('yes', threading.get_ident())) +"#, + Some(g), + None, + ) + .unwrap(); + + module.into_py(py) + }); + + // Spawn both actors and send a message to each + Python::with_gil(|py| { + let module_ref = module.as_ref(py); + let rt = module_ref.getattr("PyRuntime").unwrap().call0().unwrap(); + + let handler_no = module_ref.getattr("handler_no_release").unwrap(); + let handler_yes = module_ref.getattr("handler_release").unwrap(); + + let pid_no: u64 = rt + .call_method1("spawn_py_handler", (handler_no, 10usize, false)) + .unwrap() + .extract() + .unwrap(); + let pid_yes: u64 = rt + .call_method1("spawn_py_handler", (handler_yes, 10usize, true)) + .unwrap() + .extract() + .unwrap(); + + // Send simple byte messages + let _ = rt + .call_method1("send", (pid_no, PyBytes::new(py, b"ping"))) + .unwrap(); + let _ = rt + .call_method1("send", (pid_yes, PyBytes::new(py, b"ping"))) + .unwrap(); + }); + + // Allow the actors to process messages + tokio::time::sleep(Duration::from_millis(200)).await; + + // Inspect SEEN and ensure we observed both handlers and that their thread ids differ + Python::with_gil(|py| { + let module_ref = module.as_ref(py); + let seen: Vec<(String, usize)> = module_ref.getattr("SEEN").unwrap().extract().unwrap(); + + // Expect two entries (order not guaranteed) + assert!( + seen.len() >= 2, + "expected at least two handler invocations, got {}", + seen.len() + ); + + let mut no_tid = None; + let mut yes_tid = None; + for (tag, tid) in seen { + if tag == "no" { + no_tid = Some(tid); + } + if tag == "yes" { + yes_tid = Some(tid); + } + } + + assert!(no_tid.is_some(), "no-release handler did not run"); + assert!(yes_tid.is_some(), "release handler did not run"); + 
assert_ne!( + no_tid.unwrap(), + yes_tid.unwrap(), + "handlers ran on the same thread; expected different threads when toggling GIL release" + ); + }); +} + +// tests/pyo3_selective_recv.rs + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_selective_recv_observed_py() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + // Instantiate the runtime directly via the class constructor exposed in the module + let rt_class = module.getattr(py, "PyRuntime").unwrap(); + let rt = rt_class.call0(py).unwrap(); + + // Spawn an observed handler which stores incoming messages for inspection. + let observer_pid: u64 = rt + .call_method1(py, "spawn_observed_handler", (10usize,)) + .unwrap() + .extract(py) + .unwrap(); + + // Send messages: m1, target, m3 + rt.call_method1( + py, + "send", + (observer_pid, pyo3::types::PyBytes::new(py, b"m1")), + ) + .unwrap(); + rt.call_method1( + py, + "send", + (observer_pid, pyo3::types::PyBytes::new(py, b"target")), + ) + .unwrap(); + rt.call_method1( + py, + "send", + (observer_pid, pyo3::types::PyBytes::new(py, b"m3")), + ) + .unwrap(); + + // Run an asyncio loop to await the selective receive + let locals = PyDict::new(py); + // FIX: Clone rt here so it isn't moved, allowing us to use it again below. 
+ locals.set_item("rt", rt.clone()).unwrap(); + locals.set_item("pid", observer_pid).unwrap(); + // Provide builtins so the executed code can define functions and use globals + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); + + py.run( + r#" +import asyncio + +def matcher(msg): + return isinstance(msg, (bytes, bytearray)) and msg == b"target" + +async def run_selective(rt, pid): + # No timeout specified here + fut = rt.selective_recv_observed_py(pid, matcher) + return await fut + +loop = asyncio.new_event_loop() +asyncio.set_event_loop(loop) +result = loop.run_until_complete(run_selective(rt, pid)) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + // Verify result equals b"target" + let result: Vec = locals + .get_item("result") + .unwrap() + .unwrap() + .extract() + .unwrap(); + assert_eq!(result, b"target".to_vec()); + + // Verify remaining messages are m1 and m3 in order + // This call was failing previously because rt had been moved + let msgs: Vec = rt + .call_method1(py, "get_messages", (observer_pid,)) + .unwrap() + .extract(py) + .unwrap(); + assert_eq!(msgs.len(), 2); + let first: Vec = msgs[0].as_ref(py).extract().unwrap(); + let second: Vec = msgs[1].as_ref(py).extract().unwrap(); + assert_eq!(first, b"m1".to_vec()); + assert_eq!(second, b"m3".to_vec()); + }); +} + +// Test matching system EXIT messages produced when a watched actor stops. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_selective_recv_system_message() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt_class = module.getattr(py, "PyRuntime").unwrap(); + let rt = rt_class.call0(py).unwrap(); + + // Spawn an observed handler which stores incoming messages for inspection. + let observer_pid: u64 = rt + .call_method1(py, "spawn_observed_handler", (10usize,)) + .unwrap() + .extract(py) + .unwrap(); + + // Send a HotSwap system message to the observer to test system-message matching. 
+ rt.call_method1(py, "hot_swap", (observer_pid, py.None())) + .unwrap(); + + // Now await a HOT_SWAP system message using selective_recv (async path) + let locals = PyDict::new(py); + locals.set_item("rt", rt.clone()).unwrap(); + locals.set_item("pid", observer_pid).unwrap(); + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); + + py.run( + r#" +import asyncio + +def matcher(msg): + # System messages are delivered as PySystemMessage types + try: + t = getattr(msg, "type_name") + return t == "HOT_SWAP" + except Exception: + return False + +async def run_selective(rt, pid): + fut = rt.selective_recv_observed_py(pid, matcher) + return await fut + +loop = asyncio.new_event_loop() +asyncio.set_event_loop(loop) +result = loop.run_until_complete(run_selective(rt, pid)) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + // Verify result is a PySystemMessage with type_name == 'HOT_SWAP' + let result = locals.get_item("result").unwrap().unwrap(); + let type_name: String = result.getattr("type_name").unwrap().extract().unwrap(); + assert_eq!(type_name, "HOT_SWAP"); + }); +} + +// Test timeout functionality: ensure it returns None if the message never arrives. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_selective_recv_timeout() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt_class = module.getattr(py, "PyRuntime").unwrap(); + let rt = rt_class.call0(py).unwrap(); + + let observer_pid: u64 = rt + .call_method1(py, "spawn_observed_handler", (10usize,)) + .unwrap() + .extract(py) + .unwrap(); + + // We do NOT send any messages. 
+ + let locals = PyDict::new(py); + locals.set_item("rt", rt.clone()).unwrap(); + locals.set_item("pid", observer_pid).unwrap(); + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); + + py.run( + r#" +import asyncio + +def matcher(msg): + return msg == b"never_arrives" + +async def run_with_timeout(rt, pid): + # Wait for 0.1 seconds, then timeout + return await rt.selective_recv_observed_py(pid, matcher, 0.1) + +loop = asyncio.new_event_loop() +asyncio.set_event_loop(loop) +result = loop.run_until_complete(run_with_timeout(rt, pid)) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let result = locals.get_item("result").unwrap().unwrap(); + assert!(result.is_none(), "Expected None result after timeout"); + }); +} + +// Test timeout functionality: ensure it returns the message if it exists (before timeout). +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_selective_recv_success_with_timeout() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt_class = module.getattr(py, "PyRuntime").unwrap(); + let rt = rt_class.call0(py).unwrap(); + + let observer_pid: u64 = rt + .call_method1(py, "spawn_observed_handler", (10usize,)) + .unwrap() + .extract(py) + .unwrap(); + + // Send the message immediately + rt.call_method1( + py, + "send", + (observer_pid, pyo3::types::PyBytes::new(py, b"exists")), + ) + .unwrap(); + + let locals = PyDict::new(py); + locals.set_item("rt", rt.clone()).unwrap(); + locals.set_item("pid", observer_pid).unwrap(); + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); + + py.run( + r#" +import asyncio + +def matcher(msg): + return msg == b"exists" + +async def run_with_timeout(rt, pid): + # Wait for 1.0 second (plenty of time), should return immediately + return await rt.selective_recv_observed_py(pid, matcher, 1.0) + +loop = asyncio.new_event_loop() +asyncio.set_event_loop(loop) +result = 
loop.run_until_complete(run_with_timeout(rt, pid)) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let result: Vec = locals + .get_item("result") + .unwrap() + .unwrap() + .extract() + .unwrap(); + assert_eq!(result, b"exists".to_vec()); + }); +} diff --git a/tests/pyo3_net_discovery.rs b/tests/pyo3_net_discovery.rs deleted file mode 100644 index 20a3367..0000000 --- a/tests/pyo3_net_discovery.rs +++ /dev/null @@ -1,104 +0,0 @@ -// tests/pyo3_net_discovery.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use pyo3::types::{PyDict, PyList}; -use std::time::Duration; - -#[test] -fn test_remote_name_discovery() { - let addr = "127.0.0.1:9095"; // Using a distinct port - - // 1. Setup Node A (The Provider) - let (rt_a, pid_a, results): (PyObject, u64, PyObject) = Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - rt.call_method1("listen", (addr,)).unwrap(); - - let results = PyList::empty(py); - let locals = PyDict::new(py); - locals.set_item("results", results).unwrap(); - - py.run( - r#" -def auth_handler(msg, results=results): - results.append(f"Auth:{msg.decode()}") -"#, - None, - Some(locals), - ) - .unwrap(); - - let handler = locals.get_item("auth_handler").unwrap(); - let pid: u64 = rt - .call_method1("spawn_py_handler", (handler, 10usize)) - .unwrap() - .extract() - .unwrap(); - - // Register the name on Node A - rt.call_method1("register", ("auth-service", pid)).unwrap(); - - (rt.into_py(py), pid, results.into_py(py)) - }); - - // Wait for server A to bind - std::thread::sleep(Duration::from_millis(150)); - - // 2. 
Setup Node B (The Client) - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt_b = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - // Node B resolves the name on Node A's address - let resolved_pid: Option = rt_b - .call_method1("resolve_remote", (addr, "auth-service")) - .unwrap() - .extract() - .unwrap(); - - assert!( - resolved_pid.is_some(), - "Node B failed to resolve Node A's service name" - ); - let proxy = resolved_pid.unwrap(); - // PID values may coincide across different runtimes; only the proxy - // semantics matter (successful forwarding below). - - // Node B sends a message to the proxy PID using the normal `send` API - let payload = pyo3::types::PyBytes::new(py, b"login_request"); - // the Python send method takes bytes directly - rt_b.call_method1("send", (proxy, payload)).unwrap(); - }); - - // 3. Verification: Did Node A receive the message? - let mut success = false; - for _ in 0..15 { - std::thread::sleep(Duration::from_millis(100)); - success = Python::with_gil(|py| { - let res: Vec = results.extract(py).unwrap(); - res.contains(&"Auth:login_request".to_string()) - }); - if success { - break; - } - } - - assert!(success, "Remote message via discovered name never arrived"); - - // Cleanup - Python::with_gil(|py| { - rt_a.call_method1(py, "stop", (pid_a,)).unwrap(); - }); -} diff --git a/tests/pyo3_net_monitor.rs b/tests/pyo3_net_monitor.rs deleted file mode 100644 index ef9859e..0000000 --- a/tests/pyo3_net_monitor.rs +++ /dev/null @@ -1,86 +0,0 @@ -// tests/pyo3_monitor.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use std::time::Duration; - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_remote_monitoring_is_node_level_not_pid_level() { - let addr = "127.0.0.1:9998"; - - // 1. 
Setup Node A (The Target) - let (rt_a, pid_a) = Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - rt.call_method1("listen", (addr,)).unwrap(); - - py.run("def target(msg): pass", None, None).unwrap(); - let handler = py.eval("target", None, None).unwrap(); - - let pid: u64 = rt - .call_method1("spawn_py_handler", (handler, 10usize)) - .unwrap() - .extract() - .unwrap(); - - // register under a well-known name so the other node can look it up - rt.call_method1("register", ("target", pid)).unwrap(); - - (rt.into_py(py), pid) - }); - - // 2. Setup Node B (The Guardian) - let mut proxy_pid: u64 = 0; - let rt_b = Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - // Node B first resolves the PID; this returns a local proxy. - let resolved: Option = rt - .call_method1("resolve_remote", (addr, "target")) - .unwrap() - .extract() - .unwrap(); - assert!(resolved.is_some()); - proxy_pid = resolved.unwrap(); - - // Tell Node B to monitor the proxy; this will in turn watch the - // real remote actor and shut down the proxy if the node disappears. - rt.call_method1("monitor_remote", (addr, proxy_pid)) - .unwrap(); - rt.into_py(py) - }); - - // 3. Simulate remote actor exit only (node stays up) - Python::with_gil(|py| { - rt_a.call_method1(py, "stop", (pid_a,)).unwrap(); - }); - - // monitor_remote currently tracks node liveness (heartbeat), not per-pid liveness. - // Since the listener remains up, the proxy should stay alive. - tokio::time::sleep(Duration::from_millis(1200)).await; - - // 4. 
Verification: proxy remains alive because node is still reachable - Python::with_gil(|py| { - let alive: bool = rt_b - .call_method1(py, "is_alive", (proxy_pid,)) - .unwrap() - .extract(py) - .unwrap(); - assert!( - alive, - "proxy should remain alive while remote node is reachable" - ); - }); -} diff --git a/tests/pyo3_network.rs b/tests/pyo3_network.rs index c97eadc..abb87a0 100644 --- a/tests/pyo3_network.rs +++ b/tests/pyo3_network.rs @@ -1,8 +1,7 @@ -// tests/pyo3_network.rs #![cfg(feature = "pyo3")] use pyo3::prelude::*; -use pyo3::types::{PyDict, PyList}; +use pyo3::types::{PyBytes, PyDict, PyList}; use std::time::Duration; #[tokio::test] @@ -19,10 +18,8 @@ async fn test_distributed_messaging() { let locals = PyDict::new(py); locals.set_item("results", results).unwrap(); - // Start listening rt.call_method1("listen", (addr,)).unwrap(); - // Spawn handler that records received messages py.run( r#" def remote_handler(msg, results=results): @@ -43,25 +40,20 @@ def remote_handler(msg, results=results): (rt.into_py(py), pid, results.into_py(py)) }); - // Wait for server to bind tokio::time::sleep(Duration::from_millis(100)).await; - // 2. Setup Node B (The Sender) and transmit Python::with_gil(|py| { let module = iris::py::make_module(py).expect("make_module"); let rt_type = module.as_ref(py).getattr("PyRuntime").unwrap(); let rt_b = rt_type.call0().unwrap(); - let payload = pyo3::types::PyBytes::new(py, b"Hello from Node B"); - // Send to Node A's address and PID + let payload = PyBytes::new(py, b"Hello from Node B"); rt_b.call_method1("send_remote", (addr, pid_a, payload)) .unwrap(); }); - // 3. Verification: Did it arrive on Node A? 
let mut success = false; for _ in 0..20 { - // Poll for up to 1 second tokio::time::sleep(Duration::from_millis(50)).await; success = Python::with_gil(|py| { let res: Vec = results.extract(py).unwrap(); @@ -74,8 +66,163 @@ def remote_handler(msg, results=results): assert!(success, "Remote message never arrived at Node A"); - // Cleanup Python::with_gil(|py| { rt_a.call_method1(py, "stop", (pid_a,)).unwrap(); }); } + +#[test] +fn test_remote_name_discovery() { + let addr = "127.0.0.1:9095"; + + let (rt_a, pid_a, results): (PyObject, u64, PyObject) = Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + rt.call_method1("listen", (addr,)).unwrap(); + + let results = PyList::empty(py); + let locals = PyDict::new(py); + locals.set_item("results", results).unwrap(); + + py.run( + r#" +def auth_handler(msg, results=results): + results.append(f"Auth:{msg.decode()}") +"#, + None, + Some(locals), + ) + .unwrap(); + + let handler = locals.get_item("auth_handler").unwrap(); + let pid: u64 = rt + .call_method1("spawn_py_handler", (handler, 10usize)) + .unwrap() + .extract() + .unwrap(); + + rt.call_method1("register", ("auth-service", pid)).unwrap(); + (rt.into_py(py), pid, results.into_py(py)) + }); + + std::thread::sleep(Duration::from_millis(150)); + + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt_b = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + let resolved_pid: Option = rt_b + .call_method1("resolve_remote", (addr, "auth-service")) + .unwrap() + .extract() + .unwrap(); + + assert!( + resolved_pid.is_some(), + "Node B failed to resolve Node A's service name" + ); + let proxy = resolved_pid.unwrap(); + + let payload = PyBytes::new(py, b"login_request"); + rt_b.call_method1("send", (proxy, payload)).unwrap(); + }); + + let mut success = false; + for _ in 0..15 { + 
std::thread::sleep(Duration::from_millis(100)); + success = Python::with_gil(|py| { + let res: Vec = results.extract(py).unwrap(); + res.contains(&"Auth:login_request".to_string()) + }); + if success { + break; + } + } + + assert!(success, "Remote message via discovered name never arrived"); + + Python::with_gil(|py| { + rt_a.call_method1(py, "stop", (pid_a,)).unwrap(); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_remote_monitoring_is_node_level_not_pid_level() { + let addr = "127.0.0.1:9998"; + + let (rt_a, pid_a) = Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + rt.call_method1("listen", (addr,)).unwrap(); + + py.run("def target(msg): pass", None, None).unwrap(); + let handler = py.eval("target", None, None).unwrap(); + + let pid: u64 = rt + .call_method1("spawn_py_handler", (handler, 10usize)) + .unwrap() + .extract() + .unwrap(); + + rt.call_method1("register", ("target", pid)).unwrap(); + (rt.into_py(py), pid) + }); + + let mut proxy_pid: u64 = 0; + let rt_b = Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + let resolved: Option = rt + .call_method1("resolve_remote", (addr, "target")) + .unwrap() + .extract() + .unwrap(); + assert!(resolved.is_some()); + proxy_pid = resolved.unwrap(); + + rt.call_method1("monitor_remote", (addr, proxy_pid)) + .unwrap(); + rt.into_py(py) + }); + + Python::with_gil(|py| { + rt_a.call_method1(py, "stop", (pid_a,)).unwrap(); + }); + + tokio::time::sleep(Duration::from_millis(1200)).await; + + Python::with_gil(|py| { + let alive: bool = rt_b + .call_method1(py, "is_alive", (proxy_pid,)) + .unwrap() + .extract(py) + .unwrap(); + assert!( + alive, + "proxy should remain alive while remote node is reachable" + ); + }); +} diff --git 
a/tests/overflow_policies.rs b/tests/pyo3_overflow_policies.rs similarity index 100% rename from tests/overflow_policies.rs rename to tests/pyo3_overflow_policies.rs diff --git a/tests/pyo3_path_supervisors.rs b/tests/pyo3_path_supervisors.rs deleted file mode 100644 index 44c17da..0000000 --- a/tests/pyo3_path_supervisors.rs +++ /dev/null @@ -1,47 +0,0 @@ -// tests/pyo3_path_supervisors.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use std::time::Duration; - -#[tokio::test] -async fn test_path_supervisor_watch_children() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).expect("make_module"); - let rt_type = module.as_ref(py).getattr("PyRuntime").unwrap(); - let rt = rt_type.call0().unwrap(); - - // spawn and register under a path using the observed spawn helper - let pid: u64 = rt - .call_method1("spawn_with_path_observed", (10usize, "/svc/test/one")) - .unwrap() - .extract() - .unwrap(); - - // create a path supervisor and watch the pid - rt.call_method1("create_path_supervisor", ("/svc/test",)) - .unwrap(); - rt.call_method1("path_supervisor_watch", ("/svc/test", pid)) - .unwrap(); - - let children: Vec = rt - .call_method1("path_supervisor_children", ("/svc/test",)) - .unwrap() - .extract() - .unwrap(); - - assert!(children.contains(&pid)); - - // remove and ensure empty - rt.call_method1("remove_path_supervisor", ("/svc/test",)) - .unwrap(); - let children2: Vec = rt - .call_method1("path_supervisor_children", ("/svc/test",)) - .unwrap() - .extract() - .unwrap(); - assert!(children2.is_empty()); - }); - - tokio::time::sleep(Duration::from_millis(50)).await; -} diff --git a/tests/pyo3_phase7.rs b/tests/pyo3_phase7.rs index 5d42fd7..dcac6e0 100644 --- a/tests/pyo3_phase7.rs +++ b/tests/pyo3_phase7.rs @@ -96,7 +96,8 @@ result = loop.run_until_complete(run_discovery(rt, addr)) ) .unwrap(); - let resolved_pid: Option = locals.get_item("result").unwrap().extract().unwrap(); + let resolved_any = 
locals.get_item("result").unwrap().unwrap(); + let resolved_pid: Option = resolved_any.extract().unwrap(); assert!( resolved_pid.is_some(), "Async discovery failed to resolve PID" diff --git a/tests/pyo3_registry.rs b/tests/pyo3_registry.rs index 1e67a7d..2637265 100644 --- a/tests/pyo3_registry.rs +++ b/tests/pyo3_registry.rs @@ -1,8 +1,7 @@ -// tests/pyo3_registry.rs #![cfg(feature = "pyo3")] use pyo3::prelude::*; -use pyo3::types::{PyDict, PyList}; +use pyo3::types::{PyBytes, PyDict, PyList}; use std::time::Duration; #[tokio::test] @@ -12,14 +11,12 @@ async fn test_name_registration_and_resolution() { let rt_type = module.as_ref(py).getattr("PyRuntime").unwrap(); let rt = rt_type.call0().unwrap(); - // 1. Setup handler and shared results let results = PyList::empty(py); let locals = PyDict::new(py); locals.set_item("results", results).unwrap(); py.run( - r#" -def named_handler(msg, results=results): + r#"def named_handler(msg, results=results): results.append(msg.decode()) "#, None, @@ -29,7 +26,6 @@ def named_handler(msg, results=results): let handler = locals.get_item("named_handler").unwrap(); - // 2. Spawn and Register let pid: u64 = rt .call_method1("spawn_py_handler", (handler, 10usize)) .unwrap() @@ -38,7 +34,6 @@ def named_handler(msg, results=results): rt.call_method1("register", ("my_service", pid)).unwrap(); - // 3. Resolve and Verify let resolved_pid: Option = rt .call_method1("resolve", ("my_service",)) .unwrap() @@ -47,17 +42,118 @@ def named_handler(msg, results=results): assert_eq!(resolved_pid, Some(pid)); - // 4. 
Send message via the resolved PID - let msg = pyo3::types::PyBytes::new(py, b"hello registry"); + let msg = PyBytes::new(py, b"hello registry"); rt.call_method1("send", (resolved_pid.unwrap(), msg)) .unwrap(); }); - // Small sleep for async processing tokio::time::sleep(Duration::from_millis(50)).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_registry_lifecycle() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); + let locals = PyDict::new(py); + locals.set_item("rt", rt).unwrap(); + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); + + py.run( + r#"import time + +def dummy_service(mailbox): + mailbox.recv() + +pid = rt.spawn_with_mailbox(dummy_service, 100) +assert pid > 0 + +rt.register("my_service", pid) + +found_pid = rt.resolve("my_service") +assert found_pid == pid, f"Resolve failed: expected {pid}, got {found_pid}" + +alias_pid = rt.whereis("my_service") +assert alias_pid == pid, f"Whereis failed: expected {pid}, got {alias_pid}" + +rt.register("my_service", pid) + +rt.unregister("my_service") + +gone = rt.resolve("my_service") +assert gone is None, f"Expected None after unregister, got {gone}" + +rt.stop(pid) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_communication_via_name() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); + let locals = PyDict::new(py); + locals.set_item("rt", rt).unwrap(); + locals + .set_item("__builtins__", py.import("builtins").unwrap()) + .unwrap(); - Python::with_gil(|_py| { - // (Re-accessing the runtime and results would require passing them out, - // but for a single block test we verify the logic here) + py.run( + r#"import time + +def logger_actor(mailbox): + msg 
= mailbox.recv(timeout=1.0) + global log_result + log_result = msg + +logger_pid = rt.spawn_with_mailbox(logger_actor, 100) + +rt.register("central_logger", logger_pid) + +target = rt.resolve("central_logger") +if target: + rt.send(target, b"log_this_data") +else: + raise Exception("Could not find central_logger") + +time.sleep(0.2) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let result_any = locals.get_item("log_result").unwrap().unwrap(); + let result: Vec = result_any.extract().unwrap(); + + assert_eq!(result, b"log_this_data"); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_register_non_existent_returns_none() { + Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); + let locals = PyDict::new(py); + locals.set_item("rt", rt).unwrap(); + + py.run( + r#"assert rt.resolve("ghost_service") is None +assert rt.whereis("ghost_service") is None + +rt.unregister("ghost_service") +"#, + Some(locals), + Some(locals), + ) + .unwrap(); }); } diff --git a/tests/pyo3_registry2.rs b/tests/pyo3_registry2.rs deleted file mode 100644 index 20f384f..0000000 --- a/tests/pyo3_registry2.rs +++ /dev/null @@ -1,140 +0,0 @@ -// tests/pyo3_registry.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use pyo3::types::PyDict; - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_registry_lifecycle() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); - let locals = PyDict::new(py); - locals.set_item("rt", rt).unwrap(); - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import time - -# 1. 
Spawn a simple mailbox actor -def dummy_service(mailbox): - # Just keep the channel open so the PID is valid - mailbox.recv() - -# Spawn -pid = rt.spawn_with_mailbox(dummy_service, 100) -assert pid > 0 - -# 2. Register "my_service" -rt.register("my_service", pid) - -# 3. Resolve it back -found_pid = rt.resolve("my_service") -assert found_pid == pid, f"Resolve failed: expected {pid}, got {found_pid}" - -# 4. Test 'whereis' alias -alias_pid = rt.whereis("my_service") -assert alias_pid == pid, f"Whereis failed: expected {pid}, got {alias_pid}" - -# 5. Overwrite registration (if supported, DashMap usually allows overwrite) -# Re-registering same name with same PID shouldn't error -rt.register("my_service", pid) - -# 6. Unregister -rt.unregister("my_service") - -# 7. Verify it is gone -gone = rt.resolve("my_service") -assert gone is None, f"Expected None after unregister, got {gone}" - -# Clean up -rt.stop(pid) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - }); -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_communication_via_name() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); - let locals = PyDict::new(py); - locals.set_item("rt", rt).unwrap(); - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import time - -# Actor that receives a message and writes it to a global -def logger_actor(mailbox): - msg = mailbox.recv(timeout=1.0) - global log_result - log_result = msg - -# 1. Spawn -logger_pid = rt.spawn_with_mailbox(logger_actor, 100) - -# 2. Register globally -rt.register("central_logger", logger_pid) - -# 3. 
Resolve and Send -# (Simulates another actor finding this service) -target = rt.resolve("central_logger") - -if target: - rt.send(target, b"log_this_data") -else: - raise Exception("Could not find central_logger") - -# Wait for actor to process -time.sleep(0.2) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - // Verify the actor received the message via the looked-up PID - let result: Vec = locals - .get_item("log_result") - .expect("log_result not set - actor did not receive message") - .extract() - .unwrap(); - - assert_eq!(result, b"log_this_data"); - }); -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_register_non_existent_returns_none() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module.getattr(py, "PyRuntime").unwrap().call0(py).unwrap(); - let locals = PyDict::new(py); - locals.set_item("rt", rt).unwrap(); - - py.run( - r#" -# resolving a name that was never registered should return None -assert rt.resolve("ghost_service") is None -assert rt.whereis("ghost_service") is None - -# unregistering a non-existent name should be safe (no-op) -rt.unregister("ghost_service") -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - }); -} diff --git a/tests/pyo3_release_gil.rs b/tests/pyo3_release_gil.rs deleted file mode 100644 index d310037..0000000 --- a/tests/pyo3_release_gil.rs +++ /dev/null @@ -1,95 +0,0 @@ -// tests/pyo3_release_gil.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use pyo3::types::PyBytes; -use std::time::Duration; - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_spawn_py_handler_release_gil_toggle() { - // Create runtime and two handlers in the module namespace so they share a SEEN list - let module = Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let g = module.as_ref(py).dict(); - py.run( - r#" -import threading -SEEN = [] - -def handler_no_release(msg): - SEEN.append(('no', 
threading.get_ident())) - -def handler_release(msg): - SEEN.append(('yes', threading.get_ident())) -"#, - Some(g), - None, - ) - .unwrap(); - - module.into_py(py) - }); - - // Spawn both actors and send a message to each - Python::with_gil(|py| { - let module_ref = module.as_ref(py); - let rt = module_ref.getattr("PyRuntime").unwrap().call0().unwrap(); - - let handler_no = module_ref.getattr("handler_no_release").unwrap(); - let handler_yes = module_ref.getattr("handler_release").unwrap(); - - let pid_no: u64 = rt - .call_method1("spawn_py_handler", (handler_no, 10usize, false)) - .unwrap() - .extract() - .unwrap(); - let pid_yes: u64 = rt - .call_method1("spawn_py_handler", (handler_yes, 10usize, true)) - .unwrap() - .extract() - .unwrap(); - - // Send simple byte messages - let _ = rt - .call_method1("send", (pid_no, PyBytes::new(py, b"ping"))) - .unwrap(); - let _ = rt - .call_method1("send", (pid_yes, PyBytes::new(py, b"ping"))) - .unwrap(); - }); - - // Allow the actors to process messages - tokio::time::sleep(Duration::from_millis(200)).await; - - // Inspect SEEN and ensure we observed both handlers and that their thread ids differ - Python::with_gil(|py| { - let module_ref = module.as_ref(py); - let seen: Vec<(String, usize)> = module_ref.getattr("SEEN").unwrap().extract().unwrap(); - - // Expect two entries (order not guaranteed) - assert!( - seen.len() >= 2, - "expected at least two handler invocations, got {}", - seen.len() - ); - - let mut no_tid = None; - let mut yes_tid = None; - for (tag, tid) in seen { - if tag == "no" { - no_tid = Some(tid); - } - if tag == "yes" { - yes_tid = Some(tid); - } - } - - assert!(no_tid.is_some(), "no-release handler did not run"); - assert!(yes_tid.is_some(), "release handler did not run"); - assert_ne!( - no_tid.unwrap(), - yes_tid.unwrap(), - "handlers ran on the same thread; expected different threads when toggling GIL release" - ); - }); -} diff --git a/tests/pyo3_restart_all_path_supervisors.rs 
b/tests/pyo3_restart_all_path_supervisors.rs deleted file mode 100644 index 2d9e09a..0000000 --- a/tests/pyo3_restart_all_path_supervisors.rs +++ /dev/null @@ -1,99 +0,0 @@ -// tests/pyo3_restart_all_path_supervisors.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use std::time::Duration; - -#[tokio::test] -async fn test_path_supervisor_restart_all() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).expect("make_module"); - let rt_type = module.as_ref(py).getattr("PyRuntime").unwrap(); - let rt = rt_type.call0().unwrap(); - - // spawn two observed actors under distinct child paths but same prefix - let pid1: u64 = rt - .call_method1( - "spawn_with_path_observed", - (10usize, "/svc/restart/all/one"), - ) - .unwrap() - .extract() - .unwrap(); - - let pid2: u64 = rt - .call_method1( - "spawn_with_path_observed", - (10usize, "/svc/restart/all/two"), - ) - .unwrap() - .extract() - .unwrap(); - - rt.call_method1("create_path_supervisor", ("/svc/restart/all",)) - .unwrap(); - - // factories that spawn new observed actors under the same child paths - let locals = pyo3::types::PyDict::new(py); - locals.set_item("rt", rt).unwrap(); - py.run( - r#" -def f1(): - return rt.spawn_with_path_observed(10, "/svc/restart/all/one") - -def f2(): - return rt.spawn_with_path_observed(10, "/svc/restart/all/two") -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - let f1 = locals.get_item("f1").unwrap(); - let f2 = locals.get_item("f2").unwrap(); - - rt.call_method1( - "path_supervise_with_factory", - ("/svc/restart/all", pid1, f1, "restartall"), - ) - .unwrap(); - - rt.call_method1( - "path_supervise_with_factory", - ("/svc/restart/all", pid2, f2, "restartall"), - ) - .unwrap(); - - // kill one pid — RestartAll should restart both children - rt.call_method1("stop", (pid1,)).unwrap(); - - // Wait for supervisor to perform restart attempts and then inspect children - let mut attempts = 0; - let mut seen_ok = false; - while attempts < 40 { - let children: Vec 
= rt - .call_method1("path_supervisor_children", ("/svc/restart/all",)) - .unwrap() - .extract() - .unwrap(); - if children.len() == 2 && (!children.contains(&pid1) || !children.contains(&pid2)) { - seen_ok = true; - break; - } - attempts += 1; - - // Release the GIL while sleeping so the background supervisor task - // can acquire it and run the Python factories - py.allow_threads(|| { - std::thread::sleep(std::time::Duration::from_millis(50)); - }); - } - - assert!( - seen_ok, - "supervisor did not restart children within timeout" - ); - }); - - tokio::time::sleep(Duration::from_millis(300)).await; -} diff --git a/tests/pyo3_restart_path_supervisors.rs b/tests/pyo3_restart_path_supervisors.rs deleted file mode 100644 index 72c0a22..0000000 --- a/tests/pyo3_restart_path_supervisors.rs +++ /dev/null @@ -1,71 +0,0 @@ -// tests/pyo3_restart_path_supervisors.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use std::time::Duration; - -#[tokio::test] -async fn test_path_supervisor_restart_one() { - Python::with_gil(|py| { - // Create the module in memory - let module = iris::py::make_module(py).expect("make_module"); - - // Inject the module into Python's global sys.modules cache. 
- let sys = py.import("sys").expect("Failed to import sys"); - sys.getattr("modules") - .expect("Failed to get sys.modules") - .set_item("iris", &module) - .expect("Failed to inject iris into sys.modules"); - - let rt_type = module.as_ref(py).getattr("PyRuntime").unwrap(); - let rt = rt_type.call0().unwrap(); - - // spawn an observed actor and register it under a hierarchical path - let pid: u64 = rt - .call_method1("spawn_with_path_observed", (10usize, "/svc/restart/one")) - .unwrap() - .extract() - .unwrap(); - - // create a path supervisor for the prefix - rt.call_method1("create_path_supervisor", ("/svc/restart",)) - .unwrap(); - - // Python factory that spawns a new observed actor under same path - let locals = pyo3::types::PyDict::new(py); - locals.set_item("rt", rt).unwrap(); - py.run( - r#" -def factory(): - return rt.spawn_with_path_observed(10, "/svc/restart/one") -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - let factory = locals.get_item("factory").unwrap(); - - // Attach factory-based supervision to the path (expected API) - rt.call_method1( - "path_supervise_with_factory", - ("/svc/restart", pid, factory, "restartone"), - ) - .unwrap(); - - // Kill the original pid - rt.call_method1("stop", (pid,)).unwrap(); - - // After a short delay the supervisor should restart the child - // and the children list should contain a pid (possibly new) - let children: Vec = rt - .call_method1("path_supervisor_children", ("/svc/restart",)) - .unwrap() - .extract() - .unwrap(); - - assert!(!children.is_empty()); - }); - - tokio::time::sleep(Duration::from_millis(200)).await; -} diff --git a/tests/pyo3_runtime.rs b/tests/pyo3_runtime.rs index 47a6539..6f70827 100644 --- a/tests/pyo3_runtime.rs +++ b/tests/pyo3_runtime.rs @@ -1,6 +1,110 @@ #![cfg(feature = "pyo3")] use pyo3::prelude::*; +use pyo3::types::PyBytes; +use std::time::Duration; + +#[tokio::test] +async fn test_send_after_delivers_message() { + // create runtime, spawn actor and schedule timer while 
holding the GIL + let (rt_obj, pid): (PyObject, u64) = Python::with_gil(|py| { + let module = iris::py::make_module(py).unwrap(); + let rt = module + .as_ref(py) + .getattr("PyRuntime") + .unwrap() + .call0() + .unwrap(); + + // Spawn an observed handler to collect messages + let pid: u64 = rt + .call_method1("spawn_observed_handler", (10usize,)) + .unwrap() + .extract() + .unwrap(); + + // Schedule a message after 50ms + let _timer_id: u64 = rt + .call_method1("send_after", (pid, 50u64, PyBytes::new(py, b"delayed"))) + .unwrap() + .extract() + .unwrap(); + + (rt.into_py(py), pid) + }); + + // allow the runtime to process (non-blocking) + tokio::time::sleep(Duration::from_millis(120)).await; + + // now check messages with GIL again + Python::with_gil(|py| { + let rt = rt_obj.as_ref(py); + let msgs: Vec = rt + .call_method1("get_messages", (pid,)) + .unwrap() + .extract() + .unwrap(); + + assert!(msgs.len() >= 1, "expected at least one delivered message"); + }); +} + +#[tokio::test] +async fn py_zero_copy_send() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + let rt_obj = runtime_type.call0().expect("construct PyRuntime"); + rt_obj.into_py(py) + }); + + // spawn observed handler + let pid: u64 = Python::with_gil(|py| { + rt_py + .as_ref(py) + .call_method1("spawn_observed_handler", (1usize,)) + .unwrap() + .extract() + .unwrap() + }); + + // allocate a Rust-owned buffer and write into it from Python + Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let rv = module + .as_ref(py) + .call_method1("allocate_buffer", (5usize,)) + .unwrap(); + let (id, mem, cap): (u64, pyo3::PyObject, pyo3::PyObject) = rv.extract().unwrap(); + let locals = pyo3::types::PyDict::new(py); + locals.set_item("mem", mem.as_ref(py)).unwrap(); + locals.set_item("cap", cap.as_ref(py)).unwrap(); + 
py.run("mem[:5] = b'hello'", None, Some(locals)).unwrap(); + // send the buffer without copying + rt_py + .as_ref(py) + .call_method1("send_buffer", (pid, id)) + .unwrap(); + }); + + // allow the tokio tasks to run + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + + let msgs: Vec> = Python::with_gil(|py| { + rt_py + .as_ref(py) + .call_method1("get_messages", (pid,)) + .unwrap() + .extract() + .unwrap() + }); + + assert_eq!(msgs.len(), 1); + assert_eq!(&msgs[0], b"hello"); +} #[tokio::test] async fn py_runtime_spawn_and_send() { @@ -71,7 +175,7 @@ async fn py_runtime_spawn_and_send() { Some(locals), ) .unwrap(); - let make_cb = locals.get_item("make_cb").unwrap(); + let make_cb = locals.get_item("make_cb").unwrap().unwrap(); let cb: pyo3::PyObject = make_cb.call1((lst_obj.as_ref(py),)).unwrap().into(); // spawn a Python handler and send a message @@ -126,7 +230,7 @@ async fn py_runtime_spawn_and_send() { Some(locals), ) .unwrap(); - let factory: pyo3::PyObject = locals.get_item("factory").unwrap().into(); + let factory: pyo3::PyObject = locals.get_item("factory").unwrap().unwrap().into(); rt_obj .call_method1("supervise_with_factory", (pid, factory, "RestartOne")) @@ -175,7 +279,7 @@ def cb(msg, msgs=msgs): Some(locals), ) .unwrap(); - locals.get_item("cb").unwrap().to_object(py) + locals.get_item("cb").unwrap().unwrap().to_object(py) }); let pid: u64 = Python::with_gil(|py| { @@ -269,12 +373,8 @@ async fn py_structured_concurrency_normal_and_crash() { ) .unwrap(); let lst = pyo3::types::PyList::empty(py); - let cb = locals - .get_item("make_cb") - .unwrap() - .call1((lst,)) - .unwrap() - .into_py(py); + let make_cb = locals.get_item("make_cb").unwrap().unwrap(); + let cb = make_cb.call1((lst,)).unwrap().into_py(py); (lst.into_py(py), cb) }; @@ -345,7 +445,7 @@ async fn py_structured_concurrency_normal_and_crash() { let src = "def cb(_):\n raise Exception('crash')\n"; let locals = pyo3::types::PyDict::new(py); py.run(src, None, 
Some(locals)).unwrap(); - locals.get_item("cb").unwrap().into_py(py) + locals.get_item("cb").unwrap().unwrap().into_py(py) }); let parent_crash: u64 = Python::with_gil(|py| { rt_py @@ -426,7 +526,7 @@ async fn py_spawn_child_pool_reuses_workers_under_parent() { Some(locals), ) .unwrap(); - let cb: pyo3::PyObject = locals.get_item("cb").unwrap().into(); + let cb: pyo3::PyObject = locals.get_item("cb").unwrap().unwrap().into(); let worker_pids: Vec = rt .call_method1("spawn_child_pool", (parent_pid, cb, 4usize, 64usize, false)) @@ -569,3 +669,206 @@ async fn py_virtual_actor_idle_timeout() { }); assert!(!alive, "virtual actor should stop after idle timeout"); } + +#[tokio::test] +async fn py_handler_resilient_to_all_exit_types() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + let rt_obj = runtime_type.call0().expect("construct PyRuntime"); + rt_obj.into_py(py) + }); + + let (pid_ok, pid_sys_exit, pid_exception) = Python::with_gil(|py| { + let rt = rt_py.as_ref(py); + + // A regular actor that survives + let cb_ok = py.eval("lambda m: None", None, None).unwrap(); + let pid_ok: u64 = rt + .call_method1("spawn_py_handler", (cb_ok, 1usize)) + .unwrap() + .extract() + .unwrap(); + + // An actor that does sys.exit(1) + let locals = pyo3::types::PyDict::new(py); + py.run( + "import sys\ndef sys_ext(m):\n sys.exit(1)\n", + None, + Some(locals), + ) + .unwrap(); + let cb_sys = locals.get_item("sys_ext").unwrap().unwrap(); + let pid_sys_exit: u64 = rt + .call_method1("spawn_py_handler", (cb_sys, 1usize)) + .unwrap() + .extract() + .unwrap(); + + // An actor that raises an exception + let locals2 = pyo3::types::PyDict::new(py); + py.run( + "def exc(m):\n raise ValueError('boom')\n", + None, + Some(locals2), + ) + .unwrap(); + let cb_exc = locals2.get_item("exc").unwrap().unwrap(); + let pid_exception: u64 = rt + 
.call_method1("spawn_py_handler", (cb_exc, 1usize)) + .unwrap() + .extract() + .unwrap(); + + (pid_ok, pid_sys_exit, pid_exception) + }); + + // Send messages to trigger them + Python::with_gil(|py| { + let rt = rt_py.as_ref(py); + let msg = pyo3::types::PyBytes::new(py, b"trigger"); + rt.call_method1("send", (pid_ok, msg)).unwrap(); + rt.call_method1("send", (pid_sys_exit, msg)).unwrap(); + rt.call_method1("send", (pid_exception, msg)).unwrap(); + }); + + // Let the Tokio runtime churn and execute the actors + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + + // Verify properties + Python::with_gil(|py| { + let rt = rt_py.as_ref(py); + + let is_ok_alive: bool = rt + .call_method1("is_alive", (pid_ok,)) + .unwrap() + .extract() + .unwrap(); + assert!(is_ok_alive, "Normal actor should still be alive"); + + let is_sys_alive: bool = rt + .call_method1("is_alive", (pid_sys_exit,)) + .unwrap() + .extract() + .unwrap(); + assert!(!is_sys_alive, "Actor that called sys.exit() should be dead"); + + let is_exc_alive: bool = rt + .call_method1("is_alive", (pid_exception,)) + .unwrap() + .extract() + .unwrap(); + assert!( + !is_exc_alive, + "Actor that raised an exception should be dead" + ); + }); +} + +#[tokio::test] +async fn test_py_handler_robust_exit_supervision() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + let rt_obj = runtime_type.call0().expect("construct PyRuntime"); + rt_obj.into_py(py) + }); + + let pids: Vec = Python::with_gil(|py| { + let rt = rt_py.as_ref(py); + + // A monitor actor to capture EXIT messages + let monitor_pid: u64 = rt + .call_method1("spawn_observed_handler", (20usize,)) + .unwrap() + .extract() + .unwrap(); + + // 1. sys.exit(2) + // 2. KeyboardInterrupt + // 3. 
BaseException directly + let locals = pyo3::types::PyDict::new(py); + py.run( + "def cb_sys(m):\n import sys\n sys.exit(2)\n\ + def cb_kbd(m):\n raise KeyboardInterrupt()\n\ + def cb_base(m):\n raise BaseException('base')\n", + None, + Some(locals), + ) + .unwrap(); + + let mut workers = Vec::new(); + for cb_name in &["cb_sys", "cb_kbd", "cb_base"] { + let cb = locals.get_item(cb_name).unwrap().unwrap(); + let pid: u64 = rt + .call_method1("spawn_py_handler", (cb, 10usize)) + .unwrap() + .extract() + .unwrap(); + + // Link to monitor so it receives EXIT messages + rt.call_method1("link", (monitor_pid, pid)).unwrap(); + workers.push(pid); + + // Trigger failure + let bytes = pyo3::types::PyBytes::new(py, b"die"); + rt.call_method1("send", (pid, bytes)).unwrap(); + } + + workers.push(monitor_pid); + workers + }); + + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + + Python::with_gil(|py| { + let rt = rt_py.as_ref(py); + + let monitor_pid = pids.last().unwrap(); + for pid in pids.iter().take(pids.len() - 1) { + let alive: bool = rt + .call_method1("is_alive", (*pid,)) + .unwrap() + .extract() + .unwrap(); + assert!(!alive, "Failing actor {} should be dead", pid); + } + + // Grab messages from the monitor (which is a raw Rust observer returning py objects of `EXIT`) + let msgs: Vec = rt + .call_method1("get_messages", (*monitor_pid,)) + .unwrap() + .extract() + .unwrap(); + + // We expect ONE EXIT message per failing worker + assert_eq!( + msgs.len(), + 3, + "Monitor should have received exactly 3 exit messages" + ); + + let mut exit_pids = Vec::new(); + for msg in &msgs { + // The PySystemMessage returned by the monitor should look like an object with type_name == 'EXIT' + let type_name: String = msg.getattr(py, "type_name").unwrap().extract(py).unwrap(); + assert_eq!(type_name, "EXIT"); + let target_pid: u64 = msg.getattr(py, "target_pid").unwrap().extract(py).unwrap(); + exit_pids.push(target_pid); + + // Check that the reason string indicates a 
normal (NOTE(review): comment previously said 'panic', contradicting the asserted value below — confirm which reason the runtime reports for sys.exit/KeyboardInterrupt/BaseException failures) + let reason: String = msg.getattr(py, "reason").unwrap().extract(py).unwrap(); + assert_eq!( + reason, "normal", + "Exit reason to supervisor should be 'normal'" + ); + } + assert_eq!(exit_pids.len(), 3); + }); +} diff --git a/tests/pyo3_selective_recv.rs b/tests/pyo3_selective_recv.rs deleted file mode 100644 index 88051ca..0000000 --- a/tests/pyo3_selective_recv.rs +++ /dev/null @@ -1,251 +0,0 @@ -// tests/pyo3_selective_recv.rs -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use pyo3::types::PyDict; - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_selective_recv_observed_py() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - // Instantiate the runtime directly via the class constructor exposed in the module - let rt_class = module.getattr(py, "PyRuntime").unwrap(); - let rt = rt_class.call0(py).unwrap(); - - // Spawn an observed handler which stores incoming messages for inspection. - let observer_pid: u64 = rt - .call_method1(py, "spawn_observed_handler", (10usize,)) - .unwrap() - .extract(py) - .unwrap(); - - // Send messages: m1, target, m3 - rt.call_method1( - py, - "send", - (observer_pid, pyo3::types::PyBytes::new(py, b"m1")), - ) - .unwrap(); - rt.call_method1( - py, - "send", - (observer_pid, pyo3::types::PyBytes::new(py, b"target")), - ) - .unwrap(); - rt.call_method1( - py, - "send", - (observer_pid, pyo3::types::PyBytes::new(py, b"m3")), - ) - .unwrap(); - - // Run an asyncio loop to await the selective receive - let locals = PyDict::new(py); - // FIX: Clone rt here so it isn't moved, allowing us to use it again below.
- locals.set_item("rt", rt.clone()).unwrap(); - locals.set_item("pid", observer_pid).unwrap(); - // Provide builtins so the executed code can define functions and use globals - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import asyncio - -def matcher(msg): - return isinstance(msg, (bytes, bytearray)) and msg == b"target" - -async def run_selective(rt, pid): - # No timeout specified here - fut = rt.selective_recv_observed_py(pid, matcher) - return await fut - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) -result = loop.run_until_complete(run_selective(rt, pid)) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - // Verify result equals b"target" - let result: Vec = locals.get_item("result").unwrap().extract().unwrap(); - assert_eq!(result, b"target".to_vec()); - - // Verify remaining messages are m1 and m3 in order - // This call was failing previously because rt had been moved - let msgs: Vec = rt - .call_method1(py, "get_messages", (observer_pid,)) - .unwrap() - .extract(py) - .unwrap(); - assert_eq!(msgs.len(), 2); - let first: Vec = msgs[0].as_ref(py).extract().unwrap(); - let second: Vec = msgs[1].as_ref(py).extract().unwrap(); - assert_eq!(first, b"m1".to_vec()); - assert_eq!(second, b"m3".to_vec()); - }); -} - -// Test matching system EXIT messages produced when a watched actor stops. -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_selective_recv_system_message() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt_class = module.getattr(py, "PyRuntime").unwrap(); - let rt = rt_class.call0(py).unwrap(); - - // Spawn an observed handler which stores incoming messages for inspection. - let observer_pid: u64 = rt - .call_method1(py, "spawn_observed_handler", (10usize,)) - .unwrap() - .extract(py) - .unwrap(); - - // Send a HotSwap system message to the observer to test system-message matching. 
- rt.call_method1(py, "hot_swap", (observer_pid, py.None())) - .unwrap(); - - // Now await a HOT_SWAP system message using selective_recv (async path) - let locals = PyDict::new(py); - locals.set_item("rt", rt.clone()).unwrap(); - locals.set_item("pid", observer_pid).unwrap(); - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import asyncio - -def matcher(msg): - # System messages are delivered as PySystemMessage types - try: - t = getattr(msg, "type_name") - return t == "HOT_SWAP" - except Exception: - return False - -async def run_selective(rt, pid): - fut = rt.selective_recv_observed_py(pid, matcher) - return await fut - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) -result = loop.run_until_complete(run_selective(rt, pid)) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - // Verify result is a PySystemMessage with type_name == 'HOT_SWAP' - let result = locals.get_item("result").unwrap(); - let type_name: String = result.getattr("type_name").unwrap().extract().unwrap(); - assert_eq!(type_name, "HOT_SWAP"); - }); -} - -// Test timeout functionality: ensure it returns None if the message never arrives. -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_selective_recv_timeout() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt_class = module.getattr(py, "PyRuntime").unwrap(); - let rt = rt_class.call0(py).unwrap(); - - let observer_pid: u64 = rt - .call_method1(py, "spawn_observed_handler", (10usize,)) - .unwrap() - .extract(py) - .unwrap(); - - // We do NOT send any messages. 
- - let locals = PyDict::new(py); - locals.set_item("rt", rt.clone()).unwrap(); - locals.set_item("pid", observer_pid).unwrap(); - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import asyncio - -def matcher(msg): - return msg == b"never_arrives" - -async def run_with_timeout(rt, pid): - # Wait for 0.1 seconds, then timeout - return await rt.selective_recv_observed_py(pid, matcher, 0.1) - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) -result = loop.run_until_complete(run_with_timeout(rt, pid)) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - let result = locals.get_item("result").unwrap(); - assert!(result.is_none(), "Expected None result after timeout"); - }); -} - -// Test timeout functionality: ensure it returns the message if it exists (before timeout). -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn test_selective_recv_success_with_timeout() { - Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt_class = module.getattr(py, "PyRuntime").unwrap(); - let rt = rt_class.call0(py).unwrap(); - - let observer_pid: u64 = rt - .call_method1(py, "spawn_observed_handler", (10usize,)) - .unwrap() - .extract(py) - .unwrap(); - - // Send the message immediately - rt.call_method1( - py, - "send", - (observer_pid, pyo3::types::PyBytes::new(py, b"exists")), - ) - .unwrap(); - - let locals = PyDict::new(py); - locals.set_item("rt", rt.clone()).unwrap(); - locals.set_item("pid", observer_pid).unwrap(); - locals - .set_item("__builtins__", py.import("builtins").unwrap()) - .unwrap(); - - py.run( - r#" -import asyncio - -def matcher(msg): - return msg == b"exists" - -async def run_with_timeout(rt, pid): - # Wait for 1.0 second (plenty of time), should return immediately - return await rt.selective_recv_observed_py(pid, matcher, 1.0) - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) -result = loop.run_until_complete(run_with_timeout(rt, 
pid)) -"#, - Some(locals), - Some(locals), - ) - .unwrap(); - - let result: Vec = locals.get_item("result").unwrap().extract().unwrap(); - assert_eq!(result, b"exists".to_vec()); - }); -} diff --git a/tests/pyo3_timers.rs b/tests/pyo3_timers.rs deleted file mode 100644 index fa50da1..0000000 --- a/tests/pyo3_timers.rs +++ /dev/null @@ -1,50 +0,0 @@ -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; -use pyo3::types::PyBytes; -use std::time::Duration; - -#[tokio::test] -async fn test_send_after_delivers_message() { - // create runtime, spawn actor and schedule timer while holding the GIL - let (rt_obj, pid): (PyObject, u64) = Python::with_gil(|py| { - let module = iris::py::make_module(py).unwrap(); - let rt = module - .as_ref(py) - .getattr("PyRuntime") - .unwrap() - .call0() - .unwrap(); - - // Spawn an observed handler to collect messages - let pid: u64 = rt - .call_method1("spawn_observed_handler", (10usize,)) - .unwrap() - .extract() - .unwrap(); - - // Schedule a message after 50ms - let _timer_id: u64 = rt - .call_method1("send_after", (pid, 50u64, PyBytes::new(py, b"delayed"))) - .unwrap() - .extract() - .unwrap(); - - (rt.into_py(py), pid) - }); - - // allow the runtime to process (non-blocking) - tokio::time::sleep(Duration::from_millis(120)).await; - - // now check messages with GIL again - Python::with_gil(|py| { - let rt = rt_obj.as_ref(py); - let msgs: Vec = rt - .call_method1("get_messages", (pid,)) - .unwrap() - .extract() - .unwrap(); - - assert!(msgs.len() >= 1, "expected at least one delivered message"); - }); -} diff --git a/tests/pyo3_vortex.rs b/tests/pyo3_vortex.rs new file mode 100644 index 0000000..a686b6f --- /dev/null +++ b/tests/pyo3_vortex.rs @@ -0,0 +1,1928 @@ +#![cfg(all(feature = "pyo3", feature = "vortex"))] + +use pyo3::prelude::*; +use pyo3::types::{PyDict, PyList}; +use tokio::time::{sleep, timeout, Duration}; + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_preemption_on_while_true() { + 
Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + + // Define an endless loop + let code = r#" +def endless(): + while True: + pass +"#; + py.run(code, Some(globals), None).unwrap(); + + let endless_func = globals.get_item("endless").unwrap().unwrap(); + let original_code = endless_func + .getattr("__code__") + .unwrap() + .getattr("co_code") + .unwrap() + .to_object(py); + + m.getattr(py, "set_budget") + .unwrap() + .call1(py, (5,)) + .unwrap(); + + // Shadow clone transmutation should not mutate the original function object. + let shadow = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (endless_func,)) + .unwrap(); + assert!(!shadow.as_ref(py).is(endless_func)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + assert!(mode == "rewrite" || mode == "fallback"); + assert!(rewrite_attempted || mode == "fallback"); + + let current_original_code = endless_func + .getattr("__code__") + .unwrap() + .getattr("co_code") + .unwrap() + .to_object(py); + assert!(original_code + .as_ref(py) + .eq(current_original_code.as_ref(py)) + .unwrap()); + + // Run transmuted shadow function. It should suspend by budget. 
+ let res = shadow.call0(py); + + assert!(res.is_err()); + let err = res.unwrap_err(); + assert!(err.is_instance_of::(py)); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_opcode_metadata_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample(): + return 42 +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample").unwrap().unwrap(); + + // Ensure dis is present in sys.modules before monkeypatching it. + py.import("dis").unwrap(); + let sys = py.import("sys").unwrap(); + let modules = sys + .getattr("modules") + .unwrap() + .downcast::() + .unwrap(); + let original_dis = modules.get_item("dis").unwrap().unwrap().to_object(py); + + modules + .set_item("dis", py.eval("object()", None, None).unwrap()) + .unwrap(); + + let shadow = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)) + .unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "opcode_metadata_unavailable"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + + modules.set_item("dis", original_dis).unwrap(); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
test_vortex_fallback_reports_quickening_metadata_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample2(): + return 7 +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample2").unwrap().unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + + // Force quickening metadata extraction to fail. + dis.setattr( + "_inline_cache_entries", + py.eval("object()", None, None).unwrap(), + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "quickening_metadata_unavailable"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_original_cache_layout_invalid() { + 
Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample3(): + return 99 +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample3").unwrap().unwrap(); + let dis = py.import("dis").unwrap(); + let locals = PyDict::new(py); + locals.set_item("dis", dis).unwrap(); + let cache_opcode: i32 = py + .eval("dis.opmap.get('CACHE', -1)", Some(locals), None) + .unwrap() + .extract() + .unwrap(); + + if cache_opcode < 0 { + // Python runtime has no inline cache opcode, so this path is not applicable. + return; + } + + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + + // Force a clearly incompatible table: every non-CACHE opcode expects a cache slot. + let mut entries = vec![1u16; 256]; + entries[cache_opcode as usize] = 0; + dis.setattr("_inline_cache_entries", PyList::new(py, entries)) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + 
.unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "original_cache_layout_invalid"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_stack_depth_invariant_failed() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_small_stack(): + return 1 +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample_small_stack").unwrap().unwrap(); + let original_code = sample.getattr("__code__").unwrap().to_object(py); + let locals = PyDict::new(py); + locals.set_item("fn", sample).unwrap(); + py.run( + r#" +fn.__code__ = fn.__code__.replace(co_stacksize=1) +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + sample.setattr("__code__", original_code).unwrap(); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let 
rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "stack_depth_invariant_failed"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_exception_table_invalid() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_exc(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample_exc").unwrap().unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let original_bytecode = dis.getattr("Bytecode").unwrap().to_object(py); + let locals = PyDict::new(py); + locals.set_item("dis", dis).unwrap(); + py.run( + r#" +import types + +class _IrisBadEntry: + def __init__(self): + self.start = 0 + self.end = 999999 + self.depth = 0 + +class _IrisBadBytecode: + def __init__(self, _code): + self.exception_entries = [_IrisBadEntry()] + +dis.Bytecode = _IrisBadBytecode +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + dis.setattr("Bytecode", original_bytecode).unwrap(); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + 
assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "exception_table_invalid"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_exception_table_metadata_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_exc_meta_unavailable(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_exc_meta_unavailable") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let original_bytecode = dis.getattr("Bytecode").unwrap().to_object(py); + let locals = PyDict::new(py); + locals.set_item("dis", dis).unwrap(); + py.run( + r#" +class _IrisFailBytecode: + def __init__(self, _code): + raise RuntimeError("forced bytecode metadata failure") + +dis.Bytecode = _IrisFailBytecode +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let 
shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + dis.setattr("Bytecode", original_bytecode).unwrap(); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "exception_table_metadata_unavailable"); + assert!(!rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_patched_exception_table_invalid() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_patched_exc(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample_patched_exc").unwrap().unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let 
environ = os.getattr("environ").unwrap(); + environ + .set_item( + "IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_INVALID", + "1", + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = environ.del_item("IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_INVALID"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "patched_exception_table_invalid"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_probe_extraction_failed() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_probe_fail(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals.get_item("sample_probe_fail").unwrap().unwrap(); + let dis = py.import("dis").unwrap(); + + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + 
Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let original_get_instructions = dis.getattr("get_instructions").unwrap().to_object(py); + let locals = PyDict::new(py); + locals.set_item("dis", dis).unwrap(); + py.run( + r#" +def _iris_fail_get_instructions(*_args, **_kwargs): + raise RuntimeError("forced probe extraction failure") + +dis.get_instructions = _iris_fail_get_instructions +"#, + Some(locals), + Some(locals), + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + dis.setattr("get_instructions", original_get_instructions) + .unwrap(); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "probe_extraction_failed"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_probe_instrumentation_failed() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + 
globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_probe_instrumentation_fail(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_probe_instrumentation_fail") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item("IRIS_VORTEX_TEST_FORCE_PROBE_INSTRUMENTATION_FAILED", "1") + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = environ.del_item("IRIS_VORTEX_TEST_FORCE_PROBE_INSTRUMENTATION_FAILED"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "probe_instrumentation_failed"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] +async fn test_vortex_fallback_reports_patched_stack_metadata_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_patched_stack_metadata_unavailable(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_patched_stack_metadata_unavailable") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item( + "IRIS_VORTEX_TEST_FORCE_PATCHED_STACK_METADATA_UNAVAILABLE", + "1", + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = environ.del_item("IRIS_VORTEX_TEST_FORCE_PATCHED_STACK_METADATA_UNAVAILABLE"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + 
.unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "patched_stack_metadata_unavailable"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_patched_exception_table_metadata_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_patched_exception_table_metadata_unavailable(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_patched_exception_table_metadata_unavailable") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item( + "IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_METADATA_UNAVAILABLE", + "1", + ) + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = + environ.del_item("IRIS_VORTEX_TEST_FORCE_PATCHED_EXCEPTION_TABLE_METADATA_UNAVAILABLE"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let 
reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "patched_exception_table_metadata_unavailable"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_code_replace_failed() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_code_replace_fail(): + a = 1 + b = 2 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_code_replace_fail") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item("IRIS_VORTEX_TEST_FORCE_CODE_REPLACE_FAILED", "1") + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = environ.del_item("IRIS_VORTEX_TEST_FORCE_CODE_REPLACE_FAILED"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + 
.unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "code_replace_failed"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_types_module_unavailable() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_types_unavailable(): + a = 3 + b = 4 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_types_unavailable") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item("IRIS_VORTEX_TEST_FORCE_TYPES_MODULE_UNAVAILABLE", "1") + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = environ.del_item("IRIS_VORTEX_TEST_FORCE_TYPES_MODULE_UNAVAILABLE"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + 
+ let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "types_module_unavailable"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_vortex_fallback_reports_shadow_function_construction_failed() { + Python::with_gil(|py| { + let m = iris::py::make_module(py).unwrap(); + + let globals = PyDict::new(py); + globals.set_item("iris", &m).unwrap(); + py.run( + r#" +def sample_shadow_construction_fail(): + a = 5 + b = 6 + return a + b +"#, + Some(globals), + None, + ) + .unwrap(); + + let sample = globals + .get_item("sample_shadow_construction_fail") + .unwrap() + .unwrap(); + let dis = py.import("dis").unwrap(); + let had_inline = dis.hasattr("_inline_cache_entries").unwrap(); + let original_inline = if had_inline { + Some(dis.getattr("_inline_cache_entries").unwrap().to_object(py)) + } else { + None + }; + dis.setattr("_inline_cache_entries", PyList::new(py, vec![0u16; 256])) + .unwrap(); + + let os = py.import("os").unwrap(); + let environ = os.getattr("environ").unwrap(); + environ + .set_item("IRIS_VORTEX_TEST_FORCE_SHADOW_CONSTRUCTION_FAILED", "1") + .unwrap(); + + let shadow_res = m + .getattr(py, "transmute_function") + .unwrap() + .call1(py, (sample,)); + + let _ = 
environ.del_item("IRIS_VORTEX_TEST_FORCE_SHADOW_CONSTRUCTION_FAILED"); + if had_inline { + dis.setattr("_inline_cache_entries", original_inline.unwrap()) + .unwrap(); + } else { + let _ = dis.delattr("_inline_cache_entries"); + } + + let shadow = shadow_res.unwrap(); + assert!(!shadow.as_ref(py).is(sample)); + + let guard = m + .getattr(py, "get_guard_status") + .unwrap() + .call0(py) + .unwrap(); + let guard_dict = guard.downcast::(py).unwrap(); + let mode: String = guard_dict + .get_item("mode") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let reason: String = guard_dict + .get_item("reason") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_attempted: bool = guard_dict + .get_item("rewrite_attempted") + .unwrap() + .unwrap() + .extract() + .unwrap(); + let rewrite_applied: bool = guard_dict + .get_item("rewrite_applied") + .unwrap() + .unwrap() + .extract() + .unwrap(); + + assert_eq!(mode, "fallback"); + assert_eq!(reason, "shadow_function_construction_failed"); + assert!(rewrite_attempted); + assert!(!rewrite_applied); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_auto_policy_and_telemetry() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + rt_py + .as_ref(py) + .call_method0("vortex_reset_auto_telemetry") + .unwrap(); + + let counts: (u64, u64) = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_resolution_counts") + .unwrap() + .extract() + .unwrap(); + let replay: u64 = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_replay_count") + .unwrap() + .extract() + .unwrap(); + assert_eq!(counts, (0, 0)); + assert_eq!(replay, 0); + + let ok: bool = rt_py + .as_ref(py) + .call_method1("vortex_set_auto_ghost_policy", 
("PreferPrimary",)) + .unwrap() + .extract() + .unwrap(); + assert!(ok); + + let current: String = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_ghost_policy") + .unwrap() + .extract::>() + .unwrap() + .unwrap(); + assert_eq!(current, "PreferPrimary"); + }); + + let cb = Python::with_gil(|py| { + py.eval("lambda _msg: None", None, None) + .unwrap() + .to_object(py) + }); + + let pid: u64 = Python::with_gil(|py| { + rt_py + .as_ref(py) + .call_method1("spawn_py_handler", (cb, 8usize, false)) + .unwrap() + .extract() + .unwrap() + }); + + Python::with_gil(|py| { + for _ in 0..1400u32 { + let _ = rt_py + .as_ref(py) + .call_method1("send", (pid, pyo3::types::PyBytes::new(py, b"tick"))); + } + }); + + timeout(Duration::from_secs(3), async { + loop { + let replay: u64 = Python::with_gil(|py| { + rt_py + .as_ref(py) + .call_method0("vortex_get_auto_replay_count") + .unwrap() + .extract() + .unwrap() + }); + + if replay > 0 { + break; + } + + sleep(Duration::from_millis(20)).await; + } + }) + .await + .expect("auto replay telemetry should increase"); + + Python::with_gil(|py| { + let counts: (u64, u64) = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_resolution_counts") + .unwrap() + .extract() + .unwrap(); + assert!(counts.0 > 0); + assert_eq!(counts.1, 0); + + let replay: u64 = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_replay_count") + .unwrap() + .extract() + .unwrap(); + assert!(replay > 0); + + rt_py + .as_ref(py) + .call_method0("vortex_reset_auto_telemetry") + .unwrap(); + let counts_after: (u64, u64) = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_resolution_counts") + .unwrap() + .extract() + .unwrap(); + let replay_after: u64 = rt_py + .as_ref(py) + .call_method0("vortex_get_auto_replay_count") + .unwrap() + .extract() + .unwrap(); + assert_eq!(counts_after, (0, 0)); + assert_eq!(replay_after, 0); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
test_pyruntime_vortex_auto_policy_rejects_invalid_value() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let res = rt_py + .as_ref(py) + .call_method1("vortex_set_auto_ghost_policy", ("not-a-policy",)); + assert!(res.is_err()); + let err = res.unwrap_err(); + assert!(err.is_instance_of::(py)); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_genetic_budgeting_toggle() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let initial: bool = rt_py + .as_ref(py) + .call_method0("vortex_get_genetic_budgeting") + .unwrap() + .extract() + .unwrap(); + assert!(!initial); + + let ok_true: bool = rt_py + .as_ref(py) + .call_method1("vortex_set_genetic_budgeting", (true,)) + .unwrap() + .extract() + .unwrap(); + assert!(ok_true); + + let after_true: bool = rt_py + .as_ref(py) + .call_method0("vortex_get_genetic_budgeting") + .unwrap() + .extract() + .unwrap(); + assert!(after_true); + + let ok_false: bool = rt_py + .as_ref(py) + .call_method1("vortex_set_genetic_budgeting", (false,)) + .unwrap() + .extract() + .unwrap(); + assert!(ok_false); + + let after_false: bool = rt_py + .as_ref(py) + .call_method0("vortex_get_genetic_budgeting") + .unwrap() + .extract() + .unwrap(); + assert!(!after_false); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_genetic_threshold_roundtrip() { + let rt_py = Python::with_gil(|py| { + let module = 
iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let (low, high): (f64, f64) = rt_py + .as_ref(py) + .call_method0("vortex_get_genetic_thresholds") + .unwrap() + .extract() + .unwrap(); + assert_eq!((low, high), (0.4, 0.7)); + + let ok: bool = rt_py + .as_ref(py) + .call_method1("vortex_set_genetic_thresholds", (0.2f64, 0.5f64)) + .unwrap() + .extract() + .unwrap(); + assert!(ok); + + let (low, high): (f64, f64) = rt_py + .as_ref(py) + .call_method0("vortex_get_genetic_thresholds") + .unwrap() + .extract() + .unwrap(); + assert_eq!((low, high), (0.2, 0.5)); + + let ok_bad: bool = rt_py + .as_ref(py) + .call_method1("vortex_set_genetic_thresholds", (0.7f64, 0.2f64)) + .unwrap() + .extract() + .unwrap(); + assert!(!ok_bad); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_watchdog_toggle() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let enabled: Option = rt_py + .as_ref(py) + .call_method0("vortex_watchdog_enabled") + .unwrap() + .extract() + .unwrap(); + // Watchdog starts disabled by default. 
+ assert!(!enabled.unwrap_or(true)); + + let ok = rt_py + .as_ref(py) + .call_method0("vortex_watchdog_enable") + .unwrap() + .extract::() + .unwrap(); + assert!(ok); + + let enabled: Option = rt_py + .as_ref(py) + .call_method0("vortex_watchdog_enabled") + .unwrap() + .extract() + .unwrap(); + assert!(enabled.unwrap_or(false)); + + let ok = rt_py + .as_ref(py) + .call_method0("vortex_watchdog_disable") + .unwrap() + .extract::() + .unwrap(); + assert!(ok); + + let enabled: Option = rt_py + .as_ref(py) + .call_method0("vortex_watchdog_enabled") + .unwrap() + .extract() + .unwrap(); + assert!(!enabled.unwrap_or(true)); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_isolation_disallow_ops() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let dis = py.import("dis").unwrap(); + let opmap = dis.getattr("opmap").unwrap(); + let store_global: u8 = opmap.get_item("STORE_GLOBAL").unwrap().extract().unwrap(); + let store_attr: u8 = opmap.get_item("STORE_ATTR").unwrap().extract().unwrap(); + + rt_py + .as_ref(py) + .call_method1( + "vortex_set_isolation_disallowed_ops", + (vec![store_global, store_attr],), + ) + .unwrap(); + + let ops: Option> = rt_py + .as_ref(py) + .call_method0("vortex_get_isolation_disallowed_ops") + .unwrap() + .extract() + .unwrap(); + + assert!(ops.is_some()); + let mut ops = ops.unwrap(); + ops.sort(); + let mut expected = vec![store_global, store_attr]; + expected.sort(); + assert_eq!(ops, expected); + + rt_py + .as_ref(py) + .call_method1("vortex_set_isolation_disallowed_ops", (Vec::::new(),)) + .unwrap(); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn 
test_pyruntime_vortex_isolation_mode_store_blocking() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let _ = rt_py + .as_ref(py) + .call_method1("vortex_set_isolation_mode", (true,)); + + // Allow STORE_GLOBAL for this test (STORE_ATTR remains blocked in transform). + rt_py + .as_ref(py) + .call_method1("vortex_set_isolation_disallowed_ops", (Vec::::new(),)) + .unwrap(); + + let module = iris::py::make_module(py).expect("make_module"); + let globals = PyDict::new(py); + globals.set_item("iris", &module).unwrap(); + py.run( + "def isolated_write_read():\n global isolation_test\n isolation_test = 1\n return isolation_test\n", + Some(globals), + None, + ) + .unwrap(); + let fn_obj = globals.get_item("isolated_write_read").unwrap().unwrap(); + + let transmuted = module + .as_ref(py) + .getattr("transmute_function") + .unwrap() + .call1((fn_obj,)) + .unwrap(); + + module + .as_ref(py) + .getattr("set_budget") + .unwrap() + .call1((100usize,)) + .unwrap(); + + let result: i32 = transmuted.call0().unwrap().extract().unwrap(); + // STORE_GLOBAL writes into isolated globals; LOAD_GLOBAL resolves from same isolated table. 
+ assert_eq!(result, 1); + + let leaked_in_original_globals = globals.contains("isolation_test").unwrap(); + assert!(!leaked_in_original_globals); + + let _ = rt_py + .as_ref(py) + .call_method1("vortex_set_isolation_mode", (false,)); + }); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pyruntime_vortex_genetic_history_pickup_and_reset() { + let rt_py = Python::with_gil(|py| { + let module = iris::py::make_module(py).expect("make_module"); + let runtime_type = module + .as_ref(py) + .getattr("PyRuntime") + .expect("no PyRuntime type"); + runtime_type + .call0() + .expect("construct PyRuntime") + .into_py(py) + }); + + Python::with_gil(|py| { + let id: u64 = rt_py + .as_ref(py) + .call_method1( + "spawn_py_handler", + ( + py.eval("lambda _msg: None", None, None).unwrap(), + 4usize, + false, + ), + ) + .unwrap() + .extract() + .unwrap(); + + let initial: Option<(usize, usize)> = rt_py + .as_ref(py) + .call_method1("vortex_get_genetic_history", (id,)) + .unwrap() + .extract() + .unwrap(); + assert_eq!(initial, Some((0, 0))); + + /* exercise rendezvous */ + for _ in 0..40 { + let _ = rt_py + .as_ref(py) + .call_method1("send", (id, pyo3::types::PyBytes::new(py, b"x"))); + } + + // Wait for some processing and possible suspends + std::thread::sleep(std::time::Duration::from_millis(150)); + + let after: Option<(usize, usize)> = rt_py + .as_ref(py) + .call_method1("vortex_get_genetic_history", (id,)) + .unwrap() + .extract() + .unwrap(); + assert!(after.is_some()); + assert!(after.unwrap().1 >= 1); + + let all: Vec<(u64, usize, usize)> = rt_py + .as_ref(py) + .call_method0("vortex_get_all_genetic_history") + .unwrap() + .extract() + .unwrap(); + assert!(all.iter().any(|(pid, _, _)| *pid == id)); + + rt_py + .as_ref(py) + .call_method0("vortex_reset_genetic_history") + .unwrap(); + + let reset_all: Vec<(u64, usize, usize)> = rt_py + .as_ref(py) + .call_method0("vortex_get_all_genetic_history") + .unwrap() + .extract() + .unwrap(); + 
assert!(reset_all.is_empty()); + + rt_py.as_ref(py).call_method1("stop", (id,)).unwrap(); + }); +} diff --git a/tests/pyo3_zero_copy.rs b/tests/pyo3_zero_copy.rs deleted file mode 100644 index 1c2495d..0000000 --- a/tests/pyo3_zero_copy.rs +++ /dev/null @@ -1,60 +0,0 @@ -#![cfg(feature = "pyo3")] - -use pyo3::prelude::*; - -#[tokio::test] -async fn py_zero_copy_send() { - let rt_py = Python::with_gil(|py| { - let module = iris::py::make_module(py).expect("make_module"); - let runtime_type = module - .as_ref(py) - .getattr("PyRuntime") - .expect("no PyRuntime type"); - let rt_obj = runtime_type.call0().expect("construct PyRuntime"); - rt_obj.into_py(py) - }); - - // spawn observed handler - let pid: u64 = Python::with_gil(|py| { - rt_py - .as_ref(py) - .call_method1("spawn_observed_handler", (1usize,)) - .unwrap() - .extract() - .unwrap() - }); - - // allocate a Rust-owned buffer and write into it from Python - Python::with_gil(|py| { - let module = iris::py::make_module(py).expect("make_module"); - let rv = module - .as_ref(py) - .call_method1("allocate_buffer", (5usize,)) - .unwrap(); - let (id, mem, cap): (u64, pyo3::PyObject, pyo3::PyObject) = rv.extract().unwrap(); - let locals = pyo3::types::PyDict::new(py); - locals.set_item("mem", mem.as_ref(py)).unwrap(); - locals.set_item("cap", cap.as_ref(py)).unwrap(); - py.run("mem[:5] = b'hello'", None, Some(locals)).unwrap(); - // send the buffer without copying - rt_py - .as_ref(py) - .call_method1("send_buffer", (pid, id)) - .unwrap(); - }); - - // allow the tokio tasks to run - tokio::time::sleep(std::time::Duration::from_millis(200)).await; - - let msgs: Vec> = Python::with_gil(|py| { - rt_py - .as_ref(py) - .call_method1("get_messages", (pid,)) - .unwrap() - .extract() - .unwrap() - }); - - assert_eq!(msgs.len(), 1); - assert_eq!(&msgs[0], b"hello"); -} diff --git a/tests/supervision_paths.rs b/tests/supervision_paths.rs deleted file mode 100644 index 095b5b5..0000000 --- a/tests/supervision_paths.rs +++ 
/dev/null @@ -1,63 +0,0 @@ -use iris::Runtime; - -#[test] -fn path_registration_and_listing() { - let rt = Runtime::new(); - - // Spawn two observed actors and register them under hierarchical paths - let a = rt.spawn_observed_handler(10); - let b = rt.spawn_observed_handler(10); - - rt.register_path("/system/service/one".to_string(), a); - rt.register_path("/system/service/two".to_string(), b); - - // Exact lookup - let pa = rt.whereis_path("/system/service/one"); - assert_eq!(pa, Some(a)); - - // List all descendants under prefix - let children = rt.list_children("/system/service"); - assert!(children - .iter() - .any(|(p, pid)| p == "/system/service/one" && *pid == a)); - assert!(children - .iter() - .any(|(p, pid)| p == "/system/service/two" && *pid == b)); - - // Direct children (one level) should return the same here - let direct = rt.list_children_direct("/system/service"); - assert!(direct - .iter() - .any(|(p, pid)| p == "/system/service/one" && *pid == a)); - assert!(direct - .iter() - .any(|(p, pid)| p == "/system/service/two" && *pid == b)); - - // Now add a deeper path and ensure direct listing filters it out - let c = rt.spawn_observed_handler(10); - rt.register_path("/system/service/two/grand".to_string(), c); - - let all_under_two = rt.list_children("/system/service/two"); - assert!(all_under_two - .iter() - .any(|(p, pid)| p == "/system/service/two/grand" && *pid == c)); - - let direct_under_two = rt.list_children_direct("/system/service/two"); - // direct_under_two should include the grand child (it's one level under /system/service/two) - assert!(direct_under_two - .iter() - .any(|(p, pid)| p == "/system/service/two/grand" && *pid == c)); - - // ensure direct listing at the higher prefix doesn't show the grand child - let direct_under_service = rt.list_children_direct("/system/service"); - assert!(!direct_under_service - .iter() - .any(|(p, _)| p == "/system/service/two/grand")); - - // Test watch_path: ensure children become supervised - 
rt.watch_path("/system/service"); - let children = rt.list_children_direct("/system/service"); - for (_p, pid) in children { - assert!(rt.supervisor().contains_child(pid)); - } -} diff --git a/tests/test_100k.py b/tests/test_100k.py deleted file mode 100644 index 53f2dfc..0000000 --- a/tests/test_100k.py +++ /dev/null @@ -1,57 +0,0 @@ -import iris -import time -import sys - -# Configuration -COUNT = 100_000 # The Goal -BATCH_SIZE = 10000 - -print(f"--- Iris Stress Test: {iris.version()} ---") -print(f"Goal: Spawn {COUNT} actors.") - -rt = iris.Runtime() - -# A minimal handler that does nothing (to test pure overhead) -def no_op_handler(msg): - pass - -pids = [] -start_time = time.time() - -try: - print("🚀 Spawning...", flush=True) - for i in range(COUNT): - # Spawn actor with a small budget since they are tiny - pid = rt.spawn(no_op_handler, budget=10, release_gil=False) - pids.append(pid) - - if (i + 1) % BATCH_SIZE == 0: - elapsed = time.time() - start_time - rate = (i + 1) / elapsed - print(f" [{i + 1}/{COUNT}] Spawned. Rate: {rate:.0f} actors/sec", flush=True) - - spawn_time = time.time() - start_time - print(f"✅ Spawned {COUNT} actors in {spawn_time:.2f}s ({COUNT / spawn_time:.0f} actors/sec)") - - # Phase 2: Message Blitz - # We send a message to EVERY actor. - # This floods the Tokio runtime and forces 100k GIL acquisitions. 
- print(f"📨 Sending {COUNT} messages...", flush=True) - msg_start = time.time() - - for pid in pids: - rt.send(pid, b"ping") - - msg_time = time.time() - msg_start - print(f"✅ Sent {COUNT} messages in {msg_time:.2f}s ({COUNT / msg_time:.0f} msgs/sec)") - - print("🛑 Cleaning up (stopping all actors)...") - for pid in pids: - rt.stop(pid) - - print("Done.") - -except KeyboardInterrupt: - print("\n⚠️ Interrupted by user.") -except Exception as e: - print(f"\n❌ Failed: {e}") diff --git a/tests/test_discovery.py b/tests/test_discovery.py deleted file mode 100644 index 316e5c5..0000000 --- a/tests/test_discovery.py +++ /dev/null @@ -1,51 +0,0 @@ -# tests/test_discovery.py -import iris -import time -import threading - -def run_server(): - """Simulates Node A (The Provider)""" - rt_a = iris.Runtime() - rt_a.listen("127.0.0.1:9000") - - def auth_handler(msg): - print(f"\n[Node A] Auth Service received: {msg.decode()}") - - pid = rt_a.spawn(auth_handler) - rt_a.register("auth-service", pid) - print(f"[Node A] Listening on 127.0.0.1:9000...") - print(f"[Node A] 'auth-service' registered with PID: {pid}") - - # Keep the server thread alive - while True: - time.sleep(1) - -# 1. Start Node A in a background thread -server_thread = threading.Thread(target=run_server, daemon=True) -server_thread.start() - -# Give the server a moment to bind the port -time.sleep(1) - -# 2. Setup Node B (The Client) -print("--- Starting Remote Discovery Test ---") -rt_b = iris.Runtime() -node_a_addr = "127.0.0.1:9000" - -print(f"[Node B] Querying {node_a_addr} for 'auth-service'...") - -# 3. Remote Resolution -pid = rt_b.resolve_remote(node_a_addr, "auth-service") - -if pid: - print(f"[Node B] ✅ Resolved 'auth-service' to PID: {pid}") - - # 4. 
Send a message to the discovered remote PID - print("[Node B] Sending credentials...") - rt_b.send_remote(node_a_addr, pid, b"USER:seuriin_PASS:rust_is_awesome") - - # Small delay to ensure the network packet is processed before script exit - time.sleep(0.5) - print("[Node B] Test complete.") -else: - print("[Node B] ❌ Failed: Could not resolve service.") diff --git a/tests/test_distribution_self_healing.py b/tests/test_distribution_self_healing.py deleted file mode 100644 index 3ac56f7..0000000 --- a/tests/test_distribution_self_healing.py +++ /dev/null @@ -1,93 +0,0 @@ -# tests/test_distribution_self_healing.py -import multiprocessing -import time -import iris - -ADDR = "127.0.0.1:9010" -SERVICE_NAME = "reliable-worker" - -def run_flapping_provider(): - """ Node B: The Flapping Service """ - def start_node(): - rt = iris.Runtime() - rt.listen(ADDR) - - # This handler should only ever see user strings, never System Pings - def logic(msg): - print(f"📩 Provider received: {msg.decode()}") - - pid = rt.spawn(logic) - rt.register(SERVICE_NAME, pid) - print(f"🏗️ Provider: Service '{SERVICE_NAME}' is LIVE.") - - # Stay alive long enough for a few heartbeats (1s interval) - time.sleep(4) - print("🏗️ Provider: CRASHING NOW...") - - # Cycle 1 - p = multiprocessing.Process(target=start_node) - p.start() - p.join() - - print("🏗️ Provider: NODE IS DOWN (Simulating network failure)...") - time.sleep(3) - - # Cycle 2 (Restart) - print("🏗️ Provider: RESTARTING...") - p = multiprocessing.Process(target=start_node) - p.start() - p.join() - -def run_self_healing_monitor(): - """ Node A: The Resilient Observer """ - time.sleep(1) - rt = iris.Runtime() - - def client_factory(): - print("🛡️ Supervisor: Factory attempting to restore service link...") - while True: - try: - target_pid = rt.resolve_remote(ADDR, SERVICE_NAME) - if target_pid: - # the PID returned is a local proxy that automatically forwards - # to the remote service. we can monitor it directly and use it - # as our child. 
- rt.monitor_remote(ADDR, target_pid) - print(f"🛡️ Supervisor: Link restored to PID {target_pid}") - return target_pid - except Exception: - pass - time.sleep(0.5) - - # Supervise the connection - dummy_pid = rt.spawn(lambda m: None) - rt._inner.supervise_with_factory(dummy_pid, client_factory, "RestartOne") - - print("🔭 Monitor: Self-healing loop active.") - - for i in range(20): - children = rt._inner.child_pids() - if children: - current_child = children[0] - rt.send(current_child, f"PING-{i}".encode()) - - # Inspect messages to see if PING/PONG objects are arriving - msgs = rt._inner.get_messages(current_child) - for m in msgs: - if isinstance(m, iris.PySystemMessage) and m.type_name in ["PING", "PONG"]: - # These are generated by the heartbeats in network.rs - pass - - time.sleep(0.5) - -if __name__ == "__main__": - print(f"--- Myrmidon Self-Healing & Heartbeat Test ---") - p1 = multiprocessing.Process(target=run_flapping_provider) - p2 = multiprocessing.Process(target=run_self_healing_monitor) - - p1.start() - p2.start() - - p2.join() - p1.terminate() - print("✨ Self-healing test complete.") diff --git a/tests/test_hotswap.py b/tests/test_hotswap.py deleted file mode 100644 index ac7559d..0000000 --- a/tests/test_hotswap.py +++ /dev/null @@ -1,33 +0,0 @@ -# tests/test3.py -import iris -import time - -print("--- Phase 4: Hot Code Swapping Test ---") - -rt = iris.Runtime() - -def behavior_a(msg): - print(f"[A] Received: {msg.decode()}") - -def behavior_b(msg): - print(f"[B] Received: {msg.decode()} (UPGRADED)") - -# 1. Spawn with A -print("Spawning actor with Behavior A...") -pid = rt.spawn(behavior_a) -rt.send(pid, b"msg1") -time.sleep(0.1) - -# 2. Hot Swap to B -print("Attempting Hot Swap...") -rt.hot_swap(pid, behavior_b) -print("✅ Hot Swap signal sent.") - -# 3. 
Verify B -print("Sending second message...") -rt.send(pid, b"msg2") -time.sleep(0.1) - -rt.stop(pid) -rt.join(pid) -print("Test complete.") diff --git a/tests/test_hotswap2.py b/tests/test_hotswap2.py deleted file mode 100644 index f68d90b..0000000 --- a/tests/test_hotswap2.py +++ /dev/null @@ -1,67 +0,0 @@ -import iris -import time -import random - -# Configuration -SWAP_COUNT = 5000 # Total times we swap the behavior -MSG_COUNT = 20000 # Total messages to send during swaps -rt = iris.PyRuntime() - -print(f"--- Iris Hot Swap Stress Test: {iris.version()} ---") - -# Define two distinct behaviors to swap between -def logic_a(msg): - # Intentional no-op or simple math to keep it fast - _ = msg - -def logic_b(msg): - # Slightly different path - _ = len(msg) - -# 1. Spawn the initial actor (Push-based for maximum FFI speed) -pid = rt.spawn_py_handler(logic_a, budget=100) - -print(f"🚀 Spawning actor {pid}...") -print(f"🔥 Starting simultaneous Send/Swap blitz ({SWAP_COUNT} swaps, {MSG_COUNT} msgs)...") - -start_time = time.time() - -# We use two threads to create a race condition between sending and swapping -def sender_loop(): - for _ in range(MSG_COUNT): - rt.send(pid, b"test_payload") - # Tiny sleep to prevent saturating the GIL immediately - if _ % 1000 == 0: - time.sleep(0.001) - -def swapper_loop(): - for i in range(SWAP_COUNT): - # Rapidly toggle between behaviors - new_logic = logic_b if i % 2 == 0 else logic_a - rt.hot_swap(pid, new_logic) - - if (i + 1) % 500 == 0: - print(f" [Swap Progress] {i + 1}/{SWAP_COUNT} swaps completed...", end="\r") - -# 2. 
Run the stress test -import threading -t1 = threading.Thread(target=sender_loop) -t2 = threading.Thread(target=swapper_loop) - -t1.start() -t2.start() - -t1.join() -t2.join() - -total_time = time.time() - start_time - -print(f"\n\n✅ Benchmark Complete!") -print(f" Total Time: {total_time:.2f}s") -print(f" Swap Rate: {SWAP_COUNT / total_time:.0f} swaps/sec") -print(f" Message Rate: {MSG_COUNT / total_time:.0f} msgs/sec") - -# 3. Final Verification -print("🛑 Stopping actor...") -rt.stop(pid) -print("Done.") diff --git a/tests/test_jit_fallback_unittest.py b/tests/test_jit_fallback_unittest.py deleted file mode 100644 index dad9bb1..0000000 --- a/tests/test_jit_fallback_unittest.py +++ /dev/null @@ -1,687 +0,0 @@ -import unittest -import array -import io -import contextlib - -import iris.jit as jit_mod - - -class TestJitFallback(unittest.TestCase): - def test_jit_panic_runtime_error_falls_back(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - try: - def fake_call_jit(_func, _args, _kwargs): - raise RuntimeError("jit panic: simulated") - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit") - def add1(x): - return x + 1 - - self.assertEqual(add1(41), 42) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_complex_body_skips_jit_wrapper(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - try: - def fake_call_jit(_func, _args, _kwargs): - raise AssertionError("call_jit should not run for complex function body") - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit") - def prng_like(seed, n): - numbers = [] - x = seed - for i in range(n): - if i % 2 == 0: - x += 0.1 - x = (x + i * 0.1) % 1.0 - numbers.append(x) - return numbers - - out = prng_like(0.25, 3) - 
self.assertEqual(len(out), 3) - self.assertAlmostEqual(out[0], 0.35) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_stateful_loop_uses_step_jit(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0, "register": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return _func(*_args) - - def fake_register_offload(*_args, **_kwargs): - calls["register"] += 1 - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - - @jit_mod.offload(strategy="jit") - def accum(seed, n): - numbers = [] - x = seed - for i in range(n): - x = x + i + 1 - numbers.append(x) - return numbers - - out = accum(0.0, 3) - self.assertEqual(out, [1.0, 3.0, 6.0]) - self.assertGreaterEqual(calls["register"], 1) - self.assertEqual(calls["jit"], 3) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_complex_body_array_inputs_vectorized_fallback(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - try: - jit_mod.call_jit = lambda _func, _args, _kwargs: (_ for _ in ()).throw( - RuntimeError("no JIT entry found") - ) - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def branch_like(price, vol, strike): - x = price / strike - return x + vol - - prices = array.array("d", [100.0, 101.0, 102.0]) - vols = array.array("d", [0.2, 0.2, 0.2]) - strikes = array.array("d", [105.0, 105.0, 105.0]) - - out = branch_like(prices, vols, strikes) - self.assertEqual(len(out), 3) - self.assertAlmostEqual(float(out[0]), (100.0 / 105.0) + 0.2) - self.assertAlmostEqual(float(out[2]), (102.0 / 105.0) + 0.2) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def 
test_complex_body_vector_inputs_use_aggressive_jit(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0, "register": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - if isinstance(_args[0], (int, float)): - return (_args[0] / _args[2]) + _args[1] - # mimic old aggressive mode behavior for vector path - return _args[0] - - def fake_register_offload(*_args, **_kwargs): - calls["register"] += 1 - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - - @jit_mod.offload(strategy="jit", return_type="float") - def branch_like(price, vol, strike): - x = price / strike - return x + vol - - scalar_out = branch_like(100.0, 0.2, 105.0) - self.assertAlmostEqual(float(scalar_out), (100.0 / 105.0) + 0.2) - - prices = array.array("d", [100.0, 101.0, 102.0]) - vols = array.array("d", [0.2, 0.2, 0.2]) - strikes = array.array("d", [105.0, 105.0, 105.0]) - out = branch_like(prices, vols, strikes) - self.assertEqual(len(out), 3) - self.assertGreaterEqual(calls["register"], 1) - self.assertEqual(calls["jit"], 2) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_aggressive_jit_path_persists_quantum_metadata(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_maybe_persist = jit_mod._maybe_persist_quantum_metadata - - seen = {"persist": [], "register": []} - - try: - def fake_call_jit(_func, _args, _kwargs): - return 42.0 - - def fake_register_offload(_func, _strategy, _return_type, src, arg_names): - seen["register"].append((src, list(arg_names) if isinstance(arg_names, list) else arg_names)) - return None - - def fake_maybe_persist(func, src, arg_names, return_type, force=False): - seen["persist"].append((func.__name__, src, list(arg_names) if isinstance(arg_names, list) else arg_names, return_type, force)) - - 
jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - jit_mod._maybe_persist_quantum_metadata = fake_maybe_persist - - @jit_mod.offload(strategy="jit", return_type="float") - def branch_like(price, vol, strike): - x = price / strike - if x > 1.0: - return x + vol - return x - vol - - prices = array.array("d", [100.0, 101.0, 102.0]) - vols = array.array("d", [0.2, 0.2, 0.2]) - strikes = array.array("d", [105.0, 105.0, 105.0]) - out = branch_like(prices, vols, strikes) - - self.assertEqual(out, 42.0) - self.assertTrue(seen["persist"], "expected metadata persistence on aggressive JIT path") - _, persisted_src, persisted_args, persisted_return_type, persisted_force = seen["persist"][0] - self.assertIsNotNone(persisted_src) - self.assertEqual(persisted_args, ["price", "vol", "strike"]) - self.assertEqual(persisted_return_type, "float") - self.assertFalse(persisted_force) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod._maybe_persist_quantum_metadata = original_maybe_persist - - def test_aggressive_path_uses_effective_src_for_seed_register_and_persist(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_maybe_persist = jit_mod._maybe_persist_quantum_metadata - original_seed_from_metadata = jit_mod._seed_quantum_from_metadata - original_extract_return_expr_plan = jit_mod._extract_return_expr_plan - original_extract_inlined_expr_plan = jit_mod._extract_inlined_expr_plan - original_extract_stateful_loop_plan = jit_mod._extract_stateful_loop_plan - original_extract_scalar_while_plan = jit_mod._extract_scalar_while_plan - original_extract_scalar_for_plan = jit_mod._extract_scalar_for_plan - original_extract_last_return_expr = jit_mod._extract_last_return_expr - - seen = {"seed": [], "register": [], "persist": []} - - try: - jit_mod._extract_return_expr_plan = lambda *a, **k: None - jit_mod._extract_inlined_expr_plan = 
lambda *a, **k: None - jit_mod._extract_stateful_loop_plan = lambda *a, **k: None - jit_mod._extract_scalar_while_plan = lambda *a, **k: None - jit_mod._extract_scalar_for_plan = lambda *a, **k: None - jit_mod._extract_last_return_expr = lambda *a, **k: "x + 1" - - def fake_seed(func, src, arg_names, return_type): - seen["seed"].append(src) - return False - - def fake_register(_func, _strategy, _return_type, src, _arg_names): - seen["register"].append(src) - return None - - def fake_persist(_func, src, _arg_names, _return_type, force=False): - seen["persist"].append((src, force)) - - def fake_call_jit(_func, _args, _kwargs): - return 99.0 - - jit_mod._seed_quantum_from_metadata = fake_seed - jit_mod.register_offload = fake_register - jit_mod._maybe_persist_quantum_metadata = fake_persist - jit_mod.call_jit = fake_call_jit - - @jit_mod.offload(strategy="jit", return_type="float") - def complex_branch(x): - if x > 0: - return x + 1 - return x - 1 - - out = complex_branch(array.array("d", [1.0, 2.0])) - self.assertEqual(out, 99.0) - self.assertTrue(seen["seed"], "expected seed attempts") - self.assertTrue(seen["register"], "expected register call") - self.assertTrue(seen["persist"], "expected persist call") - self.assertTrue(all(src == "x + 1" for src in seen["seed"])) - self.assertEqual(seen["register"], ["x + 1"]) - self.assertEqual(seen["persist"], [("x + 1", False)]) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod._maybe_persist_quantum_metadata = original_maybe_persist - jit_mod._seed_quantum_from_metadata = original_seed_from_metadata - jit_mod._extract_return_expr_plan = original_extract_return_expr_plan - jit_mod._extract_inlined_expr_plan = original_extract_inlined_expr_plan - jit_mod._extract_stateful_loop_plan = original_extract_stateful_loop_plan - jit_mod._extract_scalar_while_plan = original_extract_scalar_while_plan - jit_mod._extract_scalar_for_plan = original_extract_scalar_for_plan - 
jit_mod._extract_last_return_expr = original_extract_last_return_expr - - def test_inlined_assignments_use_scalar_jit_path(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0, "register": 0} - seen = {"src": None} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return 123.0 - - def fake_register_offload(_func, _strategy, _return_type, src, _arg_names): - calls["register"] += 1 - seen["src"] = src - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - - @jit_mod.offload(strategy="jit", return_type="float") - def calc(price, strike, vol): - x = price / strike - y = x + vol - return y * 2 - - out = calc(100.0, 105.0, 0.2) - self.assertEqual(out, 123.0) - self.assertGreaterEqual(calls["register"], 1) - self.assertEqual(calls["jit"], 1) - self.assertIsNotNone(seen["src"]) - - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_inlined_if_else_use_scalar_jit_path(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return 7.0 - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def branchy(x, y): - z = x - y - if z > 0: - out = z * 2 - else: - out = y - x - return out + 1 - - out = branchy(3.0, 2.0) - self.assertEqual(out, 7.0) - self.assertEqual(calls["jit"], 1) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_inlined_elif_chain_use_scalar_jit_path(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return 11.0 - - 
jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def branchy_elif(x, y): - z = x - y - if z > 3: - out = z * 2 - elif z > 0: - out = z + 4 - else: - out = y - x - return out + 1 - - out = branchy_elif(5.0, 2.0) - self.assertEqual(out, 11.0) - self.assertEqual(calls["jit"], 1) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_inlined_if_without_else_use_scalar_jit_path(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - calls = {"jit": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return 9.0 - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def branchy_if_only(x, y): - out = x - if x > y: - out = out + 2 - return out + 1 - - out = branchy_if_only(4.0, 3.0) - self.assertEqual(out, 9.0) - self.assertEqual(calls["jit"], 1) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_inlined_pow_builtin_normalized(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - seen = {"src": None} - - try: - jit_mod.call_jit = lambda _func, _args, _kwargs: 5.0 - - def fake_register_offload(_func, _strategy, _return_type, src, _arg_names): - seen["src"] = src - return None - - jit_mod.register_offload = fake_register_offload - - @jit_mod.offload(strategy="jit", return_type="float") - def uses_pow(x, y): - z = pow(x, y) - return z + 1 - - _ = uses_pow(2.0, 3.0) - self.assertIsNotNone(seen["src"]) - self.assertIn("**", str(seen["src"])) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_inlined_annassign_use_scalar_jit_path(self): - original_call_jit = jit_mod.call_jit - 
original_register_offload = jit_mod.register_offload - - calls = {"jit": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return 21.0 - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def ann_calc(x: float, y: float): - z: float = x + y - return z * 2 - - out = ann_calc(2.0, 3.0) - self.assertEqual(out, 21.0) - self.assertEqual(calls["jit"], 1) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_scalar_while_uses_step_jit(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_step_loop = jit_mod.call_jit_step_loop_f64 - - calls = {"jit": 0, "register": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return _func(*_args) - - def fake_register_offload(*_args, **_kwargs): - calls["register"] += 1 - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - jit_mod.call_jit_step_loop_f64 = None - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_loop(seed, n): - x = seed - i = 0 - while i < n: - x = x + i + 1 - i += 1 - return x - - out = scalar_loop(0.0, 3) - self.assertEqual(out, 6.0) - self.assertGreaterEqual(calls["register"], 1) - self.assertEqual(calls["jit"], 3) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod.call_jit_step_loop_f64 = original_step_loop - - def test_scalar_for_uses_step_jit(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_step_loop = jit_mod.call_jit_step_loop_f64 - - calls = {"jit": 0, "register": 0} - - try: - def fake_call_jit(_func, _args, _kwargs): - calls["jit"] += 1 - return _func(*_args) - - def fake_register_offload(*_args, **_kwargs): - calls["register"] += 1 - return None - 
- jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - jit_mod.call_jit_step_loop_f64 = None - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_for(seed, n): - x = seed - for i in range(n): - x = x + i + 1 - return x - - out = scalar_for(0.0, 3) - self.assertEqual(out, 6.0) - self.assertGreaterEqual(calls["register"], 1) - self.assertEqual(calls["jit"], 3) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod.call_jit_step_loop_f64 = original_step_loop - - def test_scalar_for_prefers_rust_step_loop_api(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_step_loop = jit_mod.call_jit_step_loop_f64 - - calls = {"step_loop": 0} - - try: - jit_mod.call_jit = lambda _func, _args, _kwargs: (_ for _ in ()).throw( - AssertionError("per-iteration call_jit should not be used when step loop API is available") - ) - jit_mod.register_offload = lambda *a, **k: None - - def fake_step_loop(step_fn, seed, count): - calls["step_loop"] += 1 - state = float(seed) - for i in range(int(count)): - state = float(step_fn(state, float(i))) - return state - - jit_mod.call_jit_step_loop_f64 = fake_step_loop - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_for(seed, n): - x = seed - for i in range(n): - x = x + i + 1 - return x - - out = scalar_for(0.0, 3) - self.assertEqual(out, 6.0) - self.assertEqual(calls["step_loop"], 1) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod.call_jit_step_loop_f64 = original_step_loop - - def test_scalar_for_vector_inputs_fallback(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - - try: - jit_mod.call_jit = lambda _func, _args, _kwargs: (_ for _ in ()).throw( - AssertionError("scalar-for wrapper should use vectorized python fallback for vector inputs") 
- ) - jit_mod.register_offload = lambda *a, **k: None - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_for(seed, n): - x = seed - for i in range(n): - x = x + i + 1 - return x - - out = scalar_for(array.array("d", [0.0, 1.0]), 3) - self.assertEqual(len(out), 2) - self.assertAlmostEqual(float(out[0]), 6.0) - self.assertAlmostEqual(float(out[1]), 7.0) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - - def test_scalar_for_inlines_helper_call_into_step_src(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_step_loop = jit_mod.call_jit_step_loop_f64 - - seen = {"step_src": None} - - try: - def fake_call_jit(_func, _args, _kwargs): - return _func(*_args) - - def fake_register_offload(_func, _strategy, _return_type, src, _arg_names): - if src is not None and isinstance(_arg_names, list) and _arg_names == ["x", "i"]: - seen["step_src"] = src - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - jit_mod.call_jit_step_loop_f64 = None - - def helper_calc(a, b): - return (a * b) + (a - b) - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_for(seed, n): - x = seed - for i in range(int(n)): - x += helper_calc(x * 0.0001, float(i) * 0.001) - return x - - out = scalar_for(1.0, 3.0) - self.assertIsInstance(out, float) - self.assertIsNotNone(seen["step_src"]) - self.assertNotIn("helper_calc", str(seen["step_src"])) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod.call_jit_step_loop_f64 = original_step_loop - - def test_scalar_for_inlines_helper_with_declared_defaults_when_fully_applied(self): - original_call_jit = jit_mod.call_jit - original_register_offload = jit_mod.register_offload - original_step_loop = jit_mod.call_jit_step_loop_f64 - - seen = {"step_src": None} - - try: - def fake_call_jit(_func, _args, 
_kwargs): - return _func(*_args) - - def fake_register_offload(_func, _strategy, _return_type, src, _arg_names): - if src is not None and isinstance(_arg_names, list) and _arg_names == ["x", "i"]: - seen["step_src"] = src - return None - - jit_mod.call_jit = fake_call_jit - jit_mod.register_offload = fake_register_offload - jit_mod.call_jit_step_loop_f64 = None - - def helper_calc(a, b=1.0): - return (a * b) + (a - b) - - @jit_mod.offload(strategy="jit", return_type="float") - def scalar_for(seed, n): - x = seed - for i in range(int(n)): - x += helper_calc(x * 0.0001, float(i) * 0.001) - return x - - out = scalar_for(1.0, 3.0) - self.assertIsInstance(out, float) - self.assertIsNotNone(seen["step_src"]) - self.assertNotIn("helper_calc", str(seen["step_src"])) - finally: - jit_mod.call_jit = original_call_jit - jit_mod.register_offload = original_register_offload - jit_mod.call_jit_step_loop_f64 = original_step_loop - - def test_meta_log_skips_when_quantum_speculation_disabled(self): - """Ensure meta logs do not emit when quantum speculation is off.""" - original_get_jit_logging = jit_mod.get_jit_logging - original_get_quantum_speculation = jit_mod.get_quantum_speculation - - try: - jit_mod.get_jit_logging = lambda: True - jit_mod.get_quantum_speculation = lambda: False - - buf = io.StringIO() - with contextlib.redirect_stderr(buf): - jit_mod._jit_meta_log("should not log") - - self.assertEqual(buf.getvalue(), "") - finally: - jit_mod.get_jit_logging = original_get_jit_logging - jit_mod.get_quantum_speculation = original_get_quantum_speculation - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_mbox100k.py b/tests/test_mbox100k.py deleted file mode 100644 index 0e9e09b..0000000 --- a/tests/test_mbox100k.py +++ /dev/null @@ -1,73 +0,0 @@ -import iris -import time -import sys - -# Configuration -# NOTE: Mailbox actors use dedicated OS threads (via Tokio's blocking pool). -# The default Tokio limit is usually 512 blocking threads. 
-# Setting this to 100,000 WILL deadlock standard runtimes. -COUNT = 100_000 -BATCH_SIZE = 5000 - -print(f"--- Iris Mailbox Stress Test: {iris.version()} ---") -print(f"Goal: Spawn {COUNT} threaded mailbox actors.") - -# Instantiate the runtime -rt = iris.PyRuntime() - -# A blocking handler that loops until the channel is closed. -# This mimics a standard "Erlang-style" actor loop. -def mailbox_handler(mailbox): - while True: - # Blocks the underlying thread until a message arrives. - # Returns None if the actor is stopped (channel closed). - msg = mailbox.recv() - - if msg is None: - break - - # Do nothing with the message (simulate work) - pass - -pids = [] -start_time = time.time() - -try: - print("🚀 Spawning (Threaded)...", flush=True) - for i in range(COUNT): - # Spawn a threaded actor. - # This allocates a new buffer/channel and schedules a blocking task. - pid = rt.spawn_with_mailbox(mailbox_handler, budget=10) - pids.append(pid) - - if (i + 1) % BATCH_SIZE == 0: - elapsed = time.time() - start_time - rate = (i + 1) / elapsed - print(f" [{i + 1}/{COUNT}] Spawned. Rate: {rate:.0f} actors/sec", flush=True) - - spawn_time = time.time() - start_time - print(f"✅ Spawned {COUNT} actors in {spawn_time:.2f}s ({COUNT / spawn_time:.0f} actors/sec)") - - # Phase 2: Message Blitz - # Even though they are in threads, sending is still just pushing to a channel. - print(f"📨 Sending {COUNT} messages...", flush=True) - msg_start = time.time() - - for pid in pids: - rt.send(pid, b"ping") - - msg_time = time.time() - msg_start - print(f"✅ Sent {COUNT} messages in {msg_time:.2f}s ({COUNT / msg_time:.0f} msgs/sec)") - - print("🛑 Cleaning up (stopping all actors)...") - # This closes the channels. The 'mailbox.recv()' calls in the threads - # will return None, causing the loops to break and threads to exit. 
- for pid in pids: - rt.stop(pid) - - print("Done.") - -except KeyboardInterrupt: - print("\n⚠️ Interrupted by user.") -except Exception as e: - print(f"\n❌ Failed: {e}") diff --git a/tests/test_offload.py b/tests/test_offload.py deleted file mode 100644 index ef101bd..0000000 --- a/tests/test_offload.py +++ /dev/null @@ -1,70 +0,0 @@ -# test_offload.py -import time -import array -from iris.jit import offload - -# --- Simple Math (1 argument) --- -def square_normal(x: float) -> float: - return x * x - -@offload(strategy="jit", return_type="float") -def square_jit(x: float) -> float: - return x * x - -# --- Heavy Math (3 arguments) --- -def heavy_normal(x: float, y: float, z: float) -> float: - return (x * x + y * y + z * z) / (x + y + z + 1.0) * (x - y) + 42.0 - -@offload(strategy="jit", return_type="float") -def heavy_jit(x: float, y: float, z: float) -> float: - return (x * x + y * y + z * z) / (x + y + z + 1.0) * (x - y) + 42.0 - -def main() -> None: - iterations = 1_000_000 - print(f"Preparing {iterations:,} elements per array...") - - # Generate standard Python lists - py_x = [float(i) for i in range(iterations)] - py_y = [float(i + 1) for i in range(iterations)] - py_z = [float(i + 2) for i in range(iterations)] - - # Convert to contiguous C-style arrays - arr_x = array.array('d', py_x) - arr_y = array.array('d', py_y) - arr_z = array.array('d', py_z) - - # Warm-up JIT to trigger compilation before the timer starts - square_jit(1.0) - heavy_jit(1.0, 2.0, 3.0) - - print("\n--- Simple Math (1 array, 1,000,000 elements) ---") - - start = time.perf_counter() - _ = [square_normal(x) for x in py_x] - norm_simple_time = time.perf_counter() - start - print(f"Normal Python : {norm_simple_time:.4f} seconds") - - start = time.perf_counter() - _ = square_jit(arr_x) - jit_simple_time = time.perf_counter() - start - print(f"Iris JIT : {jit_simple_time:.4f} seconds") - print(f"Result : {norm_simple_time / jit_simple_time:.2f}x faster with JIT") - - - print("\n--- Heavy Math 
(3 arrays, 1,000,000 elements each) ---") - - start = time.perf_counter() - # Using zip to iterate through all three lists simultaneously in standard Python - _ = [heavy_normal(x, y, z) for x, y, z in zip(py_x, py_y, py_z)] - norm_heavy_time = time.perf_counter() - start - print(f"Normal Python : {norm_heavy_time:.4f} seconds") - - start = time.perf_counter() - # Passing all three C-arrays straight through the FFI boundary - _ = heavy_jit(arr_x, arr_y, arr_z) - jit_heavy_time = time.perf_counter() - start - print(f"Iris JIT : {jit_heavy_time:.4f} seconds") - print(f"Result : {norm_heavy_time / jit_heavy_time:.2f}x faster with JIT") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/tests/test_paths.py b/tests/test_paths.py deleted file mode 100644 index e398a56..0000000 --- a/tests/test_paths.py +++ /dev/null @@ -1,49 +0,0 @@ -import time -from iris import Runtime - - -def test_path_registry_and_watch(): - rt = Runtime() - - a = rt.spawn(lambda m: None, 10) - b = rt.spawn(lambda m: None, 10) - - rt.register_path("/system/service/one", a) - rt.register_path("/system/service/two", b) - - assert rt.whereis_path("/system/service/one") == a - assert rt.whereis_path("/system/service/two") == b - - children = rt.list_children("/system/service") - paths = [p for p, _ in children] - assert "/system/service/one" in paths - assert "/system/service/two" in paths - - direct = rt.list_children_direct("/system/service") - paths_direct = [p for p, _ in direct] - assert "/system/service/one" in paths_direct - assert "/system/service/two" in paths_direct - - c = rt.spawn(lambda m: None, 10) - rt.register_path("/system/service/two/grand", c) - - all_under_two = rt.list_children("/system/service/two") - assert any(p == "/system/service/two/grand" for p, _ in all_under_two) - - direct_under_two = rt.list_children_direct("/system/service/two") - assert any(p == "/system/service/two/grand" and pid == c for p, pid in direct_under_two) - - # ensure direct 
listing at the higher prefix doesn't show the grand child - direct_under_service = rt.list_children_direct("/system/service") - assert not any(p == "/system/service/two/grand" for p, _ in direct_under_service) - - # Test watch_path: ensure children become supervised - rt.watch_path("/system/service") - - # small sleep to let supervisor tasks register - time.sleep(0.05) - - cps = rt.child_pids() - assert a in cps - assert b in cps - diff --git a/tests/test_quantum_heavy_math.py b/tests/test_quantum_heavy_math.py deleted file mode 100644 index cf82ab3..0000000 --- a/tests/test_quantum_heavy_math.py +++ /dev/null @@ -1,90 +0,0 @@ -import array -import math -import unittest - -import iris.jit as jit - - -class TestQuantumHeavyMath(unittest.TestCase): - def setUp(self) -> None: - self._prev_quantum = jit.get_quantum_speculation() - self._prev_spec_threshold = jit.get_quantum_speculation_threshold() - self._prev_log_threshold = jit.get_quantum_log_threshold() - self._prev_budget = jit.get_quantum_compile_budget() - self._prev_cooldown = jit.get_quantum_cooldown() - - if self._prev_budget == (0, 0) and self._prev_cooldown == (0, 0): - self.skipTest("pyo3 quantum control APIs are not available in this environment") - - def tearDown(self) -> None: - jit.set_quantum_speculation(self._prev_quantum) - jit.set_quantum_speculation_threshold(self._prev_spec_threshold) - jit.set_quantum_log_threshold(self._prev_log_threshold) - jit.set_quantum_compile_budget(*self._prev_budget) - jit.set_quantum_cooldown(*self._prev_cooldown) - - def test_quantum_control_knobs_roundtrip(self) -> None: - jit.set_quantum_speculation(True) - - budget = jit.set_quantum_compile_budget(5_000_000, 1_000_000_000) - cooldown = jit.set_quantum_cooldown(1_000, 50_000) - spec_threshold = jit.set_quantum_speculation_threshold(0) - log_threshold = jit.set_quantum_log_threshold(0) - - self.assertEqual(budget, (5_000_000, 1_000_000_000)) - self.assertEqual(jit.get_quantum_compile_budget(), (5_000_000, 
1_000_000_000)) - self.assertEqual(cooldown, (1_000, 50_000)) - self.assertEqual(jit.get_quantum_cooldown(), (1_000, 50_000)) - self.assertEqual(spec_threshold, 0) - self.assertEqual(log_threshold, 0) - - def test_quantum_heavy_math_vectorized_correctness(self) -> None: - jit.set_quantum_speculation(True) - jit.set_quantum_speculation_threshold(0) - jit.set_quantum_log_threshold(0) - jit.set_quantum_compile_budget(10_000_000, 1_000_000_000) - jit.set_quantum_cooldown(0, 0) - - @jit.offload(strategy="jit", return_type="float") - def heavy_math(a, b, c, d): - return ( - (a * a + b * b + c * c + d * d) / (a + b + c + d + 1.0) - + math.sin(a) - + math.cos(b) - + math.exp(c * 0.001) - - math.log(d + 1.0) - ) - - scalar_out = heavy_math(2.0, 3.0, 4.0, 5.0) - scalar_expected = ( - (2.0 * 2.0 + 3.0 * 3.0 + 4.0 * 4.0 + 5.0 * 5.0) / (2.0 + 3.0 + 4.0 + 5.0 + 1.0) - + math.sin(2.0) - + math.cos(3.0) - + math.exp(4.0 * 0.001) - - math.log(5.0 + 1.0) - ) - self.assertAlmostEqual(float(scalar_out), float(scalar_expected), places=8) - - size = 4096 - a = array.array("d", (0.25 + i * 0.01 for i in range(size))) - b = array.array("d", (0.75 + i * 0.01 for i in range(size))) - c = array.array("d", (1.25 + i * 0.01 for i in range(size))) - d = array.array("d", (1.75 + i * 0.01 for i in range(size))) - - out = heavy_math(a, b, c, d) - self.assertEqual(len(out), size) - - for idx in (0, 17, 128, 1024, 2048, 4095): - expected = ( - (a[idx] * a[idx] + b[idx] * b[idx] + c[idx] * c[idx] + d[idx] * d[idx]) - / (a[idx] + b[idx] + c[idx] + d[idx] + 1.0) - + math.sin(a[idx]) - + math.cos(b[idx]) - + math.exp(c[idx] * 0.001) - - math.log(d[idx] + 1.0) - ) - self.assertAlmostEqual(float(out[idx]), float(expected), places=7) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_release_gil.py b/tests/test_release_gil.py deleted file mode 100644 index 2258689..0000000 --- a/tests/test_release_gil.py +++ /dev/null @@ -1,35 +0,0 @@ -import time -import threading -import pytest - 
-import iris - - -def test_release_gil_toggle(): - rt = iris.Runtime() - - seen = [] - - def handler_no(msg): - seen.append(("no", threading.get_ident())) - - def handler_yes(msg): - seen.append(("yes", threading.get_ident())) - - pid_no = rt.spawn(handler_no, budget=10, release_gil=False) - pid_yes = rt.spawn(handler_yes, budget=10, release_gil=True) - - rt.send(pid_no, b"ping") - rt.send(pid_yes, b"ping") - - # Allow the handlers time to run - time.sleep(0.2) - - tags = {t for (t, _) in seen} - assert "no" in tags and "yes" in tags - - no_tid = next(v for (t, v) in seen if t == "no") - yes_tid = next(v for (t, v) in seen if t == "yes") - - # Expect different thread ids when using the toggle (best-effort) - assert no_tid != yes_tid diff --git a/tests/test_restart_all_path_supervisors.py b/tests/test_restart_all_path_supervisors.py deleted file mode 100644 index 72817a3..0000000 --- a/tests/test_restart_all_path_supervisors.py +++ /dev/null @@ -1,42 +0,0 @@ -import time -import iris - -def test_path_supervisor_restart_all(): - rt = iris.Runtime() - - # spawn two observed actors under distinct child paths but same prefix - pid1 = rt.spawn_with_path_observed(10, "/svc/restart/all/one") - pid2 = rt.spawn_with_path_observed(10, "/svc/restart/all/two") - - # Create the path-scoped supervisor - rt.create_path_supervisor("/svc/restart/all") - - # Factories to spawn new observed actors during restart - def f1(): - return rt.spawn_with_path_observed(10, "/svc/restart/all/one") - - def f2(): - return rt.spawn_with_path_observed(10, "/svc/restart/all/two") - - # Register children with the 'restartall' strategy - rt.path_supervise_with_factory("/svc/restart/all", pid1, f1, "restartall") - rt.path_supervise_with_factory("/svc/restart/all", pid2, f2, "restartall") - - # stop one child — RestartAll should restart both - rt.stop(pid1) - - # wait and poll for restart activity - ok = False - for _ in range(40): - children = rt.path_supervisor_children("/svc/restart/all") - # Check 
that we have 2 children and at least one has a new PID - if len(children) == 2 and ((pid1 not in children) or (pid2 not in children)): - ok = True - break - time.sleep(0.05) - - assert ok, "supervisor did not restart children within timeout" - print("Test Success: Path supervisor 'restartall' strategy verified.") - -if __name__ == "__main__": - test_path_supervisor_restart_all() diff --git a/tests/test_restart_path_supervisors.py b/tests/test_restart_path_supervisors.py deleted file mode 100644 index 7dbe5fc..0000000 --- a/tests/test_restart_path_supervisors.py +++ /dev/null @@ -1,25 +0,0 @@ -import time -import iris - - -def test_path_supervisor_restart_one(): - rt = iris.Runtime() - - pid = rt.spawn_with_path_observed(10, "/svc/restart/one") - rt.create_path_supervisor("/svc/restart") - - def factory(): - # factory should create a new observed actor under the same path - return rt.spawn_with_path_observed(10, "/svc/restart/one") - - # Attach factory-based supervision (expected API) - rt.path_supervise_with_factory("/svc/restart", pid, factory, "restartone") - - # stop original - rt.stop(pid) - - # give supervisor a moment to restart - time.sleep(0.2) - - children = rt.path_supervisor_children("/svc/restart") - assert len(children) >= 1 diff --git a/tests/vortex_bytecode.rs b/tests/vortex_bytecode.rs new file mode 100644 index 0000000..61f168e --- /dev/null +++ b/tests/vortex_bytecode.rs @@ -0,0 +1,284 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::vortex_bytecode::*; +use std::collections::HashSet; + +#[test] +fn instrument_inserts_entry_and_backedge_sites() { + let meta = OpcodeMeta { + extended_arg: 144, + hasjabs: [113u16].into_iter().collect(), + hasjrel: HashSet::new(), + backward_relative: HashSet::new(), + }; + + let original = vec![ + Instruction { op: 9, arg: 0 }, + Instruction { op: 113, arg: 0 }, + ]; + let probe = vec![Instruction { op: 9, arg: 0 }]; + + let patched = + instrument_with_probe(&original, &probe, &meta).expect("instrumentation 
should be valid"); + assert!(patched.len() > original.len()); +} + +#[test] +fn verify_wordcode_bytes_rejects_invalid_shape() { + assert_eq!( + verify_wordcode_bytes(&[]), + Err(VerifyError::InvalidWordcodeShape) + ); + assert_eq!( + verify_wordcode_bytes(&[1]), + Err(VerifyError::InvalidWordcodeShape) + ); +} + +#[test] +fn verify_instructions_rejects_bad_abs_jump() { + let meta = OpcodeMeta { + extended_arg: 144, + hasjabs: [113u16].into_iter().collect(), + hasjrel: HashSet::new(), + backward_relative: HashSet::new(), + }; + + let code = vec![Instruction { op: 113, arg: 99 }]; + assert_eq!( + verify_instructions(&code, &meta), + Err(VerifyError::InvalidJumpTarget) + ); +} + +#[test] +fn verify_instructions_rejects_bad_backward_rel_jump() { + let meta = OpcodeMeta { + extended_arg: 144, + hasjabs: HashSet::new(), + hasjrel: [200u16].into_iter().collect(), + backward_relative: [200u16].into_iter().collect(), + }; + + let code = vec![Instruction { op: 200, arg: 2 }]; + assert_eq!( + verify_instructions(&code, &meta), + Err(VerifyError::InvalidRelativeJump) + ); +} + +#[test] +fn verify_cache_layout_rejects_missing_cache_slots() { + let quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: { + let mut v = vec![0u16; 256]; + v[10] = 2; + v + }, + }; + + let code = vec![ + Instruction { op: 10, arg: 0 }, + Instruction { op: 0, arg: 0 }, + ]; + assert_eq!( + verify_cache_layout(&code, &quick), + Err(VerifyError::InvalidCacheLayout) + ); +} + +#[test] +fn verify_cache_layout_accepts_expected_cache_slots() { + let quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: { + let mut v = vec![0u16; 256]; + v[10] = 2; + v + }, + }; + + let code = vec![ + Instruction { op: 10, arg: 0 }, + Instruction { op: 0, arg: 0 }, + Instruction { op: 0, arg: 0 }, + Instruction { op: 5, arg: 0 }, + ]; + assert_eq!(verify_cache_layout(&code, &quick), Ok(())); +} + +#[test] +fn evaluate_rewrite_compatibility_rejects_incomplete_cache_table() { + let 
quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: vec![0u16; 8], + }; + + let raw = vec![5u8, 0u8]; + assert_eq!( + evaluate_rewrite_compatibility(&raw, 144, &quick), + Err("inline_cache_entries_incomplete") + ); +} + +#[test] +fn evaluate_rewrite_compatibility_rejects_invalid_raw_shape() { + let quick = QuickeningSupport { + cache_opcode: None, + inline_cache_entries: vec![], + }; + + let raw = vec![1u8]; + assert_eq!( + evaluate_rewrite_compatibility(&raw, 144, &quick), + Err("invalid_wordcode_shape") + ); +} + +#[test] +fn evaluate_rewrite_compatibility_accepts_minimal_non_quickened() { + let quick = QuickeningSupport { + cache_opcode: None, + inline_cache_entries: vec![], + }; + + let raw = vec![5u8, 0u8, 6u8, 0u8]; + assert_eq!(evaluate_rewrite_compatibility(&raw, 144, &quick), Ok(())); +} + +#[test] +fn evaluate_rewrite_compatibility_rejects_invalid_original_cache_layout() { + let quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: { + let mut v = vec![0u16; 256]; + v[10] = 1; + v + }, + }; + + let raw = vec![10u8, 0u8, 5u8, 0u8]; + assert_eq!( + evaluate_rewrite_compatibility(&raw, 144, &quick), + Err("original_cache_layout_invalid") + ); +} + +#[test] +fn validate_probe_compatibility_rejects_empty_probe() { + let quick = QuickeningSupport { + cache_opcode: None, + inline_cache_entries: vec![], + }; + assert_eq!( + validate_probe_compatibility(&[], &quick), + Err("empty_probe") + ); +} + +#[test] +fn verify_exception_table_invariants_rejects_out_of_range_entry() { + let entries = vec![(0usize, 5usize, 1usize, 0usize)]; + assert_eq!( + verify_exception_table_invariants(&entries, 4, 8), + Err(VerifyError::InvalidExceptionTable) + ); +} + +#[test] +fn verify_exception_table_invariants_rejects_depth_over_stack() { + let entries = vec![(0usize, 1usize, 9usize, 0usize)]; + assert_eq!( + verify_exception_table_invariants(&entries, 4, 8), + Err(VerifyError::StackDepthInvariant) + ); +} + +#[test] +fn 
verify_exception_table_invariants_rejects_unsorted_ranges() { + let entries = vec![ + (2usize, 3usize, 0usize, 0usize), + (0usize, 1usize, 0usize, 0usize), + ]; + assert_eq!( + verify_exception_table_invariants(&entries, 4, 8), + Err(VerifyError::InvalidExceptionTable) + ); +} + +#[test] +fn verify_exception_table_invariants_rejects_duplicate_entries() { + let entries = vec![ + (0usize, 1usize, 0usize, 2usize), + (0usize, 1usize, 0usize, 2usize), + ]; + assert_eq!( + verify_exception_table_invariants(&entries, 4, 8), + Err(VerifyError::InvalidExceptionTable) + ); +} + +#[test] +fn verify_exception_table_invariants_accepts_sorted_unique_entries() { + let entries = vec![ + (0usize, 1usize, 0usize, 2usize), + (1usize, 3usize, 1usize, 3usize), + ]; + assert_eq!(verify_exception_table_invariants(&entries, 4, 8), Ok(())); +} + +#[test] +fn verify_exception_table_invariants_rejects_handler_target_out_of_range() { + let entries = vec![(0usize, 1usize, 0usize, 9usize)]; + assert_eq!( + verify_exception_table_invariants(&entries, 4, 8), + Err(VerifyError::InvalidExceptionTable) + ); +} + +#[test] +fn verify_exception_handler_targets_rejects_cache_opcode_target() { + let quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: vec![0u16; 256], + }; + let code = vec![ + Instruction { op: 10, arg: 0 }, + Instruction { op: 0, arg: 0 }, + ]; + let entries = vec![(0usize, 1usize, 0usize, 1usize)]; + + assert_eq!( + verify_exception_handler_targets(&entries, &code, &quick), + Err(VerifyError::InvalidExceptionTable) + ); +} + +#[test] +fn verify_exception_handler_targets_accepts_non_cache_target() { + let quick = QuickeningSupport { + cache_opcode: Some(0), + inline_cache_entries: vec![0u16; 256], + }; + let code = vec![ + Instruction { op: 10, arg: 0 }, + Instruction { op: 0, arg: 0 }, + Instruction { op: 5, arg: 0 }, + ]; + let entries = vec![(0usize, 1usize, 0usize, 2usize)]; + + assert_eq!( + verify_exception_handler_targets(&entries, &code, &quick), + Ok(()) 
+ ); +} + +#[test] +fn verify_stacksize_minimum_rejects_tiny_stack() { + assert_eq!( + verify_stacksize_minimum(1), + Err(VerifyError::StackDepthInvariant) + ); + assert_eq!(verify_stacksize_minimum(2), Ok(())); +} diff --git a/tests/vortex_engine.rs b/tests/vortex_engine.rs new file mode 100644 index 0000000..cde4bb3 --- /dev/null +++ b/tests/vortex_engine.rs @@ -0,0 +1,211 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::{ + VortexEngine, VortexGhostPolicy, VortexInstruction, VortexSuspend, VortexVioCall, +}; +use std::collections::HashMap; + +#[test] +fn vortex_engine_new_is_enabled() { + let engine = VortexEngine::new(); + assert!(engine.is_enabled()); +} + +#[test] +fn vortex_engine_preemption_and_transactions() { + let mut engine = VortexEngine::new(); + + assert!(engine.check_preemption(100)); + assert_eq!(engine.transmuter.instruction_budget, 924); + + engine.start_transaction(42); + assert!(engine.commit_transaction()); + assert!(!engine.abort_transaction()); + + engine.start_transaction(43); + assert!(engine.abort_transaction()); + assert!(!engine.commit_transaction()); + + engine.detach_stalled_thread(); + assert_eq!(engine.rescue_pool.active_count, 1); + engine.reclaim_thread(); + assert_eq!(engine.rescue_pool.active_count, 0); +} + +#[test] +fn vortex_engine_resume_after_suspend() { + let mut engine = VortexEngine::new(); + engine.set_budget(2); + + let code = vec![ + VortexInstruction::LoadFast(0), + VortexInstruction::BinaryOp(0), + VortexInstruction::StoreFast(0), + VortexInstruction::ReturnValue, + ]; + + engine.load_code(code); + + let first_run = engine.run(); + assert_eq!(first_run, Err(VortexSuspend)); + assert!(engine.current_code.is_some()); + assert!(engine.context.is_some()); + + engine.replenish_budget(10); + let second_run = engine.run(); + assert_eq!(second_run, Ok(())); + assert!(engine.current_code.is_none()); + assert!(engine.context.is_none()); +} + +#[test] +fn vortex_engine_transaction_checkpoint_and_vio() { + let mut engine = 
VortexEngine::new(); + let mut locals = HashMap::new(); + locals.insert("foo".to_string(), b"bar".to_vec()); + + engine.start_transaction_with_checkpoint(91, locals); + assert!(engine.stage_transaction_vio("send_email".to_string(), b"a".to_vec())); + assert!(engine.stage_transaction_vio("write_db".to_string(), b"b".to_vec())); + assert_eq!(engine.transaction_staged_vio_len(), 2); + + assert!(engine.commit_transaction()); + assert_eq!(engine.transaction_staged_vio_len(), 0); + assert_eq!(engine.transaction_committed_vio_len(), 2); + + let committed = engine.take_committed_transaction_vio(); + assert_eq!(committed.len(), 2); + assert_eq!(engine.transaction_committed_vio_len(), 0); +} + +#[test] +fn vortex_engine_abort_clears_staged_vio() { + let mut engine = VortexEngine::new(); + engine.start_transaction(92); + assert!(engine.stage_transaction_vio("x".to_string(), vec![1])); + assert!(engine.stage_transaction_vio("y".to_string(), vec![2])); + assert_eq!(engine.transaction_staged_vio_len(), 2); + + assert!(engine.abort_transaction()); + assert_eq!(engine.transaction_staged_vio_len(), 0); + assert_eq!(engine.transaction_committed_vio_len(), 0); +} + +#[test] +fn vortex_engine_stage_swap_applies_when_idle() { + let mut engine = VortexEngine::new(); + engine.stage_code_swap(vec![VortexInstruction::ReturnValue]); + + assert!(engine.try_apply_staged_swap()); + assert!(engine.pending_code_swap.is_none()); + assert!(engine.current_code.is_some()); + assert!(engine.context.is_some()); +} + +#[test] +fn vortex_engine_stage_swap_waits_for_quiescence() { + let mut engine = VortexEngine::new(); + engine.set_budget(2); + + engine.load_code(vec![ + VortexInstruction::LoadFast(0), + VortexInstruction::BinaryOp(0), + VortexInstruction::ReturnValue, + ]); + assert_eq!(engine.run(), Err(VortexSuspend)); + + engine.stage_code_swap(vec![VortexInstruction::ReturnValue]); + assert!(!engine.try_apply_staged_swap()); + assert!(engine.pending_code_swap.is_some()); +} + +#[test] +fn 
vortex_engine_stage_swap_applies_after_completion() { + let mut engine = VortexEngine::new(); + engine.set_budget(1); + + engine.load_code(vec![ + VortexInstruction::LoadFast(0), + VortexInstruction::ReturnValue, + ]); + engine.stage_code_swap(vec![VortexInstruction::ReturnValue]); + + assert_eq!(engine.run(), Err(VortexSuspend)); + engine.replenish_budget(10); + + assert_eq!(engine.run(), Ok(())); + assert!(engine.pending_code_swap.is_none()); + assert!(engine.current_code.is_some()); + assert!(engine.context.is_some()); +} + +#[test] +fn vortex_engine_resolve_primary_ghost_race_and_replay() { + let mut engine = VortexEngine::new(); + + let mut primary_locals = HashMap::new(); + primary_locals.insert("count".to_string(), vec![1]); + engine.start_transaction_with_checkpoint(1000, primary_locals); + assert!(engine.stage_transaction_vio("io_primary".to_string(), b"p".to_vec())); + + let mut ghost_locals = HashMap::new(); + ghost_locals.insert("count".to_string(), vec![2]); + engine.start_ghost_transaction_with_checkpoint(2000, ghost_locals); + assert!(engine.stage_ghost_transaction_vio(2000, "io_ghost_a".to_string(), b"a".to_vec())); + assert!(engine.stage_ghost_transaction_vio(2000, "io_ghost_b".to_string(), b"b".to_vec())); + + let resolution = engine + .resolve_primary_ghost_race(2000, 2000, VortexGhostPolicy::FirstSafePointWins) + .expect("resolution expected"); + assert_eq!(resolution.winner_id, 2000); + assert_eq!(resolution.committed_vio.len(), 2); + + let applied = engine.replay_committed_vio_calls(&resolution.committed_vio, |_call| true); + assert_eq!(applied, 2); +} + +#[test] +fn vortex_engine_prefer_primary_policy_and_ghost_cleanup() { + let mut engine = VortexEngine::new(); + + engine.start_transaction_with_checkpoint(3000, HashMap::new()); + assert!(engine.stage_transaction_vio("io_primary".to_string(), b"p".to_vec())); + + engine.start_ghost_transaction_with_checkpoint(4000, HashMap::new()); + assert!(engine.stage_ghost_transaction_vio(4000, 
"io_ghost".to_string(), b"g".to_vec())); + + let resolution = engine + .resolve_primary_ghost_race(4000, 4000, VortexGhostPolicy::PreferPrimary) + .expect("resolution expected"); + assert_eq!(resolution.winner_id, 3000); + assert_eq!(resolution.loser_id, 4000); + assert_eq!(resolution.committed_vio.len(), 1); + assert_eq!(resolution.committed_vio[0].op, "io_primary"); + + let second = engine.resolve_primary_ghost_race(4000, 4000, VortexGhostPolicy::PreferPrimary); + assert!(second.is_none()); +} + +#[test] +fn vortex_engine_replay_stops_on_executor_failure() { + let engine = VortexEngine::new(); + let calls = vec![ + VortexVioCall { + op: "a".to_string(), + payload: vec![1], + }, + VortexVioCall { + op: "b".to_string(), + payload: vec![2], + }, + ]; + + let mut seen = 0usize; + let applied = engine.replay_committed_vio_calls(&calls, |_call| { + seen += 1; + false + }); + + assert_eq!(applied, 1); + assert_eq!(seen, 1); +} diff --git a/tests/vortex_integration_test.rs b/tests/vortex_integration_test.rs new file mode 100644 index 0000000..ac7302e --- /dev/null +++ b/tests/vortex_integration_test.rs @@ -0,0 +1,80 @@ +#[cfg(feature = "vortex")] +use iris::Runtime; +#[cfg(feature = "vortex")] +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +#[cfg(feature = "vortex")] +#[tokio::test] +async fn vortex_actor_preemption_and_resume() { + let rt = Runtime::new(); + let mut engine = rt.vortex_engine().expect("vortex engine should exist"); + + assert!(engine.is_enabled()); + + engine.set_budget(1); + engine.load_code(vec![ + iris::vortex::VortexInstruction::LoadFast(0), + iris::vortex::VortexInstruction::BinaryOp(0), + iris::vortex::VortexInstruction::ReturnValue, + ]); + + let first = engine.run(); + assert_eq!(first, Err(iris::vortex::VortexSuspend)); + + engine.replenish_budget(5); + assert_eq!(engine.run(), Ok(())); +} + +#[cfg(feature = "vortex")] +#[tokio::test] +async fn vortex_operator_dispatch_preempts_actor_loop() { + let rt = Runtime::new(); + let 
counter = Arc::new(AtomicUsize::new(0)); + let c2 = counter.clone(); + + let pid = rt.spawn_handler_with_budget( + move |_msg| { + let counter2 = c2.clone(); + async move { + counter2.fetch_add(1, Ordering::SeqCst); + } + }, + 1, + ); + + assert!(rt.is_alive(pid)); + + for _ in 0..5 { + rt.send( + pid, + iris::mailbox::Message::User(bytes::Bytes::from_static(b"x")), + ) + .unwrap(); + } + + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + + assert_eq!(counter.load(Ordering::SeqCst), 5); + + // Rescue pool should have returned to 0 after backoff/reclaim cycles. + let engine = rt.vortex_engine().expect("vortex engine should exist"); + assert_eq!(engine.rescue_pool.active_count, 0); +} + +#[cfg(feature = "vortex")] +#[tokio::test] +async fn vortex_infinite_loop_preempts_without_hang() { + let rt = Runtime::new(); + let mut engine = rt.vortex_engine().expect("vortex engine should exist"); + + engine.set_budget(1); + engine.load_code(vec![iris::vortex::VortexInstruction::JumpBackward(0)]); + + // must not spin forever; should suspend because of injected reduction check. 
+ assert_eq!(engine.run(), Err(iris::vortex::VortexSuspend)); + engine.replenish_budget(1); + assert_eq!(engine.run(), Err(iris::vortex::VortexSuspend)); +} diff --git a/tests/vortex_rescue_pool.rs b/tests/vortex_rescue_pool.rs new file mode 100644 index 0000000..7191e83 --- /dev/null +++ b/tests/vortex_rescue_pool.rs @@ -0,0 +1,15 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::RescuePool; + +#[test] +fn rescue_pool_detach_and_reclaim() { + let mut pool = RescuePool::new(); + assert_eq!(pool.active_count, 0); + pool.detach_thread(); + assert_eq!(pool.active_count, 1); + pool.reclaim_thread(); + assert_eq!(pool.active_count, 0); + pool.reclaim_thread(); + assert_eq!(pool.active_count, 0); +} diff --git a/tests/vortex_scheduler.rs b/tests/vortex_scheduler.rs new file mode 100644 index 0000000..e88904c --- /dev/null +++ b/tests/vortex_scheduler.rs @@ -0,0 +1,9 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::VortexScheduler; + +#[test] +fn vortex_scheduler_describe() { + let sched = VortexScheduler::new(); + assert_eq!(sched.describe(), "vortex scheduler (stub)"); +} diff --git a/tests/vortex_transaction.rs b/tests/vortex_transaction.rs new file mode 100644 index 0000000..90f0009 --- /dev/null +++ b/tests/vortex_transaction.rs @@ -0,0 +1,98 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::{VortexGhostPolicy, VortexTransaction}; +use std::collections::HashMap; + +#[test] +fn vortex_transaction_commit_abort() { + let mut trx = VortexTransaction::new(1); + assert!(trx.commit()); + assert!(!trx.abort()); + assert!(!trx.commit()); + + let mut trx2 = VortexTransaction::new(2); + assert!(trx2.abort()); + assert!(!trx2.commit()); +} + +#[test] +fn vortex_transaction_checkpoint_and_vio_commit() { + let mut trx = VortexTransaction::new(10); + let mut locals = HashMap::new(); + locals.insert("counter".to_string(), vec![1, 2, 3]); + trx.checkpoint_locals(locals.clone()); + + assert_eq!(trx.local_checkpoint.get("counter"), Some(&vec![1, 2, 3])); + + 
assert!(trx.stage_vio("send_email".to_string(), b"payload-a".to_vec())); + assert!(trx.stage_vio("write_db".to_string(), b"payload-b".to_vec())); + assert_eq!(trx.staged_vio_len(), 2); + assert_eq!(trx.committed_vio_len(), 0); + + assert!(trx.commit()); + assert_eq!(trx.staged_vio_len(), 0); + assert_eq!(trx.committed_vio_len(), 2); + + let drained = trx.drain_committed_vio(); + assert_eq!(drained.len(), 2); + assert_eq!(trx.committed_vio_len(), 0); +} + +#[test] +fn vortex_transaction_abort_discards_staged_vio() { + let mut trx = VortexTransaction::new(11); + assert!(trx.stage_vio("io_a".to_string(), vec![9])); + assert!(trx.stage_vio("io_b".to_string(), vec![8])); + assert_eq!(trx.staged_vio_len(), 2); + + assert!(trx.abort()); + assert_eq!(trx.staged_vio_len(), 0); + assert_eq!(trx.committed_vio_len(), 0); + assert!(!trx.stage_vio("io_c".to_string(), vec![7])); +} + +#[test] +fn vortex_transaction_resolve_ghost_race_first_wins() { + let mut primary = VortexTransaction::new(100); + let mut ghost = VortexTransaction::new(200); + + assert!(primary.stage_vio("io_primary".to_string(), b"p".to_vec())); + assert!(ghost.stage_vio("io_ghost".to_string(), b"g".to_vec())); + + let result = VortexTransaction::resolve_ghost_race( + &mut primary, + &mut ghost, + 200, + VortexGhostPolicy::FirstSafePointWins, + ) + .expect("resolution should succeed"); + + assert_eq!(result.winner_id, 200); + assert_eq!(result.loser_id, 100); + assert_eq!(result.committed_vio.len(), 1); + assert!(primary.aborted); + assert!(ghost.committed); +} + +#[test] +fn vortex_transaction_resolve_ghost_race_prefer_primary() { + let mut primary = VortexTransaction::new(101); + let mut ghost = VortexTransaction::new(201); + + assert!(primary.stage_vio("io_primary".to_string(), b"p".to_vec())); + assert!(ghost.stage_vio("io_ghost".to_string(), b"g".to_vec())); + + let result = VortexTransaction::resolve_ghost_race( + &mut primary, + &mut ghost, + 201, + VortexGhostPolicy::PreferPrimary, + ) + 
.expect("resolution should succeed"); + + assert_eq!(result.winner_id, 101); + assert_eq!(result.loser_id, 201); + assert_eq!(result.committed_vio.len(), 1); + assert!(primary.committed); + assert!(ghost.aborted); +} diff --git a/tests/vortex_transmuter.rs b/tests/vortex_transmuter.rs new file mode 100644 index 0000000..5180027 --- /dev/null +++ b/tests/vortex_transmuter.rs @@ -0,0 +1,69 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::{VortexInstruction, VortexSuspend, VortexTransmuter}; + +#[test] +fn vortex_transmuter_budget_allocation() { + let mut t = VortexTransmuter::new(100); + assert!(t.inject_reduction_checks(20)); + assert_eq!(t.instruction_budget, 80); + assert!(t.inject_reduction_checks(80)); + assert_eq!(t.instruction_budget, 0); + assert!(!t.inject_reduction_checks(1)); + assert!(!t.enabled); +} + +#[test] +fn vortex_transmuter_transmute_injects_entry_and_loop_checks() { + let transmuter = VortexTransmuter::new(10); + let code = vec![ + VortexInstruction::LoadFast(0), + VortexInstruction::JumpBackward(0), + VortexInstruction::ReturnValue, + ]; + let transmuted = transmuter.transmute(&code); + + assert_eq!(transmuted[0], VortexInstruction::IrisReductionCheck); + assert!(transmuted.contains(&VortexInstruction::IrisReductionCheck)); + assert!(transmuted.len() > code.len()); +} + +#[test] +fn vortex_transmuter_execute_suspends_on_budget_exhaustion() { + let mut transmuter = VortexTransmuter::new(1); + let code = vec![ + VortexInstruction::IrisReductionCheck, + VortexInstruction::LoadFast(0), + VortexInstruction::ReturnValue, + ]; + + let result = transmuter.execute(&code); + + assert_eq!(result, Err(VortexSuspend)); +} + +#[test] +fn vortex_transmuter_execute_completes() { + let mut transmuter = VortexTransmuter::new(10); + let code = vec![ + VortexInstruction::IrisReductionCheck, + VortexInstruction::LoadFast(0), + VortexInstruction::ReturnValue, + ]; + + assert_eq!(transmuter.execute(&code), Ok(())); +} + +#[test] +fn 
vortex_quiescence_points_found() { + let transmuter = VortexTransmuter::new(10); + let code = vec![ + VortexInstruction::LoadFast(0), + VortexInstruction::StoreFast(0), + VortexInstruction::ReturnValue, + ]; + let points = transmuter.quiescence_points(&code); + + assert!(points.contains(&1)); + assert!(points.contains(&2)); +} diff --git a/tests/vortex_watcher.rs b/tests/vortex_watcher.rs new file mode 100644 index 0000000..30a7c34 --- /dev/null +++ b/tests/vortex_watcher.rs @@ -0,0 +1,19 @@ +#![cfg(feature = "vortex")] + +use iris::vortex::VortexWatcher; + +#[test] +fn vortex_watcher_health() { + let watcher = VortexWatcher::new(); + assert_eq!(watcher.health(), "vortex watcher healthy"); +} + +#[tokio::test] +async fn vortex_watcher_enable_disable() { + let watcher = VortexWatcher::new(); + assert!(!watcher.is_enabled()); + watcher.enable(); + assert!(watcher.is_enabled()); + watcher.disable(); + assert!(!watcher.is_enabled()); +}