From 5921a6079195ee0bbed5595a12cc0c68984cf5a7 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Mon, 23 Mar 2026 16:49:20 +1100 Subject: [PATCH 1/6] Shrink `start_query` scope. There are three `start_query` calls. Two of them have very small scopes, just an `invoke_provider_fn` call and very little else. But the one in `execute_job_incr` has a huge scope that covers a `try_mark_green` call and everything done by `load_from_disk_or_invoke_provider_green`: a disk load attempt, `prof_timer` start/stop; an `invoke_provider_fn` call, and hash verification. This commit moves the `start_query` call into `load_from_disk_or_invoke_provider_green` to make the scope match the other two cases. Most of the code that is moved out of the `start_query` scope is unaffected. `try_mark_green` is affected, but in a good way. With the old scope, when going through `try_mark_green` two calls to `start_query` would occur on the way to an actual `invoke_provider_fn`. This is why `false` was passed for `depth_limit`; this `false` is no longer necessary. --- compiler/rustc_query_impl/src/execution.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 8844d40f49cf3..3db5a57352a43 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -421,9 +421,7 @@ fn execute_job_incr<'tcx, C: QueryCache>( tcx.dep_graph.data().expect("should always be present in incremental mode"); if !query.eval_always { - // The diagnostics for this query will be promoted to the current session during - // `try_mark_green()`, so we can ignore them here. 
- if let Some(ret) = start_query(job_id, false, || try { + if let Some(ret) = try { let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, &dep_node)?; let value = load_from_disk_or_invoke_provider_green( tcx, @@ -431,11 +429,12 @@ fn execute_job_incr<'tcx, C: QueryCache>( query, key, &dep_node, + job_id, prev_index, dep_node_index, ); (value, dep_node_index) - }) { + } { return ret; } } @@ -468,6 +467,7 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( query: &'tcx QueryVTable<'tcx, C>, key: C::Key, dep_node: &DepNode, + job_id: QueryJobId, prev_index: SerializedDepNodeIndex, dep_node_index: DepNodeIndex, ) -> C::Value { @@ -502,7 +502,9 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( // We could not load a result from the on-disk cache, so recompute. The dep-graph for // this computation is already in-place, so we can just call the query provider. let prof_timer = tcx.prof.query_provider(); - value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)); + value = start_query(job_id, query.depth_limit, || { + tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)) + }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); verify = true; From 2f29593a1149d502730833661b4e96af434002b1 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Mon, 23 Mar 2026 19:36:35 +1100 Subject: [PATCH 2/6] Move the `cache_on_disk` check out of `try_load_from_disk_fn`. It doesn't need to be in there. Removing it makes `define_queries` smaller, which is always good, and also enables the next commit which tidies up profiling. Also change how `value` and `verify` are initialized, because I just don't like the current way of doing it after the declaration. 
--- compiler/rustc_middle/src/query/plumbing.rs | 4 +++- compiler/rustc_query_impl/src/execution.rs | 22 ++++++++++++--------- compiler/rustc_query_impl/src/query_impl.rs | 9 ++------- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index ef6259b1a0c1a..b6da73e672b70 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -98,9 +98,11 @@ pub struct QueryVTable<'tcx, C: QueryCache> { pub will_cache_on_disk_for_key_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> bool, + /// Function pointer that tries to load a query value from disk. + /// + /// This should only be called after a successful check of `will_cache_on_disk_for_key_fn`. pub try_load_from_disk_fn: fn( tcx: TyCtxt<'tcx>, - key: C::Key, prev_index: SerializedDepNodeIndex, index: DepNodeIndex, ) -> Option, diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 3db5a57352a43..a904b44c96d56 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -477,16 +477,18 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( debug_assert!(dep_graph_data.is_index_green(prev_index)); // First try to load the result from the on-disk cache. Some things are never cached on disk. 
- let value; - let verify; - match (query.try_load_from_disk_fn)(tcx, key, prev_index, dep_node_index) { - Some(loaded_value) => { + let try_value = if (query.will_cache_on_disk_for_key_fn)(tcx, key) { + (query.try_load_from_disk_fn)(tcx, prev_index, dep_node_index) + } else { + None + }; + + let (value, verify) = match try_value { + Some(value) => { if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) { dep_graph_data.mark_debug_loaded_from_disk(*dep_node) } - value = loaded_value; - let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index); // If `-Zincremental-verify-ich` is specified, re-hash results from // the cache and make sure that they have the expected fingerprint. @@ -495,19 +497,21 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( // from disk. Re-hashing results is fairly expensive, so we can't // currently afford to verify every hash. This subset should still // give us some coverage of potential bugs. - verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) + let verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) || tcx.sess.opts.unstable_opts.incremental_verify_ich; + + (value, verify) } None => { // We could not load a result from the on-disk cache, so recompute. The dep-graph for // this computation is already in-place, so we can just call the query provider. let prof_timer = tcx.prof.query_provider(); - value = start_query(job_id, query.depth_limit, || { + let value = start_query(job_id, query.depth_limit, || { tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - verify = true; + (value, true) } }; diff --git a/compiler/rustc_query_impl/src/query_impl.rs b/compiler/rustc_query_impl/src/query_impl.rs index 60ddf3e4ad381..15ba58452b830 100644 --- a/compiler/rustc_query_impl/src/query_impl.rs +++ b/compiler/rustc_query_impl/src/query_impl.rs @@ -148,14 +148,9 @@ macro_rules! 
define_queries { will_cache_on_disk_for_key_fn: |_, _| false, #[cfg($cache_on_disk)] - try_load_from_disk_fn: |tcx, key, prev_index, index| { + try_load_from_disk_fn: |tcx, prev_index, index| { use rustc_middle::queries::$name::{ProvidedValue, provided_to_erased}; - // Check the `cache_on_disk_if` condition for this key. - if !rustc_middle::queries::_cache_on_disk_if_fns::$name(tcx, key) { - return None; - } - let loaded_value: ProvidedValue<'tcx> = $crate::plumbing::try_load_from_disk(tcx, prev_index, index)?; @@ -163,7 +158,7 @@ macro_rules! define_queries { Some(provided_to_erased(tcx, loaded_value)) }, #[cfg(not($cache_on_disk))] - try_load_from_disk_fn: |_tcx, _key, _prev_index, _index| None, + try_load_from_disk_fn: |_tcx, _prev_index, _index| None, // The default just emits `err` and then aborts. // `handle_cycle_error::specialize_query_vtables` overwrites this default From 0127f5aa61cde4a8a962c175bc666c70ce95af12 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Mon, 23 Mar 2026 19:43:38 +1100 Subject: [PATCH 3/6] Move profiling of query loading outwards. From `plumbing.rs` to `execution.rs`, which is where most of the other query profiling occurs. It also eliminates some fn parameters. This means the `provided_to_erased` call in `try_load_from_disk_fn` is now included in the profiling when previously it wasn't. This is good because `provided_to_erased` is included in other profiling calls (e.g. calls to `invoke_provider_fn`).
--- compiler/rustc_middle/src/query/plumbing.rs | 7 ++----- compiler/rustc_query_impl/src/execution.rs | 5 ++++- compiler/rustc_query_impl/src/plumbing.rs | 13 ++----------- compiler/rustc_query_impl/src/query_impl.rs | 6 +++--- 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index b6da73e672b70..4de531739d642 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -101,11 +101,8 @@ pub struct QueryVTable<'tcx, C: QueryCache> { /// Function pointer that tries to load a query value from disk. /// /// This should only be called after a successful check of `will_cache_on_disk_for_key_fn`. - pub try_load_from_disk_fn: fn( - tcx: TyCtxt<'tcx>, - prev_index: SerializedDepNodeIndex, - index: DepNodeIndex, - ) -> Option, + pub try_load_from_disk_fn: + fn(tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) -> Option, /// Function pointer that hashes this query's result values. /// diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index a904b44c96d56..1b713cc1340e1 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -478,7 +478,10 @@ fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( // First try to load the result from the on-disk cache. Some things are never cached on disk. 
let try_value = if (query.will_cache_on_disk_for_key_fn)(tcx, key) { - (query.try_load_from_disk_fn)(tcx, prev_index, dep_node_index) + let prof_timer = tcx.prof.incr_cache_loading(); + let value = (query.try_load_from_disk_fn)(tcx, prev_index); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + value } else { None }; diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index ef4fff293bf65..b93bf26fc4f93 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -5,7 +5,7 @@ use rustc_hir::limit::Limit; use rustc_middle::bug; #[expect(unused_imports, reason = "used by doc comments")] use rustc_middle::dep_graph::DepKindVTable; -use rustc_middle::dep_graph::{DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex}; +use rustc_middle::dep_graph::{DepNode, DepNodeKey, SerializedDepNodeIndex}; use rustc_middle::query::erase::{Erasable, Erased}; use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder}; use rustc_middle::query::{QueryCache, QueryJobId, QueryMode, QueryVTable, erase}; @@ -184,23 +184,14 @@ pub(crate) fn loadable_from_disk<'tcx>(tcx: TyCtxt<'tcx>, id: SerializedDepNodeI pub(crate) fn try_load_from_disk<'tcx, V>( tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex, - index: DepNodeIndex, ) -> Option where V: for<'a> Decodable>, { let on_disk_cache = tcx.query_system.on_disk_cache.as_ref()?; - let prof_timer = tcx.prof.incr_cache_loading(); - // The call to `with_query_deserialization` enforces that no new `DepNodes` // are created during deserialization. See the docs of that method for more // details. 
- let value = tcx - .dep_graph - .with_query_deserialization(|| on_disk_cache.try_load_query_value(tcx, prev_index)); - - prof_timer.finish_with_query_invocation_id(index.into()); - - value + tcx.dep_graph.with_query_deserialization(|| on_disk_cache.try_load_query_value(tcx, prev_index)) } diff --git a/compiler/rustc_query_impl/src/query_impl.rs b/compiler/rustc_query_impl/src/query_impl.rs index 15ba58452b830..ee14b8e064cae 100644 --- a/compiler/rustc_query_impl/src/query_impl.rs +++ b/compiler/rustc_query_impl/src/query_impl.rs @@ -148,17 +148,17 @@ macro_rules! define_queries { will_cache_on_disk_for_key_fn: |_, _| false, #[cfg($cache_on_disk)] - try_load_from_disk_fn: |tcx, prev_index, index| { + try_load_from_disk_fn: |tcx, prev_index| { use rustc_middle::queries::$name::{ProvidedValue, provided_to_erased}; let loaded_value: ProvidedValue<'tcx> = - $crate::plumbing::try_load_from_disk(tcx, prev_index, index)?; + $crate::plumbing::try_load_from_disk(tcx, prev_index)?; // Arena-alloc the value if appropriate, and erase it. Some(provided_to_erased(tcx, loaded_value)) }, #[cfg(not($cache_on_disk))] - try_load_from_disk_fn: |_tcx, _prev_index, _index| None, + try_load_from_disk_fn: |_tcx, _prev_index| None, // The default just emits `err` and then aborts. // `handle_cycle_error::specialize_query_vtables` overwrites this default From dd203a062f562f72af7ff3cb77ed31382b80a4ef Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Mon, 23 Mar 2026 19:53:36 +1100 Subject: [PATCH 4/6] Inline and remove `load_from_disk_or_invoke_provider_green`. It has a single call site. After inlining we can refactor `execute_job_incr` quite a bit and the overall result is simpler and clearer, especially the control flow -- no `try` block in an `if` condition(!), no early returns, just vanilla `if`s and `match`es and about -40 lines of code. 
--- compiler/rustc_query_impl/src/execution.rs | 183 ++++++++------------- 1 file changed, 72 insertions(+), 111 deletions(-) diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 1b713cc1340e1..84119d69177be 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -6,7 +6,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::sync::{DynSend, DynSync}; use rustc_data_structures::{outline, sharded, sync}; use rustc_errors::FatalError; -use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex}; +use rustc_middle::dep_graph::DepNodeKey; use rustc_middle::query::{ ActiveKeyStatus, Cycle, EnsureMode, QueryCache, QueryJob, QueryJobId, QueryKey, QueryLatch, QueryMode, QueryState, QueryVTable, @@ -391,7 +391,6 @@ fn execute_job_non_incr<'tcx, C: QueryCache>( debug_assert!(!tcx.dep_graph.is_fully_enabled()); let prof_timer = tcx.prof.query_provider(); - // Call the query provider. let value = start_query(job_id, query.depth_limit, || (query.invoke_provider_fn)(tcx, key)); let dep_node_index = tcx.dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -420,125 +419,87 @@ fn execute_job_incr<'tcx, C: QueryCache>( let dep_graph_data = tcx.dep_graph.data().expect("should always be present in incremental mode"); - if !query.eval_always { - if let Some(ret) = try { - let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, &dep_node)?; - let value = load_from_disk_or_invoke_provider_green( - tcx, - dep_graph_data, - query, - key, - &dep_node, - job_id, - prev_index, - dep_node_index, - ); - (value, dep_node_index) - } { - return ret; - } - } - - let prof_timer = tcx.prof.query_provider(); - - let (result, dep_node_index) = start_query(job_id, query.depth_limit, || { - // Call the query provider. 
- dep_graph_data.with_task( - dep_node, - tcx, - (query, key), - |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key), - query.hash_value_fn, - ) - }); - - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - - (result, dep_node_index) -} - -/// Given that the dep node for this query+key is green, obtain a value for it -/// by loading one from disk if possible, or by invoking its query provider if -/// necessary. -#[inline(always)] -fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( - tcx: TyCtxt<'tcx>, - dep_graph_data: &DepGraphData, - query: &'tcx QueryVTable<'tcx, C>, - key: C::Key, - dep_node: &DepNode, - job_id: QueryJobId, - prev_index: SerializedDepNodeIndex, - dep_node_index: DepNodeIndex, -) -> C::Value { - // Note this function can be called concurrently from the same query - // We must ensure that this is handled correctly. + if !query.eval_always + && let Some((prev_index, dep_node_index)) = dep_graph_data.try_mark_green(tcx, &dep_node) + { + // Note this function can be called concurrently from the same query. We must ensure that + // this is handled correctly. + + // First try to load the result from the on-disk cache. Some things are never cached on + // disk. + let try_value = if (query.will_cache_on_disk_for_key_fn)(tcx, key) { + let prof_timer = tcx.prof.incr_cache_loading(); + let value = (query.try_load_from_disk_fn)(tcx, prev_index); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + value + } else { + None + }; - debug_assert!(dep_graph_data.is_index_green(prev_index)); + let (value, verify) = match try_value { + Some(value) => { + if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) { + dep_graph_data.mark_debug_loaded_from_disk(dep_node) + } - // First try to load the result from the on-disk cache. Some things are never cached on disk. 
- let try_value = if (query.will_cache_on_disk_for_key_fn)(tcx, key) { - let prof_timer = tcx.prof.incr_cache_loading(); - let value = (query.try_load_from_disk_fn)(tcx, prev_index); - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - value - } else { - None - }; + let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index); + // If `-Zincremental-verify-ich` is specified, re-hash every result from the cache + // to make sure it has the expected fingerprint. Otherwise, re-hash a fraction to + // give some coverage at low cost. + let verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) + || tcx.sess.opts.unstable_opts.incremental_verify_ich; - let (value, verify) = match try_value { - Some(value) => { - if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) { - dep_graph_data.mark_debug_loaded_from_disk(*dep_node) + (value, verify) } + None => { + // We could not load a result from the on-disk cache, so recompute. The dep-graph + // for this computation is already in-place, so we can just call the query + // provider. + let prof_timer = tcx.prof.query_provider(); + let value = start_query(job_id, query.depth_limit, || { + tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)) + }); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + (value, true) + } + }; - let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index); - // If `-Zincremental-verify-ich` is specified, re-hash results from - // the cache and make sure that they have the expected fingerprint. + if verify { + // Verify that re-running the query produced a result with the expected hash. This + // catches bugs in query implementations, turning them into ICEs. For example, a query + // might sort its result by `DefId`.
Because `DefId`s are not stable across compilation + // sessions, the result could get up getting sorted in a different order when the query + // is re-run, even though all of the inputs (e.g. `DefPathHash` values) were green. // - // If not, we still seek to verify a subset of fingerprints loaded - // from disk. Re-hashing results is fairly expensive, so we can't - // currently afford to verify every hash. This subset should still - // give us some coverage of potential bugs. - let verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) - || tcx.sess.opts.unstable_opts.incremental_verify_ich; - - (value, verify) + // See issue #82920 for an example of a miscompilation that would get turned into an + // ICE by this check. + incremental_verify_ich( + tcx, + dep_graph_data, + &value, + prev_index, + query.hash_value_fn, + query.format_value, + ); } - None => { - // We could not load a result from the on-disk cache, so recompute. The dep-graph for - // this computation is already in-place, so we can just call the query provider. - let prof_timer = tcx.prof.query_provider(); - let value = start_query(job_id, query.depth_limit, || { - tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)) - }); - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - (value, true) - } - }; + (value, dep_node_index) + } else { + let prof_timer = tcx.prof.query_provider(); + let (value, dep_node_index) = start_query(job_id, query.depth_limit, || { + dep_graph_data.with_task( + dep_node, + tcx, + (query, key), + |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key), + query.hash_value_fn, + ) + }); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - if verify { - // Verify that re-running the query produced a result with the expected hash. - // This catches bugs in query implementations, turning them into ICEs. 
- // For example, a query might sort its result by `DefId` - since `DefId`s are - // not stable across compilation sessions, the result could get up getting sorted - // in a different order when the query is re-run, even though all of the inputs - // (e.g. `DefPathHash` values) were green. - // - // See issue #82920 for an example of a miscompilation that would get turned into - // an ICE by this check - incremental_verify_ich( - tcx, - dep_graph_data, - &value, - prev_index, - query.hash_value_fn, - query.format_value, - ); + (value, dep_node_index) } - - value } /// Checks whether a `tcx.ensure_ok()` or `tcx.ensure_done()` query call can From c5508add04a0ac3ea3c3a6cc071bcd25d6c8e2c9 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Tue, 24 Mar 2026 09:33:12 +1100 Subject: [PATCH 5/6] Comment fiddling. Remove a low-value comment, and add a comment about hashing explaining something that I found non-obvious. --- compiler/rustc_query_impl/src/execution.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 84119d69177be..43b24597f6325 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -380,7 +380,6 @@ fn check_feedable_consistency<'tcx, C: QueryCache>( } } -// Fast path for when incr. comp. is off. #[inline(always)] fn execute_job_non_incr<'tcx, C: QueryCache>( query: &'tcx QueryVTable<'tcx, C>, @@ -396,6 +395,8 @@ fn execute_job_non_incr<'tcx, C: QueryCache>( prof_timer.finish_with_query_invocation_id(dep_node_index.into()); // Sanity: Fingerprint the key and the result to assert they don't contain anything unhashable. + // (There is no corresponding sanity check in `execute_job_incr` because it fingerprints the + // key and result as part of its normal operation.) 
if cfg!(debug_assertions) { let _ = key.to_fingerprint(tcx); if let Some(hash_value_fn) = query.hash_value_fn { From d595192993d839f1870d1e69dccb42ca79d9644d Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Tue, 24 Mar 2026 09:55:39 +1100 Subject: [PATCH 6/6] Pass `QueryVTable` to two functions instead of two fn pointers. Specifically, `DepGraph::with_feed_task` and `incremental_verify_ich`. This makes some things shorter, at the cost of making module `dep_graph` depend directly on module `query`. (It already depends indirectly via module `verify_ich`.) --- compiler/rustc_middle/src/dep_graph/graph.rs | 23 +++++++------------- compiler/rustc_middle/src/query/inner.rs | 8 +------ compiler/rustc_middle/src/verify_ich.rs | 15 ++++++------- compiler/rustc_query_impl/src/execution.rs | 9 +------- 4 files changed, 17 insertions(+), 38 deletions(-) diff --git a/compiler/rustc_middle/src/dep_graph/graph.rs b/compiler/rustc_middle/src/dep_graph/graph.rs index 444d02521148c..0b424a32a58a4 100644 --- a/compiler/rustc_middle/src/dep_graph/graph.rs +++ b/compiler/rustc_middle/src/dep_graph/graph.rs @@ -27,6 +27,7 @@ use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps}; use crate::dep_graph::edges::EdgesVec; use crate::ich::StableHashingContext; +use crate::query::{QueryCache, QueryVTable}; use crate::ty::TyCtxt; use crate::verify_ich::incremental_verify_ich; @@ -572,13 +573,12 @@ impl DepGraph { /// FIXME: If the code is changed enough for this node to be marked before requiring the /// caller's node, we suppose that those changes will be enough to mark this node red and /// force a recomputation using the "normal" way.
- pub fn with_feed_task<'tcx, R>( + pub fn with_feed_task<'tcx, C: QueryCache>( &self, - node: DepNode, tcx: TyCtxt<'tcx>, - result: &R, - hash_result: Option, &R) -> Fingerprint>, - format_value_fn: fn(&R) -> String, + query: &'tcx QueryVTable<'tcx, C>, + node: DepNode, + value: &C::Value, ) -> DepNodeIndex { if let Some(data) = self.data.as_ref() { // The caller query has more dependencies than the node we are creating. We may @@ -590,17 +590,10 @@ impl DepGraph { if let Some(prev_index) = data.previous.node_to_index_opt(&node) { let dep_node_index = data.colors.current(prev_index); if let Some(dep_node_index) = dep_node_index { - incremental_verify_ich( - tcx, - data, - result, - prev_index, - hash_result, - format_value_fn, - ); + incremental_verify_ich(tcx, data, query, value, prev_index); #[cfg(debug_assertions)] - if hash_result.is_some() { + if query.hash_value_fn.is_some() { data.current.record_edge( dep_node_index, node, @@ -624,7 +617,7 @@ impl DepGraph { } }); - data.hash_result_and_alloc_node(tcx, node, edges, result, hash_result) + data.hash_result_and_alloc_node(tcx, node, edges, value, query.hash_value_fn) } else { // Incremental compilation is turned off. We just execute the task // without tracking. We still provide a dep-node index that uniquely diff --git a/compiler/rustc_middle/src/query/inner.rs b/compiler/rustc_middle/src/query/inner.rs index 402c448a1fa3a..f3f3e76dcbe42 100644 --- a/compiler/rustc_middle/src/query/inner.rs +++ b/compiler/rustc_middle/src/query/inner.rs @@ -158,13 +158,7 @@ pub(crate) fn query_feed<'tcx, C>( // There is no cached value for this key, so feed the query by // adding the provided value to the cache. 
let dep_node = dep_graph::DepNode::construct(tcx, query.dep_kind, &key); - let dep_node_index = tcx.dep_graph.with_feed_task( - dep_node, - tcx, - &value, - query.hash_value_fn, - query.format_value, - ); + let dep_node_index = tcx.dep_graph.with_feed_task(tcx, query, dep_node, &value); query.cache.complete(key, value, dep_node_index); } } diff --git a/compiler/rustc_middle/src/verify_ich.rs b/compiler/rustc_middle/src/verify_ich.rs index a1ab4d8cc4d00..c64f8d6fd0fbd 100644 --- a/compiler/rustc_middle/src/verify_ich.rs +++ b/compiler/rustc_middle/src/verify_ich.rs @@ -4,31 +4,30 @@ use rustc_data_structures::fingerprint::Fingerprint; use tracing::instrument; use crate::dep_graph::{DepGraphData, SerializedDepNodeIndex}; -use crate::ich::StableHashingContext; +use crate::query::{QueryCache, QueryVTable}; use crate::ty::TyCtxt; #[inline] -#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")] -pub fn incremental_verify_ich<'tcx, V>( +#[instrument(skip(tcx, dep_graph_data, result), level = "debug")] +pub fn incremental_verify_ich<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, dep_graph_data: &DepGraphData, - result: &V, + query: &'tcx QueryVTable<'tcx, C>, + result: &C::Value, prev_index: SerializedDepNodeIndex, - hash_result: Option, &V) -> Fingerprint>, - format_value: fn(&V) -> String, ) { if !dep_graph_data.is_index_green(prev_index) { incremental_verify_ich_not_green(tcx, prev_index) } - let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| { + let new_hash = query.hash_value_fn.map_or(Fingerprint::ZERO, |f| { tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result)) }); let old_hash = dep_graph_data.prev_value_fingerprint_of(prev_index); if new_hash != old_hash { - incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result)); + incremental_verify_ich_failed(tcx, prev_index, &|| (query.format_value)(result)); } } diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs 
index 43b24597f6325..dc663ecd92d39 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -475,14 +475,7 @@ fn execute_job_incr<'tcx, C: QueryCache>( // // See issue #82920 for an example of a miscompilation that would get turned into an // ICE by this check. - incremental_verify_ich( - tcx, - dep_graph_data, - &value, - prev_index, - query.hash_value_fn, - query.format_value, - ); + incremental_verify_ich(tcx, dep_graph_data, query, &value, prev_index); } (value, dep_node_index)