diff --git a/compiler/rustc_middle/src/dep_graph/graph.rs b/compiler/rustc_middle/src/dep_graph/graph.rs index 444d02521148c..0b424a32a58a4 100644 --- a/compiler/rustc_middle/src/dep_graph/graph.rs +++ b/compiler/rustc_middle/src/dep_graph/graph.rs @@ -27,6 +27,7 @@ use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex use super::{DepKind, DepNode, WorkProductId, read_deps, with_deps}; use crate::dep_graph::edges::EdgesVec; use crate::ich::StableHashingContext; +use crate::query::{QueryCache, QueryVTable}; use crate::ty::TyCtxt; use crate::verify_ich::incremental_verify_ich; @@ -572,13 +573,12 @@ impl DepGraph { /// FIXME: If the code is changed enough for this node to be marked before requiring the /// caller's node, we suppose that those changes will be enough to mark this node red and /// force a recomputation using the "normal" way. - pub fn with_feed_task<'tcx, R>( + pub fn with_feed_task<'tcx, C: QueryCache>( &self, - node: DepNode, tcx: TyCtxt<'tcx>, - result: &R, - hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, - format_value_fn: fn(&R) -> String, + query: &'tcx QueryVTable<'tcx, C>, + node: DepNode, + value: &C::Value, ) -> DepNodeIndex { if let Some(data) = self.data.as_ref() { // The caller query has more dependencies than the node we are creating. 
We may @@ -590,17 +590,10 @@ impl DepGraph { if let Some(prev_index) = data.previous.node_to_index_opt(&node) { let dep_node_index = data.colors.current(prev_index); if let Some(dep_node_index) = dep_node_index { - incremental_verify_ich( - tcx, - data, - result, - prev_index, - hash_result, - format_value_fn, - ); + incremental_verify_ich(tcx, data, query, value, prev_index); #[cfg(debug_assertions)] - if hash_result.is_some() { + if query.hash_value_fn.is_some() { data.current.record_edge( dep_node_index, node, @@ -624,7 +617,7 @@ impl DepGraph { } }); - data.hash_result_and_alloc_node(tcx, node, edges, result, hash_result) + data.hash_result_and_alloc_node(tcx, node, edges, value, query.hash_value_fn) } else { // Incremental compilation is turned off. We just execute the task // without tracking. We still provide a dep-node index that uniquely diff --git a/compiler/rustc_middle/src/query/inner.rs b/compiler/rustc_middle/src/query/inner.rs index 402c448a1fa3a..f3f3e76dcbe42 100644 --- a/compiler/rustc_middle/src/query/inner.rs +++ b/compiler/rustc_middle/src/query/inner.rs @@ -158,13 +158,7 @@ pub(crate) fn query_feed<'tcx, C>( // There is no cached value for this key, so feed the query by // adding the provided value to the cache. 
let dep_node = dep_graph::DepNode::construct(tcx, query.dep_kind, &key); - let dep_node_index = tcx.dep_graph.with_feed_task( - dep_node, - tcx, - &value, - query.hash_value_fn, - query.format_value, - ); + let dep_node_index = tcx.dep_graph.with_feed_task(tcx, query, dep_node, &value); query.cache.complete(key, value, dep_node_index); } } diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index ef6259b1a0c1a..4de531739d642 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -98,12 +98,11 @@ pub struct QueryVTable<'tcx, C: QueryCache> { pub will_cache_on_disk_for_key_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> bool, - pub try_load_from_disk_fn: fn( - tcx: TyCtxt<'tcx>, - key: C::Key, - prev_index: SerializedDepNodeIndex, - index: DepNodeIndex, - ) -> Option<C::Value>, + /// Function pointer that tries to load a query value from disk. + /// + /// This should only be called after a successful check of `will_cache_on_disk_for_key_fn`. + pub try_load_from_disk_fn: + fn(tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) -> Option<C::Value>, /// Function pointer that hashes this query's result values. 
/// diff --git a/compiler/rustc_middle/src/verify_ich.rs b/compiler/rustc_middle/src/verify_ich.rs index a1ab4d8cc4d00..c64f8d6fd0fbd 100644 --- a/compiler/rustc_middle/src/verify_ich.rs +++ b/compiler/rustc_middle/src/verify_ich.rs @@ -4,31 +4,30 @@ use rustc_data_structures::fingerprint::Fingerprint; use tracing::instrument; use crate::dep_graph::{DepGraphData, SerializedDepNodeIndex}; -use crate::ich::StableHashingContext; +use crate::query::{QueryCache, QueryVTable}; use crate::ty::TyCtxt; #[inline] -#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")] -pub fn incremental_verify_ich<'tcx, V>( +#[instrument(skip(tcx, dep_graph_data, result), level = "debug")] +pub fn incremental_verify_ich<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, dep_graph_data: &DepGraphData, - result: &V, + query: &'tcx QueryVTable<'tcx, C>, + result: &C::Value, prev_index: SerializedDepNodeIndex, - hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>, - format_value: fn(&V) -> String, ) { if !dep_graph_data.is_index_green(prev_index) { incremental_verify_ich_not_green(tcx, prev_index) } - let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| { + let new_hash = query.hash_value_fn.map_or(Fingerprint::ZERO, |f| { tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result)) }); let old_hash = dep_graph_data.prev_value_fingerprint_of(prev_index); if new_hash != old_hash { - incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result)); + incremental_verify_ich_failed(tcx, prev_index, &|| (query.format_value)(result)); } } diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 8844d40f49cf3..dc663ecd92d39 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -6,7 +6,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_data_structures::sync::{DynSend, DynSync}; use rustc_data_structures::{outline, sharded, sync}; use 
rustc_errors::FatalError; -use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex}; +use rustc_middle::dep_graph::DepNodeKey; use rustc_middle::query::{ ActiveKeyStatus, Cycle, EnsureMode, QueryCache, QueryJob, QueryJobId, QueryKey, QueryLatch, QueryMode, QueryState, QueryVTable, @@ -380,7 +380,6 @@ fn check_feedable_consistency<'tcx, C: QueryCache>( } } -// Fast path for when incr. comp. is off. #[inline(always)] fn execute_job_non_incr<'tcx, C: QueryCache>( query: &'tcx QueryVTable<'tcx, C>, @@ -391,12 +390,13 @@ fn execute_job_non_incr<'tcx, C: QueryCache>( debug_assert!(!tcx.dep_graph.is_fully_enabled()); let prof_timer = tcx.prof.query_provider(); - // Call the query provider. let value = start_query(job_id, query.depth_limit, || (query.invoke_provider_fn)(tcx, key)); let dep_node_index = tcx.dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); // Sanity: Fingerprint the key and the result to assert they don't contain anything unhashable. + // (There is no corresponding sanity check in `execute_job_incr` because it fingerprints the + // key and result as part of its normal operation.) if cfg!(debug_assertions) { let _ = key.to_fingerprint(tcx); if let Some(hash_value_fn) = query.hash_value_fn { @@ -420,116 +420,80 @@ fn execute_job_incr<'tcx, C: QueryCache>( let dep_graph_data = tcx.dep_graph.data().expect("should always be present in incremental mode"); - if !query.eval_always { - // The diagnostics for this query will be promoted to the current session during - // `try_mark_green()`, so we can ignore them here. 
- if let Some(ret) = start_query(job_id, false, || try { - let (prev_index, dep_node_index) = dep_graph_data.try_mark_green(tcx, &dep_node)?; - let value = load_from_disk_or_invoke_provider_green( - tcx, - dep_graph_data, - query, - key, - &dep_node, - prev_index, - dep_node_index, - ); - (value, dep_node_index) - }) { - return ret; - } - } - - let prof_timer = tcx.prof.query_provider(); - - let (result, dep_node_index) = start_query(job_id, query.depth_limit, || { - // Call the query provider. - dep_graph_data.with_task( - dep_node, - tcx, - (query, key), - |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key), - query.hash_value_fn, - ) - }); + if !query.eval_always + && let Some((prev_index, dep_node_index)) = dep_graph_data.try_mark_green(tcx, &dep_node) + { + // Note this function can be called concurrently from the same query. We must ensure that + // this is handled correctly. + + // First try to load the result from the on-disk cache. Some things are never cached on + // disk. + let try_value = if (query.will_cache_on_disk_for_key_fn)(tcx, key) { + let prof_timer = tcx.prof.incr_cache_loading(); + let value = (query.try_load_from_disk_fn)(tcx, prev_index); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + value + } else { + None + }; - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + let (value, verify) = match try_value { + Some(value) => { + if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) { + dep_graph_data.mark_debug_loaded_from_disk(dep_node) + } - (result, dep_node_index) -} + let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index); + // If `-Zincremental-verify-ich` is specified, re-hash every result from the cache + // to make sure it has the expected fingerprint. Otherwise, re-hash a fraction to + // give some coverage at low cost. 
+ let verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) + || tcx.sess.opts.unstable_opts.incremental_verify_ich; -/// Given that the dep node for this query+key is green, obtain a value for it -/// by loading one from disk if possible, or by invoking its query provider if -/// necessary. -#[inline(always)] -fn load_from_disk_or_invoke_provider_green<'tcx, C: QueryCache>( - tcx: TyCtxt<'tcx>, - dep_graph_data: &DepGraphData, - query: &'tcx QueryVTable<'tcx, C>, - key: C::Key, - dep_node: &DepNode, - prev_index: SerializedDepNodeIndex, - dep_node_index: DepNodeIndex, -) -> C::Value { - // Note this function can be called concurrently from the same query - // We must ensure that this is handled correctly. - - debug_assert!(dep_graph_data.is_index_green(prev_index)); - - // First try to load the result from the on-disk cache. Some things are never cached on disk. - let value; - let verify; - match (query.try_load_from_disk_fn)(tcx, key, prev_index, dep_node_index) { - Some(loaded_value) => { - if std::intrinsics::unlikely(tcx.sess.opts.unstable_opts.query_dep_graph) { - dep_graph_data.mark_debug_loaded_from_disk(*dep_node) + (value, verify) } + None => { + // We could not load a result from the on-disk cache, so recompute. The dep-graph + // for this computation is already in-place, so we can just call the query + // provider. + let prof_timer = tcx.prof.query_provider(); + let value = start_query(job_id, query.depth_limit, || { + tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)) + }); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + (value, true) + } + }; - value = loaded_value; - - let prev_fingerprint = dep_graph_data.prev_value_fingerprint_of(prev_index); - // If `-Zincremental-verify-ich` is specified, re-hash results from - // the cache and make sure that they have the expected fingerprint. + if verify { + // Verify that re-running the query produced a result with the expected hash. 
This + // catches bugs in query implementations, turning them into ICEs. For example, a query + // might sort its result by `DefId`. Because `DefId`s are not stable across compilation + // sessions, the result could get up getting sorted in a different order when the query + // is re-run, even though all of the inputs (e.g. `DefPathHash` values) were green. // - // If not, we still seek to verify a subset of fingerprints loaded - // from disk. Re-hashing results is fairly expensive, so we can't - // currently afford to verify every hash. This subset should still - // give us some coverage of potential bugs. - verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32) - || tcx.sess.opts.unstable_opts.incremental_verify_ich; - } - None => { - // We could not load a result from the on-disk cache, so recompute. The dep-graph for - // this computation is already in-place, so we can just call the query provider. - let prof_timer = tcx.prof.query_provider(); - value = tcx.dep_graph.with_ignore(|| (query.invoke_provider_fn)(tcx, key)); - prof_timer.finish_with_query_invocation_id(dep_node_index.into()); - - verify = true; + // See issue #82920 for an example of a miscompilation that would get turned into an + // ICE by this check. + incremental_verify_ich(tcx, dep_graph_data, query, &value, prev_index); } - }; - if verify { - // Verify that re-running the query produced a result with the expected hash. - // This catches bugs in query implementations, turning them into ICEs. - // For example, a query might sort its result by `DefId` - since `DefId`s are - // not stable across compilation sessions, the result could get up getting sorted - // in a different order when the query is re-run, even though all of the inputs - // (e.g. `DefPathHash` values) were green. 
- // - // See issue #82920 for an example of a miscompilation that would get turned into - // an ICE by this check - incremental_verify_ich( - tcx, - dep_graph_data, - &value, - prev_index, - query.hash_value_fn, - query.format_value, - ); + (value, dep_node_index) + } else { + let prof_timer = tcx.prof.query_provider(); + let (value, dep_node_index) = start_query(job_id, query.depth_limit, || { + dep_graph_data.with_task( + dep_node, + tcx, + (query, key), + |tcx, (query, key)| (query.invoke_provider_fn)(tcx, key), + query.hash_value_fn, + ) + }); + prof_timer.finish_with_query_invocation_id(dep_node_index.into()); + + (value, dep_node_index) } - - value } /// Checks whether a `tcx.ensure_ok()` or `tcx.ensure_done()` query call can diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index ef4fff293bf65..b93bf26fc4f93 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -5,7 +5,7 @@ use rustc_hir::limit::Limit; use rustc_middle::bug; #[expect(unused_imports, reason = "used by doc comments")] use rustc_middle::dep_graph::DepKindVTable; -use rustc_middle::dep_graph::{DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex}; +use rustc_middle::dep_graph::{DepNode, DepNodeKey, SerializedDepNodeIndex}; use rustc_middle::query::erase::{Erasable, Erased}; use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder}; use rustc_middle::query::{QueryCache, QueryJobId, QueryMode, QueryVTable, erase}; @@ -184,23 +184,14 @@ pub(crate) fn loadable_from_disk<'tcx>(tcx: TyCtxt<'tcx>, id: SerializedDepNodeI pub(crate) fn try_load_from_disk<'tcx, V>( tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex, - index: DepNodeIndex, ) -> Option<V> where V: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { let on_disk_cache = tcx.query_system.on_disk_cache.as_ref()?; - let prof_timer = tcx.prof.incr_cache_loading(); - // The call to `with_query_deserialization` enforces that no new `DepNodes` // are created 
during deserialization. See the docs of that method for more // details. - let value = tcx - .dep_graph - .with_query_deserialization(|| on_disk_cache.try_load_query_value(tcx, prev_index)); - - prof_timer.finish_with_query_invocation_id(index.into()); - - value + tcx.dep_graph.with_query_deserialization(|| on_disk_cache.try_load_query_value(tcx, prev_index)) } diff --git a/compiler/rustc_query_impl/src/query_impl.rs b/compiler/rustc_query_impl/src/query_impl.rs index 60ddf3e4ad381..ee14b8e064cae 100644 --- a/compiler/rustc_query_impl/src/query_impl.rs +++ b/compiler/rustc_query_impl/src/query_impl.rs @@ -148,22 +148,17 @@ macro_rules! define_queries { will_cache_on_disk_for_key_fn: |_, _| false, #[cfg($cache_on_disk)] - try_load_from_disk_fn: |tcx, key, prev_index, index| { + try_load_from_disk_fn: |tcx, prev_index| { use rustc_middle::queries::$name::{ProvidedValue, provided_to_erased}; - // Check the `cache_on_disk_if` condition for this key. - if !rustc_middle::queries::_cache_on_disk_if_fns::$name(tcx, key) { - return None; - } - let loaded_value: ProvidedValue<'tcx> = - $crate::plumbing::try_load_from_disk(tcx, prev_index, index)?; + $crate::plumbing::try_load_from_disk(tcx, prev_index)?; // Arena-alloc the value if appropriate, and erase it. Some(provided_to_erased(tcx, loaded_value)) }, #[cfg(not($cache_on_disk))] - try_load_from_disk_fn: |_tcx, _key, _prev_index, _index| None, + try_load_from_disk_fn: |_tcx, _prev_index| None, // The default just emits `err` and then aborts. // `handle_cycle_error::specialize_query_vtables` overwrites this default