Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r--   compiler/rustc_query_system/src/query/caches.rs   |  26
-rw-r--r--   compiler/rustc_query_system/src/query/config.rs   |  12
-rw-r--r--   compiler/rustc_query_system/src/query/job.rs      | 122
-rw-r--r--   compiler/rustc_query_system/src/query/mod.rs      |  10
-rw-r--r--   compiler/rustc_query_system/src/query/plumbing.rs | 167
5 files changed, 148 insertions(+), 189 deletions(-)
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4ba9d53a9..0240f012d 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -2,7 +2,7 @@ use crate::dep_graph::DepNodeIndex;
 
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::sharded::{self, Sharded};
-use rustc_data_structures::sync::Lock;
+use rustc_data_structures::sync::OnceLock;
 use rustc_index::{Idx, IndexVec};
 use std::fmt::Debug;
 use std::hash::Hash;
@@ -55,7 +55,7 @@ where
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
         let key_hash = sharded::make_hash(key);
-        let lock = self.cache.get_shard_by_hash(key_hash).lock();
+        let lock = self.cache.lock_shard_by_hash(key_hash);
         let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
 
         if let Some((_, value)) = result { Some(*value) } else { None }
@@ -63,15 +63,14 @@ where
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.get_shard_by_value(&key).lock();
+        let mut lock = self.cache.lock_shard_by_value(&key);
         // We may be overwriting another value. This is all right, since the dep-graph
         // will check that the fingerprint matches.
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        let shards = self.cache.lock_shards();
-        for shard in shards.iter() {
+        for shard in self.cache.lock_shards() {
             for (k, v) in shard.iter() {
                 f(k, &v.0, v.1);
             }
@@ -88,12 +87,12 @@ impl<'tcx, V: 'tcx> CacheSelector<'tcx, V> for SingleCacheSelector {
 }
 
 pub struct SingleCache<V> {
-    cache: Lock<Option<(V, DepNodeIndex)>>,
+    cache: OnceLock<(V, DepNodeIndex)>,
 }
 
 impl<V> Default for SingleCache<V> {
     fn default() -> Self {
-        SingleCache { cache: Lock::new(None) }
+        SingleCache { cache: OnceLock::new() }
     }
 }
 
@@ -106,16 +105,16 @@ where
 
     #[inline(always)]
     fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
-        *self.cache.lock()
+        self.cache.get().copied()
    }
 
     #[inline]
     fn complete(&self, _key: (), value: V, index: DepNodeIndex) {
-        *self.cache.lock() = Some((value, index));
+        self.cache.set((value, index)).ok();
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        if let Some(value) = self.cache.lock().as_ref() {
+        if let Some(value) = self.cache.get() {
             f(&(), &value.0, value.1)
         }
     }
@@ -149,19 +148,18 @@ where
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        let lock = self.cache.lock_shard_by_hash(key.index() as u64);
         if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
     }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+        let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        let shards = self.cache.lock_shards();
-        for shard in shards.iter() {
+        for shard in self.cache.lock_shards() {
             for (k, v) in shard.iter_enumerated() {
                 if let Some(v) = v {
                     f(&k, &v.0, v.1);
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 7e47d7012..c025fac26 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -1,6 +1,6 @@
 //! Query configuration and description traits.
 
-use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
 use crate::error::HandleCycleError;
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
@@ -8,6 +8,7 @@ use crate::query::DepNodeIndex;
 use crate::query::{QueryContext, QueryInfo, QueryState};
 
 use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_span::ErrorGuaranteed;
 use std::fmt::Debug;
 use std::hash::Hash;
 
@@ -26,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn format_value(self) -> fn(&Self::Value) -> String;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
     where
         Qcx: 'a;
 
@@ -56,7 +57,8 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn value_from_cycle_error(
         self,
         tcx: Qcx::DepContext,
-        cycle: &[QueryInfo<Qcx::DepKind>],
+        cycle: &[QueryInfo],
+        guar: ErrorGuaranteed,
     ) -> Self::Value;
 
     fn anon(self) -> bool;
@@ -64,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn depth_limit(self) -> bool;
     fn feedable(self) -> bool;
 
-    fn dep_kind(self) -> Qcx::DepKind;
+    fn dep_kind(self) -> DepKind;
     fn handle_cycle_error(self) -> HandleCycleError;
     fn hash_result(self) -> HashResult<Self::Value>;
 
     // Just here for convenience and checking that the key matches the kind, don't override this.
-    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
         DepNode::construct(tcx, self.dep_kind(), key)
     }
 }
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 1b1248924..f2c1f84fc 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,9 +1,8 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::DepContext;
 use crate::error::CycleStack;
 use crate::query::plumbing::CycleError;
+use crate::query::DepKind;
 use crate::query::{QueryContext, QueryStackFrame};
-use core::marker::PhantomData;
-
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{
     Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@@ -30,48 +29,48 @@ use {
 
 /// Represents a span and a query key.
 #[derive(Clone, Debug)]
-pub struct QueryInfo<D: DepKind> {
+pub struct QueryInfo {
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
-    pub query: QueryStackFrame<D>,
+    pub query: QueryStackFrame,
 }
 
-pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
 
 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
 pub struct QueryJobId(pub NonZeroU64);
 
 impl QueryJobId {
-    fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
+    fn query(self, map: &QueryMap) -> QueryStackFrame {
         map.get(&self).unwrap().query.clone()
     }
 
     #[cfg(parallel_compiler)]
-    fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
+    fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }
 
     #[cfg(parallel_compiler)]
-    fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
+    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }
 
     #[cfg(parallel_compiler)]
-    fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
+    fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }
 
 #[derive(Clone)]
-pub struct QueryJobInfo<D: DepKind> {
-    pub query: QueryStackFrame<D>,
-    pub job: QueryJob<D>,
+pub struct QueryJobInfo {
+    pub query: QueryStackFrame,
+    pub job: QueryJob,
 }
 
 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D: DepKind> {
+pub struct QueryJob {
     pub id: QueryJobId,
 
     /// The span corresponding to the reason for which this query was required.
@@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {
 
     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D>>,
-    spooky: core::marker::PhantomData<D>,
+    latch: Option<QueryLatch>,
 }
 
-impl<D: DepKind> QueryJob<D> {
+impl QueryJob {
     /// Creates a new query job.
     #[inline]
     pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
             parent,
             #[cfg(parallel_compiler)]
             latch: None,
-            spooky: PhantomData,
         }
     }
 
     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -124,13 +121,12 @@ impl<D: DepKind> QueryJob<D> {
 }
 
 impl QueryJobId {
-    #[cfg(not(parallel_compiler))]
-    pub(super) fn find_cycle_in_stack<D: DepKind>(
+    pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D>,
+        query_map: QueryMap,
         current_job: &Option<QueryJobId>,
         span: Span,
-    ) -> CycleError<D> {
+    ) -> CycleError {
         // Find the waitee amongst `current_job` parents
         let mut cycle = Vec::new();
         let mut current_job = Option::clone(current_job);
@@ -164,18 +160,18 @@ impl QueryJobId {
 
     #[cold]
     #[inline(never)]
-    pub fn try_find_layout_root<D: DepKind>(
+    pub fn try_find_layout_root(
         &self,
-        query_map: QueryMap<D>,
-    ) -> Option<(QueryJobInfo<D>, usize)> {
+        query_map: QueryMap,
+        layout_of_kind: DepKind,
+    ) -> Option<(QueryJobInfo, usize)> {
         let mut last_layout = None;
         let mut current_id = Some(*self);
         let mut depth = 0;
 
         while let Some(id) = current_id {
             let info = query_map.get(&id).unwrap();
-            // FIXME: This string comparison should probably not be done.
-            if format!("{:?}", info.query.dep_kind) == "layout_of" {
+            if info.query.dep_kind == layout_of_kind {
                 depth += 1;
                 last_layout = Some((info.clone(), depth));
             }
@@ -186,15 +182,15 @@ impl QueryJobId {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryWaiter<D: DepKind> {
+struct QueryWaiter {
     query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
-    cycle: Mutex<Option<CycleError<D>>>,
+    cycle: Mutex<Option<CycleError>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryWaiter<D> {
+impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -202,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
 }
 
 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D: DepKind> {
+struct QueryLatchInfo {
     complete: bool,
-    waiters: Vec<Arc<QueryWaiter<D>>>,
+    waiters: Vec<Arc<QueryWaiter>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D: DepKind> {
-    info: Arc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+    info: Arc<Mutex<QueryLatchInfo>>,
 }
 
 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryLatch<D> {
+impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
             info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -222,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        query: Option<QueryJobId>,
-        span: Span,
-    ) -> Result<(), CycleError<D>> {
+    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
         let waiter =
             Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -241,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -297,14 +289,9 @@ type Waiter = (QueryJobId, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<F, D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId,
-    mut visit: F,
-) -> Option<Option<Waiter>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
     F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
-    D: DepKind,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -333,8 +320,8 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn cycle_check(
+    query_map: &QueryMap,
     query: QueryJobId,
     span: Span,
     stack: &mut Vec<(Span, QueryJobId)>,
@@ -374,8 +361,8 @@ fn cycle_check<D: DepKind>(
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn connected_to_root(
+    query_map: &QueryMap,
     query: QueryJobId,
     visited: &mut FxHashSet<QueryJobId>,
 ) -> bool {
@@ -397,10 +384,9 @@ fn connected_to_root<D: DepKind>(
 
 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
     F: Fn(&T) -> (Span, QueryJobId),
-    D: DepKind,
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
@@ -424,10 +410,10 @@ where
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn remove_cycle(
+    query_map: &QueryMap,
     jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
+    wakelist: &mut Vec<Arc<QueryWaiter>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -529,7 +515,7 @@ fn remove_cycle<D: DepKind>(
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
 #[cfg(parallel_compiler)]
-pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
     let on_panic = defer(|| {
         eprintln!("deadlock handler panicked, aborting process");
         process::abort();
@@ -553,7 +539,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
     // which in turn will wait on X causing a deadlock. We have a false dependency from
     // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
     // only considers the true dependency and won't detect a cycle.
-    assert!(found_cycle);
+    if !found_cycle {
+        panic!("deadlock detected");
+    }
 
     // FIXME: Ensure this won't cause a deadlock before we return
     for waiter in wakelist.into_iter() {
@@ -565,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
 
 #[inline(never)]
 #[cold]
-pub(crate) fn report_cycle<'a, D: DepKind>(
+pub(crate) fn report_cycle<'a>(
     sess: &'a Session,
-    CycleError { usage, cycle: stack }: &CycleError<D>,
+    CycleError { usage, cycle: stack }: &CycleError,
 ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
     assert!(!stack.is_empty());
 
@@ -592,9 +580,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
         });
     }
 
-    let alias = if stack
-        .iter()
-        .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+    let alias = if stack.iter().all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias)))
     {
         Some(crate::error::Alias::Ty)
     } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
@@ -654,8 +640,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
         if let Some(ref mut file) = file {
             let _ = writeln!(
                 file,
-                "#{} [{:?}] {}",
-                count_total, query_info.query.dep_kind, query_info.query.description
+                "#{} [{}] {}",
+                count_total,
+                qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
+                query_info.query.description
             );
         }
 
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index f7619d75b..05dee9f12 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -28,27 +28,27 @@ use thin_vec::ThinVec;
 ///
 /// This is mostly used in case of cycles for error reporting.
 #[derive(Clone, Debug)]
-pub struct QueryStackFrame<D: DepKind> {
+pub struct QueryStackFrame {
     pub description: String,
     span: Option<Span>,
     pub def_id: Option<DefId>,
     pub def_kind: Option<DefKind>,
     pub ty_adt_id: Option<DefId>,
-    pub dep_kind: D,
+    pub dep_kind: DepKind,
     /// This hash is used to deterministically pick
     /// a query to remove cycles in the parallel compiler.
     #[cfg(parallel_compiler)]
     hash: Hash64,
 }
 
-impl<D: DepKind> QueryStackFrame<D> {
+impl QueryStackFrame {
     #[inline]
     pub fn new(
         description: String,
         span: Option<Span>,
         def_id: Option<DefId>,
         def_kind: Option<DefKind>,
-        dep_kind: D,
+        dep_kind: DepKind,
         ty_adt_id: Option<DefId>,
         _hash: impl FnOnce() -> Hash64,
     ) -> Self {
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
     /// Get the query information from the TLS context.
     fn current_query_job(self) -> Option<QueryJobId>;
 
-    fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
+    fn try_collect_active_jobs(self) -> Option<QueryMap>;
 
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4adb4eb74..ae8414ebb 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,8 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
-use crate::dep_graph::{DepGraphData, HasDepContext};
+use crate::dep_graph::DepGraphData;
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 #[cfg(parallel_compiler)]
@@ -14,10 +14,11 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
+use rustc_data_structures::{outline, sync};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
@@ -29,68 +30,40 @@ use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
-pub struct QueryState<K, D: DepKind> {
-    #[cfg(parallel_compiler)]
-    active: Sharded<FxHashMap<K, QueryResult<D>>>,
-    #[cfg(not(parallel_compiler))]
-    active: Lock<FxHashMap<K, QueryResult<D>>>,
+pub struct QueryState<K> {
+    active: Sharded<FxHashMap<K, QueryResult>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D: DepKind> {
+enum QueryResult {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D>),
+    Started(QueryJob),
 
     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }
 
-impl<K, D> QueryState<K, D>
+impl<K> QueryState<K>
 where
     K: Eq + Hash + Copy + Debug,
-    D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.active.lock_shards();
-            shards.iter().all(|shard| shard.is_empty())
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            self.active.lock().is_empty()
-        }
+        self.active.lock_shards().all(|shard| shard.is_empty())
    }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
         &self,
         qcx: Qcx,
-        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
-        jobs: &mut QueryMap<D>,
+        make_query: fn(Qcx, K) -> QueryStackFrame,
+        jobs: &mut QueryMap,
     ) -> Option<()> {
         let mut active = Vec::new();
 
-        #[cfg(parallel_compiler)]
-        {
-            // We use try_lock_shards here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            let shards = self.active.try_lock_shards()?;
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    if let QueryResult::Started(ref job) = *v {
-                        active.push((*k, job.clone()));
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            // We use try_lock here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
-            // really hurt much.)
-            for (k, v) in self.active.try_lock()?.iter() {
+        // We use try_lock_shards here since we are called from the
+        // deadlock handler, and this shouldn't be locked.
+        for shard in self.active.try_lock_shards() {
+            for (k, v) in shard?.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     active.push((*k, job.clone()));
                 }
@@ -108,25 +81,25 @@ where
     }
 }
 
-impl<K, D: DepKind> Default for QueryState<K, D> {
-    fn default() -> QueryState<K, D> {
+impl<K> Default for QueryState<K> {
+    fn default() -> QueryState<K> {
         QueryState { active: Default::default() }
     }
 }
 
 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K, D: DepKind>
+struct JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
-    state: &'tcx QueryState<K, D>,
+    state: &'tcx QueryState<K>,
     key: K,
 }
 
 #[cold]
 #[inline(never)]
-fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
+fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -138,7 +111,7 @@ where
 fn handle_cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    cycle_error: &CycleError<Qcx::DepKind>,
+    cycle_error: &CycleError,
     mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
 ) -> Q::Value
 where
@@ -148,8 +121,8 @@ where
     use HandleCycleError::*;
     match query.handle_cycle_error() {
         Error => {
-            error.emit();
-            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+            let guar = error.emit();
+            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
         }
         Fatal => {
             error.emit();
@@ -157,13 +130,13 @@ where
             unreachable!()
         }
         DelayBug => {
-            error.delay_as_bug();
-            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+            let guar = error.delay_as_bug();
+            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
         }
     }
 }
 
-impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
+impl<'tcx, K> JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
@@ -184,10 +157,7 @@ where
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            #[cfg(parallel_compiler)]
-            let mut lock = state.active.get_shard_by_value(&key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut lock = state.active.lock();
+            let mut lock = state.active.lock_shard_by_value(&key);
             match lock.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -198,10 +168,9 @@ where
     }
 }
 
-impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
-    D: DepKind,
 {
     #[inline(never)]
     #[cold]
@@ -209,10 +178,7 @@ where
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            #[cfg(parallel_compiler)]
-            let mut shard = state.active.get_shard_by_value(&self.key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut shard = state.active.lock();
+            let mut shard = state.active.lock_shard_by_value(&self.key);
             let job = match shard.remove(&self.key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
@@ -227,10 +193,10 @@ where
 }
 
 #[derive(Clone)]
-pub(crate) struct CycleError<D: DepKind> {
+pub(crate) struct CycleError {
     /// The query and related span that uses the cycle.
-    pub usage: Option<(Span, QueryStackFrame<D>)>,
-    pub cycle: Vec<QueryInfo<D>>,
+    pub usage: Option<(Span, QueryStackFrame)>,
+    pub cycle: Vec<QueryInfo>,
 }
 
 /// Checks if the query is already computed and in the cache.
@@ -255,7 +221,6 @@ where
 
 #[cold]
 #[inline(never)]
-#[cfg(not(parallel_compiler))]
 fn cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -281,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    latch: QueryLatch<Qcx::DepKind>,
+    latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
@@ -300,7 +265,18 @@ where
     match result {
         Ok(()) => {
             let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
-                cold_path(|| panic!("value must be in cache after waiting"))
+                outline(|| {
+                    // We didn't find the query result in the query cache. Check if it was
+                    // poisoned due to a panic instead.
+                    let lock = query.query_state(qcx).active.get_shard_by_value(&key).lock();
+                    match lock.get(&key) {
+                        // The query we waited on panicked. Continue unwinding here.
+                        Some(QueryResult::Poisoned) => FatalError.raise(),
+                        _ => panic!(
+                            "query result must in the cache or the query must be poisoned after a wait"
+                        ),
+                    }
+                })
             };
 
             qcx.dep_context().profiler().query_cache_hit(index.into());
@@ -318,17 +294,14 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    #[cfg(parallel_compiler)]
-    let mut state_lock = state.active.get_shard_by_value(&key).lock();
-    #[cfg(not(parallel_compiler))]
-    let mut state_lock = state.active.lock();
+    let mut state_lock = state.active.lock_shard_by_value(&key);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -360,8 +333,18 @@ where
         }
         Entry::Occupied(mut entry) => {
             match entry.get_mut() {
-                #[cfg(not(parallel_compiler))]
                 QueryResult::Started(job) => {
+                    #[cfg(parallel_compiler)]
+                    if sync::is_dyn_thread_safe() {
+                        // Get the latch out
+                        let latch = job.latch();
+                        drop(state_lock);
+
+                        // Only call `wait_for_query` if we're using a Rayon thread pool
+                        // as it will attempt to mark the worker thread as blocked.
+                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
+                    }
+
                     let id = job.id;
                     drop(state_lock);
 
                     // If we are single-threaded we know that we have cycle error,
                     // so we just return the error.
                     cycle_error(query, qcx, id, span)
                 }
-                #[cfg(parallel_compiler)]
-                QueryResult::Started(job) => {
-                    // Get the latch out
-                    let latch = job.latch();
-                    drop(state_lock);
-
-                    wait_for_query(query, qcx, span, key, latch, current_job_id)
-                }
                 QueryResult::Poisoned => FatalError.raise(),
             }
         }
@@ -387,10 +362,10 @@ where
 fn execute_job<Q, Qcx, const INCR: bool>(
     query: Q,
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
+    state: &QueryState<Q::Key>,
     key: Q::Key,
     id: QueryJobId,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -497,9 +472,9 @@ where
 fn execute_job_incr<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     key: Q::Key,
-    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+    mut dep_node_opt: Option<DepNode>,
     job_id: QueryJobId,
 ) -> (Q::Value, DepNodeIndex)
 where
@@ -563,10 +538,10 @@ where
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     query: Q,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     qcx: Qcx,
     key: &Q::Key,
-    dep_node: &DepNode<Qcx::DepKind>,
+    dep_node: &DepNode,
 ) -> Option<(Q::Value, DepNodeIndex)>
 where
     Q: QueryConfig<Qcx>,
@@ -660,7 +635,7 @@ where
 #[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V>(
     tcx: Tcx,
-    dep_graph_data: &DepGraphData<Tcx::DepKind>,
+    dep_graph_data: &DepGraphData<Tcx::Deps>,
     result: &V,
     prev_index: SerializedDepNodeIndex,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -753,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
     check_cache: bool,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+) -> (bool, Option<DepNode>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -844,12 +819,8 @@ where
     Some(result)
 }
 
-pub fn force_query<Q, Qcx>(
-    query: Q,
-    qcx: Qcx,
-    key: Q::Key,
-    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
-) where
+pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
+where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
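
The most self-contained change above is in caches.rs: the single-key query cache moves from a `Lock<Option<(V, DepNodeIndex)>>` to a `OnceLock<(V, DepNodeIndex)>`, so a completed result can be read without taking a lock and only the first `complete` call stores a value. The sketch below illustrates that pattern in isolation; it is only an illustration, not compiler code. It uses the standard library's `std::sync::OnceLock` as a stand-in for `rustc_data_structures::sync::OnceLock`, and the `SingleValueCache` type, the `u32` in place of `DepNodeIndex`, and the `main` driver are assumptions made up for the example.

```rust
// Illustrative sketch of the OnceLock-based single-value cache pattern.
// std::sync::OnceLock stands in for rustc_data_structures::sync::OnceLock,
// and u32 stands in for DepNodeIndex; both substitutions are assumptions.
use std::sync::OnceLock;

struct SingleValueCache<V> {
    cache: OnceLock<(V, u32)>,
}

impl<V: Copy> SingleValueCache<V> {
    fn new() -> Self {
        SingleValueCache { cache: OnceLock::new() }
    }

    /// Mirrors `lookup` in the patch: a completed value is read without locking.
    fn lookup(&self) -> Option<(V, u32)> {
        self.cache.get().copied()
    }

    /// Mirrors `complete` in the patch: the first write wins; a racing second
    /// write is simply discarded via `.ok()` instead of panicking.
    fn complete(&self, value: V, index: u32) {
        self.cache.set((value, index)).ok();
    }
}

fn main() {
    let cache = SingleValueCache::new();
    assert!(cache.lookup().is_none());
    cache.complete(42u64, 7);
    cache.complete(99u64, 8); // ignored: the cache is already populated
    assert_eq!(cache.lookup(), Some((42, 7)));
}
```

The `.ok()` on `set` matches the patched `complete`, which tolerates a racing second writer rather than treating it as an error.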