author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:35 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:35 +0000
commit    | d1b2d29528b7794b41e66fc2136e395a02f8529b (patch)
tree      | a4a17504b260206dec3cf55b2dca82929a348ac2 /compiler/rustc_query_system/src/query
parent    | Releasing progress-linux version 1.72.1+dfsg1-1~progress7.99u1. (diff)
download  | rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.tar.xz
          | rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.zip
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r-- | compiler/rustc_query_system/src/query/caches.rs | 56
-rw-r--r-- | compiler/rustc_query_system/src/query/job.rs    | 74

2 files changed, 56 insertions, 74 deletions
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 9a09f516e..4ba9d53a9 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -1,9 +1,7 @@
 use crate::dep_graph::DepNodeIndex;
 
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::sync::Lock;
 use rustc_index::{Idx, IndexVec};
 use std::fmt::Debug;
@@ -37,10 +35,7 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelecto
 }
 
 pub struct DefaultCache<K, V> {
-    #[cfg(parallel_compiler)]
     cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
-    #[cfg(not(parallel_compiler))]
-    cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
 }
 
 impl<K, V> Default for DefaultCache<K, V> {
@@ -60,10 +55,7 @@ where
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
         let key_hash = sharded::make_hash(key);
-        #[cfg(parallel_compiler)]
         let lock = self.cache.get_shard_by_hash(key_hash).lock();
-        #[cfg(not(parallel_compiler))]
-        let lock = self.cache.lock();
         let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
 
         if let Some((_, value)) = result { Some(*value) } else { None }
@@ -71,29 +63,16 @@ where
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        #[cfg(parallel_compiler)]
         let mut lock = self.cache.get_shard_by_value(&key).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut lock = self.cache.lock();
         // We may be overwriting another value. This is all right, since the dep-graph
         // will check that the fingerprint matches.
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.cache.lock_shards();
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    f(k, &v.0, v.1);
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            let map = self.cache.lock();
-            for (k, v) in map.iter() {
+        let shards = self.cache.lock_shards();
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 f(k, &v.0, v.1);
             }
         }
@@ -151,10 +130,7 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
 }
 
 pub struct VecCache<K: Idx, V> {
-    #[cfg(parallel_compiler)]
     cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
-    #[cfg(not(parallel_compiler))]
-    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
 }
 
 impl<K: Idx, V> Default for VecCache<K, V> {
@@ -173,38 +149,20 @@ where
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        #[cfg(parallel_compiler)]
         let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
-        #[cfg(not(parallel_compiler))]
-        let lock = self.cache.lock();
         if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
    }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        #[cfg(parallel_compiler)]
         let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut lock = self.cache.lock();
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.cache.lock_shards();
-            for shard in shards.iter() {
-                for (k, v) in shard.iter_enumerated() {
-                    if let Some(v) = v {
-                        f(&k, &v.0, v.1);
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            let map = self.cache.lock();
-            for (k, v) in map.iter_enumerated() {
+        let shards = self.cache.lock_shards();
+        for shard in shards.iter() {
+            for (k, v) in shard.iter_enumerated() {
                 if let Some(v) = v {
                     f(&k, &v.0, v.1);
                 }
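The hunks above delete the `cfg(parallel_compiler)` forks so that `DefaultCache` and `VecCache` always store their entries in a `Sharded` map: the key's hash selects one of several independently locked shards, so concurrent queries rarely contend on the same mutex. Below is a minimal, self-contained sketch of that sharding technique using only the standard library; `ShardedMap`, `SHARDS`, and `shard_for` are illustrative names for this sketch, not the `rustc_data_structures::sharded` API.

```rust
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

// Illustrative shard count; rustc tunes this separately.
const SHARDS: usize = 32;

/// A hash map split into independently locked shards. Threads that
/// touch different shards never block on the same mutex.
struct ShardedMap<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Hash + Eq, V> ShardedMap<K, V> {
    fn new() -> Self {
        ShardedMap { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    /// Pick a shard from the key's hash, mirroring the role of
    /// `get_shard_by_hash` in the diff above.
    fn shard_for(&self, key: &K) -> &Mutex<HashMap<K, V>> {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        &self.shards[(hasher.finish() as usize) % SHARDS]
    }

    fn insert(&self, key: K, value: V) {
        // Lock only the one shard this key hashes to.
        self.shard_for(&key).lock().unwrap().insert(key, value);
    }

    fn get(&self, key: &K) -> Option<V>
    where
        V: Clone,
    {
        self.shard_for(key).lock().unwrap().get(key).cloned()
    }
}

fn main() {
    let map = ShardedMap::new();
    map.insert("query-key", 42);
    assert_eq!(map.get(&"query-key"), Some(42));
}
```

The fast path stays a single hash-then-lock, while `lock_shards`-style iteration (as in the `iter` methods above) simply locks and walks each shard in turn.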
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index f45f7ca5d..1b1248924 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -13,6 +13,7 @@ use rustc_session::Session;
 use rustc_span::Span;
 
 use std::hash::Hash;
+use std::io::Write;
 use std::num::NonZeroU64;
 
 #[cfg(parallel_compiler)]
@@ -20,12 +21,11 @@ use {
     parking_lot::{Condvar, Mutex},
     rayon_core,
     rustc_data_structures::fx::FxHashSet,
-    rustc_data_structures::sync::Lock,
-    rustc_data_structures::sync::Lrc,
     rustc_data_structures::{defer, jobserver},
     rustc_span::DUMMY_SP,
     std::iter,
     std::process,
+    std::sync::Arc,
 };
 
 /// Represents a span and a query key.
@@ -190,7 +190,7 @@ struct QueryWaiter<D: DepKind> {
     query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
-    cycle: Lock<Option<CycleError<D>>>,
+    cycle: Mutex<Option<CycleError<D>>>,
 }
 
 #[cfg(parallel_compiler)]
@@ -204,20 +204,20 @@ impl<D: DepKind> QueryWaiter<D> {
 #[cfg(parallel_compiler)]
 struct QueryLatchInfo<D: DepKind> {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D>>>,
+    waiters: Vec<Arc<QueryWaiter<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
 pub(super) struct QueryLatch<D: DepKind> {
-    info: Lrc<Mutex<QueryLatchInfo<D>>>,
+    info: Arc<Mutex<QueryLatchInfo<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 impl<D: DepKind> QueryLatch<D> {
     fn new() -> Self {
         QueryLatch {
-            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
+            info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
         }
     }
 
@@ -228,11 +228,11 @@ impl<D: DepKind> QueryLatch<D> {
         span: Span,
     ) -> Result<(), CycleError<D>> {
         let waiter =
-            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
+            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
         // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
-        // although another thread may still have a Lrc reference so we cannot
-        // use Lrc::get_mut
+        // although another thread may still have a Arc reference so we cannot
+        // use Arc::get_mut
         let mut cycle = waiter.cycle.lock();
         match cycle.take() {
             None => Ok(()),
@@ -241,7 +241,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +275,7 @@ impl<D: DepKind> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -427,7 +427,7 @@ where
 fn remove_cycle<D: DepKind>(
     query_map: &QueryMap<D>,
     jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
+    wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -592,7 +592,10 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
         });
     }
 
-    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
+    let alias = if stack
+        .iter()
+        .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+    {
         Some(crate::error::Alias::Ty)
     } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
         Some(crate::error::Alias::Trait)
@@ -607,6 +610,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
         alias,
         cycle_usage: cycle_usage,
         stack_count,
+        note_span: (),
     };
 
     cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
@@ -617,30 +621,50 @@ pub fn print_query_stack<Qcx: QueryContext>(
     mut current_query: Option<QueryJobId>,
     handler: &Handler,
     num_frames: Option<usize>,
+    mut file: Option<std::fs::File>,
 ) -> usize {
     // Be careful relying on global state here: this code is called from
     // a panic hook, which means that the global `Handler` may be in a weird
     // state if it was responsible for triggering the panic.
-    let mut i = 0;
+    let mut count_printed = 0;
+    let mut count_total = 0;
     let query_map = qcx.try_collect_active_jobs();
 
+    if let Some(ref mut file) = file {
+        let _ = writeln!(file, "\n\nquery stack during panic:");
+    }
     while let Some(query) = current_query {
-        if Some(i) == num_frames {
-            break;
-        }
         let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
             break;
         };
-        let mut diag = Diagnostic::new(
-            Level::FailureNote,
-            format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
-        );
-        diag.span = query_info.job.span.into();
-        handler.force_print_diagnostic(diag);
+        if Some(count_printed) < num_frames || num_frames.is_none() {
+            // Only print to stderr as many stack frames as `num_frames` when present.
+            let mut diag = Diagnostic::new(
+                Level::FailureNote,
+                format!(
+                    "#{} [{:?}] {}",
+                    count_printed, query_info.query.dep_kind, query_info.query.description
+                ),
+            );
+            diag.span = query_info.job.span.into();
+            handler.force_print_diagnostic(diag);
+            count_printed += 1;
+        }
+
+        if let Some(ref mut file) = file {
+            let _ = writeln!(
+                file,
+                "#{} [{:?}] {}",
+                count_total, query_info.query.dep_kind, query_info.query.description
+            );
+        }
 
         current_query = query_info.job.parent;
-        i += 1;
+        count_total += 1;
     }
 
-    i
+    if let Some(ref mut file) = file {
+        let _ = writeln!(file, "end of query stack");
+    }
+    count_printed
 }
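The final hunk reworks `print_query_stack` to keep two counters: `count_printed` caps the frames emitted to stderr at `num_frames`, while `count_total` numbers every frame written to the new optional `file` sink, so the on-disk log always holds the full stack. Below is a minimal sketch of that counting scheme, assuming a flat list of frames; `Frame` and `print_stack` are hypothetical stand-ins for rustc's query-job plumbing, not its API.

```rust
use std::fs::File;
use std::io::Write;

/// Hypothetical stand-in for a query-job entry on the stack.
struct Frame {
    description: String,
}

fn print_stack(frames: &[Frame], num_frames: Option<usize>, mut file: Option<File>) -> usize {
    let mut count_printed = 0;
    let mut count_total = 0;

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "\n\nquery stack during panic:");
    }
    for frame in frames {
        // stderr honors the `num_frames` cap (None means unlimited)...
        if num_frames.map_or(true, |n| count_printed < n) {
            eprintln!("#{} {}", count_printed, frame.description);
            count_printed += 1;
        }
        // ...while the file sink always receives every frame.
        if let Some(ref mut file) = file {
            let _ = writeln!(file, "#{} {}", count_total, frame.description);
        }
        count_total += 1;
    }
    if let Some(ref mut file) = file {
        let _ = writeln!(file, "end of query stack");
    }
    // Report only the frames that actually reached stderr.
    count_printed
}

fn main() {
    let frames = vec![
        Frame { description: "type_of(foo)".into() },
        Frame { description: "typeck(foo)".into() },
    ];
    // No file sink here; limit stderr output to one frame.
    assert_eq!(print_stack(&frames, Some(1), None), 1);
}
```

Returning `count_printed` preserves the function's old contract of reporting how many frames were shown on stderr, even though the file may have recorded more.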