summary refs log tree commit diff stats
path: root/compiler/rustc_data_structures/src/sync.rs
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_data_structures/src/sync.rs')
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs  338
1 files changed, 24 insertions, 314 deletions
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 25a082373..cca043ba0 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,7 +26,8 @@
//! | `AtomicU64` | `Cell<u64>` | `atomic::AtomicU64` |
//! | `AtomicUsize` | `Cell<usize>` | `atomic::AtomicUsize` |
//! | | | |
-//! | `Lock<T>` | `RefCell<T>` | `parking_lot::Mutex<T>` |
+//! | `Lock<T>` | `RefCell<T>` | `RefCell<T>` or |
+//! | | | `parking_lot::Mutex<T>` |
//! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` |
//! | `MTLock<T>` [^1] | `T` | `Lock<T>` |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>` |
@@ -43,11 +44,18 @@ pub use crate::marker::*;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
-use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+mod lock;
+pub use lock::{Lock, LockGuard, Mode};
mod worker_local;
pub use worker_local::{Registry, WorkerLocal};
+mod parallel;
+#[cfg(parallel_compiler)]
+pub use parallel::scope;
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard};
+
pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;
@@ -55,6 +63,9 @@ pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
mod vec;
+mod freeze;
+pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+
mod mode {
use super::Ordering;
use std::sync::atomic::AtomicU8;
@@ -75,6 +86,12 @@ mod mode {
}
}
+ // Whether thread safety might be enabled.
+ #[inline]
+ pub fn might_be_dyn_thread_safe() -> bool {
+ DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
+ }
+
// Only set by the `-Z threads` compile option
pub fn set_dyn_thread_safe_mode(mode: bool) {
let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@@ -94,14 +111,15 @@ pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
cfg_if! {
if #[cfg(not(parallel_compiler))] {
+ use std::ops::Add;
+ use std::cell::Cell;
+
pub unsafe auto trait Send {}
pub unsafe auto trait Sync {}
unsafe impl<T> Send for T {}
unsafe impl<T> Sync for T {}
- use std::ops::Add;
-
/// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
/// It has explicit ordering arguments and is only intended for use with
/// the native atomic types.
@@ -182,88 +200,17 @@ cfg_if! {
pub type AtomicU32 = Atomic<u32>;
pub type AtomicU64 = Atomic<u64>;
- pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
- where A: FnOnce() -> RA,
- B: FnOnce() -> RB
- {
- (oper_a(), oper_b())
- }
-
- #[macro_export]
- macro_rules! parallel {
- ($($blocks:block),*) => {
- // We catch panics here ensuring that all the blocks execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- $(
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $blocks)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- )*
- if let Some(panic) = panic {
- ::std::panic::resume_unwind(panic);
- }
- }
- }
-
- pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
- // We catch panics here ensuring that all the loop iterations execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- t.into_iter().for_each(|i| {
- if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- });
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- }
-
- pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
- t: T,
- mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
- ) -> C {
- // We catch panics here ensuring that all the loop iterations execute.
- let mut panic = None;
- let r = t.into_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- if panic.is_none() {
- panic = Some(p);
- }
- None
- }
- }
- }).collect();
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- r
- }
-
pub use std::rc::Rc as Lrc;
pub use std::rc::Weak as Weak;
pub use std::cell::Ref as ReadGuard;
pub use std::cell::Ref as MappedReadGuard;
pub use std::cell::RefMut as WriteGuard;
pub use std::cell::RefMut as MappedWriteGuard;
- pub use std::cell::RefMut as LockGuard;
pub use std::cell::RefMut as MappedLockGuard;
- pub use std::cell::OnceCell;
+ pub use std::cell::OnceCell as OnceLock;
use std::cell::RefCell as InnerRwLock;
- use std::cell::RefCell as InnerLock;
-
- use std::cell::Cell;
pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
@@ -313,10 +260,9 @@ cfg_if! {
pub use parking_lot::RwLockWriteGuard as WriteGuard;
pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
- pub use parking_lot::MutexGuard as LockGuard;
pub use parking_lot::MappedMutexGuard as MappedLockGuard;
- pub use std::sync::OnceLock as OnceCell;
+ pub use std::sync::OnceLock;
pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
@@ -355,171 +301,10 @@ cfg_if! {
}
}
- use parking_lot::Mutex as InnerLock;
use parking_lot::RwLock as InnerRwLock;
use std::thread;
- #[inline]
- pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
- where
- A: FnOnce() -> RA + DynSend,
- B: FnOnce() -> RB + DynSend,
- {
- if mode::is_dyn_thread_safe() {
- let oper_a = FromDyn::from(oper_a);
- let oper_b = FromDyn::from(oper_b);
- let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
- (a.into_inner(), b.into_inner())
- } else {
- (oper_a(), oper_b())
- }
- }
-
- // This function only works when `mode::is_dyn_thread_safe()`.
- pub fn scope<'scope, OP, R>(op: OP) -> R
- where
- OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
- R: DynSend,
- {
- let op = FromDyn::from(op);
- rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
- }
-
- /// Runs a list of blocks in parallel. The first block is executed immediately on
- /// the current thread. Use that for the longest running block.
- #[macro_export]
- macro_rules! parallel {
- (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
- parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
- };
- (impl $fblock:block [$($blocks:expr,)*] []) => {
- ::rustc_data_structures::sync::scope(|s| {
- $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
- s.spawn(move |_| block.into_inner()());)*
- (|| $fblock)();
- });
- };
- ($fblock:block, $($blocks:block),*) => {
- if rustc_data_structures::sync::is_dyn_thread_safe() {
- // Reverse the order of the later blocks since Rayon executes them in reverse order
- // when using a single thread. This ensures the execution order matches that
- // of a single threaded rustc.
- parallel!(impl $fblock [] [$($blocks),*]);
- } else {
- // We catch panics here ensuring that all the blocks execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $fblock)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- $(
- if let Err(p) = ::std::panic::catch_unwind(
- ::std::panic::AssertUnwindSafe(|| $blocks)
- ) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- )*
- if let Some(panic) = panic {
- ::std::panic::resume_unwind(panic);
- }
- }
- };
- }
-
- use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
-
- pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
- t: T,
- for_each: impl Fn(I) + DynSync + DynSend
- ) {
- if mode::is_dyn_thread_safe() {
- let for_each = FromDyn::from(for_each);
- let panic: Lock<Option<_>> = Lock::new(None);
- t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- let mut l = panic.lock();
- if l.is_none() {
- *l = Some(p)
- }
- });
-
- if let Some(panic) = panic.into_inner() {
- resume_unwind(panic);
- }
- } else {
- // We catch panics here ensuring that all the loop iterations execute.
- // This makes behavior consistent with the parallel compiler.
- let mut panic = None;
- t.into_iter().for_each(|i| {
- if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
- if panic.is_none() {
- panic = Some(p);
- }
- }
- });
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- }
- }
-
- pub fn par_map<
- I,
- T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
- R: std::marker::Send,
- C: FromIterator<R> + FromParallelIterator<R>
- >(
- t: T,
- map: impl Fn(I) -> R + DynSync + DynSend
- ) -> C {
- if mode::is_dyn_thread_safe() {
- let panic: Lock<Option<_>> = Lock::new(None);
- let map = FromDyn::from(map);
- // We catch panics here ensuring that all the loop iterations execute.
- let r = t.into_par_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- let mut l = panic.lock();
- if l.is_none() {
- *l = Some(p);
- }
- None
- },
- }
- }).collect();
-
- if let Some(panic) = panic.into_inner() {
- resume_unwind(panic);
- }
- r
- } else {
- // We catch panics here ensuring that all the loop iterations execute.
- let mut panic = None;
- let r = t.into_iter().filter_map(|i| {
- match catch_unwind(AssertUnwindSafe(|| map(i))) {
- Ok(r) => Some(r),
- Err(p) => {
- if panic.is_none() {
- panic = Some(p);
- }
- None
- }
- }
- }).collect();
- if let Some(panic) = panic {
- resume_unwind(panic);
- }
- r
- }
- }
-
/// This makes locks panic if they are already held.
/// It is only useful when you are running in a single thread
const ERROR_CHECKING: bool = false;
@@ -542,81 +327,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
}
}
-#[derive(Debug)]
-pub struct Lock<T>(InnerLock<T>);
-
-impl<T> Lock<T> {
- #[inline(always)]
- pub fn new(inner: T) -> Self {
- Lock(InnerLock::new(inner))
- }
-
- #[inline(always)]
- pub fn into_inner(self) -> T {
- self.0.into_inner()
- }
-
- #[inline(always)]
- pub fn get_mut(&mut self) -> &mut T {
- self.0.get_mut()
- }
-
- #[cfg(parallel_compiler)]
- #[inline(always)]
- pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
- self.0.try_lock()
- }
-
- #[cfg(not(parallel_compiler))]
- #[inline(always)]
- pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
- self.0.try_borrow_mut().ok()
- }
-
- #[cfg(parallel_compiler)]
- #[inline(always)]
- #[track_caller]
- pub fn lock(&self) -> LockGuard<'_, T> {
- if ERROR_CHECKING {
- self.0.try_lock().expect("lock was already held")
- } else {
- self.0.lock()
- }
- }
-
- #[cfg(not(parallel_compiler))]
- #[inline(always)]
- #[track_caller]
- pub fn lock(&self) -> LockGuard<'_, T> {
- self.0.borrow_mut()
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
- f(&mut *self.lock())
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn borrow(&self) -> LockGuard<'_, T> {
- self.lock()
- }
-
- #[inline(always)]
- #[track_caller]
- pub fn borrow_mut(&self) -> LockGuard<'_, T> {
- self.lock()
- }
-}
-
-impl<T: Default> Default for Lock<T> {
- #[inline]
- fn default() -> Self {
- Lock::new(T::default())
- }
-}
-
#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);