summary refs log tree commit diff stats
path: root/compiler/rustc_data_structures/src/sync/parallel.rs
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_data_structures/src/sync/parallel.rs')
-rw-r--r--  compiler/rustc_data_structures/src/sync/parallel.rs  67
1 file changed, 54 insertions, 13 deletions
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
index 1944ddfb7..7783de57f 100644
--- a/compiler/rustc_data_structures/src/sync/parallel.rs
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -3,6 +3,8 @@
#![allow(dead_code)]
+use crate::sync::IntoDynSyncSend;
+use crate::FatalErrorMarker;
use parking_lot::Mutex;
use std::any::Any;
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
@@ -18,14 +20,17 @@ pub use enabled::*;
/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
/// output match the parallel compiler for testing purposes.
pub struct ParallelGuard {
- panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
+ panic: Mutex<Option<IntoDynSyncSend<Box<dyn Any + Send + 'static>>>>,
}
impl ParallelGuard {
pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
catch_unwind(AssertUnwindSafe(f))
.map_err(|err| {
- *self.panic.lock() = Some(err);
+ let mut panic = self.panic.lock();
+ if panic.is_none() || !(*err).is::<FatalErrorMarker>() {
+ *panic = Some(IntoDynSyncSend(err));
+ }
})
.ok()
}
@@ -37,7 +42,7 @@ impl ParallelGuard {
pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
let guard = ParallelGuard { panic: Mutex::new(None) };
let ret = f(&guard);
- if let Some(panic) = guard.panic.into_inner() {
+ if let Some(IntoDynSyncSend(panic)) = guard.panic.into_inner() {
resume_unwind(panic);
}
ret
@@ -77,6 +82,15 @@ mod disabled {
})
}
+ pub fn try_par_for_each_in<T: IntoIterator, E>(
+ t: T,
+ mut for_each: impl FnMut(T::Item) -> Result<(), E>,
+ ) -> Result<(), E> {
+ parallel_guard(|guard| {
+ t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
+ })
+ }
+
pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
t: T,
mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
@@ -97,14 +111,20 @@ mod enabled {
parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
};
(impl $fblock:block [$($blocks:expr,)*] []) => {
- ::rustc_data_structures::sync::scope(|s| {
- $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
- s.spawn(move |_| block.into_inner()());)*
- (|| $fblock)();
+ $crate::sync::parallel_guard(|guard| {
+ $crate::sync::scope(|s| {
+ $(
+ let block = $crate::sync::FromDyn::from(|| $blocks);
+ s.spawn(move |_| {
+ guard.run(move || block.into_inner()());
+ });
+ )*
+ guard.run(|| $fblock);
+ });
});
};
($fblock:block, $($blocks:block),*) => {
- if rustc_data_structures::sync::is_dyn_thread_safe() {
+ if $crate::sync::is_dyn_thread_safe() {
// Reverse the order of the later blocks since Rayon executes them in reverse order
// when using a single thread. This ensures the execution order matches that
// of a single threaded rustc.
@@ -137,11 +157,13 @@ mod enabled {
if mode::is_dyn_thread_safe() {
let oper_a = FromDyn::from(oper_a);
let oper_b = FromDyn::from(oper_b);
- let (a, b) = rayon::join(
- move || FromDyn::from(oper_a.into_inner()()),
- move || FromDyn::from(oper_b.into_inner()()),
- );
- (a.into_inner(), b.into_inner())
+ let (a, b) = parallel_guard(|guard| {
+ rayon::join(
+ move || guard.run(move || FromDyn::from(oper_a.into_inner()())),
+ move || guard.run(move || FromDyn::from(oper_b.into_inner()())),
+ )
+ });
+ (a.unwrap().into_inner(), b.unwrap().into_inner())
} else {
super::disabled::join(oper_a, oper_b)
}
@@ -167,6 +189,25 @@ mod enabled {
});
}
+ pub fn try_par_for_each_in<
+ T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
+ E: Send,
+ >(
+ t: T,
+ for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
+ ) -> Result<(), E> {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let for_each = FromDyn::from(for_each);
+ t.into_par_iter()
+ .filter_map(|i| guard.run(|| for_each(i)))
+ .reduce(|| Ok(()), Result::and)
+ } else {
+ t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
+ }
+ })
+ }
+
pub fn par_map<
I,
T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,