path: root/vendor/rayon-core/tests
Diffstat (limited to 'vendor/rayon-core/tests')
-rw-r--r--  vendor/rayon-core/tests/double_init_fail.rs      15
-rw-r--r--  vendor/rayon-core/tests/init_zero_threads.rs     10
-rw-r--r--  vendor/rayon-core/tests/scope_join.rs            45
-rw-r--r--  vendor/rayon-core/tests/scoped_threadpool.rs     99
-rw-r--r--  vendor/rayon-core/tests/simple_panic.rs           7
-rw-r--r--  vendor/rayon-core/tests/stack_overflow_crash.rs  97
-rw-r--r--  vendor/rayon-core/tests/use_current_thread.rs    57
7 files changed, 330 insertions(+), 0 deletions(-)
diff --git a/vendor/rayon-core/tests/double_init_fail.rs b/vendor/rayon-core/tests/double_init_fail.rs
new file mode 100644
index 0000000..1591530
--- /dev/null
+++ b/vendor/rayon-core/tests/double_init_fail.rs
@@ -0,0 +1,15 @@
+use rayon_core::ThreadPoolBuilder;
+use std::error::Error;
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn double_init_fail() {
+    let result1 = ThreadPoolBuilder::new().build_global();
+    assert!(result1.is_ok());
+    let err = ThreadPoolBuilder::new().build_global().unwrap_err();
+    assert!(err.source().is_none());
+    assert_eq!(
+        err.to_string(),
+        "The global thread pool has already been initialized.",
+    );
+}
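
Note: `build_global` can only succeed once per process, which is what the test above checks. A local pool built with `build()` has no such restriction; a minimal sketch for comparison (illustrative only, not part of the vendored tests):

use rayon_core::ThreadPoolBuilder;

fn main() {
    // Unlike `build_global`, `build` creates an independent pool each time it is called.
    let pool_a = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
    let pool_b = ThreadPoolBuilder::new().num_threads(2).build().unwrap();
    // `install` runs the closure inside the given pool and returns its result.
    assert_eq!(pool_a.install(|| 1 + 1), 2);
    assert_eq!(pool_b.install(|| 2 + 2), 4);
}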
diff --git a/vendor/rayon-core/tests/init_zero_threads.rs b/vendor/rayon-core/tests/init_zero_threads.rs
new file mode 100644
index 0000000..3c1ad25
--- /dev/null
+++ b/vendor/rayon-core/tests/init_zero_threads.rs
@@ -0,0 +1,10 @@
+use rayon_core::ThreadPoolBuilder;
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn init_zero_threads() {
+    ThreadPoolBuilder::new()
+        .num_threads(0)
+        .build_global()
+        .unwrap();
+}
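
`num_threads(0)` is treated as "use the default thread count" rather than an empty pool, which is why the call above succeeds. A minimal sketch on a local pool (not part of the vendored tests):

use rayon_core::ThreadPoolBuilder;

fn main() {
    // 0 means "pick a default", so the resulting pool still has at least one worker.
    let pool = ThreadPoolBuilder::new().num_threads(0).build().unwrap();
    assert!(pool.current_num_threads() >= 1);
}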
diff --git a/vendor/rayon-core/tests/scope_join.rs b/vendor/rayon-core/tests/scope_join.rs
new file mode 100644
index 0000000..9d88133
--- /dev/null
+++ b/vendor/rayon-core/tests/scope_join.rs
@@ -0,0 +1,45 @@
+/// Test that one can emulate join with `scope`:
+fn pseudo_join<F, G>(f: F, g: G)
+where
+    F: FnOnce() + Send,
+    G: FnOnce() + Send,
+{
+    rayon_core::scope(|s| {
+        s.spawn(|_| g());
+        f();
+    });
+}
+
+fn quick_sort<T: PartialOrd + Send>(v: &mut [T]) {
+    if v.len() <= 1 {
+        return;
+    }
+
+    let mid = partition(v);
+    let (lo, hi) = v.split_at_mut(mid);
+    pseudo_join(|| quick_sort(lo), || quick_sort(hi));
+}
+
+fn partition<T: PartialOrd + Send>(v: &mut [T]) -> usize {
+    let pivot = v.len() - 1;
+    let mut i = 0;
+    for j in 0..pivot {
+        if v[j] <= v[pivot] {
+            v.swap(i, j);
+            i += 1;
+        }
+    }
+    v.swap(i, pivot);
+    i
+}
+
+fn is_sorted<T: Send + Ord>(v: &[T]) -> bool {
+    (1..v.len()).all(|i| v[i - 1] <= v[i])
+}
+
+#[test]
+fn scope_join() {
+    let mut v: Vec<i32> = (0..256).rev().collect();
+    quick_sort(&mut v);
+    assert!(is_sorted(&v));
+}
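
For comparison, `rayon_core::join` performs the same fork-join directly and also hands back both closure results, which the `scope`-based emulation above does not; a minimal sketch (not part of the vendored tests):

fn main() {
    // Both closures may run in parallel; `join` returns a tuple of their results.
    let (sum, product) = rayon_core::join(|| (1..=4).sum::<i32>(), || (1..=4).product::<i32>());
    assert_eq!((sum, product), (10, 24));
}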
diff --git a/vendor/rayon-core/tests/scoped_threadpool.rs b/vendor/rayon-core/tests/scoped_threadpool.rs
new file mode 100644
index 0000000..9321471
--- /dev/null
+++ b/vendor/rayon-core/tests/scoped_threadpool.rs
@@ -0,0 +1,99 @@
+use crossbeam_utils::thread;
+use rayon_core::ThreadPoolBuilder;
+
+#[derive(PartialEq, Eq, Debug)]
+struct Local(i32);
+
+scoped_tls::scoped_thread_local!(static LOCAL: Local);
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn missing_scoped_tls() {
+    LOCAL.set(&Local(42), || {
+        let pool = ThreadPoolBuilder::new()
+            .build()
+            .expect("thread pool created");
+
+        // `LOCAL` is not set in the pool.
+        pool.install(|| {
+            assert!(!LOCAL.is_set());
+        });
+    });
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn spawn_scoped_tls_threadpool() {
+    LOCAL.set(&Local(42), || {
+        LOCAL.with(|x| {
+            thread::scope(|scope| {
+                let pool = ThreadPoolBuilder::new()
+                    .spawn_handler(move |thread| {
+                        scope
+                            .builder()
+                            .spawn(move |_| {
+                                // Borrow the same local value in the thread pool.
+                                LOCAL.set(x, || thread.run())
+                            })
+                            .map(|_| ())
+                    })
+                    .build()
+                    .expect("thread pool created");
+
+                // The pool matches our local value.
+                pool.install(|| {
+                    assert!(LOCAL.is_set());
+                    LOCAL.with(|y| {
+                        assert_eq!(x, y);
+                    });
+                });
+
+                // If we change our local value, the pool is not affected.
+                LOCAL.set(&Local(-1), || {
+                    pool.install(|| {
+                        assert!(LOCAL.is_set());
+                        LOCAL.with(|y| {
+                            assert_eq!(x, y);
+                        });
+                    });
+                });
+            })
+            .expect("scope threads ok");
+            // `thread::scope` will wait for the threads to exit before returning.
+        });
+    });
+}
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn build_scoped_tls_threadpool() {
+    LOCAL.set(&Local(42), || {
+        LOCAL.with(|x| {
+            ThreadPoolBuilder::new()
+                .build_scoped(
+                    move |thread| LOCAL.set(x, || thread.run()),
+                    |pool| {
+                        // The pool matches our local value.
+                        pool.install(|| {
+                            assert!(LOCAL.is_set());
+                            LOCAL.with(|y| {
+                                assert_eq!(x, y);
+                            });
+                        });
+
+                        // If we change our local value, the pool is not affected.
+                        LOCAL.set(&Local(-1), || {
+                            pool.install(|| {
+                                assert!(LOCAL.is_set());
+                                LOCAL.with(|y| {
+                                    assert_eq!(x, y);
+                                });
+                            });
+                        });
+                    },
+                )
+                .expect("thread pool created");
+            // Internally, `std::thread::scope` will wait for the threads to exit before returning.
+        });
+    });
+}
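
The `build_scoped` test above relies on the wrapper/with-pool split: the first closure wraps every worker thread and must eventually call `thread.run()`, while the second receives the finished pool. A smaller sketch with an ordinary borrowed value instead of scoped TLS (not part of the vendored tests):

use rayon_core::ThreadPoolBuilder;

fn main() {
    // A stack-local value borrowed by the pool's worker threads; the pool cannot outlive it.
    let greeting = String::from("hello");
    let len = ThreadPoolBuilder::new()
        .build_scoped(
            // Wrapper: runs once per worker thread and must drive `thread.run()`.
            |thread| {
                let _ = greeting.as_str(); // the borrow is visible to every worker
                thread.run()
            },
            // With-pool closure: its return value is passed through `build_scoped`.
            |pool| pool.install(|| greeting.len()),
        )
        .unwrap();
    assert_eq!(len, 5);
}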
diff --git a/vendor/rayon-core/tests/simple_panic.rs b/vendor/rayon-core/tests/simple_panic.rs
new file mode 100644
index 0000000..2564482
--- /dev/null
+++ b/vendor/rayon-core/tests/simple_panic.rs
@@ -0,0 +1,7 @@
+use rayon_core::join;
+
+#[test]
+#[should_panic(expected = "should panic")]
+fn simple_panic() {
+    join(|| {}, || panic!("should panic"));
+}
diff --git a/vendor/rayon-core/tests/stack_overflow_crash.rs b/vendor/rayon-core/tests/stack_overflow_crash.rs
new file mode 100644
index 0000000..7dcde43
--- /dev/null
+++ b/vendor/rayon-core/tests/stack_overflow_crash.rs
@@ -0,0 +1,97 @@
+use rayon_core::ThreadPoolBuilder;
+
+use std::env;
+use std::process::{Command, ExitStatus, Stdio};
+
+#[cfg(target_os = "linux")]
+use std::os::unix::process::ExitStatusExt;
+
+fn force_stack_overflow(depth: u32) {
+    let mut buffer = [0u8; 1024 * 1024];
+    std::hint::black_box(&mut buffer);
+    if depth > 0 {
+        force_stack_overflow(depth - 1);
+    }
+}
+
+#[cfg(unix)]
+fn disable_core() {
+    unsafe {
+        libc::setrlimit(
+            libc::RLIMIT_CORE,
+            &libc::rlimit {
+                rlim_cur: 0,
+                rlim_max: 0,
+            },
+        );
+    }
+}
+
+#[cfg(unix)]
+fn overflow_code() -> Option<i32> {
+    None
+}
+
+#[cfg(windows)]
+fn overflow_code() -> Option<i32> {
+    use std::os::windows::process::ExitStatusExt;
+
+    ExitStatus::from_raw(0xc00000fd /*STATUS_STACK_OVERFLOW*/).code()
+}
+
+#[test]
+#[cfg_attr(not(any(unix, windows)), ignore)]
+fn stack_overflow_crash() {
+    // First check that the recursive call actually causes a stack overflow,
+    // and does not get optimized away.
+    let status = run_ignored("run_with_small_stack");
+    assert!(!status.success());
+    #[cfg(any(unix, windows))]
+    assert_eq!(status.code(), overflow_code());
+    #[cfg(target_os = "linux")]
+    assert!(matches!(
+        status.signal(),
+        Some(libc::SIGABRT | libc::SIGSEGV)
+    ));
+
+    // Now run with a larger stack and verify correct operation.
+    let status = run_ignored("run_with_large_stack");
+    assert_eq!(status.code(), Some(0));
+    #[cfg(target_os = "linux")]
+    assert_eq!(status.signal(), None);
+}
+
+fn run_ignored(test: &str) -> ExitStatus {
+    Command::new(env::current_exe().unwrap())
+        .arg("--ignored")
+        .arg("--exact")
+        .arg(test)
+        .stdout(Stdio::null())
+        .stderr(Stdio::null())
+        .status()
+        .unwrap()
+}
+
+#[test]
+#[ignore]
+fn run_with_small_stack() {
+    run_with_stack(8);
+}
+
+#[test]
+#[ignore]
+fn run_with_large_stack() {
+    run_with_stack(48);
+}
+
+fn run_with_stack(stack_size_in_mb: usize) {
+    let pool = ThreadPoolBuilder::new()
+        .stack_size(stack_size_in_mb * 1024 * 1024)
+        .build()
+        .unwrap();
+    pool.install(|| {
+        #[cfg(unix)]
+        disable_core();
+        force_stack_overflow(32);
+    });
+}
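
The `stack_size` builder option used by `run_with_stack` applies to every worker thread the pool spawns; a minimal sketch outside the crash-test harness (not part of the vendored tests):

use rayon_core::ThreadPoolBuilder;

fn recurse(n: u32) -> u32 {
    if n == 0 {
        0
    } else {
        1 + recurse(n - 1)
    }
}

fn main() {
    // Give each worker a 16 MiB stack instead of the platform default.
    let pool = ThreadPoolBuilder::new()
        .stack_size(16 * 1024 * 1024)
        .build()
        .unwrap();
    // Deep (but bounded) recursion now has more headroom on the pool's threads.
    assert_eq!(pool.install(|| recurse(10_000)), 10_000);
}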
diff --git a/vendor/rayon-core/tests/use_current_thread.rs b/vendor/rayon-core/tests/use_current_thread.rs
new file mode 100644
index 0000000..ec801c9
--- /dev/null
+++ b/vendor/rayon-core/tests/use_current_thread.rs
@@ -0,0 +1,57 @@
+use rayon_core::ThreadPoolBuilder;
+use std::sync::{Arc, Condvar, Mutex};
+use std::thread::{self, JoinHandle};
+
+#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
+fn use_current_thread_basic() {
+    static JOIN_HANDLES: Mutex<Vec<JoinHandle<()>>> = Mutex::new(Vec::new());
+    let pool = ThreadPoolBuilder::new()
+        .num_threads(2)
+        .use_current_thread()
+        .spawn_handler(|builder| {
+            let handle = thread::Builder::new().spawn(|| builder.run())?;
+            JOIN_HANDLES.lock().unwrap().push(handle);
+            Ok(())
+        })
+        .build()
+        .unwrap();
+    assert_eq!(rayon_core::current_thread_index(), Some(0));
+    assert_eq!(
+        JOIN_HANDLES.lock().unwrap().len(),
+        1,
+        "Should only spawn one extra thread"
+    );
+
+    let another_pool = ThreadPoolBuilder::new()
+        .num_threads(2)
+        .use_current_thread()
+        .build();
+    assert!(
+        another_pool.is_err(),
+        "Should error if the thread is already part of a pool"
+    );
+
+    let pair = Arc::new((Mutex::new(false), Condvar::new()));
+    let pair2 = Arc::clone(&pair);
+    pool.spawn(move || {
+        assert_ne!(rayon_core::current_thread_index(), Some(0));
+        // This should execute even if the current thread is blocked, since we have two threads in
+        // the pool.
+        let &(ref started, ref condvar) = &*pair2;
+        *started.lock().unwrap() = true;
+        condvar.notify_one();
+    });
+
+    let _guard = pair
+        .1
+        .wait_while(pair.0.lock().unwrap(), |ran| !*ran)
+        .unwrap();
+    std::mem::drop(pool); // Drop the pool.
+
+    // Wait until all threads have actually exited. This is not really needed, other than to
+    // reduce noise of leak-checking tools.
+    for handle in std::mem::take(&mut *JOIN_HANDLES.lock().unwrap()) {
+        let _ = handle.join();
+    }
+}