Diffstat (limited to 'vendor/memchr/src/arch/all')
-rw-r--r--  vendor/memchr/src/arch/all/memchr.rs                  | 996
-rw-r--r--  vendor/memchr/src/arch/all/mod.rs                     | 234
-rw-r--r--  vendor/memchr/src/arch/all/packedpair/default_rank.rs | 258
-rw-r--r--  vendor/memchr/src/arch/all/packedpair/mod.rs          | 359
-rw-r--r--  vendor/memchr/src/arch/all/rabinkarp.rs               | 390
-rw-r--r--  vendor/memchr/src/arch/all/shiftor.rs                 |  89
-rw-r--r--  vendor/memchr/src/arch/all/twoway.rs                  | 877
7 files changed, 3203 insertions, 0 deletions
diff --git a/vendor/memchr/src/arch/all/memchr.rs b/vendor/memchr/src/arch/all/memchr.rs new file mode 100644 index 0000000..435b1be --- /dev/null +++ b/vendor/memchr/src/arch/all/memchr.rs @@ -0,0 +1,996 @@ +/*! +Provides architecture independent implementations of `memchr` and friends. + +The main types in this module are [`One`], [`Two`] and [`Three`]. They are for +searching for one, two or three distinct bytes, respectively, in a haystack. +Each type also has corresponding double ended iterators. These searchers +are typically slower than hand-coded vector routines accomplishing the same +task, but are also typically faster than naive scalar code. These routines +effectively work by treating a `usize` as a vector of 8-bit lanes, and thus +achieves some level of data parallelism even without explicit vector support. + +The `One` searcher also provides a [`One::count`] routine for efficiently +counting the number of times a single byte occurs in a haystack. This is +useful, for example, for counting the number of lines in a haystack. This +routine exists because it is usually faster, especially with a high match +count, then using [`One::find`] repeatedly. ([`OneIter`] specializes its +`Iterator::count` implementation to use this routine.) + +Only one, two and three bytes are supported because three bytes is about +the point where one sees diminishing returns. Beyond this point and it's +probably (but not necessarily) better to just use a simple `[bool; 256]` array +or similar. However, it depends mightily on the specific work-load and the +expected match frequency. +*/ + +use crate::{arch::generic::memchr as generic, ext::Pointer}; + +/// The number of bytes in a single `usize` value. +const USIZE_BYTES: usize = (usize::BITS / 8) as usize; +/// The bits that must be zero for a `*const usize` to be properly aligned. +const USIZE_ALIGN: usize = USIZE_BYTES - 1; + +/// Finds all occurrences of a single byte in a haystack. +#[derive(Clone, Copy, Debug)] +pub struct One { + s1: u8, + v1: usize, +} + +impl One { + /// The number of bytes we examine per each iteration of our search loop. + const LOOP_BYTES: usize = 2 * USIZE_BYTES; + + /// Create a new searcher that finds occurrences of the byte given. + #[inline] + pub fn new(needle: u8) -> One { + One { s1: needle, v1: splat(needle) } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new(needle: u8) -> Option<One> { + Some(One::new(needle)) + } + + /// Return the first occurrence of the needle in the given haystack. If no + /// such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of the needle in the given haystack. If no + /// such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. 
+ #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Counts all occurrences of this byte in the given haystack. + #[inline] + pub fn count(&self, haystack: &[u8]) -> usize { + // SAFETY: All of our pointers are derived directly from a borrowed + // slice, which is guaranteed to be valid. + unsafe { + let start = haystack.as_ptr(); + let end = start.add(haystack.len()); + self.count_raw(start, end) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. + // The first iteration of the loop below will overlap with the the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + if len <= One::LOOP_BYTES { + return generic::fwd_byte_by_byte(cur, end, confirm); + } + debug_assert!(end.sub(One::LOOP_BYTES) >= start); + while cur <= end.sub(One::LOOP_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let a = cur.cast::<usize>().read(); + let b = cur.add(USIZE_BYTES).cast::<usize>().read(); + if self.has_needle(a) || self.has_needle(b) { + break; + } + cur = cur.add(One::LOOP_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. 
+ /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + if len <= One::LOOP_BYTES { + return generic::rev_byte_by_byte(start, cur, confirm); + } + while cur >= start.add(One::LOOP_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let a = cur.sub(2 * USIZE_BYTES).cast::<usize>().read(); + let b = cur.sub(1 * USIZE_BYTES).cast::<usize>().read(); + if self.has_needle(a) || self.has_needle(b) { + break; + } + cur = cur.sub(One::LOOP_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Counts all occurrences of this byte in the given haystack represented + /// by raw pointers. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `0` will always be returned. + #[inline] + pub unsafe fn count_raw(&self, start: *const u8, end: *const u8) -> usize { + if start >= end { + return 0; + } + // Sadly I couldn't get the SWAR approach to work here, so we just do + // one byte at a time for now. PRs to improve this are welcome. + let mut ptr = start; + let mut count = 0; + while ptr < end { + count += (ptr.read() == self.s1) as usize; + ptr = ptr.offset(1); + } + count + } + + /// Returns an iterator over all occurrences of the needle byte in the + /// given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. 
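// An illustrative usage sketch (not part of the vendored source), assuming the
// public `memchr::arch::all::memchr::One` API documented above: `find`/`rfind`
// report the first/last offset of the needle byte, `count` tallies occurrences,
// and `iter` yields every offset in order.
fn one_usage_sketch() {
    use memchr::arch::all::memchr::One;

    let finder = One::new(b'\n');
    let haystack = b"line one\nline two\n";
    assert_eq!(Some(8), finder.find(haystack));
    assert_eq!(Some(17), finder.rfind(haystack));
    assert_eq!(2, finder.count(haystack));
    let offsets: Vec<usize> = finder.iter(haystack).collect();
    assert_eq!(vec![8, 17], offsets);
}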
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> OneIter<'a, 'h> { + OneIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte + } +} + +/// An iterator over all occurrences of a single byte in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`One::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`One`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct OneIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a One, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for OneIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn count(self) -> usize { + self.it.count(|s, e| { + // SAFETY: We rely on our generic iterator to return valid start + // and end pointers. + unsafe { self.searcher.count_raw(s, e) } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for OneIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Finds all occurrences of two bytes in a haystack. +/// +/// That is, this reports matches of one of two possible bytes. For example, +/// searching for `a` or `b` in `afoobar` would report matches at offsets `0`, +/// `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Two { + s1: u8, + s2: u8, + v1: usize, + v2: usize, +} + +impl Two { + /// Create a new searcher that finds occurrences of the two needle bytes + /// given. + #[inline] + pub fn new(needle1: u8, needle2: u8) -> Two { + Two { + s1: needle1, + s2: needle2, + v1: splat(needle1), + v2: splat(needle2), + } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new(needle1: u8, needle2: u8) -> Option<Two> { + Some(Two::new(needle1, needle2)) + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. 
+ unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. + // The first iteration of the loop below will overlap with the the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + debug_assert!(end.sub(USIZE_BYTES) >= start); + while cur <= end.sub(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.cast::<usize>().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.add(USIZE_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. 
+ /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + while cur >= start.add(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.sub(USIZE_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Returns an iterator over all occurrences of one of the needle bytes in + /// the given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. + pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> TwoIter<'a, 'h> { + TwoIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) || has_zero_byte(self.v2 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte || self.s2 == haystack_byte + } +} + +/// An iterator over all occurrences of two possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Two::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Two`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct TwoIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a Two, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for TwoIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for TwoIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. 
+ unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Finds all occurrences of three bytes in a haystack. +/// +/// That is, this reports matches of one of three possible bytes. For example, +/// searching for `a`, `b` or `o` in `afoobar` would report matches at offsets +/// `0`, `2`, `3`, `4` and `5`. +#[derive(Clone, Copy, Debug)] +pub struct Three { + s1: u8, + s2: u8, + s3: u8, + v1: usize, + v2: usize, + v3: usize, +} + +impl Three { + /// Create a new searcher that finds occurrences of the three needle bytes + /// given. + #[inline] + pub fn new(needle1: u8, needle2: u8, needle3: u8) -> Three { + Three { + s1: needle1, + s2: needle2, + s3: needle3, + v1: splat(needle1), + v2: splat(needle2), + v3: splat(needle3), + } + } + + /// A test-only routine so that we can bundle a bunch of quickcheck + /// properties into a single macro. Basically, this provides a constructor + /// that makes it identical to most other memchr implementations, which + /// have fallible constructors. + #[cfg(test)] + pub(crate) fn try_new( + needle1: u8, + needle2: u8, + needle3: u8, + ) -> Option<Three> { + Some(Three::new(needle1, needle2, needle3)) + } + + /// Return the first occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.find_raw(s, e) + }) + } + } + + /// Return the last occurrence of one of the needle bytes in the given + /// haystack. If no such occurrence exists, then `None` is returned. + /// + /// The occurrence is reported as an offset into `haystack`. Its maximum + /// value for a non-empty haystack is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8]) -> Option<usize> { + // SAFETY: `find_raw` guarantees that if a pointer is returned, it + // falls within the bounds of the start and end pointers. + unsafe { + generic::search_slice_with_raw(haystack, |s, e| { + self.rfind_raw(s, e) + }) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. 
+ #[inline] + pub unsafe fn find_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // The start of the search may not be aligned to `*const usize`, + // so we do an unaligned load here. + let chunk = start.cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::fwd_byte_by_byte(start, end, confirm); + } + + // And now we start our search at a guaranteed aligned position. + // The first iteration of the loop below will overlap with the the + // unaligned chunk above in cases where the search starts at an + // unaligned offset, but that's okay as we're only here if that + // above didn't find a match. + let mut cur = + start.add(USIZE_BYTES - (start.as_usize() & USIZE_ALIGN)); + debug_assert!(cur > start); + debug_assert!(end.sub(USIZE_BYTES) >= start); + while cur <= end.sub(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.cast::<usize>().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.add(USIZE_BYTES); + } + generic::fwd_byte_by_byte(cur, end, confirm) + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `< end`. + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// + /// Note that callers may pass a pair of pointers such that `start >= end`. + /// In that case, `None` will always be returned. + #[inline] + pub unsafe fn rfind_raw( + &self, + start: *const u8, + end: *const u8, + ) -> Option<*const u8> { + if start >= end { + return None; + } + let confirm = |b| self.confirm(b); + let len = end.distance(start); + if len < USIZE_BYTES { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let chunk = end.sub(USIZE_BYTES).cast::<usize>().read_unaligned(); + if self.has_needle(chunk) { + return generic::rev_byte_by_byte(start, end, confirm); + } + + let mut cur = end.sub(end.as_usize() & USIZE_ALIGN); + debug_assert!(start <= cur && cur <= end); + while cur >= start.add(USIZE_BYTES) { + debug_assert_eq!(0, cur.as_usize() % USIZE_BYTES); + + let chunk = cur.sub(USIZE_BYTES).cast::<usize>().read(); + if self.has_needle(chunk) { + break; + } + cur = cur.sub(USIZE_BYTES); + } + generic::rev_byte_by_byte(start, cur, confirm) + } + + /// Returns an iterator over all occurrences of one of the needle bytes in + /// the given haystack. + /// + /// The iterator returned implements `DoubleEndedIterator`. This means it + /// can also be used to find occurrences in reverse order. 
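// An illustrative sketch (not part of the vendored source) of the example from
// the `Three` docs above, assuming the public `memchr::arch::all::memchr::Three`
// API: searching for `a`, `b` or `o` in `afoobar` reports offsets 0, 2, 3, 4
// and 5.
fn three_usage_sketch() {
    use memchr::arch::all::memchr::Three;

    let finder = Three::new(b'a', b'b', b'o');
    let offsets: Vec<usize> = finder.iter(b"afoobar").collect();
    assert_eq!(vec![0, 2, 3, 4, 5], offsets);
}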
+ pub fn iter<'a, 'h>(&'a self, haystack: &'h [u8]) -> ThreeIter<'a, 'h> { + ThreeIter { searcher: self, it: generic::Iter::new(haystack) } + } + + #[inline(always)] + fn has_needle(&self, chunk: usize) -> bool { + has_zero_byte(self.v1 ^ chunk) + || has_zero_byte(self.v2 ^ chunk) + || has_zero_byte(self.v3 ^ chunk) + } + + #[inline(always)] + fn confirm(&self, haystack_byte: u8) -> bool { + self.s1 == haystack_byte + || self.s2 == haystack_byte + || self.s3 == haystack_byte + } +} + +/// An iterator over all occurrences of three possible bytes in a haystack. +/// +/// This iterator implements `DoubleEndedIterator`, which means it can also be +/// used to find occurrences in reverse order. +/// +/// This iterator is created by the [`Three::iter`] method. +/// +/// The lifetime parameters are as follows: +/// +/// * `'a` refers to the lifetime of the underlying [`Three`] searcher. +/// * `'h` refers to the lifetime of the haystack being searched. +#[derive(Clone, Debug)] +pub struct ThreeIter<'a, 'h> { + /// The underlying memchr searcher. + searcher: &'a Three, + /// Generic iterator implementation. + it: generic::Iter<'h>, +} + +impl<'a, 'h> Iterator for ThreeIter<'a, 'h> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'find_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next(|s, e| self.searcher.find_raw(s, e)) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + self.it.size_hint() + } +} + +impl<'a, 'h> DoubleEndedIterator for ThreeIter<'a, 'h> { + #[inline] + fn next_back(&mut self) -> Option<usize> { + // SAFETY: We rely on the generic iterator to provide valid start + // and end pointers, but we guarantee that any pointer returned by + // 'rfind_raw' falls within the bounds of the start and end pointer. + unsafe { self.it.next_back(|s, e| self.searcher.rfind_raw(s, e)) } + } +} + +/// Return `true` if `x` contains any zero byte. +/// +/// That is, this routine treats `x` as a register of 8-bit lanes and returns +/// true when any of those lanes is `0`. +/// +/// From "Matters Computational" by J. Arndt. +#[inline(always)] +fn has_zero_byte(x: usize) -> bool { + // "The idea is to subtract one from each of the bytes and then look for + // bytes where the borrow propagated all the way to the most significant + // bit." + const LO: usize = splat(0x01); + const HI: usize = splat(0x80); + + (x.wrapping_sub(LO) & !x & HI) != 0 +} + +/// Repeat the given byte into a word size number. That is, every 8 bits +/// is equivalent to the given byte. For example, if `b` is `\x4E` or +/// `01001110` in binary, then the returned value on a 32-bit system would be: +/// `01001110_01001110_01001110_01001110`. +#[inline(always)] +const fn splat(b: u8) -> usize { + // TODO: use `usize::from` once it can be used in const context. 
+ (b as usize) * (usize::MAX / 255) +} + +#[cfg(test)] +mod tests { + use super::*; + + define_memchr_quickcheck!(super, try_new); + + #[test] + fn forward_one() { + crate::tests::memchr::Runner::new(1).forward_iter( + |haystack, needles| { + Some(One::new(needles[0]).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_one() { + crate::tests::memchr::Runner::new(1).reverse_iter( + |haystack, needles| { + Some(One::new(needles[0]).iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn count_one() { + crate::tests::memchr::Runner::new(1).count_iter(|haystack, needles| { + Some(One::new(needles[0]).iter(haystack).count()) + }) + } + + #[test] + fn forward_two() { + crate::tests::memchr::Runner::new(2).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_two() { + crate::tests::memchr::Runner::new(2).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + Some(Two::new(n1, n2).iter(haystack).rev().collect()) + }, + ) + } + + #[test] + fn forward_three() { + crate::tests::memchr::Runner::new(3).forward_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3).iter(haystack).collect()) + }, + ) + } + + #[test] + fn reverse_three() { + crate::tests::memchr::Runner::new(3).reverse_iter( + |haystack, needles| { + let n1 = needles.get(0).copied()?; + let n2 = needles.get(1).copied()?; + let n3 = needles.get(2).copied()?; + Some(Three::new(n1, n2, n3).iter(haystack).rev().collect()) + }, + ) + } + + // This was found by quickcheck in the course of refactoring this crate + // after memchr 2.5.0. + #[test] + fn regression_double_ended_iterator() { + let finder = One::new(b'a'); + let haystack = "a"; + let mut it = finder.iter(haystack.as_bytes()); + assert_eq!(Some(0), it.next()); + assert_eq!(None, it.next_back()); + } + + // This regression test was caught by ripgrep's test suite on i686 when + // upgrading to memchr 2.6. Namely, something about the \x0B bytes here + // screws with the SWAR counting approach I was using. This regression test + // prompted me to remove the SWAR counting approach and just replace it + // with a byte-at-a-time loop. + #[test] + fn regression_count_new_lines() { + let haystack = "01234567\x0b\n\x0b\n\x0b\n\x0b\nx"; + let count = One::new(b'\n').count(haystack.as_bytes()); + assert_eq!(4, count); + } +} diff --git a/vendor/memchr/src/arch/all/mod.rs b/vendor/memchr/src/arch/all/mod.rs new file mode 100644 index 0000000..559cb75 --- /dev/null +++ b/vendor/memchr/src/arch/all/mod.rs @@ -0,0 +1,234 @@ +/*! +Contains architecture independent routines. + +These routines are often used as a "fallback" implementation when the more +specialized architecture dependent routines are unavailable. +*/ + +pub mod memchr; +pub mod packedpair; +pub mod rabinkarp; +#[cfg(feature = "alloc")] +pub mod shiftor; +pub mod twoway; + +/// Returns true if and only if `needle` is a prefix of `haystack`. +/// +/// This uses a latency optimized variant of `memcmp` internally which *might* +/// make this faster for very short strings. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. 
If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +#[inline(always)] +pub fn is_prefix(haystack: &[u8], needle: &[u8]) -> bool { + needle.len() <= haystack.len() + && is_equal(&haystack[..needle.len()], needle) +} + +/// Returns true if and only if `needle` is a suffix of `haystack`. +/// +/// This uses a latency optimized variant of `memcmp` internally which *might* +/// make this faster for very short strings. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +#[inline(always)] +pub fn is_suffix(haystack: &[u8], needle: &[u8]) -> bool { + needle.len() <= haystack.len() + && is_equal(&haystack[haystack.len() - needle.len()..], needle) +} + +/// Compare corresponding bytes in `x` and `y` for equality. +/// +/// That is, this returns true if and only if `x.len() == y.len()` and +/// `x[i] == y[i]` for all `0 <= i < x.len()`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +#[inline(always)] +pub fn is_equal(x: &[u8], y: &[u8]) -> bool { + if x.len() != y.len() { + return false; + } + // SAFETY: Our pointers are derived directly from borrowed slices which + // uphold all of our safety guarantees except for length. We account for + // length with the check above. + unsafe { is_equal_raw(x.as_ptr(), y.as_ptr(), x.len()) } +} + +/// Compare `n` bytes at the given pointers for equality. +/// +/// This returns true if and only if `*x.add(i) == *y.add(i)` for all +/// `0 <= i < n`. +/// +/// # Inlining +/// +/// This routine is marked `inline(always)`. If you want to call this function +/// in a way that is not always inlined, you'll need to wrap a call to it in +/// another function that is marked as `inline(never)` or just `inline`. +/// +/// # Motivation +/// +/// Why not use slice equality instead? Well, slice equality usually results in +/// a call out to the current platform's `libc` which might not be inlineable +/// or have other overhead. This routine isn't guaranteed to be a win, but it +/// might be in some cases. +/// +/// # Safety +/// +/// * Both `x` and `y` must be valid for reads of up to `n` bytes. +/// * Both `x` and `y` must point to an initialized value. +/// * Both `x` and `y` must each point to an allocated object and +/// must either be in bounds or at most one byte past the end of the +/// allocated object. `x` and `y` do not need to point to the same allocated +/// object, but they may. +/// * Both `x` and `y` must be _derived from_ a pointer to their respective +/// allocated objects. +/// * The distance between `x` and `x+n` must not overflow `isize`. Similarly +/// for `y` and `y+n`. +/// * The distance being in bounds must not rely on "wrapping around" the +/// address space. 
+#[inline(always)] +pub unsafe fn is_equal_raw( + mut x: *const u8, + mut y: *const u8, + mut n: usize, +) -> bool { + // When we have 4 or more bytes to compare, then proceed in chunks of 4 at + // a time using unaligned loads. + // + // Also, why do 4 byte loads instead of, say, 8 byte loads? The reason is + // that this particular version of memcmp is likely to be called with tiny + // needles. That means that if we do 8 byte loads, then a higher proportion + // of memcmp calls will use the slower variant above. With that said, this + // is a hypothesis and is only loosely supported by benchmarks. There's + // likely some improvement that could be made here. The main thing here + // though is to optimize for latency, not throughput. + + // SAFETY: The caller is responsible for ensuring the pointers we get are + // valid and readable for at least `n` bytes. We also do unaligned loads, + // so there's no need to ensure we're aligned. (This is justified by this + // routine being specifically for short strings.) + while n >= 4 { + let vx = x.cast::<u32>().read_unaligned(); + let vy = y.cast::<u32>().read_unaligned(); + if vx != vy { + return false; + } + x = x.add(4); + y = y.add(4); + n -= 4; + } + // If we don't have enough bytes to do 4-byte at a time loads, then + // do partial loads. Note that I used to have a byte-at-a-time + // loop here and that turned out to be quite a bit slower for the + // memmem/pathological/defeat-simple-vector-alphabet benchmark. + if n >= 2 { + let vx = x.cast::<u16>().read_unaligned(); + let vy = y.cast::<u16>().read_unaligned(); + if vx != vy { + return false; + } + x = x.add(2); + y = y.add(2); + n -= 2; + } + if n > 0 { + if x.read() != y.read() { + return false; + } + } + true +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn equals_different_lengths() { + assert!(!is_equal(b"", b"a")); + assert!(!is_equal(b"a", b"")); + assert!(!is_equal(b"ab", b"a")); + assert!(!is_equal(b"a", b"ab")); + } + + #[test] + fn equals_mismatch() { + let one_mismatch = [ + (&b"a"[..], &b"x"[..]), + (&b"ab"[..], &b"ax"[..]), + (&b"abc"[..], &b"abx"[..]), + (&b"abcd"[..], &b"abcx"[..]), + (&b"abcde"[..], &b"abcdx"[..]), + (&b"abcdef"[..], &b"abcdex"[..]), + (&b"abcdefg"[..], &b"abcdefx"[..]), + (&b"abcdefgh"[..], &b"abcdefgx"[..]), + (&b"abcdefghi"[..], &b"abcdefghx"[..]), + (&b"abcdefghij"[..], &b"abcdefghix"[..]), + (&b"abcdefghijk"[..], &b"abcdefghijx"[..]), + (&b"abcdefghijkl"[..], &b"abcdefghijkx"[..]), + (&b"abcdefghijklm"[..], &b"abcdefghijklx"[..]), + (&b"abcdefghijklmn"[..], &b"abcdefghijklmx"[..]), + ]; + for (x, y) in one_mismatch { + assert_eq!(x.len(), y.len(), "lengths should match"); + assert!(!is_equal(x, y)); + assert!(!is_equal(y, x)); + } + } + + #[test] + fn equals_yes() { + assert!(is_equal(b"", b"")); + assert!(is_equal(b"a", b"a")); + assert!(is_equal(b"ab", b"ab")); + assert!(is_equal(b"abc", b"abc")); + assert!(is_equal(b"abcd", b"abcd")); + assert!(is_equal(b"abcde", b"abcde")); + assert!(is_equal(b"abcdef", b"abcdef")); + assert!(is_equal(b"abcdefg", b"abcdefg")); + assert!(is_equal(b"abcdefgh", b"abcdefgh")); + assert!(is_equal(b"abcdefghi", b"abcdefghi")); + } + + #[test] + fn prefix() { + assert!(is_prefix(b"", b"")); + assert!(is_prefix(b"a", b"")); + assert!(is_prefix(b"ab", b"")); + assert!(is_prefix(b"foo", b"foo")); + assert!(is_prefix(b"foobar", b"foo")); + + assert!(!is_prefix(b"foo", b"fob")); + assert!(!is_prefix(b"foobar", b"fob")); + } + + #[test] + fn suffix() { + assert!(is_suffix(b"", b"")); + assert!(is_suffix(b"a", 
b"")); + assert!(is_suffix(b"ab", b"")); + assert!(is_suffix(b"foo", b"foo")); + assert!(is_suffix(b"foobar", b"bar")); + + assert!(!is_suffix(b"foo", b"goo")); + assert!(!is_suffix(b"foobar", b"gar")); + } +} diff --git a/vendor/memchr/src/arch/all/packedpair/default_rank.rs b/vendor/memchr/src/arch/all/packedpair/default_rank.rs new file mode 100644 index 0000000..6aa3895 --- /dev/null +++ b/vendor/memchr/src/arch/all/packedpair/default_rank.rs @@ -0,0 +1,258 @@ +pub(crate) const RANK: [u8; 256] = [ + 55, // '\x00' + 52, // '\x01' + 51, // '\x02' + 50, // '\x03' + 49, // '\x04' + 48, // '\x05' + 47, // '\x06' + 46, // '\x07' + 45, // '\x08' + 103, // '\t' + 242, // '\n' + 66, // '\x0b' + 67, // '\x0c' + 229, // '\r' + 44, // '\x0e' + 43, // '\x0f' + 42, // '\x10' + 41, // '\x11' + 40, // '\x12' + 39, // '\x13' + 38, // '\x14' + 37, // '\x15' + 36, // '\x16' + 35, // '\x17' + 34, // '\x18' + 33, // '\x19' + 56, // '\x1a' + 32, // '\x1b' + 31, // '\x1c' + 30, // '\x1d' + 29, // '\x1e' + 28, // '\x1f' + 255, // ' ' + 148, // '!' + 164, // '"' + 149, // '#' + 136, // '$' + 160, // '%' + 155, // '&' + 173, // "'" + 221, // '(' + 222, // ')' + 134, // '*' + 122, // '+' + 232, // ',' + 202, // '-' + 215, // '.' + 224, // '/' + 208, // '0' + 220, // '1' + 204, // '2' + 187, // '3' + 183, // '4' + 179, // '5' + 177, // '6' + 168, // '7' + 178, // '8' + 200, // '9' + 226, // ':' + 195, // ';' + 154, // '<' + 184, // '=' + 174, // '>' + 126, // '?' + 120, // '@' + 191, // 'A' + 157, // 'B' + 194, // 'C' + 170, // 'D' + 189, // 'E' + 162, // 'F' + 161, // 'G' + 150, // 'H' + 193, // 'I' + 142, // 'J' + 137, // 'K' + 171, // 'L' + 176, // 'M' + 185, // 'N' + 167, // 'O' + 186, // 'P' + 112, // 'Q' + 175, // 'R' + 192, // 'S' + 188, // 'T' + 156, // 'U' + 140, // 'V' + 143, // 'W' + 123, // 'X' + 133, // 'Y' + 128, // 'Z' + 147, // '[' + 138, // '\\' + 146, // ']' + 114, // '^' + 223, // '_' + 151, // '`' + 249, // 'a' + 216, // 'b' + 238, // 'c' + 236, // 'd' + 253, // 'e' + 227, // 'f' + 218, // 'g' + 230, // 'h' + 247, // 'i' + 135, // 'j' + 180, // 'k' + 241, // 'l' + 233, // 'm' + 246, // 'n' + 244, // 'o' + 231, // 'p' + 139, // 'q' + 245, // 'r' + 243, // 's' + 251, // 't' + 235, // 'u' + 201, // 'v' + 196, // 'w' + 240, // 'x' + 214, // 'y' + 152, // 'z' + 182, // '{' + 205, // '|' + 181, // '}' + 127, // '~' + 27, // '\x7f' + 212, // '\x80' + 211, // '\x81' + 210, // '\x82' + 213, // '\x83' + 228, // '\x84' + 197, // '\x85' + 169, // '\x86' + 159, // '\x87' + 131, // '\x88' + 172, // '\x89' + 105, // '\x8a' + 80, // '\x8b' + 98, // '\x8c' + 96, // '\x8d' + 97, // '\x8e' + 81, // '\x8f' + 207, // '\x90' + 145, // '\x91' + 116, // '\x92' + 115, // '\x93' + 144, // '\x94' + 130, // '\x95' + 153, // '\x96' + 121, // '\x97' + 107, // '\x98' + 132, // '\x99' + 109, // '\x9a' + 110, // '\x9b' + 124, // '\x9c' + 111, // '\x9d' + 82, // '\x9e' + 108, // '\x9f' + 118, // '\xa0' + 141, // '¡' + 113, // '¢' + 129, // '£' + 119, // '¤' + 125, // '¥' + 165, // '¦' + 117, // '§' + 92, // '¨' + 106, // '©' + 83, // 'ª' + 72, // '«' + 99, // '¬' + 93, // '\xad' + 65, // '®' + 79, // '¯' + 166, // '°' + 237, // '±' + 163, // '²' + 199, // '³' + 190, // '´' + 225, // 'µ' + 209, // '¶' + 203, // '·' + 198, // '¸' + 217, // '¹' + 219, // 'º' + 206, // '»' + 234, // '¼' + 248, // '½' + 158, // '¾' + 239, // '¿' + 255, // 'À' + 255, // 'Á' + 255, // 'Â' + 255, // 'Ã' + 255, // 'Ä' + 255, // 'Å' + 255, // 'Æ' + 255, // 'Ç' + 255, // 'È' + 255, // 'É' + 255, // 'Ê' + 255, // 'Ë' + 255, // 'Ì' + 255, // 'Í' + 255, // 
'Î' + 255, // 'Ï' + 255, // 'Ð' + 255, // 'Ñ' + 255, // 'Ò' + 255, // 'Ó' + 255, // 'Ô' + 255, // 'Õ' + 255, // 'Ö' + 255, // '×' + 255, // 'Ø' + 255, // 'Ù' + 255, // 'Ú' + 255, // 'Û' + 255, // 'Ü' + 255, // 'Ý' + 255, // 'Þ' + 255, // 'ß' + 255, // 'à' + 255, // 'á' + 255, // 'â' + 255, // 'ã' + 255, // 'ä' + 255, // 'å' + 255, // 'æ' + 255, // 'ç' + 255, // 'è' + 255, // 'é' + 255, // 'ê' + 255, // 'ë' + 255, // 'ì' + 255, // 'í' + 255, // 'î' + 255, // 'ï' + 255, // 'ð' + 255, // 'ñ' + 255, // 'ò' + 255, // 'ó' + 255, // 'ô' + 255, // 'õ' + 255, // 'ö' + 255, // '÷' + 255, // 'ø' + 255, // 'ù' + 255, // 'ú' + 255, // 'û' + 255, // 'ü' + 255, // 'ý' + 255, // 'þ' + 255, // 'ÿ' +]; diff --git a/vendor/memchr/src/arch/all/packedpair/mod.rs b/vendor/memchr/src/arch/all/packedpair/mod.rs new file mode 100644 index 0000000..148a985 --- /dev/null +++ b/vendor/memchr/src/arch/all/packedpair/mod.rs @@ -0,0 +1,359 @@ +/*! +Provides an architecture independent implementation of the "packed pair" +algorithm. + +The "packed pair" algorithm is based on the [generic SIMD] algorithm. The main +difference is that it (by default) uses a background distribution of byte +frequencies to heuristically select the pair of bytes to search for. Note that +this module provides an architecture independent version that doesn't do as +good of a job keeping the search for candidates inside a SIMD hot path. It +however can be good enough in many circumstances. + +[generic SIMD]: http://0x80.pl/articles/simd-strfind.html#first-and-last +*/ + +use crate::memchr; + +mod default_rank; + +/// An architecture independent "packed pair" finder. +/// +/// This finder picks two bytes that it believes have high predictive power for +/// indicating an overall match of a needle. At search time, it reports offsets +/// where the needle could match based on whether the pair of bytes it chose +/// match. +/// +/// This is architecture independent because it utilizes `memchr` to find the +/// occurrence of one of the bytes in the pair, and then checks whether the +/// second byte matches. If it does, in the case of [`Finder::find_prefilter`], +/// the location at which the needle could match is returned. +/// +/// It is generally preferred to use architecture specific routines for a +/// "packed pair" prefilter, but this can be a useful fallback when the +/// architecture independent routines are unavailable. +#[derive(Clone, Copy, Debug)] +pub struct Finder { + pair: Pair, + byte1: u8, + byte2: u8, +} + +impl Finder { + /// Create a new prefilter that reports possible locations where the given + /// needle matches. + #[inline] + pub fn new(needle: &[u8]) -> Option<Finder> { + Finder::with_pair(needle, Pair::new(needle)?) + } + + /// Create a new prefilter using the pair given. + /// + /// If the prefilter could not be constructed, then `None` is returned. + /// + /// This constructor permits callers to control precisely which pair of + /// bytes is used as a predicate. + #[inline] + pub fn with_pair(needle: &[u8], pair: Pair) -> Option<Finder> { + let byte1 = needle[usize::from(pair.index1())]; + let byte2 = needle[usize::from(pair.index2())]; + // Currently this can never fail so we could just return a Finder, + // but it's conceivable this could change. + Some(Finder { pair, byte1, byte2 }) + } + + /// Run this finder on the given haystack as a prefilter. + /// + /// If a candidate match is found, then an offset where the needle *could* + /// begin in the haystack is returned. 
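// An illustrative sketch (not part of the vendored source) of the prefilter
// described above, assuming the public `memchr::arch::all::packedpair::Finder`
// API in this file. `find_prefilter` only reports a *candidate* offset; the
// caller must still verify that the full needle actually occurs there.
fn packedpair_prefilter_sketch() {
    use memchr::arch::all::packedpair::Finder;

    let finder = Finder::new(b"zebra").unwrap();
    let haystack = b"a quick zebra";
    let candidate = finder.find_prefilter(haystack);
    assert_eq!(Some(8), candidate);
    // The verification step a caller would normally perform:
    assert!(haystack[8..].starts_with(b"zebra"));
}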
+ #[inline] + pub fn find_prefilter(&self, haystack: &[u8]) -> Option<usize> { + let mut i = 0; + let index1 = usize::from(self.pair.index1()); + let index2 = usize::from(self.pair.index2()); + loop { + // Use a fast vectorized implementation to skip to the next + // occurrence of the rarest byte (heuristically chosen) in the + // needle. + i += memchr(self.byte1, &haystack[i..])?; + let found = i; + i += 1; + + // If we can't align our first byte match with the haystack, then a + // match is impossible. + let aligned1 = match found.checked_sub(index1) { + None => continue, + Some(aligned1) => aligned1, + }; + + // Now align the second byte match with the haystack. A mismatch + // means that a match is impossible. + let aligned2 = match aligned1.checked_add(index2) { + None => continue, + Some(aligned_index2) => aligned_index2, + }; + if haystack.get(aligned2).map_or(true, |&b| b != self.byte2) { + continue; + } + + // We've done what we can. There might be a match here. + return Some(aligned1); + } + } + + /// Returns the pair of offsets (into the needle) used to check as a + /// predicate before confirming whether a needle exists at a particular + /// position. + #[inline] + pub fn pair(&self) -> &Pair { + &self.pair + } +} + +/// A pair of byte offsets into a needle to use as a predicate. +/// +/// This pair is used as a predicate to quickly filter out positions in a +/// haystack in which a needle cannot match. In some cases, this pair can even +/// be used in vector algorithms such that the vector algorithm only switches +/// over to scalar code once this pair has been found. +/// +/// A pair of offsets can be used in both substring search implementations and +/// in prefilters. The former will report matches of a needle in a haystack +/// where as the latter will only report possible matches of a needle. +/// +/// The offsets are limited each to a maximum of 255 to keep memory usage low. +/// Moreover, it's rarely advantageous to create a predicate using offsets +/// greater than 255 anyway. +/// +/// The only guarantee enforced on the pair of offsets is that they are not +/// equivalent. It is not necessarily the case that `index1 < index2` for +/// example. By convention, `index1` corresponds to the byte in the needle +/// that is believed to be most the predictive. Note also that because of the +/// requirement that the indices be both valid for the needle used to build +/// the pair and not equal, it follows that a pair can only be constructed for +/// needles with length at least 2. +#[derive(Clone, Copy, Debug)] +pub struct Pair { + index1: u8, + index2: u8, +} + +impl Pair { + /// Create a new pair of offsets from the given needle. + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. + /// + /// This chooses the pair in the needle that is believed to be as + /// predictive of an overall match of the needle as possible. + #[inline] + pub fn new(needle: &[u8]) -> Option<Pair> { + Pair::with_ranker(needle, DefaultFrequencyRank) + } + + /// Create a new pair of offsets from the given needle and ranker. + /// + /// This permits the caller to choose a background frequency distribution + /// with which bytes are selected. The idea is to select a pair of bytes + /// that is believed to strongly predict a match in the haystack. This + /// usually means selecting bytes that occur rarely in a haystack. + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. 
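// A small illustrative sketch (not part of the vendored source) of the default
// pair selection, assuming the public `memchr::arch::all::packedpair::Pair`
// API. With the default frequency table in this module, 'z' (rank 152) and
// 'b' (rank 216) are the two rarest bytes of "zebra", so the chosen offsets
// are 0 and 2.
fn pair_selection_sketch() {
    use memchr::arch::all::packedpair::Pair;

    let pair = Pair::new(b"zebra").unwrap();
    assert_eq!((0, 2), (pair.index1(), pair.index2()));
}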
+ #[inline] + pub fn with_ranker<R: HeuristicFrequencyRank>( + needle: &[u8], + ranker: R, + ) -> Option<Pair> { + if needle.len() <= 1 { + return None; + } + // Find the rarest two bytes. We make them distinct indices by + // construction. (The actual byte value may be the same in degenerate + // cases, but that's OK.) + let (mut rare1, mut index1) = (needle[0], 0); + let (mut rare2, mut index2) = (needle[1], 1); + if ranker.rank(rare2) < ranker.rank(rare1) { + core::mem::swap(&mut rare1, &mut rare2); + core::mem::swap(&mut index1, &mut index2); + } + let max = usize::from(core::u8::MAX); + for (i, &b) in needle.iter().enumerate().take(max).skip(2) { + if ranker.rank(b) < ranker.rank(rare1) { + rare2 = rare1; + index2 = index1; + rare1 = b; + index1 = u8::try_from(i).unwrap(); + } else if b != rare1 && ranker.rank(b) < ranker.rank(rare2) { + rare2 = b; + index2 = u8::try_from(i).unwrap(); + } + } + // While not strictly required for how a Pair is normally used, we + // really don't want these to be equivalent. If they were, it would + // reduce the effectiveness of candidate searching using these rare + // bytes by increasing the rate of false positives. + assert_ne!(index1, index2); + Some(Pair { index1, index2 }) + } + + /// Create a new pair using the offsets given for the needle given. + /// + /// This bypasses any sort of heuristic process for choosing the offsets + /// and permits the caller to choose the offsets themselves. + /// + /// Indices are limited to valid `u8` values so that a `Pair` uses less + /// memory. It is not possible to create a `Pair` with offsets bigger than + /// `u8::MAX`. It's likely that such a thing is not needed, but if it is, + /// it's suggested to build your own bespoke algorithm because you're + /// likely working on a very niche case. (File an issue if this suggestion + /// does not make sense to you.) + /// + /// If a pair could not be created (for example, if the needle is too + /// short), then `None` is returned. + #[inline] + pub fn with_indices( + needle: &[u8], + index1: u8, + index2: u8, + ) -> Option<Pair> { + // While not strictly required for how a Pair is normally used, we + // really don't want these to be equivalent. If they were, it would + // reduce the effectiveness of candidate searching using these rare + // bytes by increasing the rate of false positives. + if index1 == index2 { + return None; + } + // Similarly, invalid indices means the Pair is invalid too. + if usize::from(index1) >= needle.len() { + return None; + } + if usize::from(index2) >= needle.len() { + return None; + } + Some(Pair { index1, index2 }) + } + + /// Returns the first offset of the pair. + #[inline] + pub fn index1(&self) -> u8 { + self.index1 + } + + /// Returns the second offset of the pair. + #[inline] + pub fn index2(&self) -> u8 { + self.index2 + } +} + +/// This trait allows the user to customize the heuristic used to determine the +/// relative frequency of a given byte in the dataset being searched. +/// +/// The use of this trait can have a dramatic impact on performance depending +/// on the type of data being searched. The details of why are explained in the +/// docs of [`crate::memmem::Prefilter`]. To summarize, the core algorithm uses +/// a prefilter to quickly identify candidate matches that are later verified +/// more slowly. This prefilter is implemented in terms of trying to find +/// `rare` bytes at specific offsets that will occur less frequently in the +/// dataset. 
While the concept of a `rare` byte is similar for most datasets, +/// there are some specific datasets (like binary executables) that have +/// dramatically different byte distributions. For these datasets customizing +/// the byte frequency heuristic can have a massive impact on performance, and +/// might even need to be done at runtime. +/// +/// The default implementation of `HeuristicFrequencyRank` reads from the +/// static frequency table defined in `src/memmem/byte_frequencies.rs`. This +/// is optimal for most inputs, so if you are unsure of the impact of using a +/// custom `HeuristicFrequencyRank` you should probably just use the default. +/// +/// # Example +/// +/// ``` +/// use memchr::{ +/// arch::all::packedpair::HeuristicFrequencyRank, +/// memmem::FinderBuilder, +/// }; +/// +/// /// A byte-frequency table that is good for scanning binary executables. +/// struct Binary; +/// +/// impl HeuristicFrequencyRank for Binary { +/// fn rank(&self, byte: u8) -> u8 { +/// const TABLE: [u8; 256] = [ +/// 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17, +/// 89, 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16, +/// 68, 11, 9, 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11, +/// 9, 24, 11, 5, 5, 5, 19, 11, 6, 17, 9, 9, 6, 8, +/// 48, 58, 11, 14, 53, 40, 9, 9, 254, 35, 3, 6, 52, 23, 6, 6, 27, +/// 4, 7, 11, 14, 13, 10, 11, 11, 5, 2, 10, 16, 12, 6, 19, +/// 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19, 8, 18, 20, 24, +/// 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5, +/// 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15, +/// 0, 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0, +/// 12, 2, 1, 1, 3, 1, 1, 1, 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0, +/// 2, 2, 4, 4, 11, 6, 7, 3, 6, 9, 4, 5, +/// 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13, +/// 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2, +/// 16, 3, 5, 1, 6, 1, 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5, +/// 8, 3, 1, 8, 6, 17, 6, 5, 3, 8, 6, 13, 175, +/// ]; +/// TABLE[byte as usize] +/// } +/// } +/// // Create a new finder with the custom heuristic. +/// let finder = FinderBuilder::new() +/// .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xdd"); +/// // Find needle with custom heuristic. +/// assert!(finder.find(b"\x00\x00\x00\xdd\xdd").is_some()); +/// ``` +pub trait HeuristicFrequencyRank { + /// Return the heuristic frequency rank of the given byte. A lower rank + /// means the byte is believed to occur less frequently in the haystack. + /// + /// Some uses of this heuristic may treat arbitrary absolute rank values as + /// significant. For example, an implementation detail in this crate may + /// determine that heuristic prefilters are inappropriate if every byte in + /// the needle has a "high" rank. + fn rank(&self, byte: u8) -> u8; +} + +/// The default byte frequency heuristic that is good for most haystacks. +pub(crate) struct DefaultFrequencyRank; + +impl HeuristicFrequencyRank for DefaultFrequencyRank { + fn rank(&self, byte: u8) -> u8 { + self::default_rank::RANK[usize::from(byte)] + } +} + +/// This permits passing any implementation of `HeuristicFrequencyRank` as a +/// borrowed version of itself. 
+impl<'a, R> HeuristicFrequencyRank for &'a R +where + R: HeuristicFrequencyRank, +{ + fn rank(&self, byte: u8) -> u8 { + (**self).rank(byte) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn forward_packedpair() { + fn find( + haystack: &[u8], + needle: &[u8], + _index1: u8, + _index2: u8, + ) -> Option<Option<usize>> { + // We ignore the index positions requested since it winds up making + // this test too slow overall. + let f = Finder::new(needle)?; + Some(f.find_prefilter(haystack)) + } + crate::tests::packedpair::Runner::new().fwd(find).run() + } +} diff --git a/vendor/memchr/src/arch/all/rabinkarp.rs b/vendor/memchr/src/arch/all/rabinkarp.rs new file mode 100644 index 0000000..e0bafba --- /dev/null +++ b/vendor/memchr/src/arch/all/rabinkarp.rs @@ -0,0 +1,390 @@ +/*! +An implementation of the [Rabin-Karp substring search algorithm][rabinkarp]. + +Rabin-Karp works by creating a hash of the needle provided and then computing +a rolling hash for each needle sized window in the haystack. When the rolling +hash matches the hash of the needle, a byte-wise comparison is done to check +if a match exists. The worst case time complexity of Rabin-Karp is `O(m * +n)` where `m ~ len(needle)` and `n ~ len(haystack)`. Its worst case space +complexity is constant. + +The main utility of Rabin-Karp is that the searcher can be constructed very +quickly with very little memory. This makes it especially useful when searching +for small needles in small haystacks, as it might finish its search before a +beefier algorithm (like Two-Way) even starts. + +[rabinkarp]: https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm +*/ + +/* +(This was the comment I wrote for this module originally when it was not +exposed. The comment still looks useful, but it's a bit in the weeds, so it's +not public itself.) + +This module implements the classical Rabin-Karp substring search algorithm, +with no extra frills. While its use would seem to break our time complexity +guarantee of O(m+n) (RK's time complexity is O(mn)), we are careful to only +ever use RK on a constant subset of haystacks. The main point here is that +RK has good latency properties for small needles/haystacks. It's very quick +to compute a needle hash and zip through the haystack when compared to +initializing Two-Way, for example. And this is especially useful for cases +where the haystack is just too short for vector instructions to do much good. + +The hashing function used here is the same one recommended by ESMAJ. + +Another choice instead of Rabin-Karp would be Shift-Or. But its latency +isn't quite as good since its preprocessing time is a bit more expensive +(both in practice and in theory). However, perhaps Shift-Or has a place +somewhere else for short patterns. I think the main problem is that it +requires space proportional to the alphabet and the needle. If we, for +example, supported needles up to length 16, then the total table size would be +len(alphabet)*size_of::<u16>()==512 bytes. Which isn't exactly small, and it's +probably bad to put that on the stack. So ideally, we'd throw it on the heap, +but we'd really like to write as much code without using alloc/std as possible. +But maybe it's worth the special casing. It's a TODO to benchmark. 
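+
+(Editorial aside, not part of the original comment.) To make the hash concrete:
+`add(b)` computes `h = h*2 + b` (wrapping), so a window `b1..bn` hashes to
+`b1*2^(n-1) + b2*2^(n-2) + ... + bn`, and `hash_2pow` is `2^(n-1)`. Rolling the
+window forward first subtracts the departing byte times `2^(n-1)`, then doubles
+and adds the arriving byte. For example, with a needle of length 3 and window
+bytes `[1, 2, 3]`, the hash is `1*4 + 2*2 + 3 = 11`; rolling in a `4` gives
+`(11 - 1*4)*2 + 4 = 18 = 2*4 + 3*2 + 4`, exactly the hash of `[2, 3, 4]`.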
+ +Wikipedia has a decent explanation, if a bit heavy on the theory: +https://en.wikipedia.org/wiki/Rabin%E2%80%93Karp_algorithm + +But ESMAJ provides something a bit more concrete: +http://www-igm.univ-mlv.fr/~lecroq/string/node5.html + +Finally, aho-corasick uses Rabin-Karp for multiple pattern match in some cases: +https://github.com/BurntSushi/aho-corasick/blob/3852632f10587db0ff72ef29e88d58bf305a0946/src/packed/rabinkarp.rs +*/ + +use crate::ext::Pointer; + +/// A forward substring searcher using the Rabin-Karp algorithm. +/// +/// Note that, as a lower level API, a `Finder` does not have access to the +/// needle it was constructed with. For this reason, executing a search +/// with a `Finder` requires passing both the needle and the haystack, +/// where the needle is exactly equivalent to the one given to the `Finder` +/// at construction time. This design was chosen so that callers can have +/// more precise control over where and how many times a needle is stored. +/// For example, in cases where Rabin-Karp is just one of several possible +/// substring search algorithms. +#[derive(Clone, Debug)] +pub struct Finder { + /// The actual hash. + hash: Hash, + /// The factor needed to multiply a byte by in order to subtract it from + /// the hash. It is defined to be 2^(n-1) (using wrapping exponentiation), + /// where n is the length of the needle. This is how we "remove" a byte + /// from the hash once the hash window rolls past it. + hash_2pow: u32, +} + +impl Finder { + /// Create a new Rabin-Karp forward searcher for the given `needle`. + /// + /// The needle may be empty. The empty needle matches at every byte offset. + /// + /// Note that callers must pass the same needle to all search calls using + /// this `Finder`. + #[inline] + pub fn new(needle: &[u8]) -> Finder { + let mut s = Finder { hash: Hash::new(), hash_2pow: 1 }; + let first_byte = match needle.get(0) { + None => return s, + Some(&first_byte) => first_byte, + }; + s.hash.add(first_byte); + for b in needle.iter().copied().skip(1) { + s.hash.add(b); + s.hash_2pow = s.hash_2pow.wrapping_shl(1); + } + s + } + + /// Return the first occurrence of the `needle` in the `haystack` + /// given. If no such occurrence exists, then `None` is returned. + /// + /// The `needle` provided must match the needle given to this finder at + /// construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. + #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> { + unsafe { + let hstart = haystack.as_ptr(); + let hend = hstart.add(haystack.len()); + let nstart = needle.as_ptr(); + let nend = nstart.add(needle.len()); + let found = self.find_raw(hstart, hend, nstart, nend)?; + Some(found.distance(hstart)) + } + } + + /// Like `find`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `<= end`. The pointer returned is only ever equivalent + /// to `end` when both the needle and haystack are empty. (That is, the + /// empty string matches the empty string.) + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// Note that `start` and `end` below refer to both pairs of pointers given + /// to this routine. 
That is, the conditions apply to both `hstart`/`hend` + /// and `nstart`/`nend`. + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. + /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// * It must be the case that `start <= end`. + #[inline] + pub unsafe fn find_raw( + &self, + hstart: *const u8, + hend: *const u8, + nstart: *const u8, + nend: *const u8, + ) -> Option<*const u8> { + let hlen = hend.distance(hstart); + let nlen = nend.distance(nstart); + if nlen > hlen { + return None; + } + let mut cur = hstart; + let end = hend.sub(nlen); + let mut hash = Hash::forward(cur, cur.add(nlen)); + loop { + if self.hash == hash && is_equal_raw(cur, nstart, nlen) { + return Some(cur); + } + if cur >= end { + return None; + } + hash.roll(self, cur.read(), cur.add(nlen).read()); + cur = cur.add(1); + } + } +} + +/// A reverse substring searcher using the Rabin-Karp algorithm. +#[derive(Clone, Debug)] +pub struct FinderRev(Finder); + +impl FinderRev { + /// Create a new Rabin-Karp reverse searcher for the given `needle`. + #[inline] + pub fn new(needle: &[u8]) -> FinderRev { + let mut s = FinderRev(Finder { hash: Hash::new(), hash_2pow: 1 }); + let last_byte = match needle.last() { + None => return s, + Some(&last_byte) => last_byte, + }; + s.0.hash.add(last_byte); + for b in needle.iter().rev().copied().skip(1) { + s.0.hash.add(b); + s.0.hash_2pow = s.0.hash_2pow.wrapping_shl(1); + } + s + } + + /// Return the last occurrence of the `needle` in the `haystack` + /// given. If no such occurrence exists, then `None` is returned. + /// + /// The `needle` provided must match the needle given to this finder at + /// construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. + #[inline] + pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> { + unsafe { + let hstart = haystack.as_ptr(); + let hend = hstart.add(haystack.len()); + let nstart = needle.as_ptr(); + let nend = nstart.add(needle.len()); + let found = self.rfind_raw(hstart, hend, nstart, nend)?; + Some(found.distance(hstart)) + } + } + + /// Like `rfind`, but accepts and returns raw pointers. + /// + /// When a match is found, the pointer returned is guaranteed to be + /// `>= start` and `<= end`. The pointer returned is only ever equivalent + /// to `end` when both the needle and haystack are empty. (That is, the + /// empty string matches the empty string.) + /// + /// This routine is useful if you're already using raw pointers and would + /// like to avoid converting back to a slice before executing a search. + /// + /// # Safety + /// + /// Note that `start` and `end` below refer to both pairs of pointers given + /// to this routine. That is, the conditions apply to both `hstart`/`hend` + /// and `nstart`/`nend`. + /// + /// * Both `start` and `end` must be valid for reads. + /// * Both `start` and `end` must point to an initialized value. 
+ /// * Both `start` and `end` must point to the same allocated object and + /// must either be in bounds or at most one byte past the end of the + /// allocated object. + /// * Both `start` and `end` must be _derived from_ a pointer to the same + /// object. + /// * The distance between `start` and `end` must not overflow `isize`. + /// * The distance being in bounds must not rely on "wrapping around" the + /// address space. + /// * It must be the case that `start <= end`. + #[inline] + pub unsafe fn rfind_raw( + &self, + hstart: *const u8, + hend: *const u8, + nstart: *const u8, + nend: *const u8, + ) -> Option<*const u8> { + let hlen = hend.distance(hstart); + let nlen = nend.distance(nstart); + if nlen > hlen { + return None; + } + let mut cur = hend.sub(nlen); + let start = hstart; + let mut hash = Hash::reverse(cur, cur.add(nlen)); + loop { + if self.0.hash == hash && is_equal_raw(cur, nstart, nlen) { + return Some(cur); + } + if cur <= start { + return None; + } + cur = cur.sub(1); + hash.roll(&self.0, cur.add(nlen).read(), cur.read()); + } + } +} + +/// Whether RK is believed to be very fast for the given needle/haystack. +#[inline] +pub(crate) fn is_fast(haystack: &[u8], _needle: &[u8]) -> bool { + haystack.len() < 16 +} + +/// A Rabin-Karp hash. This might represent the hash of a needle, or the hash +/// of a rolling window in the haystack. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +struct Hash(u32); + +impl Hash { + /// Create a new hash that represents the empty string. + #[inline(always)] + fn new() -> Hash { + Hash(0) + } + + /// Create a new hash from the bytes given for use in forward searches. + /// + /// # Safety + /// + /// The given pointers must be valid to read from within their range. + #[inline(always)] + unsafe fn forward(mut start: *const u8, end: *const u8) -> Hash { + let mut hash = Hash::new(); + while start < end { + hash.add(start.read()); + start = start.add(1); + } + hash + } + + /// Create a new hash from the bytes given for use in reverse searches. + /// + /// # Safety + /// + /// The given pointers must be valid to read from within their range. + #[inline(always)] + unsafe fn reverse(start: *const u8, mut end: *const u8) -> Hash { + let mut hash = Hash::new(); + while start < end { + end = end.sub(1); + hash.add(end.read()); + } + hash + } + + /// Add 'new' and remove 'old' from this hash. The given needle hash should + /// correspond to the hash computed for the needle being searched for. + /// + /// This is meant to be used when the rolling window of the haystack is + /// advanced. + #[inline(always)] + fn roll(&mut self, finder: &Finder, old: u8, new: u8) { + self.del(finder, old); + self.add(new); + } + + /// Add a byte to this hash. + #[inline(always)] + fn add(&mut self, byte: u8) { + self.0 = self.0.wrapping_shl(1).wrapping_add(u32::from(byte)); + } + + /// Remove a byte from this hash. The given needle hash should correspond + /// to the hash computed for the needle being searched for. + #[inline(always)] + fn del(&mut self, finder: &Finder, byte: u8) { + let factor = finder.hash_2pow; + self.0 = self.0.wrapping_sub(u32::from(byte).wrapping_mul(factor)); + } +} + +/// Returns true when `x[i] == y[i]` for all `0 <= i < n`. +/// +/// We forcefully don't inline this to hint at the compiler that it is unlikely +/// to be called. This causes the inner rabinkarp loop above to be a bit +/// tighter and leads to some performance improvement. See the +/// memmem/krate/prebuilt/sliceslice-words/words benchmark. 
+/// +/// # Safety +/// +/// Same as `crate::arch::all::is_equal_raw`. +#[cold] +#[inline(never)] +unsafe fn is_equal_raw(x: *const u8, y: *const u8, n: usize) -> bool { + crate::arch::all::is_equal_raw(x, y, n) +} + +#[cfg(test)] +mod tests { + use super::*; + + define_substring_forward_quickcheck!(|h, n| Some( + Finder::new(n).find(h, n) + )); + define_substring_reverse_quickcheck!(|h, n| Some( + FinderRev::new(n).rfind(h, n) + )); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n).find(h, n))) + .run(); + } + + #[test] + fn reverse() { + crate::tests::substring::Runner::new() + .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) + .run(); + } +} diff --git a/vendor/memchr/src/arch/all/shiftor.rs b/vendor/memchr/src/arch/all/shiftor.rs new file mode 100644 index 0000000..b690564 --- /dev/null +++ b/vendor/memchr/src/arch/all/shiftor.rs @@ -0,0 +1,89 @@ +/*! +An implementation of the [Shift-Or substring search algorithm][shiftor]. + +[shiftor]: https://en.wikipedia.org/wiki/Bitap_algorithm +*/ + +use alloc::boxed::Box; + +/// The type of our mask. +/// +/// While we don't expose anyway to configure this in the public API, if one +/// really needs less memory usage or support for longer needles, then it is +/// suggested to copy the code from this module and modify it to fit your +/// needs. The code below is written to be correct regardless of whether Mask +/// is a u8, u16, u32, u64 or u128. +type Mask = u16; + +/// A forward substring searcher using the Shift-Or algorithm. +#[derive(Debug)] +pub struct Finder { + masks: Box<[Mask; 256]>, + needle_len: usize, +} + +impl Finder { + const MAX_NEEDLE_LEN: usize = (Mask::BITS - 1) as usize; + + /// Create a new Shift-Or forward searcher for the given `needle`. + /// + /// The needle may be empty. The empty needle matches at every byte offset. + #[inline] + pub fn new(needle: &[u8]) -> Option<Finder> { + let needle_len = needle.len(); + if needle_len > Finder::MAX_NEEDLE_LEN { + // A match is found when bit 7 is set in 'result' in the search + // routine below. So our needle can't be bigger than 7. We could + // permit bigger needles by using u16, u32 or u64 for our mask + // entries. But this is all we need for this example. + return None; + } + let mut searcher = Finder { masks: Box::from([!0; 256]), needle_len }; + for (i, &byte) in needle.iter().enumerate() { + searcher.masks[usize::from(byte)] &= !(1 << i); + } + Some(searcher) + } + + /// Return the first occurrence of the needle given to `Finder::new` in + /// the `haystack` given. If no such occurrence exists, then `None` is + /// returned. + /// + /// Unlike most other substring search implementations in this crate, this + /// finder does not require passing the needle at search time. A match can + /// be determined without the needle at all since the required information + /// is already encoded into this finder at construction time. + /// + /// The maximum value this can return is `haystack.len()`, which can only + /// occur when the needle and haystack both have length zero. Otherwise, + /// for non-empty haystacks, the maximum value is `haystack.len() - 1`. 
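+    ///
+    /// # Example
+    ///
+    /// (Editorial sketch, not present in the upstream sources; it assumes
+    /// this `Finder` is reachable as `memchr::arch::all::shiftor::Finder`.)
+    ///
+    /// ```
+    /// use memchr::arch::all::shiftor::Finder;
+    ///
+    /// // `new` returns `None` if the needle is too long for the mask type
+    /// // (more than 15 bytes for the `u16` mask used here).
+    /// let finder = Finder::new(b"foo").unwrap();
+    /// assert_eq!(Some(4), finder.find(b"bar foo baz"));
+    /// assert_eq!(None, finder.find(b"bar quux baz"));
+    /// ```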
+ #[inline] + pub fn find(&self, haystack: &[u8]) -> Option<usize> { + if self.needle_len == 0 { + return Some(0); + } + let mut result = !1; + for (i, &byte) in haystack.iter().enumerate() { + result |= self.masks[usize::from(byte)]; + result <<= 1; + if result & (1 << self.needle_len) == 0 { + return Some(i + 1 - self.needle_len); + } + } + None + } +} + +#[cfg(test)] +mod tests { + use super::*; + + define_substring_forward_quickcheck!(|h, n| Some(Finder::new(n)?.find(h))); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n)?.find(h))) + .run(); + } +} diff --git a/vendor/memchr/src/arch/all/twoway.rs b/vendor/memchr/src/arch/all/twoway.rs new file mode 100644 index 0000000..0df3b4a --- /dev/null +++ b/vendor/memchr/src/arch/all/twoway.rs @@ -0,0 +1,877 @@ +/*! +An implementation of the [Two-Way substring search algorithm][two-way]. + +[`Finder`] can be built for forward searches, while [`FinderRev`] can be built +for reverse searches. + +Two-Way makes for a nice general purpose substring search algorithm because of +its time and space complexity properties. It also performs well in practice. +Namely, with `m = len(needle)` and `n = len(haystack)`, Two-Way takes `O(m)` +time to create a finder, `O(1)` space and `O(n)` search time. In other words, +the preprocessing step is quick, doesn't require any heap memory and the worst +case search time is guaranteed to be linear in the haystack regardless of the +size of the needle. + +While vector algorithms will usually beat Two-Way handedly, vector algorithms +also usually have pathological or edge cases that are better handled by Two-Way. +Moreover, not all targets support vector algorithms or implementations for them +simply may not exist yet. + +Two-Way can be found in the `memmem` implementations in at least [GNU libc] and +[musl]. + +[two-way]: https://en.wikipedia.org/wiki/Two-way_string-matching_algorithm +[GNU libc]: https://www.gnu.org/software/libc/ +[musl]: https://www.musl-libc.org/ +*/ + +use core::cmp; + +use crate::{ + arch::all::{is_prefix, is_suffix}, + memmem::Pre, +}; + +/// A forward substring searcher that uses the Two-Way algorithm. +#[derive(Clone, Copy, Debug)] +pub struct Finder(TwoWay); + +/// A reverse substring searcher that uses the Two-Way algorithm. +#[derive(Clone, Copy, Debug)] +pub struct FinderRev(TwoWay); + +/// An implementation of the TwoWay substring search algorithm. +/// +/// This searcher supports forward and reverse search, although not +/// simultaneously. It runs in `O(n + m)` time and `O(1)` space, where +/// `n ~ len(needle)` and `m ~ len(haystack)`. +/// +/// The implementation here roughly matches that which was developed by +/// Crochemore and Perrin in their 1991 paper "Two-way string-matching." The +/// changes in this implementation are 1) the use of zero-based indices, 2) a +/// heuristic skip table based on the last byte (borrowed from Rust's standard +/// library) and 3) the addition of heuristics for a fast skip loop. For (3), +/// callers can pass any kind of prefilter they want, but usually it's one +/// based on a heuristic that uses an approximate background frequency of bytes +/// to choose rare bytes to quickly look for candidate match positions. Note +/// though that currently, this prefilter functionality is not exposed directly +/// in the public API. (File an issue if you want it and provide a use case +/// please.) 
+/// +/// The heuristic for fast skipping is automatically shut off if it's +/// detected to be ineffective at search time. Generally, this only occurs in +/// pathological cases. But this is generally necessary in order to preserve +/// a `O(n + m)` time bound. +/// +/// The code below is fairly complex and not obviously correct at all. It's +/// likely necessary to read the Two-Way paper cited above in order to fully +/// grok this code. The essence of it is: +/// +/// 1. Do something to detect a "critical" position in the needle. +/// 2. For the current position in the haystack, look if `needle[critical..]` +/// matches at that position. +/// 3. If so, look if `needle[..critical]` matches. +/// 4. If a mismatch occurs, shift the search by some amount based on the +/// critical position and a pre-computed shift. +/// +/// This type is wrapped in the forward and reverse finders that expose +/// consistent forward or reverse APIs. +#[derive(Clone, Copy, Debug)] +struct TwoWay { + /// A small bitset used as a quick prefilter (in addition to any prefilter + /// given by the caller). Namely, a bit `i` is set if and only if `b%64==i` + /// for any `b == needle[i]`. + /// + /// When used as a prefilter, if the last byte at the current candidate + /// position is NOT in this set, then we can skip that entire candidate + /// position (the length of the needle). This is essentially the shift + /// trick found in Boyer-Moore, but only applied to bytes that don't appear + /// in the needle. + /// + /// N.B. This trick was inspired by something similar in std's + /// implementation of Two-Way. + byteset: ApproximateByteSet, + /// A critical position in needle. Specifically, this position corresponds + /// to beginning of either the minimal or maximal suffix in needle. (N.B. + /// See SuffixType below for why "minimal" isn't quite the correct word + /// here.) + /// + /// This is the position at which every search begins. Namely, search + /// starts by scanning text to the right of this position, and only if + /// there's a match does the text to the left of this position get scanned. + critical_pos: usize, + /// The amount we shift by in the Two-Way search algorithm. This + /// corresponds to the "small period" and "large period" cases. + shift: Shift, +} + +impl Finder { + /// Create a searcher that finds occurrences of the given `needle`. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn new(needle: &[u8]) -> Finder { + let byteset = ApproximateByteSet::new(needle); + let min_suffix = Suffix::forward(needle, SuffixKind::Minimal); + let max_suffix = Suffix::forward(needle, SuffixKind::Maximal); + let (period_lower_bound, critical_pos) = + if min_suffix.pos > max_suffix.pos { + (min_suffix.period, min_suffix.pos) + } else { + (max_suffix.period, max_suffix.pos) + }; + let shift = Shift::forward(needle, period_lower_bound, critical_pos); + Finder(TwoWay { byteset, critical_pos, shift }) + } + + /// Returns the first occurrence of `needle` in the given `haystack`, or + /// `None` if no such occurrence could be found. + /// + /// The `needle` given must be the same as the `needle` provided to + /// [`Finder::new`]. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. 
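+    ///
+    /// # Example
+    ///
+    /// (Editorial sketch, not present in the upstream sources.) Note that
+    /// the same needle must be passed to both `new` and `find`:
+    ///
+    /// ```
+    /// use memchr::arch::all::twoway::Finder;
+    ///
+    /// let needle = b"foo";
+    /// let finder = Finder::new(needle);
+    /// assert_eq!(Some(4), finder.find(b"bar foo baz", needle));
+    /// assert_eq!(None, finder.find(b"bar quux baz", needle));
+    /// ```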
+ #[inline] + pub fn find(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> { + self.find_with_prefilter(None, haystack, needle) + } + + /// This is like [`Finder::find`], but it accepts a prefilter for + /// accelerating searches. + /// + /// Currently this is not exposed in the public API because, at the time + /// of writing, I didn't want to spend time thinking about how to expose + /// the prefilter infrastructure (if at all). If you have a compelling use + /// case for exposing this routine, please create an issue. Do *not* open + /// a PR that just exposes `Pre` and friends. Exporting this routine will + /// require API design. + #[inline(always)] + pub(crate) fn find_with_prefilter( + &self, + pre: Option<Pre<'_>>, + haystack: &[u8], + needle: &[u8], + ) -> Option<usize> { + match self.0.shift { + Shift::Small { period } => { + self.find_small_imp(pre, haystack, needle, period) + } + Shift::Large { shift } => { + self.find_large_imp(pre, haystack, needle, shift) + } + } + } + + // Each of the two search implementations below can be accelerated by a + // prefilter, but it is not always enabled. To avoid its overhead when + // its disabled, we explicitly inline each search implementation based on + // whether a prefilter will be used or not. The decision on which to use + // is made in the parent meta searcher. + + #[inline(always)] + fn find_small_imp( + &self, + mut pre: Option<Pre<'_>>, + haystack: &[u8], + needle: &[u8], + period: usize, + ) -> Option<usize> { + let mut pos = 0; + let mut shift = 0; + let last_byte_pos = match needle.len().checked_sub(1) { + None => return Some(pos), + Some(last_byte) => last_byte, + }; + while pos + needle.len() <= haystack.len() { + let mut i = cmp::max(self.0.critical_pos, shift); + if let Some(pre) = pre.as_mut() { + if pre.is_effective() { + pos += pre.find(&haystack[pos..])?; + shift = 0; + i = self.0.critical_pos; + if pos + needle.len() > haystack.len() { + return None; + } + } + } + if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { + pos += needle.len(); + shift = 0; + continue; + } + while i < needle.len() && needle[i] == haystack[pos + i] { + i += 1; + } + if i < needle.len() { + pos += i - self.0.critical_pos + 1; + shift = 0; + } else { + let mut j = self.0.critical_pos; + while j > shift && needle[j] == haystack[pos + j] { + j -= 1; + } + if j <= shift && needle[shift] == haystack[pos + shift] { + return Some(pos); + } + pos += period; + shift = needle.len() - period; + } + } + None + } + + #[inline(always)] + fn find_large_imp( + &self, + mut pre: Option<Pre<'_>>, + haystack: &[u8], + needle: &[u8], + shift: usize, + ) -> Option<usize> { + let mut pos = 0; + let last_byte_pos = match needle.len().checked_sub(1) { + None => return Some(pos), + Some(last_byte) => last_byte, + }; + 'outer: while pos + needle.len() <= haystack.len() { + if let Some(pre) = pre.as_mut() { + if pre.is_effective() { + pos += pre.find(&haystack[pos..])?; + if pos + needle.len() > haystack.len() { + return None; + } + } + } + + if !self.0.byteset.contains(haystack[pos + last_byte_pos]) { + pos += needle.len(); + continue; + } + let mut i = self.0.critical_pos; + while i < needle.len() && needle[i] == haystack[pos + i] { + i += 1; + } + if i < needle.len() { + pos += i - self.0.critical_pos + 1; + } else { + for j in (0..self.0.critical_pos).rev() { + if needle[j] != haystack[pos + j] { + pos += shift; + continue 'outer; + } + } + return Some(pos); + } + } + None + } +} + +impl FinderRev { + /// Create a searcher that finds occurrences of the 
given `needle`. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn new(needle: &[u8]) -> FinderRev { + let byteset = ApproximateByteSet::new(needle); + let min_suffix = Suffix::reverse(needle, SuffixKind::Minimal); + let max_suffix = Suffix::reverse(needle, SuffixKind::Maximal); + let (period_lower_bound, critical_pos) = + if min_suffix.pos < max_suffix.pos { + (min_suffix.period, min_suffix.pos) + } else { + (max_suffix.period, max_suffix.pos) + }; + let shift = Shift::reverse(needle, period_lower_bound, critical_pos); + FinderRev(TwoWay { byteset, critical_pos, shift }) + } + + /// Returns the last occurrence of `needle` in the given `haystack`, or + /// `None` if no such occurrence could be found. + /// + /// The `needle` given must be the same as the `needle` provided to + /// [`FinderRev::new`]. + /// + /// An empty `needle` results in a match at every position in a haystack, + /// including at `haystack.len()`. + #[inline] + pub fn rfind(&self, haystack: &[u8], needle: &[u8]) -> Option<usize> { + // For the reverse case, we don't use a prefilter. It's plausible that + // perhaps we should, but it's a lot of additional code to do it, and + // it's not clear that it's actually worth it. If you have a really + // compelling use case for this, please file an issue. + match self.0.shift { + Shift::Small { period } => { + self.rfind_small_imp(haystack, needle, period) + } + Shift::Large { shift } => { + self.rfind_large_imp(haystack, needle, shift) + } + } + } + + #[inline(always)] + fn rfind_small_imp( + &self, + haystack: &[u8], + needle: &[u8], + period: usize, + ) -> Option<usize> { + let nlen = needle.len(); + let mut pos = haystack.len(); + let mut shift = nlen; + let first_byte = match needle.get(0) { + None => return Some(pos), + Some(&first_byte) => first_byte, + }; + while pos >= nlen { + if !self.0.byteset.contains(haystack[pos - nlen]) { + pos -= nlen; + shift = nlen; + continue; + } + let mut i = cmp::min(self.0.critical_pos, shift); + while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { + i -= 1; + } + if i > 0 || first_byte != haystack[pos - nlen] { + pos -= self.0.critical_pos - i + 1; + shift = nlen; + } else { + let mut j = self.0.critical_pos; + while j < shift && needle[j] == haystack[pos - nlen + j] { + j += 1; + } + if j >= shift { + return Some(pos - nlen); + } + pos -= period; + shift = period; + } + } + None + } + + #[inline(always)] + fn rfind_large_imp( + &self, + haystack: &[u8], + needle: &[u8], + shift: usize, + ) -> Option<usize> { + let nlen = needle.len(); + let mut pos = haystack.len(); + let first_byte = match needle.get(0) { + None => return Some(pos), + Some(&first_byte) => first_byte, + }; + while pos >= nlen { + if !self.0.byteset.contains(haystack[pos - nlen]) { + pos -= nlen; + continue; + } + let mut i = self.0.critical_pos; + while i > 0 && needle[i - 1] == haystack[pos - nlen + i - 1] { + i -= 1; + } + if i > 0 || first_byte != haystack[pos - nlen] { + pos -= self.0.critical_pos - i + 1; + } else { + let mut j = self.0.critical_pos; + while j < nlen && needle[j] == haystack[pos - nlen + j] { + j += 1; + } + if j == nlen { + return Some(pos - nlen); + } + pos -= shift; + } + } + None + } +} + +/// A representation of the amount we're allowed to shift by during Two-Way +/// search. 
+///
+/// When computing a critical factorization of the needle, we find the position
+/// of the critical factorization by finding the needle's maximal (or minimal)
+/// suffix, along with the period of that suffix. It turns out that the period
+/// of that suffix is a lower bound on the period of the needle itself.
+///
+/// This lower bound is equivalent to the actual period of the needle in
+/// some cases. To describe that case, we denote the needle as `x` where
+/// `x = uv` and `v` is the lexicographic maximal suffix of `x`. The lower
+/// bound given here is always the period of `v`, which is `<= period(x)`. The
+/// case where `period(v) == period(x)` occurs when `len(u) < (len(x) / 2)` and
+/// where `u` is a suffix of `v[0..period(v)]`.
+///
+/// This case is important because the search algorithm for when the
+/// periods are equivalent is slightly different from the search algorithm
+/// for when the periods are not equivalent. In particular, when they aren't
+/// equivalent, we know that the period of the needle is no less than half its
+/// length. In this case, we shift by an amount less than or equal to the
+/// period of the needle (determined by the maximum length of the components
+/// of the critical factorization of `x`, i.e., `max(len(u), len(v))`).
+///
+/// The above two cases are represented by the variants below. Each entails
+/// a different instantiation of the Two-Way search algorithm.
+///
+/// N.B. If we could find a way to compute the exact period in all cases,
+/// then we could collapse this case analysis and simplify the algorithm. The
+/// Two-Way paper suggests this is possible, but more reading is required to
+/// grok why the authors didn't pursue that path.
+#[derive(Clone, Copy, Debug)]
+enum Shift {
+    Small { period: usize },
+    Large { shift: usize },
+}
+
+impl Shift {
+    /// Compute the shift for a given needle in the forward direction.
+    ///
+    /// This requires a lower bound on the period and a critical position.
+    /// These can be computed by extracting both the minimal and maximal
+    /// lexicographic suffixes, and choosing the right-most starting position.
+    /// The lower bound on the period is then the period of the chosen suffix.
+    fn forward(
+        needle: &[u8],
+        period_lower_bound: usize,
+        critical_pos: usize,
+    ) -> Shift {
+        let large = cmp::max(critical_pos, needle.len() - critical_pos);
+        if critical_pos * 2 >= needle.len() {
+            return Shift::Large { shift: large };
+        }
+
+        let (u, v) = needle.split_at(critical_pos);
+        if !is_suffix(&v[..period_lower_bound], u) {
+            return Shift::Large { shift: large };
+        }
+        Shift::Small { period: period_lower_bound }
+    }
+
+    /// Compute the shift for a given needle in the reverse direction.
+    ///
+    /// This requires a lower bound on the period and a critical position.
+    /// These can be computed by extracting both the minimal and maximal
+    /// lexicographic suffixes, and choosing the left-most starting position.
+    /// The lower bound on the period is then the period of the chosen suffix.
+ fn reverse( + needle: &[u8], + period_lower_bound: usize, + critical_pos: usize, + ) -> Shift { + let large = cmp::max(critical_pos, needle.len() - critical_pos); + if (needle.len() - critical_pos) * 2 >= needle.len() { + return Shift::Large { shift: large }; + } + + let (v, u) = needle.split_at(critical_pos); + if !is_prefix(&v[v.len() - period_lower_bound..], u) { + return Shift::Large { shift: large }; + } + Shift::Small { period: period_lower_bound } + } +} + +/// A suffix extracted from a needle along with its period. +#[derive(Debug)] +struct Suffix { + /// The starting position of this suffix. + /// + /// If this is a forward suffix, then `&bytes[pos..]` can be used. If this + /// is a reverse suffix, then `&bytes[..pos]` can be used. That is, for + /// forward suffixes, this is an inclusive starting position, where as for + /// reverse suffixes, this is an exclusive ending position. + pos: usize, + /// The period of this suffix. + /// + /// Note that this is NOT necessarily the period of the string from which + /// this suffix comes from. (It is always less than or equal to the period + /// of the original string.) + period: usize, +} + +impl Suffix { + fn forward(needle: &[u8], kind: SuffixKind) -> Suffix { + // suffix represents our maximal (or minimal) suffix, along with + // its period. + let mut suffix = Suffix { pos: 0, period: 1 }; + // The start of a suffix in `needle` that we are considering as a + // more maximal (or minimal) suffix than what's in `suffix`. + let mut candidate_start = 1; + // The current offset of our suffixes that we're comparing. + // + // When the characters at this offset are the same, then we mush on + // to the next position since no decision is possible. When the + // candidate's character is greater (or lesser) than the corresponding + // character than our current maximal (or minimal) suffix, then the + // current suffix is changed over to the candidate and we restart our + // search. Otherwise, the candidate suffix is no good and we restart + // our search on the next candidate. + // + // The three cases above correspond to the three cases in the loop + // below. + let mut offset = 0; + + while candidate_start + offset < needle.len() { + let current = needle[suffix.pos + offset]; + let candidate = needle[candidate_start + offset]; + match kind.cmp(current, candidate) { + SuffixOrdering::Accept => { + suffix = Suffix { pos: candidate_start, period: 1 }; + candidate_start += 1; + offset = 0; + } + SuffixOrdering::Skip => { + candidate_start += offset + 1; + offset = 0; + suffix.period = candidate_start - suffix.pos; + } + SuffixOrdering::Push => { + if offset + 1 == suffix.period { + candidate_start += suffix.period; + offset = 0; + } else { + offset += 1; + } + } + } + } + suffix + } + + fn reverse(needle: &[u8], kind: SuffixKind) -> Suffix { + // See the comments in `forward` for how this works. 
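+        // (Editorial note, not in the upstream source.) As a concrete case,
+        // for the needle "abcabc" the maximal reverse suffix is reported as
+        // `&needle[..6]`, i.e. the whole needle, with period 3; the
+        // `suffix_reverse` test below asserts exactly this.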
+ let mut suffix = Suffix { pos: needle.len(), period: 1 }; + if needle.len() == 1 { + return suffix; + } + let mut candidate_start = match needle.len().checked_sub(1) { + None => return suffix, + Some(candidate_start) => candidate_start, + }; + let mut offset = 0; + + while offset < candidate_start { + let current = needle[suffix.pos - offset - 1]; + let candidate = needle[candidate_start - offset - 1]; + match kind.cmp(current, candidate) { + SuffixOrdering::Accept => { + suffix = Suffix { pos: candidate_start, period: 1 }; + candidate_start -= 1; + offset = 0; + } + SuffixOrdering::Skip => { + candidate_start -= offset + 1; + offset = 0; + suffix.period = suffix.pos - candidate_start; + } + SuffixOrdering::Push => { + if offset + 1 == suffix.period { + candidate_start -= suffix.period; + offset = 0; + } else { + offset += 1; + } + } + } + } + suffix + } +} + +/// The kind of suffix to extract. +#[derive(Clone, Copy, Debug)] +enum SuffixKind { + /// Extract the smallest lexicographic suffix from a string. + /// + /// Technically, this doesn't actually pick the smallest lexicographic + /// suffix. e.g., Given the choice between `a` and `aa`, this will choose + /// the latter over the former, even though `a < aa`. The reasoning for + /// this isn't clear from the paper, but it still smells like a minimal + /// suffix. + Minimal, + /// Extract the largest lexicographic suffix from a string. + /// + /// Unlike `Minimal`, this really does pick the maximum suffix. e.g., Given + /// the choice between `z` and `zz`, this will choose the latter over the + /// former. + Maximal, +} + +/// The result of comparing corresponding bytes between two suffixes. +#[derive(Clone, Copy, Debug)] +enum SuffixOrdering { + /// This occurs when the given candidate byte indicates that the candidate + /// suffix is better than the current maximal (or minimal) suffix. That is, + /// the current candidate suffix should supplant the current maximal (or + /// minimal) suffix. + Accept, + /// This occurs when the given candidate byte excludes the candidate suffix + /// from being better than the current maximal (or minimal) suffix. That + /// is, the current candidate suffix should be dropped and the next one + /// should be considered. + Skip, + /// This occurs when no decision to accept or skip the candidate suffix + /// can be made, e.g., when corresponding bytes are equivalent. In this + /// case, the next corresponding bytes should be compared. + Push, +} + +impl SuffixKind { + /// Returns true if and only if the given candidate byte indicates that + /// it should replace the current suffix as the maximal (or minimal) + /// suffix. + fn cmp(self, current: u8, candidate: u8) -> SuffixOrdering { + use self::SuffixOrdering::*; + + match self { + SuffixKind::Minimal if candidate < current => Accept, + SuffixKind::Minimal if candidate > current => Skip, + SuffixKind::Minimal => Push, + SuffixKind::Maximal if candidate > current => Accept, + SuffixKind::Maximal if candidate < current => Skip, + SuffixKind::Maximal => Push, + } + } +} + +/// A bitset used to track whether a particular byte exists in a needle or not. +/// +/// Namely, bit 'i' is set if and only if byte%64==i for any byte in the +/// needle. If a particular byte in the haystack is NOT in this set, then one +/// can conclude that it is also not in the needle, and thus, one can advance +/// in the haystack by needle.len() bytes. 
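+///
+/// For example (editorial note, not in the upstream source): searching for
+/// `b"a"` sets bit 33, since `b'a'` is 97 and `97 % 64 == 33`. Because `b'!'`
+/// is 33 and also maps to bit 33, `contains(b'!')` would report true for that
+/// needle: false positives like this merely fall through to the ordinary
+/// byte-wise comparison, while false negatives can never occur.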
+#[derive(Clone, Copy, Debug)] +struct ApproximateByteSet(u64); + +impl ApproximateByteSet { + /// Create a new set from the given needle. + fn new(needle: &[u8]) -> ApproximateByteSet { + let mut bits = 0; + for &b in needle { + bits |= 1 << (b % 64); + } + ApproximateByteSet(bits) + } + + /// Return true if and only if the given byte might be in this set. This + /// may return a false positive, but will never return a false negative. + #[inline(always)] + fn contains(&self, byte: u8) -> bool { + self.0 & (1 << (byte % 64)) != 0 + } +} + +#[cfg(test)] +mod tests { + use alloc::vec::Vec; + + use super::*; + + /// Convenience wrapper for computing the suffix as a byte string. + fn get_suffix_forward(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { + let s = Suffix::forward(needle, kind); + (&needle[s.pos..], s.period) + } + + /// Convenience wrapper for computing the reverse suffix as a byte string. + fn get_suffix_reverse(needle: &[u8], kind: SuffixKind) -> (&[u8], usize) { + let s = Suffix::reverse(needle, kind); + (&needle[..s.pos], s.period) + } + + /// Return all of the non-empty suffixes in the given byte string. + fn suffixes(bytes: &[u8]) -> Vec<&[u8]> { + (0..bytes.len()).map(|i| &bytes[i..]).collect() + } + + /// Return the lexicographically maximal suffix of the given byte string. + fn naive_maximal_suffix_forward(needle: &[u8]) -> &[u8] { + let mut sufs = suffixes(needle); + sufs.sort(); + sufs.pop().unwrap() + } + + /// Return the lexicographically maximal suffix of the reverse of the given + /// byte string. + fn naive_maximal_suffix_reverse(needle: &[u8]) -> Vec<u8> { + let mut reversed = needle.to_vec(); + reversed.reverse(); + let mut got = naive_maximal_suffix_forward(&reversed).to_vec(); + got.reverse(); + got + } + + define_substring_forward_quickcheck!(|h, n| Some( + Finder::new(n).find(h, n) + )); + define_substring_reverse_quickcheck!(|h, n| Some( + FinderRev::new(n).rfind(h, n) + )); + + #[test] + fn forward() { + crate::tests::substring::Runner::new() + .fwd(|h, n| Some(Finder::new(n).find(h, n))) + .run(); + } + + #[test] + fn reverse() { + crate::tests::substring::Runner::new() + .rev(|h, n| Some(FinderRev::new(n).rfind(h, n))) + .run(); + } + + #[test] + fn suffix_forward() { + macro_rules! assert_suffix_min { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_forward($given.as_bytes(), SuffixKind::Minimal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + macro_rules! 
assert_suffix_max { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_forward($given.as_bytes(), SuffixKind::Maximal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + assert_suffix_min!("a", "a", 1); + assert_suffix_max!("a", "a", 1); + + assert_suffix_min!("ab", "ab", 2); + assert_suffix_max!("ab", "b", 1); + + assert_suffix_min!("ba", "a", 1); + assert_suffix_max!("ba", "ba", 2); + + assert_suffix_min!("abc", "abc", 3); + assert_suffix_max!("abc", "c", 1); + + assert_suffix_min!("acb", "acb", 3); + assert_suffix_max!("acb", "cb", 2); + + assert_suffix_min!("cba", "a", 1); + assert_suffix_max!("cba", "cba", 3); + + assert_suffix_min!("abcabc", "abcabc", 3); + assert_suffix_max!("abcabc", "cabc", 3); + + assert_suffix_min!("abcabcabc", "abcabcabc", 3); + assert_suffix_max!("abcabcabc", "cabcabc", 3); + + assert_suffix_min!("abczz", "abczz", 5); + assert_suffix_max!("abczz", "zz", 1); + + assert_suffix_min!("zzabc", "abc", 3); + assert_suffix_max!("zzabc", "zzabc", 5); + + assert_suffix_min!("aaa", "aaa", 1); + assert_suffix_max!("aaa", "aaa", 1); + + assert_suffix_min!("foobar", "ar", 2); + assert_suffix_max!("foobar", "r", 1); + } + + #[test] + fn suffix_reverse() { + macro_rules! assert_suffix_min { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_reverse($given.as_bytes(), SuffixKind::Minimal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + macro_rules! assert_suffix_max { + ($given:expr, $expected:expr, $period:expr) => { + let (got_suffix, got_period) = + get_suffix_reverse($given.as_bytes(), SuffixKind::Maximal); + let got_suffix = core::str::from_utf8(got_suffix).unwrap(); + assert_eq!(($expected, $period), (got_suffix, got_period)); + }; + } + + assert_suffix_min!("a", "a", 1); + assert_suffix_max!("a", "a", 1); + + assert_suffix_min!("ab", "a", 1); + assert_suffix_max!("ab", "ab", 2); + + assert_suffix_min!("ba", "ba", 2); + assert_suffix_max!("ba", "b", 1); + + assert_suffix_min!("abc", "a", 1); + assert_suffix_max!("abc", "abc", 3); + + assert_suffix_min!("acb", "a", 1); + assert_suffix_max!("acb", "ac", 2); + + assert_suffix_min!("cba", "cba", 3); + assert_suffix_max!("cba", "c", 1); + + assert_suffix_min!("abcabc", "abca", 3); + assert_suffix_max!("abcabc", "abcabc", 3); + + assert_suffix_min!("abcabcabc", "abcabca", 3); + assert_suffix_max!("abcabcabc", "abcabcabc", 3); + + assert_suffix_min!("abczz", "a", 1); + assert_suffix_max!("abczz", "abczz", 5); + + assert_suffix_min!("zzabc", "zza", 3); + assert_suffix_max!("zzabc", "zz", 1); + + assert_suffix_min!("aaa", "aaa", 1); + assert_suffix_max!("aaa", "aaa", 1); + } + + #[cfg(not(miri))] + quickcheck::quickcheck! { + fn qc_suffix_forward_maximal(bytes: Vec<u8>) -> bool { + if bytes.is_empty() { + return true; + } + + let (got, _) = get_suffix_forward(&bytes, SuffixKind::Maximal); + let expected = naive_maximal_suffix_forward(&bytes); + got == expected + } + + fn qc_suffix_reverse_maximal(bytes: Vec<u8>) -> bool { + if bytes.is_empty() { + return true; + } + + let (got, _) = get_suffix_reverse(&bytes, SuffixKind::Maximal); + let expected = naive_maximal_suffix_reverse(&bytes); + expected == got + } + } + + // This is a regression test caught by quickcheck that exercised a bug in + // the reverse small period handling. 
The bug was that we were using 'if j + // == shift' to determine if a match occurred, but the correct guard is 'if + // j >= shift', which matches the corresponding guard in the forward impl. + #[test] + fn regression_rev_small_period() { + let rfind = |h, n| FinderRev::new(n).rfind(h, n); + let haystack = "ababaz"; + let needle = "abab"; + assert_eq!(Some(0), rfind(haystack.as_bytes(), needle.as_bytes())); + } +} |